Dataset schema (one row per source file):
  hexsha: string (length 40)
  size: int64 (1 to 1.03M)
  ext: string (10 classes)
  lang: string (1 class)
  max_stars_repo_path: string (length 3 to 239)
  max_stars_repo_name: string (length 5 to 130)
  max_stars_repo_head_hexsha: string (length 40 to 78)
  max_stars_repo_licenses: list (length 1 to 10)
  max_stars_count: int64 (1 to 191k, nullable)
  max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
  max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
  max_issues_repo_path: string (length 3 to 239)
  max_issues_repo_name: string (length 5 to 130)
  max_issues_repo_head_hexsha: string (length 40 to 78)
  max_issues_repo_licenses: list (length 1 to 10)
  max_issues_count: int64 (1 to 67k, nullable)
  max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
  max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
  max_forks_repo_path: string (length 3 to 239)
  max_forks_repo_name: string (length 5 to 130)
  max_forks_repo_head_hexsha: string (length 40 to 78)
  max_forks_repo_licenses: list (length 1 to 10)
  max_forks_count: int64 (1 to 105k, nullable)
  max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
  max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
  content: string (length 1 to 1.03M)
  avg_line_length: float64 (1 to 958k)
  max_line_length: int64 (1 to 1.03M)
  alphanum_fraction: float64 (0 to 1)
hexsha: 4a1bfb09529160e3112f6b7052c050e2180370fa | size: 3,170 | ext: py | lang: Python
max_stars: path=messenger.py, repo=taidalab/yunba-smarthome, head=01027f3d86f3424eea335083456d47c614cf3ad2, licenses=["MIT"], count=12, events=2016-01-14T15:10:35.000Z to 2019-07-15T08:11:40.000Z
max_issues: path=messenger.py, repo=taidalab/yunba-smarthome, head=01027f3d86f3424eea335083456d47c614cf3ad2, licenses=["MIT"], count=null, events=null
max_forks: path=messenger.py, repo=taidalab/yunba-smarthome, head=01027f3d86f3424eea335083456d47c614cf3ad2, licenses=["MIT"], count=6, events=2016-01-27T03:05:37.000Z to 2018-08-21T11:58:01.000Z
content:
#!/usr/bin/env python
import time
import sys
import thread
from socketIO_client import SocketIO
import config
import led
class Messenger:
def __init__(self, message_callback):
self.can_pub = False
self.first_connect = True
print('messenger init')
self.message_callback = message_callback
thread.start_new_thread(self.timer, ())
def __del__(self):
print('messenger del')
def on_socket_connect_ack(self, args):
print 'on_socket_connect_ack: ', args
if self.first_connect == True:
            led.turn_on(config.LED_LIVING, 1, 100) #socketio connected
self.first_connect = False
self.socketIO.emit('connect', {'appkey': config.APPKEY, 'customid': config.CUSTOMID})
# self.socketIO.emit('connect', {'appkey': config.APPKEY})
def on_connack(self, args):
print 'on_connack: ', args
# self.socketIO.emit('subscribe', {'topic': config.TOPIC})
self.socketIO.emit('set_alias', {'alias': config.ALIAS})
def on_puback(self, args):
#print 'on_puback: ', args
pass
def on_suback(self, args):
print 'on_suback: ', args
self.socketIO.emit('set_alias', {'alias': config.ALIAS})
def on_message(self, args):
print 'on_message: ', args
if self.message_callback != None:
self.message_callback(args)
def on_set_alias(self, args):
print 'on_set_alias: ', args
self.can_pub = True
def on_get_alias(self, args):
print 'on_get_alias: ', args
def on_alias(self, args):
print 'on_alias: ', args
def on_get_topic_list_ack(self, args):
print 'on_get_topic_list_ack: ', args
def on_get_alias_list_ack(self, args):
print 'on_get_alias_list_ack: ', args
def on_publish2_ack(self, args):
print 'on_publish2_ack: ', args
def on_publish2_recvack(self, args):
print 'on_publish2_recvack: ', args
def on_get_state_ack(self, args):
print 'on_get_state_ack: ', args
def timer(self):
self.socketIO = SocketIO('sock.yunba.io', 3000)
self.socketIO.on('socketconnectack', self.on_socket_connect_ack)
self.socketIO.on('connack', self.on_connack)
self.socketIO.on('puback', self.on_puback)
self.socketIO.on('suback', self.on_suback)
self.socketIO.on('message', self.on_message)
self.socketIO.on('set_alias_ack', self.on_set_alias)
self.socketIO.on('get_topic_list_ack', self.on_get_topic_list_ack)
self.socketIO.on('get_alias_list_ack', self.on_get_alias_list_ack)
# self.socketIO.on('puback', self.on_publish2_ack)
self.socketIO.on('recvack', self.on_publish2_recvack)
self.socketIO.on('get_state_ack', self.on_get_state_ack)
self.socketIO.on('alias', self.on_alias)
self.socketIO.wait()
def publish(self, msg, qos):
if self.can_pub == True:
print 'publish: ', msg
self.socketIO.emit('publish', {'topic': config.TOPIC, 'msg': msg, 'qos': qos})
if __name__ == '__main__':
msg = Messenger(None)
while True:
time.sleep(1)
avg_line_length: 31.386139 | max_line_length: 93 | alphanum_fraction: 0.643218
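A minimal usage sketch for the Messenger class above. It assumes the same Python 2 environment and a config module providing APPKEY, CUSTOMID, ALIAS, TOPIC and LED_LIVING, exactly as messenger.py expects; the callback, payload and polling interval are illustrative.

import time
from messenger import Messenger

def handle_message(args):
    print(args)  # args is whatever the Yunba 'message' event delivers

m = Messenger(handle_message)
while True:
    if m.can_pub:  # becomes True once set_alias_ack has been received
        m.publish('{"status": "ping"}', 1)  # publishes to config.TOPIC with QoS 1
    time.sleep(5)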
hexsha: 4a1bfc208e0bd6eb78d218c6cbca937ed3fb2b5a | size: 5,985 | ext: py | lang: Python
max_stars: path=data/data_scapers/index_exchanges_tickers.py, repo=AlainDaccache98/Quantropy, head=5d678a802adb4720c17e6ae4c313b1e37db8f313, licenses=["MIT"], count=1, events=2021-02-10T03:50:21.000Z to 2021-02-10T03:50:21.000Z
max_issues: path=data/data_scapers/index_exchanges_tickers.py, repo=AlainDaccache98/Quantropy, head=5d678a802adb4720c17e6ae4c313b1e37db8f313, licenses=["MIT"], count=null, events=null
max_forks: path=data/data_scapers/index_exchanges_tickers.py, repo=AlainDaccache98/Quantropy, head=5d678a802adb4720c17e6ae4c313b1e37db8f313, licenses=["MIT"], count=null, events=null
content:
import unicodedata
import requests
import config
import pandas as pd
import os
import urllib
from bs4 import BeautifulSoup
from datetime import datetime
def store_to_csv(file_path, tickers):
current_date = datetime.now().strftime('%Y-%m-%d')
if not os.path.exists(file_path):
df = pd.DataFrame.from_dict({current_date: tickers})
df.to_csv(file_path)
else:
df = pd.read_csv(file_path, index_col=0)
df[current_date] = tickers
df = df.T
df.to_csv(file_path)
def save_current_nasdaq():
urls = ['ftp://ftp.nasdaqtrader.com/symboldirectory/nasdaqlisted.txt',
'ftp://ftp.nasdaqtrader.com/symboldirectory/nasdaqtraded.txt']
main_df = pd.DataFrame()
for url in urls:
path = os.path.join(config.MARKET_EXCHANGES_DIR_PATH, 'NASDAQ_Listed.txt')
urllib.request.urlretrieve(url, path)
if main_df.empty:
main_df = pd.read_csv(path, sep="|")[:-1]
else:
cur_df = pd.read_csv(path, sep="|")[:-1]
main_df = pd.concat([main_df, cur_df], axis=0, ignore_index=True)
main_df = main_df[main_df['ETF'] == 'N'] # remove etfs
main_df = main_df.drop_duplicates('Symbol').reset_index(drop=True)
main_df.set_index('Symbol', inplace=True)
main_df.sort_index(inplace=True)
main_df = main_df[~main_df.index.str.contains('\$')] # this is to remove derived asset classes
os.remove(path)
df = pd.DataFrame.from_dict({datetime.now(): main_df.index})
pd.DataFrame.to_pickle(df, path=os.path.join(config.MARKET_INDICES_DIR_PATH, 'NASDAQ-Historical-Constituents.pkl'))
return main_df.index
def save_historical_dow_jones_tickers(save_pickle=True):
url = 'https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average'
resp = requests.get(url)
soup = BeautifulSoup(resp.text, 'lxml')
table = soup.find('table', {'class': 'wikitable sortable'})
tickers = []
for row in table.findAll('tr')[1:]:
ticker = unicodedata.normalize("NFKD", row.findAll('td')[1].text).split(': ')[-1]
ticker = ticker.strip()
tickers.append(ticker)
if save_pickle:
df = pd.DataFrame.from_dict({datetime.today(): tickers}, orient='index')
pd.DataFrame.to_pickle(df, path=os.path.join(config.DATA_DIR_PATH, 'test_data',
'Dow-Jones-Historical-Constituents.pkl'))
return {datetime.today(): tickers}
def save_historical_sp500_tickers(save_pickle=True):
resp = requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
soup = BeautifulSoup(resp.text, 'lxml')
current_tickers_table, historical_tickers_table = soup.findAll('table', {'class': 'wikitable sortable'})
current_tickers = [row.findAll('td')[0].text.strip() for row in current_tickers_table.findAll('tr')[1:]]
historical_changes_dictio = {}
for row in historical_tickers_table.findAll('tr')[2:]:
row_data = row.findAll('td')
date = datetime.strptime(row_data[0].text.rstrip(), '%B %d, %Y')
ticker_added, ticker_removed = row_data[1].text.rstrip(), row_data[3].text.rstrip()
if date not in historical_changes_dictio.keys():
historical_changes_dictio[date] = {'Added': [], 'Removed': []}
historical_changes_dictio[date]['Added'].append(ticker_added)
historical_changes_dictio[date]['Removed'].append(ticker_removed)
cumulative_dictio = {}
    # TODO Not perfectly accurate, as ticker names can change with time (e.g. SAIC = SAI)
for date, added_removed in historical_changes_dictio.items():
cumulative_dictio[date] = current_tickers
for added in added_removed['Added']:
if len(added) > 0: # before this date, the ticker wasn't there
try:
current_tickers.remove(added)
except:
print(f'Manual check needed for added ticker {added} on {date}')
for removed in added_removed['Removed']:
if len(removed) > 0: # before this date, the ticker was there
current_tickers.append(removed)
if save_pickle:
cumulative_df = pd.DataFrame.from_dict(cumulative_dictio, orient='index')
pd.DataFrame.to_pickle(cumulative_df,
path=os.path.join(config.MARKET_INDICES_DIR_PATH, 'S&P-500-Historical-Constituents.pkl'))
return cumulative_dictio
def url_to_pickle_clean_to_df(url: str, output_name: str, skiprows: int = 0):
path = os.path.join(config.MARKET_INDICES_DIR_PATH, output_name)
urllib.request.urlretrieve(url, path)
try:
df = pd.read_excel(pd.ExcelFile(path), index_col=0, skiprows=skiprows)
except:
df = pd.read_html(path, index_col=0, skiprows=skiprows)
os.remove(path)
# print(df.to_string())
tickers = list(df.index)
file_path = os.path.join(config.MARKET_INDICES_DIR_PATH, output_name)
cumulative_df = pd.DataFrame.from_dict({datetime.now(): tickers}, orient='index')
pd.DataFrame.to_pickle(cumulative_df, path=file_path)
return tickers
def save_current_russell_3000_tickers():
return url_to_pickle_clean_to_df(
url="http://www.beatthemarketanalyzer.com/blog/wp-content/uploads/2016/10/Russell-3000-Stock-Tickers-List.xlsx",
output_name='Russell-3000-Stock-Tickers.csv',
skiprows=3)
def save_total_us_stock_market_tickers():
return url_to_pickle_clean_to_df(
url='https://www.ishares.com/us/products/239724/ishares-core-sp-total-us-stock-market-etf/1521942788811.ajax?fileType=xls&fileName=iShares-Core-SP-Total-US-Stock-Market-ETF_fund&dataType=fund',
output_name='US-Stock-Market-Tickers.xls', skiprows=7)
if __name__ == '__main__':
# save_current_dow_jones_tickers()
# save_current_nasdaq()
# save_current_russell_3000_tickers()
# save_total_us_stock_market_tickers()
save_historical_sp500_tickers()
avg_line_length: 42.147887 | max_line_length: 201 | alphanum_fraction: 0.675021
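A short driver sketch for the scrapers above, assuming the functions are importable from this module and the config paths exist; the CSV filename is illustrative.

# scrape today's Dow Jones constituents from Wikipedia, then persist them
dow_snapshot = save_historical_dow_jones_tickers(save_pickle=False)  # {date: [tickers]}
tickers = list(dow_snapshot.values())[0]
store_to_csv('dow_30_constituents.csv', tickers)  # writes/updates a dated snapshot file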
hexsha: 4a1bfc31dac2bc9b0d78adeda1d2cf5fd12fe5a4 | size: 153 | ext: py | lang: Python
max_stars: path=7_kyu/Smallest_Product.py, repo=UlrichBerntien/Codewars-Katas, head=bbd025e67aa352d313564d3862db19fffa39f552, licenses=["MIT"], count=null, events=null
max_issues: path=7_kyu/Smallest_Product.py, repo=UlrichBerntien/Codewars-Katas, head=bbd025e67aa352d313564d3862db19fffa39f552, licenses=["MIT"], count=null, events=null
max_forks: path=7_kyu/Smallest_Product.py, repo=UlrichBerntien/Codewars-Katas, head=bbd025e67aa352d313564d3862db19fffa39f552, licenses=["MIT"], count=null, events=null
content:
from typing import List
from functools import reduce
def smallest_product(a: List[List[int]]) -> int:
return min(reduce(int.__mul__,it) for it in a)
avg_line_length: 30.6 | max_line_length: 50 | alphanum_fraction: 0.745098
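A quick worked example for the kata solution above (input chosen for illustration): each inner list is reduced to its product and the smallest product is returned.

assert smallest_product([[1, 2, 3], [4, 5], [-1, 6]]) == -6  # products are 6, 20 and -6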
hexsha: 4a1bfc42143fe9dd620f3a8873c2d17fbcdcb993 | size: 7,793 | ext: py | lang: Python
max_stars: path=projectragan/docs/conf.py, repo=hsiaoyi0504/RaGAN, head=45b84401964af14fb9642071b81b309a9764f055, licenses=["MIT"], count=1, events=2019-07-10T17:57:13.000Z to 2019-07-10T17:57:13.000Z
max_issues: path=projectragan/docs/conf.py, repo=hsiaoyi0504/RaGAN, head=45b84401964af14fb9642071b81b309a9764f055, licenses=["MIT"], count=null, events=null
max_forks: path=projectragan/docs/conf.py, repo=hsiaoyi0504/RaGAN, head=45b84401964af14fb9642071b81b309a9764f055, licenses=["MIT"], count=4, events=2018-09-12T16:36:09.000Z to 2019-01-10T17:02:32.000Z
content:
# -*- coding: utf-8 -*-
#
# ProjectRaGAN documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ProjectRaGAN'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'projectragandoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'projectragan.tex',
u'ProjectRaGAN Documentation',
u"TeamRaGAN", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'projectragan', u'ProjectRaGAN Documentation',
[u"TeamRaGAN"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'projectragan', u'ProjectRaGAN Documentation',
u"TeamRaGAN", 'ProjectRaGAN',
'GANs applied to radiology images', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
avg_line_length: 31.808163 | max_line_length: 80 | alphanum_fraction: 0.708071
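The conf.py above is consumed by sphinx-build. A hedged sketch of driving the build from Python, assuming a reasonably recent Sphinx is installed; the source and output directories follow the repo path given in the metadata.

from sphinx.cmd.build import build_main

# equivalent to: sphinx-build -b html projectragan/docs projectragan/docs/_build/html
exit_code = build_main(["-b", "html", "projectragan/docs", "projectragan/docs/_build/html"])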
hexsha: 4a1bfdf750c99279f4f32b78d9e5104f72de9ed9 | size: 17,107 | ext: py | lang: Python
max_stars: path=grr/lib/flows/general/collectors_test.py, repo=ethicalhackeragnidhra/Grr, head=9ff9178396d9d16575e42dded33627cb09ac3af1, licenses=["Apache-2.0"], count=1, events=2020-12-18T00:47:19.000Z to 2020-12-18T00:47:19.000Z
max_issues: path=grr/lib/flows/general/collectors_test.py, repo=ethicalhackeragnidhra/Grr, head=9ff9178396d9d16575e42dded33627cb09ac3af1, licenses=["Apache-2.0"], count=null, events=null
max_forks: path=grr/lib/flows/general/collectors_test.py, repo=ethicalhackeragnidhra/Grr, head=9ff9178396d9d16575e42dded33627cb09ac3af1, licenses=["Apache-2.0"], count=null, events=null
content:
#!/usr/bin/env python
"""Test the collector flows.
To reduce the size of this module, additional collector flow tests are split out
into collectors_*_test.py files.
"""
import os
import mock
import psutil
from grr import config
from grr.client.client_actions import standard
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact
from grr.lib import artifact_registry
from grr.lib import artifact_utils
from grr.lib import flags
from grr.lib import flow
from grr.lib import sequential_collection
from grr.lib import test_lib
from grr.lib import utils
# pylint: disable=unused-import
from grr.lib.flows.general import artifact_fallbacks
# pylint: enable=unused-import
from grr.lib.flows.general import collectors
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
def ProcessIter():
return iter([test_lib.MockWindowsProcess()])
class TestArtifactCollectors(test_lib.FlowTestsBaseclass):
"""Test the artifact collection mechanism with fake artifacts."""
def setUp(self):
"""Make sure things are initialized."""
super(TestArtifactCollectors, self).setUp()
test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifacts.json")
artifact_registry.REGISTRY.AddFileSource(test_artifacts_file)
self.fakeartifact = artifact_registry.REGISTRY.GetArtifact("FakeArtifact")
self.fakeartifact2 = artifact_registry.REGISTRY.GetArtifact("FakeArtifact2")
self.output_count = 0
with aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw") as fd:
fd.Set(fd.Schema.SYSTEM("Linux"))
kb = fd.Schema.KNOWLEDGE_BASE()
artifact.SetCoreGRRKnowledgeBaseValues(kb, fd)
fd.Set(kb)
def tearDown(self):
super(TestArtifactCollectors, self).tearDown()
self.fakeartifact.sources = [] # Reset any ArtifactSources
self.fakeartifact.conditions = [] # Reset any Conditions
self.fakeartifact2.sources = [] # Reset any ArtifactSources
self.fakeartifact2.conditions = [] # Reset any Conditions
def testInterpolateArgs(self):
collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
kb = rdf_client.KnowledgeBase()
kb.MergeOrAddUser(rdf_client.User(username="test1"))
kb.MergeOrAddUser(rdf_client.User(username="test2"))
collect_flow.state["knowledge_base"] = kb
collect_flow.current_artifact_name = "blah"
collect_flow.args = artifact_utils.ArtifactCollectorFlowArgs()
test_rdf = rdf_client.KnowledgeBase()
action_args = {
"usernames": ["%%users.username%%", "%%users.username%%"],
"nointerp": "asdfsdf",
"notastring": test_rdf
}
kwargs = collect_flow.InterpolateDict(action_args)
self.assertItemsEqual(kwargs["usernames"],
["test1", "test2", "test1", "test2"])
self.assertEqual(kwargs["nointerp"], "asdfsdf")
self.assertEqual(kwargs["notastring"], test_rdf)
# We should be using an array since users.username will expand to multiple
# values.
self.assertRaises(ValueError, collect_flow.InterpolateDict,
{"bad": "%%users.username%%"})
list_args = collect_flow.InterpolateList(
["%%users.username%%", r"%%users.username%%\aa"])
self.assertItemsEqual(list_args,
["test1", "test2", r"test1\aa", r"test2\aa"])
list_args = collect_flow.InterpolateList(["one"])
self.assertEqual(list_args, ["one"])
# Ignore the failure in users.desktop, report the others.
collect_flow.args.ignore_interpolation_errors = True
list_args = collect_flow.InterpolateList(
["%%users.desktop%%", r"%%users.username%%\aa"])
self.assertItemsEqual(list_args, [r"test1\aa", r"test2\aa"])
# Both fail.
list_args = collect_flow.InterpolateList(
[r"%%users.desktop%%\aa", r"%%users.sid%%\aa"])
self.assertItemsEqual(list_args, [])
def testGrepRegexCombination(self):
collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
self.assertEqual(collect_flow._CombineRegex([r"simple"]), "simple")
self.assertEqual(collect_flow._CombineRegex(["a", "b"]), "(a)|(b)")
self.assertEqual(collect_flow._CombineRegex(["a", "b", "c"]), "(a)|(b)|(c)")
self.assertEqual(
collect_flow._CombineRegex(["a|b", "[^_]b", "c|d"]),
"(a|b)|([^_]b)|(c|d)")
def testGrep(self):
class MockCallFlow(object):
def CallFlow(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
mock_call_flow = MockCallFlow()
with utils.Stubber(collectors.ArtifactCollectorFlow, "CallFlow",
mock_call_flow.CallFlow):
collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
collect_flow.args = mock.Mock()
collect_flow.args.ignore_interpolation_errors = False
kb = rdf_client.KnowledgeBase()
kb.MergeOrAddUser(rdf_client.User(username="test1"))
kb.MergeOrAddUser(rdf_client.User(username="test2"))
collect_flow.state["knowledge_base"] = kb
collect_flow.current_artifact_name = "blah"
collector = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.GREP,
attributes={
"paths": ["/etc/passwd"],
"content_regex_list": [r"^a%%users.username%%b$"]
})
collect_flow.Grep(collector, rdf_paths.PathSpec.PathType.TSK)
conditions = mock_call_flow.kwargs["conditions"]
self.assertEqual(len(conditions), 1)
regexes = conditions[0].contents_regex_match.regex.SerializeToString()
self.assertItemsEqual(regexes.split("|"), ["(^atest1b$)", "(^atest2b$)"])
self.assertEqual(mock_call_flow.kwargs["paths"], ["/etc/passwd"])
def testGetArtifact1(self):
"""Test we can get a basic artifact."""
client_mock = action_mocks.FileFinderClientMock()
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
# Dynamically add an ArtifactSource specifying the base path.
file_path = os.path.join(self.base_path, "test_img.dd")
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.FILE,
attributes={"paths": [file_path]})
self.fakeartifact.sources.append(coll1)
artifact_list = ["FakeArtifact"]
for _ in test_lib.TestFlowHelper(
collectors.ArtifactCollectorFlow.__name__,
client_mock,
artifact_list=artifact_list,
use_tsk=False,
token=self.token,
client_id=self.client_id):
pass
# Test the AFF4 file that was created.
fd1 = aff4.FACTORY.Open(
"%s/fs/os/%s" % (self.client_id, file_path), token=self.token)
fd2 = open(file_path, "rb")
fd2.seek(0, 2)
self.assertEqual(fd2.tell(), int(fd1.Get(fd1.Schema.SIZE)))
def testArtifactSkipping(self):
client_mock = action_mocks.ActionMock()
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
# This does not match the Artifact so it will not be collected.
client.Set(client.Schema.SYSTEM("Windows"))
kb = client.Get(client.Schema.KNOWLEDGE_BASE)
kb.os = "Windows"
client.Set(client.Schema.KNOWLEDGE_BASE, kb)
client.Flush()
artifact_list = ["FakeArtifact"]
for s in test_lib.TestFlowHelper(
collectors.ArtifactCollectorFlow.__name__,
client_mock,
artifact_list=artifact_list,
use_tsk=False,
token=self.token,
client_id=self.client_id):
session_id = s
flow_obj = aff4.FACTORY.Open(session_id, token=self.token)
self.assertEqual(len(flow_obj.state.artifacts_skipped_due_to_condition), 1)
self.assertEqual(flow_obj.state.artifacts_skipped_due_to_condition[0],
["FakeArtifact", "os == 'Linux'"])
def testRunGrrClientActionArtifact(self):
"""Test we can get a GRR client artifact."""
with utils.Stubber(psutil, "process_iter", ProcessIter):
client_mock = action_mocks.ActionMock(standard.ListProcesses)
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
attributes={"client_action": standard.ListProcesses.__name__})
self.fakeartifact.sources.append(coll1)
artifact_list = ["FakeArtifact"]
for s in test_lib.TestFlowHelper(
collectors.ArtifactCollectorFlow.__name__,
client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id):
session_id = s
fd = flow.GRRFlow.ResultCollectionForFID(session_id, token=self.token)
self.assertTrue(isinstance(list(fd)[0], rdf_client.Process))
self.assertTrue(len(fd) == 1)
def testRunGrrClientActionArtifactSplit(self):
"""Test that artifacts get split into separate collections."""
with utils.Stubber(psutil, "process_iter", ProcessIter):
client_mock = action_mocks.ActionMock(standard.ListProcesses)
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
attributes={"client_action": standard.ListProcesses.__name__})
self.fakeartifact.sources.append(coll1)
self.fakeartifact2.sources.append(coll1)
artifact_list = ["FakeArtifact", "FakeArtifact2"]
for s in test_lib.TestFlowHelper(
collectors.ArtifactCollectorFlow.__name__,
client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id,
split_output_by_artifact=True):
session_id = s
# Check that we got two separate collections based on artifact name
fd = collectors.ArtifactCollectorFlow.ResultCollectionForArtifact(
session_id, "FakeArtifact", token=self.token)
self.assertTrue(isinstance(list(fd)[0], rdf_client.Process))
self.assertEqual(len(fd), 1)
fd = collectors.ArtifactCollectorFlow.ResultCollectionForArtifact(
session_id, "FakeArtifact2", token=self.token)
self.assertEqual(len(fd), 1)
self.assertTrue(isinstance(list(fd)[0], rdf_client.Process))
def testConditions(self):
"""Test we can get a GRR client artifact with conditions."""
with utils.Stubber(psutil, "process_iter", ProcessIter):
# Run with false condition.
client_mock = action_mocks.ActionMock(standard.ListProcesses)
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
attributes={"client_action": standard.ListProcesses.__name__},
conditions=["os == 'Windows'"])
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__,
sequential_collection.GeneralIndexedCollection)
self.assertEqual(len(fd), 0)
# Now run with matching or condition.
coll1.conditions = ["os == 'Linux' or os == 'Windows'"]
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__,
sequential_collection.GeneralIndexedCollection)
self.assertNotEqual(len(fd), 0)
# Now run with impossible or condition.
coll1.conditions.append("os == 'NotTrue'")
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__,
sequential_collection.GeneralIndexedCollection)
self.assertEqual(len(fd), 0)
def testRegistryValueArtifact(self):
with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.REGISTRY,
test_lib.FakeRegistryVFSHandler):
with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
test_lib.FakeFullVFSHandler):
client_mock = action_mocks.ActionMock(standard.StatFile)
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.REGISTRY_VALUE,
attributes={
"key_value_pairs": [{
"key": (r"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet"
r"\Control\Session Manager"),
"value":
"BootExecute"
}]
})
self.fakeartifact.sources.append(coll1)
artifact_list = ["FakeArtifact"]
for s in test_lib.TestFlowHelper(
collectors.ArtifactCollectorFlow.__name__,
client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id):
session_id = s
# Test the statentry got stored.
fd = flow.GRRFlow.ResultCollectionForFID(session_id, token=self.token)
self.assertTrue(isinstance(list(fd)[0], rdf_client.StatEntry))
urn = fd[0].pathspec.AFF4Path(self.client_id)
self.assertTrue(str(urn).endswith("BootExecute"))
def testRegistryDefaultValueArtifact(self):
with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.REGISTRY,
test_lib.FakeRegistryVFSHandler):
with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
test_lib.FakeFullVFSHandler):
client_mock = action_mocks.ActionMock(standard.StatFile)
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.REGISTRY_VALUE,
attributes={
"key_value_pairs": [{
"key": (r"HKEY_LOCAL_MACHINE/SOFTWARE/ListingTest"),
"value": ""
}]
})
self.fakeartifact.sources.append(coll1)
artifact_list = ["FakeArtifact"]
for s in test_lib.TestFlowHelper(
collectors.ArtifactCollectorFlow.__name__,
client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id):
session_id = s
fd = flow.GRRFlow.ResultCollectionForFID(session_id, token=self.token)
self.assertTrue(isinstance(list(fd)[0], rdf_client.StatEntry))
self.assertEqual(fd[0].registry_data.GetValue(), "DefaultValue")
def testSupportedOS(self):
"""Test supported_os inside the collector object."""
with utils.Stubber(psutil, "process_iter", ProcessIter):
# Run with false condition.
client_mock = action_mocks.ActionMock(standard.ListProcesses)
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
attributes={"client_action": standard.ListProcesses.__name__},
supported_os=["Windows"])
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__,
sequential_collection.GeneralIndexedCollection)
self.assertEqual(len(fd), 0)
# Now run with matching or condition.
coll1.conditions = []
coll1.supported_os = ["Linux", "Windows"]
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__,
sequential_collection.GeneralIndexedCollection)
self.assertNotEqual(len(fd), 0)
# Now run with impossible or condition.
coll1.conditions = ["os == 'Linux' or os == 'Windows'"]
coll1.supported_os = ["NotTrue"]
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__,
sequential_collection.GeneralIndexedCollection)
self.assertEqual(len(fd), 0)
def _RunClientActionArtifact(self, client_mock, artifact_list):
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
self.output_count += 1
for s in test_lib.TestFlowHelper(
collectors.ArtifactCollectorFlow.__name__,
client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id):
session_id = s
return flow.GRRFlow.ResultCollectionForFID(session_id, token=self.token)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
avg_line_length: 39.876457 | max_line_length: 80 | alphanum_fraction: 0.680248
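The tests above repeatedly build ArtifactSource objects and feed them through ArtifactCollectorFlow. Shown in isolation, and assuming a GRR checkout on the path, the source construction looks like this; the attribute values are illustrative and mirror testConditions/testSupportedOS.

from grr.lib import artifact_registry

source = artifact_registry.ArtifactSource(
    type=artifact_registry.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
    attributes={"client_action": "ListProcesses"},
    conditions=["os == 'Linux'"],  # skipped unless the client knowledge base matches
    supported_os=["Linux"])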
hexsha: 4a1bfec4845888649be252b5f55e4b20f4453e54 | size: 1,086 | ext: py | lang: Python
max_stars: path=pytglib/api/types/message_game_score.py, repo=iTeam-co/pytglib, head=e5e75e0a85f89b77762209b32a61b0a883c0ae61, licenses=["MIT"], count=6, events=2019-10-30T08:57:27.000Z to 2021-02-08T14:17:43.000Z
max_issues: path=pytglib/api/types/message_game_score.py, repo=iTeam-co/python-telegram, head=e5e75e0a85f89b77762209b32a61b0a883c0ae61, licenses=["MIT"], count=1, events=2021-08-19T05:44:10.000Z to 2021-08-19T07:14:56.000Z
max_forks: path=pytglib/api/types/message_game_score.py, repo=iTeam-co/python-telegram, head=e5e75e0a85f89b77762209b32a61b0a883c0ae61, licenses=["MIT"], count=5, events=2019-12-04T05:30:39.000Z to 2021-05-21T18:23:32.000Z
content:
from ..utils import Object
class MessageGameScore(Object):
"""
A new high score was achieved in a game
Attributes:
ID (:obj:`str`): ``MessageGameScore``
Args:
game_message_id (:obj:`int`):
Identifier of the message with the game, can be an identifier of a deleted message
game_id (:obj:`int`):
Identifier of the game; may be different from the games presented in the message with the game
score (:obj:`int`):
New score
Returns:
MessageContent
Raises:
:class:`telegram.Error`
"""
ID = "messageGameScore"
def __init__(self, game_message_id, game_id, score, **kwargs):
self.game_message_id = game_message_id # int
self.game_id = game_id # int
self.score = score # int
@staticmethod
def read(q: dict, *args) -> "MessageGameScore":
game_message_id = q.get('game_message_id')
game_id = q.get('game_id')
score = q.get('score')
return MessageGameScore(game_message_id, game_id, score)
avg_line_length: 26.487805 | max_line_length: 107 | alphanum_fraction: 0.606814
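An illustrative round trip with the class above; the update payload values are made up.

update = {'game_message_id': 123456, 'game_id': 42, 'score': 9000}
msg = MessageGameScore.read(update)
print(msg.ID, msg.score)  # messageGameScore 9000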
hexsha: 4a1bfef20caeb2ffb710c534bb60b1a9f2013c03 | size: 8,234 | ext: py | lang: Python
max_stars: path=options/base_options.py, repo=panaali/pytorch-CycleGAN-and-pix2pix, head=8e1bead0767ecc5fa472cc3fb906e92a14949761, licenses=["BSD-3-Clause"], count=null, events=null
max_issues: path=options/base_options.py, repo=panaali/pytorch-CycleGAN-and-pix2pix, head=8e1bead0767ecc5fa472cc3fb906e92a14949761, licenses=["BSD-3-Clause"], count=null, events=null
max_forks: path=options/base_options.py, repo=panaali/pytorch-CycleGAN-and-pix2pix, head=8e1bead0767ecc5fa472cc3fb906e92a14949761, licenses=["BSD-3-Clause"], count=null, events=null
content:
import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--augment_dataset', action='store_true' , help='data augmentation by rotating 90 degrees and fliping resulting in 7 new images for each image')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
"""Print and save options
It will print both current options and default values(if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
avg_line_length: 59.666667 | max_line_length: 235 | alphanum_fraction: 0.663711
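A sketch of how a concrete options class sits on top of BaseOptions, mirroring the repo's TrainOptions/TestOptions split; the extra flag is illustrative.

class TestOptions(BaseOptions):
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)  # reuse the shared flags
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        self.isTrain = False
        return parser

opt = TestOptions().parse()  # e.g. invoked as: python test.py --dataroot ./datasets/maps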
hexsha: 4a1bff3811660050e546800ceaf67ce1761b81be | size: 4,544 | ext: py | lang: Python
max_stars: path=sopaper/fetcher/sciencedirect.py, repo=gonzalorodrigo/SoPaper, head=0246c1baeb3a863cb6415ab769f363eb86267bd6, licenses=["CC-BY-4.0"], count=158, events=2015-02-07T13:19:38.000Z to 2022-03-02T15:29:22.000Z
max_issues: path=sopaper/fetcher/sciencedirect.py, repo=gonzalorodrigo/SoPaper, head=0246c1baeb3a863cb6415ab769f363eb86267bd6, licenses=["CC-BY-4.0"], count=8, events=2015-06-15T04:06:30.000Z to 2020-01-25T14:04:02.000Z
max_forks: path=sopaper/fetcher/sciencedirect.py, repo=gonzalorodrigo/SoPaper, head=0246c1baeb3a863cb6415ab769f363eb86267bd6, licenses=["CC-BY-4.0"], count=39, events=2016-01-01T07:14:32.000Z to 2021-04-26T08:25:02.000Z
content:
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: sciencedirect.py
# Date: Mon Jun 09 17:06:26 2014 +0000
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import re
from . import register_parser, RecoverableErr
from .base import FetcherBase, direct_download
from ..uklogger import *
from .. import ukconfig
from urlparse import urlparse
import requests
from bs4 import BeautifulSoup
HOSTNAME = 'www.sciencedirect.com'
# not working right now
#@register_parser(name='sciencedirect.com', urlmatch='sciencedirect.com',
#meta_field=['author', 'bibtex', 'abstract'],
#priority=8)
class ScienceDirect(FetcherBase):
def _do_pre_parse(self):
self.text = requests.get(self.url).text.encode('utf-8')
with open("/tmp/b.html", 'w') as f:
f.write(self.text)
#text = open("/tmp/b.html").read()
self.soup = BeautifulSoup(self.text)
def _do_download(self, updater):
pdf = self.soup.findAll(attrs={'id': 'pdfLink'})
if pdf:
try:
url = pdf[0]['pdfurl']
print url
except:
# probably something need to be fixed
log_exc('')
else:
raise RecoverableErr("No available download at {0}".format(self.url))
return direct_download(url, updater)
def _do_get_title(self):
titles = self.soup.findAll(attrs={'name': 'citation_title'})
return titles[0]['content']
def _do_get_meta(self):
meta = {}
try:
log_info("Getting author...")
authors = self.soup.findAll(
attrs={'title': 'Author Profile Page'})
author = [a.text for a in authors]
meta['author'] = author
except KeyboardInterrupt:
raise
except:
pass
try:
log_info("Getting abstract...")
abstract_url = re.findall(r'\'tab_abstract.+\d+\'', self.text)[0][1:-1]
abstract_text = requests.get('http://{0}/'.format(HOSTNAME) + abstract_url).text.encode('utf-8')
abstract_soup = BeautifulSoup(abstract_text)
abstract = abstract_soup.findAll('p')[0].text
meta['abstract'] = abstract
except KeyboardInterrupt:
raise
except:
pass
try:
log_info("Getting refs ...")
ref_url = re.findall(r'\'tab_references.+\d+\'', self.text)[0][1:-1]
ref_text = requests.get('http://{0}/'.format(HOSTNAME) + ref_url).text.encode('utf-8')
ref_soup = BeautifulSoup(ref_text)
trs = ref_soup.findAll('tr')
reference = []
for tr in trs:
records = tr.findAll('a')
if len(records) > 0:
href = 'http://{0}/'.format(HOSTNAME) + records[0].get('href')
ref = records[0].text.strip()
reference.append({'ref': ref, 'href': href})
meta['references'] = reference
except KeyboardInterrupt:
raise
except:
pass
try:
log_info("Getting cited ...")
cite_url = re.findall(r'\'tab_citings.+\d+\'', self.text)[0][1:-1]
cite_text = requests.get('http://{0}/'.format(HOSTNAME) +
cite_url, timeout=5
).text.encode('utf-8')
cite_soup = BeautifulSoup(cite_text)
trs = cite_soup.findAll('tr')
citing = []
for tr in trs:
records = tr.findAll('a')
if len(records) > 0:
href = 'http://{0}/'.format(HOSTNAME) + records[0].get('href')
cite = records[0].text.strip()
citing.append({'citing': cite, 'href': href})
meta['citedby'] = citing
except KeyboardInterrupt:
raise
except requests.exceptions.Timeout:
pass
except:
pass
try:
log_info("Getting bibtex...")
bibtex_url = re.findall(r'exportformats.+bibtex', self.text)[0]
bibtex_text = requests.get('http://{0}/'.format(HOSTNAME) + bibtex_url).text.encode('utf-8')
bibtex_soup = BeautifulSoup(bibtex_text)
pre = bibtex_soup.find('pre')
bibtex = pre.text.strip()
meta['bibtex'] = bibtex
except KeyboardInterrupt:
raise
except:
pass
return meta
avg_line_length: 35.5 | max_line_length: 108 | alphanum_fraction: 0.529489
hexsha: 4a1c0029440fc0519152835375d426f7913f5bf0 | size: 2,590 | ext: py | lang: Python
max_stars: path=store/migrations/0001_initial.py, repo=kshitizJ/Ecommerce_Website_Django, head=6484a3493b8f24475b8a48923d9465ace98632b3, licenses=["MIT"], count=3, events=2021-03-15T04:57:54.000Z to 2021-09-06T12:41:08.000Z
max_issues: path=store/migrations/0001_initial.py, repo=Omichougule/Ecommerce_Website_Django, head=6484a3493b8f24475b8a48923d9465ace98632b3, licenses=["MIT"], count=null, events=null
max_forks: path=store/migrations/0001_initial.py, repo=Omichougule/Ecommerce_Website_Django, head=6484a3493b8f24475b8a48923d9465ace98632b3, licenses=["MIT"], count=2, events=2021-03-24T13:59:14.000Z to 2021-09-06T12:40:45.000Z
content:
# Generated by Django 3.0.6 on 2021-04-20 10:19
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('phone', models.CharField(max_length=15)),
('email', models.EmailField(max_length=254)),
('password', models.CharField(max_length=500)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('price', models.IntegerField(default=0)),
('description', models.CharField(blank=True, default='', max_length=200, null=True)),
('image', models.ImageField(upload_to='uploads/products/')),
('category', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='store.Category')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(default=1)),
('price', models.IntegerField()),
('address', models.CharField(blank=True, default='', max_length=50)),
('phone', models.CharField(blank=True, default='', max_length=50)),
('date', models.DateField(default=datetime.datetime.today)),
('status', models.BooleanField(default=False)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.Customer')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.Product')),
],
),
]
avg_line_length: 43.166667 | max_line_length: 125 | alphanum_fraction: 0.572587
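A hedged sketch of applying the migration above from Python rather than the usual `python manage.py migrate store`; it assumes DJANGO_SETTINGS_MODULE points at the project settings.

import django
from django.core import management

django.setup()
management.call_command("migrate", "store")  # creates the Category, Customer, Product and Order tables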
hexsha: 4a1c00e8fd71194ad8988183f48a230039976cfc | size: 455 | ext: py | lang: Python
max_stars: path=plotly/validators/histogram2d/hoverlabel/font/_colorsrc.py, repo=faezs/plotly.py, head=6009b5b9c746e5d2a2849ad255a4eb234b551ed7, licenses=["MIT"], count=2, events=2020-03-24T11:41:14.000Z to 2021-01-14T07:59:43.000Z
max_issues: path=plotly/validators/histogram2d/hoverlabel/font/_colorsrc.py, repo=faezs/plotly.py, head=6009b5b9c746e5d2a2849ad255a4eb234b551ed7, licenses=["MIT"], count=null, events=null
max_forks: path=plotly/validators/histogram2d/hoverlabel/font/_colorsrc.py, repo=faezs/plotly.py, head=6009b5b9c746e5d2a2849ad255a4eb234b551ed7, licenses=["MIT"], count=4, events=2019-06-03T14:49:12.000Z to 2022-01-06T01:05:12.000Z
content:
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='colorsrc',
parent_name='histogram2d.hoverlabel.font',
**kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='none',
role='info',
**kwargs
)
avg_line_length: 23.947368 | max_line_length: 67 | alphanum_fraction: 0.604396
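An illustrative call into the validator above; validate_coerce is the common entry point on plotly validators, and the column-source string is made up.

v = ColorsrcValidator()
coerced = v.validate_coerce("some_user:1234:abcd")  # returns the value when it is an acceptable src reference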
hexsha: 4a1c02ad2614fcf49e67404139c13b35d5dec48c | size: 3,232 | ext: py | lang: Python
max_stars: path=pysnmp-with-texts/CISCO-WAN-CELL-EXT-CAPABILITY.py, repo=agustinhenze/mibs.snmplabs.com, head=1fc5c07860542b89212f4c8ab807057d9a9206c7, licenses=["Apache-2.0"], count=8, events=2019-05-09T17:04:00.000Z to 2021-06-09T06:50:51.000Z
max_issues: path=pysnmp-with-texts/CISCO-WAN-CELL-EXT-CAPABILITY.py, repo=agustinhenze/mibs.snmplabs.com, head=1fc5c07860542b89212f4c8ab807057d9a9206c7, licenses=["Apache-2.0"], count=4, events=2019-05-31T16:42:59.000Z to 2020-01-31T21:57:17.000Z
max_forks: path=pysnmp-with-texts/CISCO-WAN-CELL-EXT-CAPABILITY.py, repo=agustinhenze/mibs.snmplabs.com, head=1fc5c07860542b89212f4c8ab807057d9a9206c7, licenses=["Apache-2.0"], count=10, events=2019-04-30T05:51:36.000Z to 2022-02-16T03:33:41.000Z
content:
#
# PySNMP MIB module CISCO-WAN-CELL-EXT-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-WAN-CELL-EXT-CAPABILITY
# Produced by pysmi-0.3.4 at Wed May 1 12:20:15 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
AgentCapabilities, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "AgentCapabilities", "NotificationGroup", "ModuleCompliance")
TimeTicks, Counter64, Counter32, Unsigned32, Gauge32, MibIdentifier, iso, Integer32, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, IpAddress, ObjectIdentity, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Counter64", "Counter32", "Unsigned32", "Gauge32", "MibIdentifier", "iso", "Integer32", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "IpAddress", "ObjectIdentity", "ModuleIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ciscoWanCellExtCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 625))
ciscoWanCellExtCapability.setRevisions(('2014-03-21 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoWanCellExtCapability.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoWanCellExtCapability.setLastUpdated('201403210000Z')
if mibBuilder.loadTexts: ciscoWanCellExtCapability.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoWanCellExtCapability.setContactInfo('Cisco Systems Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-lan-switch-snmp@cisco.com')
if mibBuilder.loadTexts: ciscoWanCellExtCapability.setDescription('The capabilities description of CISCO-WAN-CELL-EXT-MIB.')
cwceCapV15R0501PIsr = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 625, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cwceCapV15R0501PIsr = cwceCapV15R0501PIsr.setProductRelease('Cisco IOS 15.5(1) Version on Cisco ISR\n 3900/2900/1900/3800/2800/1800/800 series devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cwceCapV15R0501PIsr = cwceCapV15R0501PIsr.setStatus('current')
if mibBuilder.loadTexts: cwceCapV15R0501PIsr.setDescription('CISCO-WAN-CELL-EXT-MIB agent capabilities.')
mibBuilder.exportSymbols("CISCO-WAN-CELL-EXT-CAPABILITY", PYSNMP_MODULE_ID=ciscoWanCellExtCapability, cwceCapV15R0501PIsr=cwceCapV15R0501PIsr, ciscoWanCellExtCapability=ciscoWanCellExtCapability)
avg_line_length: 104.258065 | max_line_length: 477 | alphanum_fraction: 0.784963
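A hedged sketch of loading the compiled MIB above with pysnmp's MIB builder; the source directory is an assumption based on the repo path.

from pysnmp.smi import builder

mib_builder = builder.MibBuilder()
mib_builder.loadTexts = True  # keep DESCRIPTION texts, as the module above checks loadTexts
mib_builder.addMibSources(builder.DirMibSource('pysnmp-with-texts'))
mib_builder.loadModules('CISCO-WAN-CELL-EXT-CAPABILITY')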
hexsha: 4a1c032717f50b5c9d6314a828cb4aa45cfe48e3 | size: 102,082 | ext: py | lang: Python
max_stars: path=bigquery/tests/system.py, repo=conwaychriscosmo/google-cloud-python, head=8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a, licenses=["Apache-2.0"], count=null, events=null
max_issues: path=bigquery/tests/system.py, repo=conwaychriscosmo/google-cloud-python, head=8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a, licenses=["Apache-2.0"], count=40, events=2019-07-16T10:04:48.000Z to 2020-01-20T09:04:59.000Z
max_forks: path=bigquery/tests/system.py, repo=conwaychriscosmo/google-cloud-python, head=8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a, licenses=["Apache-2.0"], count=2, events=2019-07-18T00:05:31.000Z to 2019-11-27T14:17:22.000Z
content:
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import collections
import concurrent.futures
import csv
import datetime
import decimal
import json
import operator
import os
import time
import unittest
import uuid
import re
import requests
import six
import psutil
import pytest
import pytz
try:
from google.cloud import bigquery_storage_v1beta1
except ImportError: # pragma: NO COVER
bigquery_storage_v1beta1 = None
try:
import pandas
except ImportError: # pragma: NO COVER
pandas = None
try:
import pyarrow
import pyarrow.types
except ImportError: # pragma: NO COVER
pyarrow = None
try:
import IPython
from IPython.utils import io
from IPython.testing import tools
from IPython.terminal import interactiveshell
except ImportError: # pragma: NO COVER
IPython = None
from google.api_core.exceptions import PreconditionFailed
from google.api_core.exceptions import BadRequest
from google.api_core.exceptions import Conflict
from google.api_core.exceptions import Forbidden
from google.api_core.exceptions import GoogleAPICallError
from google.api_core.exceptions import NotFound
from google.api_core.exceptions import InternalServerError
from google.api_core.exceptions import ServiceUnavailable
from google.api_core.exceptions import TooManyRequests
from google.cloud import bigquery
from google.cloud import bigquery_v2
from google.cloud.bigquery.dataset import Dataset
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.table import Table
from google.cloud._helpers import UTC
from google.cloud.bigquery import dbapi
from google.cloud import storage
from test_utils.retry import RetryErrors
from test_utils.retry import RetryInstanceState
from test_utils.retry import RetryResult
from test_utils.system import unique_resource_id
JOB_TIMEOUT = 120 # 2 minutes
WHERE = os.path.abspath(os.path.dirname(__file__))
# Common table data used for many tests.
ROWS = [
("Phred Phlyntstone", 32),
("Bharney Rhubble", 33),
("Wylma Phlyntstone", 29),
("Bhettye Rhubble", 27),
]
HEADER_ROW = ("Full Name", "Age")
SCHEMA = [
bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
]
TIME_PARTITIONING_CLUSTERING_FIELDS_SCHEMA = [
bigquery.SchemaField("transaction_time", "TIMESTAMP", mode="REQUIRED"),
bigquery.SchemaField("transaction_id", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("user_email", "STRING", mode="REQUIRED"),
bigquery.SchemaField("store_code", "STRING", mode="REQUIRED"),
bigquery.SchemaField(
"items",
"RECORD",
mode="REPEATED",
fields=[
bigquery.SchemaField("item_code", "STRING", mode="REQUIRED"),
bigquery.SchemaField("quantity", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("comments", "STRING", mode="NULLABLE"),
bigquery.SchemaField("expiration_date", "DATE", mode="REQUIRED"),
],
),
]
# The VPC-SC team maintains a mirror of the GCS bucket used for code
# samples. The public bucket crosses the configured security boundary.
# See: https://github.com/googleapis/google-cloud-python/issues/8550
SAMPLES_BUCKET = os.environ.get("GCLOUD_TEST_SAMPLES_BUCKET", "cloud-samples-data")
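# Cloud Storage calls in these tests occasionally hit transient 429/5xx errors;
# wrapping them with this retry helper keeps the system tests from flaking.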
retry_storage_errors = RetryErrors(
(TooManyRequests, InternalServerError, ServiceUnavailable)
)
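# Predicate for RetryResult: freshly written rows become visible asynchronously,
# so keep re-fetching until at least one row comes back.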
def _has_rows(result):
return len(result) > 0
def _make_dataset_id(prefix):
return "%s%s" % (prefix, unique_resource_id())
def _load_json_schema(filename="data/schema.json"):
from google.cloud.bigquery.table import _parse_schema_resource
json_filename = os.path.join(WHERE, filename)
with open(json_filename, "r") as schema_file:
return _parse_schema_resource(json.load(schema_file))
def _rate_limit_exceeded(forbidden):
"""Predicate: pass only exceptions with 'rateLimitExceeded' as reason."""
return any(error["reason"] == "rateLimitExceeded" for error in forbidden._errors)
# We need to wait to stay within the rate limits.
# The alternative outcome is a 403 Forbidden response from upstream, which
# they return instead of the more appropriate 429.
# See https://cloud.google.com/bigquery/quota-policy
retry_403 = RetryErrors(Forbidden, error_predicate=_rate_limit_exceeded)
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT = None
CURSOR = None
def setUpModule():
Config.CLIENT = bigquery.Client()
Config.CURSOR = dbapi.connect(Config.CLIENT).cursor()
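# A single shared client and DB-API cursor serve the whole module; each test tracks
# the resources it creates in self.to_delete so tearDown can remove them.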
class TestBigQuery(unittest.TestCase):
def setUp(self):
self.to_delete = []
def tearDown(self):
def _still_in_use(bad_request):
return any(
error["reason"] == "resourceInUse" for error in bad_request._errors
)
retry_in_use = RetryErrors(BadRequest, error_predicate=_still_in_use)
retry_storage_errors_conflict = RetryErrors(
(Conflict, TooManyRequests, InternalServerError, ServiceUnavailable)
)
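        # Deletion happens in list order: tables and blobs are inserted at the
        # front of to_delete, so they are removed before the datasets and
        # buckets that contain them.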
for doomed in self.to_delete:
if isinstance(doomed, storage.Bucket):
retry_storage_errors_conflict(doomed.delete)(force=True)
elif isinstance(doomed, (Dataset, bigquery.DatasetReference)):
retry_in_use(Config.CLIENT.delete_dataset)(doomed, delete_contents=True)
elif isinstance(doomed, (Table, bigquery.TableReference)):
retry_in_use(Config.CLIENT.delete_table)(doomed)
else:
doomed.delete()
def test_get_service_account_email(self):
client = Config.CLIENT
got = client.get_service_account_email()
self.assertIsInstance(got, six.text_type)
self.assertIn("@", got)
def _create_bucket(self, bucket_name, location=None):
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
retry_storage_errors(bucket.create)(location=location)
self.to_delete.append(bucket)
return bucket
def test_close_releases_open_sockets(self):
current_process = psutil.Process()
conn_count_start = len(current_process.connections())
client = Config.CLIENT
client.query(
"""
SELECT
source_year AS year, COUNT(is_male) AS birth_count
FROM `bigquery-public-data.samples.natality`
GROUP BY year
ORDER BY year DESC
LIMIT 15
"""
)
client.close()
conn_count_end = len(current_process.connections())
self.assertEqual(conn_count_end, conn_count_start)
def test_create_dataset(self):
DATASET_ID = _make_dataset_id("create_dataset")
dataset = self.temp_dataset(DATASET_ID)
self.assertTrue(_dataset_exists(dataset))
self.assertEqual(dataset.dataset_id, DATASET_ID)
self.assertEqual(dataset.project, Config.CLIENT.project)
def test_get_dataset(self):
dataset_id = _make_dataset_id("get_dataset")
client = Config.CLIENT
dataset_arg = Dataset(client.dataset(dataset_id))
dataset_arg.friendly_name = "Friendly"
dataset_arg.description = "Description"
dataset = retry_403(client.create_dataset)(dataset_arg)
self.to_delete.append(dataset)
dataset_ref = client.dataset(dataset_id)
# Get with a reference.
got = client.get_dataset(dataset_ref)
self.assertEqual(got.friendly_name, "Friendly")
self.assertEqual(got.description, "Description")
# Get with a string.
got = client.get_dataset(dataset_id)
self.assertEqual(got.friendly_name, "Friendly")
self.assertEqual(got.description, "Description")
# Get with a fully-qualified string.
got = client.get_dataset("{}.{}".format(client.project, dataset_id))
self.assertEqual(got.friendly_name, "Friendly")
self.assertEqual(got.description, "Description")
def test_update_dataset(self):
dataset = self.temp_dataset(_make_dataset_id("update_dataset"))
self.assertTrue(_dataset_exists(dataset))
self.assertIsNone(dataset.friendly_name)
self.assertIsNone(dataset.description)
self.assertEqual(dataset.labels, {})
dataset.friendly_name = "Friendly"
dataset.description = "Description"
dataset.labels = {"priority": "high", "color": "blue"}
ds2 = Config.CLIENT.update_dataset(
dataset, ("friendly_name", "description", "labels")
)
self.assertEqual(ds2.friendly_name, "Friendly")
self.assertEqual(ds2.description, "Description")
self.assertEqual(ds2.labels, {"priority": "high", "color": "blue"})
ds2.labels = {
"color": "green", # change
"shape": "circle", # add
"priority": None, # delete
}
ds3 = Config.CLIENT.update_dataset(ds2, ["labels"])
self.assertEqual(ds3.labels, {"color": "green", "shape": "circle"})
        # If we try to update using ds2 again, it will fail because the
        # previous update changed the ETag.
ds2.description = "no good"
with self.assertRaises(PreconditionFailed):
Config.CLIENT.update_dataset(ds2, ["description"])
def test_list_datasets(self):
datasets_to_create = [
"new" + unique_resource_id(),
"newer" + unique_resource_id(),
"newest" + unique_resource_id(),
]
for dataset_id in datasets_to_create:
self.temp_dataset(dataset_id)
# Retrieve the datasets.
iterator = Config.CLIENT.list_datasets()
all_datasets = list(iterator)
self.assertIsNone(iterator.next_page_token)
created = [
dataset
for dataset in all_datasets
if dataset.dataset_id in datasets_to_create
and dataset.project == Config.CLIENT.project
]
self.assertEqual(len(created), len(datasets_to_create))
def test_list_datasets_w_project(self):
# Retrieve datasets from a different project.
iterator = Config.CLIENT.list_datasets(project="bigquery-public-data")
all_datasets = frozenset([dataset.dataset_id for dataset in iterator])
self.assertIn("usa_names", all_datasets)
def test_create_table(self):
dataset = self.temp_dataset(_make_dataset_id("create_table"))
table_id = "test_table"
table_arg = Table(dataset.table(table_id), schema=SCHEMA)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertEqual(table.table_id, table_id)
def test_create_table_w_time_partitioning_w_clustering_fields(self):
from google.cloud.bigquery.table import TimePartitioning
from google.cloud.bigquery.table import TimePartitioningType
dataset = self.temp_dataset(_make_dataset_id("create_table_tp_cf"))
table_id = "test_table"
table_arg = Table(
dataset.table(table_id), schema=TIME_PARTITIONING_CLUSTERING_FIELDS_SCHEMA
)
self.assertFalse(_table_exists(table_arg))
table_arg.time_partitioning = TimePartitioning(field="transaction_time")
table_arg.clustering_fields = ["user_email", "store_code"]
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertEqual(table.table_id, table_id)
time_partitioning = table.time_partitioning
self.assertEqual(time_partitioning.type_, TimePartitioningType.DAY)
self.assertEqual(time_partitioning.field, "transaction_time")
self.assertEqual(table.clustering_fields, ["user_email", "store_code"])
def test_delete_dataset_with_string(self):
dataset_id = _make_dataset_id("delete_table_true")
dataset_ref = Config.CLIENT.dataset(dataset_id)
retry_403(Config.CLIENT.create_dataset)(Dataset(dataset_ref))
self.assertTrue(_dataset_exists(dataset_ref))
Config.CLIENT.delete_dataset(dataset_id)
self.assertFalse(_dataset_exists(dataset_ref))
def test_delete_dataset_delete_contents_true(self):
dataset_id = _make_dataset_id("delete_table_true")
dataset = retry_403(Config.CLIENT.create_dataset)(
Dataset(Config.CLIENT.dataset(dataset_id))
)
table_id = "test_table"
table_arg = Table(dataset.table(table_id), schema=SCHEMA)
table = retry_403(Config.CLIENT.create_table)(table_arg)
Config.CLIENT.delete_dataset(dataset, delete_contents=True)
self.assertFalse(_table_exists(table))
def test_delete_dataset_delete_contents_false(self):
from google.api_core import exceptions
dataset = self.temp_dataset(_make_dataset_id("delete_table_false"))
table_id = "test_table"
table_arg = Table(dataset.table(table_id), schema=SCHEMA)
retry_403(Config.CLIENT.create_table)(table_arg)
with self.assertRaises(exceptions.BadRequest):
Config.CLIENT.delete_dataset(dataset)
def test_get_table_w_public_dataset(self):
public = "bigquery-public-data"
dataset_id = "samples"
table_id = "shakespeare"
table_ref = DatasetReference(public, dataset_id).table(table_id)
# Get table with reference.
table = Config.CLIENT.get_table(table_ref)
self.assertEqual(table.table_id, table_id)
self.assertEqual(table.dataset_id, dataset_id)
self.assertEqual(table.project, public)
schema_names = [field.name for field in table.schema]
self.assertEqual(schema_names, ["word", "word_count", "corpus", "corpus_date"])
# Get table with string.
table = Config.CLIENT.get_table("{}.{}.{}".format(public, dataset_id, table_id))
self.assertEqual(table.table_id, table_id)
self.assertEqual(table.dataset_id, dataset_id)
self.assertEqual(table.project, public)
def test_list_partitions(self):
table_ref = DatasetReference(
"bigquery-public-data", "ethereum_blockchain"
).table("blocks")
all_rows = Config.CLIENT.list_partitions(table_ref)
self.assertIn("20180801", all_rows)
self.assertGreater(len(all_rows), 1000)
def test_list_tables(self):
dataset_id = _make_dataset_id("list_tables")
dataset = self.temp_dataset(dataset_id)
# Retrieve tables before any are created for the dataset.
iterator = Config.CLIENT.list_tables(dataset)
all_tables = list(iterator)
self.assertEqual(all_tables, [])
self.assertIsNone(iterator.next_page_token)
# Insert some tables to be listed.
tables_to_create = [
"new" + unique_resource_id(),
"newer" + unique_resource_id(),
"newest" + unique_resource_id(),
]
for table_name in tables_to_create:
table = Table(dataset.table(table_name), schema=SCHEMA)
created_table = retry_403(Config.CLIENT.create_table)(table)
self.to_delete.insert(0, created_table)
# Retrieve the tables.
iterator = Config.CLIENT.list_tables(dataset)
all_tables = list(iterator)
self.assertIsNone(iterator.next_page_token)
created = [
table
for table in all_tables
if (table.table_id in tables_to_create and table.dataset_id == dataset_id)
]
self.assertEqual(len(created), len(tables_to_create))
# List tables with a string ID.
iterator = Config.CLIENT.list_tables(dataset_id)
self.assertGreater(len(list(iterator)), 0)
# List tables with a fully-qualified string ID.
iterator = Config.CLIENT.list_tables(
"{}.{}".format(Config.CLIENT.project, dataset_id)
)
self.assertGreater(len(list(iterator)), 0)
def test_update_table(self):
dataset = self.temp_dataset(_make_dataset_id("update_table"))
TABLE_NAME = "test_table"
table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertIsNone(table.friendly_name)
self.assertIsNone(table.description)
self.assertEqual(table.labels, {})
table.friendly_name = "Friendly"
table.description = "Description"
table.labels = {"priority": "high", "color": "blue"}
table2 = Config.CLIENT.update_table(
table, ["friendly_name", "description", "labels"]
)
self.assertEqual(table2.friendly_name, "Friendly")
self.assertEqual(table2.description, "Description")
self.assertEqual(table2.labels, {"priority": "high", "color": "blue"})
table2.description = None
table2.labels = {
"color": "green", # change
"shape": "circle", # add
"priority": None, # delete
}
table3 = Config.CLIENT.update_table(table2, ["description", "labels"])
self.assertIsNone(table3.description)
self.assertEqual(table3.labels, {"color": "green", "shape": "circle"})
# If we try to update using table2 again, it will fail because the
# previous update changed the ETag.
table2.description = "no good"
with self.assertRaises(PreconditionFailed):
Config.CLIENT.update_table(table2, ["description"])
def test_update_table_schema(self):
dataset = self.temp_dataset(_make_dataset_id("update_table"))
TABLE_NAME = "test_table"
table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
voter = bigquery.SchemaField("voter", "BOOLEAN", mode="NULLABLE")
schema = table.schema
schema.append(voter)
table.schema = schema
updated_table = Config.CLIENT.update_table(table, ["schema"])
self.assertEqual(len(updated_table.schema), len(schema))
for found, expected in zip(updated_table.schema, schema):
self.assertEqual(found.name, expected.name)
self.assertEqual(found.field_type, expected.field_type)
self.assertEqual(found.mode, expected.mode)
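    # Fetch only the first page of rows; combined with RetryResult, this is how
    # tests poll until freshly written rows become visible.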
@staticmethod
def _fetch_single_page(table, selected_fields=None):
iterator = Config.CLIENT.list_rows(table, selected_fields=selected_fields)
page = six.next(iterator.pages)
return list(page)
def _create_table_many_columns(self, rowcount):
        # Generate a table of maximum width via CREATE TABLE AS SELECT.
        # The first column is named 'rowval' and holds values from 1..rowcount;
        # subsequent columns are named col_<N> and contain the value N*rowval,
        # where N is between 1 and 9999 inclusive.
dsname = _make_dataset_id("wide_schema")
dataset = self.temp_dataset(dsname)
table_id = "many_columns"
table_ref = dataset.table(table_id)
self.to_delete.insert(0, table_ref)
colprojections = ",".join(
["r * {} as col_{}".format(n, n) for n in range(1, 10000)]
)
sql = """
CREATE TABLE {}.{}
AS
SELECT
r as rowval,
{}
FROM
UNNEST(GENERATE_ARRAY(1,{},1)) as r
""".format(
dsname, table_id, colprojections, rowcount
)
query_job = Config.CLIENT.query(sql)
query_job.result()
self.assertEqual(query_job.statement_type, "CREATE_TABLE_AS_SELECT")
self.assertEqual(query_job.ddl_operation_performed, "CREATE")
self.assertEqual(query_job.ddl_target_table, table_ref)
return table_ref
def test_query_many_columns(self):
# Test working with the widest schema BigQuery supports, 10k columns.
row_count = 2
table_ref = self._create_table_many_columns(row_count)
rows = list(
Config.CLIENT.query(
"SELECT * FROM `{}.{}`".format(table_ref.dataset_id, table_ref.table_id)
)
)
self.assertEqual(len(rows), row_count)
# check field representations adhere to expected values.
correctwidth = 0
badvals = 0
for r in rows:
vals = r._xxx_values
rowval = vals[0]
if len(vals) == 10000:
correctwidth = correctwidth + 1
for n in range(1, 10000):
if vals[n] != rowval * (n):
badvals = badvals + 1
self.assertEqual(correctwidth, row_count)
self.assertEqual(badvals, 0)
def test_insert_rows_then_dump_table(self):
NOW_SECONDS = 1448911495.484366
NOW = datetime.datetime.utcfromtimestamp(NOW_SECONDS).replace(tzinfo=UTC)
ROWS = [
("Phred Phlyntstone", 32, NOW),
("Bharney Rhubble", 33, NOW + datetime.timedelta(seconds=10)),
("Wylma Phlyntstone", 29, NOW + datetime.timedelta(seconds=20)),
("Bhettye Rhubble", 27, None),
]
ROW_IDS = range(len(ROWS))
dataset = self.temp_dataset(_make_dataset_id("insert_rows_then_dump"))
TABLE_ID = "test_table"
schema = [
bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("now", "TIMESTAMP"),
]
table_arg = Table(dataset.table(TABLE_ID), schema=schema)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
errors = Config.CLIENT.insert_rows(table, ROWS, row_ids=ROW_IDS)
self.assertEqual(len(errors), 0)
rows = ()
# Allow for "warm up" before rows visible. See
# https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
# 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
row_tuples = [r.values() for r in rows]
by_age = operator.itemgetter(1)
self.assertEqual(sorted(row_tuples, key=by_age), sorted(ROWS, key=by_age))
def test_load_table_from_local_avro_file_then_dump_table(self):
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
TABLE_NAME = "test_table_avro"
ROWS = [
("violet", 400),
("indigo", 445),
("blue", 475),
("green", 510),
("yellow", 570),
("orange", 590),
("red", 650),
]
dataset = self.temp_dataset(_make_dataset_id("load_local_then_dump"))
table_ref = dataset.table(TABLE_NAME)
table = Table(table_ref)
self.to_delete.insert(0, table)
with open(os.path.join(WHERE, "data", "colors.avro"), "rb") as avrof:
config = bigquery.LoadJobConfig()
config.source_format = SourceFormat.AVRO
config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job = Config.CLIENT.load_table_from_file(
avrof, table_ref, job_config=config
)
# Retry until done.
job.result(timeout=JOB_TIMEOUT)
self.assertEqual(job.output_rows, len(ROWS))
table = Config.CLIENT.get_table(table)
rows = self._fetch_single_page(table)
row_tuples = [r.values() for r in rows]
by_wavelength = operator.itemgetter(1)
self.assertEqual(
sorted(row_tuples, key=by_wavelength), sorted(ROWS, key=by_wavelength)
)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_w_automatic_schema(self):
"""Test that a DataFrame with dtypes that map well to BigQuery types
can be uploaded without specifying a schema.
https://github.com/googleapis/google-cloud-python/issues/9044
"""
df_data = collections.OrderedDict(
[
("bool_col", pandas.Series([True, False, True], dtype="bool")),
(
"ts_col",
pandas.Series(
[
datetime.datetime(2010, 1, 2, 3, 44, 50),
datetime.datetime(2011, 2, 3, 14, 50, 59),
datetime.datetime(2012, 3, 14, 15, 16),
],
dtype="datetime64[ns]",
).dt.tz_localize(pytz.utc),
),
(
"dt_col",
pandas.Series(
[
datetime.datetime(2010, 1, 2, 3, 44, 50),
datetime.datetime(2011, 2, 3, 14, 50, 59),
datetime.datetime(2012, 3, 14, 15, 16),
],
dtype="datetime64[ns]",
),
),
("float32_col", pandas.Series([1.0, 2.0, 3.0], dtype="float32")),
("float64_col", pandas.Series([4.0, 5.0, 6.0], dtype="float64")),
("int8_col", pandas.Series([-12, -11, -10], dtype="int8")),
("int16_col", pandas.Series([-9, -8, -7], dtype="int16")),
("int32_col", pandas.Series([-6, -5, -4], dtype="int32")),
("int64_col", pandas.Series([-3, -2, -1], dtype="int64")),
("uint8_col", pandas.Series([0, 1, 2], dtype="uint8")),
("uint16_col", pandas.Series([3, 4, 5], dtype="uint16")),
("uint32_col", pandas.Series([6, 7, 8], dtype="uint32")),
]
)
dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
dataset_id = _make_dataset_id("bq_load_test")
self.temp_dataset(dataset_id)
table_id = "{}.{}.load_table_from_dataframe_w_automatic_schema".format(
Config.CLIENT.project, dataset_id
)
load_job = Config.CLIENT.load_table_from_dataframe(dataframe, table_id)
load_job.result()
table = Config.CLIENT.get_table(table_id)
self.assertEqual(
tuple(table.schema),
(
bigquery.SchemaField("bool_col", "BOOLEAN"),
bigquery.SchemaField("ts_col", "TIMESTAMP"),
# BigQuery does not support uploading DATETIME values from
# Parquet files. See:
# https://github.com/googleapis/google-cloud-python/issues/9996
bigquery.SchemaField("dt_col", "TIMESTAMP"),
bigquery.SchemaField("float32_col", "FLOAT"),
bigquery.SchemaField("float64_col", "FLOAT"),
bigquery.SchemaField("int8_col", "INTEGER"),
bigquery.SchemaField("int16_col", "INTEGER"),
bigquery.SchemaField("int32_col", "INTEGER"),
bigquery.SchemaField("int64_col", "INTEGER"),
bigquery.SchemaField("uint8_col", "INTEGER"),
bigquery.SchemaField("uint16_col", "INTEGER"),
bigquery.SchemaField("uint32_col", "INTEGER"),
),
)
self.assertEqual(table.num_rows, 3)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_w_nulls(self):
"""Test that a DataFrame with null columns can be uploaded if a
BigQuery schema is specified.
See: https://github.com/googleapis/google-cloud-python/issues/7370
"""
# Schema with all scalar types.
scalars_schema = (
bigquery.SchemaField("bool_col", "BOOLEAN"),
bigquery.SchemaField("bytes_col", "BYTES"),
bigquery.SchemaField("date_col", "DATE"),
bigquery.SchemaField("dt_col", "DATETIME"),
bigquery.SchemaField("float_col", "FLOAT"),
bigquery.SchemaField("geo_col", "GEOGRAPHY"),
bigquery.SchemaField("int_col", "INTEGER"),
bigquery.SchemaField("num_col", "NUMERIC"),
bigquery.SchemaField("str_col", "STRING"),
bigquery.SchemaField("time_col", "TIME"),
bigquery.SchemaField("ts_col", "TIMESTAMP"),
)
table_schema = scalars_schema + (
# TODO: Array columns can't be read due to NULLABLE versus REPEATED
# mode mismatch. See:
# https://issuetracker.google.com/133415569#comment3
# bigquery.SchemaField("array_col", "INTEGER", mode="REPEATED"),
# TODO: Support writing StructArrays to Parquet. See:
# https://jira.apache.org/jira/browse/ARROW-2587
# bigquery.SchemaField("struct_col", "RECORD", fields=scalars_schema),
)
num_rows = 100
nulls = [None] * num_rows
df_data = collections.OrderedDict(
[
("bool_col", nulls),
("bytes_col", nulls),
("date_col", nulls),
("dt_col", nulls),
("float_col", nulls),
("geo_col", nulls),
("int_col", nulls),
("num_col", nulls),
("str_col", nulls),
("time_col", nulls),
("ts_col", nulls),
]
)
dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
dataset_id = _make_dataset_id("bq_load_test")
self.temp_dataset(dataset_id)
table_id = "{}.{}.load_table_from_dataframe_w_nulls".format(
Config.CLIENT.project, dataset_id
)
# Create the table before loading so that schema mismatch errors are
# identified.
table = retry_403(Config.CLIENT.create_table)(
Table(table_id, schema=table_schema)
)
self.to_delete.insert(0, table)
job_config = bigquery.LoadJobConfig(schema=table_schema)
load_job = Config.CLIENT.load_table_from_dataframe(
dataframe, table_id, job_config=job_config
)
load_job.result()
table = Config.CLIENT.get_table(table)
self.assertEqual(tuple(table.schema), table_schema)
self.assertEqual(table.num_rows, num_rows)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_w_required(self):
"""Test that a DataFrame with required columns can be uploaded if a
BigQuery schema is specified.
See: https://github.com/googleapis/google-cloud-python/issues/8093
"""
table_schema = (
bigquery.SchemaField("name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
)
records = [{"name": "Chip", "age": 2}, {"name": "Dale", "age": 3}]
dataframe = pandas.DataFrame(records, columns=["name", "age"])
job_config = bigquery.LoadJobConfig(schema=table_schema)
dataset_id = _make_dataset_id("bq_load_test")
self.temp_dataset(dataset_id)
table_id = "{}.{}.load_table_from_dataframe_w_required".format(
Config.CLIENT.project, dataset_id
)
# Create the table before loading so that schema mismatch errors are
# identified.
table = retry_403(Config.CLIENT.create_table)(
Table(table_id, schema=table_schema)
)
self.to_delete.insert(0, table)
job_config = bigquery.LoadJobConfig(schema=table_schema)
load_job = Config.CLIENT.load_table_from_dataframe(
dataframe, table_id, job_config=job_config
)
load_job.result()
table = Config.CLIENT.get_table(table)
self.assertEqual(tuple(table.schema), table_schema)
self.assertEqual(table.num_rows, 2)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_w_explicit_schema(self):
# Schema with all scalar types.
scalars_schema = (
bigquery.SchemaField("bool_col", "BOOLEAN"),
bigquery.SchemaField("bytes_col", "BYTES"),
bigquery.SchemaField("date_col", "DATE"),
bigquery.SchemaField("dt_col", "DATETIME"),
bigquery.SchemaField("float_col", "FLOAT"),
bigquery.SchemaField("geo_col", "GEOGRAPHY"),
bigquery.SchemaField("int_col", "INTEGER"),
bigquery.SchemaField("num_col", "NUMERIC"),
bigquery.SchemaField("str_col", "STRING"),
bigquery.SchemaField("time_col", "TIME"),
bigquery.SchemaField("ts_col", "TIMESTAMP"),
)
table_schema = scalars_schema + (
# TODO: Array columns can't be read due to NULLABLE versus REPEATED
# mode mismatch. See:
# https://issuetracker.google.com/133415569#comment3
# bigquery.SchemaField("array_col", "INTEGER", mode="REPEATED"),
# TODO: Support writing StructArrays to Parquet. See:
# https://jira.apache.org/jira/browse/ARROW-2587
# bigquery.SchemaField("struct_col", "RECORD", fields=scalars_schema),
)
df_data = collections.OrderedDict(
[
("bool_col", [True, None, False]),
("bytes_col", [b"abc", None, b"def"]),
(
"date_col",
[datetime.date(1, 1, 1), None, datetime.date(9999, 12, 31)],
),
(
"dt_col",
[
datetime.datetime(1, 1, 1, 0, 0, 0),
None,
datetime.datetime(9999, 12, 31, 23, 59, 59, 999999),
],
),
("float_col", [float("-inf"), float("nan"), float("inf")]),
(
"geo_col",
[
"POINT(30 10)",
None,
"POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))",
],
),
("int_col", [-9223372036854775808, None, 9223372036854775807]),
(
"num_col",
[
decimal.Decimal("-99999999999999999999999999999.999999999"),
None,
decimal.Decimal("99999999999999999999999999999.999999999"),
],
),
("str_col", [u"abc", None, u"def"]),
(
"time_col",
[datetime.time(0, 0, 0), None, datetime.time(23, 59, 59, 999999)],
),
(
"ts_col",
[
datetime.datetime(1, 1, 1, 0, 0, 0, tzinfo=pytz.utc),
None,
datetime.datetime(
9999, 12, 31, 23, 59, 59, 999999, tzinfo=pytz.utc
),
],
),
]
)
dataframe = pandas.DataFrame(df_data, dtype="object", columns=df_data.keys())
dataset_id = _make_dataset_id("bq_load_test")
self.temp_dataset(dataset_id)
table_id = "{}.{}.load_table_from_dataframe_w_explicit_schema".format(
Config.CLIENT.project, dataset_id
)
job_config = bigquery.LoadJobConfig(schema=table_schema)
load_job = Config.CLIENT.load_table_from_dataframe(
dataframe, table_id, job_config=job_config
)
load_job.result()
table = Config.CLIENT.get_table(table_id)
self.assertEqual(tuple(table.schema), table_schema)
self.assertEqual(table.num_rows, 3)
def test_load_table_from_json_basic_use(self):
table_schema = (
bigquery.SchemaField("name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("birthday", "DATE", mode="REQUIRED"),
bigquery.SchemaField("is_awesome", "BOOLEAN", mode="REQUIRED"),
)
json_rows = [
{"name": "John", "age": 18, "birthday": "2001-10-15", "is_awesome": False},
{"name": "Chuck", "age": 79, "birthday": "1940-03-10", "is_awesome": True},
]
dataset_id = _make_dataset_id("bq_system_test")
self.temp_dataset(dataset_id)
table_id = "{}.{}.load_table_from_json_basic_use".format(
Config.CLIENT.project, dataset_id
)
# Create the table before loading so that schema mismatch errors are
# identified.
table = retry_403(Config.CLIENT.create_table)(
Table(table_id, schema=table_schema)
)
self.to_delete.insert(0, table)
job_config = bigquery.LoadJobConfig(schema=table_schema)
load_job = Config.CLIENT.load_table_from_json(
json_rows, table_id, job_config=job_config
)
load_job.result()
table = Config.CLIENT.get_table(table)
self.assertEqual(tuple(table.schema), table_schema)
self.assertEqual(table.num_rows, 2)
def test_load_table_from_json_schema_autodetect(self):
json_rows = [
{"name": "John", "age": 18, "birthday": "2001-10-15", "is_awesome": False},
{"name": "Chuck", "age": 79, "birthday": "1940-03-10", "is_awesome": True},
]
dataset_id = _make_dataset_id("bq_system_test")
self.temp_dataset(dataset_id)
table_id = "{}.{}.load_table_from_json_basic_use".format(
Config.CLIENT.project, dataset_id
)
# Use schema with NULLABLE fields, because schema autodetection
# defaults to field mode NULLABLE.
table_schema = (
bigquery.SchemaField("name", "STRING", mode="NULLABLE"),
bigquery.SchemaField("age", "INTEGER", mode="NULLABLE"),
bigquery.SchemaField("birthday", "DATE", mode="NULLABLE"),
bigquery.SchemaField("is_awesome", "BOOLEAN", mode="NULLABLE"),
)
# create the table before loading so that the column order is predictable
table = retry_403(Config.CLIENT.create_table)(
Table(table_id, schema=table_schema)
)
self.to_delete.insert(0, table)
# do not pass an explicit job config to trigger automatic schema detection
load_job = Config.CLIENT.load_table_from_json(json_rows, table_id)
load_job.result()
table = Config.CLIENT.get_table(table)
self.assertEqual(tuple(table.schema), table_schema)
self.assertEqual(table.num_rows, 2)
def test_load_avro_from_uri_then_dump_table(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
table_name = "test_table"
rows = [
("violet", 400),
("indigo", 445),
("blue", 475),
("green", 510),
("yellow", 570),
("orange", 590),
("red", 650),
]
with open(os.path.join(WHERE, "data", "colors.avro"), "rb") as f:
GS_URL = self._write_avro_to_storage(
"bq_load_test" + unique_resource_id(), "colors.avro", f
)
dataset = self.temp_dataset(_make_dataset_id("bq_load_test"))
table_arg = dataset.table(table_name)
table = retry_403(Config.CLIENT.create_table)(Table(table_arg))
self.to_delete.insert(0, table)
config = bigquery.LoadJobConfig()
config.create_disposition = CreateDisposition.CREATE_NEVER
config.source_format = SourceFormat.AVRO
config.write_disposition = WriteDisposition.WRITE_EMPTY
job = Config.CLIENT.load_table_from_uri(GS_URL, table_arg, job_config=config)
job.result(timeout=JOB_TIMEOUT)
self.assertEqual(job.output_rows, len(rows))
table = Config.CLIENT.get_table(table)
fetched = self._fetch_single_page(table)
row_tuples = [r.values() for r in fetched]
self.assertEqual(
sorted(row_tuples, key=lambda x: x[1]), sorted(rows, key=lambda x: x[1])
)
def test_load_table_from_uri_then_dump_table(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
TABLE_ID = "test_table"
GS_URL = self._write_csv_to_storage(
"bq_load_test" + unique_resource_id(), "person_ages.csv", HEADER_ROW, ROWS
)
dataset = self.temp_dataset(_make_dataset_id("load_gcs_then_dump"))
table_arg = Table(dataset.table(TABLE_ID), schema=SCHEMA)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
config = bigquery.LoadJobConfig()
config.create_disposition = CreateDisposition.CREATE_NEVER
config.skip_leading_rows = 1
config.source_format = SourceFormat.CSV
config.write_disposition = WriteDisposition.WRITE_EMPTY
job = Config.CLIENT.load_table_from_uri(
GS_URL, dataset.table(TABLE_ID), job_config=config
)
        # Allow the load job time to complete before rows are visible.  See
        # https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
        # 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
retry = RetryInstanceState(_job_done, max_tries=8)
retry(job.reload)()
rows = self._fetch_single_page(table)
row_tuples = [r.values() for r in rows]
by_age = operator.itemgetter(1)
self.assertEqual(sorted(row_tuples, key=by_age), sorted(ROWS, key=by_age))
def test_load_table_from_file_w_explicit_location(self):
# Create a temporary bucket for extract files.
bucket_name = "bq_load_table_eu_extract_test" + unique_resource_id()
self._create_bucket(bucket_name, location="eu")
# Create a temporary dataset & table in the EU.
table_bytes = six.BytesIO(b"a,3\nb,2\nc,1\n")
client = Config.CLIENT
dataset = self.temp_dataset(_make_dataset_id("eu_load_file"), location="EU")
table_ref = dataset.table("letters")
job_config = bigquery.LoadJobConfig()
job_config.skip_leading_rows = 0
job_config.schema = [
bigquery.SchemaField("letter", "STRING"),
bigquery.SchemaField("value", "INTEGER"),
]
# Load the file to an EU dataset with an EU load job.
load_job = client.load_table_from_file(
table_bytes, table_ref, location="EU", job_config=job_config
)
load_job.result()
job_id = load_job.job_id
# Can get the job from the EU.
load_job = client.get_job(job_id, location="EU")
self.assertEqual(job_id, load_job.job_id)
self.assertEqual("EU", load_job.location)
self.assertTrue(load_job.exists())
# Cannot get the job from the US.
with self.assertRaises(NotFound):
client.get_job(job_id, location="US")
load_job_us = client.get_job(job_id)
load_job_us._properties["jobReference"]["location"] = "US"
self.assertFalse(load_job_us.exists())
with self.assertRaises(NotFound):
load_job_us.reload()
# Can cancel the job from the EU.
self.assertTrue(load_job.cancel())
load_job = client.cancel_job(job_id, location="EU")
self.assertEqual(job_id, load_job.job_id)
self.assertEqual("EU", load_job.location)
# Cannot cancel the job from the US.
with self.assertRaises(NotFound):
client.cancel_job(job_id, location="US")
with self.assertRaises(NotFound):
load_job_us.cancel()
# Can list the table rows.
table = client.get_table(table_ref)
self.assertEqual(table.num_rows, 3)
rows = [(row.letter, row.value) for row in client.list_rows(table)]
self.assertEqual(list(sorted(rows)), [("a", 3), ("b", 2), ("c", 1)])
# Verify location behavior with queries
query_config = bigquery.QueryJobConfig()
query_config.dry_run = True
query_string = "SELECT * FROM `{}.letters` LIMIT 1".format(dataset.dataset_id)
eu_query = client.query(query_string, location="EU", job_config=query_config)
        self.assertTrue(eu_query.done())
# Cannot query from US.
with self.assertRaises(GoogleAPICallError):
list(client.query(query_string, location="US", job_config=query_config))
# Cannot copy from US.
with self.assertRaises(GoogleAPICallError):
client.copy_table(
table_ref, dataset.table("letters2_us"), location="US"
).result()
# Cannot extract from US.
with self.assertRaises(GoogleAPICallError):
client.extract_table(
table_ref, "gs://{}/letters-us.csv".format(bucket_name), location="US"
).result()
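    # The helpers below stage CSV/Avro fixture files in a temporary GCS bucket
    # and return a gs:// URI that load jobs can read from.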
def _write_csv_to_storage(self, bucket_name, blob_name, header_row, data_rows):
from google.cloud._testing import _NamedTemporaryFile
bucket = self._create_bucket(bucket_name)
blob = bucket.blob(blob_name)
with _NamedTemporaryFile() as temp:
with open(temp.name, "w") as csv_write:
writer = csv.writer(csv_write)
writer.writerow(header_row)
writer.writerows(data_rows)
with open(temp.name, "rb") as csv_read:
retry_storage_errors(blob.upload_from_file)(
csv_read, content_type="text/csv"
)
self.to_delete.insert(0, blob)
return "gs://{}/{}".format(bucket_name, blob_name)
def _write_avro_to_storage(self, bucket_name, blob_name, avro_file):
bucket = self._create_bucket(bucket_name)
blob = bucket.blob(blob_name)
retry_storage_errors(blob.upload_from_file)(
avro_file, content_type="application/x-avro-binary"
)
self.to_delete.insert(0, blob)
return "gs://{}/{}".format(bucket_name, blob_name)
def _load_table_for_extract_table(self, bucket, blob_name, table, rows):
from google.cloud._testing import _NamedTemporaryFile
blob = bucket.blob(blob_name)
with _NamedTemporaryFile() as temp:
with open(temp.name, "w") as csv_write:
writer = csv.writer(csv_write)
writer.writerow(HEADER_ROW)
writer.writerows(rows)
with open(temp.name, "rb") as csv_read:
retry_storage_errors(blob.upload_from_file)(
csv_read, content_type="text/csv"
)
self.to_delete.insert(0, blob)
dataset = self.temp_dataset(table.dataset_id)
table_ref = dataset.table(table.table_id)
config = bigquery.LoadJobConfig()
config.autodetect = True
gs_url = "gs://{}/{}".format(bucket.name, blob_name)
job = Config.CLIENT.load_table_from_uri(gs_url, table_ref, job_config=config)
# TODO(jba): do we need this retry now that we have job.result()?
        # Allow the load job time to complete before rows are visible.  See
        # https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
        # 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
retry = RetryInstanceState(_job_done, max_tries=8)
retry(job.reload)()
def test_extract_table(self):
local_id = unique_resource_id()
bucket_name = "bq_extract_test" + local_id
source_blob_name = "person_ages.csv"
dataset_id = _make_dataset_id("load_gcs_then_extract")
table_id = "test_table"
table_ref = Config.CLIENT.dataset(dataset_id).table(table_id)
table = Table(table_ref)
self.to_delete.insert(0, table)
bucket = self._create_bucket(bucket_name)
self._load_table_for_extract_table(bucket, source_blob_name, table_ref, ROWS)
destination_blob_name = "person_ages_out.csv"
destination = bucket.blob(destination_blob_name)
destination_uri = "gs://{}/person_ages_out.csv".format(bucket_name)
job = Config.CLIENT.extract_table(table_ref, destination_uri)
job.result(timeout=100)
self.to_delete.insert(0, destination)
got_bytes = retry_storage_errors(destination.download_as_string)()
got = got_bytes.decode("utf-8")
self.assertIn("Bharney Rhubble", got)
def test_copy_table(self):
# If we create a new table to copy from, the test won't work
# because the new rows will be stored in the streaming buffer,
# and copy jobs don't read the streaming buffer.
# We could wait for the streaming buffer to empty, but that could
# take minutes. Instead we copy a small public table.
source_dataset = DatasetReference("bigquery-public-data", "samples")
source_ref = source_dataset.table("shakespeare")
dest_dataset = self.temp_dataset(_make_dataset_id("copy_table"))
dest_ref = dest_dataset.table("destination_table")
job_config = bigquery.CopyJobConfig()
job = Config.CLIENT.copy_table(source_ref, dest_ref, job_config=job_config)
job.result()
dest_table = Config.CLIENT.get_table(dest_ref)
self.to_delete.insert(0, dest_table)
# Just check that we got some rows.
got_rows = self._fetch_single_page(dest_table)
self.assertTrue(len(got_rows) > 0)
def test_job_cancel(self):
DATASET_ID = _make_dataset_id("job_cancel")
JOB_ID_PREFIX = "fetch_" + DATASET_ID
TABLE_NAME = "test_table"
QUERY = "SELECT * FROM %s.%s" % (DATASET_ID, TABLE_NAME)
dataset = self.temp_dataset(DATASET_ID)
table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
job = Config.CLIENT.query(QUERY, job_id_prefix=JOB_ID_PREFIX)
job.cancel()
retry = RetryInstanceState(_job_done, max_tries=8)
retry(job.reload)()
        # The `cancel` API doesn't leave any reliable traces on
        # the status of the job resource, so we can't really assert for
        # them here.  The best we can do is note that the API call didn't
        # raise an error, and that the job completed (in the `retry()`
        # above).
def test_get_failed_job(self):
# issue 4246
from google.api_core.exceptions import BadRequest
JOB_ID = "invalid_{}".format(str(uuid.uuid4()))
QUERY = "SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);"
PARAM = bigquery.ScalarQueryParameter("ts_value", "TIMESTAMP", 1.4810976e9)
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = [PARAM]
with self.assertRaises(BadRequest):
Config.CLIENT.query(QUERY, job_id=JOB_ID, job_config=job_config).result()
job = Config.CLIENT.get_job(JOB_ID)
with self.assertRaises(ValueError):
job.query_parameters
def test_query_w_legacy_sql_types(self):
naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
stamp = "%s %s" % (naive.date().isoformat(), naive.time().isoformat())
zoned = naive.replace(tzinfo=UTC)
examples = [
{"sql": "SELECT 1", "expected": 1},
{"sql": "SELECT 1.3", "expected": 1.3},
{"sql": "SELECT TRUE", "expected": True},
{"sql": 'SELECT "ABC"', "expected": "ABC"},
{"sql": 'SELECT CAST("foo" AS BYTES)', "expected": b"foo"},
{"sql": 'SELECT CAST("%s" AS TIMESTAMP)' % (stamp,), "expected": zoned},
]
for example in examples:
job_config = bigquery.QueryJobConfig()
job_config.use_legacy_sql = True
rows = list(Config.CLIENT.query(example["sql"], job_config=job_config))
self.assertEqual(len(rows), 1)
self.assertEqual(len(rows[0]), 1)
self.assertEqual(rows[0][0], example["expected"])
def _generate_standard_sql_types_examples(self):
naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
naive_microseconds = datetime.datetime(2016, 12, 5, 12, 41, 9, 250000)
stamp = "%s %s" % (naive.date().isoformat(), naive.time().isoformat())
stamp_microseconds = stamp + ".250000"
zoned = naive.replace(tzinfo=UTC)
zoned_microseconds = naive_microseconds.replace(tzinfo=UTC)
numeric = decimal.Decimal("123456789.123456789")
return [
{"sql": "SELECT 1", "expected": 1},
{"sql": "SELECT 1.3", "expected": 1.3},
{"sql": "SELECT TRUE", "expected": True},
{"sql": 'SELECT "ABC"', "expected": "ABC"},
{"sql": 'SELECT CAST("foo" AS BYTES)', "expected": b"foo"},
{"sql": 'SELECT TIMESTAMP "%s"' % (stamp,), "expected": zoned},
{
"sql": 'SELECT TIMESTAMP "%s"' % (stamp_microseconds,),
"expected": zoned_microseconds,
},
{"sql": 'SELECT DATETIME(TIMESTAMP "%s")' % (stamp,), "expected": naive},
{
"sql": 'SELECT DATETIME(TIMESTAMP "%s")' % (stamp_microseconds,),
"expected": naive_microseconds,
},
{"sql": 'SELECT DATE(TIMESTAMP "%s")' % (stamp,), "expected": naive.date()},
{"sql": 'SELECT TIME(TIMESTAMP "%s")' % (stamp,), "expected": naive.time()},
{"sql": 'SELECT NUMERIC "%s"' % (numeric,), "expected": numeric},
{"sql": "SELECT (1, 2)", "expected": {"_field_1": 1, "_field_2": 2}},
{
"sql": "SELECT ((1, 2), (3, 4), 5)",
"expected": {
"_field_1": {"_field_1": 1, "_field_2": 2},
"_field_2": {"_field_1": 3, "_field_2": 4},
"_field_3": 5,
},
},
{"sql": "SELECT [1, 2, 3]", "expected": [1, 2, 3]},
{
"sql": "SELECT ([1, 2], 3, [4, 5])",
"expected": {"_field_1": [1, 2], "_field_2": 3, "_field_3": [4, 5]},
},
{
"sql": "SELECT [(1, 2, 3), (4, 5, 6)]",
"expected": [
{"_field_1": 1, "_field_2": 2, "_field_3": 3},
{"_field_1": 4, "_field_2": 5, "_field_3": 6},
],
},
{
"sql": "SELECT [([1, 2, 3], 4), ([5, 6], 7)]",
"expected": [
{u"_field_1": [1, 2, 3], u"_field_2": 4},
{u"_field_1": [5, 6], u"_field_2": 7},
],
},
{
"sql": "SELECT ARRAY(SELECT STRUCT([1, 2]))",
"expected": [{u"_field_1": [1, 2]}],
},
{"sql": "SELECT ST_GeogPoint(1, 2)", "expected": "POINT(1 2)"},
]
def test_query_w_standard_sql_types(self):
examples = self._generate_standard_sql_types_examples()
for example in examples:
rows = list(Config.CLIENT.query(example["sql"]))
self.assertEqual(len(rows), 1)
self.assertEqual(len(rows[0]), 1)
self.assertEqual(rows[0][0], example["expected"])
def test_query_w_failed_query(self):
from google.api_core.exceptions import BadRequest
with self.assertRaises(BadRequest):
Config.CLIENT.query("invalid syntax;").result()
def test_query_w_wrong_config(self):
from google.cloud.bigquery.job import LoadJobConfig
good_query = "SELECT 1;"
rows = list(Config.CLIENT.query("SELECT 1;").result())
assert rows[0][0] == 1
bad_config = LoadJobConfig()
bad_config.destination = Config.CLIENT.dataset("dset").table("tbl")
with self.assertRaises(Exception):
Config.CLIENT.query(good_query, job_config=bad_config).result()
def test_query_w_timeout(self):
query_job = Config.CLIENT.query(
"SELECT * FROM `bigquery-public-data.github_repos.commits`;",
job_id_prefix="test_query_w_timeout_",
)
with self.assertRaises(concurrent.futures.TimeoutError):
# 1 second is much too short for this query.
query_job.result(timeout=1)
def test_query_w_page_size(self):
page_size = 45
query_job = Config.CLIENT.query(
"SELECT word FROM `bigquery-public-data.samples.shakespeare`;",
job_id_prefix="test_query_w_page_size_",
)
iterator = query_job.result(page_size=page_size)
self.assertEqual(next(iterator.pages).num_items, page_size)
def test_query_statistics(self):
"""
A system test to exercise some of the extended query statistics.
Note: We construct a query that should need at least three stages by
specifying a JOIN query. Exact plan and stats are effectively
non-deterministic, so we're largely interested in confirming values
are present.
"""
job_config = bigquery.QueryJobConfig()
job_config.use_query_cache = False
query_job = Config.CLIENT.query(
"""
SELECT
COUNT(1)
FROM
(
SELECT
year,
wban_number
FROM `bigquery-public-data.samples.gsod`
LIMIT 1000
) lside
INNER JOIN
(
SELECT
year,
state
FROM `bigquery-public-data.samples.natality`
LIMIT 1000
) rside
ON
lside.year = rside.year
""",
location="US",
job_config=job_config,
)
# run the job to completion
query_job.result()
# Assert top-level stats
self.assertFalse(query_job.cache_hit)
self.assertIsNotNone(query_job.destination)
        self.assertTrue(query_job.done())
self.assertFalse(query_job.dry_run)
self.assertIsNone(query_job.num_dml_affected_rows)
self.assertEqual(query_job.priority, "INTERACTIVE")
self.assertGreater(query_job.total_bytes_billed, 1)
self.assertGreater(query_job.total_bytes_processed, 1)
self.assertEqual(query_job.statement_type, "SELECT")
self.assertGreater(query_job.slot_millis, 1)
# Make assertions on the shape of the query plan.
plan = query_job.query_plan
self.assertGreaterEqual(len(plan), 3)
first_stage = plan[0]
self.assertIsNotNone(first_stage.start)
self.assertIsNotNone(first_stage.end)
self.assertIsNotNone(first_stage.entry_id)
self.assertIsNotNone(first_stage.name)
self.assertGreater(first_stage.parallel_inputs, 0)
self.assertGreater(first_stage.completed_parallel_inputs, 0)
self.assertGreater(first_stage.shuffle_output_bytes, 0)
self.assertEqual(first_stage.status, "COMPLETE")
# Query plan is a digraph. Ensure it has inter-stage links,
# but not every stage has inputs.
stages_with_inputs = 0
for entry in plan:
if len(entry.input_stages) > 0:
stages_with_inputs = stages_with_inputs + 1
self.assertGreater(stages_with_inputs, 0)
self.assertGreater(len(plan), stages_with_inputs)
def test_dbapi_w_standard_sql_types(self):
examples = self._generate_standard_sql_types_examples()
for example in examples:
Config.CURSOR.execute(example["sql"])
self.assertEqual(Config.CURSOR.rowcount, 1)
row = Config.CURSOR.fetchone()
self.assertEqual(len(row), 1)
self.assertEqual(row[0], example["expected"])
row = Config.CURSOR.fetchone()
self.assertIsNone(row)
def test_dbapi_fetchall(self):
query = "SELECT * FROM UNNEST([(1, 2), (3, 4), (5, 6)])"
for arraysize in range(1, 5):
Config.CURSOR.execute(query)
self.assertEqual(Config.CURSOR.rowcount, 3, "expected 3 rows")
Config.CURSOR.arraysize = arraysize
rows = Config.CURSOR.fetchall()
row_tuples = [r.values() for r in rows]
self.assertEqual(row_tuples, [(1, 2), (3, 4), (5, 6)])
def _load_table_for_dml(self, rows, dataset_id, table_id):
from google.cloud._testing import _NamedTemporaryFile
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
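        # Load the rows with a batch load job rather than streaming inserts, so
        # the UPDATE statements issued by the DML tests can see and modify them
        # right away.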
dataset = self.temp_dataset(dataset_id)
greeting = bigquery.SchemaField("greeting", "STRING", mode="NULLABLE")
table_ref = dataset.table(table_id)
table_arg = Table(table_ref, schema=[greeting])
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
with _NamedTemporaryFile() as temp:
with open(temp.name, "w") as csv_write:
writer = csv.writer(csv_write)
writer.writerow(("Greeting",))
writer.writerows(rows)
with open(temp.name, "rb") as csv_read:
config = bigquery.LoadJobConfig()
config.source_format = SourceFormat.CSV
config.skip_leading_rows = 1
config.create_disposition = CreateDisposition.CREATE_NEVER
config.write_disposition = WriteDisposition.WRITE_EMPTY
job = Config.CLIENT.load_table_from_file(
csv_read, table_ref, job_config=config
)
# Retry until done.
job.result(timeout=JOB_TIMEOUT)
self._fetch_single_page(table)
def test_query_w_dml(self):
dataset_name = _make_dataset_id("dml_query")
table_name = "test_table"
self._load_table_for_dml([("Hello World",)], dataset_name, table_name)
query_template = """UPDATE {}.{}
SET greeting = 'Guten Tag'
WHERE greeting = 'Hello World'
"""
query_job = Config.CLIENT.query(
query_template.format(dataset_name, table_name),
job_id_prefix="test_query_w_dml_",
)
query_job.result()
self.assertEqual(query_job.num_dml_affected_rows, 1)
def test_dbapi_w_dml(self):
dataset_name = _make_dataset_id("dml_dbapi")
table_name = "test_table"
self._load_table_for_dml([("Hello World",)], dataset_name, table_name)
query_template = """UPDATE {}.{}
SET greeting = 'Guten Tag'
WHERE greeting = 'Hello World'
"""
Config.CURSOR.execute(
query_template.format(dataset_name, table_name),
job_id="test_dbapi_w_dml_{}".format(str(uuid.uuid4())),
)
self.assertEqual(Config.CURSOR.rowcount, 1)
self.assertIsNone(Config.CURSOR.fetchone())
def test_query_w_query_params(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import ArrayQueryParameter
from google.cloud.bigquery.query import ScalarQueryParameter
from google.cloud.bigquery.query import StructQueryParameter
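        # Named parameters are referenced as @name in the SQL below; parameters
        # built with name=None are positional and referenced as ?.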
question = "What is the answer to life, the universe, and everything?"
question_param = ScalarQueryParameter(
name="question", type_="STRING", value=question
)
answer = 42
answer_param = ScalarQueryParameter(name="answer", type_="INT64", value=answer)
pi = 3.1415926
pi_param = ScalarQueryParameter(name="pi", type_="FLOAT64", value=pi)
pi_numeric = decimal.Decimal("3.141592654")
pi_numeric_param = ScalarQueryParameter(
name="pi_numeric_param", type_="NUMERIC", value=pi_numeric
)
truthy = True
truthy_param = ScalarQueryParameter(name="truthy", type_="BOOL", value=truthy)
beef = b"DEADBEEF"
beef_param = ScalarQueryParameter(name="beef", type_="BYTES", value=beef)
naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
naive_param = ScalarQueryParameter(name="naive", type_="DATETIME", value=naive)
naive_date_param = ScalarQueryParameter(
name="naive_date", type_="DATE", value=naive.date()
)
naive_time_param = ScalarQueryParameter(
name="naive_time", type_="TIME", value=naive.time()
)
zoned = naive.replace(tzinfo=UTC)
zoned_param = ScalarQueryParameter(name="zoned", type_="TIMESTAMP", value=zoned)
array_param = ArrayQueryParameter(
name="array_param", array_type="INT64", values=[1, 2]
)
struct_param = StructQueryParameter("hitchhiker", question_param, answer_param)
phred_name = "Phred Phlyntstone"
phred_name_param = ScalarQueryParameter(
name="name", type_="STRING", value=phred_name
)
phred_age = 32
phred_age_param = ScalarQueryParameter(
name="age", type_="INT64", value=phred_age
)
phred_param = StructQueryParameter(None, phred_name_param, phred_age_param)
bharney_name = "Bharney Rhubbyl"
bharney_name_param = ScalarQueryParameter(
name="name", type_="STRING", value=bharney_name
)
bharney_age = 31
bharney_age_param = ScalarQueryParameter(
name="age", type_="INT64", value=bharney_age
)
bharney_param = StructQueryParameter(
None, bharney_name_param, bharney_age_param
)
characters_param = ArrayQueryParameter(
name=None, array_type="RECORD", values=[phred_param, bharney_param]
)
hero_param = StructQueryParameter("hero", phred_name_param, phred_age_param)
sidekick_param = StructQueryParameter(
"sidekick", bharney_name_param, bharney_age_param
)
roles_param = StructQueryParameter("roles", hero_param, sidekick_param)
friends_param = ArrayQueryParameter(
name="friends", array_type="STRING", values=[phred_name, bharney_name]
)
with_friends_param = StructQueryParameter(None, friends_param)
top_left_param = StructQueryParameter(
"top_left",
ScalarQueryParameter("x", "INT64", 12),
ScalarQueryParameter("y", "INT64", 102),
)
bottom_right_param = StructQueryParameter(
"bottom_right",
ScalarQueryParameter("x", "INT64", 22),
ScalarQueryParameter("y", "INT64", 92),
)
rectangle_param = StructQueryParameter(
"rectangle", top_left_param, bottom_right_param
)
examples = [
{
"sql": "SELECT @question",
"expected": question,
"query_parameters": [question_param],
},
{
"sql": "SELECT @answer",
"expected": answer,
"query_parameters": [answer_param],
},
{"sql": "SELECT @pi", "expected": pi, "query_parameters": [pi_param]},
{
"sql": "SELECT @pi_numeric_param",
"expected": pi_numeric,
"query_parameters": [pi_numeric_param],
},
{
"sql": "SELECT @truthy",
"expected": truthy,
"query_parameters": [truthy_param],
},
{"sql": "SELECT @beef", "expected": beef, "query_parameters": [beef_param]},
{
"sql": "SELECT @naive",
"expected": naive,
"query_parameters": [naive_param],
},
{
"sql": "SELECT @naive_date",
"expected": naive.date(),
"query_parameters": [naive_date_param],
},
{
"sql": "SELECT @naive_time",
"expected": naive.time(),
"query_parameters": [naive_time_param],
},
{
"sql": "SELECT @zoned",
"expected": zoned,
"query_parameters": [zoned_param],
},
{
"sql": "SELECT @array_param",
"expected": [1, 2],
"query_parameters": [array_param],
},
{
"sql": "SELECT (@hitchhiker.question, @hitchhiker.answer)",
"expected": ({"_field_1": question, "_field_2": answer}),
"query_parameters": [struct_param],
},
{
"sql": "SELECT "
"((@rectangle.bottom_right.x - @rectangle.top_left.x) "
"* (@rectangle.top_left.y - @rectangle.bottom_right.y))",
"expected": 100,
"query_parameters": [rectangle_param],
},
{
"sql": "SELECT ?",
"expected": [
{"name": phred_name, "age": phred_age},
{"name": bharney_name, "age": bharney_age},
],
"query_parameters": [characters_param],
},
{
"sql": "SELECT @roles",
"expected": {
"hero": {"name": phred_name, "age": phred_age},
"sidekick": {"name": bharney_name, "age": bharney_age},
},
"query_parameters": [roles_param],
},
{
"sql": "SELECT ?",
"expected": {"friends": [phred_name, bharney_name]},
"query_parameters": [with_friends_param],
},
]
for example in examples:
jconfig = QueryJobConfig()
jconfig.query_parameters = example["query_parameters"]
query_job = Config.CLIENT.query(
example["sql"],
job_config=jconfig,
job_id_prefix="test_query_w_query_params",
)
rows = list(query_job.result())
self.assertEqual(len(rows), 1)
self.assertEqual(len(rows[0]), 1)
self.assertEqual(rows[0][0], example["expected"])
def test_dbapi_w_query_parameters(self):
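        # The DB-API layer uses pyformat/format placeholders: %(name)s binds a
        # named parameter and %s binds a positional one; parameter types are
        # inferred from the Python values.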
examples = [
{
"sql": "SELECT %(boolval)s",
"expected": True,
"query_parameters": {"boolval": True},
},
{
"sql": 'SELECT %(a "very" weird `name`)s',
"expected": True,
"query_parameters": {'a "very" weird `name`': True},
},
{
"sql": "SELECT %(select)s",
"expected": True,
"query_parameters": {"select": True}, # this name is a keyword
},
{"sql": "SELECT %s", "expected": False, "query_parameters": [False]},
{
"sql": "SELECT %(intval)s",
"expected": 123,
"query_parameters": {"intval": 123},
},
{
"sql": "SELECT %s",
"expected": -123456789,
"query_parameters": [-123456789],
},
{
"sql": "SELECT %(floatval)s",
"expected": 1.25,
"query_parameters": {"floatval": 1.25},
},
{
"sql": "SELECT LOWER(%(strval)s)",
"query_parameters": {"strval": "I Am A String"},
"expected": "i am a string",
},
{
"sql": "SELECT DATE_SUB(%(dateval)s, INTERVAL 1 DAY)",
"query_parameters": {"dateval": datetime.date(2017, 4, 2)},
"expected": datetime.date(2017, 4, 1),
},
{
"sql": "SELECT TIME_ADD(%(timeval)s, INTERVAL 4 SECOND)",
"query_parameters": {"timeval": datetime.time(12, 34, 56)},
"expected": datetime.time(12, 35, 0),
},
{
"sql": ("SELECT DATETIME_ADD(%(datetimeval)s, INTERVAL 53 SECOND)"),
"query_parameters": {
"datetimeval": datetime.datetime(2012, 3, 4, 5, 6, 7)
},
"expected": datetime.datetime(2012, 3, 4, 5, 7, 0),
},
{
"sql": "SELECT TIMESTAMP_TRUNC(%(zoned)s, MINUTE)",
"query_parameters": {
"zoned": datetime.datetime(2012, 3, 4, 5, 6, 7, tzinfo=UTC)
},
"expected": datetime.datetime(2012, 3, 4, 5, 6, 0, tzinfo=UTC),
},
{
"sql": "SELECT TIMESTAMP_TRUNC(%(zoned)s, MINUTE)",
"query_parameters": {
"zoned": datetime.datetime(2012, 3, 4, 5, 6, 7, 250000, tzinfo=UTC)
},
"expected": datetime.datetime(2012, 3, 4, 5, 6, 0, tzinfo=UTC),
},
]
for example in examples:
msg = "sql: {} query_parameters: {}".format(
example["sql"], example["query_parameters"]
)
Config.CURSOR.execute(example["sql"], example["query_parameters"])
self.assertEqual(Config.CURSOR.rowcount, 1, msg=msg)
row = Config.CURSOR.fetchone()
self.assertEqual(len(row), 1, msg=msg)
self.assertEqual(row[0], example["expected"], msg=msg)
row = Config.CURSOR.fetchone()
self.assertIsNone(row, msg=msg)
def test_large_query_w_public_data(self):
PUBLIC = "bigquery-public-data"
DATASET_ID = "samples"
TABLE_NAME = "natality"
LIMIT = 1000
SQL = "SELECT * from `{}.{}.{}` LIMIT {}".format(
PUBLIC, DATASET_ID, TABLE_NAME, LIMIT
)
query_job = Config.CLIENT.query(SQL)
rows = list(query_job)
self.assertEqual(len(rows), LIMIT)
def test_query_future(self):
query_job = Config.CLIENT.query("SELECT 1")
iterator = query_job.result(timeout=JOB_TIMEOUT)
row_tuples = [r.values() for r in iterator]
self.assertEqual(row_tuples, [(1,)])
def test_query_iter(self):
import types
query_job = Config.CLIENT.query("SELECT 1")
self.assertIsInstance(iter(query_job), types.GeneratorType)
row_tuples = [r.values() for r in query_job]
self.assertEqual(row_tuples, [(1,)])
def test_querying_data_w_timeout(self):
job_config = bigquery.QueryJobConfig()
job_config.use_query_cache = False
query_job = Config.CLIENT.query(
"""
SELECT name, SUM(number) AS total_people
FROM `bigquery-public-data.usa_names.usa_1910_current`
GROUP BY name
""",
location="US",
job_config=job_config,
)
# Specify a very tight deadline to demonstrate that the timeout
        # actually has an effect.
with self.assertRaises(requests.exceptions.Timeout):
query_job.done(timeout=0.1)
# Now wait for the result using a more realistic deadline.
query_job.result(timeout=30)
self.assertTrue(query_job.done(timeout=30))
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_query_results_to_dataframe(self):
QUERY = """
SELECT id, author, time_ts, dead
FROM `bigquery-public-data.hacker_news.comments`
LIMIT 10
"""
df = Config.CLIENT.query(QUERY).result().to_dataframe()
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 10) # verify the number of rows
column_names = ["id", "author", "time_ts", "dead"]
self.assertEqual(list(df), column_names) # verify the column names
exp_datatypes = {
"id": int,
"author": six.text_type,
"time_ts": pandas.Timestamp,
"dead": bool,
}
for index, row in df.iterrows():
for col in column_names:
# all the schema fields are nullable, so None is acceptable
                if row[col] is not None:
self.assertIsInstance(row[col], exp_datatypes[col])
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_query_results_to_dataframe_w_bqstorage(self):
dest_dataset = self.temp_dataset(_make_dataset_id("bqstorage_to_dataframe_"))
dest_ref = dest_dataset.table("query_results")
query = """
SELECT id, author, time_ts, dead
FROM `bigquery-public-data.hacker_news.comments`
LIMIT 10
"""
bqstorage_client = bigquery_storage_v1beta1.BigQueryStorageClient(
credentials=Config.CLIENT._credentials
)
job_configs = (
# There is a known issue reading small anonymous query result
# tables with the BQ Storage API. Writing to a destination
# table works around this issue.
bigquery.QueryJobConfig(
destination=dest_ref, write_disposition="WRITE_TRUNCATE"
),
# Check that the client is able to work around the issue with
# reading small anonymous query result tables by falling back to
# the tabledata.list API.
None,
)
for job_config in job_configs:
df = (
Config.CLIENT.query(query, job_config=job_config)
.result()
.to_dataframe(bqstorage_client)
)
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 10) # verify the number of rows
column_names = ["id", "author", "time_ts", "dead"]
self.assertEqual(list(df), column_names)
exp_datatypes = {
"id": int,
"author": six.text_type,
"time_ts": pandas.Timestamp,
"dead": bool,
}
for index, row in df.iterrows():
for col in column_names:
# all the schema fields are nullable, so None is acceptable
                    if row[col] is not None:
self.assertIsInstance(row[col], exp_datatypes[col])
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_insert_rows_from_dataframe(self):
SF = bigquery.SchemaField
schema = [
SF("float_col", "FLOAT", mode="REQUIRED"),
SF("int_col", "INTEGER", mode="REQUIRED"),
SF("bool_col", "BOOLEAN", mode="REQUIRED"),
SF("string_col", "STRING", mode="NULLABLE"),
]
dataframe = pandas.DataFrame(
[
{
"float_col": 1.11,
"bool_col": True,
"string_col": "my string",
"int_col": 10,
},
{
"float_col": 2.22,
"bool_col": False,
"string_col": "another string",
"int_col": 20,
},
{
"float_col": 3.33,
"bool_col": False,
"string_col": "another string",
"int_col": 30,
},
{
"float_col": 4.44,
"bool_col": True,
"string_col": "another string",
"int_col": 40,
},
{
"float_col": 5.55,
"bool_col": False,
"string_col": "another string",
"int_col": 50,
},
]
)
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("issue_7553"))
table_arg = Table(dataset.table(table_id), schema=schema)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
Config.CLIENT.insert_rows_from_dataframe(table, dataframe, chunk_size=3)
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
sorted_rows = sorted(rows, key=operator.attrgetter("int_col"))
row_tuples = [r.values() for r in sorted_rows]
expected = [tuple(data_row) for data_row in dataframe.itertuples(index=False)]
assert len(row_tuples) == len(expected)
for row, expected_row in zip(row_tuples, expected):
six.assertCountEqual(
self, row, expected_row
) # column order does not matter
def test_insert_rows_nested_nested(self):
# See #2951
SF = bigquery.SchemaField
schema = [
SF("string_col", "STRING", mode="NULLABLE"),
SF(
"record_col",
"RECORD",
mode="NULLABLE",
fields=[
SF("nested_string", "STRING", mode="NULLABLE"),
SF("nested_repeated", "INTEGER", mode="REPEATED"),
SF(
"nested_record",
"RECORD",
mode="NULLABLE",
fields=[SF("nested_nested_string", "STRING", mode="NULLABLE")],
),
],
),
]
record = {
"nested_string": "another string value",
"nested_repeated": [0, 1, 2],
"nested_record": {"nested_nested_string": "some deep insight"},
}
to_insert = [("Some value", record)]
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("issue_2951"))
table_arg = Table(dataset.table(table_id), schema=schema)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
Config.CLIENT.insert_rows(table, to_insert)
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
row_tuples = [r.values() for r in rows]
self.assertEqual(row_tuples, to_insert)
def test_insert_rows_nested_nested_dictionary(self):
# See #2951
SF = bigquery.SchemaField
schema = [
SF("string_col", "STRING", mode="NULLABLE"),
SF(
"record_col",
"RECORD",
mode="NULLABLE",
fields=[
SF("nested_string", "STRING", mode="NULLABLE"),
SF("nested_repeated", "INTEGER", mode="REPEATED"),
SF(
"nested_record",
"RECORD",
mode="NULLABLE",
fields=[SF("nested_nested_string", "STRING", mode="NULLABLE")],
),
],
),
]
record = {
"nested_string": "another string value",
"nested_repeated": [0, 1, 2],
"nested_record": {"nested_nested_string": "some deep insight"},
}
to_insert = [{"string_col": "Some value", "record_col": record}]
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("issue_2951"))
table_arg = Table(dataset.table(table_id), schema=schema)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
Config.CLIENT.insert_rows(table, to_insert)
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
row_tuples = [r.values() for r in rows]
expected_rows = [("Some value", record)]
self.assertEqual(row_tuples, expected_rows)
def test_create_routine(self):
routine_name = "test_routine"
dataset = self.temp_dataset(_make_dataset_id("create_routine"))
float64_type = bigquery_v2.types.StandardSqlDataType(
type_kind=bigquery_v2.enums.StandardSqlDataType.TypeKind.FLOAT64
)
routine = bigquery.Routine(
dataset.routine(routine_name),
language="JAVASCRIPT",
type_="SCALAR_FUNCTION",
return_type=float64_type,
imported_libraries=[
"gs://{}/bigquery/udfs/max-value.js".format(SAMPLES_BUCKET)
],
)
routine.arguments = [
bigquery.RoutineArgument(
name="arr",
data_type=bigquery_v2.types.StandardSqlDataType(
type_kind=bigquery_v2.enums.StandardSqlDataType.TypeKind.ARRAY,
array_element_type=float64_type,
),
)
]
routine.body = "return maxValue(arr)"
query_string = "SELECT `{}`([-100.0, 3.14, 100.0, 42.0]) as max_value;".format(
str(routine.reference)
)
routine = retry_403(Config.CLIENT.create_routine)(routine)
query_job = retry_403(Config.CLIENT.query)(query_string)
rows = list(query_job.result())
assert len(rows) == 1
assert rows[0].max_value == 100.0
def test_create_table_rows_fetch_nested_schema(self):
table_name = "test_table"
dataset = self.temp_dataset(_make_dataset_id("create_table_nested_schema"))
schema = _load_json_schema()
table_arg = Table(dataset.table(table_name), schema=schema)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertEqual(table.table_id, table_name)
to_insert = []
# Data is in "JSON Lines" format, see http://jsonlines.org/
json_filename = os.path.join(WHERE, "data", "characters.jsonl")
with open(json_filename) as rows_file:
for line in rows_file:
to_insert.append(json.loads(line))
errors = Config.CLIENT.insert_rows_json(table, to_insert)
self.assertEqual(len(errors), 0)
retry = RetryResult(_has_rows, max_tries=8)
fetched = retry(self._fetch_single_page)(table)
fetched_tuples = [f.values() for f in fetched]
self.assertEqual(len(fetched), len(to_insert))
for found, expected in zip(sorted(fetched_tuples), to_insert):
self.assertEqual(found[0], expected["Name"])
self.assertEqual(found[1], int(expected["Age"]))
self.assertEqual(found[2], expected["Weight"])
self.assertEqual(found[3], expected["IsMagic"])
self.assertEqual(len(found[4]), len(expected["Spells"]))
for f_spell, e_spell in zip(found[4], expected["Spells"]):
self.assertEqual(f_spell["Name"], e_spell["Name"])
parts = time.strptime(e_spell["LastUsed"], "%Y-%m-%d %H:%M:%S UTC")
e_used = datetime.datetime(*parts[0:6], tzinfo=UTC)
self.assertEqual(f_spell["LastUsed"], e_used)
self.assertEqual(f_spell["DiscoveredBy"], e_spell["DiscoveredBy"])
self.assertEqual(f_spell["Properties"], e_spell["Properties"])
e_icon = base64.standard_b64decode(e_spell["Icon"].encode("ascii"))
self.assertEqual(f_spell["Icon"], e_icon)
parts = time.strptime(expected["TeaTime"], "%H:%M:%S")
e_teatime = datetime.time(*parts[3:6])
self.assertEqual(found[5], e_teatime)
parts = time.strptime(expected["NextVacation"], "%Y-%m-%d")
e_nextvac = datetime.date(*parts[0:3])
self.assertEqual(found[6], e_nextvac)
parts = time.strptime(expected["FavoriteTime"], "%Y-%m-%dT%H:%M:%S")
e_favtime = datetime.datetime(*parts[0:6])
self.assertEqual(found[7], e_favtime)
self.assertEqual(found[8], decimal.Decimal(expected["FavoriteNumber"]))
def _fetch_dataframe(self, query):
return Config.CLIENT.query(query).result().to_dataframe()
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_nested_table_to_arrow(self):
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
SF = bigquery.SchemaField
schema = [
SF("string_col", "STRING", mode="NULLABLE"),
SF(
"record_col",
"RECORD",
mode="NULLABLE",
fields=[
SF("nested_string", "STRING", mode="NULLABLE"),
SF("nested_repeated", "INTEGER", mode="REPEATED"),
],
),
SF("float_col", "FLOAT", mode="NULLABLE"),
]
record = {"nested_string": "another string value", "nested_repeated": [0, 1, 2]}
to_insert = [
{"string_col": "Some value", "record_col": record, "float_col": 3.14}
]
rows = [json.dumps(row) for row in to_insert]
body = six.BytesIO("{}\n".format("\n".join(rows)).encode("ascii"))
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("nested_df"))
table = dataset.table(table_id)
self.to_delete.insert(0, table)
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
job_config.schema = schema
# Load a table using a local JSON file from memory.
Config.CLIENT.load_table_from_file(body, table, job_config=job_config).result()
bqstorage_client = bigquery_storage_v1beta1.BigQueryStorageClient(
credentials=Config.CLIENT._credentials
)
tbl = Config.CLIENT.list_rows(table, selected_fields=schema).to_arrow(
bqstorage_client=bqstorage_client
)
self.assertIsInstance(tbl, pyarrow.Table)
self.assertEqual(tbl.num_rows, 1)
self.assertEqual(tbl.num_columns, 3)
# Columns may not appear in the requested order.
self.assertTrue(
pyarrow.types.is_float64(tbl.schema.field_by_name("float_col").type)
)
self.assertTrue(
pyarrow.types.is_string(tbl.schema.field_by_name("string_col").type)
)
record_col = tbl.schema.field_by_name("record_col").type
self.assertTrue(pyarrow.types.is_struct(record_col))
self.assertEqual(record_col.num_children, 2)
self.assertEqual(record_col[0].name, "nested_string")
self.assertTrue(pyarrow.types.is_string(record_col[0].type))
self.assertEqual(record_col[1].name, "nested_repeated")
self.assertTrue(pyarrow.types.is_list(record_col[1].type))
self.assertTrue(pyarrow.types.is_int64(record_col[1].type.value_type))
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_nested_table_to_dataframe(self):
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
SF = bigquery.SchemaField
schema = [
SF("string_col", "STRING", mode="NULLABLE"),
SF(
"record_col",
"RECORD",
mode="NULLABLE",
fields=[
SF("nested_string", "STRING", mode="NULLABLE"),
SF("nested_repeated", "INTEGER", mode="REPEATED"),
SF(
"nested_record",
"RECORD",
mode="NULLABLE",
fields=[SF("nested_nested_string", "STRING", mode="NULLABLE")],
),
],
),
SF("bigfloat_col", "FLOAT", mode="NULLABLE"),
SF("smallfloat_col", "FLOAT", mode="NULLABLE"),
]
record = {
"nested_string": "another string value",
"nested_repeated": [0, 1, 2],
"nested_record": {"nested_nested_string": "some deep insight"},
}
to_insert = [
{
"string_col": "Some value",
"record_col": record,
"bigfloat_col": 3.14,
"smallfloat_col": 2.72,
}
]
rows = [json.dumps(row) for row in to_insert]
body = six.BytesIO("{}\n".format("\n".join(rows)).encode("ascii"))
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("nested_df"))
table = dataset.table(table_id)
self.to_delete.insert(0, table)
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
job_config.schema = schema
# Load a table using a local JSON file from memory.
Config.CLIENT.load_table_from_file(body, table, job_config=job_config).result()
df = Config.CLIENT.list_rows(table, selected_fields=schema).to_dataframe(
dtypes={"smallfloat_col": "float16"}
)
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 1) # verify the number of rows
exp_columns = ["string_col", "record_col", "bigfloat_col", "smallfloat_col"]
self.assertEqual(list(df), exp_columns) # verify the column names
row = df.iloc[0]
# verify the row content
self.assertEqual(row["string_col"], "Some value")
expected_keys = tuple(sorted(record.keys()))
row_keys = tuple(sorted(row["record_col"].keys()))
self.assertEqual(row_keys, expected_keys)
        # pyarrow encodes the embedded repeated column as a numpy array,
        # which can't be compared directly, so convert it to a list.
self.assertEqual(list(row["record_col"]["nested_repeated"]), [0, 1, 2])
# verify that nested data can be accessed with indices/keys
self.assertEqual(row["record_col"]["nested_repeated"][0], 0)
self.assertEqual(
row["record_col"]["nested_record"]["nested_nested_string"],
"some deep insight",
)
# verify dtypes
self.assertEqual(df.dtypes["bigfloat_col"].name, "float64")
self.assertEqual(df.dtypes["smallfloat_col"].name, "float16")
def test_list_rows_empty_table(self):
from google.cloud.bigquery.table import RowIterator
dataset_id = _make_dataset_id("empty_table")
dataset = self.temp_dataset(dataset_id)
table_ref = dataset.table("empty_table")
table = Config.CLIENT.create_table(bigquery.Table(table_ref))
# It's a bit silly to list rows for an empty table, but this does
# happen as the result of a DDL query from an IPython magic command.
rows = Config.CLIENT.list_rows(table)
self.assertIsInstance(rows, RowIterator)
self.assertEqual(tuple(rows), ())
def test_list_rows_page_size(self):
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
num_items = 7
page_size = 3
num_pages, num_last_page = divmod(num_items, page_size)
SF = bigquery.SchemaField
schema = [SF("string_col", "STRING", mode="NULLABLE")]
to_insert = [{"string_col": "item%d" % i} for i in range(num_items)]
rows = [json.dumps(row) for row in to_insert]
body = six.BytesIO("{}\n".format("\n".join(rows)).encode("ascii"))
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("nested_df"))
table = dataset.table(table_id)
self.to_delete.insert(0, table)
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
job_config.schema = schema
# Load a table using a local JSON file from memory.
Config.CLIENT.load_table_from_file(body, table, job_config=job_config).result()
df = Config.CLIENT.list_rows(table, selected_fields=schema, page_size=page_size)
pages = df.pages
for i in range(num_pages):
page = next(pages)
self.assertEqual(page.num_items, page_size)
page = next(pages)
self.assertEqual(page.num_items, num_last_page)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_list_rows_max_results_w_bqstorage(self):
table_ref = DatasetReference("bigquery-public-data", "utility_us").table(
"country_code_iso"
)
bqstorage_client = bigquery_storage_v1beta1.BigQueryStorageClient(
credentials=Config.CLIENT._credentials
)
row_iterator = Config.CLIENT.list_rows(
table_ref,
selected_fields=[bigquery.SchemaField("country_name", "STRING")],
max_results=100,
)
dataframe = row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
self.assertEqual(len(dataframe.index), 100)
def temp_dataset(self, dataset_id, location=None):
dataset = Dataset(Config.CLIENT.dataset(dataset_id))
if location:
dataset.location = location
dataset = retry_403(Config.CLIENT.create_dataset)(dataset)
self.to_delete.append(dataset)
return dataset
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
@pytest.mark.skipif(IPython is None, reason="Requires `ipython`")
@pytest.mark.usefixtures("ipython_interactive")
def test_bigquery_magic():
ip = IPython.get_ipython()
current_process = psutil.Process()
conn_count_start = len(current_process.connections())
ip.extension_manager.load_extension("google.cloud.bigquery")
sql = """
SELECT
CONCAT(
'https://stackoverflow.com/questions/',
CAST(id as STRING)) as url,
view_count
FROM `bigquery-public-data.stackoverflow.posts_questions`
WHERE tags like '%google-bigquery%'
ORDER BY view_count DESC
LIMIT 10
"""
with io.capture_output() as captured:
result = ip.run_cell_magic("bigquery", "", sql)
conn_count_end = len(current_process.connections())
lines = re.split("\n|\r", captured.stdout)
# Removes blanks & terminal code (result of display clearing)
updates = list(filter(lambda x: bool(x) and x != "\x1b[2K", lines))
assert re.match("Executing query with job ID: .*", updates[0])
assert all(re.match("Query executing: .*s", line) for line in updates[1:-1])
assert re.match("Query complete after .*s", updates[-1])
assert isinstance(result, pandas.DataFrame)
assert len(result) == 10 # verify row count
assert list(result) == ["url", "view_count"] # verify column names
assert conn_count_end == conn_count_start # system resources are released
def _job_done(instance):
return instance.state.lower() == "done"
def _dataset_exists(ds):
try:
Config.CLIENT.get_dataset(DatasetReference(ds.project, ds.dataset_id))
return True
except NotFound:
return False
def _table_exists(t):
try:
tr = DatasetReference(t.project, t.dataset_id).table(t.table_id)
Config.CLIENT.get_table(tr)
return True
except NotFound:
return False
@pytest.fixture(scope="session")
def ipython():
config = tools.default_config()
config.TerminalInteractiveShell.simple_prompt = True
shell = interactiveshell.TerminalInteractiveShell.instance(config=config)
return shell
@pytest.fixture()
def ipython_interactive(request, ipython):
"""Activate IPython's builtin hooks
for the duration of the test scope.
"""
with ipython.builtin_trap:
yield ipython
| 40.142352 | 89 | 0.597725 |
4a1c03420ab12ccb79733f027ec517ead9262377
| 355 |
py
|
Python
|
client/verta/verta/monitoring/notification_channel/entities/__init__.py
|
fool-sec-review/modeldb
|
44e7f3c1af6768c4c23a2d134f9a322fcf0320b5
|
[
"Apache-2.0"
] | 624 |
2020-01-18T21:10:12.000Z
|
2022-03-23T12:11:06.000Z
|
client/verta/verta/monitoring/notification_channel/entities/__init__.py
|
fool-sec-review/modeldb
|
44e7f3c1af6768c4c23a2d134f9a322fcf0320b5
|
[
"Apache-2.0"
] | 651 |
2019-04-18T12:55:07.000Z
|
2022-03-31T23:45:09.000Z
|
client/verta/verta/monitoring/notification_channel/entities/__init__.py
|
fool-sec-review/modeldb
|
44e7f3c1af6768c4c23a2d134f9a322fcf0320b5
|
[
"Apache-2.0"
] | 118 |
2019-04-12T16:01:21.000Z
|
2022-03-05T16:29:41.000Z
|
# -*- coding: utf-8 -*-
"""Entities for defining notification channels in the Verta backend."""
from verta._internal_utils import documentation
from ._notification_channel import NotificationChannel, NotificationChannels
documentation.reassign_module(
[
NotificationChannel,
NotificationChannels,
],
module_name=__name__,
)
| 23.666667 | 76 | 0.746479 |
4a1c04c5c154dfaabdd144c7568cba698a8e537e
| 4,094 |
py
|
Python
|
genmod/vae/lower_bound.py
|
shuiruge/generative_models
|
a1765a5ff9aeee8c0325f0c5f40b3537bb82accf
|
[
"MIT"
] | 2 |
2018-11-23T06:46:59.000Z
|
2020-09-20T14:42:56.000Z
|
genmod/vae/lower_bound.py
|
shuiruge/generative_models
|
a1765a5ff9aeee8c0325f0c5f40b3537bb82accf
|
[
"MIT"
] | null | null | null |
genmod/vae/lower_bound.py
|
shuiruge/generative_models
|
a1765a5ff9aeee8c0325f0c5f40b3537bb82accf
|
[
"MIT"
] | null | null | null |
"""
Description
-----------
Implements the `LossLowerBound` of the variational auto-encoder. However,
because of its intrinsically large variance, this implementation is not to be
employed; it stays here as a reminder of how the Monte-Carlo integral can fail.
"""
import tensorflow as tf
from tfutils.monte_carlo_integral import MonteCarloIntegral
class LossLowerBound:
r"""The function ln p(x) by Monte-Carlo integral, which is the lower bound
of the loss `LossX`.
```math
p(x) = E_{z \sim P(Z)} \left[ p(X \mid z) \right].
```
The error of the Monte-Carlo integral is computed as follow.
```math
\begin{equation}
\delta \ln p(x) = \frac{ \delta p(x) }{ p(x) }.
\end{equation}
```
wherein
```math
\begin{align}
& \left( \frac{ \delta p(x) }{ p(x) } \right)^2 \\
= & \frac{
\text{Var}_{z \sim P(Z)} \left[ p(x \mid z) \right]
}{
\text{E}_{z \sim P(Z)}^2 \left[ p(x \mid z) \right]
} \\
= & \frac{
\text{E}_{z \sim P(Z)}^2 \left[ p^2(x \mid z) \right]
}{
\text{E}_{z \sim P(Z)}^2 \left[ p(x \mid z) \right]
} - 1.
\end{align}
```
WARNING:
    This estimate of the lower bound of the fit by the KL-divergence is
    NOT proper, because of its large variance.
    Indeed, as the number of samples in the Monte-Carlo integral increases,
    the variance increases rather than decreasing as would be expected.
    This is caused by the large variance of P(X|Z), which is a product of
    the P(X_i|Z)s, where each X_i is for one pixel of the `28 * 28`-pixel
    picture of the MNIST dataset. (Say, the product of `28 * 28`
    independent probabilities all with the value `0.9`, `0.9 ** (28 * 28)`,
    is extremely tiny.)
Args:
get_p_X_z: Callable with the signature:
Args:
z: Tensor of the shape `batch_shape + [z_dim]`.
reuse: Boolean.
Returns:
An instance of `tfd.Distribution`, with batch-shape `batch_shape`
and event-shape `[ambient_dim]`.
p_z: An instance of `tfd.Distribution`, with batch-shape `batch_size`
and event-shape `z_dim`.
n_samples: Positive integer.
    reuse: If true, reuse the variables in `get_p_X_z`.
"""
def __init__(self,
variational_autoencoder,
epsilon=1e-8,
name='LossLowerBound'):
self.vae = variational_autoencoder
self.epsilon = epsilon
self.base_name = name
self.log_n_samples = tf.log(
tf.cast(self.vae.n_samples, self.vae.prior.dtype),
name='log_n_samples')
def log_expectation(self, log_samples, name='log_expectation'):
"""ln E[ exp(log_samples) ]
Args:
log_samples: Tensor of the shape `[n_samples]` + batch-shape +
event-shape.
Returns:
Tensor of the shape batch_shape + event-shape.
"""
with tf.name_scope(name):
return tf.reduce_logsumexp(log_samples - self.log_n_samples, axis=0)
def __call__(self, ambient):
"""Returns the tensor of ln p(X). This serves as the lower bound of
the loss by KL-divergence, evaluating the fitting.
Args:
ambient: Tensor of the shape `batch_shape + [ambient_dim]`.
name: String.
Returns:
An instance of `MonteCarloIntegral` with shape `batch_shape`.
"""
with tf.name_scope(self.base_name):
# [n_samples] + batch_shape + [latent_dim]
latent_samples = self.vae.prior.sample(self.vae.n_samples)
decoder = self.vae.decoder(latent_samples, reuse=tf.AUTO_REUSE)
# [n_samples] + batch_shape
decoder_log_probs = decoder.log_prob(ambient)
# E_{z~p(z)} [ p(X|z) ]
# batch_shape
lower_bound_tensor = self.log_expectation(decoder_log_probs,
name='lower_bound')
# Variance of Monte-Carlo integral
square_delta_lower_bound = -1.0 + tf.exp(self.log_expectation(
2.0 * (decoder_log_probs - lower_bound_tensor)))
return MonteCarloIntegral(value=lower_bound_tensor,
variance=square_delta_lower_bound)
| 31.984375 | 78 | 0.631656 |
4a1c058c31c8135122bd3a7a24f2fe2d6114b984
| 5,421 |
py
|
Python
|
convert.py
|
naojibrainmachine/bert-voice-conversion
|
7a2493b0b2203736c0b05341d682cda15aee3c90
|
[
"MIT"
] | null | null | null |
convert.py
|
naojibrainmachine/bert-voice-conversion
|
7a2493b0b2203736c0b05341d682cda15aee3c90
|
[
"MIT"
] | null | null | null |
convert.py
|
naojibrainmachine/bert-voice-conversion
|
7a2493b0b2203736c0b05341d682cda15aee3c90
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import math
import sys
import glob
import os
import time
#np.set_printoptions(threshold = 1e6)
from SpeechSynthesizer import speech_synthesizer
from PhoneClassifer import phone_classifer
from utils.load_data import get_data_ss,default_config,phns,denormalize_db,db2amp,spec2wav,inv_preemphasis,save_wave,get_data
from LOAD_SAVE_PARAMS.LOAD_SAVE_PARAMS import save_weight,load_weight
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
tf.compat.v1.keras.backend.set_session(session)
tf.compat.v1.keras.backend.clear_session()
os.environ['CUDA_VISIBLE_DEVICES'] = '/gpu:0'
def convert(model, x_mfccs, y_spec):
pred_spec,ppgs = model.predict(x_mfccs,dropout_rate=0.0)
#print(model.loss_ss(me_,y_mel,pred_spec,y_spec),"losssss")
y_spec=tf.reshape(y_spec,[y_spec.shape[1],y_spec.shape[-1]]).numpy()
pred_spec=tf.reshape(pred_spec,[pred_spec.shape[1],pred_spec.shape[-1]]).numpy()
    # Denormalization
pred_spec = denormalize_db(pred_spec, default_config['max_db'], default_config['min_db'])
y_spec = denormalize_db(y_spec, default_config['max_db'], default_config['min_db'])
# Db to amp
pred_spec = db2amp(pred_spec)
y_spec = db2amp(y_spec)
# Emphasize the magnitude
pred_spec = np.power(pred_spec, default_config['emphasis_magnitude'])
y_spec = np.power(y_spec, default_config['emphasis_magnitude'])
# Spectrogram to waveform
audio = spec2wav(pred_spec.T, default_config['n_fft'], default_config['win_length'], default_config['hop_length'],default_config['n_iter'])
y_audio = spec2wav(y_spec.T, default_config['n_fft'], default_config['win_length'], default_config['hop_length'],default_config['n_iter'])
# Apply inverse pre-emphasis
audio = inv_preemphasis(audio, coeff=default_config['preemphasis'])
y_audio = inv_preemphasis(y_audio, coeff=default_config['preemphasis'])
return audio, y_audio, ppgs
#tf.summary.audio('A', y_audio, hp.default.sr, max_outputs=hp.convert.batch_size)
#heatmap = np.expand_dims(ppgs, 3) # channel=1
#tf.summary.image('PPG', heatmap, max_outputs=ppgs.shape[0])
def do_convert(model,params,batch_size):
acc=[]
acc_spec=[]
wav_files_list = glob.glob(default_config["data_convert"])
#print(wav_files_list)
iter_data=get_data_ss(wav_files_list,batch_size)
outputs=[]
Ys=[]
los=[]
count=0
for x_mfccs,y_spec,y_mel in iter_data:
e_start=time.time()
X_mfccs=tf.concat(x_mfccs,0)
Y_spec=tf.concat(y_spec,0)
Y_mel=tf.concat(y_mel,0)
X_mfccs=tf.cast(X_mfccs,dtype=tf.float32)
Y_spec=tf.cast(Y_spec,dtype=tf.float32)
audio, y_audio, ppgs = convert(model, X_mfccs,Y_spec)
path="save_data"
y_audio=y_audio*32767
audio=audio*32767
save_wave(path,"y_audio"+str(count)+".wav",y_audio,16000)
save_wave(path,"audio"+str(count)+".wav",audio,16000)
e_end=time.time()
print("一次耗时%f秒"%(e_end-e_start))
count=count+1
def return_accuracy(Y,Y_pre):
num=Y.shape[0]*Y.shape[1]
rowMaxSoft=np.argmax(Y_pre, axis=-1)+1
rowMax=np.argmax(Y, axis=-1)+1
rowMaxSoft=rowMaxSoft.reshape([1,-1])
rowMax=rowMax.reshape([1,-1])
nonO=rowMaxSoft-rowMax
exist = (nonO != 0) * 1.0
factor = np.ones([nonO.shape[1],1])
res = np.dot(exist, factor)
accuracy=(float(num)-res[0][0])/float(num)
return accuracy
if __name__ == "__main__":
batch_size=1
n_mel=default_config["n_mels"]
n_spec=(default_config["n_fft"]//2)+1
input_nums=default_config["n_mfcc"]
    num_hiddens=512  # default_config["hidden_units"]; 768 is too heavy for this machine
num_outputs=default_config["n_mfcc"]
layer_nums=12
multi_head=12
mel_layers=6
spec_layers=6
max_position_dim=1024
clip_norm=1.0
epochs=3000
isContinue=True
#lr=1e-5#5e-6
model_pc=phone_classifer(lr=1e-5,input_nums=input_nums,hidden_nums=num_hiddens,output_nums=num_outputs,max_position_dim=max_position_dim,layers_encoder=layer_nums,labels_num=len(phns),multi_head=multi_head)
params_pc=model_pc.get_params()+model_pc.get_params_vc()
model_ss=speech_synthesizer(lr=1e-5,mel_in_nums=len(phns),mel_hi_nums=num_hiddens,mel_out_nums=len(phns),mel_layers=mel_layers,n_mel=n_mel,spec_in_nums=n_mel,spec_hi_nums=num_hiddens,spec_out_nums=n_mel,spec_layers=spec_layers,n_spec=n_spec,max_position_dim=max_position_dim,multi_head=multi_head)
params_mel=model_ss.get_params_mel()
params_spec=model_ss.get_params_spec()
params_ss=params_mel+params_spec
try:
load_weight("ckp","params_pc",params_pc)
except:
raise("未发现已经训练好的音素分类模型数据")
    model_ss.init_pc(model_pc)  # load the phone-classifier model
if isContinue==True:
try:
load_weight("ckp","params_ss",params_ss)
except:
raise("未发现已经训练好的语音合成模型数据")
for i in range(epochs):
with tf.device('/gpu:0'):
do_convert(model_ss,params_ss,batch_size)
#save_weight("ckp","params_ss",params_ss)
| 30.627119 | 301 | 0.696735 |
4a1c0600062b6b24e5b7bc8a9584297e3f557cac
| 6,766 |
py
|
Python
|
primrose/transformers/categoricals.py
|
astro313/primrose
|
891f001e4e198096edb74eea951d27c9ae2a278f
|
[
"Apache-2.0"
] | 38 |
2019-09-04T17:39:31.000Z
|
2021-11-09T21:20:24.000Z
|
primrose/transformers/categoricals.py
|
astro313/primrose
|
891f001e4e198096edb74eea951d27c9ae2a278f
|
[
"Apache-2.0"
] | 66 |
2019-09-05T15:55:19.000Z
|
2021-11-21T05:36:54.000Z
|
primrose/transformers/categoricals.py
|
astro313/primrose
|
891f001e4e198096edb74eea951d27c9ae2a278f
|
[
"Apache-2.0"
] | 6 |
2019-12-02T09:05:30.000Z
|
2021-12-09T16:12:36.000Z
|
"""Module to run a basic decision tree model
Author(s):
Mike Skarlinski (michael.skarlinski@weightwatchers.com)
"""
import pandas as pd
import numpy as np
import logging
from sklearn import preprocessing
from primrose.base.transformer import AbstractTransformer
class ExplicitCategoricalTransform(AbstractTransformer):
DEFAULT_NUMERIC = -9999
def __init__(self, categoricals):
"""initialize the ExplicitCategoricalTransform
Args:
categoricals: dictionary containing for each column to be transformed:
- transformations: list of strings to be executed on the data ('x' represents the current categorical variable)
- rename: if present, rename the current categorical variable to that name
- to_numeric: if true, attempt to apply to_numeric after previous transformations
"""
self.categoricals = categoricals
def fit(self, data):
pass
@staticmethod
def _process_transformations(data, input_data, categorical, x):
"""transform a column
Args:
data (dataframe): dataframe
            input_data (dict): JSON categorical config for this variable
            categorical (str): variable name
x (str): transformation string
Returns:
data (dataframe)
"""
if "transformations" in input_data.keys():
logging.info(
"Applying key {} to variable {}".format("transformations", categorical)
)
for transformation in input_data["transformations"]:
exec(transformation.format(x=x))
@staticmethod
def _process_rename(data, input_data, categorical):
"""rename a field
Args:
data (dataframe): dataframe
            input_data (dict): JSON categorical config for this variable
            categorical (str): variable name
Returns:
(tuple): tuple containing:
data (dataframe): dataframe
                name (str): new name if a "rename" key is present, original name otherwise
"""
if "rename" in input_data.keys():
logging.info("Applying key {} to variable {}".format("rename", categorical))
data = data.rename({categorical: input_data["rename"]}, axis="columns")
return data, input_data["rename"]
return data, categorical
@staticmethod
def _process_numeric(data, input_data, name):
"""convert column to numeric
Args:
data (dataframe): dataframe
            input_data (dict): JSON categorical config for this variable
name (str): field name
Returns:
            data with the column converted to numeric
"""
if input_data.get("to_numeric", False):
logging.info("Applying key {} to variable {}".format("to_numeric", name))
# if there are errors converting to numerical values, we need to sub in a reasonable value
if sum(pd.to_numeric(data[name], errors="coerce").isnull()) > 0:
logging.info(
"Can't convert these entries in {}. Replacing with {}: {}".format(
name,
ExplicitCategoricalTransform.DEFAULT_NUMERIC,
np.unique(
data[name][
pd.to_numeric(data[name], errors="coerce").isnull()
].astype(str)
),
)
)
data[name][
pd.to_numeric(data[name], errors="coerce").isnull()
] = ExplicitCategoricalTransform.DEFAULT_NUMERIC
try:
data[name] = pd.to_numeric(data[name])
return data
except:
raise TypeError("Failed to convert feature {} to numeric".format(name))
else:
return data
def transform(self, data):
"""Transform categorical variables into one or more numeric ones, no need to separate testing & training data
Args:
data: dictionary containing dataframe with all categorical columns present
Returns:
data with all categorical columns recoded and/or deleted
"""
for categorical in self.categoricals.keys():
x = "data['{}']".format(categorical)
input_data = self.categoricals[categorical]
ExplicitCategoricalTransform._process_transformations(
data, input_data, categorical, x
)
data, new_name = ExplicitCategoricalTransform._process_rename(
data, input_data, categorical
)
data = ExplicitCategoricalTransform._process_numeric(
data, input_data, new_name
)
return data
class ImplicitCategoricalTransform(AbstractTransformer):
"""Class which implicitly transforms all string columns of a dataframe with sklearn LabelEncoder"""
def __init__(self, target_variable):
"""initialize this ImplicitCategoricalTransform
Args:
target_variable (str): target variable name
"""
self.target_variable = target_variable
self._encoder = {}
self.target_encoder = None
def fit(self, data):
"""encode the data as categorical labels
Args:
data (dataframe)
Returns:
dataframe (dataframe)
"""
logging.info("Fitting LabelEncoders on all string-based dataframe columns...")
data.is_copy = False
for column_name in data.columns:
if data[column_name].dtype == object:
logging.info("Fitting LabelEncoder for column {}".format(column_name))
self._encoder[column_name] = preprocessing.LabelEncoder()
self._encoder[column_name].fit(data[column_name])
if column_name == self.target_variable:
self.target_encoder = self._encoder[column_name]
else:
pass
return data
def transform(self, data):
"""Transform data into categorical variables using pre-trained label encoder
Args:
data (dataframe)
Returns:
dataframe (dataframe)
"""
data.is_copy = False
for column_name in data.columns:
if column_name in self._encoder:
logging.info("LabelEncoding column {}".format(column_name))
data[column_name] = self._encoder[column_name].transform(
data[column_name]
)
return data
| 29.806167 | 123 | 0.586905 |
4a1c065d5362c291611c39e8bb87ad51a4437bc5
| 15,449 |
py
|
Python
|
calltree/calltree.py
|
vadivelmurugank/calltree
|
2ffb0c6026c2b609182df3c69e11c588ded75415
|
[
"MIT"
] | null | null | null |
calltree/calltree.py
|
vadivelmurugank/calltree
|
2ffb0c6026c2b609182df3c69e11c588ded75415
|
[
"MIT"
] | null | null | null |
calltree/calltree.py
|
vadivelmurugank/calltree
|
2ffb0c6026c2b609182df3c69e11c588ded75415
|
[
"MIT"
] | null | null | null |
"""
calltree.py
Purpose
=======
* What is the problem statement?
* Model the problem.
- Equation
* How this Project solves the problem?
* Use cases
* What is the value add? differentiator?
The understanding of the codeflow in a larger source of codebase, requires to
list the caller and callee relationships for a given function. For linux and
unix developers, cscope has been the primary utitlity to search and browse
through the code.
This utility uses the generated cscope database to list and display the
callgraph, which is stored in a image format.
Interface
=========
* Command Line
* Web/GUI
* API
calltree.py [options] <functionname>
    -d <CSCOPE_DB>
    -f <output format: jpg/png>
    -o <output dir>
    -l <level of callers/callees>
    -c  recursively show callers
    -e  recursively show callees
    -a  show all callers/callees
Data Structure
===============
* Data Abstraction
* Data Association
* Data Struture
Algorithms
==========
* Arrange/Sort/Search
* Memory Allocate/layout
* Queues/Slice/Schedule
"""
import re
import pdb
import os
import sys
import io
import re
import collections
import subprocess
import shlex
import pdb
import pyparsing
import pydotplus as pydot
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class calltree:
def __init__(self, function=None):
command = ''
self.func = function
self.caller_level = 1
self.callee_level = 1
self.showall = False
self.everything = False
self.sourcedb = os.getenv("SOURCEDB")
self.oformat = "png"
self.odir = os.path.abspath(os.curdir)
self.cluster = ''
self.funcnodes = collections.OrderedDict()
self.parseCmdLineOptions()
self.getsourcedb()
self.createCallTree(self.func, self.caller_level, self.callee_level)
def __call__(self):
command = ''
def __del__(self):
pass
def setsourcedb(self, dbpath):
"""
Get source database path
"""
if dbpath and os.path.exists(dbpath):
self.sourcedb = dbpath
else:
sys.exit("Invalid sourcedb Path!!")
def getsourcedb(self):
"""
Get source database path
"""
if not self.sourcedb:
if os.path.exists("cscope.out"):
self.sourcedb = "cscope.out"
else:
                sys.exit("Set SOURCEDB env variable or specify -d")
print("Using sourcedb:", self.sourcedb)
os.chdir(os.path.dirname(self.sourcedb))
def showtree(self):
for func in self.funcnodes.keys():
print("\n(*)", func)
fnode = self.funcnodes[func]
funcdef = fnode["define"]
for ftuple in funcdef:
print(' '*8,"-- %s <%d:%s>" %(' '.join(ftuple[2]),int(ftuple[1]), ftuple[0]))
funcdef = fnode["caller"]
if len(funcdef.keys()) > 0:
print("\n")
for cfunc in funcdef.keys():
flist = funcdef[cfunc]
print(" "*4,"%s" %(cfunc))
for ftuple in flist:
print(' '*8,"|>> %s <%d:%s> " %(' '.join(ftuple[2]),int(ftuple[1]), ftuple[0]))
funcdef = fnode["callee"]
if len(funcdef.keys()) > 0:
print("\n"," "*2,"[%s]" %(func))
print(" "*4,"|")
for cfunc in funcdef.keys():
flist = funcdef[cfunc]
print(" "*4,"|<< %s " %(cfunc))
for ftuple in flist:
print(' '*8,'|'+'-'*4,"%s <%d:%s>" %(' '.join(ftuple[2]), int(ftuple[1]), ftuple[0]))
def showgraph(self):
graph = pydot.Dot(graph_type='digraph', rankdir="LR", splines="true",
nodesep=0.10, ranksep="1.1 equally", labelloc="top",
labeljust="centered", ratio="auto", packMode="array_u",
compound="true", overlap="prism", clusterrank="global",
model="circuit")
graph.set_graph_defaults(graph_type='record')
graph.set_node_defaults(
fontname="Verdana",
fontsize="12",
fillcolor="grey91;0.1:white",
style="filled",fontcolor="black",
labelfontname="Verdana",
labelfontsize="12",
gradientangle=270,
shape='rectangle')
graph.set_edge_defaults(
dir="forward",
rank="same",
color="midnightblue",
fontsize="12",
style="solid",
penwidth=1,
fontname="Verdana")
for func in self.funcnodes.keys():
print("showgraph:", func)
fnode = self.funcnodes[func]
calltree = ''
if (self.func == func):
calltree = pydot.Cluster(graph_name="cluster_calltree_%s" %(func),
rank="same" ,style="filled", fillcolor="bisque1", fontsize=14)
funcdef = fnode["define"]
funcstr = func
#for ftuple in funcdef:
# ftuplestr = "%s" %(' '.join(ftuple[2]))
# funcstr = "\n".join([funcstr,ftuplestr])
funcgraph = pydot.Node(func, label=funcstr,
shape="rect",fontsize=26,
fillcolor="green",style="filled",
fontcolor="brown")
calltree.add_node(funcgraph)
self.cluster=calltree
else:
calltree = pydot.Cluster(graph_name="cluster_calltree_%s" %(func),
rank="same" , fontsize=14, style="dashed")
graph.add_subgraph(calltree)
funcdef = fnode["caller"]
for cfunc in funcdef.keys():
calltree.add_edge(pydot.Edge(cfunc, func, shape="eclipse"))
print(func, "<---", cfunc)
funcdef = fnode["callee"]
for cfunc in funcdef.keys():
calltree.add_edge(pydot.Edge(func, cfunc))
print(func, "->", cfunc)
calltreeimage = (self.func+'.'+self.oformat)
print("\n... Calltree graph stored at: %s\n\n" %(os.path.join(self.odir,calltreeimage)))
if (self.oformat == "jpg"):
graph.write_jpg(os.path.join(self.odir,calltreeimage))
else:
graph.write_png(os.path.join(self.odir,calltreeimage))
# render pydot by calling dot, no file saved to disk
png_str = graph.create_png(prog='dot')
sio = io.BytesIO()
sio.write(png_str)
sio.seek(0)
img = mpimg.imread(sio)
# plot the image
imgplot = plt.imshow(img, aspect='equal')
plt.show(block=True)
def addfunc(self, func):
"""
funcdict = {
function : {
define : {
funcname : (file, line, definitions)
}
callee : {
calleename : (file, line, definitions)
}
caller : {
callername : (file, line, definitions)
}
}
}
"""
if func not in self.funcnodes.keys():
self.funcnodes[func] = collections.OrderedDict()
self.funcnodes[func]["define"] = list()
self.funcnodes[func]["callee"] = collections.OrderedDict()
self.funcnodes[func]["caller"] = collections.OrderedDict()
def adddefine(self, func, filename, line, fdefine):
"""
"""
if func not in self.funcnodes.keys():
self.addfunc(func)
fnode = self.funcnodes[func]
funcdef = fnode["define"]
ftuple = (filename, line, fdefine)
if ftuple not in funcdef:
funcdef.append(ftuple)
def addcallee(self, func, callee, filename, line, cdefine):
"""
"""
if func not in self.funcnodes.keys():
self.addfunc(func)
#if callee not in self.funcnodes.keys():
# self.addfunc(callee)
fnode = self.funcnodes[func]
funcdef = fnode["callee"]
if callee not in funcdef.keys():
funcdef[callee] = list()
ftuple = (filename, line, cdefine)
if ftuple not in funcdef[callee]:
funcdef[callee].append(ftuple)
def addcaller(self, func, caller, filename, line, cdefine):
"""
"""
if func not in self.funcnodes.keys():
self.addfunc(func)
#if caller not in self.funcnodes.keys():
# self.addfunc(caller)
fnode = self.funcnodes[func]
funcdef = fnode["caller"]
if caller not in funcdef.keys():
funcdef[caller] = list()
ftuple = (filename, line, cdefine)
if ftuple not in funcdef[caller]:
funcdef[caller].append(ftuple)
def fsym(self, fname):
self.run_cscope(0, fname)
def fdefines(self, fname):
output = self.run_cscope(1, fname)
for outstr in output:
if not outstr:
continue
cstr = outstr.split(' ')
if len(cstr) > 2:
self.adddefine(cstr[1], cstr[0], cstr[2], cstr[3:] )
else:
print(outstr)
print("ERROR: output doesn't have func defines")
def fcallees(self, fname, level):
if (level > 0):
output = self.run_cscope(2, fname)
for outstr in output:
if not outstr:
continue
cstr = outstr.split(' ')
if len(cstr) > 2:
self.addcallee(fname, cstr[1], cstr[0], cstr[2], cstr[3:] )
else:
print(outstr)
print("ERROR: output doesn't have func callees")
def fcaller(self, fname, level):
if (level > 0):
output = self.run_cscope(3, fname)
for outstr in output:
if not outstr:
continue
cstr = outstr.split(' ')
if len(cstr) > 2:
self.addcaller(fname, cstr[1], cstr[0], cstr[2], cstr[3:] )
else:
print(outstr)
print("ERROR: output doesn't have func caller")
def createCallTree(self,function, caller_level, callee_level):
#print(function, caller_level, callee_level)
if (caller_level <= 0) and (callee_level <= 0 ):
return
if function not in self.funcnodes.keys():
print("processing %s" %(function))
self.fdefines(function)
self.fcaller(function, caller_level)
self.fcallees(function, callee_level)
if function in self.funcnodes.keys():
fnode = self.funcnodes[function]
funcdef = fnode["caller"]
#print("CALLER:", caller_level, funcdef)
if (len(funcdef.keys()) > 0) and (caller_level > 0):
# Unfold caller levels
if (self.showall is True):
caller_level = 1
else:
caller_level -= 1
allcallees = 0
if (self.everything is True):
allcallees = 1
for cfunc in funcdef.keys():
self.createCallTree(cfunc, caller_level, allcallees)
funcdef = fnode["callee"]
if (len(funcdef.keys()) > 0) and (callee_level > 0):
# Unfold callee levels
if (self.showall is True):
callee_level = 1
else:
callee_level -= 1
allcallers = 0
if (self.everything is True):
allcallers = 1
for cfunc in funcdef.keys():
self.createCallTree(cfunc, allcallers, callee_level)
def fgrep(self, fstring):
        self.run_cscope(6, fstring)
def run_cscope(self, level, fname):
# Construct the command to cscope
cscope = '/usr/bin/cscope'
command = "%s -f %s -L%d %s" %(cscope, self.sourcedb, level, fname)
cmdstr = shlex.split(command)
#print(cmdstr)
outbyte = subprocess.Popen(cmdstr, stdout=subprocess.PIPE,universal_newlines=True)
outbytestr=outbyte.communicate()[0]
outstr=''.join(bytestr for bytestr in outbytestr)
output=outstr.split('\n')
#print(output)
return output
def parseCmdLineOptions(self):
"""
-d <CSCOPE_DB>
-f <output format - jpg/png>
-o <output dir>
-l <show level of callers/callees>
-a <all callers/callees>
-c <callers>
-e <callees>
"""
level = 0
from optparse import OptionParser
usage = "usage : %prog [options] functionname"
version = "%prog 1.0"
parser = OptionParser(usage=usage, version=version)
parser.add_option("--everything", action="store_true",
dest="everything", default=False,
help="Recursively show all callers and callees")
parser.add_option("-a", "--showall", action="store_true",
dest="showall", default=False,
help="Show all callers and callees")
parser.add_option("-c", "--callers", action="store_true",
dest="callers", default=False,
help="Recursively show all callers")
parser.add_option("-e", "--callees", action="store_true",
dest="callees", default=False,
help="Recursively show all callees")
parser.add_option("-l", "--level", action="store", type="int",
dest="level", default=1,
help="Show Level of callers/callees")
parser.add_option("-f", "--format", action="store", type="string",
dest="outputformat", default="png",
help="output format jpg/png")
parser.add_option("-o", "--outputdir", action="store", type="string",
dest="outputdir",default=os.path.abspath(os.curdir),
help="Storage directory for output files")
parser.add_option("-d", "--db", action="store", type="string",
dest="sourcedb",default=os.getenv("SOURCEDB"),
help="cscope database (cscope.out)")
(options, args) = parser.parse_args()
#print(options)
#print(args)
if options.showall is True:
self.showall = True
if options.everything is True:
self.everything = True
self.setsourcedb(options.sourcedb)
self.oformat = options.outputformat
self.odir = options.outputdir
if (options.callers is True):
self.caller_level = options.level
if (options.callees is True):
self.callee_level = options.level
        if self.func is None and len(args) == 1:
self.func = args[0]
else:
print(usage)
sys.exit()
| 33.439394 | 105 | 0.511554 |
4a1c06b702794f86d91e058bed98f9eca894f3d4
| 38 |
py
|
Python
|
AudioRec/__init__.py
|
joelbarmettlerUZH/AudioRec
|
4146a775baeb72fa18de22c78820d9f8a2a95667
|
[
"MIT"
] | 5 |
2020-03-06T04:42:34.000Z
|
2021-04-23T21:03:34.000Z
|
AudioRec/__init__.py
|
joelbarmettlerUZH/AudioRec
|
4146a775baeb72fa18de22c78820d9f8a2a95667
|
[
"MIT"
] | 1 |
2018-04-07T09:06:00.000Z
|
2019-02-09T16:14:43.000Z
|
AudioRec/__init__.py
|
joelbarmettlerUZH/AudioRec
|
4146a775baeb72fa18de22c78820d9f8a2a95667
|
[
"MIT"
] | 2 |
2020-04-06T12:52:34.000Z
|
2022-02-16T20:24:23.000Z
|
from AudioRec.Recorder import Recorder
| 38 | 38 | 0.894737 |
4a1c06f6f8c3171d8a7e04186cb0a6ec5f95ace5
| 484 |
py
|
Python
|
28-ImplementStrStr.py
|
Ergouzii/LeetCode
|
690e2e69250fc0af52a7eb1957dd3c44904e4859
|
[
"MIT"
] | null | null | null |
28-ImplementStrStr.py
|
Ergouzii/LeetCode
|
690e2e69250fc0af52a7eb1957dd3c44904e4859
|
[
"MIT"
] | null | null | null |
28-ImplementStrStr.py
|
Ergouzii/LeetCode
|
690e2e69250fc0af52a7eb1957dd3c44904e4859
|
[
"MIT"
] | null | null | null |
class Solution:
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
if needle == '':
return 0
        if needle not in haystack:
return -1
start = 0
end = len(needle)
for i in range(len(haystack)):
if haystack[start:end] == needle:
return start
else:
start += 1
end += 1
| 25.473684 | 45 | 0.429752 |
4a1c07f6f10919eda00cd2e709f6f1adb43c24ab
| 3,744 |
py
|
Python
|
graphs/perception/perception_2nodes/launch/trace_rectify_resize_fpga_streamlined_xrt.launch.py
|
dirksavage88/acceleration_examples
|
97140d08d84e53d7c7cc04340dfefe2c4a954117
|
[
"Apache-2.0"
] | null | null | null |
graphs/perception/perception_2nodes/launch/trace_rectify_resize_fpga_streamlined_xrt.launch.py
|
dirksavage88/acceleration_examples
|
97140d08d84e53d7c7cc04340dfefe2c4a954117
|
[
"Apache-2.0"
] | null | null | null |
graphs/perception/perception_2nodes/launch/trace_rectify_resize_fpga_streamlined_xrt.launch.py
|
dirksavage88/acceleration_examples
|
97140d08d84e53d7c7cc04340dfefe2c4a954117
|
[
"Apache-2.0"
] | null | null | null |
# ____ ____
# / /\/ /
# /___/ \ / Copyright (c) 2021, Xilinx®.
# \ \ \/ Author: Víctor Mayoral Vilches <victorma@xilinx.com>
# \ \
# / /
# /___/ /\
# \ \ / \
# \___\/\___\
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from launch import LaunchDescription
from launch_ros.actions import Node
from launch_ros.substitutions import FindPackageShare
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
from tracetools_launch.action import Trace
from tracetools_trace.tools.names import DEFAULT_EVENTS_ROS
from tracetools_trace.tools.names import DEFAULT_EVENTS_KERNEL
from tracetools_trace.tools.names import DEFAULT_CONTEXT
def generate_launch_description():
# Trace
trace = Trace(
session_name="trace_rectify_resize_fpga_streamlined_xrt",
events_ust=[
"ros2_image_pipeline:*",
]
+ DEFAULT_EVENTS_ROS,
context_fields={
'kernel': [],
'userspace': ['vpid', 'vtid', 'procname'],
},
# events_kernel=DEFAULT_EVENTS_KERNEL,
)
# perception_container = ComposableNodeContainer(
# name="perception_container",
# namespace="",
# package="rclcpp_components",
# executable="component_container",
# composable_node_descriptions=[
# ComposableNode(
# package="image_proc",
# plugin="image_proc::RectifyNodeFPGAStreamlinedXRT",
# name="rectify_node_fpga",
# remappings=[
# ("image", "/camera/image_raw"),
# ("camera_info", "/camera/camera_info"),
# ],
# ),
# ComposableNode(
# namespace="resize",
# package="image_proc",
# plugin="image_proc::ResizeNodeFPGAStreamlinedXRT",
# name="resize_node_fpga",
# remappings=[
# ("camera_info", "/camera/camera_info"),
# # ("image", "/image_rect"),
# ("image", "/camera/image_raw"),
# ("resize", "resize"),
# ],
# parameters=[
# {
# "scale_height": 2.0,
# "scale_width": 2.0,
# }
# ],
# ),
# ],
# output="screen",
# )
# Use a multi-threaded executor instead
perception_node = Node(
package="image_pipeline_examples",
executable="rectify_resize_fpga_streamlined_node_xrt",
name="rectify_resize_fpga_streamlined_node_xrt",
remappings=[
("image", "/camera/image_raw"),
("camera_info", "/camera/camera_info"),
("resize", "resize"),
],
parameters=[
{
"scale_height": 2.0,
"scale_width": 2.0,
}
],
)
return LaunchDescription([
# LTTng tracing
trace,
# image pipeline
# perception_container
perception_node
])
| 33.72973 | 74 | 0.551816 |
4a1c097bbf86bec181f09f9602dc414601a7819c
| 14,643 |
py
|
Python
|
hive/db/db_state.py
|
Doctor-Victor-Frankenstein/hivemind
|
ffdc9f849df01973c2f42db56a36aedf346d149c
|
[
"MIT"
] | 2 |
2019-12-13T15:43:41.000Z
|
2020-02-18T18:18:13.000Z
|
hive/db/db_state.py
|
Doctor-Victor-Frankenstein/hivemind
|
ffdc9f849df01973c2f42db56a36aedf346d149c
|
[
"MIT"
] | null | null | null |
hive/db/db_state.py
|
Doctor-Victor-Frankenstein/hivemind
|
ffdc9f849df01973c2f42db56a36aedf346d149c
|
[
"MIT"
] | null | null | null |
"""Hive db state manager. Check if schema loaded, init synced, etc."""
#pylint: disable=too-many-lines
import time
import logging
from hive.db.schema import (setup, reset_autovac, build_metadata,
build_metadata_community, teardown, DB_VERSION)
from hive.db.adapter import Db
log = logging.getLogger(__name__)
class DbState:
"""Manages database state: sync status, migrations, etc."""
_db = None
# prop is true until initial sync complete
_is_initial_sync = True
# db schema version
_ver = None
@classmethod
def initialize(cls):
"""Perform startup database checks.
1) Load schema if needed
2) Run migrations if needed
3) Check if initial sync has completed
"""
log.info("[INIT] Welcome to hive!")
# create db schema if needed
if not cls._is_schema_loaded():
log.info("[INIT] Create db schema...")
setup(cls.db())
cls._before_initial_sync()
# perform db migrations
cls._check_migrations()
# check if initial sync complete
cls._is_initial_sync = cls._is_feed_cache_empty()
if cls._is_initial_sync:
log.info("[INIT] Continue with initial sync...")
else:
log.info("[INIT] Hive initialized.")
@classmethod
def teardown(cls):
"""Drop all tables in db."""
teardown(cls.db())
@classmethod
def db(cls):
"""Get a db adapter instance."""
if not cls._db:
cls._db = Db.instance()
return cls._db
@classmethod
def finish_initial_sync(cls):
"""Set status to initial sync complete."""
assert cls._is_initial_sync, "initial sync was not started."
cls._after_initial_sync()
cls._is_initial_sync = False
log.info("[INIT] Initial sync complete!")
@classmethod
def is_initial_sync(cls):
"""Check if we're still in the process of initial sync."""
return cls._is_initial_sync
@classmethod
def _all_foreign_keys(cls):
md = build_metadata()
out = []
for table in md.tables.values():
out.extend(table.foreign_keys)
return out
@classmethod
def _disableable_indexes(cls):
to_locate = [
'hive_posts_ix3', # (author, depth, id)
'hive_posts_ix4', # (parent_id, id, is_deleted=0)
'hive_posts_ix5', # (community_id>0, is_pinned=1)
'hive_follows_ix5a', # (following, state, created_at, follower)
'hive_follows_ix5b', # (follower, state, created_at, following)
'hive_reblogs_ix1', # (post_id, account, created_at)
'hive_posts_cache_ix6a', # (sc_trend, post_id, paidout=0)
'hive_posts_cache_ix6b', # (post_id, sc_trend, paidout=0)
'hive_posts_cache_ix7a', # (sc_hot, post_id, paidout=0)
'hive_posts_cache_ix7b', # (post_id, sc_hot, paidout=0)
'hive_posts_cache_ix8', # (category, payout, depth, paidout=0)
'hive_posts_cache_ix9a', # (depth, payout, post_id, paidout=0)
'hive_posts_cache_ix9b', # (category, depth, payout, post_id, paidout=0)
'hive_posts_cache_ix10', # (post_id, payout, gray=1, payout>0)
'hive_posts_cache_ix30', # API: community trend
'hive_posts_cache_ix31', # API: community hot
'hive_posts_cache_ix32', # API: community created
'hive_posts_cache_ix33', # API: community payout
'hive_posts_cache_ix34', # API: community muted
'hive_accounts_ix3', # (vote_weight, name VPO)
'hive_accounts_ix4', # (id, name)
'hive_accounts_ix5', # (cached_at, name)
]
to_return = []
md = build_metadata()
for table in md.tables.values():
for index in table.indexes:
if index.name not in to_locate:
continue
to_locate.remove(index.name)
to_return.append(index)
# ensure we found all the items we expected
assert not to_locate, "indexes not located: {}".format(to_locate)
return to_return
@classmethod
def _before_initial_sync(cls):
"""Routine which runs *once* after db setup.
Disables non-critical indexes for faster initial sync, as well
as foreign key constraints."""
engine = cls.db().engine()
log.info("[INIT] Begin pre-initial sync hooks")
for index in cls._disableable_indexes():
log.info("Drop index %s.%s", index.table, index.name)
index.drop(engine)
# TODO: #111
#for key in cls._all_foreign_keys():
# log.info("Drop fk %s", key.name)
# key.drop(engine)
log.info("[INIT] Finish pre-initial sync hooks")
@classmethod
def _after_initial_sync(cls):
"""Routine which runs *once* after initial sync.
Re-creates non-core indexes for serving APIs after init sync,
as well as all foreign keys."""
engine = cls.db().engine()
log.info("[INIT] Begin post-initial sync hooks")
for index in cls._disableable_indexes():
log.info("Create index %s.%s", index.table, index.name)
index.create(engine)
# TODO: #111
#for key in cls._all_foreign_keys():
# log.info("Create fk %s", key.name)
# key.create(engine)
log.info("[INIT] Finish post-initial sync hooks")
@staticmethod
def status():
"""Basic health status: head block/time, current age (secs)."""
sql = ("SELECT num, created_at, extract(epoch from created_at) ts "
"FROM hive_blocks ORDER BY num DESC LIMIT 1")
row = DbState.db().query_row(sql)
return dict(db_head_block=row['num'],
db_head_time=str(row['created_at']),
db_head_age=int(time.time() - row['ts']))
@classmethod
def _is_schema_loaded(cls):
"""Check if the schema has been loaded into db yet."""
# check if database has been initialized (i.e. schema loaded)
engine = cls.db().engine_name()
if engine == 'postgresql':
return bool(cls.db().query_one("""
SELECT 1 FROM pg_catalog.pg_tables WHERE schemaname = 'public'
"""))
if engine == 'mysql':
return bool(cls.db().query_one('SHOW TABLES'))
raise Exception("unknown db engine %s" % engine)
@classmethod
def _is_feed_cache_empty(cls):
"""Check if the hive_feed_cache table is empty.
If empty, it indicates that the initial sync has not finished.
"""
return not cls.db().query_one("SELECT 1 FROM hive_feed_cache LIMIT 1")
@classmethod
def _check_migrations(cls):
"""Check current migration version and perform updates as needed."""
#pylint: disable=line-too-long,too-many-branches,too-many-statements
cls._ver = cls.db().query_one("SELECT db_version FROM hive_state LIMIT 1")
assert cls._ver is not None, 'could not load state record'
if cls._ver == 0:
raise Exception("dbv cannot be 0; reindex required")
if cls._ver == 1:
cls._set_ver(2)
if cls._ver == 2:
cls._set_ver(3)
if cls._ver == 3:
cls.db().query("CREATE INDEX hive_accounts_ix3 ON hive_accounts (vote_weight, name varchar_pattern_ops)")
cls._set_ver(4)
if cls._ver == 4:
cls.db().query("CREATE INDEX hive_follows_ix4 ON hive_follows (follower, following) WHERE state = 2")
cls._set_ver(5)
if cls._ver == 5:
# recover acct names lost to issue #151
from hive.steem.client import SteemClient
from hive.indexer.accounts import Accounts
names = SteemClient().get_all_account_names()
Accounts.load_ids()
Accounts.register(names, '1970-01-01T00:00:00')
Accounts.clear_ids()
cls._set_ver(6)
if cls._ver == 6:
cls.db().query("DROP INDEX hive_posts_cache_ix6")
cls.db().query("CREATE INDEX hive_posts_cache_ix6a ON hive_posts_cache (sc_trend, post_id) WHERE is_paidout = '0'")
cls.db().query("CREATE INDEX hive_posts_cache_ix6b ON hive_posts_cache (post_id, sc_trend) WHERE is_paidout = '0'")
cls.db().query("DROP INDEX hive_posts_cache_ix7")
cls.db().query("CREATE INDEX hive_posts_cache_ix7a ON hive_posts_cache (sc_hot, post_id) WHERE is_paidout = '0'")
cls.db().query("CREATE INDEX hive_posts_cache_ix7b ON hive_posts_cache (post_id, sc_hot) WHERE is_paidout = '0'")
cls._set_ver(7)
if cls._ver == 7:
cls.db().query("CREATE INDEX hive_accounts_ix4 ON hive_accounts (id, name)")
cls.db().query("CREATE INDEX hive_accounts_ix5 ON hive_accounts (cached_at, name)")
cls._set_ver(8)
if cls._ver == 8:
cls.db().query("DROP INDEX hive_follows_ix2")
cls.db().query("DROP INDEX hive_follows_ix3")
cls.db().query("DROP INDEX hive_follows_ix4")
cls.db().query("CREATE INDEX hive_follows_5a ON hive_follows (following, state, created_at, follower)")
cls.db().query("CREATE INDEX hive_follows_5b ON hive_follows (follower, state, created_at, following)")
cls._set_ver(9)
if cls._ver == 9:
from hive.indexer.follow import Follow
Follow.force_recount()
cls._set_ver(10)
if cls._ver == 10:
cls.db().query("CREATE INDEX hive_posts_cache_ix8 ON hive_posts_cache (category, payout, depth) WHERE is_paidout = '0'")
cls.db().query("CREATE INDEX hive_posts_cache_ix9a ON hive_posts_cache (depth, payout, post_id) WHERE is_paidout = '0'")
cls.db().query("CREATE INDEX hive_posts_cache_ix9b ON hive_posts_cache (category, depth, payout, post_id) WHERE is_paidout = '0'")
cls._set_ver(11)
if cls._ver == 11:
cls.db().query("DROP INDEX hive_posts_ix1")
cls.db().query("DROP INDEX hive_posts_ix2")
cls.db().query("CREATE INDEX hive_posts_ix3 ON hive_posts (author, depth, id) WHERE is_deleted = '0'")
cls.db().query("CREATE INDEX hive_posts_ix4 ON hive_posts (parent_id, id) WHERE is_deleted = '0'")
cls._set_ver(12)
if cls._ver == 12: # community schema
assert False, 'not finalized'
for table in ['hive_members', 'hive_flags', 'hive_modlog',
'hive_communities', 'hive_subscriptions',
'hive_roles', 'hive_notifs']:
cls.db().query("DROP TABLE IF EXISTS %s" % table)
build_metadata_community().create_all(cls.db().engine())
cls.db().query("ALTER TABLE hive_accounts ADD COLUMN lr_notif_id integer")
cls.db().query("ALTER TABLE hive_posts DROP CONSTRAINT hive_posts_fk2")
cls.db().query("ALTER TABLE hive_posts DROP COLUMN community")
cls.db().query("ALTER TABLE hive_posts ADD COLUMN community_id integer")
cls.db().query("ALTER TABLE hive_posts_cache ADD COLUMN community_id integer")
cls._set_ver(13)
if cls._ver == 13:
sqls = ("CREATE INDEX hive_posts_ix5 ON hive_posts (id) WHERE is_pinned = '1' AND is_deleted = '0'",
"CREATE INDEX hive_posts_ix6 ON hive_posts (community_id, id) WHERE community_id IS NOT NULL AND is_pinned = '1' AND is_deleted = '0'",
"CREATE INDEX hive_posts_cache_ix10 ON hive_posts_cache (post_id, payout) WHERE is_grayed = '1' AND payout > 0",
"CREATE INDEX hive_posts_cache_ix30 ON hive_posts_cache (community_id, sc_trend, post_id) WHERE community_id IS NOT NULL AND is_grayed = '0' AND depth = 0",
"CREATE INDEX hive_posts_cache_ix31 ON hive_posts_cache (community_id, sc_hot, post_id) WHERE community_id IS NOT NULL AND is_grayed = '0' AND depth = 0",
"CREATE INDEX hive_posts_cache_ix32 ON hive_posts_cache (community_id, created_at, post_id) WHERE community_id IS NOT NULL AND is_grayed = '0' AND depth = 0",
"CREATE INDEX hive_posts_cache_ix33 ON hive_posts_cache (community_id, payout, post_id) WHERE community_id IS NOT NULL AND is_grayed = '0' AND is_paidout = '0'",
"CREATE INDEX hive_posts_cache_ix34 ON hive_posts_cache (community_id, payout, post_id) WHERE community_id IS NOT NULL AND is_grayed = '1' AND is_paidout = '0'")
for sql in sqls:
cls.db().query(sql)
cls._set_ver(14)
if cls._ver == 14:
cls.db().query("ALTER TABLE hive_communities ADD COLUMN primary_tag VARCHAR(32) NOT NULL DEFAULT ''")
cls.db().query("ALTER TABLE hive_communities ADD COLUMN category VARCHAR(32) NOT NULL DEFAULT ''")
cls.db().query("ALTER TABLE hive_communities ADD COLUMN avatar_url VARCHAR(1024) NOT NULL DEFAULT ''")
cls.db().query("ALTER TABLE hive_communities ADD COLUMN num_authors INTEGER NOT NULL DEFAULT 0")
cls.db().query("CREATE INDEX hive_posts_cache_ix20 ON hive_posts_cache (community_id, author, payout, post_id) WHERE is_paidout = '0'")
cls._set_ver(15)
if cls._ver == 15:
cls.db().query("ALTER TABLE hive_accounts DROP COLUMN lr_notif_id")
cls.db().query("ALTER TABLE hive_accounts ADD COLUMN lastread_at TIMESTAMP WITHOUT TIME ZONE DEFAULT '1970-01-01 00:00:00' NOT NULL")
cls.db().query("CREATE INDEX hive_notifs_ix6 ON hive_notifs (dst_id, created_at, score, id) WHERE dst_id IS NOT NULL")
cls._set_ver(16)
reset_autovac(cls.db())
log.info("[HIVE] db version: %d", cls._ver)
assert cls._ver == DB_VERSION, "migration missing or invalid DB_VERSION"
# Example migration:
#if cls._ver == 1:
# cls.db().query("ALTER TABLE hive_posts ALTER COLUMN author SET DEFAULT ''")
# cls._set_ver(2)
@classmethod
def _set_ver(cls, ver):
"""Sets the db/schema version number. Enforce sequential."""
assert cls._ver is not None, 'version needs to be read before updating'
assert ver == cls._ver + 1, 'version must follow previous'
cls.db().query("UPDATE hive_state SET db_version = %d" % ver)
cls._ver = ver
log.info("[HIVE] db migrated to version: %d", ver)
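# Typical lifecycle (illustrative sketch only; the sync loop below is hypothetical,
# not part of this module):
#
#   DbState.initialize()                    # load schema, run migrations, detect sync phase
#   while more_blocks_to_process():         # hypothetical indexer loop
#       process_next_block()
#       if caught_up() and DbState.is_initial_sync():
#           DbState.finish_initial_sync()   # re-creates the API-serving indexes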
| 44.105422 | 185 | 0.617087 |
4a1c0b0bfc94282040e2d1504b62afc468780c68
| 1,017 |
py
|
Python
|
test/test_industry.py
|
roksela/smartrecruiters-python-client
|
6d0849d173a3d6718b5f0769098f4c76857f637d
|
[
"MIT"
] | 5 |
2018-03-27T08:20:13.000Z
|
2022-03-30T06:23:38.000Z
|
test/test_industry.py
|
roksela/smartrecruiters-python-client
|
6d0849d173a3d6718b5f0769098f4c76857f637d
|
[
"MIT"
] | null | null | null |
test/test_industry.py
|
roksela/smartrecruiters-python-client
|
6d0849d173a3d6718b5f0769098f4c76857f637d
|
[
"MIT"
] | 2 |
2018-12-05T04:48:37.000Z
|
2020-12-17T12:12:12.000Z
|
# coding: utf-8
"""
Unofficial python library for the SmartRecruiters API
The SmartRecruiters API provides a platform to integrate services or applications, build apps and create fully customizable career sites. It exposes SmartRecruiters functionality and allows to connect and build software enhancing it.
OpenAPI spec version: 1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import smartrecruiters_python_client
from smartrecruiters_python_client.rest import ApiException
from smartrecruiters_python_client.models.industry import Industry
class TestIndustry(unittest.TestCase):
""" Industry unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testIndustry(self):
"""
Test Industry
"""
model = smartrecruiters_python_client.models.industry.Industry()
if __name__ == '__main__':
unittest.main()
| 23.651163 | 237 | 0.737463 |
4a1c0b6a51ac99b26802748a61768b85cb8f14cc
| 8,014 |
py
|
Python
|
implicit_solver/core/data_block.py
|
vincentbonnetcg/Numerical-Bric-a-Brac
|
e71f2305d7452de985e5e9fa8935da611b6d9992
|
[
"MIT"
] | 14 |
2019-05-04T00:42:47.000Z
|
2021-09-07T09:57:44.000Z
|
implicit_solver/core/data_block.py
|
vincentbonnetcg/Numerical-Bric-a-Brac
|
e71f2305d7452de985e5e9fa8935da611b6d9992
|
[
"MIT"
] | null | null | null |
implicit_solver/core/data_block.py
|
vincentbonnetcg/Numerical-Bric-a-Brac
|
e71f2305d7452de985e5e9fa8935da611b6d9992
|
[
"MIT"
] | 5 |
2020-12-07T21:44:41.000Z
|
2021-09-13T05:29:54.000Z
|
"""
@author: Vincent Bonnet
@description : Array of Structures of Arrays (AoSoA)
Single Block Memory Layout (with x, v, b as channels)
|-----------------------------|
| x[block_size](np.float) |
| v[block_size](np.float) |
| b[block_size](np.int) |
|-----------------------------|
|blockInfo_size (int64)|
|blockInfo_capacity (int64)|
|blockInfo_active (bool) |
|-----------------------------|
blockInfo_size is the number of set elements in the Block
blockInfo_capacity is the maximum number of elements in the Block
blockInfo_active defines whether or not the Block is active
Datablock is a list of Blocks
"""
import numba
import numpy as np
import keyword
import core.jit.block_utils as block_utils
class DataBlock:
def __init__(self, class_type, block_size = 100):
# Data
self.blocks = numba.typed.List()
# Datatype
self.dtype_block = None
# Default values
        self.defaults = () # heterogeneous tuple storing default values
# Block size
self.block_size = block_size
        # index of the 'ID' field in the dtype, -1 when the class has no ID field
self.ID_field_index = -1
# Set class
self.__set_dtype(class_type)
self.clear()
def num_blocks(self):
return len(self.blocks)
def block(self, block_index):
        # [0] because self.blocks[block_index] is an array holding a single element
return self.blocks[block_index][0]
def clear(self):
'''
Clear the data on the datablock (it doesn't reset the datatype)
'''
self.blocks = numba.typed.List()
        # append an inactive block
        # this prevents an empty typed list, which would break the JIT compilation
block = np.empty(1, dtype=self.dtype_block)
block[0]['blockInfo_active'] = False
block[0]['blockInfo_capacity'] = self.block_size
block[0]['blockInfo_size'] = 0
block[0]['blockInfo_handle'] = -1
self.blocks.append(block)
@classmethod
def __check_before_add(cls, field_names, name):
'''
Raise exception if 'name' cannot be added
'''
if name in ['blockInfo_size', 'blockInfo_active', 'blockInfo_capacity', 'blockInfo_handle']:
raise ValueError("field name " + name + " is reserved ")
if keyword.iskeyword(name):
raise ValueError("field name cannot be a keyword: " + name)
if name in field_names:
raise ValueError("field name already used : " + name)
def __set_dtype(self, class_type):
'''
Set data type from the class type
'''
inst = class_type()
        # AoSoA data type: (x, y, ...) becomes (self.block_size, x, y, ...)
block_type = {}
block_type['names'] = []
block_type['formats'] = []
default_values = []
for name, value in inst.__dict__.items():
DataBlock.__check_before_add(block_type['names'], name)
block_type['names'].append(name)
default_values.append(value)
data_format = None # tuple(data_type, data_shape)
if np.isscalar(value):
                # The comma in data_shape (self.block_size,) is essential
# In case field_shape == self.block_size == 1,
# it guarantees an array will be produced and not a single value
data_format = (type(value), (self.block_size,))
else:
data_type = value.dtype.type
data_shape = ([self.block_size] + list(value.shape))
data_format = (data_type, data_shape)
block_type['formats'].append(data_format)
self.defaults = tuple(default_values)
# add block info
block_type['names'] += ['blockInfo_size', 'blockInfo_capacity', 'blockInfo_active', 'blockInfo_handle']
block_type['formats'] += [np.int64, np.int64, np.bool, np.int32]
# create datatype
self.dtype_block = np.dtype(block_type, align=True)
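        # e.g. for the docstring example (scalar channels x, v, b and block_size=100)
        # the resulting dtype is roughly:
        #   names:   ['x', 'v', 'b', 'blockInfo_size', 'blockInfo_capacity',
        #             'blockInfo_active', 'blockInfo_handle']
        #   formats: [(float, (100,)), (float, (100,)), (int, (100,)),
        #             int64, int64, bool, int32]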
        # set the ID field index (if it exists)
if 'ID' in block_type['names']:
self.ID_field_index = block_type['names'].index('ID')
def initialize(self, num_elements):
'''
Initialize blocks and return new block ids
'''
self.clear()
return self.append(num_elements, True)
def append(self, num_elements, reuse_inactive_block = False, set_defaults = True):
'''
        Return a list of handles to the newly appended blocks
        Initialize them with default values (unless set_defaults is False)
'''
init_block_func = block_utils.init_block
if self.ID_field_index >= 0:
init_block_func = block_utils.init_block_with_ID
block_handles = block_utils.append_blocks(self.blocks,
reuse_inactive_block,
num_elements,
init_block_func)
if set_defaults==False:
return block_handles
for block_handle in block_handles:
block_container = self.blocks[block_handle]
for field_id, default_value in enumerate(self.defaults):
if field_id == self.ID_field_index:
continue
block_container[0][field_id][:] = default_value
return block_handles
def append_empty(self, num_elements, reuse_inactive_block = False):
'''
        Return a list of handles to uninitialized blocks
'''
return self.append(num_elements, reuse_inactive_block, False)
def __len__(self):
return len(self.blocks)
def get_field_names(self):
return self.block(0).dtype.names
'''
Vectorize Functions on blocks
'''
def __take_with_id(self, block_handles = []):
for block_handle in block_handles:
block_container = self.blocks[block_handle]
block_data = block_container[0]
if block_data['blockInfo_active']:
yield block_container
def __take(self):
for block_container in self.blocks:
block_data = block_container[0]
if block_data['blockInfo_active']:
yield block_container
def get_blocks(self, block_handles = None):
if block_handles is None:
return self.__take()
return self.__take_with_id(block_handles)
def copyto(self, field_name, values, block_handles = None):
num_elements = 0
for block_container in self.get_blocks(block_handles):
block_data = block_container[0]
begin_index = num_elements
block_n_elements = block_data['blockInfo_size']
num_elements += block_n_elements
end_index = num_elements
np.copyto(block_data[field_name][0:block_n_elements], values[begin_index:end_index])
def fill(self, field_name, value, block_handles = None):
for block_container in self.get_blocks(block_handles):
block_data = block_container[0]
block_data[field_name].fill(value)
def flatten(self, field_name, block_handles = None):
'''
        Convert the blocks of a field into a single flat array
'''
field_id = self.get_field_names().index(field_name)
first_value = self.block(0)[field_id][0]
field_type = first_value.dtype.type
field_shape = first_value.shape
field_format =(field_type, field_shape)
num_elements = block_utils.compute_num_elements(self.blocks, block_handles)
result = np.empty(num_elements, field_format)
num_elements = 0
for block_container in self.get_blocks(block_handles):
block_data = block_container[0]
begin_index = num_elements
block_n_elements = block_data['blockInfo_size']
num_elements += block_n_elements
end_index = num_elements
np.copyto(result[begin_index:end_index], block_data[field_id][0:block_n_elements])
return result
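# Usage sketch (illustrative only; `Particle` is a hypothetical channel class and the
# block split assumes block_utils allocates ceil(n / block_size) blocks):
#
#   class Particle:
#       def __init__(self):
#           self.x = np.zeros(2)   # position channel
#           self.v = np.zeros(2)   # velocity channel
#           self.b = 0             # integer channel
#
#   data = DataBlock(Particle, block_size=100)
#   handles = data.initialize(250)     # e.g. three blocks of 100, 100 and 50 elements
#   positions = data.flatten('x')      # single np.ndarray of shape (250, 2)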
| 35.149123 | 111 | 0.606938 |
4a1c0dc229dad69cb12a68bfdf09ca04f472c120
| 3,425 |
py
|
Python
|
modules/preferences.py
|
seirl/lcurse
|
0e177e1d402d38c29c44e48a83b94ecfb886b2e2
|
[
"Unlicense"
] | null | null | null |
modules/preferences.py
|
seirl/lcurse
|
0e177e1d402d38c29c44e48a83b94ecfb886b2e2
|
[
"Unlicense"
] | null | null | null |
modules/preferences.py
|
seirl/lcurse
|
0e177e1d402d38c29c44e48a83b94ecfb886b2e2
|
[
"Unlicense"
] | null | null | null |
from PyQt5 import Qt
from modules import defines
class PreferencesDlg(Qt.QDialog):
def __init__(self, parent):
super(PreferencesDlg, self).__init__(parent)
self.settings = Qt.QSettings()
print(defines)
layout = Qt.QVBoxLayout(self)
layout.addWidget(Qt.QLabel(self.tr("WoW Install Folder:"), self))
folderlayout = Qt.QHBoxLayout()
self.wowInstallFolder = Qt.QLineEdit(self.getWowFolder(), self)
folderlayout.addWidget(self.wowInstallFolder)
btn = Qt.QPushButton(self.tr("..."), self)
btn.clicked.connect(self.browseForWoWFolder)
folderlayout.addWidget(btn)
layout.addLayout(folderlayout)
layout.addWidget(Qt.QLabel(self.tr("Max. concurrent Threads:"), self))
self.maxthreads = Qt.QSpinBox(self)
self.maxthreads.setMinimum(1)
self.maxthreads.setMaximum(1000)
self.maxthreads.setValue(self.getMaxThreads())
layout.addWidget(self.maxthreads)
layout.addWidget(Qt.QLabel(self.tr("Current Toc Number:"), self))
self.currenttoc = Qt.QLineEdit(str(self.getTocVersion()),self)
layout.addWidget(self.currenttoc)
bottom = Qt.QHBoxLayout()
bottom.addSpacing(100)
btn = Qt.QPushButton(self.tr("Save"), self)
btn.clicked.connect(self.accept)
btn.setDefault(True)
bottom.addWidget(btn)
btn = Qt.QPushButton(self.tr("Cancel"), self)
btn.clicked.connect(self.reject)
bottom.addWidget(btn)
layout.addSpacing(100)
layout.addLayout(bottom)
self.setLayout(layout)
def browseForWoWFolder(self):
selectedDir = Qt.QFileDialog.getExistingDirectory(self,
self.tr("Select Wow Install Folder"),
self.wowInstallFolder.text(),
Qt.QFileDialog.ShowDirsOnly |
Qt.QFileDialog.DontResolveSymlinks)
if selectedDir:
directory = Qt.QDir("{}/Interface/AddOns".format(selectedDir))
if directory.exists():
self.wowInstallFolder.setText(selectedDir)
else:
Qt.QMessageBox.warning(self, self.tr("Not Wow-Folder"), self.tr(
"The selected folder wasn't an installation directory of wow.\nPlease select the wow folder"))
def getMaxThreads(self):
return int(self.settings.value(defines.LCURSE_MAXTHREADS_KEY, defines.LCURSE_MAXTHREADS_DEFAULT))
def setMaxThreads(self, newMaxThreads):
return self.settings.setValue(defines.LCURSE_MAXTHREADS_KEY, int(newMaxThreads))
def getWowFolder(self):
return self.settings.value(defines.WOW_FOLDER_KEY, defines.WOW_FOLDER_DEFAULT)
def setWowFolder(self, newfolder):
return self.settings.setValue(defines.WOW_FOLDER_KEY, newfolder)
def getTocVersion(self):
return self.settings.value(defines.WOW_TOC_KEY,70200)
def setTocVersion(self,newtoc):
return self.settings.setValue(defines.WOW_TOC_KEY,int(newtoc))
def accept(self):
self.setWowFolder(self.wowInstallFolder.text())
self.setMaxThreads(self.maxthreads.value())
self.setTocVersion(self.currenttoc.text())
super(PreferencesDlg, self).accept()
| 40.294118 | 114 | 0.632117 |
4a1c0e24fb75540b2d68d782e5f79fe08f78b6f7
| 2,395 |
py
|
Python
|
eMG_utilities.py
|
cristianochesi/PMG
|
eaf57618ba9e1fada3facb3679cdd05a8e0f8bef
|
[
"MIT"
] | 1 |
2021-12-30T22:30:29.000Z
|
2021-12-30T22:30:29.000Z
|
eMG_utilities.py
|
cristianochesi/e-MGs
|
eaf57618ba9e1fada3facb3679cdd05a8e0f8bef
|
[
"MIT"
] | null | null | null |
eMG_utilities.py
|
cristianochesi/e-MGs
|
eaf57618ba9e1fada3facb3679cdd05a8e0f8bef
|
[
"MIT"
] | null | null | null |
import copy
from eMG_complexity_metrics import *
def print_offline_measures(t):
# fixme: to be expanded
print("\n---Offline-Measures------")
print("Sentence: " + t.sentence)
print("Steps: " + str(t.step))
    print("Pending items in mem: " + str(len(t.mg.current_node.mem)))
if t.merge_failed:
print("Pending word (failed to merge): " + t.merge_failed_word)
print("Pending expectations: " + str(t.mg.current_node.get_expect()))
if len(t.mg.current_node.mem) == 0 and t.mg.current_node.get_expect() == "" and len(t.words) == 0 and not t.merge_failed:
print("Prediction: GRAMMATICAL")
else:
print("Prediction: UNGRAMMATICAL")
print("Merge unexpected items: " + str(t.mg.merge_unexpected))
print("Move failures: " + str(get_move_failures()))
print("Ambiguities: " + str(get_MaxD()))
print("MaxD: " + str(get_MaxD()))
print("MaxT: " + str(get_MaxT()))
print("SumT: " + str(get_SumT()))
print("MaxS: " + str(get_MaxS()))
print("RelM: " + str(get_RelM()))
def print_online_measures(t):
print("\n---Online-Measures------")
# words = t.sentence.split()
words = t.words_disambiguated
print("Sentence:\t", end='')
for word in words:
print(word + "\t", end='')
print("\nENCODING:\t", end='')
nodes = copy.deepcopy(t.nodes)
for word in words:
cw = find_word(nodes, word)
print(str(cw.encoding) + "\t", end='')
print("\nINTEGRATION:\t", end='')
nodes = copy.deepcopy(t.nodes)
pw = eMG_node.PMG_node("", [], [], [], "")
pw.index = 0
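    # integration here is measured as the distance, in node indices, between each word
    # and the previous one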
for word in words:
cw = find_word(nodes, word)
enc = cw.index - pw.index
pw = cw
print(str(enc) + "\t", end='')
print("\nRETRIEVAL:\t", end='')
nodes = copy.deepcopy(t.nodes)
for word in words:
print(str(get_retrieval_cost(nodes, word)) + "\t", end='')
print("\nINTERVENTION:\t", end='')
nodes = copy.deepcopy(t.nodes)
for word in words:
print(str(get_intervention_cost(nodes, word)) + "\t", end='')
def find_word(nodes, word):
node = eMG_node.PMG_node("", [], [], [], "")
for n in range(0, len(nodes)):
if nodes[n].phon == word:
node = nodes[n]
nodes.pop(n)
break
return node
def print_tree(t):
print("\n---Tree------------------")
print("\\begin{forest}")
t.tree.print_node(t.mg.root)
t.tree.print_annotations()
print("\\end{forest}")
def check_choice(choice, size):
check = True
try:
i = int(choice)
if i >= size:
check = False
except ValueError:
check = False
return check
| 27.528736 | 122 | 0.640919 |
4a1c0e871d91900c75f16ac685665610678dd2b6
| 1,341 |
py
|
Python
|
Part 4 - Clustering/Part 4 - Hierarchical Clustering.py
|
MichaelGW123/MachineLearning
|
e89c5a8c7dad425c3550f96ec1c3e76fc1fdb2cb
|
[
"MIT"
] | null | null | null |
Part 4 - Clustering/Part 4 - Hierarchical Clustering.py
|
MichaelGW123/MachineLearning
|
e89c5a8c7dad425c3550f96ec1c3e76fc1fdb2cb
|
[
"MIT"
] | null | null | null |
Part 4 - Clustering/Part 4 - Hierarchical Clustering.py
|
MichaelGW123/MachineLearning
|
e89c5a8c7dad425c3550f96ec1c3e76fc1fdb2cb
|
[
"MIT"
] | null | null | null |
# Hierarchical Clustering
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
# Importing the dataset
path = Path(__file__).parent / 'Mall_Customers.csv'
dataset = pd.read_csv(path)
X = dataset.iloc[:, [3, 4]].values
# Using the dendrogram to find the optimal number of clusters
import scipy.cluster.hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(X, method = 'ward'))
plt.title('Dendrogram')
plt.xlabel('Customer')
plt.ylabel('Distance')
plt.show()
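# A common reading of the dendrogram (a heuristic, not computed by this script): cut it
# across the tallest vertical gap not crossed by any horizontal merge line and count the
# branches intersected; for this dataset that reading suggests 5 clusters, which is the
# n_clusters value used below.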
# Training the Hierarchical model on the dataset
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters=5, affinity='euclidean', linkage = 'ward')
y_hc = hc.fit_predict(X)
# Visualizing the clusters
plt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s = 100, c = 'red', label = 'Cluster 1')
plt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s = 100, c = 'blue', label = 'Cluster 2')
plt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1], s = 100, c = 'green', label = 'Cluster 3')
plt.scatter(X[y_hc == 3, 0], X[y_hc == 3, 1], s = 100, c = 'cyan', label = 'Cluster 4')
plt.scatter(X[y_hc == 4, 0], X[y_hc == 4, 1], s = 100, c = 'magenta', label = 'Cluster 5')
plt.title('Clusters of customers')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()
| 36.243243 | 90 | 0.686801 |
4a1c100ca5139f6457aae19a4f7139c472b5469d
| 4,271 |
py
|
Python
|
src/util.py
|
jportner/kibbe
|
72d62ba0c1ca206430fc606c72d19eace6329e41
|
[
"MIT"
] | 2 |
2021-08-17T15:04:38.000Z
|
2021-12-28T15:41:42.000Z
|
src/util.py
|
jportner/kibbe
|
72d62ba0c1ca206430fc606c72d19eace6329e41
|
[
"MIT"
] | 7 |
2021-08-24T07:53:19.000Z
|
2022-01-26T20:45:24.000Z
|
src/util.py
|
jportner/kibbe
|
72d62ba0c1ca206430fc606c72d19eace6329e41
|
[
"MIT"
] | 2 |
2021-07-15T16:19:44.000Z
|
2022-01-19T20:25:30.000Z
|
import json
import os
from pathlib import PurePath
import re
import subprocess
import time
import click
import requests
def is_tool(name):
"""Check whether `name` is on PATH and marked as executable."""
# from whichcraft import which
from shutil import which
return which(name) is not None
def is_es_running():
try:
requests.get("http://localhost:9200")
return True
except requests.ConnectionError:
return False
def is_kibana_running():
try:
requests.get("http://localhost:5601")
return True
except requests.ConnectionError:
return False
def force_kibana_root():
if not is_kibana_repo():
raise click.ClickException(
"You must run this command in the root of a kibana repo clone"
)
def is_kibana_repo():
if not os.path.isfile("package.json"):
return False
file = open("package.json")
try:
content = json.load(file)
if content["name"] != "kibana" or not content["homepage"].startswith(
"https://www.elastic.co/"
):
return False
except ValueError:
return False
finally:
file.close()
return True
def get_modified_files():
files = ""
try:
files = subprocess.getoutput("git diff --name-only HEAD")
except ValueError:
return []
files = filter(None, files.split("\n"))
return list(files)
def find_related_test(file):
path = PurePath(file)
# skip if the file is a test
if path.match("*.test.*"):
return ""
test_file = path.with_suffix(".test" + path.suffix)
if os.path.isfile(test_file):
return test_file
return ""
def find_related_plugin_folder(file):
path = PurePath(file)
try:
if not path.relative_to("x-pack/plugins"):
return ""
except ValueError:
return ""
while not path.match("x-pack/plugins/*"):
path = PurePath(path.parent)
return str(path)
def merge_params(config_params, unparsed_args, useEqual=False):
final_params = []
params_map = {}
for conf, value in config_params:
params_map["--" + conf] = value
skip = False
for index, param in enumerate(unparsed_args):
if skip:
skip = False
continue
nextIsValue = len(unparsed_args) > index + 1 and not str(
unparsed_args[index + 1]
).startswith("--")
if param in params_map and nextIsValue:
params_map[param] = unparsed_args[index + 1]
skip = True
else:
params_map[param] = unparsed_args[index + 1] if nextIsValue else ""
if nextIsValue:
skip = True
if useEqual:
for param in params_map:
if len(params_map[param]) > 0:
final_params.append(param + "=" + params_map[param])
else:
final_params.append(param)
else:
for param in params_map:
final_params.append(param)
if len(params_map[param]) > 0:
final_params.append(params_map[param])
return final_params
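# Example of the merge behavior (a sketch; relies on Python 3.7+ dict insertion order):
#   merge_params([("port", "5601")], ["--port", "9200", "--verbose"])
#       -> ["--port", "9200", "--verbose"]   (the CLI value overrides the config value)
#   merge_params([("port", "5601")], [], useEqual=True)
#       -> ["--port=5601"]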
def unparsed_to_map(params):
params_map = {}
skip = False
for index, param in enumerate(params):
if skip:
skip = False
continue
nextIsValue = len(params) > index + 1 and not str(params[index + 1]).startswith(
"--"
)
if param.startswith("--"):
if nextIsValue:
params_map[param] = params[index + 1]
skip = True
else:
params_map[param] = ""
return params_map
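# Example (illustrative):
#   unparsed_to_map(["--port", "9200", "--verbose"]) -> {"--port": "9200", "--verbose": ""}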
def wait_for_elastic_search():
total = 60
current = total
numbers = list(range(1, total))
with click.progressbar(numbers) as bar:
for item in bar:
current = item
if is_es_running():
break
time.sleep(1)
# progress = click.progressbar(length=total, label="Waiting for elasticsearch")
# while timeout >= 0:
if current <= 0:
return True
else:
return False
def get_valid_filename(name):
s = str(name).strip().replace(" ", "_")
s = re.sub(r"(?u)[^-\w.]", "-", s)
return s
| 23.211957 | 88 | 0.577851 |
4a1c1043d8d908f02cb9137ad15fe7c692dc2515
| 246,178 |
py
|
Python
|
python/ccxt/huobi.py
|
JulesText/ccxt
|
e9209fa2f0a3f43ebb073248c04829f3f13b4830
|
[
"MIT"
] | 2 |
2020-05-18T02:01:37.000Z
|
2020-06-28T07:20:40.000Z
|
python/ccxt/huobi.py
|
alimogh/ccxt
|
518ea1a6d212605aa19deed74991bc525470e5c9
|
[
"MIT"
] | null | null | null |
python/ccxt/huobi.py
|
alimogh/ccxt
|
518ea1a6d212605aa19deed74991bc525470e5c9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountNotEnabled
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import NetworkError
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class huobi(Exchange):
def describe(self):
return self.deep_extend(super(huobi, self).describe(), {
'id': 'huobi',
'name': 'Huobi',
'countries': ['CN'],
'rateLimit': 100,
'userAgent': self.userAgents['chrome39'],
'certified': True,
'version': 'v1',
'accounts': None,
'accountsById': None,
'hostname': 'api.huobi.pro', # api.testnet.huobi.pro
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': True,
'swap': True,
'future': True,
'option': None,
'addMargin': None,
'cancelAllOrders': True,
'cancelOrder': True,
'cancelOrders': True,
'createDepositAddress': None,
'createOrder': True,
'createReduceOnlyOrder': False,
'deposit': None,
'fetchAccounts': True,
'fetchBalance': True,
'fetchBidsAsks': None,
'fetchBorrowRate': True,
'fetchBorrowRateHistories': None,
'fetchBorrowRateHistory': None,
'fetchBorrowRates': True,
'fetchBorrowRatesPerSymbol': True,
'fetchCanceledOrders': None,
'fetchClosedOrder': None,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDeposit': None,
'fetchDepositAddress': True,
'fetchDepositAddresses': None,
'fetchDepositAddressesByNetwork': True,
'fetchDeposits': True,
'fetchFundingFee': None,
'fetchFundingFees': None,
'fetchFundingHistory': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': True,
'fetchFundingRates': True,
'fetchIndexOHLCV': True,
'fetchIsolatedPositions': False,
'fetchL3OrderBook': None,
'fetchLedger': True,
'fetchLedgerEntry': None,
'fetchLeverage': False,
'fetchLeverageTiers': True,
'fetchMarketLeverageTiers': True,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyBuys': None,
'fetchMySells': None,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrder': None,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrderBooks': None,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchPosition': True,
'fetchPositions': True,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': True,
'fetchStatus': None,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': True,
'fetchTradingFees': None,
'fetchTradingLimits': True,
'fetchTransactions': None,
'fetchTransfers': None,
'fetchWithdrawAddressesByNetwork': True,
'fetchWithdrawal': None,
'fetchWithdrawals': True,
'fetchWithdrawalWhitelist': None,
'reduceMargin': None,
'setLeverage': True,
'setMarginMode': False,
'setPositionMode': False,
'signIn': None,
'transfer': True,
'withdraw': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'4h': '4hour',
'1d': '1day',
'1w': '1week',
'1M': '1mon',
'1y': '1year',
},
'urls': {
# 'test': {
# 'market': 'https://api.testnet.huobi.pro',
# 'public': 'https://api.testnet.huobi.pro',
# 'private': 'https://api.testnet.huobi.pro',
# },
'logo': 'https://user-images.githubusercontent.com/1294454/76137448-22748a80-604e-11ea-8069-6e389271911d.jpg',
'hostnames': {
'contract': 'api.hbdm.com',
'spot': 'api.huobi.pro',
# recommended for AWS
# 'contract': 'api.hbdm.vn',
# 'spot': 'api-aws.huobi.pro',
},
'api': {
'contract': 'https://{hostname}',
'spot': 'https://{hostname}',
'market': 'https://{hostname}',
'public': 'https://{hostname}',
'private': 'https://{hostname}',
'v2Public': 'https://{hostname}',
'v2Private': 'https://{hostname}',
},
'www': 'https://www.huobi.com',
'referral': {
'url': 'https://www.huobi.com/en-us/topic/double-reward/?invite_code=6rmm2223',
'discount': 0.15,
},
'doc': [
'https://huobiapi.github.io/docs/spot/v1/cn/',
'https://huobiapi.github.io/docs/dm/v1/cn/',
'https://huobiapi.github.io/docs/coin_margined_swap/v1/cn/',
'https://huobiapi.github.io/docs/usdt_swap/v1/cn/',
'https://huobiapi.github.io/docs/option/v1/cn/',
],
'fees': 'https://www.huobi.com/about/fee/',
},
'api': {
# ------------------------------------------------------------
# old api definitions
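                # note: the numeric values attached to the endpoints below are rate-limit
                # cost weights; the throttler scales them by 'rateLimit' above (100 ms),
                # so a cost of 0.2 is roughly 20 ms and a cost of 10 roughly 1000 ms per call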
'v2Public': {
'get': {
                        'reference/currencies': 1,  # currency and chain reference info
                        'market-status': 1,  # get current market status
},
},
'v2Private': {
'get': {
'account/ledger': 1,
'account/withdraw/quota': 1,
                        'account/withdraw/address': 1,  # query withdrawal addresses (parent user only)
'account/deposit/address': 1,
                        'account/repayment': 5,  # query repayment records
'reference/transact-fee-rate': 1,
                        'account/asset-valuation': 0.2,  # get account asset valuation
                        'point/account': 5,  # query point card balance
                        'sub-user/user-list': 1,  # get sub-user list
                        'sub-user/user-state': 1,  # get the state of a specific sub-user
                        'sub-user/account-list': 1,  # get the account list of a specific sub-user
                        'sub-user/deposit-address': 1,  # query sub-user deposit address
                        'sub-user/query-deposit': 1,  # query sub-user deposit records
                        'user/api-key': 1,  # query API key info of parent and sub users
                        'user/uid': 1,  # get user UID of parent and sub users
                        'algo-orders/opening': 1,  # query untriggered open conditional orders
                        'algo-orders/history': 1,  # query conditional order history
                        'algo-orders/specific': 1,  # query a specific conditional order
                        'c2c/offers': 1,  # query lending/borrowing orders
                        'c2c/offer': 1,  # query a specific lending/borrowing order and its transactions
                        'c2c/transactions': 1,  # query lending/borrowing transactions
                        'c2c/repayment': 1,  # query repayment transactions
                        'c2c/account': 1,  # query account balance
                        'etp/reference': 1,  # basic reference info
                        'etp/transactions': 5,  # get leveraged ETP creation/redemption records
                        'etp/transaction': 5,  # get a specific leveraged ETP creation/redemption record
                        'etp/rebalance': 1,  # get leveraged ETP rebalance records
                        'etp/limit': 1,  # get ETP holding limit
},
'post': {
'account/transfer': 1,
                        'account/repayment': 5,  # repay borrowed coins (cross and isolated margin)
                        'point/transfer': 5,  # point card transfer
                        'sub-user/management': 1,  # freeze/unfreeze sub-user
                        'sub-user/creation': 1,  # create sub-user
                        'sub-user/tradable-market': 1,  # set sub-user trading permission
                        'sub-user/transferability': 1,  # set sub-user asset transfer-out permission
                        'sub-user/api-key-generation': 1,  # create sub-user API key
                        'sub-user/api-key-modification': 1,  # modify sub-user API key
                        'sub-user/api-key-deletion': 1,  # delete sub-user API key
                        'sub-user/deduct-mode': 1,  # set sub-user fee deduction mode
                        'algo-orders': 1,  # place conditional order
                        'algo-orders/cancel-all-after': 1,  # automatically cancel orders after a timeout
                        'algo-orders/cancellation': 1,  # cancel conditional order (before trigger)
                        'c2c/offer': 1,  # place lending/borrowing order
                        'c2c/cancellation': 1,  # cancel lending/borrowing order
                        'c2c/cancel-all': 1,  # cancel all lending/borrowing orders
                        'c2c/repayment': 1,  # repay
                        'c2c/transfer': 1,  # asset transfer
                        'etp/creation': 5,  # leveraged ETP creation
                        'etp/redemption': 5,  # leveraged ETP redemption
                        'etp/{transactId}/cancel': 10,  # cancel a single leveraged ETP order
                        'etp/batch-cancel': 50,  # batch cancel leveraged ETP orders
},
},
'market': {
'get': {
                        'history/kline': 1,  # get kline (candlestick) data
                        'detail/merged': 1,  # get aggregated market data (ticker)
                        'depth': 1,  # get market depth data
                        'trade': 1,  # get trade detail data
                        'history/trade': 1,  # get the most recent trades in batch
                        'detail': 1,  # get market detail 24h volume data
'tickers': 1,
                        'etp': 1,  # get leveraged ETP real-time net asset value
},
},
'public': {
'get': {
                        'common/symbols': 1,  # query all trading pairs supported by the system
                        'common/currencys': 1,  # query all currencies supported by the system
                        'common/timestamp': 1,  # query current system time
'common/exchange': 1, # order limits
'settings/currencys': 1, # ?language=en-US
},
},
'private': {
'get': {
                        'account/accounts': 0.2,  # query all accounts of the current user (i.e. account-id)
                        'account/accounts/{id}/balance': 0.2,  # query the balance of a specified account
'account/accounts/{sub-uid}': 1,
'account/history': 4,
'cross-margin/loan-info': 1,
                        'margin/loan-info': 1,  # query loan interest rates and quota
'fee/fee-rate/get': 1,
'order/openOrders': 0.4,
'order/orders': 0.4,
                        'order/orders/{id}': 0.4,  # query details of an order
                        'order/orders/{id}/matchresults': 0.4,  # query match results of an order
'order/orders/getClientOrder': 0.4,
                        'order/history': 1,  # query current and historical orders
                        'order/matchresults': 1,  # query current and historical trades
                        # 'dw/withdraw-virtual/addresses',  # query crypto withdrawal addresses (Deprecated)
'query/deposit-withdraw': 1,
# 'margin/loan-info', # duplicate
                        'margin/loan-orders': 0.2,  # margin loan orders
                        'margin/accounts/balance': 0.2,  # margin account details
                        'cross-margin/loan-orders': 1,  # query cross-margin loan orders
                        'cross-margin/accounts/balance': 1,  # cross-margin account details
'points/actions': 1,
'points/orders': 1,
'subuser/aggregate-balance': 10,
'stable-coin/exchange_rate': 1,
'stable-coin/quote': 1,
},
'post': {
                        'account/transfer': 1,  # asset transfer (generic endpoint for transfers between parent and sub users)
'futures/transfer': 1,
'order/batch-orders': 0.4,
                        'order/orders/place': 0.2,  # create and execute a new order (one-step order placement, recommended)
'order/orders/submitCancelClientOrder': 0.2,
'order/orders/batchCancelOpenOrders': 0.4,
                        # 'order/orders',  # create a new order request (create only, does not execute)
                        # 'order/orders/{id}/place',  # execute an order (only executes a previously created order)
                        'order/orders/{id}/submitcancel': 0.2,  # request to cancel an order
                        'order/orders/batchcancel': 0.4,  # batch cancel orders
                        # 'dw/balance/transfer',  # asset transfer
                        'dw/withdraw/api/create': 1,  # request a crypto withdrawal
                        # 'dw/withdraw-virtual/create',  # request a crypto withdrawal
                        # 'dw/withdraw-virtual/{id}/place',  # confirm a crypto withdrawal request (Deprecated)
                        'dw/withdraw-virtual/{id}/cancel': 1,  # request to cancel a crypto withdrawal
                        'dw/transfer-in/margin': 10,  # transfer from spot account into margin account
                        'dw/transfer-out/margin': 10,  # transfer from margin account out to spot account
                        'margin/orders': 10,  # request a margin loan
                        'margin/orders/{id}/repay': 10,  # repay a margin loan
                        'cross-margin/transfer-in': 1,  # asset transfer
                        'cross-margin/transfer-out': 1,  # asset transfer
                        'cross-margin/orders': 1,  # request a cross-margin loan
                        'cross-margin/orders/{id}/repay': 1,  # repay a cross-margin loan
'stable-coin/exchange': 1,
'subuser/transfer': 10,
},
},
# ------------------------------------------------------------
# new api definitions
# 'https://status.huobigroup.com/api/v2/summary.json': 1,
# 'https://status-dm.huobigroup.com/api/v2/summary.json': 1,
# 'https://status-swap.huobigroup.com/api/v2/summary.json': 1,
# 'https://status-linear-swap.huobigroup.com/api/v2/summary.json': 1,
'spot': {
'public': {
'get': {
'v2/market-status': 1,
'v1/common/symbols': 1,
'v1/common/currencys': 1,
'v2/reference/currencies': 1,
'v1/common/timestamp': 1,
'v1/common/exchange': 1, # order limits
# Market Data
'market/history/candles': 1,
'market/history/kline': 1,
'market/detail/merged': 1,
'market/tickers': 1,
'market/depth': 1,
'market/trade': 1,
'market/history/trade': 1,
'market/detail/': 1,
'market/etp': 1,
# ETP
'v2/etp/reference': 1,
'v2/etp/rebalance': 1,
},
},
'private': {
'get': {
# Account
'v1/account/accounts': 0.2,
'v1/account/accounts/{account-id}/balance': 0.2,
'v2/account/valuation': 1,
'v2/account/asset-valuation': 0.2,
'v1/account/history': 4,
'v2/account/ledger': 1,
'v2/point/account': 5,
# Wallet(Deposit and Withdraw)
'v2/account/deposit/address': 1,
'v2/account/withdraw/quota': 1,
'v2/account/withdraw/address': 1,
'v2/reference/currencies': 1,
'v1/query/deposit-withdraw': 1,
# Sub user management
'v2/user/api-key': 1,
'v2/user/uid': 1,
'v2/sub-user/user-list': 1,
'v2/sub-user/user-state': 1,
'v2/sub-user/account-list': 1,
'v2/sub-user/deposit-address': 1,
'v2/sub-user/query-deposit': 1,
'v1/subuser/aggregate-balance': 10,
'v1/account/accounts/{sub-uid}': 1,
# Trading
'v1/order/openOrders': 0.4,
'v1/order/orders/{order-id}': 0.4,
'v1/order/orders/getClientOrder': 0.4,
'v1/order/orders/{order-id}/matchresults': 0.4,
'v1/order/orders': 0.4,
'v1/order/history': 1,
'v1/order/matchresults': 1,
'v2/reference/transact-fee-rate': 1,
# Conditional Order
'v2/algo-orders/opening': 1,
'v2/algo-orders/history': 1,
'v2/algo-orders/specific': 1,
# Margin Loan(Cross/Isolated)
'v1/margin/loan-info': 1,
'v1/margin/loan-orders': 0.2,
'v1/margin/accounts/balance': 0.2,
'v1/cross-margin/loan-info': 1,
'v1/cross-margin/loan-orders': 1,
'v1/cross-margin/accounts/balance': 1,
'v2/account/repayment': 5,
# Stable Coin Exchange
'v1/stable-coin/quote': 1,
# ETP
'v2/etp/transactions': 5,
'v2/etp/transaction': 5,
'v2/etp/limit': 1,
},
'post': {
# Account
'v1/account/transfer': 1,
'v1/futures/transfer': 1, # future transfers
'v2/point/transfer': 5,
'v2/account/transfer': 1, # swap transfers
# Wallet(Deposit and Withdraw)
'v1/dw/withdraw/api/create': 1,
'v1/dw/withdraw-virtual/{withdraw-id}/cancel': 1,
# Sub user management
'v2/sub-user/deduct-mode': 1,
'v2/sub-user/creation': 1,
'v2/sub-user/management': 1,
'v2/sub-user/tradable-market': 1,
'v2/sub-user/transferability': 1,
'v2/sub-user/api-key-generation': 1,
'v2/sub-user/api-key-modification': 1,
'v2/sub-user/api-key-deletion': 1,
'v1/subuser/transfer': 10,
# Trading
'v1/order/orders/place': 0.2,
'v1/order/batch-orders': 0.4,
'v1/order/orders/{order-id}/submitcancel': 0.2,
'v1/order/orders/submitCancelClientOrder': 0.2,
'v1/order/orders/batchCancelOpenOrders': 0.4,
'v1/order/orders/batchcancel': 0.4,
'v2/algo-orders/cancel-all-after': 1,
# Conditional Order
'v2/algo-orders': 1,
'v2/algo-orders/cancellation': 1,
# Margin Loan(Cross/Isolated)
'v2/account/repayment': 5,
'v1/dw/transfer-in/margin': 10,
'v1/dw/transfer-out/margin': 10,
'v1/margin/orders': 10,
'v1/margin/orders/{order-id}/repay': 10,
'v1/cross-margin/transfer-in': 1,
'v1/cross-margin/transfer-out': 1,
'v1/cross-margin/orders': 1,
'v1/cross-margin/orders/{order-id}/repay': 1,
# Stable Coin Exchange
'v1/stable-coin/exchange': 1,
# ETP
'v2/etp/creation': 5,
'v2/etp/redemption': 5,
'v2/etp/{transactId}/cancel': 10,
'v2/etp/batch-cancel': 50,
},
},
},
'contract': {
'public': {
'get': {
'api/v1/timestamp': 1,
# Future Market Data interface
'api/v1/contract_contract_info': 1,
'api/v1/contract_index': 1,
'api/v1/contract_price_limit': 1,
'api/v1/contract_open_interest': 1,
'api/v1/contract_delivery_price': 1,
'market/depth': 1,
'market/bbo': 1,
'market/history/kline': 1,
'index/market/history/mark_price_kline': 1,
'market/detail/merged': 1,
'market/detail/batch_merged': 1,
'market/trade': 1,
'market/history/trade': 1,
'api/v1/contract_risk_info': 1,
'api/v1/contract_insurance_fund': 1,
'api/v1/contract_adjustfactor': 1,
'api/v1/contract_his_open_interest': 1,
'api/v1/contract_ladder_margin': 1,
'api/v1/contract_api_state': 1,
'api/v1/contract_elite_account_ratio': 1,
'api/v1/contract_elite_position_ratio': 1,
'api/v1/contract_liquidation_orders': 1,
'api/v1/contract_settlement_records': 1,
'index/market/history/index': 1,
'index/market/history/basis': 1,
'api/v1/contract_estimated_settlement_price': 1,
# Swap Market Data interface
'swap-api/v1/swap_contract_info': 1,
'swap-api/v1/swap_index': 1,
'swap-api/v1/swap_price_limit': 1,
'swap-api/v1/swap_open_interest': 1,
'swap-ex/market/depth': 1,
'swap-ex/market/bbo': 1,
'swap-ex/market/history/kline': 1,
'index/market/history/swap_mark_price_kline': 1,
'swap-ex/market/detail/merged': 1,
'swap-ex/market/detail/batch_merged': 1,
'swap-ex/market/trade': 1,
'swap-ex/market/history/trade': 1,
'swap-api/v1/swap_risk_info': 1,
'swap-api/v1/swap_insurance_fund': 1,
'swap-api/v1/swap_adjustfactor': 1,
'swap-api/v1/swap_his_open_interest': 1,
'swap-api/v1/swap_ladder_margin': 1,
'swap-api/v1/swap_api_state': 1,
'swap-api/v1/swap_elite_account_ratio': 1,
'swap-api/v1/swap_elite_position_ratio': 1,
'swap-api/v1/swap_estimated_settlement_price': 1,
'swap-api/v1/swap_liquidation_orders': 1,
'swap-api/v1/swap_settlement_records': 1,
'swap-api/v1/swap_funding_rate': 1,
'swap-api/v1/swap_batch_funding_rate': 1,
'swap-api/v1/swap_historical_funding_rate': 1,
'index/market/history/swap_premium_index_kline': 1,
'index/market/history/swap_estimated_rate_kline': 1,
'index/market/history/swap_basis': 1,
# Swap Market Data interface
'linear-swap-api/v1/swap_contract_info': 1,
'linear-swap-api/v1/swap_index': 1,
'linear-swap-api/v1/swap_price_limit': 1,
'linear-swap-api/v1/swap_open_interest': 1,
'linear-swap-ex/market/depth': 1,
'linear-swap-ex/market/bbo': 1,
'linear-swap-ex/market/history/kline': 1,
'index/market/history/linear_swap_mark_price_kline': 1,
'linear-swap-ex/market/detail/merged': 1,
'linear-swap-ex/market/detail/batch_merged': 1,
'linear-swap-ex/market/trade': 1,
'linear-swap-ex/market/history/trade': 1,
'linear-swap-api/v1/swap_risk_info': 1,
'swap-api/v1/linear-swap-api/v1/swap_insurance_fund': 1,
'linear-swap-api/v1/swap_adjustfactor': 1,
'linear-swap-api/v1/swap_cross_adjustfactor': 1,
'linear-swap-api/v1/swap_his_open_interest': 1,
'linear-swap-api/v1/swap_ladder_margin': 1,
'linear-swap-api/v1/swap_cross_ladder_margin': 1,
'linear-swap-api/v1/swap_api_state': 1,
'linear-swap-api/v1/swap_cross_transfer_state': 1,
'linear-swap-api/v1/swap_cross_trade_state': 1,
'linear-swap-api/v1/swap_elite_account_ratio': 1,
'linear-swap-api/v1/swap_elite_position_ratio': 1,
'linear-swap-api/v1/swap_liquidation_orders': 1,
'linear-swap-api/v1/swap_settlement_records': 1,
'linear-swap-api/v1/swap_funding_rate': 1,
'linear-swap-api/v1/swap_batch_funding_rate': 1,
'linear-swap-api/v1/swap_historical_funding_rate': 1,
'index/market/history/linear_swap_premium_index_kline': 1,
'index/market/history/linear_swap_estimated_rate_kline': 1,
'index/market/history/linear_swap_basis': 1,
'linear-swap-api/v1/swap_estimated_settlement_price': 1,
},
},
'private': {
'get': {
# Future Account Interface
'api/v1/contract_api_trading_status': 1,
# Swap Account Interface
'swap-api/v1/swap_api_trading_status': 1,
# Swap Account Interface
'linear-swap-api/v1/swap_api_trading_status': 1,
},
'post': {
# Future Account Interface
'api/v1/contract_balance_valuation': 1,
'api/v1/contract_account_info': 1,
'api/v1/contract_position_info': 1,
'api/v1/contract_sub_auth': 1,
'api/v1/contract_sub_account_list': 1,
'api/v1/contract_sub_account_info_list': 1,
'api/v1/contract_sub_account_info': 1,
'api/v1/contract_sub_position_info': 1,
'api/v1/contract_financial_record': 1,
'api/v1/contract_financial_record_exact': 1,
'api/v1/contract_user_settlement_records': 1,
'api/v1/contract_order_limit': 1,
'api/v1/contract_fee': 1,
'api/v1/contract_transfer_limit': 1,
'api/v1/contract_position_limit': 1,
'api/v1/contract_account_position_info': 1,
'api/v1/contract_master_sub_transfer': 1,
'api/v1/contract_master_sub_transfer_record': 1,
'api/v1/contract_available_level_rate': 1,
# Future Trade Interface
'api/v1/contract_order': 1,
'v1/contract_batchorder': 1,
'api/v1/contract_cancel': 1,
'api/v1/contract_cancelall': 1,
'api/v1/contract_switch_lever_rate': 1,
'api/v1/lightning_close_position': 1,
'api/v1/contract_order_info': 1,
'api/v1/contract_order_detail': 1,
'api/v1/contract_openorders': 1,
'api/v1/contract_hisorders': 1,
'api/v1/contract_hisorders_exact': 1,
'api/v1/contract_matchresults': 1,
'api/v1/contract_matchresults_exact': 1,
# Contract Strategy Order Interface
'api/v1/contract_trigger_order': 1,
'api/v1/contract_trigger_cancel': 1,
'api/v1/contract_trigger_cancelall': 1,
'api/v1/contract_trigger_openorders': 1,
'api/v1/contract_trigger_hisorders': 1,
'api/v1/contract_tpsl_order': 1,
'api/v1/contract_tpsl_cancel': 1,
'api/v1/contract_tpsl_cancelall': 1,
'api/v1/contract_tpsl_openorders': 1,
'api/v1/contract_tpsl_hisorders': 1,
'api/v1/contract_relation_tpsl_order': 1,
'api/v1/contract_track_order': 1,
'api/v1/contract_track_cancel': 1,
'api/v1/contract_track_cancelall': 1,
'api/v1/contract_track_openorders': 1,
'api/v1/contract_track_hisorders': 1,
# Swap Account Interface
'swap-api/v1/swap_balance_valuation': 1,
'swap-api/v1/swap_account_info': 1,
'swap-api/v1/swap_position_info': 1,
'swap-api/v1/swap_account_position_info': 1,
'swap-api/v1/swap_sub_auth': 1,
'swap-api/v1/swap_sub_account_list': 1,
'swap-api/v1/swap_sub_account_info_list': 1,
'swap-api/v1/swap_sub_account_info': 1,
'swap-api/v1/swap_sub_position_info': 1,
'swap-api/v1/swap_financial_record': 1,
'swap-api/v1/swap_financial_record_exact': 1,
'swap-api/v1/swap_user_settlement_records': 1,
'swap-api/v1/swap_available_level_rate': 1,
'swap-api/v1/swap_order_limit': 1,
'swap-api/v1/swap_fee': 1,
'swap-api/v1/swap_transfer_limit': 1,
'swap-api/v1/swap_position_limit': 1,
'swap-api/v1/swap_master_sub_transfer': 1,
'swap-api/v1/swap_master_sub_transfer_record': 1,
# Swap Trade Interface
'swap-api/v1/swap_order': 1,
'swap-api/v1/swap_batchorder': 1,
'swap-api/v1/swap_cancel': 1,
'swap-api/v1/swap_cancelall': 1,
'swap-api/v1/swap_lightning_close_position': 1,
'swap-api/v1/swap_switch_lever_rate': 1,
'swap-api/v1/swap_order_info': 1,
'swap-api/v1/swap_order_detail': 1,
'swap-api/v1/swap_openorders': 1,
'swap-api/v1/swap_hisorders': 1,
'swap-api/v1/swap_hisorders_exact': 1,
'swap-api/v1/swap_matchresults': 1,
'swap-api/v1/swap_matchresults_exact': 1,
# Swap Strategy Order Interface
'swap-api/v1/swap_trigger_order': 1,
'swap-api/v1/swap_trigger_cancel': 1,
'swap-api/v1/swap_trigger_cancelall': 1,
'swap-api/v1/swap_trigger_openorders': 1,
'swap-api/v1/swap_trigger_hisorders': 1,
'swap-api/v1/swap_tpsl_order': 1,
'swap-api/v1/swap_tpsl_cancel': 1,
'swap-api/v1/swap_tpsl_cancelall': 1,
'swap-api/v1/swap_tpsl_openorders': 1,
'swap-api/v1/swap_tpsl_hisorders': 1,
'swap-api/v1/swap_relation_tpsl_order': 1,
'swap-api/v1/swap_track_order': 1,
'swap-api/v1/swap_track_cancel': 1,
'swap-api/v1/swap_track_cancelall': 1,
'swap-api/v1/swap_track_openorders': 1,
'swap-api/v1/swap_track_hisorders': 1,
# Swap Account Interface
'linear-swap-api/v1/swap_balance_valuation': 1,
'linear-swap-api/v1/swap_account_info': 1,
'linear-swap-api/v1/swap_cross_account_info': 1,
'linear-swap-api/v1/swap_position_info': 1,
'linear-swap-api/v1/swap_cross_position_info': 1,
'linear-swap-api/v1/swap_account_position_info': 1,
'linear-swap-api/v1/swap_cross_account_position_info': 1,
'linear-swap-api/v1/swap_sub_auth': 1,
'linear-swap-api/v1/swap_sub_account_list': 1,
'linear-swap-api/v1/swap_cross_sub_account_list': 1,
'linear-swap-api/v1/swap_sub_account_info_list': 1,
'linear-swap-api/v1/swap_cross_sub_account_info_list': 1,
'linear-swap-api/v1/swap_sub_account_info': 1,
'linear-swap-api/v1/swap_cross_sub_account_info': 1,
'linear-swap-api/v1/swap_sub_position_info': 1,
'linear-swap-api/v1/swap_cross_sub_position_info': 1,
'linear-swap-api/v1/swap_financial_record': 1,
'linear-swap-api/v1/swap_financial_record_exact': 1,
'linear-swap-api/v1/swap_user_settlement_records': 1,
'linear-swap-api/v1/swap_cross_user_settlement_records': 1,
'linear-swap-api/v1/swap_available_level_rate': 1,
'linear-swap-api/v1/swap_cross_available_level_rate': 1,
'linear-swap-api/v1/swap_order_limit': 1,
'linear-swap-api/v1/swap_fee': 1,
'linear-swap-api/v1/swap_transfer_limit': 1,
'linear-swap-api/v1/swap_cross_transfer_limit': 1,
'linear-swap-api/v1/swap_position_limit': 1,
'linear-swap-api/v1/swap_cross_position_limit': 1,
'linear-swap-api/v1/swap_master_sub_transfer': 1,
'linear-swap-api/v1/swap_master_sub_transfer_record': 1,
'linear-swap-api/v1/swap_transfer_inner': 1,
# Swap Trade Interface
'linear-swap-api/v1/swap_order': 1,
'linear-swap-api/v1/swap_cross_order': 1,
'linear-swap-api/v1/swap_batchorder': 1,
'linear-swap-api/v1/swap_cross_batchorder': 1,
'linear-swap-api/v1/swap_cancel': 1,
'linear-swap-api/v1/swap_cross_cancel': 1,
'linear-swap-api/v1/swap_cancelall': 1,
'linear-swap-api/v1/swap_cross_cancelall': 1,
'linear-swap-api/v1/swap_switch_lever_rate': 1,
'linear-swap-api/v1/swap_cross_switch_lever_rate': 1,
'linear-swap-api/v1/swap_lightning_close_position': 1,
'linear-swap-api/v1/swap_cross_lightning_close_position': 1,
'linear-swap-api/v1/swap_order_info': 1,
'linear-swap-api/v1/swap_cross_order_info': 1,
'linear-swap-api/v1/swap_order_detail': 1,
'linear-swap-api/v1/swap_cross_order_detail': 1,
'linear-swap-api/v1/swap_openorders': 1,
'linear-swap-api/v1/swap_cross_openorders': 1,
'linear-swap-api/v1/swap_hisorders': 1,
'linear-swap-api/v1/swap_cross_hisorders': 1,
'linear-swap-api/v1/swap_hisorders_exact': 1,
'linear-swap-api/v1/swap_cross_hisorders_exact': 1,
'linear-swap-api/v1/swap_matchresults': 1,
'linear-swap-api/v1/swap_cross_matchresults': 1,
'linear-swap-api/v1/swap_matchresults_exact': 1,
'linear-swap-api/v1/swap_cross_matchresults_exact': 1,
# Swap Strategy Order Interface
'linear-swap-api/v1/swap_trigger_order': 1,
'linear-swap-api/v1/swap_cross_trigger_order': 1,
'linear-swap-api/v1/swap_trigger_cancel': 1,
'linear-swap-api/v1/swap_cross_trigger_cancel': 1,
'linear-swap-api/v1/swap_trigger_cancelall': 1,
'linear-swap-api/v1/swap_cross_trigger_cancelall': 1,
'linear-swap-api/v1/swap_trigger_openorders': 1,
'linear-swap-api/v1/swap_cross_trigger_openorders': 1,
'linear-swap-api/v1/swap_trigger_hisorders': 1,
'linear-swap-api/v1/swap_cross_trigger_hisorders': 1,
'linear-swap-api/v1/swap_tpsl_order': 1,
'linear-swap-api/v1/swap_cross_tpsl_order': 1,
'linear-swap-api/v1/swap_tpsl_cancel': 1,
'linear-swap-api/v1/swap_cross_tpsl_cancel': 1,
'linear-swap-api/v1/swap_tpsl_cancelall': 1,
'linear-swap-api/v1/swap_cross_tpsl_cancelall': 1,
'linear-swap-api/v1/swap_tpsl_openorders': 1,
'linear-swap-api/v1/swap_cross_tpsl_openorders': 1,
'linear-swap-api/v1/swap_tpsl_hisorders': 1,
'linear-swap-api/v1/swap_cross_tpsl_hisorders': 1,
'linear-swap-api/v1/swap_relation_tpsl_order': 1,
'linear-swap-api/v1/swap_cross_relation_tpsl_order': 1,
'linear-swap-api/v1/swap_track_order': 1,
'linear-swap-api/v1/swap_cross_track_order': 1,
'linear-swap-api/v1/swap_track_cancel': 1,
'linear-swap-api/v1/swap_cross_track_cancel': 1,
'linear-swap-api/v1/swap_track_cancelall': 1,
'linear-swap-api/v1/swap_cross_track_cancelall': 1,
'linear-swap-api/v1/swap_track_openorders': 1,
'linear-swap-api/v1/swap_cross_track_openorders': 1,
'linear-swap-api/v1/swap_track_hisorders': 1,
'linear-swap-api/v1/swap_cross_track_hisorders': 1,
},
},
},
},
'fees': {
'trading': {
'feeSide': 'get',
'tierBased': False,
'percentage': True,
'maker': self.parse_number('0.002'),
'taker': self.parse_number('0.002'),
},
},
'exceptions': {
'broad': {
'contract is restricted of closing positions on API. Please contact customer service': OnMaintenance,
'maintain': OnMaintenance,
},
'exact': {
# err-code
'1017': OrderNotFound, # {"status":"error","err_code":1017,"err_msg":"Order doesnt exist.","ts":1640550859242}
'1034': InvalidOrder, # {"status":"error","err_code":1034,"err_msg":"Incorrect field of order price type.","ts":1643802870182}
'1036': InvalidOrder, # {"status":"error","err_code":1036,"err_msg":"Incorrect field of open long form.","ts":1643802518986}
'1039': InvalidOrder, # {"status":"error","err_code":1039,"err_msg":"Buy price must be lower than 39270.9USDT. Sell price must exceed 37731USDT.","ts":1643802374403}
'1041': InvalidOrder, # {"status":"error","err_code":1041,"err_msg":"The order amount exceeds the limit(170000Cont), please modify and order again.","ts":1643802784940}
'1047': InsufficientFunds, # {"status":"error","err_code":1047,"err_msg":"Insufficient margin available.","ts":1643802672652}
'1066': BadSymbol, # {"status":"error","err_code":1066,"err_msg":"The symbol field cannot be empty. Please re-enter.","ts":1640550819147}
'1067': InvalidOrder, # {"status":"error","err_code":1067,"err_msg":"The client_order_id field is invalid. Please re-enter.","ts":1643802119413}
'1013': BadSymbol, # {"status":"error","err_code":1013,"err_msg":"This contract symbol doesnt exist.","ts":1640550459583}
'1094': InvalidOrder, # {"status":"error","err_code":1094,"err_msg":"The leverage cannot be empty, please switch the leverage or contact customer service","ts":1640496946243}
'1220': AccountNotEnabled, # {"status":"error","err_code":1220,"err_msg":"You don’t have access permission as you have not opened contracts trading.","ts":1645096660718}
'bad-request': BadRequest,
'validation-format-error': BadRequest, # {"status":"error","err-code":"validation-format-error","err-msg":"Format Error: order-id.","data":null}
'validation-constraints-required': BadRequest, # {"status":"error","err-code":"validation-constraints-required","err-msg":"Field is missing: client-order-id.","data":null}
'base-date-limit-error': BadRequest, # {"status":"error","err-code":"base-date-limit-error","err-msg":"date less than system limit","data":null}
'api-not-support-temp-addr': PermissionDenied, # {"status":"error","err-code":"api-not-support-temp-addr","err-msg":"API withdrawal does not support temporary addresses","data":null}
'timeout': RequestTimeout, # {"ts":1571653730865,"status":"error","err-code":"timeout","err-msg":"Request Timeout"}
'gateway-internal-error': ExchangeNotAvailable, # {"status":"error","err-code":"gateway-internal-error","err-msg":"Failed to load data. Try again later.","data":null}
'account-frozen-balance-insufficient-error': InsufficientFunds, # {"status":"error","err-code":"account-frozen-balance-insufficient-error","err-msg":"trade account balance is not enough, left: `0.0027`","data":null}
'invalid-amount': InvalidOrder, # eg "Paramemter `amount` is invalid."
'order-limitorder-amount-min-error': InvalidOrder, # limit order amount error, min: `0.001`
'order-limitorder-amount-max-error': InvalidOrder, # market order amount error, max: `1000000`
'order-marketorder-amount-min-error': InvalidOrder, # market order amount error, min: `0.01`
'order-limitorder-price-min-error': InvalidOrder, # limit order price error
'order-limitorder-price-max-error': InvalidOrder, # limit order price error
                    'order-holding-limit-failed': InvalidOrder, # {"status":"error","err-code":"order-holding-limit-failed","err-msg":"Order failed, exceeded the holding limit of this currency","data":null}
'order-orderprice-precision-error': InvalidOrder, # {"status":"error","err-code":"order-orderprice-precision-error","err-msg":"order price precision error, scale: `4`","data":null}
'order-etp-nav-price-max-error': InvalidOrder, # {"status":"error","err-code":"order-etp-nav-price-max-error","err-msg":"Order price cannot be higher than 5% of NAV","data":null}
'order-orderstate-error': OrderNotFound, # canceling an already canceled order
'order-queryorder-invalid': OrderNotFound, # querying a non-existent order
'order-update-error': ExchangeNotAvailable, # undocumented error
'api-signature-check-failed': AuthenticationError,
'api-signature-not-valid': AuthenticationError, # {"status":"error","err-code":"api-signature-not-valid","err-msg":"Signature not valid: Incorrect Access key [Access key错误]","data":null}
'base-record-invalid': OrderNotFound, # https://github.com/ccxt/ccxt/issues/5750
                    'base-symbol-trade-disabled': BadSymbol, # {"status":"error","err-code":"base-symbol-trade-disabled","err-msg":"Trading is disabled for this symbol","data":null}
'base-symbol-error': BadSymbol, # {"status":"error","err-code":"base-symbol-error","err-msg":"The symbol is invalid","data":null}
'system-maintenance': OnMaintenance, # {"status": "error", "err-code": "system-maintenance", "err-msg": "System is in maintenance!", "data": null}
'base-request-exceed-frequency-limit': RateLimitExceeded, # {"status":"error","err-code":"base-request-exceed-frequency-limit","err-msg":"Frequency of requests has exceeded the limit, please try again later","data":null}
# err-msg
'invalid symbol': BadSymbol, # {"ts":1568813334794,"status":"error","err-code":"invalid-parameter","err-msg":"invalid symbol"}
'symbol trade not open now': BadSymbol, # {"ts":1576210479343,"status":"error","err-code":"invalid-parameter","err-msg":"symbol trade not open now"}
'require-symbol': BadSymbol, # {"status":"error","err-code":"require-symbol","err-msg":"Parameter `symbol` is required.","data":null}
},
},
'precisionMode': TICK_SIZE,
'options': {
'fetchMarkets': {
'types': {
'spot': True,
'future': {
'linear': True,
'inverse': True,
},
'swap': {
'linear': True,
'inverse': True,
},
},
},
'defaultType': 'spot', # spot, future, swap
'defaultSubType': 'inverse', # inverse, linear
'defaultNetwork': 'ERC20',
'networks': {
'ETH': 'erc20',
'TRX': 'trc20',
'HRC20': 'hrc20',
'HECO': 'hrc20',
'HT': 'hrc20',
'ALGO': 'algo',
'OMNI': '',
},
# https://github.com/ccxt/ccxt/issues/5376
'fetchOrdersByStatesMethod': 'spot_private_get_v1_order_orders', # 'spot_private_get_v1_order_history' # https://github.com/ccxt/ccxt/pull/5392
'createMarketBuyOrderRequiresPrice': True,
'language': 'en-US',
'broker': {
'id': 'AA03022abc',
},
'accountsByType': {
'spot': 'pro',
'future': 'futures',
},
'typesByAccount': {
'pro': 'spot',
'futures': 'future',
},
'spot': {
'stopOrderTypes': {
'stop-limit': True,
'buy-stop-limit': True,
'sell-stop-limit': True,
'stop-limit-fok': True,
'buy-stop-limit-fok': True,
'sell-stop-limit-fok': True,
},
'limitOrderTypes': {
'limit': True,
'buy-limit': True,
'sell-limit': True,
'ioc': True,
'buy-ioc': True,
'sell-ioc': True,
'limit-maker': True,
'buy-limit-maker': True,
'sell-limit-maker': True,
'stop-limit': True,
'buy-stop-limit': True,
'sell-stop-limit': True,
'limit-fok': True,
'buy-limit-fok': True,
'sell-limit-fok': True,
'stop-limit-fok': True,
'buy-stop-limit-fok': True,
'sell-stop-limit-fok': True,
},
},
},
'commonCurrencies': {
# https://github.com/ccxt/ccxt/issues/6081
# https://github.com/ccxt/ccxt/issues/3365
# https://github.com/ccxt/ccxt/issues/2873
'GET': 'Themis', # conflict with GET(Guaranteed Entrance Token, GET Protocol)
'GTC': 'Game.com', # conflict with Gitcoin and Gastrocoin
'HIT': 'HitChain',
'HOT': 'Hydro Protocol', # conflict with HOT(Holo) https://github.com/ccxt/ccxt/issues/4929
# https://github.com/ccxt/ccxt/issues/7399
# https://coinmarketcap.com/currencies/pnetwork/
# https://coinmarketcap.com/currencies/penta/markets/
# https://en.cryptonomist.ch/blog/eidoo/the-edo-to-pnt-upgrade-what-you-need-to-know-updated/
'PNT': 'Penta',
'SBTC': 'Super Bitcoin',
'BIFI': 'Bitcoin File', # conflict with Beefy.Finance https://github.com/ccxt/ccxt/issues/8706
},
})
def fetch_time(self, params={}):
options = self.safe_value(self.options, 'fetchTime', {})
defaultType = self.safe_string(self.options, 'defaultType', 'spot')
type = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', type)
method = 'spotPublicGetV1CommonTimestamp'
if (type == 'future') or (type == 'swap'):
method = 'contractPublicGetApiV1Timestamp'
response = getattr(self, method)(params)
#
# spot
#
# {"status":"ok","data":1637504261099}
#
# future, swap
#
# {"status":"ok","ts":1637504164707}
#
return self.safe_integer_2(response, 'data', 'ts')
def parse_trading_fee(self, fee, market=None):
#
# {
# "symbol":"btcusdt",
# "actualMakerRate":"0.002",
# "actualTakerRate":"0.002",
# "takerFeeRate":"0.002",
# "makerFeeRate":"0.002"
# }
#
marketId = self.safe_string(fee, 'symbol')
return {
'info': fee,
'symbol': self.safe_symbol(marketId, market),
'maker': self.safe_number(fee, 'actualMakerRate'),
'taker': self.safe_number(fee, 'actualTakerRate'),
}
def fetch_trading_fee(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbols': market['id'], # trading symbols comma-separated
}
response = self.spotPrivateGetV2ReferenceTransactFeeRate(self.extend(request, params))
#
# {
# "code":200,
# "data":[
# {
# "symbol":"btcusdt",
# "actualMakerRate":"0.002",
# "actualTakerRate":"0.002",
# "takerFeeRate":"0.002",
# "makerFeeRate":"0.002"
# }
# ],
# "success":true
# }
#
data = self.safe_value(response, 'data', [])
first = self.safe_value(data, 0, {})
return self.parse_trading_fee(first, market)
def fetch_trading_limits(self, symbols=None, params={}):
        # this method should not be called directly, use loadTradingLimits() instead
        # by default it will try to load the trading limits of all symbols(with separate requests)
        # however, if you define symbols = ['ETH/BTC', 'LTC/BTC'] in args it will only load those
self.load_markets()
if symbols is None:
symbols = self.symbols
result = {}
for i in range(0, len(symbols)):
symbol = symbols[i]
result[symbol] = self.fetch_trading_limits_by_id(self.market_id(symbol), params)
return result
def fetch_trading_limits_by_id(self, id, params={}):
request = {
'symbol': id,
}
response = self.spotPublicGetV1CommonExchange(self.extend(request, params))
#
# {status: "ok",
# data: { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }}
#
return self.parse_trading_limits(self.safe_value(response, 'data', {}))
def parse_trading_limits(self, limits, symbol=None, params={}):
#
# { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }
#
return {
'info': limits,
'limits': {
'amount': {
'min': self.safe_number(limits, 'limit-order-must-greater-than'),
'max': self.safe_number(limits, 'limit-order-must-less-than'),
},
},
}
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['cost'], self.precisionMode)
def fetch_markets(self, params={}):
options = self.safe_value(self.options, 'fetchMarkets', {})
types = self.safe_value(options, 'types', {})
allMarkets = []
keys = list(types.keys())
for i in range(0, len(keys)):
type = keys[i]
value = self.safe_value(types, type)
if value is True:
markets = self.fetch_markets_by_type_and_sub_type(type, None, params)
allMarkets = self.array_concat(allMarkets, markets)
else:
subKeys = list(value.keys())
for j in range(0, len(subKeys)):
subType = subKeys[j]
subValue = self.safe_value(value, subType)
if subValue:
markets = self.fetch_markets_by_type_and_sub_type(type, subType, params)
allMarkets = self.array_concat(allMarkets, markets)
return allMarkets
def fetch_markets_by_type_and_sub_type(self, type, subType, params={}):
method = 'spotPublicGetV1CommonSymbols'
query = self.omit(params, ['type', 'subType'])
spot = (type == 'spot')
contract = (type != 'spot')
future = (type == 'future')
swap = (type == 'swap')
linear = None
inverse = None
request = {}
if contract:
linear = (subType == 'linear')
inverse = (subType == 'inverse')
if linear:
method = 'contractPublicGetLinearSwapApiV1SwapContractInfo'
if future:
request['business_type'] = 'futures'
elif inverse:
if future:
method = 'contractPublicGetApiV1ContractContractInfo'
elif swap:
method = 'contractPublicGetSwapApiV1SwapContractInfo'
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# {
# "status":"ok",
# "data":[
# {
# "base-currency":"xrp3s",
# "quote-currency":"usdt",
# "price-precision":4,
# "amount-precision":4,
# "symbol-partition":"innovation",
# "symbol":"xrp3susdt",
# "state":"online",
# "value-precision":8,
# "min-order-amt":0.01,
# "max-order-amt":1616.4353,
# "min-order-value":5,
# "limit-order-min-order-amt":0.01,
# "limit-order-max-order-amt":1616.4353,
# "limit-order-max-buy-amt":1616.4353,
# "limit-order-max-sell-amt":1616.4353,
# "sell-market-min-order-amt":0.01,
# "sell-market-max-order-amt":1616.4353,
# "buy-market-max-order-value":2500,
# "max-order-value":2500,
# "underlying":"xrpusdt",
# "mgmt-fee-rate":0.035000000000000000,
# "charge-time":"23:55:00",
# "rebal-time":"00:00:00",
# "rebal-threshold":-5,
# "init-nav":10.000000000000000000,
# "api-trading":"enabled",
# "tags":"etp,nav,holdinglimit"
# },
# ]
# }
#
# inverse future
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"BTC",
# "contract_code":"BTC211126",
        #                 "contract_type":"this_week",
# "contract_size":100.000000000000000000,
# "price_tick":0.010000000000000000,
# "delivery_date":"20211126",
# "delivery_time":"1637913600000",
# "create_date":"20211112",
# "contract_status":1,
# "settlement_time":"1637481600000"
# },
# ],
# "ts":1637474595140
# }
#
# linear futures
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"BTC",
# "contract_code":"BTC-USDT-211231",
# "contract_size":0.001000000000000000,
# "price_tick":0.100000000000000000,
# "delivery_date":"20211231",
# "delivery_time":"1640937600000",
# "create_date":"20211228",
# "contract_status":1,
# "settlement_date":"1640764800000",
# "support_margin_mode":"cross",
# "business_type":"futures",
# "pair":"BTC-USDT",
        #                 "contract_type":"this_week"  # next_week, quarter
# },
# ],
# "ts":1640736207263
# }
#
# swaps
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"BTC",
# "contract_code":"BTC-USDT",
# "contract_size":0.001000000000000000,
# "price_tick":0.100000000000000000,
# "delivery_time":"",
# "create_date":"20201021",
# "contract_status":1,
# "settlement_date":"1637481600000",
# "support_margin_mode":"all", # isolated
# },
# ],
# "ts":1637474774467
# }
#
markets = self.safe_value(response, 'data')
numMarkets = len(markets)
if numMarkets < 1:
raise NetworkError(self.id + ' fetchMarkets() returned an empty response: ' + self.json(markets))
result = []
for i in range(0, len(markets)):
market = markets[i]
baseId = None
quoteId = None
settleId = None
id = None
if contract:
id = self.safe_string(market, 'contract_code')
if swap:
parts = id.split('-')
baseId = self.safe_string(market, 'symbol')
quoteId = self.safe_string(parts, 1)
settleId = baseId if inverse else quoteId
elif future:
baseId = self.safe_string(market, 'symbol')
if inverse:
quoteId = 'USD'
settleId = baseId
else:
pair = self.safe_string(market, 'pair')
parts = pair.split('-')
quoteId = self.safe_string(parts, 1)
settleId = quoteId
else:
baseId = self.safe_string(market, 'base-currency')
quoteId = self.safe_string(market, 'quote-currency')
id = baseId + quoteId
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
symbol = base + '/' + quote
expiry = None
if contract:
if inverse:
symbol += ':' + base
elif linear:
symbol += ':' + quote
if future:
expiry = self.safe_integer(market, 'delivery_time')
symbol += '-' + self.yymmdd(expiry)
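            # the resulting unified symbols look like 'BTC/USDT' for spot, 'BTC/USDT:USDT' for linear swaps,
            # 'BTC/USD:BTC' for inverse swaps and 'BTC/USD:BTC-211126' for inverse futures(illustrative examples)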
contractSize = self.safe_number(market, 'contract_size')
pricePrecision = None
amountPrecision = None
costPrecision = None
if spot:
pricePrecision = self.safe_string(market, 'price-precision')
pricePrecision = self.parse_number('1e-' + pricePrecision)
amountPrecision = self.safe_string(market, 'amount-precision')
amountPrecision = self.parse_number('1e-' + amountPrecision)
costPrecision = self.safe_string(market, 'value-precision')
costPrecision = self.parse_number('1e-' + costPrecision)
else:
pricePrecision = self.safe_number(market, 'price_tick')
amountPrecision = 1
maker = None
taker = None
if spot:
maker = 0 if (base == 'OMG') else 0.2 / 100
taker = 0 if (base == 'OMG') else 0.2 / 100
minAmount = self.safe_number(market, 'min-order-amt')
maxAmount = self.safe_number(market, 'max-order-amt')
minCost = self.safe_number(market, 'min-order-value', 0)
active = None
if spot:
state = self.safe_string(market, 'state')
active = (state == 'online')
elif contract:
contractStatus = self.safe_integer(market, 'contract_status')
active = (contractStatus == 1)
leverageRatio = self.safe_string(market, 'leverage-ratio', '1')
superLeverageRatio = self.safe_string(market, 'super-margin-leverage-ratio', '1')
hasLeverage = Precise.string_gt(leverageRatio, '1') or Precise.string_gt(superLeverageRatio, '1')
# 0 Delisting
# 1 Listing
# 2 Pending Listing
# 3 Suspension
# 4 Suspending of Listing
# 5 In Settlement
# 6 Delivering
# 7 Settlement Completed
# 8 Delivered
# 9 Suspending of Trade
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': type,
'spot': spot,
'margin': (spot and hasLeverage),
'swap': swap,
'future': future,
'option': False,
'active': active,
'contract': contract,
'linear': linear,
'inverse': inverse,
'taker': taker,
'maker': maker,
'contractSize': contractSize,
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': pricePrecision,
'cost': costPrecision,
},
'limits': {
'leverage': {
'min': self.parse_number('1'),
'max': self.parse_number(leverageRatio),
'superMax': self.parse_number(superLeverageRatio),
},
'amount': {
'min': minAmount,
'max': maxAmount,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': minCost,
'max': None,
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
#
# fetchTickers
#
# {
# symbol: "bhdht",
# open: 2.3938,
# high: 2.4151,
# low: 2.3323,
# close: 2.3909,
# amount: 628.992,
# vol: 1493.71841095,
# count: 2088,
# bid: 2.3643,
# bidSize: 0.7136,
# ask: 2.4061,
# askSize: 0.4156
# }
#
marketId = self.safe_string_2(ticker, 'symbol', 'contract_code')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer(ticker, 'ts')
bid = None
bidVolume = None
ask = None
askVolume = None
if 'bid' in ticker:
if isinstance(ticker['bid'], list):
bid = self.safe_string(ticker['bid'], 0)
bidVolume = self.safe_string(ticker['bid'], 1)
else:
bid = self.safe_string(ticker, 'bid')
bidVolume = self.safe_string(ticker, 'bidSize')
if 'ask' in ticker:
if isinstance(ticker['ask'], list):
ask = self.safe_string(ticker['ask'], 0)
askVolume = self.safe_string(ticker['ask'], 1)
else:
ask = self.safe_string(ticker, 'ask')
askVolume = self.safe_string(ticker, 'askSize')
open = self.safe_string(ticker, 'open')
close = self.safe_string(ticker, 'close')
baseVolume = self.safe_string(ticker, 'amount')
quoteVolume = self.safe_string(ticker, 'vol')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': bid,
'bidVolume': bidVolume,
'ask': ask,
'askVolume': askVolume,
'vwap': None,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market, False)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {}
fieldName = 'symbol'
method = 'spotPublicGetMarketDetailMerged'
if market['linear']:
method = 'contractPublicGetLinearSwapExMarketDetailMerged'
fieldName = 'contract_code'
elif market['inverse']:
if market['future']:
method = 'contractPublicGetMarketDetailMerged'
elif market['swap']:
method = 'contractPublicGetSwapExMarketDetailMerged'
fieldName = 'contract_code'
request[fieldName] = market['id']
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status": "ok",
# "ch": "market.btcusdt.detail.merged",
# "ts": 1583494336669,
# "tick": {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
# }
#
# future, swap
#
# {
# "ch":"market.BTC211126.detail.merged",
# "status":"ok",
# "tick":{
# "amount":"669.3385682049668320322569544150680718474",
# "ask":[59117.44,48],
# "bid":[59082,48],
# "close":"59087.97",
# "count":5947,
# "high":"59892.62",
# "id":1637502670,
# "low":"57402.87",
# "open":"57638",
# "ts":1637502670059,
# "vol":"394598"
# },
# "ts":1637502670059
# }
#
tick = self.safe_value(response, 'tick', {})
ticker = self.parse_ticker(tick, market)
timestamp = self.safe_integer(response, 'ts')
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
return ticker
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
options = self.safe_value(self.options, 'fetchTickers', {})
defaultType = self.safe_string(self.options, 'defaultType', 'spot')
type = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', type)
method = 'spotPublicGetMarketTickers'
defaultSubType = self.safe_string(self.options, 'defaultSubType', 'inverse')
subType = self.safe_string(options, 'subType', defaultSubType)
subType = self.safe_string(params, 'subType', subType)
request = {}
future = (type == 'future')
swap = (type == 'swap')
linear = (subType == 'linear')
inverse = (subType == 'inverse')
if linear:
method = 'contractPublicGetLinearSwapExMarketDetailBatchMerged'
if future:
request['business_type'] = 'futures'
elif inverse:
if future:
method = 'contractPublicGetMarketDetailBatchMerged'
elif swap:
method = 'contractPublicGetSwapExMarketDetailBatchMerged'
params = self.omit(params, ['type', 'subType'])
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "data":[
# {
# "symbol":"hbcbtc",
# "open":5.313E-5,
# "high":5.34E-5,
# "low":5.112E-5,
# "close":5.175E-5,
# "amount":1183.87,
# "vol":0.0618599229,
# "count":205,
# "bid":5.126E-5,
# "bidSize":5.25,
# "ask":5.214E-5,
# "askSize":150.0
# },
# ],
# "status":"ok",
# "ts":1639547261293
# }
#
# inverse swaps, linear swaps, inverse futures
#
# {
# "status":"ok",
# "ticks":[
# {
# "id":1637504679,
# "ts":1637504679372,
# "ask":[0.10644,100],
# "bid":[0.10624,26],
# "symbol":"TRX_CW",
# "open":"0.10233",
# "close":"0.10644",
# "low":"0.1017",
# "high":"0.10725",
# "amount":"2340267.415144052378486261756692535687481566",
# "count":882,
# "vol":"24706"
# }
# ],
# "ts":1637504679376
# }
#
# linear futures
#
# {
# "status":"ok",
# "ticks":[
# {
# "id":1640745627,
# "ts":1640745627957,
# "ask":[48079.1,20],
# "bid":[47713.8,125],
# "business_type":"futures",
# "contract_code":"BTC-USDT-CW",
# "open":"49011.8",
# "close":"47934",
# "low":"47292.3",
# "high":"49011.8",
# "amount":"17.398",
# "count":1515,
# "vol":"17398",
# "trade_turnover":"840726.5048"
# }
# ],
# "ts":1640745627988
# }
#
tickers = self.safe_value_2(response, 'data', 'ticks', [])
timestamp = self.safe_integer(response, 'ts')
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
# the market ids for linear futures are non-standard and differ from all the other endpoints
            # so we do a linear search over the loaded markets to match them by contract type
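            # e.g. the batch endpoint returns an id like 'BTC-USDT-CW' for the current-week contract,
            # which is matched to the loaded market whose info['contract_type'] is 'this_week'(illustrative example)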
if future and linear:
for j in range(0, len(self.symbols)):
symbol = self.symbols[j]
market = self.market(symbol)
contractType = self.safe_string(market['info'], 'contract_type')
if (contractType == 'this_week') and (ticker['symbol'] == (market['baseId'] + '-' + market['quoteId'] + '-CW')):
ticker['symbol'] = market['symbol']
break
elif (contractType == 'next_week') and (ticker['symbol'] == (market['baseId'] + '-' + market['quoteId'] + '-NW')):
ticker['symbol'] = market['symbol']
break
elif (contractType == 'this_quarter') and (ticker['symbol'] == (market['baseId'] + '-' + market['quoteId'] + '-CQ')):
ticker['symbol'] = market['symbol']
break
elif (contractType == 'next_quarter') and (ticker['symbol'] == (market['baseId'] + '-' + market['quoteId'] + '-NQ')):
ticker['symbol'] = market['symbol']
break
symbol = ticker['symbol']
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
#
# from the API docs
#
            # to get depth data within 150 levels, use step0, step1, step2, step3, step4, step5, step14, step15(merged depth data 0-5, 14-15); when step is 0, depth data will not be merged
            # to get depth data within 20 levels, use step6, step7, step8, step9, step10, step11, step12, step13(merged depth data 7-13); when step is 6, depth data will not be merged
#
'type': 'step0',
# 'symbol': market['id'], # spot, future
# 'contract_code': market['id'], # swap
}
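        # the aggregation level can be overridden through params, e.g. a hypothetical call:
        # exchange.fetch_order_book('BTC/USDT', None, {'type': 'step1'})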
fieldName = 'symbol'
method = 'spotPublicGetMarketDepth'
if market['linear']:
method = 'contractPublicGetLinearSwapExMarketDepth'
fieldName = 'contract_code'
elif market['inverse']:
if market['future']:
method = 'contractPublicGetMarketDepth'
elif market['swap']:
method = 'contractPublicGetSwapExMarketDepth'
fieldName = 'contract_code'
else:
if limit is not None:
# Valid depths are 5, 10, 20 or empty https://huobiapi.github.io/docs/spot/v1/en/#get-market-depth
if (limit != 5) and (limit != 10) and (limit != 20):
raise BadRequest(self.id + ' fetchOrderBook() limit argument must be None, 5, 10 or 20, default is 150')
request['depth'] = limit
request[fieldName] = market['id']
response = getattr(self, method)(self.extend(request, params))
#
# spot, future, swap
#
# {
# "status": "ok",
# "ch": "market.btcusdt.depth.step0",
# "ts": 1583474832790,
# "tick": {
# "bids": [
# [9100.290000000000000000, 0.200000000000000000],
# [9099.820000000000000000, 0.200000000000000000],
# [9099.610000000000000000, 0.205000000000000000],
# ],
# "asks": [
# [9100.640000000000000000, 0.005904000000000000],
# [9101.010000000000000000, 0.287311000000000000],
# [9101.030000000000000000, 0.012121000000000000],
# ],
# "ch":"market.BTC-USD.depth.step0",
# "ts":1583474832008,
# "id":1637554816,
# "mrid":121654491624,
# "version":104999698780
# }
# }
#
if 'tick' in response:
if not response['tick']:
raise BadSymbol(self.id + ' fetchOrderBook() returned empty response: ' + self.json(response))
tick = self.safe_value(response, 'tick')
timestamp = self.safe_integer(tick, 'ts', self.safe_integer(response, 'ts'))
result = self.parse_order_book(tick, symbol, timestamp)
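            # reuse the exchange-provided 'version' field as the orderbook nonce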
result['nonce'] = self.safe_integer(tick, 'version')
return result
raise ExchangeError(self.id + ' fetchOrderBook() returned unrecognized response: ' + self.json(response))
def parse_trade(self, trade, market=None):
#
# spot fetchTrades(public)
#
# {
# "amount": 0.010411000000000000,
# "trade-id": 102090736910,
# "ts": 1583497692182,
# "id": 10500517034273194594947,
# "price": 9096.050000000000000000,
# "direction": "sell"
# }
#
# spot fetchMyTrades(private)
#
# {
# 'symbol': 'swftcbtc',
# 'fee-currency': 'swftc',
# 'filled-fees': '0',
# 'source': 'spot-api',
# 'id': 83789509854000,
# 'type': 'buy-limit',
# 'order-id': 83711103204909,
# 'filled-points': '0.005826843283532154',
# 'fee-deduct-currency': 'ht',
# 'filled-amount': '45941.53',
# 'price': '0.0000001401',
# 'created-at': 1597933260729,
# 'match-id': 100087455560,
# 'role': 'maker',
# 'trade-id': 100050305348
# }
#
# linear swap isolated margin fetchOrder details
#
# {
# "trade_id": 131560927,
# "trade_price": 13059.800000000000000000,
# "trade_volume": 1.000000000000000000,
# "trade_turnover": 13.059800000000000000,
# "trade_fee": -0.005223920000000000,
# "created_at": 1603703614715,
# "role": "taker",
# "fee_asset": "USDT",
# "profit": 0,
# "real_profit": 0,
# "id": "131560927-770334322963152896-1"
# }
#
# inverse swap cross margin fetchMyTrades
#
# {
# "contract_type":"swap",
# "pair":"O3-USDT",
# "business_type":"swap",
# "query_id":652123190,
# "match_id":28306009409,
# "order_id":941137865226903553,
# "symbol":"O3",
# "contract_code":"O3-USDT",
# "direction":"sell",
# "offset":"open",
# "trade_volume":100.000000000000000000,
# "trade_price":0.398500000000000000,
# "trade_turnover":39.850000000000000000,
# "trade_fee":-0.007970000000000000,
# "offset_profitloss":0E-18,
# "create_date":1644426352999,
# "role":"Maker",
# "order_source":"api",
# "order_id_str":"941137865226903553",
# "id":"28306009409-941137865226903553-1",
# "fee_asset":"USDT",
# "margin_mode":"cross",
# "margin_account":"USDT",
# "real_profit":0E-18,
# "trade_partition":"USDT"
# }
#
marketId = self.safe_string_2(trade, 'contract_code', 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
timestamp = self.safe_integer_2(trade, 'ts', 'created-at')
timestamp = self.safe_integer_2(trade, 'created_at', 'create_date', timestamp)
order = self.safe_string_2(trade, 'order-id', 'order_id')
side = self.safe_string(trade, 'direction')
type = self.safe_string(trade, 'type')
if type is not None:
typeParts = type.split('-')
side = typeParts[0]
type = typeParts[1]
takerOrMaker = self.safe_string_lower(trade, 'role')
priceString = self.safe_string_2(trade, 'price', 'trade_price')
amountString = self.safe_string_2(trade, 'filled-amount', 'amount')
amountString = self.safe_string(trade, 'trade_volume', amountString)
costString = self.safe_string(trade, 'trade_turnover')
fee = None
feeCost = self.safe_string_2(trade, 'filled-fees', 'trade_fee')
feeCurrencyId = self.safe_string_2(trade, 'fee-currency', 'fee_asset')
feeCurrency = self.safe_currency_code(feeCurrencyId)
filledPoints = self.safe_string(trade, 'filled-points')
if filledPoints is not None:
if (feeCost is None) or Precise.string_equals(feeCost, '0'):
feeDeductCurrency = self.safe_string(trade, 'fee-deduct-currency')
if feeDeductCurrency != '':
feeCost = filledPoints
feeCurrency = self.safe_currency_code(feeDeductCurrency)
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
tradeId = self.safe_string_2(trade, 'trade-id', 'tradeId')
id = self.safe_string_2(trade, 'trade_id', 'id', tradeId)
return self.safe_trade({
'id': id,
'info': trade,
'order': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': fee,
}, market)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
marketType = None
marketType, params = self.handle_market_type_and_params('fetchOrderTrades', None, params)
method = self.get_supported_mapping(marketType, {
'spot': 'fetchSpotOrderTrades',
# 'swap': 'fetchContractOrderTrades',
# 'future': 'fetchContractOrderTrades',
})
return getattr(self, method)(id, symbol, since, limit, params)
def fetch_spot_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'order-id': id,
}
response = self.spotPrivateGetV1OrderOrdersOrderIdMatchresults(self.extend(request, params))
return self.parse_trades(response['data'], None, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('fetchMyTrades', None, params)
request = {
# spot -----------------------------------------------------------
# 'symbol': market['id'],
# 'types': 'buy-market,sell-market,buy-limit,sell-limit,buy-ioc,sell-ioc,buy-limit-maker,sell-limit-maker,buy-stop-limit,sell-stop-limit',
# 'start-time': since, # max 48 hours within 120 days
# 'end-time': self.milliseconds(), # max 48 hours within 120 days
            # 'from': 'id', # string, optional; internal id to begin with: when fetching the next page this should be the last id(not trade-id) of the previous page, when fetching the previous page it should be the first id(not trade-id) of the previous page
# 'direct': 'next', # next, prev
# 'size': limit, # default 100, max 500 The number of orders to return [1-500]
# contracts ------------------------------------------------------
# 'symbol': market['settleId'], # required
# 'trade_type': 0, # required, 0 all, 1 open long, 2 open short, 3 close short, 4 close long, 5 liquidate long positions, 6 liquidate short positions
# 'contract_code': market['id'],
# 'start_time': since, # max 48 hours within 120 days
# 'end_time': self.milliseconds(), # max 48 hours within 120 days
            # 'from_id': 'id', # string, optional; internal id to begin with: when fetching the next page this should be the last id(not trade-id) of the previous page, when fetching the previous page it should be the first id(not trade-id) of the previous page
# 'direct': 'prev', # next, prev
# 'size': limit, # default 20, max 50
}
method = None
market = None
if marketType == 'spot':
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['size'] = limit # default 100, max 500
if since is not None:
request['start-time'] = since # a date within 120 days from today
# request['end-time'] = self.sum(since, 172800000) # 48 hours window
method = 'spotPrivateGetV1OrderMatchresults'
else:
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol for ' + marketType + ' orders')
market = self.market(symbol)
request['contract_code'] = market['id']
request['trade_type'] = 0 # 0 all, 1 open long, 2 open short, 3 close short, 4 close long, 5 liquidate long positions, 6 liquidate short positions
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapMatchresultsExact'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossMatchresultsExact'
elif market['inverse']:
if marketType == 'future':
method = 'contractPrivatePostApiV1ContractMatchresultsExact'
request['symbol'] = market['settleId']
elif marketType == 'swap':
method = 'contractPrivatePostSwapApiV1SwapMatchresultsExact'
else:
raise NotSupported(self.id + ' fetchMyTrades() does not support ' + marketType + ' markets')
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status": "ok",
# "data": [
# {
# "symbol": "polyusdt",
# "fee-currency": "poly",
# "source": "spot-web",
# "price": "0.338",
# "created-at": 1629443051839,
# "role": "taker",
# "order-id": 345487249132375,
# "match-id": 5014,
# "trade-id": 1085,
# "filled-amount": "147.928994082840236",
# "filled-fees": "0",
# "filled-points": "0.1",
# "fee-deduct-currency": "hbpoint",
# "fee-deduct-state": "done",
# "id": 313288753120940,
# "type": "buy-market"
# }
# ]
# }
#
# contracts
#
# {
# "status": "ok",
# "data": {
# "trades": [
# {
# "query_id": 2424420723,
# "match_id": 113891764710,
# "order_id": 773135295142658048,
# "symbol": "ADA",
# "contract_type": "quarter", # swap
# "business_type": "futures", # swap
# "contract_code": "ADA201225",
# "direction": "buy",
# "offset": "open",
# "trade_volume": 1,
# "trade_price": 0.092,
# "trade_turnover": 10,
# "trade_fee": -0.021739130434782608,
# "offset_profitloss": 0,
# "create_date": 1604371703183,
# "role": "Maker",
# "order_source": "web",
# "order_id_str": "773135295142658048",
# "fee_asset": "ADA",
# "margin_mode": "isolated", # cross
# "margin_account": "BTC-USDT",
# "real_profit": 0,
# "id": "113891764710-773135295142658048-1",
# "trade_partition":"USDT",
# }
# ],
# "remain_size": 15,
# "next_id": 2424413094
# },
# "ts": 1604372202243
# }
#
trades = self.safe_value(response, 'data')
if not isinstance(trades, list):
trades = self.safe_value(trades, 'trades')
return self.parse_trades(trades, market, since, limit)
def fetch_trades(self, symbol, since=None, limit=1000, params={}):
self.load_markets()
market = self.market(symbol)
request = {
# 'symbol': market['id'], # spot, future
# 'contract_code': market['id'], # swap
}
fieldName = 'symbol'
method = 'spotPublicGetMarketHistoryTrade'
if market['future']:
if market['inverse']:
method = 'contractPublicGetMarketHistoryTrade'
elif market['linear']:
method = 'contractPublicGetLinearSwapExMarketHistoryTrade'
fieldName = 'contract_code'
elif market['swap']:
if market['inverse']:
method = 'contractPublicGetSwapExMarketHistoryTrade'
elif market['linear']:
method = 'contractPublicGetLinearSwapExMarketHistoryTrade'
fieldName = 'contract_code'
request[fieldName] = market['id']
if limit is not None:
request['size'] = limit # max 2000
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.trade.detail",
# "ts": 1583497692365,
# "data": [
# {
# "id": 105005170342,
# "ts": 1583497692182,
# "data": [
# {
# "amount": 0.010411000000000000,
# "trade-id": 102090736910,
# "ts": 1583497692182,
# "id": 10500517034273194594947,
# "price": 9096.050000000000000000,
# "direction": "sell"
# }
# ]
# },
# # ...
# ]
# }
#
data = self.safe_value(response, 'data')
result = []
for i in range(0, len(data)):
trades = self.safe_value(data[i], 'data', [])
for j in range(0, len(trades)):
trade = self.parse_trade(trades[j], market)
result.append(trade)
result = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "amount":1.2082,
# "open":0.025096,
# "close":0.025095,
# "high":0.025096,
# "id":1591515300,
# "count":6,
# "low":0.025095,
# "vol":0.0303205097
# }
#
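        # note: 'id' is the candle open time in seconds and 'amount'(base volume) is used as the OHLCV volume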
return [
self.safe_timestamp(ohlcv, 'id'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'amount'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'period': self.timeframes[timeframe],
# 'symbol': market['id'], # spot, future
# 'contract_code': market['id'], # swap
# 'size': 1000, # max 1000 for spot, 2000 for contracts
# 'from': int(since / 1000), spot only
# 'to': self.seconds(), spot only
}
fieldName = 'symbol'
price = self.safe_string(params, 'price')
params = self.omit(params, 'price')
method = 'spotPublicGetMarketHistoryCandles'
if market['spot']:
if since is not None:
request['from'] = int(since / 1000)
if limit is not None:
request['size'] = limit # max 2000
elif market['future']:
if market['inverse']:
if price == 'mark':
method = 'contractPublicGetIndexMarketHistoryMarkPriceKline'
elif price == 'index':
method = 'contractPublicGetIndexMarketHistoryIndex'
elif price == 'premiumIndex':
raise BadRequest(self.id + ' ' + market['type'] + ' has no api endpoint for ' + price + ' kline data')
else:
method = 'contractPublicGetMarketHistoryKline'
elif market['linear']:
if price == 'mark':
method = 'contractPublicGetIndexMarketHistoryLinearSwapMarkPriceKline'
elif price == 'index':
raise BadRequest(self.id + ' ' + market['type'] + ' has no api endpoint for ' + price + ' kline data')
elif price == 'premiumIndex':
method = 'contractPublicGetIndexMarketHistoryLinearSwapPremiumIndexKline'
else:
method = 'contractPublicGetLinearSwapExMarketHistoryKline'
fieldName = 'contract_code'
elif market['swap']:
if market['inverse']:
if price == 'mark':
method = 'contractPublicGetIndexMarketHistorySwapMarkPriceKline'
elif price == 'index':
raise BadRequest(self.id + ' ' + market['type'] + ' has no api endpoint for ' + price + ' kline data')
elif price == 'premiumIndex':
method = 'contractPublicGetIndexMarketHistorySwapPremiumIndexKline'
else:
method = 'contractPublicGetSwapExMarketHistoryKline'
elif market['linear']:
if price == 'mark':
method = 'contractPublicGetIndexMarketHistoryLinearSwapMarkPriceKline'
elif price == 'index':
raise BadRequest(self.id + ' ' + market['type'] + ' has no api endpoint for ' + price + ' kline data')
elif price == 'premiumIndex':
method = 'contractPublicGetIndexMarketHistoryLinearSwapPremiumIndexKline'
else:
method = 'contractPublicGetLinearSwapExMarketHistoryKline'
fieldName = 'contract_code'
if market['contract']:
if limit is None:
limit = 2000
if price is None:
duration = self.parse_timeframe(timeframe)
if since is None:
now = self.seconds()
request['from'] = now - duration * (limit - 1)
request['to'] = now
else:
start = int(since / 1000)
request['from'] = start
request['to'] = self.sum(start, duration * (limit - 1))
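                    # e.g. for a '1m' timeframe with limit=2000 the requested window spans duration * (limit - 1) = 60 * 1999 = 119940 seconds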
request[fieldName] = market['id']
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status":"ok",
# "ch":"market.ethbtc.kline.1min",
# "ts":1591515374371,
# "data":[
# {"amount":0.0,"open":0.025095,"close":0.025095,"high":0.025095,"id":1591515360,"count":0,"low":0.025095,"vol":0.0},
# {"amount":1.2082,"open":0.025096,"close":0.025095,"high":0.025096,"id":1591515300,"count":6,"low":0.025095,"vol":0.0303205097},
# {"amount":0.0648,"open":0.025096,"close":0.025096,"high":0.025096,"id":1591515240,"count":2,"low":0.025096,"vol":0.0016262208},
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'index',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'mark',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def fetch_premium_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'premiumIndex',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def fetch_accounts(self, params={}):
self.load_markets()
response = self.spotPrivateGetV1AccountAccounts(params)
#
# {
# "status":"ok",
# "data":[
# {"id":5202591,"type":"point","subtype":"","state":"working"},
# {"id":1528640,"type":"spot","subtype":"","state":"working"},
# ]
# }
#
return response['data']
def fetch_account_id_by_type(self, type, params={}):
accounts = self.load_accounts()
accountId = self.safe_value(params, 'account-id')
if accountId is not None:
return accountId
indexedAccounts = self.index_by(accounts, 'type')
defaultAccount = self.safe_value(accounts, 0, {})
account = self.safe_value(indexedAccounts, type, defaultAccount)
return self.safe_string(account, 'id')
def fetch_currencies(self, params={}):
response = self.spotPublicGetV2ReferenceCurrencies()
# {
# "code": 200,
# "data": [
# {
# "currency": "sxp",
# "assetType": "1",
# "chains": [
# {
# "chain": "sxp",
# "displayName": "ERC20",
# "baseChain": "ETH",
# "baseChainProtocol": "ERC20",
# "isDynamic": True,
# "numOfConfirmations": "12",
# "numOfFastConfirmations": "12",
# "depositStatus": "allowed",
# "minDepositAmt": "0.23",
# "withdrawStatus": "allowed",
# "minWithdrawAmt": "0.23",
# "withdrawPrecision": "8",
# "maxWithdrawAmt": "227000.000000000000000000",
# "withdrawQuotaPerDay": "227000.000000000000000000",
# "withdrawQuotaPerYear": null,
# "withdrawQuotaTotal": null,
# "withdrawFeeType": "fixed",
# "transactFeeWithdraw": "11.1653",
# "addrWithTag": False,
# "addrDepositTag": False
# }
# ],
# "instStatus": "normal"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
result = {}
for i in range(0, len(data)):
entry = data[i]
currencyId = self.safe_string(entry, 'currency')
code = self.safe_currency_code(currencyId)
chains = self.safe_value(entry, 'chains', [])
networks = {}
instStatus = self.safe_string(entry, 'instStatus')
currencyActive = instStatus == 'normal'
fee = None
minPrecision = None
minWithdraw = None
maxWithdraw = None
deposit = None
withdraw = None
for j in range(0, len(chains)):
chain = chains[j]
networkId = self.safe_string(chain, 'chain')
baseChainProtocol = self.safe_string(chain, 'baseChainProtocol')
huobiToken = 'h' + currencyId
if baseChainProtocol is None:
if huobiToken == networkId:
baseChainProtocol = 'ERC20'
else:
baseChainProtocol = self.safe_string(chain, 'displayName')
network = self.safe_network(baseChainProtocol)
minWithdraw = self.safe_number(chain, 'minWithdrawAmt')
maxWithdraw = self.safe_number(chain, 'maxWithdrawAmt')
withdrawStatus = self.safe_string(chain, 'withdrawStatus')
depositStatus = self.safe_string(chain, 'depositStatus')
withdrawEnabled = (withdrawStatus == 'allowed')
depositEnabled = (depositStatus == 'allowed')
active = withdrawEnabled and depositEnabled
precision = self.safe_string(chain, 'withdrawPrecision')
if precision is not None:
precision = self.parse_number('1e-' + precision)
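                # keep a single representative precision across chains; max() retains the largest tick size(the coarsest precision)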
minPrecision = precision if (minPrecision is None) else max(precision, minPrecision)
if withdrawEnabled and not withdraw:
withdraw = True
elif not withdrawEnabled:
withdraw = False
if depositEnabled and not deposit:
deposit = True
elif not depositEnabled:
deposit = False
fee = self.safe_number(chain, 'transactFeeWithdraw')
networks[network] = {
'info': chain,
'id': networkId,
'network': network,
'limits': {
'withdraw': {
'min': minWithdraw,
'max': maxWithdraw,
},
},
'active': active,
'deposit': depositEnabled,
'withdraw': withdrawEnabled,
'fee': fee,
'precision': precision,
}
networksKeys = list(networks.keys())
networkLength = len(networksKeys)
result[code] = {
'info': entry,
'code': code,
'id': currencyId,
'active': currencyActive,
'deposit': deposit,
'withdraw': withdraw,
'fee': fee if (networkLength <= 1) else None,
'name': None,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': minWithdraw if (networkLength <= 1) else None,
'max': maxWithdraw if (networkLength <= 1) else None,
},
},
'precision': minPrecision,
'networks': networks,
}
return result
def fetch_balance(self, params={}):
self.load_markets()
options = self.safe_value(self.options, 'fetchTickers', {})
defaultType = self.safe_string(self.options, 'defaultType', 'spot')
type = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', type)
params = self.omit(params, 'type')
request = {}
method = None
spot = (type == 'spot')
future = (type == 'future')
swap = (type == 'swap')
defaultSubType = self.safe_string(self.options, 'defaultSubType', 'inverse')
subType = self.safe_string(options, 'subType', defaultSubType)
subType = self.safe_string(params, 'subType', subType)
inverse = (subType == 'inverse')
linear = (subType == 'linear')
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', 'isolated')
isolated = (marginType == 'isolated')
cross = (marginType == 'cross')
if spot:
self.load_accounts()
accountId = self.fetch_account_id_by_type(type, params)
request['account-id'] = accountId
method = 'spotPrivateGetV1AccountAccountsAccountIdBalance'
elif linear:
method = 'contractPrivatePostLinearSwapApiV1SwapCrossAccountInfo'
elif inverse:
if future:
method = 'contractPrivatePostApiV1ContractAccountInfo'
elif swap:
method = 'contractPrivatePostSwapApiV1SwapAccountInfo'
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status":"ok",
# "data":{
# "id":1528640,
# "type":"spot",
# "state":"working",
# "list":[
# {"currency":"lun","type":"trade","balance":"0","seq-num":"0"},
# {"currency":"lun","type":"frozen","balance":"0","seq-num":"0"},
# {"currency":"ht","type":"frozen","balance":"0","seq-num":"145"},
# ]
# },
# "ts":1637644827566
# }
#
# future, swap isolated
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"BTC",
# "margin_balance":0,
# "margin_position":0E-18,
# "margin_frozen":0,
# "margin_available":0E-18,
# "profit_real":0,
# "profit_unreal":0,
# "risk_rate":null,
# "withdraw_available":0,
# "liquidation_price":null,
# "lever_rate":5,
# "adjust_factor":0.025000000000000000,
# "margin_static":0,
# "is_debit":0, # future only
# "contract_code":"BTC-USD", # swap only
# "margin_asset":"USDT", # linear only
# "margin_mode":"isolated", # linear only
# "margin_account":"BTC-USDT" # linear only
# "transfer_profit_ratio":null # inverse only
# },
# ],
# "ts":1637644827566
# }
#
# linear cross futures and linear cross swap
#
# {
# "status":"ok",
# "data":[
# {
# "futures_contract_detail":[
# {
# "symbol":"ETH",
# "contract_code":"ETH-USDT-220325",
# "margin_position":0,
# "margin_frozen":0,
# "margin_available":200.000000000000000000,
# "profit_unreal":0E-18,
# "liquidation_price":null,
# "lever_rate":5,
# "adjust_factor":0.060000000000000000,
# "contract_type":"quarter",
# "pair":"ETH-USDT",
# "business_type":"futures"
# },
# ],
# "margin_mode":"cross",
# "margin_account":"USDT",
# "margin_asset":"USDT",
# "margin_balance":200.000000000000000000,
# "margin_static":200.000000000000000000,
# "margin_position":0,
# "margin_frozen":0,
# "profit_real":0E-18,
# "profit_unreal":0,
# "withdraw_available":2E+2,
# "risk_rate":null,
# "contract_detail":[
# {
# "symbol":"MANA",
# "contract_code":"MANA-USDT",
# "margin_position":0,
# "margin_frozen":0,
# "margin_available":200.000000000000000000,
# "profit_unreal":0E-18,
# "liquidation_price":null,
# "lever_rate":5,
# "adjust_factor":0.100000000000000000,
# "contract_type":"swap",
# "pair":"MANA-USDT",
# "business_type":"swap"
# },
# ]
# }
# ],
# "ts":1640915104870
# }
#
result = {'info': response}
data = self.safe_value(response, 'data')
if spot:
balances = self.safe_value(data, 'list', [])
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = None
if code in result:
account = result[code]
else:
account = self.account()
if balance['type'] == 'trade':
account['free'] = self.safe_string(balance, 'balance')
if balance['type'] == 'frozen':
account['used'] = self.safe_string(balance, 'balance')
result[code] = account
elif linear:
first = self.safe_value(data, 0, {})
if cross:
account = self.account()
account['free'] = self.safe_string(first, 'margin_balance', 'margin_available')
account['used'] = self.safe_string(first, 'margin_frozen')
currencyId = self.safe_string_2(first, 'margin_asset', 'symbol')
code = self.safe_currency_code(currencyId)
result[code] = account
elif isolated:
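                # isolated margin balances are keyed by market symbol, one sub-balance per contract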
fieldName = 'futures_contract_detail' if future else 'contract_detail'
balances = self.safe_value(first, fieldName, [])
for i in range(0, len(balances)):
balance = balances[i]
marketId = self.safe_string_2(balance, 'contract_code', 'margin_account')
market = self.safe_market(marketId)
account = self.account()
account['free'] = self.safe_string(balance, 'margin_balance')
account['used'] = self.safe_string(balance, 'margin_frozen')
code = market['settle']
accountsByCode = {}
accountsByCode[code] = account
symbol = market['symbol']
result[symbol] = self.safe_balance(accountsByCode)
return result
elif inverse:
for i in range(0, len(data)):
balance = data[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'margin_available')
account['used'] = self.safe_string(balance, 'margin_frozen')
result[code] = account
return self.safe_balance(result)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('fetchOrder', None, params)
request = {
# spot -----------------------------------------------------------
# 'order-id': 'id',
# 'symbol': market['id'],
# 'client-order-id': clientOrderId,
# 'clientOrderId': clientOrderId,
# contracts ------------------------------------------------------
# 'order_id': id,
# 'client_order_id': clientOrderId,
# 'contract_code': market['id'],
# 'pair': 'BTC-USDT',
            # 'contract_type': 'this_week', # swap, this_week, next_week, quarter, next_quarter
}
method = None
market = None
if marketType == 'spot':
clientOrderId = self.safe_string(params, 'clientOrderId')
method = 'spotPrivateGetV1OrderOrdersOrderId'
if clientOrderId is not None:
method = 'spotPrivateGetV1OrderOrdersGetClientOrder'
# will be filled below in self.extend()
# they expect clientOrderId instead of client-order-id
# request['clientOrderId'] = clientOrderId
else:
request['order-id'] = id
else:
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol for ' + marketType + ' orders')
market = self.market(symbol)
request['contract_code'] = market['id']
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapOrderInfo'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossOrderInfo'
elif market['inverse']:
if marketType == 'future':
method = 'contractPrivatePostApiV1ContractOrderInfo'
request['symbol'] = market['settleId']
elif marketType == 'swap':
method = 'contractPrivatePostSwapApiV1SwapOrderInfo'
else:
raise NotSupported(self.id + ' fetchOrder() does not support ' + marketType + ' markets')
clientOrderId = self.safe_string_2(params, 'client_order_id', 'clientOrderId')
if clientOrderId is None:
request['order_id'] = id
else:
request['client_order_id'] = clientOrderId
params = self.omit(params, ['client_order_id', 'clientOrderId'])
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status":"ok",
# "data":{
# "id":438398393065481,
# "symbol":"ethusdt",
# "account-id":1528640,
# "client-order-id":"AA03022abc2163433e-006b-480e-9ad1-d4781478c5e7",
# "amount":"0.100000000000000000",
# "price":"3000.000000000000000000",
# "created-at":1640549994642,
# "type":"buy-limit",
# "field-amount":"0.0",
# "field-cash-amount":"0.0",
# "field-fees":"0.0",
# "finished-at":0,
# "source":"spot-api",
# "state":"submitted",
# "canceled-at":0
# }
# }
#
# linear swap cross margin
#
# {
# "status":"ok",
# "data":[
# {
# "business_type":"swap",
# "contract_type":"swap",
# "pair":"BTC-USDT",
# "symbol":"BTC",
# "contract_code":"BTC-USDT",
# "volume":1,
# "price":3000,
# "order_price_type":"limit",
# "order_type":1,
# "direction":"buy",
# "offset":"open",
# "lever_rate":1,
# "order_id":924912513206878210,
# "client_order_id":null,
# "created_at":1640557927189,
# "trade_volume":0,
# "trade_turnover":0,
# "fee":0,
# "trade_avg_price":null,
# "margin_frozen":3.000000000000000000,
# "profit":0,
# "status":3,
# "order_source":"api",
# "order_id_str":"924912513206878210",
# "fee_asset":"USDT",
# "liquidation_type":"0",
# "canceled_at":0,
# "margin_asset":"USDT",
# "margin_account":"USDT",
# "margin_mode":"cross",
# "is_tpsl":0,
# "real_profit":0
# }
# ],
# "ts":1640557982556
# }
#
# linear swap isolated margin detail
#
# {
# "status": "ok",
# "data": {
# "symbol": "BTC",
# "contract_code": "BTC-USDT",
# "instrument_price": 0,
# "final_interest": 0,
# "adjust_value": 0,
# "lever_rate": 10,
# "direction": "sell",
# "offset": "open",
# "volume": 1.000000000000000000,
# "price": 13059.800000000000000000,
# "created_at": 1603703614712,
# "canceled_at": 0,
# "order_source": "api",
# "order_price_type": "opponent",
# "margin_frozen": 0,
# "profit": 0,
# "trades": [
# {
# "trade_id": 131560927,
# "trade_price": 13059.800000000000000000,
# "trade_volume": 1.000000000000000000,
# "trade_turnover": 13.059800000000000000,
# "trade_fee": -0.005223920000000000,
# "created_at": 1603703614715,
# "role": "taker",
# "fee_asset": "USDT",
# "profit": 0,
# "real_profit": 0,
# "id": "131560927-770334322963152896-1"
# }
# ],
# "total_page": 1,
# "current_page": 1,
# "total_size": 1,
# "liquidation_type": "0",
# "fee_asset": "USDT",
# "fee": -0.005223920000000000,
# "order_id": 770334322963152896,
# "order_id_str": "770334322963152896",
# "client_order_id": 57012021045,
# "order_type": "1",
# "status": 6,
# "trade_avg_price": 13059.800000000000000000,
# "trade_turnover": 13.059800000000000000,
# "trade_volume": 1.000000000000000000,
# "margin_asset": "USDT",
# "margin_mode": "isolated",
# "margin_account": "BTC-USDT",
# "real_profit": 0,
# "is_tpsl": 0
# },
# "ts": 1603703678477
# }
order = self.safe_value(response, 'data')
if isinstance(order, list):
order = self.safe_value(order, 0)
return self.parse_order(order)
def fetch_spot_orders_by_states(self, states, symbol=None, since=None, limit=None, params={}):
method = self.safe_string(self.options, 'fetchOrdersByStatesMethod', 'spot_private_get_v1_order_orders') # spot_private_get_v1_order_history
if method == 'spot_private_get_v1_order_orders':
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
self.load_markets()
market = None
request = {
# spot_private_get_v1_order_orders GET /v1/order/orders ----------
# 'symbol': market['id'], # required
# 'types': 'buy-market,sell-market,buy-limit,sell-limit,buy-ioc,sell-ioc,buy-stop-limit,sell-stop-limit,buy-limit-fok,sell-limit-fok,buy-stop-limit-fok,sell-stop-limit-fok',
# 'start-time': since, # max window of 48h within a range of 180 days, within past 2 hours for cancelled orders
# 'end-time': self.milliseconds(),
'states': states, # filled, partial-canceled, canceled
# 'from': order['id'],
# 'direct': 'next', # next, prev, used with from
# 'size': 100, # max 100
# spot_private_get_v1_order_history GET /v1/order/history --------
# 'symbol': market['id'], # optional
# 'start-time': since, # max window of 48h within a range of 180 days, within past 2 hours for cancelled orders
# 'end-time': self.milliseconds(),
# 'direct': 'next', # next, prev, used with from
# 'size': 100, # max 100
}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['start-time'] = since # a window of 48 hours within 180 days
request['end-time'] = self.sum(since, 48 * 60 * 60 * 1000)
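# illustrative arithmetic for the 48 hour window above (values are examples only):
# since = 1640000000000 -> 'end-time' = 1640000000000 + 48 * 60 * 60 * 1000 = 1640172800000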
if limit is not None:
request['size'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# spot_private_get_v1_order_orders GET /v1/order/orders
#
# {
# status: "ok",
# data: [
# {
# id: 13997833014,
# symbol: "ethbtc",
# 'account-id': 3398321,
# 'client-order-id': "23456",
# amount: "0.045000000000000000",
# price: "0.034014000000000000",
# 'created-at': 1545836976871,
# type: "sell-limit",
# 'field-amount': "0.045000000000000000",
# 'field-cash-amount': "0.001530630000000000",
# 'field-fees': "0.000003061260000000",
# 'finished-at': 1545837948214,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market, since, limit)
def fetch_spot_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_spot_orders_by_states('pre-submitted,submitted,partial-filled,filled,partial-canceled,canceled', symbol, since, limit, params)
def fetch_closed_spot_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_spot_orders_by_states('filled,partial-canceled,canceled', symbol, since, limit, params)
def fetch_contract_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchContractOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
marketType = None
marketType, params = self.handle_market_type_and_params('fetchOrders', market, params)
request = {
# POST /api/v1/contract_hisorders inverse futures ----------------
# 'symbol': market['settleId'], # BTC, ETH, ...
# 'order_type': '1', # 1 limit,3 opponent,4 lightning, 5 trigger order, 6 pst_only, 7 optimal_5, 8 optimal_10, 9 optimal_20, 10 fok, 11 ioc
# POST /swap-api/v1/swap_hisorders inverse swap ------------------
# POST /linear-swap-api/v1/swap_hisorders linear isolated --------
# POST /linear-swap-api/v1/swap_cross_hisorders linear cross -----
'contract_code': market['id'],
'trade_type': 0, # 0 all, 1 buy long, 2 sell short, 3 buy short, 4 sell long, 5 sell liquidation, 6 buy liquidation, 7 Delivery long, 8 Delivery short 11 reduce positions to close long, 12 reduce positions to close short
'type': 1, # 1 all orders, 2 finished orders
'status': '0', # comma separated, 0 all, 3 submitted orders, 4 partially matched, 5 partially cancelled, 6 fully matched and closed, 7 canceled
'create_date': 90, # in days?
# 'page_index': 1,
# 'page_size': limit, # default 20, max 50
# 'sort_by': 'create_date', # create_date descending, update_time descending
}
method = None
request['contract_code'] = market['id']
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
method = self.get_supported_mapping(marginType, {
'isolated': 'contractPrivatePostLinearSwapApiV1SwapHisorders',
'cross': 'contractPrivatePostLinearSwapApiV1SwapCrossHisorders',
})
elif market['inverse']:
method = self.get_supported_mapping(marketType, {
'future': 'contractPrivatePostApiV1ContractHisorders',
'swap': 'contractPrivatePostSwapApiV1SwapHisorders',
})
if marketType == 'future':
request['symbol'] = market['settleId']
if limit is not None:
request['page_size'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status": "ok",
# "data": {
# "orders": [
# {
# "order_id": 773131315209248768,
# "contract_code": "ADA201225",
# "symbol": "ADA",
# "lever_rate": 20,
# "direction": "buy",
# "offset": "close",
# "volume": 1,
# "price": 0.0925,
# "create_date": 1604370469629,
# "update_time": 1603704221118,
# "order_source": "web",
# "order_price_type": 6,
# "order_type": 1,
# "margin_frozen": 0,
# "profit": 0,
# "contract_type": "quarter",
# "trade_volume": 0,
# "trade_turnover": 0,
# "fee": 0,
# "trade_avg_price": 0,
# "status": 3,
# "order_id_str": "773131315209248768",
# "fee_asset": "ADA",
# "liquidation_type": "0",
# "is_tpsl": 0,
# "real_profit": 0
# "pair": "BTC-USDT",
# "business_type": "futures",
# "margin_asset": "USDT",
# "margin_mode": "cross",
# "margin_account": "USDT",
# }
# ],
# "total_page": 19,
# "current_page": 1,
# "total_size": 19
# },
# "ts": 1604370617322
# }
#
data = self.safe_value(response, 'data', {})
orders = self.safe_value(data, 'orders', [])
return self.parse_orders(orders, market, since, limit)
def fetch_closed_contract_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'status': '5,6,7', # comma separated, 0 all, 3 submitted orders, 4 partially matched, 5 partially cancelled, 6 fully matched and closed, 7 canceled
}
return self.fetch_contract_orders(symbol, since, limit, self.extend(request, params))
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('fetchOrders', None, params)
method = self.get_supported_mapping(marketType, {
'spot': 'fetchSpotOrders',
'swap': 'fetchContractOrders',
'future': 'fetchContractOrders',
})
if method is None:
raise NotSupported(self.id + ' fetchOrders does not support ' + marketType + ' markets yet')
contract = (marketType == 'swap') or (marketType == 'future')
if contract and (symbol is None):
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument for ' + marketType + ' orders')
return getattr(self, method)(symbol, since, limit, params)
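#
# a hedged usage sketch for fetchOrders, not part of this class - the exchange
# constructor name, credentials, symbols and the 'type' param below are
# illustrative assumptions:
#
#     import ccxt
#     exchange = ccxt.huobi({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
#     spot_orders = exchange.fetch_orders('BTC/USDT', params={'type': 'spot'})
#     swap_orders = exchange.fetch_orders('BTC/USDT:USDT', params={'type': 'swap'})
#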
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('fetchClosedOrders', None, params)
method = self.get_supported_mapping(marketType, {
'spot': 'fetchClosedSpotOrders',
'swap': 'fetchClosedContractOrders',
'future': 'fetchClosedContractOrders',
})
if method is None:
raise NotSupported(self.id + ' fetchClosedOrders does not support ' + marketType + ' markets yet')
return getattr(self, method)(symbol, since, limit, params)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('fetchOpenOrders', None, params)
request = {
# spot -----------------------------------------------------------
# 'account-id': account['id'],
# 'symbol': market['id'],
# 'side': 'buy', # buy, sell
# 'from': 'id', # order id to begin with
# 'direct': 'prev', # prev, next, mandatory if from is defined
# 'size': 100, # default 100, max 500
# futures --------------------------------------------------------
# 'symbol': market['settleId'],
# 'page_index': 1, # default 1
# 'page_size': limit, # default 20, max 50
# 'sort_by': 'created_at', # created_at, update_time, descending sorting field
# 'trade_type': 0, # 0 all, 1 buy long, 2 sell short, 3 buy short, 4 sell long
}
method = None
market = None
if marketType == 'spot':
method = 'spotPrivateGetV1OrderOpenOrders'
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
# todo replace with fetchAccountIdByType
accountId = self.safe_string(params, 'account-id')
if accountId is None:
# pick the first account
self.load_accounts()
for i in range(0, len(self.accounts)):
account = self.accounts[i]
if account['type'] == 'spot':
accountId = self.safe_string(account, 'id')
if accountId is not None:
break
request['account-id'] = accountId
if limit is not None:
request['size'] = limit
params = self.omit(params, 'account-id')
else:
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol for ' + marketType + ' orders')
market = self.market(symbol)
request['contract_code'] = market['id']
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapOpenorders'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossOpenorders'
elif market['inverse']:
if market['future']:
method = 'contractPrivatePostApiV1ContractOpenorders'
request['symbol'] = market['settleId']
elif market['swap']:
method = 'contractPrivatePostSwapApiV1SwapOpenorders'
if limit is not None:
request['page_size'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"ethusdt",
# "source":"api",
# "amount":"0.010000000000000000",
# "account-id":1528640,
# "created-at":1561597491963,
# "price":"400.000000000000000000",
# "filled-amount":"0.0",
# "filled-cash-amount":"0.0",
# "filled-fees":"0.0",
# "id":38477101630,
# "state":"submitted",
# "type":"sell-limit"
# }
# ]
# }
#
# futures
#
# {
# "status": "ok",
# "data": {
# "orders": [
# {
# "symbol": "ADA",
# "contract_code": "ADA201225",
# "contract_type": "quarter",
# "volume": 1,
# "price": 0.0925,
# "order_price_type": "post_only",
# "order_type": 1,
# "direction": "buy",
# "offset": "close",
# "lever_rate": 20,
# "order_id": 773131315209248768,
# "client_order_id": null,
# "created_at": 1604370469629,
# "trade_volume": 0,
# "trade_turnover": 0,
# "fee": 0,
# "trade_avg_price": null,
# "margin_frozen": 0,
# "profit": 0,
# "status": 3,
# "order_source": "web",
# "order_id_str": "773131315209248768",
# "fee_asset": "ADA",
# "liquidation_type": null,
# "canceled_at": null,
# "is_tpsl": 0,
# "update_time": 1606975980467,
# "real_profit": 0
# }
# ],
# "total_page": 1,
# "current_page": 1,
# "total_size": 1
# },
# "ts": 1604370488518
# }
#
orders = self.safe_value(response, 'data')
if not isinstance(orders, list):
orders = self.safe_value(orders, 'orders', [])
return self.parse_orders(orders, market, since, limit)
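#
# a minimal usage sketch for fetchOpenOrders (symbols illustrative): for spot
# markets the 'account-id' param is optional and defaults to the first spot
# account returned by loadAccounts, for contract markets a symbol is required
#
#     open_spot = exchange.fetch_open_orders('ETH/USDT', params={'type': 'spot'})
#     open_swap = exchange.fetch_open_orders('BTC/USDT:USDT', params={'type': 'swap'})
#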
def parse_order_status(self, status):
statuses = {
# spot
'partial-filled': 'open',
'partial-canceled': 'canceled',
'filled': 'closed',
'canceled': 'canceled',
'submitted': 'open',
'created': 'open', # For stop orders
# contract
'1': 'open',
'2': 'open',
'3': 'open',
'4': 'open',
'5': 'canceled', # partially canceled
'6': 'closed',
'7': 'canceled',
'11': 'canceling',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# spot
#
# {
# id: 13997833014,
# symbol: "ethbtc",
# 'account-id': 3398321,
# amount: "0.045000000000000000",
# price: "0.034014000000000000",
# 'created-at': 1545836976871,
# type: "sell-limit",
# 'field-amount': "0.045000000000000000", # they have fixed it for filled-amount
# 'field-cash-amount': "0.001530630000000000", # they have fixed it for filled-cash-amount
# 'field-fees': "0.000003061260000000", # they have fixed it for filled-fees
# 'finished-at': 1545837948214,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0
# }
#
# {
# id: 20395337822,
# symbol: "ethbtc",
# 'account-id': 5685075,
# amount: "0.001000000000000000",
# price: "0.0",
# 'created-at': 1545831584023,
# type: "buy-market",
# 'field-amount': "0.029100000000000000", # they have fixed it for filled-amount
# 'field-cash-amount': "0.000999788700000000", # they have fixed it for filled-cash-amount
# 'field-fees': "0.000058200000000000", # they have fixed it for filled-fees
# 'finished-at': 1545831584181,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0
# }
#
# linear swap cross margin createOrder
#
# {
# "order_id":924660854912552960,
# "order_id_str":"924660854912552960"
# }
#
# contracts fetchOrder
#
# {
# "business_type":"swap",
# "contract_type":"swap",
# "pair":"BTC-USDT",
# "symbol":"BTC",
# "contract_code":"BTC-USDT",
# "volume":1,
# "price":3000,
# "order_price_type":"limit",
# "order_type":1,
# "direction":"buy",
# "offset":"open",
# "lever_rate":1,
# "order_id":924912513206878210,
# "client_order_id":null,
# "created_at":1640557927189,
# "trade_volume":0,
# "trade_turnover":0,
# "fee":0,
# "trade_avg_price":null,
# "margin_frozen":3.000000000000000000,
# "profit":0,
# "status":3,
# "order_source":"api",
# "order_id_str":"924912513206878210",
# "fee_asset":"USDT",
# "liquidation_type":"0",
# "canceled_at":0,
# "margin_asset":"USDT",
# "margin_account":"USDT",
# "margin_mode":"cross",
# "is_tpsl":0,
# "real_profit":0
# }
#
# contracts fetchOrder detailed
#
# {
# "status": "ok",
# "data": {
# "symbol": "BTC",
# "contract_code": "BTC-USDT",
# "instrument_price": 0,
# "final_interest": 0,
# "adjust_value": 0,
# "lever_rate": 10,
# "direction": "sell",
# "offset": "open",
# "volume": 1.000000000000000000,
# "price": 13059.800000000000000000,
# "created_at": 1603703614712,
# "canceled_at": 0,
# "order_source": "api",
# "order_price_type": "opponent",
# "margin_frozen": 0,
# "profit": 0,
# "trades": [
# {
# "trade_id": 131560927,
# "trade_price": 13059.800000000000000000,
# "trade_volume": 1.000000000000000000,
# "trade_turnover": 13.059800000000000000,
# "trade_fee": -0.005223920000000000,
# "created_at": 1603703614715,
# "role": "taker",
# "fee_asset": "USDT",
# "profit": 0,
# "real_profit": 0,
# "id": "131560927-770334322963152896-1"
# }
# ],
# "total_page": 1,
# "current_page": 1,
# "total_size": 1,
# "liquidation_type": "0",
# "fee_asset": "USDT",
# "fee": -0.005223920000000000,
# "order_id": 770334322963152896,
# "order_id_str": "770334322963152896",
# "client_order_id": 57012021045,
# "order_type": "1",
# "status": 6,
# "trade_avg_price": 13059.800000000000000000,
# "trade_turnover": 13.059800000000000000,
# "trade_volume": 1.000000000000000000,
# "margin_asset": "USDT",
# "margin_mode": "isolated",
# "margin_account": "BTC-USDT",
# "real_profit": 0,
# "is_tpsl": 0
# },
# "ts": 1603703678477
# }
#
id = self.safe_string_2(order, 'id', 'order_id_str')
side = self.safe_string(order, 'direction')
type = self.safe_string(order, 'order_price_type')
if 'type' in order:
orderType = order['type'].split('-')
side = orderType[0]
type = orderType[1]
status = self.parse_order_status(self.safe_string_2(order, 'state', 'status'))
marketId = self.safe_string_2(order, 'contract_code', 'symbol')
market = self.safe_market(marketId, market)
timestamp = self.safe_integer_2(order, 'created_at', 'created-at')
clientOrderId = self.safe_string_2(order, 'client_order_id', 'client-order-id')
amount = self.safe_string_2(order, 'volume', 'amount')
filled = self.safe_string_2(order, 'filled-amount', 'field-amount') # typo in their API, filled amount
filled = self.safe_string(order, 'trade_volume', filled)
price = self.safe_string(order, 'price')
cost = self.safe_string_2(order, 'filled-cash-amount', 'field-cash-amount') # same typo
cost = self.safe_string(order, 'trade_turnover', cost)
feeCost = self.safe_string_2(order, 'filled-fees', 'field-fees')  # typo in their API, filled fees
feeCost = self.safe_string(order, 'fee', feeCost)
fee = None
if feeCost is not None:
feeCurrency = None
feeCurrencyId = self.safe_string(order, 'fee_asset')
if feeCurrencyId is not None:
feeCurrency = self.safe_currency_code(feeCurrencyId)
else:
feeCurrency = market['quote'] if (side == 'sell') else market['base']
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
stopPrice = self.safe_string(order, 'stop-price')
average = self.safe_string(order, 'trade_avg_price')
trades = self.safe_value(order, 'trades')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': market['symbol'],
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': None,
'status': status,
'fee': fee,
'trades': trades,
}, market)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
marketType, query = self.handle_market_type_and_params('createOrder', market, params)
method = self.get_supported_mapping(marketType, {
'spot': 'createSpotOrder',
'swap': 'createContractOrder',
'future': 'createContractOrder',
})
if method is None:
raise NotSupported(self.id + ' createOrder does not support ' + marketType + ' markets yet')
return getattr(self, method)(symbol, type, side, amount, price, query)
def create_spot_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
self.load_accounts()
market = self.market(symbol)
accountId = self.fetch_account_id_by_type(market['type'])
request = {
# spot -----------------------------------------------------------
'account-id': accountId,
'symbol': market['id'],
# 'type': side + '-' + type, # buy-market, sell-market, buy-limit, sell-limit, buy-ioc, sell-ioc, buy-limit-maker, sell-limit-maker, buy-stop-limit, sell-stop-limit, buy-limit-fok, sell-limit-fok, buy-stop-limit-fok, sell-stop-limit-fok
# 'amount': self.amount_to_precision(symbol, amount), # for buy market orders it's the order cost
# 'price': self.price_to_precision(symbol, price),
# 'source': 'spot-api', # optional, spot-api, margin-api = isolated margin, super-margin-api = cross margin, c2c-margin-api
# 'client-order-id': clientOrderId, # optional, max 64 chars, must be unique within 8 hours
# 'stop-price': self.price_to_precision(symbol, stopPrice), # trigger price for stop limit orders
# 'operator': 'gte', # gte, lte, trigger price condition
}
orderType = type.replace('buy-', '')
orderType = orderType.replace('sell-', '')
options = self.safe_value(self.options, market['type'], {})
stopPrice = self.safe_string_2(params, 'stopPrice', 'stop-price')
if stopPrice is None:
stopOrderTypes = self.safe_value(options, 'stopOrderTypes', {})
if orderType in stopOrderTypes:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice or a stop-price parameter for a stop order')
else:
stopOperator = self.safe_string(params, 'operator')
if stopOperator is None:
raise ArgumentsRequired(self.id + ' createOrder() requires an operator parameter "gte" or "lte" for a stop order')
params = self.omit(params, ['stopPrice', 'stop-price'])
request['stop-price'] = self.price_to_precision(symbol, stopPrice)
request['operator'] = stopOperator
if (orderType == 'limit') or (orderType == 'limit-fok'):
orderType = 'stop-' + orderType
elif (orderType != 'stop-limit') and (orderType != 'stop-limit-fok'):
raise NotSupported(self.id + ' createOrder() does not support ' + type + ' orders')
postOnly = self.safe_value(params, 'postOnly', False)
if postOnly:
orderType = 'limit-maker'
request['type'] = side + '-' + orderType
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client-order-id') # must be 64 chars max and unique within 24 hours
if clientOrderId is None:
broker = self.safe_value(self.options, 'broker', {})
brokerId = self.safe_string(broker, 'id')
request['client-order-id'] = brokerId + self.uuid()
else:
request['client-order-id'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'client-order-id', 'postOnly'])
if (orderType == 'market') and (side == 'buy'):
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " market buy order requires price argument to calculate cost(total amount of quote currency to spend for buying, amount * price). To switch off self warning exception and specify cost in the amount argument, set .options['createMarketBuyOrderRequiresPrice'] = False. Make sure you know what you're doing.")
else:
# despite that cost = amount * price is in quote currency and should have quote precision
# the exchange API requires the cost supplied in 'amount' to be of base precision
# more about it here:
# https://github.com/ccxt/ccxt/pull/4395
# https://github.com/ccxt/ccxt/issues/7611
# we use amountToPrecision here because the exchange requires cost in base precision
request['amount'] = self.cost_to_precision(symbol, float(amount) * float(price))
else:
request['amount'] = self.cost_to_precision(symbol, amount)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
limitOrderTypes = self.safe_value(options, 'limitOrderTypes', {})
if orderType in limitOrderTypes:
request['price'] = self.price_to_precision(symbol, price)
response = self.spotPrivatePostV1OrderOrdersPlace(self.extend(request, params))
#
# spot
#
# {"status":"ok","data":"438398393065481"}
#
id = self.safe_string(response, 'data')
return {
'info': response,
'id': id,
'timestamp': None,
'datetime': None,
'lastTradeTimestamp': None,
'status': None,
'symbol': None,
'type': None,
'side': None,
'price': None,
'amount': None,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
'clientOrderId': None,
'average': None,
}
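#
# a minimal sketch of spot order placement (values are illustrative, assuming an
# instantiated exchange): for market buys with the default
# createMarketBuyOrderRequiresPrice option the request 'amount' is the cost,
# i.e. cost_to_precision(amount * price)
#
#     exchange.create_order('ETH/USDT', 'limit', 'buy', 0.01, 400)
#     # market buy: 0.01 * 400 = 4.0 USDT is sent as the 'amount' of the request
#     exchange.create_order('ETH/USDT', 'market', 'buy', 0.01, 400)
#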
def create_contract_order(self, symbol, type, side, amount, price=None, params={}):
offset = self.safe_string(params, 'offset')
if offset is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a string offset parameter for contract orders, open or close')
stopPrice = self.safe_string(params, 'stopPrice')
if stopPrice is not None:
raise NotSupported(self.id + ' createOrder() supports tp_trigger_price + tp_order_price for take profit orders and/or sl_trigger_price + sl_order price for stop loss orders, stop orders are supported only with open long orders and open short orders')
market = self.market(symbol)
request = {
# 'symbol': 'BTC', # optional, case-insenstive, both uppercase and lowercase are supported, "BTC", "ETH", ...
# 'contract_type': 'this_week', # optional, this_week, next_week, quarter, next_quarter
'contract_code': market['id'], # optional BTC180914
# 'client_order_id': clientOrderId, # optional, must be less than 9223372036854775807
# 'price': self.price_to_precision(symbol, price), # optional
'volume': self.amount_to_precision(symbol, amount),
'direction': side, # buy, sell
'offset': offset, # open, close
#
# direction buy, offset open = open long
# direction sell, offset close = close long
# direction sell, offset open = open short
# direction buy, offset close = close short
#
'lever_rate': 1, # required, using leverage greater than 20x requires prior approval of high-leverage agreement
# 'order_price_type': 'limit', # required
#
# order_price_type can be:
#
# limit
# opponent # BBO
# post_only
# optimal_5
# optimal_10
# optimal_20
# ioc
# fok
# opponent_ioc # IOC order using the BBO price
# optimal_5_ioc
# optimal_10_ioc
# optimal_20_ioc
# opponent_fok # FOK order using the BBO price
# optimal_5_fok
# optimal_10_fok
# optimal_20_fok
#
# 'tp_trigger_price': self.price_to_precision(symbol, triggerPrice),
# 'tp_order_price': self.price_to_precision(symbol, price),
# 'tp_order_price_type': 'limit', # limit,optimal_5,optimal_10,optimal_20
# 'sl_trigger_price': self.price_to_precision(symbol, stopLossPrice),
# 'sl_order_price': self.price_to_precision(symbol, price),
# 'sl_order_price_type': 'limit', # limit,optimal_5,optimal_10,optimal_20
}
stopLossOrderPrice = self.safe_string(params, 'sl_order_price')
stopLossTriggerPrice = self.safe_string(params, 'sl_trigger_price')
takeProfitOrderPrice = self.safe_string(params, 'tp_order_price')
takeProfitTriggerPrice = self.safe_string(params, 'tp_trigger_price')
isOpenOrder = (offset == 'open')
isStopOrder = False
if stopLossTriggerPrice is not None:
request['sl_trigger_price'] = self.price_to_precision(symbol, stopLossTriggerPrice)
isStopOrder = True
if price is not None:
request['sl_order_price'] = self.price_to_precision(symbol, price)
if stopLossOrderPrice is not None:
request['sl_order_price'] = self.price_to_precision(symbol, stopLossOrderPrice)
isStopOrder = True
if takeProfitTriggerPrice is not None:
request['tp_trigger_price'] = self.price_to_precision(symbol, takeProfitTriggerPrice)
isStopOrder = True
if price is not None:
request['tp_order_price'] = self.price_to_precision(symbol, price)
if takeProfitOrderPrice is not None:
request['tp_order_price'] = self.price_to_precision(symbol, takeProfitOrderPrice)
isStopOrder = True
if isStopOrder and not isOpenOrder:
raise NotSupported(self.id + ' createOrder() supports tp_trigger_price + tp_order_price for take profit orders and/or sl_trigger_price + sl_order price for stop loss orders, stop orders are supported only with open long orders and open short orders')
params = self.omit(params, ['sl_order_price', 'sl_trigger_price', 'tp_order_price', 'tp_trigger_price'])
postOnly = self.safe_value(params, 'postOnly', False)
if postOnly:
type = 'post_only'
if type == 'limit' or type == 'ioc' or type == 'fok' or type == 'post_only':
request['price'] = self.price_to_precision(symbol, price)
request['order_price_type'] = type
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')  # optional, must be less than 9223372036854775807
if clientOrderId is None:
broker = self.safe_value(self.options, 'broker', {})
brokerId = self.safe_string(broker, 'id')
request['client_order_id'] = brokerId + self.uuid()
else:
request['client_order_id'] = clientOrderId
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'client_order_id'])
method = None
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapOrder'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossOrder'
elif market['inverse']:
if market['swap']:
method = 'contractPrivatePostSwapApiV1SwapOrder'
elif market['future']:
method = 'contractPrivatePostApiV1ContractOrder'
response = getattr(self, method)(self.extend(request, params))
#
# linear swap cross margin
#
# {
# "status":"ok",
# "data":{
# "order_id":924660854912552960,
# "order_id_str":"924660854912552960"
# },
# "ts":1640497927185
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_order(data, market)
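#
# a hedged sketch of a contract order (symbol format and numbers are
# illustrative): contract orders require an 'offset' of 'open' or 'close',
# and the tp_/sl_ trigger params are only accepted together with opening orders
#
#     exchange.create_order('BTC/USDT:USDT', 'limit', 'buy', 1, 30000, {
#         'offset': 'open',
#         'lever_rate': 1,
#         'tp_trigger_price': 35000,
#         'sl_trigger_price': 28000,
#     })
#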
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('cancelOrder', None, params)
request = {
# spot -----------------------------------------------------------
# 'order-id': 'id',
# 'symbol': market['id'],
# 'client-order-id': clientOrderId,
# contracts ------------------------------------------------------
# 'order_id': id,
# 'client_order_id': clientOrderId,
# 'contract_code': market['id'],
# 'pair': 'BTC-USDT',
# 'contract_type': 'this_week', # swap, this_week, next_week, quarter, next_quarter
}
method = None
market = None
if marketType == 'spot':
clientOrderId = self.safe_string_2(params, 'client-order-id', 'clientOrderId')
method = 'spotPrivatePostV1OrderOrdersOrderIdSubmitcancel'
if clientOrderId is None:
request['order-id'] = id
else:
request['client-order-id'] = clientOrderId
method = 'spotPrivatePostV1OrderOrdersSubmitCancelClientOrder'
params = self.omit(params, ['client-order-id', 'clientOrderId'])
else:
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol for ' + marketType + ' orders')
market = self.market(symbol)
request['contract_code'] = market['id']
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapCancel'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossCancel'
elif market['inverse']:
if market['future']:
method = 'contractPrivatePostApiV1ContractCancel'
request['symbol'] = market['settleId']
elif market['swap']:
method = 'contractPrivatePostSwapApiV1SwapCancel'
else:
raise NotSupported(self.id + ' cancelOrder() does not support ' + marketType + ' markets')
clientOrderId = self.safe_string_2(params, 'client_order_id', 'clientOrderId')
if clientOrderId is None:
request['order_id'] = id
else:
request['client_order_id'] = clientOrderId
params = self.omit(params, ['client_order_id', 'clientOrderId'])
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# 'status': 'ok',
# 'data': '10138899000',
# }
#
# linear swap cross margin
#
# {
# "status":"ok",
# "data":{
# "errors":[],
# "successes":"924660854912552960"
# },
# "ts":1640504486089
# }
#
return self.extend(self.parse_order(response, market), {
'id': id,
'status': 'canceled',
})
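#
# usage sketch (ids and symbols illustrative): spot orders can be canceled by
# exchange id or by 'client-order-id', contract orders always need a symbol
#
#     exchange.cancel_order('438398393065481', 'ETH/USDT', {'type': 'spot'})
#     exchange.cancel_order('924660854912552960', 'BTC/USDT:USDT', {'type': 'swap'})
#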
def cancel_orders(self, ids, symbol=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('cancelOrder', None, params)
request = {
# spot -----------------------------------------------------------
# 'order-ids': ','.join(ids), # max 50
# 'client-order-ids': ','.join(ids), # max 50
# contracts ------------------------------------------------------
# 'order_id': id, # comma separated, max 10
# 'client_order_id': clientOrderId, # comma separated, max 10
# 'contract_code': market['id'],
# 'symbol': market['settleId'],
}
method = None
if marketType == 'spot':
clientOrderIds = self.safe_value_2(params, 'client-order-id', 'clientOrderId')
clientOrderIds = self.safe_value_2(params, 'client-order-ids', 'clientOrderIds', clientOrderIds)
if clientOrderIds is None:
if isinstance(ids, basestring):
request['order-ids'] = ids
else:
request['order-ids'] = ','.join(ids)
else:
if isinstance(clientOrderIds, basestring):
request['client-order-ids'] = clientOrderIds
else:
request['client-order-ids'] = ','.join(clientOrderIds)
params = self.omit(params, ['client-order-id', 'client-order-ids', 'clientOrderId', 'clientOrderIds'])
method = 'spotPrivatePostV1OrderOrdersBatchcancel'
else:
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrders() requires a symbol for ' + marketType + ' orders')
market = self.market(symbol)
request['contract_code'] = market['id']
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapCancel'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossCancel'
elif market['inverse']:
if market['future']:
method = 'contractPrivatePostApiV1ContractCancel'
request['symbol'] = market['settleId']
elif market['swap']:
method = 'contractPrivatePostSwapApiV1SwapCancel'
else:
raise NotSupported(self.id + ' cancelOrders() does not support ' + marketType + ' markets')
clientOrderIds = self.safe_string_2(params, 'client_order_id', 'clientOrderId')
clientOrderIds = self.safe_string_2(params, 'client_order_ids', 'clientOrderIds', clientOrderIds)
if clientOrderIds is None:
request['order_id'] = ','.join(ids)
else:
request['client_order_id'] = clientOrderIds
params = self.omit(params, ['client_order_id', 'client_order_ids', 'clientOrderId', 'clientOrderIds'])
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status": "ok",
# "data": {
# "success": [
# "5983466"
# ],
# "failed": [
# {
# "err-msg": "Incorrect order state",
# "order-state": 7,
# "order-id": "",
# "err-code": "order-orderstate-error",
# "client-order-id": "first"
# },
# {
# "err-msg": "Incorrect order state",
# "order-state": 7,
# "order-id": "",
# "err-code": "order-orderstate-error",
# "client-order-id": "second"
# },
# {
# "err-msg": "The record is not found.",
# "order-id": "",
# "err-code": "base-not-found",
# "client-order-id": "third"
# }
# ]
# }
# }
#
# contracts
#
# {
# "status": "ok",
# "data": {
# "errors": [
# {
# "order_id": "769206471845261312",
# "err_code": 1061,
# "err_msg": "This order doesnt exist."
# }
# ],
# "successes": "773120304138219520"
# },
# "ts": 1604367997451
# }
#
return response
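#
# usage sketch (ids illustrative): spot ids are joined into a comma separated
# 'order-ids' field (max 50), contract ids into 'order_id' (max 10), and a
# symbol is required for contract markets
#
#     exchange.cancel_orders(['5983466', '5983467'], None, {'type': 'spot'})
#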
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('cancelOrder', None, params)
request = {
# spot -----------------------------------------------------------
# 'account-id': account['id'],
# 'symbol': market['id'], # a list of comma-separated symbols, all symbols by default
# 'types': 'string', # comma separated: buy-market, sell-market, buy-limit, sell-limit, buy-ioc, sell-ioc, buy-stop-limit, sell-stop-limit, buy-limit-fok, sell-limit-fok, buy-stop-limit-fok, sell-stop-limit-fok
# 'side': 'buy', # or 'sell'
# 'size': 100, # the number of orders to cancel 1-100
# contract -------------------------------------------------------
# 'symbol': market['settleId'], # required
# 'contract_code': market['id'],
# 'contract_type': 'this_week', # swap, this_week, next_week, quarter, next_quarter
# 'direction': 'buy': # buy, sell
# 'offset': 'open', # open, close
}
market = None
method = None
if marketType == 'spot':
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
method = 'spotPrivatePostV1OrderOrdersBatchCancelOpenOrders'
else:
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol for ' + marketType + ' orders')
market = self.market(symbol)
request['contract_code'] = market['id']
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapCancelall'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossCancelall'
elif market['inverse']:
if market['future']:
method = 'contractPrivatePostApiV1ContractCancelall'
request['symbol'] = market['settleId']
elif market['swap']:
method = 'contractPrivatePostSwapApiV1SwapCancelall'
else:
raise NotSupported(self.id + ' cancelAllOrders() does not support ' + marketType + ' markets')
response = getattr(self, method)(self.extend(request, params))
#
# {
# code: 200,
# data: {
# "success-count": 2,
# "failed-count": 0,
# "next-id": 5454600
# }
# }
#
return response
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, 0, self.currencies[currency]['precision'])
def safe_network(self, networkId):
lastCharacterIndex = len(networkId) - 1
lastCharacter = networkId[lastCharacterIndex]
if lastCharacter == '1':
networkId = networkId[0:lastCharacterIndex]
networksById = {}
return self.safe_string(networksById, networkId, networkId)
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# currency: "usdt",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# addressTag: "",
# chain: "usdterc20", # trc20usdt, hrc20usdt, usdt, algousdt
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string(depositAddress, 'addressTag')
if tag == '':
tag = None
currencyId = self.safe_string(depositAddress, 'currency')
currency = self.safe_currency(currencyId, currency)
code = self.safe_currency_code(currencyId, currency)
networkId = self.safe_string(depositAddress, 'chain')
networks = self.safe_value(currency, 'networks', {})
networksById = self.index_by(networks, 'id')
networkValue = self.safe_value(networksById, networkId, networkId)
network = self.safe_string(networkValue, 'network')
note = self.safe_string(depositAddress, 'note')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': network,
'note': note,
'info': depositAddress,
}
def fetch_deposit_addresses_by_network(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.spotPrivateGetV2AccountDepositAddress(self.extend(request, params))
#
# {
# code: 200,
# data: [
# {
# currency: "eth",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# addressTag: "",
# chain: "eth"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
parsed = self.parse_deposit_addresses(data, [code], False)
return self.index_by(parsed, 'network')
def fetch_deposit_address(self, code, params={}):
rawNetwork = self.safe_string_upper(params, 'network')
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string_upper(networks, rawNetwork, rawNetwork)
params = self.omit(params, 'network')
response = self.fetch_deposit_addresses_by_network(code, params)
result = None
if network is None:
result = self.safe_value(response, code)
if result is None:
alias = self.safe_string(networks, code, code)
result = self.safe_value(response, alias)
if result is None:
defaultNetwork = self.safe_string(self.options, 'defaultNetwork', 'ERC20')
result = self.safe_value(response, defaultNetwork)
if result is None:
values = list(response.values())
result = self.safe_value(values, 0)
if result is None:
raise InvalidAddress(self.id + ' fetchDepositAddress() cannot find deposit address for ' + code)
return result
result = self.safe_value(response, network)
if result is None:
raise InvalidAddress(self.id + ' fetchDepositAddress() cannot find ' + network + ' deposit address for ' + code)
return result
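#
# usage sketch (network code illustrative, resolved through options['networks']):
# without a 'network' param the code falls back to the currency code, its alias,
# options['defaultNetwork'] and finally the first address returned
#
#     address = exchange.fetch_deposit_address('USDT', {'network': 'TRC20'})
#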
def fetch_withdraw_addresses_by_network(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.spotPrivateGetV2AccountWithdrawAddress(self.extend(request, params))
#
# {
# code: 200,
# data: [
# {
# currency: "eth",
# chain: "eth"
# note: "Binance - TRC20",
# addressTag: "",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
parsed = self.parse_deposit_addresses(data, [code], False)
return self.index_by(parsed, 'network')
def fetch_withdraw_address(self, code, params={}):
rawNetwork = self.safe_string_upper(params, 'network')
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string_upper(networks, rawNetwork, rawNetwork)
params = self.omit(params, 'network')
response = self.fetch_withdraw_addresses_by_network(code, params)
result = None
if network is None:
result = self.safe_value(response, code)
if result is None:
alias = self.safe_string(networks, code, code)
result = self.safe_value(response, alias)
if result is None:
defaultNetwork = self.safe_string(self.options, 'defaultNetwork', 'ERC20')
result = self.safe_value(response, defaultNetwork)
if result is None:
values = list(response.values())
result = self.safe_value(values, 0)
if result is None:
raise InvalidAddress(self.id + ' fetchWithdrawAddress() cannot find withdraw address for ' + code)
return result
result = self.safe_value(response, network)
if result is None:
raise InvalidAddress(self.id + ' fetchWithdrawAddress() cannot find ' + network + ' withdraw address for ' + code)
return result
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'deposit',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = self.spotPrivateGetV1QueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'withdraw',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = self.spotPrivateGetV1QueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# 'id': 8211029,
# 'type': 'deposit',
# 'currency': 'eth',
# 'chain': 'eth',
# 'tx-hash': 'bd315....',
# 'amount': 0.81162421,
# 'address': '4b8b....',
# 'address-tag': '',
# 'fee': 0,
# 'state': 'safe',
# 'created-at': 1542180380965,
# 'updated-at': 1542180788077
# }
#
# fetchWithdrawals
#
# {
# 'id': 6908275,
# 'type': 'withdraw',
# 'currency': 'btc',
# 'chain': 'btc',
# 'tx-hash': 'c1a1a....',
# 'amount': 0.80257005,
# 'address': '1QR....',
# 'address-tag': '',
# 'fee': 0.0005,
# 'state': 'confirmed',
# 'created-at': 1552107295685,
# 'updated-at': 1552108032859
# }
#
timestamp = self.safe_integer(transaction, 'created-at')
updated = self.safe_integer(transaction, 'updated-at')
code = self.safe_currency_code(self.safe_string(transaction, 'currency'))
type = self.safe_string(transaction, 'type')
if type == 'withdraw':
type = 'withdrawal'
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
tag = self.safe_string(transaction, 'address-tag')
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
feeCost = abs(feeCost)
address = self.safe_string(transaction, 'address')
network = self.safe_string_upper(transaction, 'chain')
return {
'info': transaction,
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'tx-hash'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': network,
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': tag,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': self.safe_number(transaction, 'amount'),
'currency': code,
'status': status,
'updated': updated,
'fee': {
'currency': code,
'cost': feeCost,
'rate': None,
},
}
def parse_transaction_status(self, status):
statuses = {
# deposit statuses
'unknown': 'failed',
'confirming': 'pending',
'confirmed': 'ok',
'safe': 'ok',
'orphan': 'failed',
# withdrawal statuses
'submitted': 'pending',
'canceled': 'canceled',
'reexamine': 'pending',
'reject': 'failed',
'pass': 'pending',
'wallet-reject': 'failed',
# 'confirmed': 'ok', # present in deposit statuses
'confirm-error': 'failed',
'repealed': 'failed',
'wallet-transfer': 'pending',
'pre-transfer': 'pending',
}
return self.safe_string(statuses, status, status)
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.load_markets()
self.check_address(address)
currency = self.currency(code)
request = {
'address': address, # only supports existing addresses in your withdraw address list
'amount': amount,
'currency': currency['id'].lower(),
}
if tag is not None:
request['addr-tag'] = tag # only for XRP?
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string_upper(params, 'network')  # this line allows the user to specify either ERC20 or ETH
network = self.safe_string_lower(networks, network, network) # handle ETH>ERC20 alias
if network is not None:
# possible chains - usdterc20, trc20usdt, hrc20usdt, usdt, algousdt
if network == 'erc20':
request['chain'] = currency['id'] + network
else:
request['chain'] = network + currency['id']
params = self.omit(params, 'network')
response = self.spotPrivatePostV1DwWithdrawApiCreate(self.extend(request, params))
id = self.safe_string(response, 'data')
return {
'info': response,
'id': id,
}
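#
# a hedged sketch of the chain naming used above (the address and network are
# illustrative, and the address must already be in the withdraw address list):
# 'erc20' maps to currencyId + network ('usdt' + 'erc20' -> 'usdterc20'),
# other networks map to network + currencyId ('trc20' + 'usdt' -> 'trc20usdt')
#
#     exchange.withdraw('USDT', 100, 'TExampleTronAddress', None, {'network': 'TRC20'})
#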
def parse_transfer(self, transfer, currency=None):
#
# transfer
#
# {
# "data": 12345,
# "status": "ok"
# }
#
id = self.safe_string(transfer, 'data')
code = self.safe_currency_code(None, currency)
return {
'info': transfer,
'id': id,
'timestamp': None,
'datetime': None,
'currency': code,
'amount': None,
'fromAccount': None,
'toAccount': None,
'status': None,
}
def transfer(self, code, amount, fromAccount, toAccount, params={}):
self.load_markets()
currency = self.currency(code)
type = self.safe_string(params, 'type')
if type is None:
accountsByType = self.safe_value(self.options, 'accountsByType', {})
fromAccount = fromAccount.lower() # pro, futures
toAccount = toAccount.lower() # pro, futures
fromId = self.safe_string(accountsByType, fromAccount)
toId = self.safe_string(accountsByType, toAccount)
if fromId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' fromAccount must be one of ' + ', '.join(keys))
if toId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' toAccount must be one of ' + ', '.join(keys))
type = fromAccount + '-to-' + toAccount
request = {
'currency': currency['id'],
'amount': float(self.currency_to_precision(code, amount)),
'type': type,
}
response = self.spotPrivatePostFuturesTransfer(self.extend(request, params))
#
# {
# "data": 12345,
# "status": "ok"
# }
#
transfer = self.parse_transfer(response, currency)
return self.extend(transfer, {
'amount': amount,
'currency': code,
'fromAccount': fromAccount,
'toAccount': toAccount,
})
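#
# usage sketch (account names illustrative): when no explicit 'type' param is
# given it is derived from options['accountsByType'] as
# fromAccount + '-to-' + toAccount, e.g. 'pro' and 'futures' -> 'pro-to-futures'
#
#     exchange.transfer('USDT', 100, 'pro', 'futures')
#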
def fetch_borrow_rates_per_symbol(self, params={}):
self.load_markets()
response = self.spotPrivateGetV1MarginLoanInfo(params)
# {
# "status": "ok",
# "data": [
# {
# "symbol": "1inchusdt",
# "currencies": [
# {
# "currency": "1inch",
# "interest-rate": "0.00098",
# "min-loan-amt": "90.000000000000000000",
# "max-loan-amt": "1000.000000000000000000",
# "loanable-amt": "0.0",
# "actual-rate": "0.00098"
# },
# {
# "currency": "usdt",
# "interest-rate": "0.00098",
# "min-loan-amt": "100.000000000000000000",
# "max-loan-amt": "1000.000000000000000000",
# "loanable-amt": "0.0",
# "actual-rate": "0.00098"
# }
# ]
# },
# ...
# ]
# }
timestamp = self.milliseconds()
data = self.safe_value(response, 'data')
rates = {
'info': response,
}
for i in range(0, len(data)):
rate = data[i]
currencies = self.safe_value(rate, 'currencies')
symbolRates = {}
for j in range(0, len(currencies)):
currency = currencies[j]
currencyId = self.safe_string(currency, 'currency')
code = self.safe_currency_code(currencyId)
symbolRates[code] = {
'currency': code,
'rate': self.safe_number(currency, 'actual-rate'),
'span': 86400000,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}
market = self.markets_by_id[self.safe_string(rate, 'symbol')]
symbol = market['symbol']
rates[symbol] = symbolRates
return rates
def fetch_borrow_rates(self, params={}):
self.load_markets()
response = self.spotPrivateGetV1MarginLoanInfo(params)
# {
# "status": "ok",
# "data": [
# {
# "symbol": "1inchusdt",
# "currencies": [
# {
# "currency": "1inch",
# "interest-rate": "0.00098",
# "min-loan-amt": "90.000000000000000000",
# "max-loan-amt": "1000.000000000000000000",
# "loanable-amt": "0.0",
# "actual-rate": "0.00098"
# },
# {
# "currency": "usdt",
# "interest-rate": "0.00098",
# "min-loan-amt": "100.000000000000000000",
# "max-loan-amt": "1000.000000000000000000",
# "loanable-amt": "0.0",
# "actual-rate": "0.00098"
# }
# ]
# },
# ...
# ]
# }
timestamp = self.milliseconds()
data = self.safe_value(response, 'data')
rates = {}
for i in range(0, len(data)):
market = data[i]
currencies = self.safe_value(market, 'currencies')
for j in range(0, len(currencies)):
currency = currencies[j]
currencyId = self.safe_string(currency, 'currency')
code = self.safe_currency_code(currencyId)
rates[code] = {
'currency': code,
'rate': self.safe_number(currency, 'actual-rate'),
'span': 86400000,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'info': None,
}
return rates
def fetch_funding_rate_history(self, symbol=None, since=None, limit=None, params={}):
#
# Gets a history of funding rates with their timestamps
# (param) symbol: Future currency pair
# (param) limit: not used by huobi
# (param) since: not used by huobi
# (param) params: Object containing more params for the request
# return: [{symbol, fundingRate, timestamp, dateTime}]
#
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchFundingRateHistory() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'contract_code': market['id'],
}
method = None
if market['inverse']:
method = 'contractPublicGetSwapApiV1SwapHistoricalFundingRate'
elif market['linear']:
method = 'contractPublicGetLinearSwapApiV1SwapHistoricalFundingRate'
else:
raise NotSupported(self.id + ' fetchFundingRateHistory() supports inverse and linear swaps only')
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status": "ok",
# "data": {
# "total_page": 62,
# "current_page": 1,
# "total_size": 1237,
# "data": [
# {
# "avg_premium_index": "-0.000208064395065541",
# "funding_rate": "0.000100000000000000",
# "realized_rate": "0.000100000000000000",
# "funding_time": "1638921600000",
# "contract_code": "BTC-USDT",
# "symbol": "BTC",
# "fee_asset": "USDT"
# },
# ]
# },
# "ts": 1638939294277
# }
#
data = self.safe_value(response, 'data')
result = self.safe_value(data, 'data')
rates = []
for i in range(0, len(result)):
entry = result[i]
marketId = self.safe_string(entry, 'contract_code')
symbol = self.safe_symbol(marketId)
timestamp = self.safe_integer(entry, 'funding_time')
rates.append({
'info': entry,
'symbol': symbol,
'fundingRate': self.safe_number(entry, 'funding_rate'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
sorted = self.sort_by(rates, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, symbol, since, limit)
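#
# usage sketch (symbol illustrative): only inverse and linear swaps are
# supported, results are sorted by timestamp and filtered by since/limit
#
#     history = exchange.fetch_funding_rate_history('BTC/USDT:USDT', limit=10)
#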
def parse_funding_rate(self, fundingRate, market=None):
#
# {
# "status": "ok",
# "data": {
# "estimated_rate": "0.000100000000000000",
# "funding_rate": "0.000100000000000000",
# "contract_code": "BCH-USD",
# "symbol": "BCH",
# "fee_asset": "BCH",
# "funding_time": "1639094400000",
# "next_funding_time": "1639123200000"
# },
# "ts": 1639085854775
# }
#
nextFundingRate = self.safe_number(fundingRate, 'estimated_rate')
fundingTimestamp = self.safe_integer(fundingRate, 'funding_time')
nextFundingTimestamp = self.safe_integer(fundingRate, 'next_funding_time')
marketId = self.safe_string(fundingRate, 'contract_code')
symbol = self.safe_symbol(marketId, market)
return {
'info': fundingRate,
'symbol': symbol,
'markPrice': None,
'indexPrice': None,
'interestRate': None,
'estimatedSettlePrice': None,
'timestamp': None,
'datetime': None,
'fundingRate': self.safe_number(fundingRate, 'funding_rate'),
'fundingTimestamp': fundingTimestamp,
'fundingDatetime': self.iso8601(fundingTimestamp),
'nextFundingRate': nextFundingRate,
'nextFundingTimestamp': nextFundingTimestamp,
'nextFundingDatetime': self.iso8601(nextFundingTimestamp),
'previousFundingRate': None,
'previousFundingTimestamp': None,
'previousFundingDatetime': None,
}
def fetch_funding_rate(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
method = None
if market['inverse']:
method = 'contractPublicGetSwapApiV1SwapFundingRate'
elif market['linear']:
method = 'contractPublicGetLinearSwapApiV1SwapFundingRate'
else:
raise NotSupported(self.id + ' fetchFundingRate() supports inverse and linear swaps only')
request = {
'contract_code': market['id'],
}
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status": "ok",
# "data": {
# "estimated_rate": "0.000100000000000000",
# "funding_rate": "0.000100000000000000",
# "contract_code": "BTC-USDT",
# "symbol": "BTC",
# "fee_asset": "USDT",
# "funding_time": "1603699200000",
# "next_funding_time": "1603728000000"
# },
# "ts": 1603696494714
# }
#
result = self.safe_value(response, 'data', {})
return self.parse_funding_rate(result, market)
def fetch_funding_rates(self, symbols, params={}):
self.load_markets()
options = self.safe_value(self.options, 'fetchFundingRates', {})
defaultSubType = self.safe_string(self.options, 'defaultSubType', 'inverse')
subType = self.safe_string(options, 'subType', defaultSubType)
subType = self.safe_string(params, 'subType', subType)
request = {
# 'contract_code': market['id'],
}
method = self.get_supported_mapping(subType, {
'linear': 'contractPublicGetLinearSwapApiV1SwapBatchFundingRate',
'inverse': 'contractPublicGetSwapApiV1SwapBatchFundingRate',
})
params = self.omit(params, 'subType')
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status": "ok",
# "data": [
# {
# "estimated_rate": "0.000100000000000000",
# "funding_rate": "0.000100000000000000",
# "contract_code": "MANA-USDT",
# "symbol": "MANA",
# "fee_asset": "USDT",
# "funding_time": "1643356800000",
# "next_funding_time": "1643385600000",
# "trade_partition":"USDT"
# },
# ],
# "ts": 1643346173103
# }
#
data = self.safe_value(response, 'data', [])
result = self.parse_funding_rates(data)
return self.filter_by_array(result, 'symbol', symbols)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/'
query = self.omit(params, self.extract_params(path))
if isinstance(api, basestring):
# signing implementation for the old endpoints
if api == 'market':
url += api
elif (api == 'public') or (api == 'private'):
url += self.version
elif (api == 'v2Public') or (api == 'v2Private'):
url += 'v2'
url += '/' + self.implode_params(path, params)
if api == 'private' or api == 'v2Private':
self.check_required_credentials()
timestamp = self.ymdhms(self.milliseconds(), 'T')
request = {
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'AccessKeyId': self.apiKey,
'Timestamp': timestamp,
}
if method != 'POST':
request = self.extend(request, query)
request = self.keysort(request)
auth = self.urlencode(request)
# unfortunately, PHP demands double quotes for the escaped newline symbol
payload = "\n".join([method, self.hostname, url, auth]) # eslint-disable-line quotes
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
auth += '&' + self.urlencode({'Signature': signature})
url += '?' + auth
if method == 'POST':
body = self.json(query)
headers = {
'Content-Type': 'application/json',
}
else:
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
else:
if query:
url += '?' + self.urlencode(query)
url = self.implode_params(self.urls['api'][api], {
'hostname': self.hostname,
}) + url
else:
# signing implementation for the new endpoints
# type, access = api
type = self.safe_string(api, 0)
access = self.safe_string(api, 1)
url += self.implode_params(path, params)
hostname = self.safe_string(self.urls['hostnames'], type)
if access == 'public':
if query:
url += '?' + self.urlencode(query)
elif access == 'private':
self.check_required_credentials()
timestamp = self.ymdhms(self.milliseconds(), 'T')
request = {
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'AccessKeyId': self.apiKey,
'Timestamp': timestamp,
}
if method != 'POST':
request = self.extend(request, query)
request = self.keysort(request)
auth = self.urlencode(request)
# unfortunately, PHP demands double quotes for the escaped newline symbol
payload = "\n".join([method, hostname, url, auth]) # eslint-disable-line quotes
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
auth += '&' + self.urlencode({'Signature': signature})
url += '?' + auth
if method == 'POST':
body = self.json(query)
headers = {
'Content-Type': 'application/json',
}
else:
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
url = self.implode_params(self.urls['api'][type], {
'hostname': hostname,
}) + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
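#
# a standalone sketch of the signing scheme implemented above (the key, host and
# path are placeholders, not tied to this class): the payload is the HTTP
# method, the host, the path and the key-sorted urlencoded query joined by
# newlines, signed with HMAC-SHA256 and base64-encoded
#
#     import base64
#     import hashlib
#     import hmac
#     from urllib.parse import urlencode
#
#     def sign_request(method, host, path, params, secret):
#         auth = urlencode(sorted(params.items()))  # key-sorted query string
#         payload = '\n'.join([method, host, path, auth])
#         digest = hmac.new(secret.encode(), payload.encode(), hashlib.sha256).digest()
#         return base64.b64encode(digest).decode()
#
#     # signature = sign_request('GET', 'api.huobi.pro', '/v1/order/orders', request, secret)
#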
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
return self.safe_integer(config, 'cost', 1)
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'status' in response:
#
# {"status":"error","err-code":"order-limitorder-amount-min-error","err-msg":"limit order amount error, min: `0.001`","data":null}
#
status = self.safe_string(response, 'status')
if status == 'error':
code = self.safe_string_2(response, 'err-code', 'err_code')
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
message = self.safe_string_2(response, 'err-msg', 'err_msg')
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
raise ExchangeError(feedback)
def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
marketType, query = self.handle_market_type_and_params('fetchFundingHistory', market, params)
method = None
request = {
'type': '30,31',
}
if market['linear']:
method = 'contractPrivatePostLinearSwapApiV1SwapFinancialRecordExact'
#
# {
# status: 'ok',
# data: {
# financial_record: [
# {
# id: '1320088022',
# type: '30',
# amount: '0.004732510000000000',
# ts: '1641168019321',
# contract_code: 'BTC-USDT',
# asset: 'USDT',
# margin_account: 'BTC-USDT',
# face_margin_account: ''
# },
# ],
# remain_size: '0',
# next_id: null
# },
# ts: '1641189898425'
# }
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
request['margin_account'] = market['id']
else:
request['margin_account'] = market['quoteId']
else:
if marketType == 'swap':
method = 'contractPrivatePostSwapApiV1SwapFinancialRecordExact'
request['contract_code'] = market['id']
else:
raise ExchangeError(self.id + ' fetchFundingHistory() only makes sense for swap contracts')
#
# swap
# {
# status: 'ok',
# data: {
# financial_record: [
# {
# id: '1667436164',
# symbol: 'BTC',
# type: '30',
# amount: '3.9755491985E-8',
# ts: '1641168097323',
# contract_code: 'BTC-USD'
# },
# ],
# remain_size: '0',
# next_id: null
# },
# ts: '1641190296379'
# }
#
response = getattr(self, method)(self.extend(request, query))
data = self.safe_value(response, 'data', {})
financialRecord = self.safe_value(data, 'financial_record', [])
return self.parse_incomes(financialRecord, market, since, limit)
def set_leverage(self, leverage, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
marketType, query = self.handle_market_type_and_params('fetchPosition', market, params)
method = None
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
method = self.get_supported_mapping(marginType, {
'isolated': 'contractPrivatePostLinearSwapApiV1SwapSwitchLeverRate',
'cross': 'contractPrivatePostLinearSwapApiV1SwapCrossSwitchLeverRate',
})
#
# {
# status: 'ok',
# data: {
# contract_code: 'BTC-USDT',
# lever_rate: '100',
# margin_mode: 'isolated'
# },
# ts: '1641184710649'
# }
#
else:
method = self.get_supported_mapping(marketType, {
'future': 'contractPrivatePostApiV1ContractSwitchLeverRate',
'swap': 'contractPrivatePostSwapApiV1SwapSwitchLeverRate',
})
#
# future
# {
# status: 'ok',
# data: {symbol: 'BTC', lever_rate: 5},
# ts: 1641184578678
# }
#
# swap
#
# {
# status: 'ok',
# data: {contract_code: 'BTC-USD', lever_rate: '5'},
# ts: '1641184652979'
# }
#
request = {
'lever_rate': leverage,
}
if marketType == 'future' and market['inverse']:
request['symbol'] = market['settleId']
else:
request['contract_code'] = market['id']
response = getattr(self, method)(self.extend(request, query))
return response
def parse_income(self, income, market=None):
#
# {
# id: '1667161118',
# symbol: 'BTC',
# type: '31',
# amount: '-2.11306593188E-7',
# ts: '1641139308983',
# contract_code: 'BTC-USD'
# }
#
marketId = self.safe_string(income, 'contract_code')
symbol = self.safe_symbol(marketId, market)
amount = self.safe_number(income, 'amount')
timestamp = self.safe_integer(income, 'ts')
id = self.safe_string(income, 'id')
currencyId = self.safe_string_2(income, 'symbol', 'asset')
code = self.safe_currency_code(currencyId)
return {
'info': income,
'symbol': symbol,
'code': code,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'id': id,
'amount': amount,
}
def parse_incomes(self, incomes, market=None, since=None, limit=None):
result = []
for i in range(0, len(incomes)):
entry = incomes[i]
parsed = self.parse_income(entry, market)
result.append(parsed)
sorted = self.sort_by(result, 'timestamp')
return self.filter_by_since_limit(sorted, since, limit, 'timestamp')
def parse_position(self, position, market=None):
#
# {
# symbol: 'BTC',
# contract_code: 'BTC-USDT',
# volume: '1.000000000000000000',
# available: '1.000000000000000000',
# frozen: '0E-18',
# cost_open: '47162.000000000000000000',
# cost_hold: '47151.300000000000000000',
# profit_unreal: '0.007300000000000000',
# profit_rate: '-0.000144183876850008',
# lever_rate: '2',
# position_margin: '23.579300000000000000',
# direction: 'buy',
# profit: '-0.003400000000000000',
# last_price: '47158.6',
# margin_asset: 'USDT',
# margin_mode: 'isolated',
# margin_account: 'BTC-USDT',
# margin_balance: '24.973020070000000000',
# margin_position: '23.579300000000000000',
# margin_frozen: '0',
# margin_available: '1.393720070000000000',
# profit_real: '0E-18',
# risk_rate: '1.044107779705080303',
# withdraw_available: '1.386420070000000000000000000000000000',
# liquidation_price: '22353.229148614609571788',
# adjust_factor: '0.015000000000000000',
# margin_static: '24.965720070000000000'
# }
#
market = self.safe_market(self.safe_string(position, 'contract_code'))
symbol = market['symbol']
contracts = self.safe_string(position, 'volume')
contractSize = self.safe_value(market, 'contractSize')
contractSizeString = self.number_to_string(contractSize)
entryPrice = self.safe_number(position, 'cost_hold')
initialMargin = self.safe_string(position, 'position_margin')
rawSide = self.safe_string(position, 'direction')
side = 'long' if (rawSide == 'buy') else 'short'
unrealizedProfit = self.safe_number(position, 'profit_unreal')
marginType = self.safe_string(position, 'margin_mode')
leverage = self.safe_string(position, 'lever_rate')
percentage = Precise.string_mul(self.safe_string(position, 'profit_rate'), '100')
lastPrice = self.safe_string(position, 'last_price')
faceValue = Precise.string_mul(contracts, contractSizeString)
notional = None
if market['linear']:
notional = Precise.string_mul(faceValue, lastPrice)
else:
notional = Precise.string_div(faceValue, lastPrice)
marginType = 'cross'
        initialMarginPercentage = Precise.string_div(initialMargin, notional)
collateral = self.safe_string(position, 'margin_balance')
liquidationPrice = self.safe_number(position, 'liquidation_price')
adjustmentFactor = self.safe_string(position, 'adjust_factor')
maintenanceMarginPercentage = Precise.string_div(adjustmentFactor, leverage)
maintenanceMargin = Precise.string_mul(maintenanceMarginPercentage, notional)
marginRatio = Precise.string_div(maintenanceMargin, collateral)
return {
'info': position,
'symbol': symbol,
'contracts': self.parse_number(contracts),
'contractSize': contractSize,
'entryPrice': entryPrice,
'collateral': self.parse_number(collateral),
'side': side,
'unrealizedProfit': unrealizedProfit,
'leverage': self.parse_number(leverage),
'percentage': self.parse_number(percentage),
'marginType': marginType,
'notional': self.parse_number(notional),
'markPrice': None,
'liquidationPrice': liquidationPrice,
'initialMargin': self.parse_number(initialMargin),
            'initialMarginPercentage': self.parse_number(initialMarginPercentage),
'maintenanceMargin': self.parse_number(maintenanceMargin),
'maintenanceMarginPercentage': self.parse_number(maintenanceMarginPercentage),
'marginRatio': self.parse_number(marginRatio),
'timestamp': None,
'datetime': None,
}
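    # Hedged numeric walk-through (hypothetical figures) of the margin arithmetic performed in
    # parse_position() above; the real code keeps every step in Precise string math.
    @staticmethod
    def _example_margin_math_sketch():
        adjust_factor = 0.015   # position field 'adjust_factor'
        leverage = 2.0          # 'lever_rate'
        notional = 47.16        # contracts * contractSize * last_price (linear case)
        collateral = 24.97      # 'margin_balance'
        initial_margin = 23.58  # 'position_margin'
        maintenance_margin_pct = adjust_factor / leverage        # 0.0075
        maintenance_margin = maintenance_margin_pct * notional   # ~0.354
        margin_ratio = maintenance_margin / collateral           # ~0.0142
        initial_margin_pct = initial_margin / notional           # 0.50, i.e. 1 / leverage
        return maintenance_margin, margin_ratio, initial_margin_pct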
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', 'isolated')
defaultSubType = self.safe_string(self.options, 'defaultSubType', 'inverse')
marketType, query = self.handle_market_type_and_params('fetchPositions', None, params)
method = None
if defaultSubType == 'linear':
method = self.get_supported_mapping(marginType, {
'isolated': 'contractPrivatePostLinearSwapApiV1SwapPositionInfo',
'cross': 'contractPrivatePostLinearSwapApiV1SwapCrossPositionInfo',
})
#
# {
# status: 'ok',
# data: [
# {
# symbol: 'BTC',
# contract_code: 'BTC-USDT',
# volume: '1.000000000000000000',
# available: '1.000000000000000000',
# frozen: '0E-18',
# cost_open: '47162.000000000000000000',
# cost_hold: '47162.000000000000000000',
# profit_unreal: '0.047300000000000000',
# profit_rate: '0.002005852169119206',
# lever_rate: '2',
# position_margin: '23.604650000000000000',
# direction: 'buy',
# profit: '0.047300000000000000',
# last_price: '47209.3',
# margin_asset: 'USDT',
# margin_mode: 'isolated',
# margin_account: 'BTC-USDT'
# }
# ],
# ts: '1641108676768'
# }
#
else:
method = self.get_supported_mapping(marketType, {
'future': 'contractPrivatePostApiV1ContractPositionInfo',
'swap': 'contractPrivatePostSwapApiV1SwapPositionInfo',
})
#
# future
# {
# status: 'ok',
# data: [
# {
# symbol: 'BTC',
# contract_code: 'BTC220624',
# contract_type: 'next_quarter',
# volume: '1.000000000000000000',
# available: '1.000000000000000000',
# frozen: '0E-18',
# cost_open: '49018.880000000009853343',
# cost_hold: '49018.880000000009853343',
# profit_unreal: '-8.62360608500000000000000000000000000000000000000E-7',
# profit_rate: '-0.000845439023678622',
# lever_rate: '2',
# position_margin: '0.001019583964880634',
# direction: 'sell',
# profit: '-8.62360608500000000000000000000000000000000000000E-7',
# last_price: '49039.61'
# }
# ],
# ts: '1641109895199'
# }
#
# swap
# {
# status: 'ok',
# data: [
# {
# symbol: 'BTC',
# contract_code: 'BTC-USD',
# volume: '1.000000000000000000',
# available: '1.000000000000000000',
# frozen: '0E-18',
# cost_open: '47150.000000000012353300',
# cost_hold: '47150.000000000012353300',
# profit_unreal: '0E-54',
# profit_rate: '-7.86E-16',
# lever_rate: '3',
# position_margin: '0.000706963591375044',
# direction: 'buy',
# profit: '0E-54',
# last_price: '47150'
# }
# ],
# ts: '1641109636572'
# }
#
response = getattr(self, method)(query)
data = self.safe_value(response, 'data')
timestamp = self.safe_integer(response, 'ts')
result = []
for i in range(0, len(data)):
position = data[i]
parsed = self.parse_position(position)
result.append(self.extend(parsed, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}))
return self.filter_by_array(result, 'symbol', symbols, False)
def fetch_position(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', 'isolated')
marginType = self.safe_string_2(params, 'marginType', 'defaultMarginType', marginType)
params = self.omit(params, ['defaultMarginType', 'marginType'])
marketType, query = self.handle_market_type_and_params('fetchPosition', market, params)
method = None
if market['linear']:
method = self.get_supported_mapping(marginType, {
'isolated': 'contractPrivatePostLinearSwapApiV1SwapAccountPositionInfo',
'cross': 'contractPrivatePostLinearSwapApiV1SwapCrossAccountPositionInfo',
})
#
# {
# status: 'ok',
# data: [
# {
# positions: [
# {
# symbol: 'BTC',
# contract_code: 'BTC-USDT',
# volume: 1,
# available: 1,
# frozen: 0,
# cost_open: 47027.1,
# cost_hold: 47324.4,
# profit_unreal: 0.1705,
# profit_rate: -0.269631765513927,
# lever_rate: 100,
# position_margin: 0.471539,
# direction: 'sell',
# profit: -0.1268,
# last_price: 47153.9,
# margin_asset: 'USDT',
# margin_mode: 'isolated',
# margin_account: 'BTC-USDT'
# }
# ],
# symbol: 'BTC',
# margin_balance: 8.01274699,
# margin_position: 0.471539,
# margin_frozen: 0,
# margin_available: 7.54120799,
# profit_real: 0,
# profit_unreal: 0.1705,
# risk_rate: 16.442755615124092,
# withdraw_available: 7.37070799,
# liquidation_price: 54864.89009448036,
# lever_rate: 100,
# adjust_factor: 0.55,
# margin_static: 7.84224699,
# contract_code: 'BTC-USDT',
# margin_asset: 'USDT',
# margin_mode: 'isolated',
# margin_account: 'BTC-USDT'
# }
# ],
# ts: 1641162539767
# }
#
else:
method = self.get_supported_mapping(marketType, {
'future': 'contractPrivatePostApiV1ContractAccountPositionInfo',
'swap': 'contractPrivatePostSwapApiV1SwapAccountPositionInfo',
})
# future
# {
# status: 'ok',
# data: [
# {
# symbol: 'BTC',
# contract_code: 'BTC-USD',
# margin_balance: 0.000752347253890835,
# margin_position: 0.000705870726835087,
# margin_frozen: 0,
# margin_available: 0.000046476527055748,
# profit_real: 0,
# profit_unreal: -0.000004546248622,
# risk_rate: 1.0508428311146076,
# withdraw_available: 0.000046476527055748,
# liquidation_price: 35017.91655851386,
# lever_rate: 3,
# adjust_factor: 0.015,
# margin_static: 0.000756893502512835,
# positions: [
# {
# symbol: 'BTC',
# contract_code: 'BTC-USD',
# volume: 1,
# available: 1,
# frozen: 0,
# cost_open: 47150.000000000015,
# cost_hold: 47324.6,
# profit_unreal: -0.000004546248622,
# profit_rate: 0.00463757067530574,
# lever_rate: 3,
# position_margin: 0.000705870726835087,
# direction: 'buy',
# profit: 0.0000032785936199,
# last_price: 47223
# }
# ]
# }
# ],
# ts: 1641162795228
# }
#
# swap
# {
# status: 'ok',
# data: [
# {
# positions: [
# {
# symbol: 'BTC',
# contract_code: 'BTC-USDT',
# volume: 1,
# available: 1,
# frozen: 0,
# cost_open: 47027.1,
# cost_hold: 47324.4,
# profit_unreal: 0.1705,
# profit_rate: -0.269631765513927,
# lever_rate: 100,
# position_margin: 0.471539,
# direction: 'sell',
# profit: -0.1268,
# last_price: 47153.9,
# margin_asset: 'USDT',
# margin_mode: 'isolated',
# margin_account: 'BTC-USDT'
# }
# ],
# symbol: 'BTC',
# margin_balance: 8.01274699,
# margin_position: 0.471539,
# margin_frozen: 0,
# margin_available: 7.54120799,
# profit_real: 0,
# profit_unreal: 0.1705,
# risk_rate: 16.442755615124092,
# withdraw_available: 7.37070799,
# liquidation_price: 54864.89009448036,
# lever_rate: 100,
# adjust_factor: 0.55,
# margin_static: 7.84224699,
# contract_code: 'BTC-USDT',
# margin_asset: 'USDT',
# margin_mode: 'isolated',
# margin_account: 'BTC-USDT'
# }
# ],
# ts: 1641162539767
# }
# cross usdt swap
# {
# "status":"ok",
# "data":{
# "positions":[
# ],
# "futures_contract_detail":[
# (...)
# ]
# "margin_mode":"cross",
# "margin_account":"USDT",
# "margin_asset":"USDT",
# "margin_balance":"1.000000000000000000",
# "margin_static":"1.000000000000000000",
# "margin_position":"0",
# "margin_frozen":"1.000000000000000000",
# "profit_real":"0E-18",
# "profit_unreal":"0",
# "withdraw_available":"0",
# "risk_rate":"15.666666666666666666",
# "contract_detail":[
# (...)
# ]
# },
# "ts":"1645521118946"
# }
#
request = {}
if market['future'] and market['inverse']:
request['symbol'] = market['settleId']
else:
if marginType == 'cross':
request['margin_account'] = 'USDT' # only allowed value
request['contract_code'] = market['id']
response = getattr(self, method)(self.extend(request, query))
data = self.safe_value(response, 'data')
account = None
if marginType == 'cross':
account = data
else:
account = self.safe_value(data, 0)
omitted = self.omit(account, ['positions'])
positions = self.safe_value(account, 'positions')
position = None
if market['future'] and market['inverse']:
for i in range(0, len(positions)):
entry = positions[i]
if entry['contract_code'] == market['id']:
position = entry
break
else:
position = self.safe_value(positions, 0)
timestamp = self.safe_integer(response, 'ts')
parsed = self.parse_position(self.extend(position, omitted))
return self.extend(parsed, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
def parse_ledger_entry_type(self, type):
types = {
'trade': 'trade',
'etf': 'trade',
'transact-fee': 'fee',
'fee-deduction': 'fee',
'transfer': 'transfer',
'credit': 'credit',
'liquidation': 'trade',
'interest': 'credit',
'deposit': 'deposit',
'withdraw': 'withdrawal',
'withdraw-fee': 'fee',
'exchange': 'exchange',
'other-types': 'transfer',
'rebate': 'rebate',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# {
# "accountId": 10000001,
# "currency": "usdt",
# "transactAmt": 10.000000000000000000,
# "transactType": "transfer",
# "transferType": "margin-transfer-out",
# "transactId": 0,
# "transactTime": 1629882331066,
# "transferer": 28483123,
# "transferee": 13496526
# }
#
id = self.safe_string(item, 'transactId')
currencyId = self.safe_string(item, 'currency')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_number(item, 'transactAmt')
transferType = self.safe_string(item, 'transferType')
type = self.parse_ledger_entry_type(transferType)
direction = self.safe_string(item, 'direction')
timestamp = self.safe_integer(item, 'transactTime')
datetime = self.iso8601(timestamp)
account = self.safe_string(item, 'accountId')
return {
'id': id,
'direction': direction,
'account': account,
'referenceId': id,
'referenceAccount': account,
'type': type,
'currency': code,
'amount': amount,
'timestamp': timestamp,
'datetime': datetime,
'before': None,
'after': None,
'status': None,
'fee': None,
'info': item,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
accountId = self.fetch_account_id_by_type('spot', params)
request = {
'accountId': accountId,
# 'currency': code,
# 'transactTypes': 'all', # default all
# 'startTime': 1546272000000,
# 'endTime': 1546272000000,
# 'sort': asc, # asc, desc
# 'limit': 100, # range 1-500
# 'fromId': 323 # first record ID in self query for pagination
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit # max 500
response = self.spotPrivateGetV2AccountLedger(self.extend(request, params))
#
# {
# "code": 200,
# "message": "success",
# "data": [
# {
# "accountId": 10000001,
# "currency": "usdt",
# "transactAmt": 10.000000000000000000,
# "transactType": "transfer",
# "transferType": "margin-transfer-out",
# "transactId": 0,
# "transactTime": 1629882331066,
# "transferer": 28483123,
# "transferee": 13496526
# },
# {
# "accountId": 10000001,
# "currency": "usdt",
# "transactAmt": -10.000000000000000000,
# "transactType": "transfer",
# "transferType": "margin-transfer-in",
# "transactId": 0,
# "transactTime": 1629882096562,
# "transferer": 13496526,
# "transferee": 28483123
# }
# ],
# "nextId": 1624316679,
# "ok": True
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ledger(data, currency, since, limit)
def fetch_leverage_tiers(self, symbols=None, params={}):
self.load_markets()
response = self.contractPublicGetLinearSwapApiV1SwapAdjustfactor(params)
#
# {
# "status": "ok",
# "data": [
# {
# "symbol": "MANA",
# "contract_code": "MANA-USDT",
# "margin_mode": "isolated",
# "trade_partition": "USDT",
# "list": [
# {
# "lever_rate": 75,
# "ladders": [
# {
# "ladder": 0,
# "min_size": 0,
# "max_size": 999,
# "adjust_factor": 0.7
# },
# ...
# ]
# }
# ...
# ]
# },
# ...
# ]
# }
#
data = self.safe_value(response, 'data')
return self.parse_leverage_tiers(data, symbols, 'contract_code')
def fetch_market_leverage_tiers(self, symbol, params={}):
self.load_markets()
request = {}
if symbol is not None:
market = self.market(symbol)
if not market['contract']:
                raise BadRequest(self.id + ' fetchMarketLeverageTiers() supports contract markets only')
request['contract_code'] = market['id']
response = self.contractPublicGetLinearSwapApiV1SwapAdjustfactor(self.extend(request, params))
#
# {
# "status": "ok",
# "data": [
# {
# "symbol": "MANA",
# "contract_code": "MANA-USDT",
# "margin_mode": "isolated",
# "trade_partition": "USDT",
# "list": [
# {
# "lever_rate": 75,
# "ladders": [
# {
# "ladder": 0,
# "min_size": 0,
# "max_size": 999,
# "adjust_factor": 0.7
# },
# ...
# ]
# }
# ...
# ]
# },
# ...
# ]
# }
#
data = self.safe_value(response, 'data')
tiers = self.parse_leverage_tiers(data, [symbol], 'contract_code')
return self.safe_value(tiers, symbol)
def parse_leverage_tiers(self, response, symbols, marketIdKey):
result = {}
for i in range(0, len(response)):
item = response[i]
list = self.safe_value(item, 'list', [])
tiers = []
currency = self.safe_string(item, 'trade_partition')
id = self.safe_string(item, marketIdKey)
symbol = self.safe_symbol(id)
if self.in_array(symbols, symbol):
for j in range(0, len(list)):
obj = list[j]
leverage = self.safe_string(obj, 'lever_rate')
ladders = self.safe_value(obj, 'ladders', [])
for k in range(0, len(ladders)):
bracket = ladders[k]
adjustFactor = self.safe_string(bracket, 'adjust_factor')
tiers.append({
'tier': self.safe_integer(bracket, 'ladder'),
'currency': self.safe_currency_code(currency),
'notionalFloor': self.safe_number(bracket, 'min_size'),
'notionalCap': self.safe_number(bracket, 'max_size'),
'maintenanceMarginRate': self.parse_number(Precise.string_div(adjustFactor, leverage)),
'maxLeverage': self.parse_number(leverage),
'info': bracket,
})
result[symbol] = tiers
return result
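    # Hedged usage sketch (hypothetical unified symbol, not part of the original file): reading
    # the tier structure produced by parse_leverage_tiers() above.
    @staticmethod
    def _example_read_tiers_sketch(tiers, symbol='MANA/USDT:USDT'):
        rows = []
        for tier in tiers.get(symbol, []):
            rows.append((tier['tier'], tier['notionalFloor'], tier['notionalCap'],
                         tier['maxLeverage'], tier['maintenanceMarginRate']))
        return rows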
| 46.944699 | 355 | 0.467312 |
4a1c1095efb36bca53cc80eebea7e585d050641e
| 286 |
py
|
Python
|
Release/PythonLib/Lib/action.py
|
wenhulove333/ScutServer
|
338a50ff577c0e2ef2276a2883a8bfe28517c79b
|
[
"MIT"
] | 2 |
2017-05-27T13:32:28.000Z
|
2019-05-28T15:11:33.000Z
|
Sample/GameRanking/Server/src/GameRanking.Server/PyScript/Lib/action.py
|
Jesse1205/Scut
|
3df3adbcd0588fa2657ff110380210236ae45dae
|
[
"Unlicense"
] | null | null | null |
Sample/GameRanking/Server/src/GameRanking.Server/PyScript/Lib/action.py
|
Jesse1205/Scut
|
3df3adbcd0588fa2657ff110380210236ae45dae
|
[
"Unlicense"
] | 4 |
2016-08-27T05:26:16.000Z
|
2019-12-27T07:07:09.000Z
|
class HttpParam():
"""httpGet参数"""
def __init__(self):
self.Result = True
class DataResult(object):
"""Action处理结果"""
def __init__(self):
self.Result = True
def getGbLen(text):
    """Return the length of mixed Chinese/English text (gb2312 bytes, so Chinese characters count as 2)."""
    return len(text.encode('gb2312'))
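# Hedged example (not part of the original file): each Chinese character occupies two gb2312
# bytes, so a mixed string such as "ab中文" has gb length 6 while len() would report 4.
def _example_gb_length():
    return getGbLen("ab中文")  # == 6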
| 17.875 | 37 | 0.548951 |
4a1c10c526ed0937eca297af06283145d7ad89e5
| 6,464 |
py
|
Python
|
GitMarco/graphics/plotly.py
|
GitMarco27/GitMarco
|
2d9dd93a73a6d7b68d63222512a646cdd988909e
|
[
"MIT"
] | null | null | null |
GitMarco/graphics/plotly.py
|
GitMarco27/GitMarco
|
2d9dd93a73a6d7b68d63222512a646cdd988909e
|
[
"MIT"
] | null | null | null |
GitMarco/graphics/plotly.py
|
GitMarco27/GitMarco
|
2d9dd93a73a6d7b68d63222512a646cdd988909e
|
[
"MIT"
] | null | null | null |
import plotly.graph_objects as go
import numpy as np
class Scatter3D(object):
def __init__(self,
x,
y,
z,
):
"""
:param x: np.ndarray or list
:param y: np.ndarray or list
:param z: np.ndarray or list
Create a custom 3D scatter plot with plotly
"""
self.x = x
self.y = y
self.z = z
def plot(self,
color=None,
title: str = '',
xlabel: str = '',
ylabel: str = '',
legend_title: str = '',
size: tuple = (800, 600),
marker_size: int = 5,
x_range=None,
y_range=None,
z_range=None,
n_ticks: int = 10,
margin=None,
line_width: float = .0,
line_color: str = 'black',
alpha: float = 0.8,
show: bool = False,
cmin: float = 0.,
cmax: float = 1.,
colorscale: str = 'Turbo'):
"""
:param line_color: line color
:param line_width: line width
:param colorscale: color scale
:param cmax: maximum value of the colorbar
:param cmin: minimum value of the colorbar
:param alpha: alpha
:param margin: scene margins
:param n_ticks: number of ticks on every axis
:param x_range: x range
:param z_range: z range
:param y_range: y range
:param marker_size: marker size
:param color: nodal values for the color scale
:param title: figure title
:param xlabel: xlabel
:param ylabel: ylabel
:param legend_title: legend_title
:param size: size of the figure
:param show: show (or not) the figure
:return fig: figure instance
        """
# color = color.reshape(-1, 1) if color is not None else color
if margin is None:
margin = dict(r=10, l=10, b=10, t=10)
if z_range is None:
z_range = [-1, 1]
if y_range is None:
y_range = [-1, 1]
if x_range is None:
x_range = [-1, 1]
if isinstance(self.x, list) and isinstance(self.y, list) and isinstance(self.z, list):
data = [go.Scatter3d(x=self.x[i], y=self.y[i], z=self.z[i],
mode='markers',
marker=dict(size=marker_size,
color=color[i],
colorscale=colorscale,
opacity=alpha,
colorbar=dict(thickness=20),
cmin=cmin,
cmax=cmax,
line=dict(width=line_width,
color=line_color)
)) for i in range(len(self.x))]
else:
data = [go.Scatter3d(x=self.x, y=self.y, z=self.z,
mode='markers',
marker=dict(size=marker_size,
color=color,
colorscale=colorscale,
opacity=alpha,
colorbar=dict(thickness=20),
cmin=cmin,
cmax=cmax,
line=dict(width=line_width,
color=line_color)
))]
fig = go.Figure(data=data,
layout=go.Layout(
width=size[0],
height=size[1],
))
fig.update_layout(
title=title,
xaxis_title=xlabel,
yaxis_title=ylabel,
legend_title=legend_title,
scene=dict(
xaxis=dict(nticks=n_ticks, range=x_range, ),
yaxis=dict(nticks=n_ticks, range=y_range, ),
zaxis=dict(nticks=n_ticks, range=z_range, ), ),
margin=margin
)
fig.show() if show else None
return fig
def mesh_3d(
x, y, z, i, j, k,
color: np.ndarray = None,
title: str = '',
xlabel: str = '',
ylabel: str = '',
legend_title: str = '',
size: tuple = (800, 600),
x_range=None,
y_range=None,
z_range=None,
n_ticks: int = 10,
margin=None,
show: bool = False,
cmin: float = 0.,
cmax: float = 1.,
colorscale: str = 'Turbo',
flatshading: bool = True,
showscale: bool = False,
paper_bgcolor: str = 'rgb(1,1,1)',
title_x: float = .5,
font_color: str = 'white',
show_axis: bool = False
):
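    """Create a custom 3D mesh plot with plotly.

    Most parameters mirror Scatter3D.plot; i, j and k are the vertex indices of the
    triangular faces passed straight to go.Mesh3d.
    """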
if margin is None:
margin = dict(r=10, l=10, b=10, t=50)
if z_range is None:
z_range = [-10, 10]
if y_range is None:
y_range = [-10, 10]
if x_range is None:
x_range = [-10, 10]
mesh3d = go.Mesh3d(
x=x,
y=y,
z=z,
i=i,
j=j,
k=k,
flatshading=flatshading,
colorscale=colorscale,
intensity=color,
name=title,
showscale=showscale,
cmax=cmax,
cmin=cmin,
)
layout = go.Layout(
paper_bgcolor=paper_bgcolor,
xaxis_title=xlabel,
yaxis_title=ylabel,
legend_title=legend_title,
title_x=title_x,
title_text=title,
font_color=font_color,
width=size[0],
height=size[1],
# scene_camera=dict(eye=dict(x=1.25, y=-1.25, z=1)),
scene_xaxis_visible=show_axis,
scene_yaxis_visible=show_axis,
scene_zaxis_visible=show_axis,
scene=dict(
xaxis=dict(nticks=n_ticks, range=x_range, ),
yaxis=dict(nticks=n_ticks, range=y_range, ),
zaxis=dict(nticks=n_ticks, range=z_range, ), ),
margin=margin
)
fig = go.Figure(data=[mesh3d], layout=layout)
fig.show() if show else None
return fig
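# Hedged usage sketch (not part of the original module): a minimal demo of both helpers on toy
# data, kept behind a __main__ guard so that importing the module stays side-effect free.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    pts = rng.uniform(-1, 1, size=(100, 3))
    Scatter3D(pts[:, 0], pts[:, 1], pts[:, 2]).plot(color=pts[:, 2], title='demo scatter', show=False)
    # a single triangle is enough to exercise mesh_3d's i/j/k face indices
    mesh_3d(x=[0, 1, 0], y=[0, 0, 1], z=[0, 0, 0], i=[0], j=[1], k=[2],
            color=np.array([0.0, 0.5, 1.0]), title='demo mesh', show=False)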
| 32.32 | 94 | 0.44539 |
4a1c1163051ced61d61b5c49ec396849deac51dc
| 29,214 |
py
|
Python
|
pymatgen/analysis/chemenv/utils/tests/test_graph_utils.py
|
munrojm/pymatgen
|
95514da2c1f4bd0ee897e657de768ca987fe05e9
|
[
"MIT"
] | null | null | null |
pymatgen/analysis/chemenv/utils/tests/test_graph_utils.py
|
munrojm/pymatgen
|
95514da2c1f4bd0ee897e657de768ca987fe05e9
|
[
"MIT"
] | 3 |
2021-11-11T16:01:44.000Z
|
2021-11-24T22:24:18.000Z
|
pymatgen/analysis/chemenv/utils/tests/test_graph_utils.py
|
munrojm/pymatgen
|
95514da2c1f4bd0ee897e657de768ca987fe05e9
|
[
"MIT"
] | null | null | null |
__author__ = "waroquiers"
import numpy as np
from pymatgen.analysis.chemenv.connectivity.environment_nodes import EnvironmentNode
from pymatgen.analysis.chemenv.utils.graph_utils import (
MultiGraphCycle,
SimpleGraphCycle,
get_delta,
)
from pymatgen.util.testing import PymatgenTest
class FakeNode(object):
def __init__(self, isite):
self.isite = isite
class FakeNodeWithEqMethod(object):
def __init__(self, isite):
self.isite = isite
def __eq__(self, other):
return self.isite == other.isite
def __hash__(self):
return 0
class FakeNodeWithEqLtMethods(object):
def __init__(self, isite):
self.isite = isite
def __eq__(self, other):
return self.isite == other.isite
def __lt__(self, other):
return self.isite < other.isite
def __str__(self):
return "FakeNode_{:d}".format(self.isite)
def __hash__(self):
return 0
class FakeNodeWithEqLtMethodsBis(FakeNodeWithEqLtMethods):
pass
class FakeNodeWithEqMethodWrongSortable(object):
def __init__(self, isite):
self.isite = isite
def __eq__(self, other):
return self.isite == other.isite
def __hash__(self):
return 0
def __lt__(self, other):
return self.isite % 2 < other.isite % 2
class GraphUtilsTest(PymatgenTest):
def test_get_delta(self):
n1 = FakeNode(3)
n2 = FakeNode(7)
edge_data = {"start": 3, "end": 7, "delta": [2, 6, 4]}
self.assertTrue(np.allclose(get_delta(n1, n2, edge_data), [2, 6, 4]))
edge_data = {"start": 7, "end": 3, "delta": [2, 6, 4]}
self.assertTrue(np.allclose(get_delta(n1, n2, edge_data), [-2, -6, -4]))
with self.assertRaisesRegex(
ValueError,
"Trying to find a delta between two nodes with an edge that seems not to link these nodes.",
):
edge_data = {"start": 6, "end": 3, "delta": [2, 6, 4]}
get_delta(n1, n2, edge_data)
with self.assertRaisesRegex(
ValueError,
"Trying to find a delta between two nodes with an edge that seems not to link these nodes.",
):
edge_data = {"start": 7, "end": 2, "delta": [2, 6, 4]}
get_delta(n1, n2, edge_data)
def test_simple_graph_cycle(self):
sg_cycle1 = SimpleGraphCycle([0, 1, 2, 3])
# Test equality
sg_cycle2 = SimpleGraphCycle([1, 2, 3, 0])
self.assertEqual(sg_cycle1, sg_cycle2)
sg_cycle2 = SimpleGraphCycle([2, 3, 0, 1])
self.assertEqual(sg_cycle1, sg_cycle2)
sg_cycle2 = SimpleGraphCycle([3, 0, 1, 2])
self.assertEqual(sg_cycle1, sg_cycle2)
# Test reversed cycles
sg_cycle2 = SimpleGraphCycle([0, 3, 2, 1])
self.assertEqual(sg_cycle1, sg_cycle2)
sg_cycle2 = SimpleGraphCycle([3, 2, 1, 0])
self.assertEqual(sg_cycle1, sg_cycle2)
sg_cycle2 = SimpleGraphCycle([2, 1, 0, 3])
self.assertEqual(sg_cycle1, sg_cycle2)
sg_cycle2 = SimpleGraphCycle([1, 0, 3, 2])
self.assertEqual(sg_cycle1, sg_cycle2)
# Test different cycle lengths inequality
sg_cycle2 = SimpleGraphCycle([0, 1, 2, 3, 4])
self.assertNotEqual(sg_cycle1, sg_cycle2)
# Test equality of self-loops
self.assertEqual(SimpleGraphCycle([0]), SimpleGraphCycle([0]))
self.assertNotEqual(SimpleGraphCycle([0]), SimpleGraphCycle([4]))
# Test inequality inversing two nodes
sg_cycle2 = SimpleGraphCycle([0, 1, 3, 2])
self.assertNotEqual(sg_cycle1, sg_cycle2)
# Test inequality with different nodes
sg_cycle2 = SimpleGraphCycle([4, 1, 2, 3])
self.assertNotEqual(sg_cycle1, sg_cycle2)
# Test hashing function
self.assertEqual(hash(sg_cycle1), 4)
self.assertEqual(hash(SimpleGraphCycle([0])), 1)
self.assertEqual(hash(SimpleGraphCycle([0, 1, 3, 6])), 4)
self.assertEqual(hash(SimpleGraphCycle([0, 1, 2])), 3)
# Test from_edges function
# 3-nodes cycle
edges = [(0, 2), (4, 2), (0, 4)]
sg_cycle = SimpleGraphCycle.from_edges(edges=edges, edges_are_ordered=False)
self.assertEqual(sg_cycle, SimpleGraphCycle([4, 0, 2]))
# Self-loop cycle
edges = [(2, 2)]
sg_cycle = SimpleGraphCycle.from_edges(edges=edges)
self.assertEqual(sg_cycle, SimpleGraphCycle([2]))
# 5-nodes cycle
edges = [(0, 2), (4, 7), (2, 7), (4, 5), (5, 0)]
sg_cycle = SimpleGraphCycle.from_edges(edges=edges, edges_are_ordered=False)
self.assertEqual(sg_cycle, SimpleGraphCycle([2, 7, 4, 5, 0]))
# two identical 3-nodes cycles
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : Duplicate nodes.",
):
edges = [(0, 2), (4, 2), (0, 4), (0, 2), (4, 2), (0, 4)]
SimpleGraphCycle.from_edges(edges=edges, edges_are_ordered=False)
# two cycles in from_edges
with self.assertRaisesRegex(ValueError, expected_regex="Could not construct a cycle from edges."):
edges = [(0, 2), (4, 2), (0, 4), (1, 3), (6, 7), (3, 6), (1, 7)]
SimpleGraphCycle.from_edges(edges=edges, edges_are_ordered=False)
with self.assertRaisesRegex(ValueError, expected_regex="Could not construct a cycle from edges."):
edges = [(0, 2), (4, 6), (2, 7), (4, 5), (5, 0)]
SimpleGraphCycle.from_edges(edges=edges, edges_are_ordered=False)
with self.assertRaisesRegex(ValueError, expected_regex="Could not construct a cycle from edges."):
edges = [(0, 2), (4, 7), (2, 7), (4, 10), (5, 0)]
SimpleGraphCycle.from_edges(edges=edges, edges_are_ordered=False)
# Test as_dict from_dict and len method
sg_cycle = SimpleGraphCycle([0, 1, 2, 3])
self.assertEqual(sg_cycle, SimpleGraphCycle.from_dict(sg_cycle.as_dict()))
self.assertEqual(len(sg_cycle), 4)
sg_cycle = SimpleGraphCycle([4])
self.assertEqual(sg_cycle, SimpleGraphCycle.from_dict(sg_cycle.as_dict()))
self.assertEqual(len(sg_cycle), 1)
sg_cycle = SimpleGraphCycle([4, 2, 6, 7, 9, 3, 15])
self.assertEqual(sg_cycle, SimpleGraphCycle.from_dict(sg_cycle.as_dict()))
self.assertEqual(len(sg_cycle), 7)
# Check validation at instance creation time
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : Duplicate nodes.",
):
SimpleGraphCycle([0, 2, 4, 6, 2])
# Check the validate method
# Nodes not sortable
sgc = SimpleGraphCycle(
[FakeNodeWithEqMethod(1), FakeNodeWithEqMethod(0), FakeNodeWithEqMethod(2)],
validate=False,
ordered=False,
)
self.assertFalse(sgc.ordered)
self.assertEqual(
sgc.nodes,
(FakeNodeWithEqMethod(1), FakeNodeWithEqMethod(0), FakeNodeWithEqMethod(2)),
)
sgc.validate(check_strict_ordering=False)
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : The nodes are not sortable.",
):
sgc.validate(check_strict_ordering=True)
# Empty cycle not valid
sgc = SimpleGraphCycle([], validate=False, ordered=False)
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : Empty cycle is not valid.",
):
sgc.validate()
# Simple graph cycle with 2 nodes not valid
sgc = SimpleGraphCycle([1, 2], validate=False, ordered=False)
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : Simple graph cycle with 2 nodes is not valid.",
):
sgc.validate()
# Simple graph cycle with nodes that cannot be strictly ordered
sgc = SimpleGraphCycle(
[
FakeNodeWithEqMethodWrongSortable(0),
FakeNodeWithEqMethodWrongSortable(1),
FakeNodeWithEqMethodWrongSortable(2),
FakeNodeWithEqMethodWrongSortable(3),
],
validate=False,
ordered=False,
)
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : "
"The list of nodes in the cycle cannot be strictly ordered.",
):
sgc.validate(check_strict_ordering=True)
# Check the order method
sgc = SimpleGraphCycle(
[
FakeNodeWithEqMethodWrongSortable(0),
FakeNodeWithEqMethodWrongSortable(1),
FakeNodeWithEqMethodWrongSortable(2),
FakeNodeWithEqMethodWrongSortable(3),
],
validate=False,
ordered=False,
)
sgc.order(raise_on_fail=False)
self.assertFalse(sgc.ordered)
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : "
"The list of nodes in the cycle cannot be strictly ordered.",
):
sgc.order(raise_on_fail=True)
sgc = SimpleGraphCycle(
[
FakeNodeWithEqMethod(8),
FakeNodeWithEqMethod(0),
FakeNodeWithEqMethod(3),
FakeNodeWithEqMethod(6),
],
validate=False,
ordered=False,
)
sgc.order(raise_on_fail=False)
self.assertFalse(sgc.ordered)
with self.assertRaisesRegex(
ValueError,
expected_regex="SimpleGraphCycle is not valid : The nodes are not sortable.",
):
sgc.order(raise_on_fail=True)
sgc = SimpleGraphCycle(
[
FakeNodeWithEqLtMethods(8),
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
],
validate=False,
ordered=False,
)
sgc.order(raise_on_fail=True)
self.assertTrue(sgc.ordered)
self.assertEqual(
sgc.nodes,
(
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
FakeNodeWithEqLtMethods(8),
),
)
sgc = SimpleGraphCycle(
[
FakeNodeWithEqLtMethodsBis(8),
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
],
validate=False,
ordered=False,
)
sgc.order(raise_on_fail=False)
self.assertFalse(sgc.ordered)
self.assertEqual(
sgc.nodes,
(
FakeNodeWithEqLtMethodsBis(8),
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
),
)
with self.assertRaisesRegex(
ValueError,
expected_regex="Could not order simple graph cycle as the nodes are of different classes.",
):
sgc.order(raise_on_fail=True)
sgc = SimpleGraphCycle([FakeNodeWithEqLtMethods(85)], validate=False, ordered=False)
self.assertFalse(sgc.ordered)
sgc.order()
self.assertTrue(sgc.ordered)
self.assertEqual(sgc.nodes, tuple([FakeNodeWithEqLtMethods(85)]))
sgc = SimpleGraphCycle(
[
FakeNodeWithEqLtMethods(8),
FakeNodeWithEqLtMethods(2),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
FakeNodeWithEqLtMethods(4),
FakeNodeWithEqLtMethods(1),
FakeNodeWithEqLtMethods(64),
FakeNodeWithEqLtMethods(32),
],
validate=False,
ordered=False,
)
self.assertFalse(sgc.ordered)
sgc.order()
self.assertTrue(sgc.ordered)
self.assertEqual(
sgc.nodes,
tuple(
[
FakeNodeWithEqLtMethods(1),
FakeNodeWithEqLtMethods(4),
FakeNodeWithEqLtMethods(3),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(2),
FakeNodeWithEqLtMethods(8),
FakeNodeWithEqLtMethods(32),
FakeNodeWithEqLtMethods(64),
]
),
)
# Test str method
self.assertEqual(
str(sgc),
"Simple cycle with nodes :\n"
"FakeNode_1\n"
"FakeNode_4\n"
"FakeNode_3\n"
"FakeNode_6\n"
"FakeNode_2\n"
"FakeNode_8\n"
"FakeNode_32\n"
"FakeNode_64",
)
def test_multigraph_cycle(self):
mg_cycle1 = MultiGraphCycle([2, 4, 3, 5], [1, 0, 2, 0])
# Check is_valid method
is_valid, msg = MultiGraphCycle._is_valid(mg_cycle1)
self.assertTrue(is_valid)
self.assertEqual(msg, "")
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : "
"Number of nodes different from number of "
"edge indices.",
):
MultiGraphCycle([0, 2, 4], [0, 0]) # number of nodes is different from number of edge_indices
with self.assertRaisesRegex(ValueError, expected_regex="MultiGraphCycle is not valid : Duplicate nodes."):
MultiGraphCycle([0, 2, 4, 3, 2], [0, 0, 0, 0, 0]) # duplicated nodes
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : "
"Cycles with two nodes cannot use the same "
"edge for the cycle.",
):
MultiGraphCycle([3, 5], [1, 1]) # number of nodes is different from number of edge_indices
# Testing equality
# Test different cycle lengths inequality
mg_cycle2 = MultiGraphCycle([2, 3, 4, 5, 6], [1, 0, 2, 0, 0])
self.assertFalse(mg_cycle1 == mg_cycle2)
# Test equality
mg_cycle2 = MultiGraphCycle([2, 4, 3, 5], [1, 0, 2, 0])
self.assertTrue(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([4, 3, 5, 2], [0, 2, 0, 1])
self.assertTrue(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([3, 5, 2, 4], [2, 0, 1, 0])
self.assertTrue(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([5, 2, 4, 3], [0, 1, 0, 2])
self.assertTrue(mg_cycle1 == mg_cycle2)
# Test equality (reversed)
mg_cycle2 = MultiGraphCycle([2, 5, 3, 4], [0, 2, 0, 1])
self.assertTrue(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([5, 3, 4, 2], [2, 0, 1, 0])
self.assertTrue(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([3, 4, 2, 5], [0, 1, 0, 2])
self.assertTrue(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([4, 2, 5, 3], [1, 0, 2, 0])
self.assertTrue(mg_cycle1 == mg_cycle2)
# Test inequality
mg_cycle2 = MultiGraphCycle([2, 5, 3, 4], [0, 1, 0, 1])
self.assertFalse(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([2, 5, 3, 4], [1, 0, 2, 0])
self.assertFalse(mg_cycle1 == mg_cycle2)
mg_cycle2 = MultiGraphCycle([3, 5, 2, 4], [1, 0, 2, 0])
self.assertFalse(mg_cycle1 == mg_cycle2)
# Test Self-loop case
self.assertTrue(MultiGraphCycle([2], [1]) == MultiGraphCycle([2], [1]))
self.assertFalse(MultiGraphCycle([1], [1]) == MultiGraphCycle([2], [1]))
self.assertFalse(MultiGraphCycle([2], [1]) == MultiGraphCycle([2], [0]))
self.assertFalse(MultiGraphCycle([2], [1]) == MultiGraphCycle([1], [1]))
self.assertFalse(MultiGraphCycle([2], [0]) == MultiGraphCycle([2], [1]))
# Test special case with two nodes
self.assertTrue(MultiGraphCycle([2, 4], [1, 3]) == MultiGraphCycle([2, 4], [1, 3]))
self.assertTrue(MultiGraphCycle([2, 4], [1, 3]) == MultiGraphCycle([2, 4], [3, 1]))
self.assertTrue(MultiGraphCycle([2, 4], [1, 3]) == MultiGraphCycle([4, 2], [3, 1]))
self.assertTrue(MultiGraphCycle([2, 4], [1, 3]) == MultiGraphCycle([4, 2], [1, 3]))
self.assertFalse(MultiGraphCycle([2, 4], [1, 3]) == MultiGraphCycle([4, 2], [1, 2]))
self.assertFalse(MultiGraphCycle([2, 4], [1, 3]) == MultiGraphCycle([4, 0], [1, 3]))
# Test hashing function
self.assertEqual(hash(mg_cycle1), 4)
self.assertEqual(hash(MultiGraphCycle([0], [0])), 1)
self.assertEqual(hash(MultiGraphCycle([0, 3], [0, 1])), 2)
self.assertEqual(hash(MultiGraphCycle([0, 3, 5, 7, 8], [0, 1, 0, 0, 0])), 5)
# Test as_dict from_dict and len method
mg_cycle = MultiGraphCycle([0, 1, 2, 3], [1, 2, 0, 3])
self.assertTrue(mg_cycle == MultiGraphCycle.from_dict(mg_cycle.as_dict()))
self.assertEqual(len(mg_cycle), 4)
mg_cycle = MultiGraphCycle([2, 5, 3, 4, 1, 0], [0, 0, 2, 0, 1, 0])
self.assertTrue(mg_cycle == MultiGraphCycle.from_dict(mg_cycle.as_dict()))
self.assertEqual(len(mg_cycle), 6)
mg_cycle = MultiGraphCycle([8], [1])
self.assertTrue(mg_cycle == MultiGraphCycle.from_dict(mg_cycle.as_dict()))
self.assertEqual(len(mg_cycle), 1)
# Check the validate method
# Number of nodes and edges do not match
mgc = MultiGraphCycle(
nodes=[
FakeNodeWithEqMethod(1),
FakeNodeWithEqMethod(0),
FakeNodeWithEqMethod(2),
],
edge_indices=[0, 0],
validate=False,
ordered=False,
)
self.assertFalse(mgc.ordered)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : "
"Number of nodes different from "
"number of edge indices.",
):
mgc.validate(check_strict_ordering=False)
# Empty cycle not valid
mgc = MultiGraphCycle([], edge_indices=[], validate=False, ordered=False)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : Empty cycle is not valid.",
):
mgc.validate()
# Multi graph cycle with duplicate nodes not valid
mgc = MultiGraphCycle(
[FakeNodeWithEqMethod(1), FakeNodeWithEqMethod(0), FakeNodeWithEqMethod(1)],
edge_indices=[0, 1, 0],
validate=False,
ordered=False,
)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : Duplicate nodes.",
):
mgc.validate()
# Multi graph cycle with two nodes cannot use the same edge
mgc = MultiGraphCycle(
[FakeNodeWithEqMethod(1), FakeNodeWithEqMethod(0)],
edge_indices=[1, 1],
validate=False,
ordered=False,
)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : "
"Cycles with two nodes cannot use the same edge for the cycle.",
):
mgc.validate()
# Nodes not sortable
mgc = MultiGraphCycle(
[FakeNodeWithEqMethod(1), FakeNodeWithEqMethod(0), FakeNodeWithEqMethod(2)],
edge_indices=[0, 0, 0],
validate=False,
ordered=False,
)
self.assertFalse(mgc.ordered)
self.assertEqual(
mgc.nodes,
(FakeNodeWithEqMethod(1), FakeNodeWithEqMethod(0), FakeNodeWithEqMethod(2)),
)
mgc.validate(check_strict_ordering=False)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : The nodes are not sortable.",
):
mgc.validate(check_strict_ordering=True)
# Multi graph cycle with nodes that cannot be strictly ordered
mgc = MultiGraphCycle(
[
FakeNodeWithEqMethodWrongSortable(0),
FakeNodeWithEqMethodWrongSortable(1),
FakeNodeWithEqMethodWrongSortable(2),
FakeNodeWithEqMethodWrongSortable(3),
],
edge_indices=[0, 0, 0, 0],
validate=False,
ordered=False,
)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : "
"The list of nodes in the cycle cannot be strictly ordered.",
):
mgc.validate(check_strict_ordering=True)
# Check the order method
mgc = MultiGraphCycle(
[
FakeNodeWithEqMethodWrongSortable(0),
FakeNodeWithEqMethodWrongSortable(1),
FakeNodeWithEqMethodWrongSortable(2),
FakeNodeWithEqMethodWrongSortable(3),
],
edge_indices=[0, 0, 0, 0],
validate=False,
ordered=False,
)
mgc.order(raise_on_fail=False)
self.assertFalse(mgc.ordered)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : "
"The list of nodes in the cycle cannot be strictly ordered.",
):
mgc.order(raise_on_fail=True)
mgc = MultiGraphCycle(
[
FakeNodeWithEqMethod(8),
FakeNodeWithEqMethod(0),
FakeNodeWithEqMethod(3),
FakeNodeWithEqMethod(6),
],
edge_indices=[0, 0, 0, 0],
validate=False,
ordered=False,
)
mgc.order(raise_on_fail=False)
self.assertFalse(mgc.ordered)
with self.assertRaisesRegex(
ValueError,
expected_regex="MultiGraphCycle is not valid : The nodes are not sortable.",
):
mgc.order(raise_on_fail=True)
mgc = MultiGraphCycle(
[
FakeNodeWithEqLtMethods(8),
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
],
edge_indices=[2, 5, 3, 7],
validate=False,
ordered=False,
)
mgc.order(raise_on_fail=True)
self.assertTrue(mgc.ordered)
self.assertEqual(
mgc.nodes,
(
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
FakeNodeWithEqLtMethods(8),
),
)
self.assertEqual(mgc.edge_indices, (5, 3, 7, 2))
mgc = MultiGraphCycle(
[
FakeNodeWithEqLtMethodsBis(8),
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
],
edge_indices=[2, 5, 3, 7],
validate=False,
ordered=False,
)
mgc.order(raise_on_fail=False)
self.assertFalse(mgc.ordered)
self.assertEqual(
mgc.nodes,
(
FakeNodeWithEqLtMethodsBis(8),
FakeNodeWithEqLtMethods(0),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
),
)
self.assertEqual(mgc.edge_indices, (2, 5, 3, 7))
with self.assertRaisesRegex(
ValueError,
expected_regex="Could not order simple graph cycle as the nodes are of different classes.",
):
mgc.order(raise_on_fail=True)
mgc = MultiGraphCycle(
[FakeNodeWithEqLtMethods(85)],
edge_indices=[7],
validate=False,
ordered=False,
)
self.assertFalse(mgc.ordered)
mgc.order()
self.assertTrue(mgc.ordered)
self.assertEqual(mgc.nodes, tuple([FakeNodeWithEqLtMethods(85)]))
self.assertEqual(mgc.edge_indices, tuple([7]))
mgc = MultiGraphCycle(
[
FakeNodeWithEqLtMethods(8),
FakeNodeWithEqLtMethods(2),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(3),
FakeNodeWithEqLtMethods(4),
FakeNodeWithEqLtMethods(1),
FakeNodeWithEqLtMethods(64),
FakeNodeWithEqLtMethods(32),
],
edge_indices=[2, 0, 4, 1, 0, 3, 5, 2],
validate=False,
ordered=False,
)
self.assertFalse(mgc.ordered)
mgc.order()
self.assertTrue(mgc.ordered)
self.assertEqual(
mgc.nodes,
tuple(
[
FakeNodeWithEqLtMethods(1),
FakeNodeWithEqLtMethods(4),
FakeNodeWithEqLtMethods(3),
FakeNodeWithEqLtMethods(6),
FakeNodeWithEqLtMethods(2),
FakeNodeWithEqLtMethods(8),
FakeNodeWithEqLtMethods(32),
FakeNodeWithEqLtMethods(64),
]
),
)
self.assertEqual(mgc.edge_indices, tuple([0, 1, 4, 0, 2, 2, 5, 3]))
# Testing all cases for a length-4 cycle
nodes_ref = tuple(FakeNodeWithEqLtMethods(inode) for inode in [0, 1, 2, 3])
edges_ref = (3, 6, 9, 12)
for inodes, iedges in [
((0, 1, 2, 3), (3, 6, 9, 12)),
((1, 2, 3, 0), (6, 9, 12, 3)),
((2, 3, 0, 1), (9, 12, 3, 6)),
((3, 0, 1, 2), (12, 3, 6, 9)),
((3, 2, 1, 0), (9, 6, 3, 12)),
((2, 1, 0, 3), (6, 3, 12, 9)),
((1, 0, 3, 2), (3, 12, 9, 6)),
((0, 3, 2, 1), (12, 9, 6, 3)),
]:
mgc = MultiGraphCycle(
[FakeNodeWithEqLtMethods(inode) for inode in inodes],
edge_indices=[iedge for iedge in iedges],
)
strnodes = ", ".join([str(i) for i in inodes])
self.assertEqual(
mgc.nodes,
nodes_ref,
msg="Nodes not equal for inodes = ({})".format(", ".join([str(i) for i in inodes])),
)
self.assertEqual(
mgc.edge_indices,
edges_ref,
msg="Edges not equal for inodes = ({})".format(strnodes),
)
class EnvironmentNodesGraphUtilsTest(PymatgenTest):
def test_cycle(self):
e1 = EnvironmentNode(central_site="Si", i_central_site=0, ce_symbol="T:4")
e2 = EnvironmentNode(central_site="Si", i_central_site=3, ce_symbol="T:4")
e3 = EnvironmentNode(central_site="Si", i_central_site=2, ce_symbol="T:4")
e4 = EnvironmentNode(central_site="Si", i_central_site=5, ce_symbol="T:4")
e5 = EnvironmentNode(central_site="Si", i_central_site=1, ce_symbol="T:4")
# Tests of SimpleGraphCycle with EnvironmentNodes
c1 = SimpleGraphCycle([e2])
c2 = SimpleGraphCycle([e2])
self.assertEqual(c1, c2)
c1 = SimpleGraphCycle([e1])
c2 = SimpleGraphCycle([e2])
self.assertNotEqual(c1, c2)
c1 = SimpleGraphCycle([e1, e2, e3])
c2 = SimpleGraphCycle([e2, e1, e3])
self.assertEqual(c1, c2)
c2 = SimpleGraphCycle([e2, e3, e1])
self.assertEqual(c1, c2)
c1 = SimpleGraphCycle([e3, e2, e4, e1, e5])
c2 = SimpleGraphCycle([e1, e4, e2, e3, e5])
self.assertEqual(c1, c2)
c2 = SimpleGraphCycle([e2, e3, e5, e1, e4])
self.assertEqual(c1, c2)
c1 = SimpleGraphCycle([e2, e3, e4, e1, e5])
c2 = SimpleGraphCycle([e2, e3, e5, e1, e4])
self.assertNotEqual(c1, c2)
# Tests of MultiGraphCycle with EnvironmentNodes
c1 = MultiGraphCycle([e1], [2])
c2 = MultiGraphCycle([e1], [2])
self.assertEqual(c1, c2)
c2 = MultiGraphCycle([e1], [1])
self.assertNotEqual(c1, c2)
c2 = MultiGraphCycle([e2], [2])
self.assertNotEqual(c1, c2)
c1 = MultiGraphCycle([e1, e2], [0, 1])
c2 = MultiGraphCycle([e1, e2], [1, 0])
self.assertEqual(c1, c2)
c2 = MultiGraphCycle([e2, e1], [1, 0])
self.assertEqual(c1, c2)
c2 = MultiGraphCycle([e2, e1], [0, 1])
self.assertEqual(c1, c2)
c2 = MultiGraphCycle([e2, e1], [2, 1])
self.assertNotEqual(c1, c2)
c1 = MultiGraphCycle([e1, e2, e3], [0, 1, 2])
c2 = MultiGraphCycle([e2, e1, e3], [0, 2, 1])
self.assertEqual(c1, c2)
c2 = MultiGraphCycle([e2, e3, e1], [1, 2, 0])
self.assertEqual(c1, c2)
if __name__ == "__main__":
import unittest
unittest.main()
| 36.932996 | 114 | 0.562539 |
4a1c118d7864fab7e5c8abd48ac72f2724c82d07
| 2,327 |
py
|
Python
|
server/predictions.py
|
xhuang98/Dtect
|
929d01945fd2768032dbb84d8ba1f62069132172
|
[
"MIT"
] | 1 |
2021-12-25T01:43:43.000Z
|
2021-12-25T01:43:43.000Z
|
server/predictions.py
|
xhuang98/Dtect
|
929d01945fd2768032dbb84d8ba1f62069132172
|
[
"MIT"
] | null | null | null |
server/predictions.py
|
xhuang98/Dtect
|
929d01945fd2768032dbb84d8ba1f62069132172
|
[
"MIT"
] | 1 |
2021-09-02T15:30:04.000Z
|
2021-09-02T15:30:04.000Z
|
from data.models import Authentication
from data_analysis import *
from data_analysis.los_alamos_processing import *
from data_analysis.predict import evaluate
from data_analysis import data_config
import logging
import sentry_sdk
from sentry_sdk.integrations.logging import LoggingIntegration
sentry_logging = LoggingIntegration(
level=logging.DEBUG,
event_level=logging.ERROR
)
sentry_sdk.init(
dsn="https://de11a1016667481096a0b4fd02346103@o358880.ingest.sentry.io/5450617",
integrations=[sentry_logging]
)
def calculate_prediction(db, entry: Authentication):
"""
Given an authentication entry from the database, return model prediction.
"""
logging.info(f"Predicting for entry {entry.id}")
query = db.session.query(Authentication).filter_by(source_user=entry.source_user).order_by(Authentication.time)
history = [event for event in query if int(event.time) < int(entry.time)]
if len(history) == 0:
return False
while len(history) < data_config.window_size:
history.append(history[-1])
try:
return evaluate([{
'timestamp': event.time,
'src_user': event.source_user,
'dest_user': event.destination_user,
'src_comp': event.source_computer,
'dest_comp': event.destination_computer,
'auth_type': event.authentication_type,
'logon_type': event.logon_type,
'auth_orientation': event.auth_orientation == 'LogOn',
'success': event.auth_result == 'Success'
} for event in history[:data_config.window_size]])
except ValueError:
return None
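# Hedged illustration (not part of the original module) of the window-padding rule used in
# calculate_prediction() above: a non-empty history shorter than the model window is
# right-padded with its last event until it reaches data_config.window_size.
def _pad_history(history, window_size):
    padded = list(history)
    while padded and len(padded) < window_size:
        padded.append(padded[-1])
    return padded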
def update_predictions(db, context):
"""
Go through entries in the authentication database and update prediction values.
"""
#with app.app_context():
with context:
logging.info(f"Prediction update initiated...")
query = db.session.query(Authentication).filter(Authentication.flagged == None).all()
for event in query:
prediction = calculate_prediction(db, event)
event.flagged = prediction
db.session.commit()
logging.info(f"Prediction update complete")
if __name__ == '__main__':
from api import app, db
update_predictions(db, app.app_context())
| 35.8 | 115 | 0.680705 |
4a1c12d9680468f517a5e1d27519323ada414672
| 765 |
py
|
Python
|
tests/factories.py
|
jacobSingh/allbackgammongroup
|
587dc751461a575dc8f0cf3cedf83eee2e996520
|
[
"BSD-3-Clause"
] | null | null | null |
tests/factories.py
|
jacobSingh/allbackgammongroup
|
587dc751461a575dc8f0cf3cedf83eee2e996520
|
[
"BSD-3-Clause"
] | null | null | null |
tests/factories.py
|
jacobSingh/allbackgammongroup
|
587dc751461a575dc8f0cf3cedf83eee2e996520
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Factories to help in tests."""
from factory import PostGenerationMethodCall, Sequence
from factory.alchemy import SQLAlchemyModelFactory
from abg_stats.database import db
from abg_stats.user.models import User
class BaseFactory(SQLAlchemyModelFactory):
"""Base factory."""
class Meta:
"""Factory configuration."""
abstract = True
sqlalchemy_session = db.session
class UserFactory(BaseFactory):
"""User factory."""
username = Sequence(lambda n: 'user{0}'.format(n))
email = Sequence(lambda n: 'user{0}@example.com'.format(n))
password = PostGenerationMethodCall('set_password', 'example')
active = True
class Meta:
"""Factory configuration."""
model = User
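# Hedged usage sketch (not part of the original file): assumes it runs inside a test with an
# app context and an initialized db.session, which is what BaseFactory above is bound to.
def _example_build_user():
    user = UserFactory()   # e.g. user0 / user0@example.com with password 'example'
    db.session.commit()    # persist, since the factory session is db.session
    return user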
| 23.90625 | 66 | 0.679739 |
4a1c13e76c247373764208d09224ea14fffd5fb5
| 3,130 |
py
|
Python
|
setup.py
|
zidarsk8/orange3-data-sets
|
4c2606ab3a449b6770727f34c1f045d44f337345
|
[
"MIT"
] | 1 |
2018-01-03T03:01:48.000Z
|
2018-01-03T03:01:48.000Z
|
setup.py
|
zidarsk8/world_bank_data
|
4c2606ab3a449b6770727f34c1f045d44f337345
|
[
"MIT"
] | 2 |
2017-02-17T09:55:40.000Z
|
2017-07-28T15:33:56.000Z
|
setup.py
|
zidarsk8/world_bank_data
|
4c2606ab3a449b6770727f34c1f045d44f337345
|
[
"MIT"
] | 3 |
2017-02-08T02:18:42.000Z
|
2021-04-07T01:20:39.000Z
|
#!/usr/bin/env python
from setuptools import setup
ENTRY_POINTS = {
# Entry point used to specify packages containing tutorials accessible
# from welcome screen. Tutorials are saved Orange Workflows (.ows files).
'orange.widgets.tutorials': (
# Syntax: any_text = path.to.package.containing.tutorials
'exampletutorials = orangecontrib.wbd.tutorials',
),
# Entry point used to specify packages containing widgets.
'orange.widgets': (
# Syntax: category name = path.to.package.containing.widgets
# Widget category specification can be seen in
# orangecontrib/wbd/widgets/__init__.py
'Data Sets = orangecontrib.wbd.widgets',
),
# Register widget help
"orange.canvas.help": (
'html-index = orangecontrib.wbd.widgets:WIDGET_HELP_PATH',)
}
KEYWORDS = [
# [PyPi](https://pypi.python.org) packages with keyword "orange3 add-on"
# can be installed using the Orange Add-on Manager
"orange3 add-on",
"world bank data",
"indicator api",
]
def get_description():
with open("README.rst") as f:
return f.read()
if __name__ == '__main__':
setup(
name="Orange3-Datasets",
version="0.1.3",
license="MIT",
author="Miha Zidar",
author_email="zidarsk8@gmail.com",
description=("Orange interface for World Bank Data Indicator and "
"Climate APIs"),
long_description=get_description(),
url="https://github.com/zidarsk8/orange3-datasets",
download_url=("https://github.com/zidarsk8/orange3-datasets/tarball/"
"0.1.3"),
packages=[
'orangecontrib',
'orangecontrib.wbd',
'orangecontrib.wbd.tutorials',
'orangecontrib.wbd.widgets',
],
package_data={
'orangecontrib.wbd': ['tutorials/*.ows'],
'orangecontrib.wbd.widgets': ['icons/*'],
},
install_requires=[
'Orange3',
'numpy',
'observable',
'simple_wbd>=0.5.1',
],
extras_require={
'doc': [
'sphinx',
'sphinx_rtd_theme',
],
'test': [
'nose',
'nose-cov',
]
},
entry_points=ENTRY_POINTS,
keywords=KEYWORDS,
namespace_packages=['orangecontrib'],
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: X11 Applications :: Qt',
'Environment :: Plugins',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
],
)
| 31.938776 | 77 | 0.565815 |
4a1c147d8f0afcb3b6fc183c0c7a23dfb1920ae2
| 13,203 |
py
|
Python
|
main.py
|
TusharJ3011/Shop-Management-System
|
baaae7d537d0eeafc33d9a81f06424a8afc7c841
|
[
"MIT"
] | null | null | null |
main.py
|
TusharJ3011/Shop-Management-System
|
baaae7d537d0eeafc33d9a81f06424a8afc7c841
|
[
"MIT"
] | null | null | null |
main.py
|
TusharJ3011/Shop-Management-System
|
baaae7d537d0eeafc33d9a81f06424a8afc7c841
|
[
"MIT"
] | null | null | null |
from flask import render_template, url_for, Flask, request, redirect, session
import datetime as dt
import os
# Local Files
import dbms
import security
app = Flask(__name__)
app.secret_key = os.environ['APP_KEY']
WORKING_AGE_LIMIT_DAYS = int(14 * 365.25)
# Global variable for FindProduct.html
global fProduct
fProduct = dbms.FindProduct()
# Global variables for Checkout.html
global checkoutProduct
checkoutProduct = dbms.Checkout()
global restock, newType, newProduct, newBrand, todayData, dateData, monthData, yearData, runningAcc
restock = dbms.AdminProducts()
newType = dbms.AdminProducts()
newProduct = dbms.AdminProducts()
newBrand = dbms.AdminProducts()
todayData = ["", False]
dateData = ["", False]
monthData = ["", False]
yearData = ["", False]
runningAcc = None
@app.route('/')
def home():
global runningAcc
runningAcc = None
global fProduct
fProduct = dbms.FindProduct()
return render_template('Home.html')
@app.route('/findproduct')
@app.route('/findproduct/')
def findProduct():
global runningAcc
runningAcc = None
global fProduct
return render_template('FindProduct.html', fProduct=fProduct)
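# Hedged smoke-test sketch (not part of the original app): the two read-only pages above can be
# exercised with Flask's built-in test client, assuming the templates and dbms helpers resolve.
def _smoke_test_browse_routes():
    with app.test_client() as client:
        assert client.get('/').status_code == 200
        assert client.get('/findproduct').status_code == 200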
@app.route('/fpro', methods=['POST', 'GET'])
def fpro():
global runningAcc
runningAcc = None
global fProduct
data = request.form
fProduct.resetAfterfpro()
fProduct.sBrand = data['brand']
fProduct.getBrandProducts()
return redirect(url_for('findProduct'))
@app.route('/ftype', methods=['POST', 'GET'])
def ftype():
global runningAcc
runningAcc = None
global fProduct
data = request.form
fProduct.resetAfterftype()
fProduct.sProduct = data['product']
fProduct.getProductTypes()
return redirect(url_for('findProduct'))
@app.route('/resetfindproducts', methods=['POST', 'GET'])
def resetFindProducts():
global runningAcc
runningAcc = None
global fProduct
fProduct = dbms.FindProduct()
return redirect(url_for('findProduct'))
@app.route('/searchfindproducts', methods=['POST', 'GET'])
def searchFindProducts():
global runningAcc
runningAcc = None
global fProduct
data = request.form
fProduct.sType = data['type']
fProduct.getDetails()
return redirect(url_for('findProduct'))
@app.route('/checkout')
@app.route('/checkout/')
def checkout():
global runningAcc
runningAcc = None
global fProduct
fProduct = dbms.FindProduct()
global checkoutProduct
length = len(checkoutProduct.tags)
gst = 0
tax = gst * checkoutProduct.cost
checkoutProduct.amount = checkoutProduct.cost + tax
return render_template('Checkout.html', tax=tax, length=length, checkoutProduct=checkoutProduct)
@app.route('/cpro', methods=['POST', 'GET'])
def cpro():
global runningAcc
runningAcc = None
global checkoutProduct
data = request.form
checkoutProduct.resetAfterfpro()
checkoutProduct.sBrand = data['brand']
checkoutProduct.getBrandProducts()
return redirect(url_for('checkout'))
@app.route('/ctype', methods=['POST', 'GET'])
def ctype():
global runningAcc
runningAcc = None
global checkoutProduct
data = request.form
checkoutProduct.resetAfterftype()
checkoutProduct.sProduct = data['product']
checkoutProduct.getProductTypes()
return redirect(url_for('checkout'))
@app.route('/resetcheckout', methods=['POST', 'GET'])
def resetcheckout():
global runningAcc
runningAcc = None
global checkoutProduct
checkoutProduct.cleanCheckout()
return redirect(url_for('checkout'))
@app.route('/addcheckout', methods=['POST', 'GET'])
def addcheckout():
global runningAcc
runningAcc = None
global checkoutProduct
data = request.form
checkoutProduct.sType = data['type']
quantity = int(data['quantity'])
checkoutProduct.getCheckoutAddDetails(quantity)
return redirect(url_for('resetcheckout'))
@app.route('/removetag/<int:index>/<id>/<int:quantity>', methods=['POST', 'GET'])
def removetag(index, id, quantity):
global runningAcc
runningAcc = None
global checkoutProduct
checkoutProduct.cost -= checkoutProduct.tags[index][4]
checkoutProduct.tags.pop(index)
tempIndex = checkoutProduct.addedProducts[0].index(id)
checkoutProduct.addedProducts[1][tempIndex] = checkoutProduct.addedProducts[1][tempIndex] - quantity
if checkoutProduct.addedProducts[1][tempIndex] == 0:
checkoutProduct.addedProducts[0].pop(tempIndex)
checkoutProduct.addedProducts[1].pop(tempIndex)
checkoutProduct.addedProducts[2].pop(tempIndex)
return redirect(url_for('checkout'))
@app.route('/proceedcheckout')
def proceedcheckout():
return redirect(url_for('verifycheckout'))
@app.route('/checkout/verify', methods=['POST', 'GET'])
def verifycheckout():
global runningAcc
runningAcc = None
if request.method == 'POST':
data = request.form
shop = data['shop']
employee = data['employee']
password = data['password']
if security.checkEmployee(employee=employee, password=password):
global checkoutProduct
string = ''
for IDs in checkoutProduct.addedProducts[0]:
string += IDs + ','
date = dt.datetime.today()
date = date.strftime('%Y-%m-%d')
list = []
list.append(int(shop[-4:-1]))
list.append(int(employee[-4:-1]))
list.append(date)
list.append(string)
list.append(checkoutProduct.amount)
bill = tuple(list)
checkoutProduct.decStock()
checkoutProduct.checkout(bill)
checkoutProduct = dbms.Checkout()
return redirect(url_for('checkout'))
else:
shops = dbms.getShops()
employees = dbms.getEmployees()
return render_template('checkoutLogin.html', errorCode=True, shops=shops, employees=employees)
shops = dbms.getShops()
employees = dbms.getEmployees()
return render_template('checkoutLogin.html', errorCode=False, shops=shops, employees=employees)
@app.route('/admin')
@app.route('/admin/')
def admin():
try:
if session['adminLogin']:
today = dt.date.today()
workingYear = today - dt.timedelta(days=WORKING_AGE_LIMIT_DAYS)
today = today.strftime("%Y-%m-%d")
workingYear = workingYear.strftime("%Y-%m-%d")
global restock, newType, newProduct, newBrand, todayData, dateData, monthData, yearData, runningAcc
return render_template('admin.html', restock=restock, newType=newType, newProduct=newProduct,
newBrand=newBrand, todayData=todayData, dateData=dateData, monthData=monthData,
yearData=yearData, today=today, emindate=workingYear, runningAcc=runningAcc)
else:
return redirect(url_for('adminlogin'))
except (RuntimeError, KeyError):
return redirect(url_for('adminlogin'))
@app.route('/admin/login', methods=['POST', 'GET'])
@app.route('/admin/login/', methods=['POST', 'GET'])
def adminlogin():
global runningAcc
runningAcc = None
if request.method == 'POST':
data = request.form
username = data['username']
password = data['password']
errorCodes = security.checkAdmin(username=username, password=password)
if not errorCodes[0] and not errorCodes[1]:
session['adminLogin'] = True
return redirect(url_for('admin'))
else:
return render_template('Login.html', errorCodes=errorCodes)
errorCodes = [False, False]
return render_template('Login.html', errorCodes=errorCodes)
@app.route('/admin/logout')
def adminlogout():
global runningAcc
runningAcc = None
session['adminLogin'] = False
return redirect(url_for('admin'))
@app.route('/logs')
@app.route('/logs/')
def logs():
logs = dbms.getLogs()
try:
if session['adminLogin']:
return render_template('logs.html', logs=logs)
else:
return redirect(url_for('adminlogin'))
except (RuntimeError, KeyError):
return redirect(url_for('adminlogin'))
@app.route('/admin/restock/products', methods=['POST', 'GET'])
def restockproduct():
global restock, runningAcc
runningAcc = 'restock'
data = request.form
restock.sBrand = data['brand']
restock.getBrandProducts()
return redirect(url_for('admin'))
@app.route('/admin/restock/types', methods=['POST', 'GET'])
def restocktypes():
global restock, runningAcc
runningAcc = 'restock'
data = request.form
restock.sProduct = data['product']
restock.getProductTypes()
return redirect(url_for('admin'))
@app.route('/admin/restock/add', methods=['POST', 'GET'])
def restockadd():
global restock, runningAcc
runningAcc = 'restock'
data = request.form
restock.sType = data['type']
restock.quantity = int(data['quantity'])
restock.price = int(data['price'])
restock.manDate = data['mDate']
restock.expDate = data['eDate']
restock.restock()
return redirect(url_for('admin'))
@app.route('/admin/addnt/products', methods=['POST', 'GET'])
def addntproduct():
global newType, runningAcc
runningAcc = 'addnt'
data = request.form
newType.sBrand = data['brand']
newType.getBrandProducts()
return redirect(url_for('admin'))
@app.route('/admin/addnt/add', methods=['POST', 'GET'])
def addntadd():
global newType, runningAcc
runningAcc = 'addnt'
data = request.form
newType.sProduct = data['product']
newType.sType = data['type']
newType.quantity = int(data['quantity'])
newType.price = int(data['price'])
newType.manDate = data['mDate']
newType.expDate = data['eDate']
newType.newType()
return redirect(url_for('admin'))
@app.route('/admin/addnp/add', methods=['POST', 'GET'])
def addnpadd():
global newProduct, runningAcc
runningAcc = 'addnp'
data = request.form
newProduct.sBrand = data['brand']
newProduct.sProduct = data['product']
newProduct.sType = data['type']
newProduct.quantity = int(data['quantity'])
newProduct.price = int(data['price'])
newProduct.manDate = data['mDate']
newProduct.expDate = data['eDate']
newProduct.newProduct()
return redirect(url_for('admin'))
@app.route('/admin/addnb/add', methods=['POST', 'GET'])
def addnbadd():
global newBrand, runningAcc
runningAcc = 'addnb'
data = request.form
newBrand.sBrand = data['brand']
newBrand.sProduct = data['product']
newBrand.sType = data['type']
newBrand.quantity = int(data['quantity'])
newBrand.price = int(data['price'])
newBrand.manDate = data['mDate']
newBrand.expDate = data['eDate']
newBrand.newBrand()
return redirect(url_for('admin'))
@app.route('/admin/revenue/today', methods=['POST', 'GET'])
def revtoday():
global todayData, runningAcc
runningAcc = 'revtoday'
data = request.form
password = data['password']
todayData = dbms.getTodayRevenue(password)
return redirect(url_for('admin'))
@app.route('/admin/revenue/date', methods=['POST', 'GET'])
def revdate():
global dateData, runningAcc
runningAcc = 'revdate'
data = request.form
date = data['date']
password = data['password']
dateData = dbms.getDailyRevenue(password, date)
return redirect(url_for('admin'))
@app.route('/admin/revenue/year', methods=['POST', 'GET'])
def revyear():
global yearData, runningAcc
runningAcc = 'revyear'
data = request.form
year = int(data['year'])
password = data['password']
yearData = dbms.getYearlyRevenue(password, year)
return redirect(url_for('admin'))
@app.route('/admin/revenue/month', methods=['POST', 'GET'])
def revmonth():
global monthData, runningAcc
runningAcc = 'revmonth'
data = request.form
month = int(data['month'])
year = int(data['year'])
password = data['password']
monthData = dbms.getMonthlyRevenue(password, year, month)
return redirect(url_for('admin'))
@app.route('/admin/branch', methods=['POST', 'GET'])
def addbranch():
global runningAcc
runningAcc = 'addbranch'
data = request.form
shopName = data['shopname']
shopAddress = data['shopaddress']
dbms.setNewBranch((shopName, shopAddress))
return redirect(url_for('admin'))
@app.route('/admin/employee', methods=['POST', 'GET'])
def addemployee():
global runningAcc
runningAcc = 'addemployee'
data = request.form
dbms.setNewEmployee((data['ename'], data['epassword'], str(data['phoneNumber']), data['edob'], data['eaddress']))
return redirect(url_for('admin'))
if __name__ == '__main__':
app.run(debug=True)
| 30.704651 | 118 | 0.643566 |
4a1c15d41fa7cd8a96f407c3e1c4920d50fd64ca
| 753 |
py
|
Python
|
posts/migrations/0002_alter_post_author_alter_post_group.py
|
DenisFrunza/yatube_project
|
b60fc3d70a9cb75d3e85c4e2edf20ae3ad624964
|
[
"BSD-3-Clause"
] | null | null | null |
posts/migrations/0002_alter_post_author_alter_post_group.py
|
DenisFrunza/yatube_project
|
b60fc3d70a9cb75d3e85c4e2edf20ae3ad624964
|
[
"BSD-3-Clause"
] | null | null | null |
posts/migrations/0002_alter_post_author_alter_post_group.py
|
DenisFrunza/yatube_project
|
b60fc3d70a9cb75d3e85c4e2edf20ae3ad624964
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 4.0 on 2022-01-05 07:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
('posts', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='post',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
),
migrations.AlterField(
model_name='post',
name='group',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='posts', to='posts.group'),
),
]
| 28.961538 | 145 | 0.62417 |
4a1c162d561fd310716db5bbe68721a9e5216c28
| 3,100 |
py
|
Python
|
test/AS/ASFLAGS.py
|
jcassagnol-public/scons
|
8eaf585a893757e68c9e4a6e25d375021fa5eab7
|
[
"MIT"
] | 1,403 |
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
test/AS/ASFLAGS.py
|
jcassagnol-public/scons
|
8eaf585a893757e68c9e4a6e25d375021fa5eab7
|
[
"MIT"
] | 3,708 |
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
test/AS/ASFLAGS.py
|
jcassagnol-public/scons
|
8eaf585a893757e68c9e4a6e25d375021fa5eab7
|
[
"MIT"
] | 281 |
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
_exe = TestSCons._exe
test.file_fixture('mylink.py')
test.file_fixture(['fixture', 'myas_args.py'])
o = ' -x'
o_c = ' -x -c'
if sys.platform == 'win32':
import SCons.Tool.MSCommon as msc
if msc.msvc_exists():
o_c = ' -x'
test.write('SConstruct', """
env = Environment(LINK = r'%(_python_)s mylink.py',
LINKFLAGS = [],
AS = r'%(_python_)s myas_args.py', ASFLAGS = '-x',
CC = r'%(_python_)s myas_args.py')
env.Program(target = 'test1', source = 'test1.s')
env.Program(target = 'test2', source = 'test2.S')
env.Program(target = 'test3', source = 'test3.asm')
env.Program(target = 'test4', source = 'test4.ASM')
env.Program(target = 'test5', source = 'test5.spp')
env.Program(target = 'test6', source = 'test6.SPP')
""" % locals())
test.write('test1.s', r"""This is a .s file.
#as
#link
""")
test.write('test2.S', r"""This is a .S file.
#as
#link
""")
test.write('test3.asm', r"""This is a .asm file.
#as
#link
""")
test.write('test4.ASM', r"""This is a .ASM file.
#as
#link
""")
test.write('test5.spp', r"""This is a .spp file.
#as
#link
""")
test.write('test6.SPP', r"""This is a .SPP file.
#as
#link
""")
test.run(arguments = '.', stderr = None)
if TestSCons.case_sensitive_suffixes('.s', '.S'):
o_css = o_c
else:
o_css = o
test.must_match('test1' + _exe, "%s\nThis is a .s file.\n" % o)
test.must_match('test2' + _exe, "%s\nThis is a .S file.\n" % o_css)
test.must_match('test3' + _exe, "%s\nThis is a .asm file.\n" % o)
test.must_match('test4' + _exe, "%s\nThis is a .ASM file.\n" % o)
test.must_match('test5' + _exe, "%s\nThis is a .spp file.\n" % o_c)
test.must_match('test6' + _exe, "%s\nThis is a .SPP file.\n" % o_c)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 27.927928 | 73 | 0.676129 |
4a1c16490b55a253cd627688af7d995f4626d1c6
| 2,994 |
py
|
Python
|
lib/notification_service/tests/test_notification_server.py
|
Sxnan/ai-flow
|
a307e21f5982924a8be58366b3bb9da914a7c3c8
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
lib/notification_service/tests/test_notification_server.py
|
Sxnan/ai-flow
|
a307e21f5982924a8be58366b3bb9da914a7c3c8
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
lib/notification_service/tests/test_notification_server.py
|
Sxnan/ai-flow
|
a307e21f5982924a8be58366b3bb9da914a7c3c8
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import unittest
from notification_service.base_notification import BaseEvent
from notification_service.client import NotificationClient
from notification_service.server import NotificationServerRunner
from notification_service.util import db
from notification_service.server_config import NotificationServerConfig
_SQLITE_FILE = 'ns.db'
config_file = os.path.join(os.path.dirname(__file__), 'notification_server.yaml')
class TestNotificationServer(unittest.TestCase):
def _clean_db(self):
if os.path.exists(_SQLITE_FILE):
os.remove(_SQLITE_FILE)
def setUp(self) -> None:
self._clean_db()
config = NotificationServerConfig(config_file)
db.clear_engine_and_session()
db.create_all_tables(config.db_uri)
def tearDown(self) -> None:
self._clean_db()
db.clear_engine_and_session()
def test_run_notification_server(self):
server = NotificationServerRunner(config_file=config_file)
server.start()
client = NotificationClient(server_uri='localhost:50052')
client.send_event(BaseEvent(key='a', value='a'))
self.assertEqual(1, len(client.list_events(key='a')))
self.assertEqual('a', client.list_events(key='a')[0].value)
server.stop()
def test_run_ha_notification_server(self):
server1 = NotificationServerRunner(config_file=config_file)
server1.config.port = 50053
server1.config.enable_ha = True
server1.config.advertised_uri = 'localhost:50053'
server1.start()
server2 = NotificationServerRunner(config_file=config_file)
server2.config.port = 50054
server2.config.enable_ha = True
server2.config.advertised_uri = 'localhost:50054'
server2.start()
client = NotificationClient(server_uri='localhost:50053,localhost:50054', enable_ha=True)
client.send_event(BaseEvent(key='b', value='b'))
self.assertEqual(1, len(client.list_events(key='b')))
self.assertEqual('b', client.list_events(key='b')[0].value)
client.disable_high_availability()
server1.stop()
server2.stop()
if __name__ == '__main__':
unittest.main()
| 37.898734 | 97 | 0.724449 |
4a1c164929d24c31609b49c4de2e7b0653268bc9
| 400 |
py
|
Python
|
phonebook_api/api/permissions.py
|
AygulNas/phonebook_test_lubimovka
|
94574e42597c3bbdacfba93e9b3c25a593bccf91
|
[
"MIT"
] | null | null | null |
phonebook_api/api/permissions.py
|
AygulNas/phonebook_test_lubimovka
|
94574e42597c3bbdacfba93e9b3c25a593bccf91
|
[
"MIT"
] | null | null | null |
phonebook_api/api/permissions.py
|
AygulNas/phonebook_test_lubimovka
|
94574e42597c3bbdacfba93e9b3c25a593bccf91
|
[
"MIT"
] | null | null | null |
from rest_framework import permissions
class IsAllowedChangeCompanies(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if (request.method in permissions.SAFE_METHODS
or request.method == 'POST'):
return True
return (view.action in ['update', 'partial_update', 'destroy'] and
(request.user == obj.creator))
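# Hypothetical wiring (not part of this module): attach the permission to a DRF ViewSet so
# only a company's creator may update, partially update or delete it, while safe methods and
# POST stay open to everyone.
#
#   class CompanyViewSet(viewsets.ModelViewSet):
#       permission_classes = (IsAllowedChangeCompanies,)
#       ...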
| 36.363636 | 74 | 0.6725 |
4a1c1676721d890d76124e5c91e116998bf50780
| 7,554 |
py
|
Python
|
somke_detection.py
|
Turbo-Dynamic-s/Intelligent-Video-Monitoring
|
6eb732ebaaa8abe56b3b363cebedc1a3d7bba40c
|
[
"Apache-2.0"
] | null | null | null |
somke_detection.py
|
Turbo-Dynamic-s/Intelligent-Video-Monitoring
|
6eb732ebaaa8abe56b3b363cebedc1a3d7bba40c
|
[
"Apache-2.0"
] | null | null | null |
somke_detection.py
|
Turbo-Dynamic-s/Intelligent-Video-Monitoring
|
6eb732ebaaa8abe56b3b363cebedc1a3d7bba40c
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.python.keras.applications import ResNet50
from tensorflow.python.keras.applications import VGG16
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Flatten, GlobalAveragePooling2D
from tensorflow.python.keras.applications.resnet50 import preprocess_input
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from sklearn.utils import class_weight
from tensorflow.python.keras import optimizers
import cv2
import math
from IPython.display import clear_output
# %matplotlib inline  -- IPython magic from the original notebook; not valid in a plain .py file
import os
print(os.listdir("../input/fire-detection-from-cctv/data/data/img_data"))
print(os.listdir("../input/fire-detection-from-cctv/data/data/video_data/test_videos"))
print(os.listdir("../input"))
print(os.listdir("../input/resnet50"))
print(os.listdir("../input/vgg16"))
# --- Data Preparation ---
IMG_SIZE = 224
NUM_EPOCHS = 20
NUM_CLASSES = 3
TRAIN_BATCH_SIZE = 77
TEST_BATCH_SIZE = 1
# --- Model Creation ---
def create_model( model_size ):
my_new_model = Sequential()
if model_size == 'L':
resnet_weights_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
resnet = ResNet50(include_top=False, pooling='avg', weights=resnet_weights_path)
my_new_model.add(resnet)
my_new_model.layers[0].trainable = False
else:
vgg_weights_path = '../input/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
vgg= VGG16(include_top=False, weights=vgg_weights_path )
vgg.summary()
my_new_model.add(vgg)
my_new_model.add(GlobalAveragePooling2D())
my_new_model.layers[0].trainable = False
my_new_model.layers[1].trainable = False
my_new_model.add(Dense(NUM_CLASSES, activation='softmax'))
opt = optimizers.adam()
my_new_model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
return my_new_model
# --- Training ---
def train_model( model ):
data_generator_with_aug = ImageDataGenerator(preprocessing_function=preprocess_input,
width_shift_range=0.1,
height_shift_range=0.1,
                                                 # shear_range=0.01,
zoom_range=[0.9, 1.25],
horizontal_flip=True,
vertical_flip=False,
data_format='channels_last',
brightness_range=[0.5, 1.5]
)
train_generator = data_generator_with_aug.flow_from_directory(
'../input/fire-detection-from-cctv/data/data/img_data/train',
target_size=(IMG_SIZE, IMG_SIZE),
batch_size=TRAIN_BATCH_SIZE,
class_mode='categorical')
validation_generator = data_generator_with_aug.flow_from_directory(
'../input/fire-detection-from-cctv/data/data/img_data/test',
target_size=(IMG_SIZE, IMG_SIZE),
batch_size=TEST_BATCH_SIZE,
shuffle = False,
class_mode='categorical')
H = model.fit_generator(
train_generator,
steps_per_epoch=train_generator.n/TRAIN_BATCH_SIZE,
epochs=NUM_EPOCHS,
validation_data=validation_generator,
validation_steps=1
)
plot_history( H, NUM_EPOCHS )
return model, train_generator,validation_generator
def get_label_dict(train_generator ):
labels = (train_generator.class_indices)
label_dict = dict((v,k) for k,v in labels.items())
return label_dict
def get_labels( generator ):
generator.reset()
labels = []
for i in range(len(generator)):
labels.extend(np.array(generator[i][1]) )
return np.argmax(labels, axis =1)
def get_pred_labels( test_generator):
test_generator.reset()
pred_vec=model.predict_generator(test_generator,
steps=test_generator.n,
verbose=1)
return np.argmax( pred_vec, axis = 1), np.max(pred_vec, axis = 1)
def plot_history( H, NUM_EPOCHS ):
plt.style.use("ggplot")
fig = plt.figure()
fig.set_size_inches(15, 5)
fig.add_subplot(1, 3, 1)
plt.plot(np.arange(0, NUM_EPOCHS), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, NUM_EPOCHS), H.history["val_loss"], label="val_loss")
plt.title("Training Loss and Validation Loss on Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.legend(loc="lower left")
fig.add_subplot(1, 3, 2)
plt.plot(np.arange(0, NUM_EPOCHS), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, NUM_EPOCHS), H.history["acc"], label="train_acc")
plt.title("Training Loss and Accuracy on Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
fig.add_subplot(1, 3, 3)
plt.plot(np.arange(0, NUM_EPOCHS), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, NUM_EPOCHS), H.history["val_acc"], label="val_acc")
plt.title("Validation Loss and Accuracy on Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.show()
# --- Prediction ---
def draw_prediction( frame, class_string ):
x_start = frame.shape[1] -600
cv2.putText(frame, class_string, (x_start, 75), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 0, 0), 2, cv2.LINE_AA)
return frame
def prepare_image_for_prediction( img):
return preprocess_input(img)
model = create_model('L')
trained_model_l, train_generator,validation_generator = train_model(model)
label_dict_l = get_label_dict(train_generator )
model = create_model('S')
trained_model_s, train_generator,validation_generator = train_model(model)
label_dict_s = get_label_dict(train_generator)
def get_display_string(pred_class, label_dict):
txt = ""
for c, confidence in pred_class:
txt += label_dict[c]
if c :
txt += '['+ str(confidence) +']'
return txt
def predict( model, video_path, filename, label_dict ):
vs = cv2.VideoCapture(video_path)
fps = math.floor(vs.get(cv2.CAP_PROP_FPS))
ret_val = True
writer = 0
while True:
ret_val, frame = vs.read()
if not ret_val:
break
resized_frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))
frame_for_pred = prepare_image_for_prediction( resized_frame )
pred_vec = model.predict(frame_for_pred)
pred_class =[]
confidence = np.round(pred_vec.max(),2)
if confidence > 0.4:
pc = pred_vec.argmax()
pred_class.append( (pc, confidence) )
else:
pred_class.append( (0, 0) )
if pred_class:
txt = get_display_string(pred_class, label_dict)
frame = draw_prediction( frame, txt )
if not writer:
fourcc = cv2.VideoWriter_fourcc(*"XVID")
writer = cv2.VideoWriter(filename, fourcc, fps,(frame.shape[1], frame.shape[0]), True)
writer.write(frame)
vs.release()
writer.release()
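# Example invocation (file names are illustrative, not from the dataset): annotate one of the
# bundled test videos with the ResNet50-based model trained above and write the result out.
#
# predict(trained_model_l,
#         '../input/fire-detection-from-cctv/data/data/video_data/test_videos/test0.mp4',
#         'test0_annotated.avi',
#         label_dict_l)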
| 34.81106 | 112 | 0.626688 |
4a1c169f4ebfa985399e2dd8c75c023152b008fe
| 4,389 |
py
|
Python
|
Fig10/plot_angle.py
|
TatsuyaHaga/reversereplaymodel_codes
|
503d545449efab603e18d224fc2f94158d967530
|
[
"MIT"
] | 6 |
2019-01-12T14:16:42.000Z
|
2022-03-07T20:51:03.000Z
|
Fig10/plot_angle.py
|
elifesciences-publications/reversereplaymodel_codes
|
579009d260f32b259994d77c8a66877cf6304dee
|
[
"MIT"
] | null | null | null |
Fig10/plot_angle.py
|
elifesciences-publications/reversereplaymodel_codes
|
579009d260f32b259994d77c8a66877cf6304dee
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import numpy
import scipy.stats
import matplotlib
matplotlib.use("Agg")
import pylab
import seaborn
seaborn.set(context="paper", style="white", palette="deep")
Ntrial_nonrandom=int(sys.argv[1])
Ntrial_random=int(sys.argv[2])
angle_novel=[numpy.load("learningON/trial"+str(i)+"/angle_novel.npy") for i in range(1, Ntrial_nonrandom+1)]
angle_repeat=[numpy.load("learningON/trial"+str(i)+"/angle_repeat.npy") for i in range(1, Ntrial_nonrandom+1)]
angle_random=[numpy.load("learningOFF/trial"+str(i)+"/angle_repeat.npy") for i in range(1, Ntrial_random+1)]
cutdata=lambda data, n: numpy.hstack([x[n] for x in data])
angle_start_novel=cutdata(angle_novel, 0)
angle_start_repeat=cutdata(angle_repeat, 0)
angle_start_random=cutdata(angle_random, 0)
angle_run_novel=cutdata(angle_novel, 1)
angle_run_repeat=cutdata(angle_repeat, 1)
angle_run_random=cutdata(angle_random, 1)
angle_end_novel=cutdata(angle_novel, 2)
angle_end_repeat=cutdata(angle_repeat, 2)
angle_end_random=cutdata(angle_random,2)
#plot
hist_range=[0,180]
xtick=[0,90,180]
Nbin=18
pylab.close()
pylab.figure(figsize=(2.5,1.5))
ymax=0.035
seaborn.distplot(angle_start_novel, bins=Nbin, kde=False, norm_hist=True, label="SWITCH", color="blue", hist_kws={"range": hist_range})
seaborn.distplot(angle_start_repeat, bins=Nbin, kde=False, norm_hist=True, label="REPEAT", color="green", hist_kws={"range": hist_range})
#seaborn.distplot(angle_start_random, bins=Nbin, kde=False, norm_hist=True, label="NOLEARN", color="red", hist_kws={"range": hist_range})
pylab.plot([numpy.mean(angle_start_novel)]*2, [0,ymax], color="blue")
pylab.plot([numpy.mean(angle_start_repeat)]*2, [0,ymax], color="green")
#pylab.plot([numpy.mean(angle_start_random)]*2, [0,ymax], color="red")
pylab.xlabel("Angular displacement [degree]")
pylab.ylabel("Probability")
pylab.xticks(xtick)
pylab.yticks([])
pylab.ylim([0,ymax])
pylab.legend()
pylab.tight_layout()
pylab.savefig("angle_start.svg")
pylab.close()
pylab.figure(figsize=(2.5,1.5))
ymax=0.02
seaborn.distplot(angle_run_novel, bins=Nbin, kde=False, norm_hist=True, label="SWITCH", color="blue", hist_kws={"range": hist_range})
seaborn.distplot(angle_run_repeat, bins=Nbin, kde=False, norm_hist=True, label="REPEAT", color="green", hist_kws={"range": hist_range})
#seaborn.distplot(angle_run_random, bins=Nbin, kde=False, norm_hist=True, label="NOLEARN", color="red", hist_kws={"range": hist_range})
pylab.plot([numpy.mean(angle_run_novel)]*2, [0,ymax], color="blue")
pylab.plot([numpy.mean(angle_run_repeat)]*2, [0,ymax], color="green")
#pylab.plot([numpy.mean(angle_run_random)]*2, [0,ymax], color="red")
pylab.xlabel("Angular displacement [degree]")
pylab.ylabel("Probability")
pylab.xticks(xtick)
pylab.yticks([])
pylab.ylim([0,ymax])
pylab.legend()
pylab.tight_layout()
pylab.savefig("angle_run.svg")
pylab.close()
pylab.figure(figsize=(2.5,1.5))
ymax=0.025
seaborn.distplot(angle_end_novel, bins=Nbin, kde=False, norm_hist=True, label="SWITCH", color="blue", hist_kws={"range": hist_range})
seaborn.distplot(angle_end_repeat, bins=Nbin, kde=False, norm_hist=True, label="REPEAT", color="green", hist_kws={"range": hist_range})
#seaborn.distplot(angle_end_random, bins=Nbin, kde=False, norm_hist=True, label="NOLEARN", color="red", hist_kws={"range": hist_range})
pylab.plot([numpy.mean(angle_end_novel)]*2, [0,ymax], color="blue")
pylab.plot([numpy.mean(angle_end_repeat)]*2, [0,ymax], color="green")
#pylab.plot([numpy.mean(angle_end_random)]*2, [0,ymax], color="red")
pylab.xlabel("Angular displacement [degree]")
pylab.ylabel("Probability")
pylab.xticks(xtick)
pylab.yticks([])
pylab.ylim([0,ymax])
pylab.legend()
pylab.tight_layout()
pylab.savefig("angle_end.svg")
#statistical test
stattest_func=scipy.stats.ranksums
stat=numpy.zeros([9,2])
stat[0,:]=stattest_func(angle_start_novel, angle_start_repeat)
stat[1,:]=stattest_func(angle_start_repeat, angle_start_random)
stat[2,:]=stattest_func(angle_start_novel, angle_start_random)
stat[3,:]=stattest_func(angle_run_novel, angle_run_repeat)
stat[4,:]=stattest_func(angle_run_repeat, angle_run_random)
stat[5,:]=stattest_func(angle_run_novel, angle_run_random)
stat[6,:]=stattest_func(angle_end_novel, angle_end_repeat)
stat[7,:]=stattest_func(angle_end_repeat, angle_end_random)
stat[8,:]=stattest_func(angle_end_novel, angle_end_random)
numpy.savetxt("angle_stat_ranksum.csv", stat, delimiter=",")
| 42.201923 | 137 | 0.766689 |
4a1c18915f5a65c06d50e7da6cf4666cfcb168f4
| 14,706 |
py
|
Python
|
nldrp/experiments/fuse_call_level_loso_opt_feats_KL.py
|
etzinis/nldrp
|
3b6e24aa86a6d43bfd6f753b346739c00c282de3
|
[
"MIT"
] | 6 |
2018-10-27T13:16:36.000Z
|
2020-09-07T17:34:11.000Z
|
nldrp/experiments/fuse_call_level_loso_opt_feats_KL.py
|
etzinis/nldrp
|
3b6e24aa86a6d43bfd6f753b346739c00c282de3
|
[
"MIT"
] | null | null | null |
nldrp/experiments/fuse_call_level_loso_opt_feats_KL.py
|
etzinis/nldrp
|
3b6e24aa86a6d43bfd6f753b346739c00c282de3
|
[
"MIT"
] | 2 |
2018-10-29T16:20:09.000Z
|
2020-02-12T16:06:52.000Z
|
"""!
\brief Utterance level classification schema by concatenating vectors
and performing classification
@author Efthymios Tzinis {etzinis@gmail.com}
@copyright National Technical University of Athens
"""
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import Normalizer
from sklearn.model_selection import StratifiedKFold
from sklearn.externals import joblib
import tabulate
# import elm
import itertools
import gc
import pandas as pd
import argparse
import numpy as np
import pprint
import json
import os
import copy
import sys
nldrp_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../')
sys.path.insert(0, nldrp_dir)
import nldrp.feature_extraction.pipeline.utterance_feature_loader as \
feature_loader
import nldrp.config
def generate_speaker_independent_folds(features_dic):
all_X = np.concatenate([v['x'] for k, v in features_dic.items()],
axis=0)
all_scaler = StandardScaler().fit(all_X)
for te_speaker, te_data in features_dic.items():
x_te = all_scaler.transform(te_data['x'])
x_tr_list = []
Y_tr = []
for tr_speaker, tr_data in features_dic.items():
if tr_speaker == te_speaker:
continue
sp_x = all_scaler.transform(tr_data['x'])
x_tr_list.append(sp_x)
Y_tr += tr_data['y']
X_tr = np.concatenate(x_tr_list, axis=0)
yield x_te, te_data['y'], X_tr, Y_tr
def generate_speaker_dependent_folds(features_dic,
n_splits=5,
random_seed=7):
norm_per_sp_dic = copy.deepcopy(features_dic)
for sp, data in norm_per_sp_dic.items():
this_scaler = StandardScaler().fit(data['x'])
norm_per_sp_dic[sp]['x'] = this_scaler.transform(data['x'])
xy_l = [v for (sp, v) in norm_per_sp_dic.items()]
x_all = np.concatenate([v['x'] for v in xy_l])
y_all = [utt_label for speaker_labels in [v['y'] for v in xy_l]
for utt_label in speaker_labels]
skf = StratifiedKFold(n_splits=n_splits,
shuffle=True,
random_state=random_seed)
for tr_ind, te_ind in skf.split(x_all, y_all):
yield (x_all[te_ind],
[y_all[i] for i in te_ind],
x_all[tr_ind],
[y_all[i] for i in tr_ind])
def generate_speaker_folds(features_dic):
for te_speaker, te_data in features_dic.items():
x_tr_list = []
Y_tr = []
for tr_speaker, tr_data in features_dic.items():
if tr_speaker == te_speaker:
continue
x_tr_list.append(tr_data['x'])
Y_tr += tr_data['y']
X_tr = np.concatenate(x_tr_list, axis=0)
yield te_speaker, te_data['x'], te_data['y'], X_tr, Y_tr
def compute_metrics(Y_predicted, Y_true):
uw_f1 = f1_score(Y_predicted, Y_true, average='macro')
w_f1 = f1_score(Y_predicted, Y_true, average='micro')
uw_rec = recall_score(Y_predicted, Y_true, average='macro')
w_rec = recall_score(Y_predicted, Y_true, average='micro')
uw_prec = precision_score(Y_predicted, Y_true, average='macro')
w_prec = precision_score(Y_predicted, Y_true, average='micro')
w_acc = accuracy_score(Y_predicted, Y_true)
cmat = confusion_matrix(Y_true, Y_predicted)
with np.errstate(divide='ignore'):
uw_acc = (cmat.diagonal() / (1.0 * cmat.sum(axis=1) + 1e-6
)).mean()
if np.isnan(uw_acc):
uw_acc = 0.
metrics_l = [('uw_f1', uw_f1),
#('w_f1', w_f1),
# ('uw_rec', uw_rec),
# ('w_rec', w_rec),
# ('uw_prec', uw_prec),
# ('w_prec', w_prec),
('uw_acc', uw_acc),
('w_acc', w_acc)]
metric_dic = dict(metrics_l)
return metric_dic
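# Toy illustration (labels are made up): compute_metrics(['ang', 'hap', 'ang'], ['ang', 'ang', 'ang'])
# returns a dict with keys 'uw_f1', 'uw_acc' and 'w_acc' (here w_acc = 2/3).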
def configure_models():
# class ELMWrapper(object):
# def __init__(self, **kwargs):
# self.kernel = elm.ELMKernel()
# def predict(self, x):
# return self.kernel.test(x)
# def fit(self, x_tr, y_tr):
# self.le = LabelEncoder()
# self.le.fit(y_tr)
# int_labels = self.le.transform(y_tr)
# labels_col = np.asarray(int_labels)
# labels_col = np.reshape(labels_col, (-1,1))
# new_data = np.concatenate([labels_col, x_tr], axis=1)
#
# new_data = elm.read('/home/thymios/Desktop/iris.data')
# print new_data.shape
#
# self.kernel.search_param(new_data,
# of="accuracy",
# eval=10)
# # self.kernel.train(new_data)
# exit()
models = []
# models.append(('ELM', ELMWrapper()))
models.append(('LR', LogisticRegression()))
#models.append(('LDA', LinearDiscriminantAnalysis()))
#models.append(('KNN', KNeighborsClassifier()))
# models.append(('CART', DecisionTreeClassifier()))
# models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
# models.append(('RF', RandomForestClassifier()))
# models.append(('ADAb', AdaBoostClassifier()))
# models.append(('GRADb', GradientBoostingClassifier()))
# models.append(('QDA', QuadraticDiscriminantAnalysis()))
# models.append(('LinR', LogisticRegression()))
return dict(models)
def dummy_generate_SVMs_and_LRs():
svm_params = [('svm', c) for c in [0.1, 0.3, 0.5, 1, 3, 5, 7, 8, 10]]
lr_params = [('lr', c) for c in [1e-3, 0.01, 0.05, 0.1, 0.3, 0.5, 1, 3]]
all_params = svm_params + lr_params
for m_name, c in all_params:
desc = '{}_{}'.format(m_name, str(c))
if m_name == 'svm':
yield desc, SVC(C=c)
else:
yield desc, LogisticRegression(C=c)
def speaker_dependent(model,
X_te, Y_te,
X_tr, Y_tr):
n_components = int(X_tr.shape[1] / 2)
# n_components = 3000
# pca = PCA(n_components=n_components).fit(X_tr)
#
# X_tr = pca.transform(X_tr)
# FIXME: Per speaker normalization the dirty way
model.fit(X_tr, Y_tr)
# X_te = pca.transform(X_te)
Y_pred = model.predict(X_te)
model_metrics = compute_metrics(Y_pred, Y_te)
return model_metrics
def speaker_independent(model,
X_te, Y_te,
X_tr, Y_tr):
n_components = int(X_tr.shape[1] / 10)
n_components = 3000
# pca = PCA(n_components=n_components).fit(X_tr)
# X_tr = pca.transform(X_tr)
# X_te = pca.transform(X_te)
model.fit(X_tr, Y_tr)
Y_pred = model.predict(X_te)
model_metrics = compute_metrics(Y_pred, Y_te)
return model_metrics
def evaluate_loso(features_dic):
all_models = list(dummy_generate_SVMs_and_LRs()) #configure_models()
result_dic = {}
all_results = {}
folds_independent = list(generate_speaker_independent_folds(features_dic))
folds_dependent = list(generate_speaker_dependent_folds(features_dic))
#for model_name, model in all_models.items():
for model_name, model in all_models:
result_dic[model_name] = {}
for X_te, Y_te, X_tr, Y_tr in folds_dependent:
exp = 'dependent'
m = {}
m[exp] = speaker_dependent(
model, X_te,
Y_te, X_tr, Y_tr)
for k, v in m[exp].items():
col_name = exp + '_' + k
if result_dic[model_name] and col_name in result_dic[model_name]:
result_dic[model_name][col_name].append(v)
else:
result_dic[model_name][col_name]=[v]
for X_te, Y_te, X_tr, Y_tr in folds_independent:
exp = 'independent'
m = {}
m[exp] = speaker_independent(
model, X_te,
Y_te, X_tr, Y_tr)
for k, v in m[exp].items():
col_name = exp + '_' + k
if result_dic[model_name] and col_name in result_dic[model_name]:
result_dic[model_name][col_name].append(v)
else:
result_dic[model_name][col_name]=[v]
print model_name
for k, v in result_dic[model_name].items():
result_dic[model_name][k] = round(np.mean(v), 4)
pprint.pprint(result_dic[model_name])
all_results['model'] = []
for k in result_dic[model_name]:
for mod, _ in all_models:
if mod not in all_results['model']:
all_results['model'].append(mod)
if mod in result_dic:
if k in all_results:
all_results[k].append(result_dic[mod][k])
else:
all_results[k] = [result_dic[mod][k]]
#df = pd.DataFrame.from_dict(all_results)
#df.to_clipboard()
return all_results
def convert_2_numpy_per_utterance(dataset_dic):
converted_dic = {}
for spkr in dataset_dic:
x_list = []
y_list = []
converted_dic[spkr] = {}
for id, el_dic in dataset_dic[spkr].items():
label = el_dic['y']
feat_vec = el_dic['x']
x_list.append(feat_vec)
y_list.append(label)
this_utt_array = np.array(x_list)
converted_dic[spkr]['x']=this_utt_array
converted_dic[spkr]['y']=y_list
return converted_dic
def nl_feature_load(list_of_paths):
nl_features = {}
for nl_feat_p in list_of_paths:
the_path = os.path.join(nldrp.config.NL_FEATURE_PATH, nl_feat_p)
temp_dic = joblib.load(the_path)
nl_features[nl_feat_p] = temp_dic
print "Read {} features from {}".format(len(nl_features.items()), len(list_of_paths))
return nl_features
def fusion_loso(list_of_paths):
all_results = {}
emo_data_dic = joblib.load(nldrp.config.EMOBASE_PATH)
nl_feature_dic = nl_feature_load(list_of_paths)
for nl_feat_p, temp_dic in nl_feature_dic.items():
print nl_feat_p
final_data_dic = copy.deepcopy(emo_data_dic)
print "COPY"
try:
for spkr in temp_dic:
for id, el_dic in temp_dic[spkr].items():
assert el_dic['y'] == final_data_dic[spkr][id]['y']
prev_vec = final_data_dic[spkr][id]['x']
this_vec = el_dic['x']
new_vec = np.concatenate([prev_vec, this_vec], axis=0)
final_data_dic[spkr][id]['x'] = new_vec
except Exception as e:
print "Failed to update the Fused dictionary"
raise e
fused_converted_dic = convert_2_numpy_per_utterance(final_data_dic)
print "FUSE"
results = evaluate_loso(fused_converted_dic)
print "EVALUATE"
all_results[nl_feat_p] = results
formatted_results = {'configs': []}
for k, v in all_results.items():
for item, lst in v.items():
cnt = len(lst)
if item in formatted_results:
formatted_results[item] += lst
else:
formatted_results[item] = lst
for _ in range(cnt):
formatted_results['configs'].append(k)
print "AGGREGATE RESULTS"
with open(os.path.join(nldrp.config.BASE_PATH, 'up2now_best_features_kl.json'), 'w') as fd:
json.dump(formatted_results, fd)
print "JSON DUMP"
gc.collect()
#df = pd.DataFrame.from_dict(formatted_results)
#df.to_csv(os.path.join(nldrp.config.BASE_PATH, 'up2now_best_features.csv'))
print "FINISHED"
formatted_results = {'configs': []}
for k, v in all_results.items():
for item, lst in v.items():
cnt = len(lst)
if item in formatted_results:
formatted_results[item].append(lst)
else:
formatted_results[item] = [lst]
for _ in range(cnt):
formatted_results['configs'].append(k)
print "FORMATTED RESULTS"
try:
with open(os.path.join(nldrp.config.BASE_PATH, 'best_features_kl.json'), 'w') as fd:
json.dump(formatted_results, fd)
df = pd.DataFrame.from_dict(formatted_results)
df.to_csv(os.path.join(nldrp.config.BASE_PATH, 'best_features_kl.csv'))
except Exception as e:
with open(os.path.join(nldrp.config.BASE_PATH, 'best_features_kl.json'), 'w') as fd:
json.dump(formatted_results, fd)
return all_results
def get_args():
"""! Command line parser for Utterance level classification Leave
one speaker out schema pipeline"""
parser = argparse.ArgumentParser(
description='Utterance level classification Leave one '
'speaker out schema pipeline' )
parser.add_argument('-i', '--input_features_paths', nargs='+',
help='File paths of the features you want to '
'concatenate and the classify')
# parser.add_argument("--dataset", type=str,
# help="""The name of the dataset""",
# required=True,
# choices=['SAVEE'])
# parser.add_argument("-i", "--save_dir", type=str,
# help="""Where the corresponding binary file full of
# data that will contain the dictionary for each speaker is
# stored.
# Another subdic for all the sentences with their ids
# and a 1d numpy matrix for each one of them.
# """,
# default=nldrp.config.EXTRACTED_FEATURES_PATH )
args = parser.parse_args()
return args
if __name__ == "__main__":
"""!brief Example of usage"""
fusion_loso(os.listdir(nldrp.config.NL_FEATURE_PATH))
| 34.683962 | 99 | 0.607643 |
4a1c1a632b3929fc2d983ec1fcc7bcb14f808dcc
| 5,956 |
py
|
Python
|
simulation/controllers/mavic2dji_cnn/mavic2dji_cnn.py
|
unic-ailab/flockai-working
|
32939202f372c9cebbfc62ce27a00e3aaf196b3f
|
[
"MIT"
] | null | null | null |
simulation/controllers/mavic2dji_cnn/mavic2dji_cnn.py
|
unic-ailab/flockai-working
|
32939202f372c9cebbfc62ce27a00e3aaf196b3f
|
[
"MIT"
] | null | null | null |
simulation/controllers/mavic2dji_cnn/mavic2dji_cnn.py
|
unic-ailab/flockai-working
|
32939202f372c9cebbfc62ce27a00e3aaf196b3f
|
[
"MIT"
] | null | null | null |
import pickle
import time  # used below for timestamped image filenames
from PIL import Image
import numpy as np
from dlib import cnn_face_detection_model_v1
from controller import Camera
from flockai.PyCatascopia.Metrics import *
from flockai.interfaces.flockai_ml import FlockAIClassifier
from flockai.models.probes.flockai_probe import FlockAIProbe, ProcessCpuUtilizationMetric, ProcessCpuTimeMetric, ProcessIOTimeMetric, \
ProcessAliveTimeMetric, ProbeAliveTimeMetric, ProcessMemoryMetric
from flockai.webots_controllers.mavic2dji import KeyboardMavic2DJI
from flockai.models.devices.device_enums import EnableableDevice, NonEnableableDevice, MotorDevice, AircraftAxis, \
Relative2DPosition, Devices
"""""""""""""""""""""
DECLARE DEVICES HERE
"""""""""""""""""""""
enableable_devices = [
(EnableableDevice.RECEIVER, "receiver"),
(EnableableDevice.CAMERA, "camera"),
(EnableableDevice.KEYBOARD, None),
(EnableableDevice.BATTERY_SENSOR, None),
(EnableableDevice.INERTIAL_UNIT, "inertial unit"),
(EnableableDevice.GPS, "gps"),
(EnableableDevice.COMPASS, "compass"),
(EnableableDevice.GYRO, "gyro")
]
non_enableable_devices = [
(NonEnableableDevice.EMITTER, "emitter"),
(NonEnableableDevice.LED, "front left led"),
(NonEnableableDevice.LED, "front right led"),
(NonEnableableDevice.DISTANCE_SENSOR, "ds0")
]
"""""""""""""""""""""
DECLARE MOTORS HERE
"""""""""""""""""""""
motor_devices = [
(MotorDevice.CAMERA, "camera roll", AircraftAxis.ROLL),
(MotorDevice.CAMERA, "camera pitch", AircraftAxis.PITCH),
(MotorDevice.CAMERA, "camera yaw", AircraftAxis.YAW),
(MotorDevice.PROPELLER, "front left propeller", Relative2DPosition(1, -1)),
(MotorDevice.PROPELLER, "front right propeller", Relative2DPosition(1, 1)),
(MotorDevice.PROPELLER, "rear left propeller", Relative2DPosition(-1, -1)),
(MotorDevice.PROPELLER, "rear right propeller", Relative2DPosition(-1, 1)),
]
devices = Devices(enableable_devices, non_enableable_devices, motor_devices)
"""""""""""""""""""""""""""
CREATE MONITORING PROBES
"""""""""""""""""""""""""""
metrics = [
ProcessCpuUtilizationMetric(name='cpu_pct', units='%', desc='process-level cpu utilization', minVal=0, higherIsBetter=False),
ProcessCpuTimeMetric('cpu_time', 's', 'process-level cpu time', minVal=0, higherIsBetter=False),
ProcessIOTimeMetric('io_time', 's', 'process-level io time (linux-only)', minVal=0, higherIsBetter=False),
ProcessAliveTimeMetric('alive_time', 's', 'time process is alive', minVal=0, higherIsBetter=False),
ProbeAliveTimeMetric('probe_alive_time', 's', 'time probe is alive', minVal=0, higherIsBetter=False),
ProcessMemoryMetric('mem_pct', '%', 'process-level memory utilization', minVal=0, higherIsBetter=False),
]
probe = FlockAIProbe(metrics, name='Example Probe', periodicity=1)
"""""""""""""""""""""""""""""
INITIALIZE THE CONTROLLER
"""""""""""""""""""""""""""""
controller = KeyboardMavic2DJI(devices=devices, probe=probe)
"""""""""""""""""""""""""""""""""""
IMPLEMENT THE FLOCKAI CLASSIFIER
"""""""""""""""""""""""""""""""""""
class FaceDetectionClassifier(FlockAIClassifier):
def __init__(self):
super().__init__()
# REQUIRED ATTRIBUTES
self.periodicity = 5 # defines the periodicity of the prediction
self.onboard = True # defines if the classifier is run on the drone, if False, the drone transmits the input data via its emitter device
self._load_model()
""" IMPLEMENT ABSTRACT METHODS"""
def _load_model(self):
"""
Custom method that implements the way a model is loaded
:return:
"""
filename = 'cnnFaceRecognition.bin'
self.model = pickle.load(open(filename, 'rb'))
self.cnn_face_detector = cnn_face_detection_model_v1(self.model)
def _get_model_input(self):
"""
Custom method that access the camera on the controller and captures images
:return:
"""
filename = f'logs/Images/image_{str(int(time.time()))}.jpg'
camera: Camera = controller.devices['camera']['device'] # get access to controller devices
camera.saveImage(filename, 20)
return filename
def predict(self):
"""
Main pipeline method used by FlockAI during the simulation to make predictions
:return:
"""
if controller.getTime() % self.periodicity != 0.0: # get access to controller functions
return None
image_filename = self._get_model_input()
# return image_filename
image = self._load_image_file(image_filename)
return [self._trim_css_to_bounds(self._rect_to_css(face.rect), image.shape) for face in self.cnn_face_detector(image, 1)]
""" IMPLEMENT CUSTOM METHODS """
def _get_foo_unused_input(self):
"""
Unused method showcasing a different input method that the user needs
:return:
"""
camera: Camera = controller.devices['camera']['device']
image = camera.getImage()
width = camera.getWidth()
height = camera.getHeight()
image_vector = [[[camera.imageGetRed(image, width, x, y),
camera.imageGetGreen(image, width, x, y),
camera.imageGetBlue(image, width, x, y)] for y in range(height)] for x in range(width)]
return image_vector
def _trim_css_to_bounds(self, css, image_shape):
return max(css[0], 0), min(css[1], image_shape[1]), min(css[2], image_shape[0]), max(css[3], 0)
def _rect_to_css(self, rect):
return rect.top(), rect.right(), rect.bottom(), rect.left()
def _load_image_file(self, file, mode='RGB'):
im = Image.open(file)
if mode:
im = im.convert(mode)
return np.array(im)
"""""""""""""""""""""""""""""""""""""""""""""
SET THE ML MODEL ON THE CONTROLLER AND RUN IT
"""""""""""""""""""""""""""""""""""""""""""""
controller.model = FaceDetectionClassifier()
controller.run()
| 41.075862 | 145 | 0.65732 |
4a1c1b1ff53cf1e4e1767c2757adea2ad022d425
| 7,428 |
py
|
Python
|
utils/string_utils.py
|
aldoram5/NLP-Utils
|
41b8f78c91ba32e44caace0f04142ffef96021e2
|
[
"MIT"
] | null | null | null |
utils/string_utils.py
|
aldoram5/NLP-Utils
|
41b8f78c91ba32e44caace0f04142ffef96021e2
|
[
"MIT"
] | null | null | null |
utils/string_utils.py
|
aldoram5/NLP-Utils
|
41b8f78c91ba32e44caace0f04142ffef96021e2
|
[
"MIT"
] | null | null | null |
#
# Set of utility functions helpful for sentence processing and cleaning
# stopwords obtained from NLTK (https://github.com/nltk/nltk/blob/develop/LICENSE.txt)
# Contractions gotten from https://en.wikipedia.org/wiki/Wikipedia:List_of_English_contractions
#
import string
import re
from difflib import SequenceMatcher
_CONTRACTIONS_DICT = {
"ain't": "am not",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"daren't": "dare not",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"everyone's": "everyone is",
"gimme":"give me",
"gonna":"going to",
"gotta":"got to",
"hadn't": "had not",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'll": "he will",
"he's": "he is",
"how'd": "how did",
"how'll": "how will",
"how's": "how is",
"I'd": "I would",
"I'd've": "I would have",
"I'll": "I will",
"I'm": "I am",
"i'm": "i am",
"I've": "I have",
"isn't": "is not",
"it'd": "it had",
"it'll": "it will",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"ne'er":"never",
"o'clock":"of the clock",
"ol'":"old",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"shouldn't": "should not",
"so've": "so have",
"so's": "so is",
"that'd": "that would",
"that's": "that is",
"that'll": "that will",
"there'd": "there had",
"there's": "there is",
"they'd": "they would",
"they'll": "they will",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we had",
"we'll": "we will",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"would've": "would have",
"wouldn't": "would not",
"you'll": "you will",
"you're": "you are",
"you've": "you have",
"'tis":"it is"
}
_CONTRACTIONS_REGEX = re.compile(r'('+'|'.join(_CONTRACTIONS_DICT.keys())+')')
_ENGLISH_STOPWORDS = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've",
"you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself',
'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them',
'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll",
'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has',
'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or',
'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against',
'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from',
'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once',
'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',
'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too',
'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll',
'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't",
'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't",
'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn',
"shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't"]
_STOPWORDS_REGEX = re.compile(r'(?:^|(?<= ))('+'|'.join(_ENGLISH_STOPWORDS)+')(?:(?= )|$)')
def _contractions_replace(match):
"""
Helper internal method used to easily replace the specified contraction with the match
"""
return _CONTRACTIONS_DICT[match.group(0)]
def expand_contractions(text, regex=_CONTRACTIONS_REGEX):
"""
Expands contractions found in text
:param text: the text string to which we'll expand the contractions inside itself
:param regex: regex to use to find the contractions, should be left to default most of the time
:return: the text with the contractions expanded
"""
return regex.sub(_contractions_replace, text)
def remove_stopwords(text, regex=_STOPWORDS_REGEX):
"""
Removes the stopwords found in the text
:param text: the text string that we'll be removing the stopwords from
:param regex: regex to be used to find the stopwords in the text, should be left to default unless you want to use
another set of stopwords
:return: the text without the stopwords
"""
return regex.sub('',text)
def strip_punc(s, all=False):
"""
Removes punctuation from a string.
:param s: The string.
:param all: Remove all punctuation. If False, only removes punctuation from
the ends of the string.
"""
if all:
return re.compile('[{0}]'.format(re.escape(string.punctuation))).sub('', s.strip())
else:
return s.strip().strip(string.punctuation)
def calculate_string_distance(first, final):
"""
Calculates the string "distance"
:param first: first string to check
:param final: second string to check
:return: The ratio found by the SequenceMatcher
"""
return SequenceMatcher(None, first.lower(), final.lower()).ratio()
def normalize(line, accepted_chars='abcdefghijklmnopqrstuvwxyz '):
"""
Return only the subset of chars from accepted_chars.
This helps keep the model relatively small by ignoring punctuation,
infrequenty symbols, etc.
"""
return [c.lower() for c in line if c.lower() in accepted_chars]
def ngram(n, l):
""" Return all n grams from l after normalizing """
filtered = normalize(l)
for start in range(0, len(filtered) - n + 1):
yield ''.join(filtered[start:start + n])
def pre_process_sentence( sentence):
"""
pre_process_sentence expands contractions on a sentence and changes the symbol ? so it can be specially processed
:param sentence: the sentence to pre-process
:return: the sentence with the modifications
"""
# expand the contractions
expanded_sentence = expand_contractions(sentence.lower())
# remove punctuation
return strip_punc(expanded_sentence)
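# Minimal usage sketch (the sample sentence is illustrative): chain the helpers defined above.
if __name__ == "__main__":
    sample = "I can't believe it's not butter!"
    cleaned = pre_process_sentence(sample)             # lower-case, expand contractions, strip edge punctuation
    print(cleaned)                                     # -> "i cannot believe it is not butter"
    print(remove_stopwords(cleaned))                   # common English stopwords dropped
    print(list(ngram(2, "hello world"))[:3])           # first character bigrams: ['he', 'el', 'll']
    print(calculate_string_distance("kitten", "sitting"))  # similarity ratio in [0, 1]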
| 35.203791 | 119 | 0.5521 |
4a1c1b7f3ea9e8289121379ab7cd8d3e1e4871ad
| 307 |
py
|
Python
|
pyinstaller.py
|
jmbreuer/strainer
|
cf8d5fbb0782ca9d9148107c28cdcd66ac2d6927
|
[
"Unlicense"
] | 2 |
2020-04-10T22:20:14.000Z
|
2020-05-14T21:35:12.000Z
|
pyinstaller.py
|
jmbreuer/strainer
|
cf8d5fbb0782ca9d9148107c28cdcd66ac2d6927
|
[
"Unlicense"
] | 31 |
2020-05-21T14:03:53.000Z
|
2022-03-11T12:04:50.000Z
|
pyinstaller.py
|
jmbreuer/strainer
|
cf8d5fbb0782ca9d9148107c28cdcd66ac2d6927
|
[
"Unlicense"
] | 1 |
2022-03-09T18:19:55.000Z
|
2022-03-09T18:19:55.000Z
|
# PyInstaller insists on executing a script, then fails to recognize runpy.run_module.
# This line alone doesn't work either, because PyInstaller doesn't find the __main__ submodule.
# We also have to manually add strainer.__main__ as a hidden import for everything to work...
from strainer import __main__
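# A build command along these lines (options are illustrative, not taken from the repo) then
# lets PyInstaller pick the submodule up:
#   pyinstaller --onefile --hidden-import strainer.__main__ pyinstaller.py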
| 61.4 | 95 | 0.80456 |
4a1c1bd586eba1613386ad9c2d20891b3639a739
| 1,299 |
py
|
Python
|
setup.py
|
oahzxl/nerfies
|
9e8b007d8fc7059f6a42e07233c76c4f356d8439
|
[
"Apache-2.0"
] | 716 |
2021-02-03T08:37:54.000Z
|
2022-03-31T19:40:45.000Z
|
setup.py
|
oahzxl/nerfies
|
9e8b007d8fc7059f6a42e07233c76c4f356d8439
|
[
"Apache-2.0"
] | 53 |
2021-02-04T21:07:44.000Z
|
2022-03-31T15:58:21.000Z
|
setup.py
|
oahzxl/nerfies
|
9e8b007d8fc7059f6a42e07233c76c4f356d8439
|
[
"Apache-2.0"
] | 107 |
2021-02-03T09:57:48.000Z
|
2022-03-29T09:19:33.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="nerfies", # Replace with your own username
version="0.0.2",
author="Keunhong Park",
author_email="kpar@cs.washington.edu",
description="Code for 'Nerfies: Deformable Neural Radiance Fields'.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/google/nerfies",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache License 2.0",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
)
| 35.108108 | 74 | 0.711316 |
4a1c1c95b07ae409bbab4757a1e5ebcf9bd07b66
| 4,358 |
py
|
Python
|
pymatgen_diffusion/neb/io.py
|
LyKex/pymatgen-diffusion
|
266d882d2502d918ca63641c201d7e95b629af7e
|
[
"BSD-3-Clause"
] | 1 |
2020-08-21T08:29:58.000Z
|
2020-08-21T08:29:58.000Z
|
pymatgen_diffusion/neb/io.py
|
weforever5/pymatgen-diffusion
|
39181c61357e8fe983fbd3cc9c0b5e94c6feb410
|
[
"BSD-3-Clause"
] | null | null | null |
pymatgen_diffusion/neb/io.py
|
weforever5/pymatgen-diffusion
|
39181c61357e8fe983fbd3cc9c0b5e94c6feb410
|
[
"BSD-3-Clause"
] | 1 |
2020-08-19T08:30:54.000Z
|
2020-08-19T08:30:54.000Z
|
# coding: utf-8
# Copyright (c) Materials Virtual Lab.
# Distributed under the terms of the BSD License.
import copy
from pymatgen.io.vasp.sets import MITRelaxSet, MITNEBSet
from pymatgen.core import Structure
__author__ = 'Austen'
class MVLCINEBEndPointSet(MITRelaxSet):
"""
Class for writing NEB end points relaxation inputs.
"""
def __init__(self, structure, **kwargs):
user_incar_settings = kwargs.get("user_incar_settings", {})
defaults = {
"ISIF": 2,
"EDIFF": 5e-5,
"EDIFFG": -0.02,
"ISMEAR": 0,
"ISYM": 0,
"LCHARG": False,
"LDAU": False,
"NELMIN": 4
}
if user_incar_settings != {}:
defaults.update(user_incar_settings)
kwargs["user_incar_settings"] = defaults
super(MVLCINEBEndPointSet, self).__init__(structure, **kwargs)
class MVLCINEBSet(MITNEBSet):
"""
MAVRL-tested settings for CI-NEB calculations. Note that these parameters
    require the VTST modification of VASP from the Henkelman group. See
http://theory.cm.utexas.edu/vtsttools/
Args:
nimages (int): Number of NEB images (excluding start and ending
structures).
user_incar_settings (dict): A dict specifying additional incar
settings.
"""
def __init__(self, structures, **kwargs):
user_incar_settings = kwargs.get("user_incar_settings", {})
# CI-NEB settings
defaults = {
"EDIFF": 5e-5,
"EDIFFG": -0.02,
"IBRION": 3,
"ICHAIN": 0,
"IOPT": 1,
"ISIF": 2,
"ISMEAR": 0,
"ISPIN": 2,
"LCHARG": False,
"LCLIMB": True,
"LDAU": False,
"LORBIT": 0,
"NSW": 200,
"POTIM": 0,
"SPRING": -5
}
if user_incar_settings != {}:
defaults.update(user_incar_settings)
kwargs["user_incar_settings"] = defaults
super(MVLCINEBSet, self).__init__(structures, **kwargs)
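# Illustrative usage (the `structures` list of interpolated NEB images is hypothetical,
# not part of this module):
#   cineb = MVLCINEBSet(structures, user_incar_settings={"NSW": 100})
#   cineb.write_input("./cineb_run")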
def get_endpoints_from_index(structure, site_indices):
"""
    Reads in one perfect structure and generates the two endpoint structures
    using site_indices.
Args:
structure (Structure): A perfect structure.
site_indices (list of int): a two-element list indicating site indices.
Returns:
endpoints (list of Structure): a two-element list of two endpoints
Structure object.
"""
if len(site_indices) != 2 or len(set(site_indices)) != 2:
raise ValueError("Invalid indices!")
if structure[site_indices[0]].specie != structure[site_indices[1]].specie:
raise ValueError("The site indices must be "
"associated with identical species!")
s = structure.copy()
sites = s.sites
# Move hopping atoms to the beginning of species index.
init_site = sites[site_indices[0]]
final_site = sites[site_indices[1]]
sites.remove(init_site)
sites.remove(final_site)
init_sites = copy.deepcopy(sites)
final_sites = copy.deepcopy(sites)
init_sites.insert(0, final_site)
final_sites.insert(0, init_site)
s_0 = Structure.from_sites(init_sites)
s_1 = Structure.from_sites(final_sites)
endpoints = [s_0, s_1]
return endpoints
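# Illustrative usage (`perfect` is a hypothetical pymatgen Structure in which the atoms at
# sites 0 and 4 are the same species):
#   ep_0, ep_1 = get_endpoints_from_index(perfect, [0, 4])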
def get_endpoint_dist(ep_0, ep_1):
"""
Calculate a list of site distances between two endpoints, assuming periodic
boundary conditions.
Args:
ep_0 (Structure): the first endpoint structure.
ep_1 (Structure): the second endpoint structure.
Returns:
dist (list): a list of distances between two structures.
"""
ep_0.remove_oxidation_states()
ep_1.remove_oxidation_states()
assert ep_0.species == ep_1.species, "Formula mismatch!"
    assert ep_0.lattice.abc == ep_1.lattice.abc, "Lattice mismatch!"
distances = []
for site0, site1 in zip(ep_0, ep_1):
fc = (site0.frac_coords, site1.frac_coords)
d = ep_0.lattice.get_distance_and_image(fc[0], fc[1])[0]
distances.append(d)
return distances
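# Illustrative usage (continues the hypothetical endpoints above):
#   distances = get_endpoint_dist(ep_0, ep_1)
#   longest_hop = max(distances)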
| 30.055172 | 80 | 0.592703 |
4a1c1d43cd07191b9479df097fa9d238b31d9f1f
| 1,477 |
py
|
Python
|
demo/baidu.py
|
allyLei/cmnmt
|
b4310fb4fc9c022c4a94579d009f6243b7d21e82
|
[
"MIT"
] | null | null | null |
demo/baidu.py
|
allyLei/cmnmt
|
b4310fb4fc9c022c4a94579d009f6243b7d21e82
|
[
"MIT"
] | null | null | null |
demo/baidu.py
|
allyLei/cmnmt
|
b4310fb4fc9c022c4a94579d009f6243b7d21e82
|
[
"MIT"
] | null | null | null |
#-*- encoding:utf-8 -*-
import requests
import hashlib
import uuid
class Baidu(object):
"""
baidu translation
"""
def __init__(self, appid, key):
self.appid = appid
self.key = key
self.langList = {}
self.initLang()
def initLang(self):
self.langList['ch'] = 'zh'
self.langList['en'] = 'en'
self.langList['auto'] = 'auto'
def translate(self, query, src=None, tgt=None):
assert src in self.langList.keys()
assert tgt in self.langList.keys()
url = 'http://api.fanyi.baidu.com/api/trans/vip/translate'
params = self.generateParams(query, src, tgt)
try:
respond = requests.post(url, params=params).json()
return respond['trans_result'][0]['dst']
except:
return "baidu translation failed"
def generateParams(self, query, src, tgt):
params = {}
salt = uuid.uuid4().hex
sign = hashlib.md5(self.appid+query+salt+self.key).hexdigest()
params['q'] = query
params['from'] = self.langList[src]
params['to'] = self.langList[tgt]
params['appid'] = self.appid
params['salt'] = salt
params['sign'] = sign
return params
if __name__ == "__main__":
appid = "20160825000027412"
key = "nqZwgqSR74topEKu8MGL"
baidu = Baidu(appid, key)
doc = u"我是一个中国人!"
res = baidu.translate(doc.encode('utf-8'), 'ch', 'en')
print res
| 27.867925 | 70 | 0.572783 |
4a1c1d7e0b98e881d8d58437fe1480d5b2396999
| 64,274 |
py
|
Python
|
lib/galaxy/webapps/galaxy/controllers/history.py
|
biomarble/galaxy
|
5767dbe975e1ce51fec70541d4b89c55609d45ee
|
[
"CC-BY-3.0"
] | 1 |
2020-03-11T15:17:32.000Z
|
2020-03-11T15:17:32.000Z
|
lib/galaxy/webapps/galaxy/controllers/history.py
|
biomarble/galaxy
|
5767dbe975e1ce51fec70541d4b89c55609d45ee
|
[
"CC-BY-3.0"
] | 72 |
2019-06-06T18:52:41.000Z
|
2022-02-17T02:53:18.000Z
|
lib/galaxy/webapps/galaxy/controllers/history.py
|
biomarble/galaxy
|
5767dbe975e1ce51fec70541d4b89c55609d45ee
|
[
"CC-BY-3.0"
] | 1 |
2022-03-01T08:07:54.000Z
|
2022-03-01T08:07:54.000Z
|
import logging
from markupsafe import escape
from sqlalchemy import (
and_,
false,
null,
true
)
from sqlalchemy.orm import (
eagerload,
joinedload,
undefer
)
import galaxy.util
from galaxy import exceptions
from galaxy import managers
from galaxy import model
from galaxy import web
from galaxy.model.item_attrs import (
UsesAnnotations,
UsesItemRatings
)
from galaxy.util import (
listify,
Params,
parse_int,
sanitize_text,
string_as_bool,
unicodify
)
from galaxy.web import url_for
from galaxy.web.framework.helpers import (
grids,
iff,
time_ago
)
from galaxy.webapps.base.controller import (
BaseUIController,
ERROR,
ExportsHistoryMixin,
ImportsHistoryMixin,
INFO,
SharableMixin,
SUCCESS,
WARNING,
)
from ._create_history_template import render_item
log = logging.getLogger(__name__)
class NameColumn(grids.TextColumn):
def get_value(self, trans, grid, history):
return escape(history.get_display_name())
class HistoryListGrid(grids.Grid):
# Custom column types
class ItemCountColumn(grids.GridColumn):
def get_value(self, trans, grid, history):
return str(history.hid_counter - 1)
class HistoryListNameColumn(NameColumn):
def get_link(self, trans, grid, history):
link = None
if not history.deleted:
link = dict(operation="Switch", id=history.id, use_panels=grid.use_panels, async_compatible=True)
return link
class DeletedColumn(grids.DeletedColumn):
def get_value(self, trans, grid, history):
if history == trans.history:
return "<strong>current history</strong>"
if history.purged:
return "deleted permanently"
elif history.deleted:
return "deleted"
return ""
def sort(self, trans, query, ascending, column_name=None):
if ascending:
query = query.order_by(self.model_class.table.c.purged.asc(), self.model_class.table.c.update_time.desc())
else:
query = query.order_by(self.model_class.table.c.purged.desc(), self.model_class.table.c.update_time.desc())
return query
def build_initial_query(self, trans, **kwargs):
# Override to preload sharing information used when fetching data for grid.
query = super().build_initial_query(trans, **kwargs)
query = query.options(undefer("users_shared_with_count"))
return query
# Grid definition
title = "Saved Histories"
model_class = model.History
default_sort_key = "-update_time"
columns = [
HistoryListNameColumn("Name", key="name", attach_popup=True, filterable="advanced"),
ItemCountColumn("Items", key="item_count", sortable=False),
grids.GridColumn("Datasets", key="datasets_by_state", sortable=False, nowrap=True, delayed=True),
grids.IndividualTagsColumn("Tags", key="tags", model_tag_association_class=model.HistoryTagAssociation,
filterable="advanced", grid_name="HistoryListGrid"),
grids.SharingStatusColumn("Sharing", key="sharing", filterable="advanced", sortable=False, use_shared_with_count=True),
grids.GridColumn("Size on Disk", key="disk_size", sortable=False, delayed=True),
grids.GridColumn("Created", key="create_time", format=time_ago),
grids.GridColumn("Last Updated", key="update_time", format=time_ago),
DeletedColumn("Status", key="deleted", filterable="advanced")
]
columns.append(
grids.MulticolFilterColumn(
"search history names and tags",
cols_to_filter=[columns[0], columns[3]],
key="free-text-search", visible=False, filterable="standard")
)
global_actions = [
grids.GridAction("Import from file", dict(controller="", action="histories/import"))
]
operations = [
grids.GridOperation("Switch", allow_multiple=False, condition=(lambda item: not item.deleted), async_compatible=True),
grids.GridOperation("View", allow_multiple=False, url_args=dict(controller="", action="histories/view")),
grids.GridOperation("Share or Publish", allow_multiple=False, condition=(lambda item: not item.deleted), url_args=dict(controller="", action="histories/sharing")),
grids.GridOperation("Change Permissions", allow_multiple=False, condition=(lambda item: not item.deleted), url_args=dict(controller="", action="histories/permissions")),
grids.GridOperation("Copy", allow_multiple=False, condition=(lambda item: not item.deleted), async_compatible=False),
grids.GridOperation("Rename", condition=(lambda item: not item.deleted), url_args=dict(controller="", action="histories/rename"), target="top"),
grids.GridOperation("Delete", condition=(lambda item: not item.deleted), async_compatible=True),
grids.GridOperation("Delete Permanently", condition=(lambda item: not item.purged), confirm="History contents will be removed from disk, this cannot be undone. Continue?", async_compatible=True),
grids.GridOperation("Undelete", condition=(lambda item: item.deleted and not item.purged), async_compatible=True),
]
standard_filters = [
grids.GridColumnFilter("Active", args=dict(deleted=False)),
grids.GridColumnFilter("Deleted", args=dict(deleted=True)),
grids.GridColumnFilter("All", args=dict(deleted='All')),
]
default_filter = dict(name="All", deleted="False", tags="All", sharing="All")
num_rows_per_page = 15
use_paging = True
info_text = "Histories that have been deleted for more than a time period specified by the Galaxy administrator(s) may be permanently deleted."
def get_current_item(self, trans, **kwargs):
return trans.get_history()
def apply_query_filter(self, trans, query, **kwargs):
return query.filter_by(user=trans.user, importing=False)
class SharedHistoryListGrid(grids.Grid):
# Custom column types
class DatasetsByStateColumn(grids.GridColumn):
def get_value(self, trans, grid, history):
rval = ''
for state in ('ok', 'running', 'queued', 'error'):
total = sum(1 for d in history.active_datasets if d.state == state)
if total:
rval += f'<div class="count-box state-color-{state}">{total}</div>'
return rval
class SharedByColumn(grids.GridColumn):
def get_value(self, trans, grid, history):
return escape(history.user.email)
# Grid definition
title = "Histories shared with you by others"
model_class = model.History
default_sort_key = "-update_time"
columns = [
grids.GridColumn("Name", key="name", attach_popup=True),
DatasetsByStateColumn("Datasets", sortable=False),
grids.GridColumn("Created", key="create_time", format=time_ago),
grids.GridColumn("Last Updated", key="update_time", format=time_ago),
SharedByColumn("Shared by", key="user_id")
]
operations = [
grids.GridOperation("View", allow_multiple=False, url_args=dict(controller="", action="histories/view")),
grids.GridOperation("Copy", allow_multiple=False),
grids.GridOperation("Unshare", allow_multiple=False)
]
def build_initial_query(self, trans, **kwargs):
return trans.sa_session.query(self.model_class).join('users_shared_with')
def apply_query_filter(self, trans, query, **kwargs):
return query.filter(model.HistoryUserShareAssociation.user == trans.user)
class HistoryAllPublishedGrid(grids.Grid):
class NameURLColumn(grids.PublicURLColumn, NameColumn):
pass
title = "Published Histories"
model_class = model.History
default_sort_key = "update_time"
default_filter = dict(public_url="All", username="All", tags="All")
use_paging = True
num_rows_per_page = 50
columns = [
NameURLColumn("Name", key="name", filterable="advanced"),
grids.OwnerAnnotationColumn("Annotation", key="annotation", model_annotation_association_class=model.HistoryAnnotationAssociation, filterable="advanced"),
grids.OwnerColumn("Owner", key="username", model_class=model.User, filterable="advanced"),
grids.CommunityRatingColumn("Community Rating", key="rating"),
grids.CommunityTagsColumn("Community Tags", key="tags", model_tag_association_class=model.HistoryTagAssociation, filterable="advanced", grid_name="PublicHistoryListGrid"),
grids.ReverseSortColumn("Last Updated", key="update_time", format=time_ago)
]
columns.append(
grids.MulticolFilterColumn(
"Search name, annotation, owner, and tags",
cols_to_filter=[columns[0], columns[1], columns[2], columns[4]],
key="free-text-search", visible=False, filterable="standard")
)
def build_initial_query(self, trans, **kwargs):
# TODO: Tags are still loaded one at a time, consider doing this all at once:
# - eagerload would keep everything in one query but would explode the number of rows and potentially
# result in unneeded info transferred over the wire.
# - subqueryload("tags").subqueryload("tag") would probably be better under postgres but I'd
# like some performance data against a big database first - might cause problems?
# - Pull down only username from associated User table since that is all that is used
# (can be used during search). Need join in addition to the eagerload since it is used in
# the .count() query which doesn't respect the eagerload options (could eliminate this with #5523).
# - Undefer average_rating column to prevent loading individual ratings per-history.
# - Eager load annotations - this causes a left join which might be inefficient if there were
# potentially many items per history (like if joining HDAs for instance) but there should only
# be at most one so this is fine.
return trans.sa_session.query(self.model_class).join("user").options(eagerload("user").load_only("username"), eagerload("annotations"), undefer("average_rating"))
def apply_query_filter(self, trans, query, **kwargs):
# A public history is published, has a slug, and is not deleted.
return query.filter(self.model_class.published == true()).filter(self.model_class.slug != null()).filter(self.model_class.deleted == false())
class HistoryController(BaseUIController, SharableMixin, UsesAnnotations, UsesItemRatings,
ExportsHistoryMixin, ImportsHistoryMixin):
def __init__(self, app):
super().__init__(app)
self.history_manager = managers.histories.HistoryManager(app)
self.history_export_view = managers.histories.HistoryExportView(app)
self.history_serializer = managers.histories.HistorySerializer(self.app)
@web.expose
def index(self, trans):
return ""
@web.expose
def list_as_xml(self, trans):
"""XML history list for functional tests"""
trans.response.set_content_type('text/xml')
return trans.fill_template("/history/list_as_xml.mako")
# ......................................................................... lists
stored_list_grid = HistoryListGrid()
shared_list_grid = SharedHistoryListGrid()
published_list_grid = HistoryAllPublishedGrid()
@web.expose
@web.json
def list_published(self, trans, **kwargs):
return self.published_list_grid(trans, **kwargs)
@web.legacy_expose_api
@web.require_login("work with multiple histories")
def list(self, trans, **kwargs):
"""List all available histories"""
current_history = trans.get_history()
message = kwargs.get('message')
status = kwargs.get('status')
if 'operation' in kwargs:
operation = kwargs['operation'].lower()
history_ids = listify(kwargs.get('id', []))
# Display no message by default
status, message = None, None
# Load the histories and ensure they all belong to the current user
histories = []
for history_id in history_ids:
history = self.history_manager.get_owned(self.decode_id(history_id), trans.user, current_history=trans.history)
if history:
# Ensure history is owned by current user
if history.user_id is not None and trans.user:
assert trans.user.id == history.user_id, "History does not belong to current user"
histories.append(history)
else:
log.warning("Invalid history id '%r' passed to list", history_id)
if histories:
if operation == "switch":
status, message = self._list_switch(trans, histories)
# Take action to update UI to reflect history switch. If
# grid is using panels, it is standalone and hence a redirect
# to root is needed; if grid is not using panels, it is nested
# in the main Galaxy UI and refreshing the history frame
# is sufficient.
use_panels = kwargs.get('use_panels', False) == 'True'
if use_panels:
return trans.response.send_redirect(url_for("/"))
else:
kwargs['refresh_frames'] = ['history']
elif operation in ("delete", "delete permanently"):
status, message = self._list_delete(trans, histories, purge=(operation == "delete permanently"))
if current_history in histories:
# Deleted the current history, so a new, empty history was
# created automatically, and we need to refresh the history frame
kwargs['refresh_frames'] = ['history']
elif operation == "undelete":
status, message = self._list_undelete(trans, histories)
trans.sa_session.flush()
# Render the list view
if message and status:
kwargs['message'] = sanitize_text(message)
kwargs['status'] = status
return self.stored_list_grid(trans, **kwargs)
def _list_delete(self, trans, histories, purge=False):
"""Delete histories"""
n_deleted = 0
deleted_current = False
message_parts = []
status = SUCCESS
current_history = trans.get_history()
for history in histories:
try:
if history.users_shared_with:
raise exceptions.ObjectAttributeInvalidException(
"History (%s) has been shared with others, unshare it before deleting it." % history.name
)
if purge:
self.history_manager.purge(history)
else:
self.history_manager.delete(history)
if history == current_history:
deleted_current = True
except Exception as e:
message_parts.append(unicodify(e))
status = ERROR
else:
trans.log_event("History (%s) marked as deleted" % history.name)
n_deleted += 1
if n_deleted:
part = "Deleted %d %s" % (n_deleted, iff(n_deleted != 1, "histories", "history"))
if purge and trans.app.config.allow_user_dataset_purge:
part += " and removed {} dataset{} from disk".format(iff(n_deleted != 1, "their", "its"), iff(n_deleted != 1, 's', ''))
elif purge:
part += " but the datasets were not removed from disk because that feature is not enabled in this Galaxy instance"
message_parts.append("%s. " % part)
if deleted_current:
# if this history is the current history for this session,
# - attempt to find the most recently used, undeleted history and switch to it.
# - If no suitable recent history is found, create a new one and switch
# note: this needs to come after commits above or will use an empty history that was deleted above
not_deleted_or_purged = [model.History.deleted == false(), model.History.purged == false()]
most_recent_history = self.history_manager.most_recent(user=trans.user, filters=not_deleted_or_purged)
if most_recent_history:
self.history_manager.set_current(trans, most_recent_history)
else:
trans.get_or_create_default_history()
message_parts.append("Your active history was deleted, a new empty history is now active. ")
status = INFO
return (status, " ".join(message_parts))
def _list_undelete(self, trans, histories):
"""Undelete histories"""
n_undeleted = 0
n_already_purged = 0
for history in histories:
if history.purged:
n_already_purged += 1
if history.deleted:
history.deleted = False
if not history.default_permissions:
# For backward compatibility - for a while we were deleting all DefaultHistoryPermissions on
# the history when we deleted the history. We are no longer doing this.
# Need to add default DefaultHistoryPermissions in case they were deleted when the history was deleted
default_action = trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS
private_user_role = trans.app.security_agent.get_private_user_role(history.user)
default_permissions = {}
default_permissions[default_action] = [private_user_role]
trans.app.security_agent.history_set_default_permissions(history, default_permissions)
n_undeleted += 1
trans.log_event("History (%s) %d marked as undeleted" % (history.name, history.id))
status = SUCCESS
message_parts = []
if n_undeleted:
message_parts.append("Undeleted %d %s. " % (n_undeleted, iff(n_undeleted != 1, "histories", "history")))
if n_already_purged:
message_parts.append("%d histories have already been purged and cannot be undeleted." % n_already_purged)
status = WARNING
return status, "".join(message_parts)
def _list_switch(self, trans, histories):
"""Switch to a new different history"""
new_history = histories[0]
galaxy_session = trans.get_galaxy_session()
try:
association = trans.sa_session.query(trans.app.model.GalaxySessionToHistoryAssociation) \
.filter_by(session_id=galaxy_session.id, history_id=new_history.id) \
.first()
except Exception:
association = None
new_history.add_galaxy_session(galaxy_session, association=association)
trans.sa_session.add(new_history)
trans.sa_session.flush()
trans.set_history(new_history)
# No message
return None, None
@web.expose
@web.json
@web.require_login("work with shared histories")
def list_shared(self, trans, **kwargs):
"""List histories shared with current user by others"""
status = message = None
if 'operation' in kwargs:
ids = listify(kwargs.get('id', []))
operation = kwargs['operation'].lower()
if operation == 'unshare':
if not ids:
message = "Select a history to unshare"
status = 'error'
for id in ids:
# No need to check security, association below won't yield a
                    # hit if the history isn't shared with this user.
history = self.history_manager.by_id(self.decode_id(id))
# Current user is the user with which the histories were shared
association = (trans.sa_session.query(trans.app.model.HistoryUserShareAssociation)
.filter_by(user=trans.user, history=history).one())
trans.sa_session.delete(association)
trans.sa_session.flush()
message = "Unshared %d shared histories" % len(ids)
status = 'done'
# Render the list view
return self.shared_list_grid(trans, status=status, message=message, **kwargs)
@web.expose
def as_xml(self, trans, id=None, show_deleted=None, show_hidden=None):
"""
Return a history in xml format.
"""
if trans.app.config.require_login and not trans.user:
return trans.fill_template('/no_access.mako', message='Please log in to access Galaxy histories.')
if id:
history = self.history_manager.get_accessible(self.decode_id(id), trans.user,
current_history=trans.history)
else:
history = trans.get_history(most_recent=True, create=True)
trans.response.set_content_type('text/xml')
return trans.fill_template_mako(
"history/as_xml.mako",
history=history,
show_deleted=string_as_bool(show_deleted),
show_hidden=string_as_bool(show_hidden))
@web.expose
@web.json
def display_structured(self, trans, id=None):
"""
Display a history as a nested structure showing the jobs and workflow
invocations that created each dataset (if any).
"""
# Get history
if id is None:
id = trans.history.id
else:
id = self.decode_id(id)
# Expunge history from the session to allow us to force a reload
# with a bunch of eager loaded joins
trans.sa_session.expunge(trans.history)
history = trans.sa_session.query(model.History).options(
joinedload('active_datasets').joinedload('creating_job_associations').joinedload('job').joinedload('workflow_invocation_step').joinedload('workflow_invocation').joinedload('workflow'),
).get(id)
if not (history and ((history.user and trans.user and history.user.id == trans.user.id) or
(trans.history and history.id == trans.history.id) or
trans.user_is_admin)):
return trans.show_error_message("Cannot display history structure.")
# Resolve jobs and workflow invocations for the datasets in the history
# items is filled with items (hdas, jobs, or workflows) that go at the
# top level
items = []
# First go through and group hdas by job, if there is no job they get
# added directly to items
jobs = {}
for hda in history.active_datasets:
if hda.visible is False:
continue
# Follow "copied from ..." association until we get to the original
# instance of the dataset
original_hda = hda
# while original_hda.copied_from_history_dataset_association:
# original_hda = original_hda.copied_from_history_dataset_association
# Check if the job has a creating job, most should, datasets from
# before jobs were tracked, or from the upload tool before it
# created a job, may not
if not original_hda.creating_job_associations:
items.append((hda, None))
# Attach hda to correct job
# -- there should only be one creating_job_association, so this
# loop body should only be hit once
for assoc in original_hda.creating_job_associations:
job = assoc.job
if job in jobs:
jobs[job].append((hda, None))
else:
jobs[job] = [(hda, None)]
# Second, go through the jobs and connect to workflows
wf_invocations = {}
for job, hdas in jobs.items():
# Job is attached to a workflow step, follow it to the
# workflow_invocation and group
if job.workflow_invocation_step:
wf_invocation = job.workflow_invocation_step.workflow_invocation
if wf_invocation in wf_invocations:
wf_invocations[wf_invocation].append((job, hdas))
else:
wf_invocations[wf_invocation] = [(job, hdas)]
# Not attached to a workflow, add to items
else:
items.append((job, hdas))
# Finally, add workflow invocations to items, which should now
# contain all hdas with some level of grouping
items.extend(wf_invocations.items())
# Sort items by age
items.sort(key=(lambda x: x[0].create_time), reverse=True)
# logic taken from mako files
from galaxy.managers import hdas
hda_serializer = hdas.HDASerializer(trans.app)
hda_dicts = []
id_hda_dict_map = {}
for hda in history.active_datasets:
hda_dict = hda_serializer.serialize_to_view(hda, user=trans.user, trans=trans, view='detailed')
id_hda_dict_map[hda_dict['id']] = hda_dict
hda_dicts.append(hda_dict)
html_template = ''
for entity, children in items:
html_template += render_item(trans, entity, children)
return {
'name': history.name,
'history_json': hda_dicts,
'template': html_template
}
@web.expose
@web.json
def view(self, trans, id=None, show_deleted=False, show_hidden=False, use_panels=True):
"""
View a history. If a history is importable, then it is viewable by any user.
"""
show_deleted = string_as_bool(show_deleted)
show_hidden = string_as_bool(show_hidden)
use_panels = string_as_bool(use_panels)
history_dictionary = {}
user_is_owner = False
try:
if id:
history_to_view = self.history_manager.get_accessible(self.decode_id(id), trans.user,
current_history=trans.history)
user_is_owner = history_to_view.user == trans.user
history_is_current = history_to_view == trans.history
else:
history_to_view = trans.history
user_is_owner = True
history_is_current = True
# include all datasets: hidden, deleted, and purged
history_dictionary = self.history_serializer.serialize_to_view(history_to_view,
view='dev-detailed', user=trans.user, trans=trans)
except Exception as exc:
user_id = str(trans.user.id) if trans.user else '(anonymous)'
log.exception('Error bootstrapping history for user %s', user_id)
if isinstance(exc, exceptions.ItemAccessibilityException):
error_msg = 'You do not have permission to view this history.'
else:
error_msg = ('An error occurred getting the history data from the server. ' +
'Please contact a Galaxy administrator if the problem persists.')
return trans.show_error_message(error_msg, use_panels=use_panels)
return {
"history": history_dictionary,
"user_is_owner": user_is_owner,
"history_is_current": history_is_current,
"show_deleted": show_deleted,
"show_hidden": show_hidden,
"use_panels": use_panels,
"allow_user_dataset_purge": trans.app.config.allow_user_dataset_purge
}
@web.require_login("use more than one Galaxy history")
@web.expose
def view_multiple(self, trans, include_deleted_histories=False, order='update_time', limit=10):
"""
"""
current_history_id = trans.security.encode_id(trans.history.id)
# TODO: allow specifying user_id for admin?
include_deleted_histories = string_as_bool(include_deleted_histories)
limit = parse_int(limit, min_val=1, default=10, allow_none=True)
return trans.fill_template_mako("history/view_multiple.mako", current_history_id=current_history_id,
include_deleted_histories=include_deleted_histories, order=order, limit=limit)
@web.expose
def display_by_username_and_slug(self, trans, username, slug):
"""
Display history based on a username and slug.
"""
# Get history.
session = trans.sa_session
user = session.query(model.User).filter_by(username=username).first()
history = trans.sa_session.query(model.History) \
.options(eagerload('tags')).options(eagerload('annotations')) \
.filter_by(user=user, slug=slug, deleted=False).first()
if history is None:
raise web.httpexceptions.HTTPNotFound()
# Security check raises error if user cannot access history.
self.history_manager.error_unless_accessible(history, trans.user, current_history=trans.history)
# Get rating data.
user_item_rating = 0
if trans.get_user():
user_item_rating = self.get_user_item_rating(trans.sa_session, trans.get_user(), history)
if user_item_rating:
user_item_rating = user_item_rating.rating
else:
user_item_rating = 0
ave_item_rating, num_ratings = self.get_ave_item_rating_data(trans.sa_session, history)
# create ownership flag for template, dictify models
user_is_owner = trans.user == history.user
history_dictionary = self.history_serializer.serialize_to_view(history,
view='dev-detailed', user=trans.user, trans=trans)
history_dictionary['annotation'] = self.get_item_annotation_str(trans.sa_session, history.user, history)
return trans.stream_template_mako("history/display.mako", item=history, item_data=[],
user_is_owner=user_is_owner, history_dict=history_dictionary,
user_item_rating=user_item_rating, ave_item_rating=ave_item_rating, num_ratings=num_ratings)
@web.legacy_expose_api
@web.require_login("changing default permissions")
def permissions(self, trans, payload=None, **kwd):
"""
Sets the permissions on a history.
"""
history_id = kwd.get('id')
if not history_id:
return self.message_exception(trans, 'Invalid history id (%s) received' % str(history_id))
history = self.history_manager.get_owned(self.decode_id(history_id), trans.user, current_history=trans.history)
if trans.request.method == 'GET':
inputs = []
all_roles = trans.user.all_roles()
current_actions = history.default_permissions
for action_key, action in trans.app.model.Dataset.permitted_actions.items():
in_roles = set()
for a in current_actions:
if a.action == action.action:
in_roles.add(a.role)
inputs.append({'type' : 'select',
'multiple' : True,
'optional' : True,
'individual': True,
'name' : action_key,
'label' : action.action,
'help' : action.description,
'options' : [(role.name, trans.security.encode_id(role.id)) for role in set(all_roles)],
'value' : [trans.security.encode_id(role.id) for role in in_roles]})
return {'title' : 'Change default dataset permissions for history \'%s\'' % history.name, 'inputs' : inputs}
else:
permissions = {}
for action_key, action in trans.app.model.Dataset.permitted_actions.items():
in_roles = payload.get(action_key) or []
in_roles = [trans.sa_session.query(trans.app.model.Role).get(trans.security.decode_id(x)) for x in in_roles]
permissions[trans.app.security_agent.get_action(action.action)] = in_roles
trans.app.security_agent.history_set_default_permissions(history, permissions)
return {'message': 'Default history \'%s\' dataset permissions have been changed.' % history.name}
@web.legacy_expose_api
@web.require_login("make datasets private")
def make_private(self, trans, history_id=None, all_histories=False, **kwd):
"""
Sets the datasets within a history to private. Also sets the default
permissions for the history to private, for future datasets.
"""
histories = []
all_histories = string_as_bool(all_histories)
if all_histories:
histories = trans.user.histories
elif history_id:
history = self.history_manager.get_owned(self.decode_id(history_id), trans.user, current_history=trans.history)
if history:
histories.append(history)
if not histories:
return self.message_exception(trans, 'Invalid history or histories specified.')
private_role = trans.app.security_agent.get_private_user_role(trans.user)
user_roles = trans.user.all_roles()
private_permissions = {
trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS: [private_role],
trans.app.security_agent.permitted_actions.DATASET_ACCESS: [private_role],
}
for history in histories:
# Set default role for history to private
trans.app.security_agent.history_set_default_permissions(history, private_permissions)
# Set private role for all datasets
for hda in history.datasets:
if (not hda.dataset.library_associations
and not trans.app.security_agent.dataset_is_private_to_user(trans, hda.dataset)
and trans.app.security_agent.can_manage_dataset(user_roles, hda.dataset)):
# If it's not private to me, and I can manage it, set fixed private permissions.
trans.app.security_agent.set_all_dataset_permissions(hda.dataset, private_permissions)
if not trans.app.security_agent.dataset_is_private_to_user(trans, hda.dataset):
raise exceptions.InternalServerError('An error occurred and the dataset is NOT private.')
return {'message': 'Success, requested permissions have been changed in %s.' % ("all histories" if all_histories else history.name)}
@web.expose
@web.require_login("share histories with other users")
def share(self, trans, id=None, email="", **kwd):
# If a history contains both datasets that can be shared and others that cannot be shared with the desired user,
        # then the entire history is shared, and the protected datasets will be visible, but inaccessible (greyed out)
        # in the copied history
params = Params(kwd)
user = trans.get_user()
# TODO: we have too many error messages floating around in here - we need
# to incorporate the messaging system used by the libraries that will display
# a message on any page.
err_msg = galaxy.util.restore_text(params.get('err_msg', ''))
if not email:
if not id:
# Default to the current history
id = trans.security.encode_id(trans.history.id)
id = listify(id)
send_to_err = err_msg
histories = []
for history_id in id:
history_id = self.decode_id(history_id)
history = self.history_manager.get_owned(history_id, trans.user, current_history=trans.history)
histories.append(history)
return trans.fill_template("/history/share.mako",
histories=histories,
email=email,
send_to_err=send_to_err)
histories = self._get_histories(trans, id)
send_to_users, send_to_err = self._get_users(trans, user, email)
if not send_to_users:
if not send_to_err:
send_to_err += f"{email} is not a valid Galaxy user. {err_msg}"
return trans.fill_template("/history/share.mako",
histories=histories,
email=email,
send_to_err=send_to_err)
if params.get('share_button', False):
# The user has not yet made a choice about how to share, so dictionaries will be built for display
can_change, cannot_change, no_change_needed, unique_no_change_needed, send_to_err = \
self._populate_restricted(trans, user, histories, send_to_users, None, send_to_err, unique=True)
send_to_err += err_msg
if cannot_change and not no_change_needed and not can_change:
send_to_err = "The histories you are sharing do not contain any datasets that can be accessed by the users with which you are sharing."
return trans.fill_template("/history/share.mako",
histories=histories,
email=email,
send_to_err=send_to_err)
if can_change or cannot_change:
return trans.fill_template("/history/share.mako",
histories=histories,
email=email,
send_to_err=send_to_err,
can_change=can_change,
cannot_change=cannot_change,
no_change_needed=unique_no_change_needed)
if no_change_needed:
return self._share_histories(trans, user, send_to_err, histories=no_change_needed)
elif not send_to_err:
# User seems to be sharing an empty history
send_to_err = "You cannot share an empty history. "
return trans.fill_template("/history/share.mako",
histories=histories,
email=email,
send_to_err=send_to_err)
@web.expose
def adjust_hidden(self, trans, id=None, **kwd):
""" THIS METHOD IS A TEMPORARY ADDITION. It'll allow us to fix the
regression in history-wide actions, and will be removed in the first
release after 17.01 """
action = kwd.get('user_action', None)
if action == 'delete':
for hda in trans.history.datasets:
if not hda.visible:
hda.mark_deleted()
elif action == 'unhide':
trans.history.unhide_datasets()
trans.sa_session.flush()
@web.expose
@web.require_login("share restricted histories with other users")
def share_restricted(self, trans, id=None, email="", **kwd):
if 'action' in kwd:
action = kwd['action']
else:
err_msg = "Select an action. "
return trans.response.send_redirect(url_for(controller='history',
action='share',
id=id,
email=email,
err_msg=err_msg,
share_button=True))
user = trans.get_user()
user_roles = user.all_roles()
histories = self._get_histories(trans, id)
send_to_users, send_to_err = self._get_users(trans, user, email)
send_to_err = ''
# The user has made a choice, so dictionaries will be built for sharing
can_change, cannot_change, no_change_needed, unique_no_change_needed, send_to_err = \
self._populate_restricted(trans, user, histories, send_to_users, action, send_to_err)
# Now that we've populated the can_change, cannot_change, and no_change_needed dictionaries,
# we'll populate the histories_for_sharing dictionary from each of them.
histories_for_sharing = {}
if no_change_needed:
# Don't need to change anything in cannot_change, so populate as is
histories_for_sharing, send_to_err = \
self._populate(trans, histories_for_sharing, no_change_needed, send_to_err)
if cannot_change:
# Can't change anything in cannot_change, so populate as is
histories_for_sharing, send_to_err = \
self._populate(trans, histories_for_sharing, cannot_change, send_to_err)
# The action here is either 'public' or 'private', so we'll continue to populate the
# histories_for_sharing dictionary from the can_change dictionary.
for send_to_user, history_dict in can_change.items():
for history in history_dict:
# Make sure the current history has not already been shared with the current send_to_user
if trans.sa_session.query(trans.app.model.HistoryUserShareAssociation) \
.filter(and_(trans.app.model.HistoryUserShareAssociation.table.c.user_id == send_to_user.id,
trans.app.model.HistoryUserShareAssociation.table.c.history_id == history.id)) \
.count() > 0:
send_to_err += f"History ({history.name}) already shared with user ({send_to_user.email})"
else:
# Only deal with datasets that have not been purged
for hda in history.activatable_datasets:
# If the current dataset is not public, we may need to perform an action on it to
# make it accessible by the other user.
if not trans.app.security_agent.can_access_dataset(send_to_user.all_roles(), hda.dataset):
# The user with which we are sharing the history does not have access permission on the current dataset
if trans.app.security_agent.can_manage_dataset(user_roles, hda.dataset) and not hda.dataset.library_associations:
# The current user has authority to change permissions on the current dataset because
# they have permission to manage permissions on the dataset and the dataset is not associated
# with a library.
if action == "private":
trans.app.security_agent.privately_share_dataset(hda.dataset, users=[user, send_to_user])
elif action == "public":
trans.app.security_agent.make_dataset_public(hda.dataset)
# Populate histories_for_sharing with the history after performing any requested actions on
# its datasets to make them accessible by the other user.
if send_to_user not in histories_for_sharing:
histories_for_sharing[send_to_user] = [history]
elif history not in histories_for_sharing[send_to_user]:
histories_for_sharing[send_to_user].append(history)
return self._share_histories(trans, user, send_to_err, histories=histories_for_sharing)
def _get_histories(self, trans, ids):
if not ids:
# Default to the current history
ids = trans.security.encode_id(trans.history.id)
ids = listify(ids)
histories = []
for history_id in ids:
history_id = self.decode_id(history_id)
history = self.history_manager.get_owned(history_id, trans.user, current_history=trans.history)
histories.append(history)
return histories
def _get_users(self, trans, user, emails_or_ids):
send_to_users = []
send_to_err = ""
for string in listify(emails_or_ids):
string = string.strip()
if not string:
continue
send_to_user = None
if '@' in string:
email_address = string
send_to_user = self.user_manager.by_email(email_address,
filters=[trans.app.model.User.table.c.deleted == false()])
else:
try:
decoded_user_id = self.decode_id(string)
send_to_user = self.user_manager.by_id(decoded_user_id)
if send_to_user.deleted:
send_to_user = None
# TODO: in an ideal world, we would let this bubble up to web.expose which would handle it
except exceptions.MalformedId:
send_to_user = None
if not send_to_user:
send_to_err += "%s is not a valid Galaxy user. " % string
elif send_to_user == user:
send_to_err += "You cannot send histories to yourself. "
else:
send_to_users.append(send_to_user)
return send_to_users, send_to_err
def _populate(self, trans, histories_for_sharing, other, send_to_err):
# This method will populate the histories_for_sharing dictionary with the users and
# histories in other, eliminating histories that have already been shared with the
# associated user. No security checking on datasets is performed.
# If not empty, the histories_for_sharing dictionary looks like:
# { userA: [ historyX, historyY ], userB: [ historyY ] }
# other looks like:
# { userA: {historyX : [hda, hda], historyY : [hda]}, userB: {historyY : [hda]} }
for send_to_user, history_dict in other.items():
for history in history_dict:
# Make sure the current history has not already been shared with the current send_to_user
if trans.sa_session.query(trans.app.model.HistoryUserShareAssociation) \
.filter(and_(trans.app.model.HistoryUserShareAssociation.table.c.user_id == send_to_user.id,
trans.app.model.HistoryUserShareAssociation.table.c.history_id == history.id)) \
.count() > 0:
send_to_err += f"History ({history.name}) already shared with user ({send_to_user.email})"
else:
# Build the dict that will be used for sharing
if send_to_user not in histories_for_sharing:
histories_for_sharing[send_to_user] = [history]
elif history not in histories_for_sharing[send_to_user]:
histories_for_sharing[send_to_user].append(history)
return histories_for_sharing, send_to_err
def _populate_restricted(self, trans, user, histories, send_to_users, action, send_to_err, unique=False):
# The user may be attempting to share histories whose datasets cannot all be accessed by other users.
# If this is the case, the user sharing the histories can:
# 1) action=='public': choose to make the datasets public if he is permitted to do so
# 2) action=='private': automatically create a new "sharing role" allowing protected
# datasets to be accessed only by the desired users
# This method will populate the can_change, cannot_change and no_change_needed dictionaries, which
# are used for either displaying to the user, letting them make 1 of the choices above, or sharing
# after the user has made a choice. They will be used for display if 'unique' is True, and will look
# like: {historyX : [hda, hda], historyY : [hda] }
# For sharing, they will look like:
# { userA: {historyX : [hda, hda], historyY : [hda]}, userB: {historyY : [hda]} }
can_change = {}
cannot_change = {}
no_change_needed = {}
unique_no_change_needed = {}
user_roles = user.all_roles()
for history in histories:
for send_to_user in send_to_users:
# Make sure the current history has not already been shared with the current send_to_user
if trans.sa_session.query(trans.app.model.HistoryUserShareAssociation) \
.filter(and_(trans.app.model.HistoryUserShareAssociation.table.c.user_id == send_to_user.id,
trans.app.model.HistoryUserShareAssociation.table.c.history_id == history.id)) \
.count() > 0:
send_to_err += f"History ({history.name}) already shared with user ({send_to_user.email})"
else:
# Only deal with datasets that have not been purged
for hda in history.activatable_datasets:
if trans.app.security_agent.can_access_dataset(send_to_user.all_roles(), hda.dataset):
# The no_change_needed dictionary is a special case. If both of can_change
# and cannot_change are empty, no_change_needed will used for sharing. Otherwise
# unique_no_change_needed will be used for displaying, so we need to populate both.
# Build the dictionaries for display, containing unique histories only
if history not in unique_no_change_needed:
unique_no_change_needed[history] = [hda]
else:
unique_no_change_needed[history].append(hda)
# Build the dictionaries for sharing
if send_to_user not in no_change_needed:
no_change_needed[send_to_user] = {}
if history not in no_change_needed[send_to_user]:
no_change_needed[send_to_user][history] = [hda]
else:
no_change_needed[send_to_user][history].append(hda)
else:
# The user with which we are sharing the history does not have access permission on the current dataset
if trans.app.security_agent.can_manage_dataset(user_roles, hda.dataset):
# The current user has authority to change permissions on the current dataset because
# they have permission to manage permissions on the dataset.
                                # NOTE: (gvk) There may be problems if the dataset also has an ldda, but I don't think so
# because the user with which we are sharing will not have the "manage permission" permission
# on the dataset in their history. Keep an eye on this though...
if unique:
# Build the dictionaries for display, containing unique histories only
if history not in can_change:
can_change[history] = [hda]
else:
can_change[history].append(hda)
else:
# Build the dictionaries for sharing
if send_to_user not in can_change:
can_change[send_to_user] = {}
if history not in can_change[send_to_user]:
can_change[send_to_user][history] = [hda]
else:
can_change[send_to_user][history].append(hda)
else:
if action in ["private", "public"]:
# The user has made a choice, so 'unique' doesn't apply. Don't change stuff
# that the user doesn't have permission to change
continue
if unique:
# Build the dictionaries for display, containing unique histories only
if history not in cannot_change:
cannot_change[history] = [hda]
else:
cannot_change[history].append(hda)
else:
# Build the dictionaries for sharing
if send_to_user not in cannot_change:
cannot_change[send_to_user] = {}
if history not in cannot_change[send_to_user]:
cannot_change[send_to_user][history] = [hda]
else:
cannot_change[send_to_user][history].append(hda)
return can_change, cannot_change, no_change_needed, unique_no_change_needed, send_to_err
def _share_histories(self, trans, user, send_to_err, histories=None):
# histories looks like: { userA: [ historyX, historyY ], userB: [ historyY ] }
histories = histories or {}
if not histories:
send_to_err += "No users have been specified or no histories can be sent without changing permissions or associating a sharing role. "
return trans.response.send_redirect(web.url_for("/histories/list?status=error&message=%s" % send_to_err))
else:
shared_histories = []
for send_to_user, send_to_user_histories in histories.items():
for history in send_to_user_histories:
share = trans.app.model.HistoryUserShareAssociation()
share.history = history
share.user = send_to_user
trans.sa_session.add(share)
self.create_item_slug(trans.sa_session, history)
trans.sa_session.flush()
if history not in shared_histories:
shared_histories.append(history)
return trans.response.send_redirect(web.url_for("/histories/sharing?id=%s" % trans.security.encode_id(shared_histories[0].id)))
# ......................................................................... actions/orig. async
@web.expose
def purge_deleted_datasets(self, trans):
count = 0
if trans.app.config.allow_user_dataset_purge and trans.history:
for hda in trans.history.datasets:
if not hda.deleted or hda.purged:
continue
hda.purge_usage_from_quota(trans.user)
hda.purged = True
trans.sa_session.add(hda)
trans.log_event("HDA id %s has been purged" % hda.id)
trans.sa_session.flush()
if hda.dataset.user_can_purge:
try:
hda.dataset.full_delete()
trans.log_event(f"Dataset id {hda.dataset.id} has been purged upon the the purge of HDA id {hda.id}")
trans.sa_session.add(hda.dataset)
except Exception:
log.exception(f'Unable to purge dataset ({hda.dataset.id}) on purge of hda ({hda.id}):')
count += 1
return trans.show_ok_message("%d datasets have been deleted permanently" % count, refresh_frames=['history'])
return trans.show_error_message("Cannot purge deleted datasets from this session.")
@web.expose
def resume_paused_jobs(self, trans, current=False, ids=None):
"""Resume paused jobs the active history -- this does not require a logged in user."""
if not ids and string_as_bool(current):
histories = [trans.get_history()]
refresh_frames = ['history']
else:
raise NotImplementedError("You can currently only resume all the datasets of the current history.")
for history in histories:
history.resume_paused_jobs()
trans.sa_session.add(history)
trans.sa_session.flush()
return trans.show_ok_message("Your jobs have been resumed.", refresh_frames=refresh_frames)
# TODO: used in index.mako
@web.expose
@web.require_login("rate items")
@web.json
def rate_async(self, trans, id, rating):
""" Rate a history asynchronously and return updated community data. """
history = self.history_manager.get_accessible(self.decode_id(id), trans.user, current_history=trans.history)
if not history:
return trans.show_error_message("The specified history does not exist.")
# Rate history.
self.rate_item(trans.sa_session, trans.get_user(), history, rating)
return self.get_ave_item_rating_data(trans.sa_session, history)
# TODO: used in display_base.mako
@web.expose
def export_archive(self, trans, id=None, jeha_id="latest"):
""" Export a history to an archive. """
#
# Get history to export.
#
jeha = self.history_export_view.get_ready_jeha(trans, id, jeha_id)
return self.serve_ready_history_export(trans, jeha)
@web.expose
@web.json
@web.require_login("get history name and link")
def get_name_and_link_async(self, trans, id=None):
""" Returns history's name and link. """
history = self.history_manager.get_accessible(self.decode_id(id), trans.user, current_history=trans.history)
if self.create_item_slug(trans.sa_session, history):
trans.sa_session.flush()
return_dict = {
"name": history.name,
"link": url_for(controller='history', action="display_by_username_and_slug",
username=history.user.username, slug=history.slug)}
return return_dict
# TODO: used in page/editor.mako
@web.expose
@web.require_login("set history's accessible flag")
def set_accessible_async(self, trans, id=None, accessible=False):
""" Set history's importable attribute and slug. """
history = self.history_manager.get_owned(self.decode_id(id), trans.user, current_history=trans.history)
# Only set if importable value would change; this prevents a change in the update_time unless attribute really changed.
importable = accessible in ['True', 'true', 't', 'T']
if history and history.importable != importable:
if importable:
self._make_item_accessible(trans.sa_session, history)
else:
history.importable = importable
trans.sa_session.flush()
return
# TODO: used in page/editor.mako
@web.legacy_expose_api
@web.require_login("rename histories")
def rename(self, trans, payload=None, **kwd):
id = kwd.get('id')
if not id:
return self.message_exception(trans, 'No history id received for renaming.')
user = trans.get_user()
id = listify(id)
histories = []
for history_id in id:
history = self.history_manager.get_owned(self.decode_id(history_id), trans.user, current_history=trans.history)
if history and history.user_id == user.id:
histories.append(history)
if trans.request.method == 'GET':
return {
'title' : 'Change history name(s)',
'inputs' : [{
'name' : 'name_%i' % i,
'label' : 'Current: %s' % h.name,
'value' : h.name
} for i, h in enumerate(histories)]
}
else:
messages = []
for i, h in enumerate(histories):
cur_name = h.get_display_name()
new_name = payload.get('name_%i' % i)
                # validate that the name is not empty
if not isinstance(new_name, str) or not new_name.strip():
messages.append('You must specify a valid name for History \'%s\'.' % cur_name)
# skip if not the owner
elif h.user_id != user.id:
messages.append('History \'%s\' does not appear to belong to you.' % cur_name)
# skip if it wouldn't be a change
elif new_name != cur_name:
h.name = new_name
trans.sa_session.add(h)
trans.sa_session.flush()
trans.log_event('History renamed: id: {}, renamed to: {}'.format(str(h.id), new_name))
messages.append('History \'' + cur_name + '\' renamed to \'' + new_name + '\'.')
message = sanitize_text(' '.join(messages)) if messages else 'History names remain unchanged.'
return {'message': message, 'status': 'success'}
# ------------------------------------------------------------------------- current history
@web.expose
@web.require_login("switch to a history")
def switch_to_history(self, trans, hist_id=None):
"""Change the current user's current history to one with `hist_id`."""
# remains for backwards compat
self.set_as_current(trans, id=hist_id)
return trans.response.send_redirect(url_for("/"))
def get_item(self, trans, id):
return self.history_manager.get_owned(self.decode_id(id), trans.user, current_history=trans.history)
# TODO: override of base ui controller?
def history_data(self, trans, history):
"""Return the given history in a serialized, dictionary form."""
return self.history_serializer.serialize_to_view(history, view='dev-detailed', user=trans.user, trans=trans)
# TODO: combine these next two - poss. with a redirect flag
# @web.require_login( "switch to a history" )
@web.json
@web.do_not_cache
def set_as_current(self, trans, id):
"""Change the current user's current history to one with `id`."""
try:
history = self.history_manager.get_owned(self.decode_id(id), trans.user, current_history=trans.history)
trans.set_history(history)
return self.history_data(trans, history)
except exceptions.MessageException as msg_exc:
trans.response.status = msg_exc.status_code
return {'err_msg': msg_exc.err_msg, 'err_code': msg_exc.err_code.code}
@web.json
@web.do_not_cache
def current_history_json(self, trans):
"""Return the current user's current history in a serialized, dictionary form."""
history = trans.get_history(most_recent=True, create=True)
return self.history_data(trans, history)
@web.json
def create_new_current(self, trans, name=None):
"""Create a new, current history for the current user"""
new_history = trans.new_history(name)
return self.history_data(trans, new_history)
# TODO: /history/current to do all of the above: if ajax, return json; if post, read id and set to current
| 52.425775 | 204 | 0.603183 |
4a1c1def3e63292a74be551bc02f658588b92f2d
| 5,945 |
py
|
Python
|
contrib/experimental/great_expectations_experimental/expectations/expect_column_values_url_is_available.py
|
mmi333/great_expectations
|
cc9df78596610002c24e2d46f737179e04f31d29
|
[
"Apache-2.0"
] | 1 |
2022-03-17T08:05:44.000Z
|
2022-03-17T08:05:44.000Z
|
contrib/experimental/great_expectations_experimental/expectations/expect_column_values_url_is_available.py
|
Tchibo/great_expectations
|
27220336190039148ab91138cb2fd489d2159183
|
[
"Apache-2.0"
] | null | null | null |
contrib/experimental/great_expectations_experimental/expectations/expect_column_values_url_is_available.py
|
Tchibo/great_expectations
|
27220336190039148ab91138cb2fd489d2159183
|
[
"Apache-2.0"
] | null | null | null |
"""
This is a template for creating custom ColumnMapExpectations.
For detailed instructions on how to use it, please see:
https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations
"""
import json
import urllib.request
from typing import Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_available(url: str) -> bool:
    # Any network or HTTP error counts as "not available".
    try:
        res_code = urllib.request.urlopen(url).getcode()
    except Exception:
        return False
    return res_code == 200
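# Hedged illustration (not part of the original template): a tiny way to exercise
# the helper above by hand. The URLs are assumptions and the calls need network
# access, so this is a sketch rather than a test.
def _demo_is_available():
    for url in ("https://example.com", "https://example.com/definitely-missing"):
        print(url, "->", is_available(url))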
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesUrlIsAvailable(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.url_is_available"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_available(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesUrlIsAvailable(ColumnMapExpectation):
"""Expect column values's URL is available"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_available": [
"https://google.com",
"https://index.hu",
"https://microsoft.com",
"https://bing.com",
"https://github.com",
],
"some_other": [
"https://google.com/what_is_it",
"https://index.hu/nincs_ilyen.html",
"https://microsoft.com",
"https://bing.com",
"https://github.com",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_available"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_other", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.url_is_available"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
if configuration is None:
configuration = self.configuration
# # Check other things in configuration.kwargs and raise Exceptions if needed
# try:
# assert (
# ...
# ), "message"
# assert (
# ...
# ), "message"
# except AssertionError as e:
# raise InvalidExpectationConfigurationError(str(e))
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
}
if __name__ == "__main__":
ExpectColumnValuesUrlIsAvailable().print_diagnostic_checklist()
| 37.626582 | 136 | 0.623886 |
4a1c1e2a90a351d60f9a86ad21917cfddee58359
| 2,443 |
py
|
Python
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/nn/qat/modules/pooling.py
|
bluetiger9/Vitis-AI
|
a7728733bbcfc292ff3afa46b9c8b03e94b740b3
|
[
"Apache-2.0"
] | 848 |
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/nn/qat/modules/pooling.py
|
wangyifan778/Vitis-AI
|
f61061eef7550d98bf02a171604c9a9f283a7c47
|
[
"Apache-2.0"
] | 656 |
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/nn/qat/modules/pooling.py
|
wangyifan778/Vitis-AI
|
f61061eef7550d98bf02a171604c9a9f283a7c47
|
[
"Apache-2.0"
] | 506 |
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from pytorch_nndct.nn.modules.fix_ops import NndctScale
class DPUAvgPool2d(torch.nn.modules.AvgPool2d):
def forward(self, input):
output = super().forward(input)
# scale to DPU accuracy
need_scale = False
scale = 1.0
if self.kernel_size == [3, 3]:
need_scale = True
scale = 9.0 * 7.0 / 64.0
elif self.kernel_size == [5, 5]:
need_scale = True
scale = 25.0 * 10.0 / 256.0
    elif self.kernel_size in ([6, 6], [3, 6], [6, 3]):
need_scale = True
scale = 36.0 * 7.0 / 256.0
elif self.kernel_size == [7, 7]:
need_scale = True
scale = 49.0 * 21.0 / 1024.0
elif self.kernel_size == [14, 14]:
need_scale = True
scale = 196.0 * 21.0 / 4096.0
if need_scale:
NndctScale(output, scale)
return output
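# Hedged illustration (not part of the original module): the scale factors above
# compensate for the DPU computing the average with a fixed-point friendly
# multiplier rather than an exact divide. For a 3x3 kernel the exact 1/9 (~0.1111)
# is approximated by 7/64 (0.109375), so the pooled output is rescaled by
# 9 * 7 / 64 = 0.984375 to match the hardware result.
def _demo_dpu_avgpool_scale():
  pool = DPUAvgPool2d(kernel_size=[3, 3])
  ones = torch.ones(1, 1, 3, 3)
  # Plain average pooling would return exactly 1.0; the DPU approximation
  # yields 63/64 = 0.984375 instead.
  return pool(ones)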
class DPUAdaptiveAvgPool2d(torch.nn.modules.AdaptiveAvgPool2d):
def forward(self, input):
output = super().forward(input)
    # Warn when the requested output size is anything other than a single 1x1 cell.
    if isinstance(self.output_size, (tuple, list)):
      output_is_unit = tuple(self.output_size) == (1, 1)
    else:
      output_is_unit = self.output_size == 1
    if not output_is_unit:
      print(
          "Warning: For adaptive average pooling, DPU only supports output size=1"
      )
need_scale = False
scale = 1.0
if input.shape[2] == 3 and input.shape[3] == 3:
need_scale = True
scale = 9.0 * 7.0 / 64.0
elif input.shape[2] == 5 and input.shape[3] == 5:
need_scale = True
scale = 25.0 * 10.0 / 256.0
elif input.shape[2] == 6 and input.shape[3] == 6:
need_scale = True
scale = 36.0 * 7.0 / 256.0
elif input.shape[2] == 7 and input.shape[3] == 7:
need_scale = True
scale = 49.0 * 21.0 / 1024.0
elif input.shape[2] == 14 and input.shape[3] == 14:
need_scale = True
scale = 196.0 * 21.0 / 4096.0
if need_scale:
NndctScale(output, scale)
return output
| 29.083333 | 82 | 0.623823 |
4a1c1ef98a8ff9b94b735aab0e5fc816a7d944ab
| 587 |
py
|
Python
|
models.py
|
CharlieMinaya/Musify-Backend
|
7678d932a881333570c2975534c961fa1145c004
|
[
"MIT"
] | null | null | null |
models.py
|
CharlieMinaya/Musify-Backend
|
7678d932a881333570c2975534c961fa1145c004
|
[
"MIT"
] | null | null | null |
models.py
|
CharlieMinaya/Musify-Backend
|
7678d932a881333570c2975534c961fa1145c004
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
class Song(models.Model):
song = models.CharField(max_length=120)
artist = models.CharField(max_length=120)
genre = models.CharField(max_length=120)
release_date = models.IntegerField()
def __str__(self):
return self.song
class Rating(models.Model):
username = models.ForeignKey(User, on_delete=models.CASCADE)
song = models.ForeignKey(Song, on_delete=models.CASCADE)
rating = models.IntegerField(null=True, blank=True)
def __int__(self):
return self.rating
| 29.35 | 64 | 0.727428 |
4a1c1f1444436616ff9df53b308c7316b40d61f8
| 305 |
py
|
Python
|
2016/02/sanders-trump-trade-20160208/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 14 |
2015-05-08T13:41:51.000Z
|
2021-02-24T12:34:55.000Z
|
2016/02/sanders-trump-anger-20160208/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | null | null | null |
2016/02/sanders-trump-anger-20160208/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 7 |
2015-04-04T04:45:54.000Z
|
2021-02-18T11:12:48.000Z
|
#!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1eZbdE-8aeUzmDCp-j0BhZgYp5Lu29Bpl8VhVgtokWnI'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| 21.785714 | 77 | 0.813115 |
4a1c1f79ddc790ab5ba4966c71f638e1ee78ba6c
| 1,347 |
py
|
Python
|
src/sage/coding/bounds_catalog.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 1,742 |
2015-01-04T07:06:13.000Z
|
2022-03-30T11:32:52.000Z
|
src/sage/coding/bounds_catalog.py
|
Ivo-Maffei/sage
|
467fbc70a08b552b3de33d9065204ee9cbfb02c7
|
[
"BSL-1.0"
] | 66 |
2015-03-19T19:17:24.000Z
|
2022-03-16T11:59:30.000Z
|
src/sage/coding/bounds_catalog.py
|
dimpase/sage
|
468f23815ade42a2192b0a9cd378de8fdc594dcd
|
[
"BSL-1.0"
] | 495 |
2015-01-10T10:23:18.000Z
|
2022-03-24T22:06:11.000Z
|
r"""
Index of bounds on the parameters of codes
The ``codes.bounds`` object may be used to access the bounds that Sage can compute.
{INDEX_OF_FUNCTIONS}
.. NOTE::
To import these names into the global namespace, use:
sage: from sage.coding.bounds_catalog import *
"""
from sage.misc.lazy_import import lazy_import as _lazy_import
_lazy_import("sage.coding.code_bounds", ["codesize_upper_bound",
"dimension_upper_bound",
"volume_hamming",
"gilbert_lower_bound",
"plotkin_upper_bound",
"griesmer_upper_bound",
"elias_upper_bound",
"hamming_upper_bound",
"singleton_upper_bound",
"gv_info_rate",
"entropy",
"gv_bound_asymp",
"hamming_bound_asymp",
"singleton_bound_asymp",
"plotkin_bound_asymp",
"elias_bound_asymp",
"mrrw1_bound_asymp"])
_lazy_import("sage.coding.delsarte_bounds",
["krawtchouk",
"delsarte_bound_hamming_space",
"delsarte_bound_additive_hamming_space"])
from sage.misc.rest_index_of_methods import gen_rest_table_index as _gen_rest_table_index
import sys as _sys
__doc__ = __doc__.format(INDEX_OF_FUNCTIONS=_gen_rest_table_index(_sys.modules[__name__], only_local_functions=False))
| 32.071429 | 118 | 0.666667 |
4a1c1f9272411a8a3ac3859fbc8a04fae0b1dffb
| 1,718 |
py
|
Python
|
z3/fibonacci.py
|
Wikunia/hakank
|
030bc928d2efe8dcbc5118bda3f8ae9575d0fd13
|
[
"MIT"
] | 279 |
2015-01-10T09:55:35.000Z
|
2022-03-28T02:34:03.000Z
|
z3/fibonacci.py
|
Wikunia/hakank
|
030bc928d2efe8dcbc5118bda3f8ae9575d0fd13
|
[
"MIT"
] | 10 |
2017-10-05T15:48:50.000Z
|
2021-09-20T12:06:52.000Z
|
z3/fibonacci.py
|
Wikunia/hakank
|
030bc928d2efe8dcbc5118bda3f8ae9575d0fd13
|
[
"MIT"
] | 83 |
2015-01-20T03:44:00.000Z
|
2022-03-13T23:53:06.000Z
|
#!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Fibonacci (bidirectional) in Z3
#
# This Z3 model was written by Hakan Kjellerstrand (hakank@gmail.com)
# See also my Z3 page: http://hakank.org/z3/
#
#
from z3 import *
# From https://rise4fun.com/Z3/0pld:
# """
# (declare-fun fib (Int) Int)
# (assert (= 1 (fib 0)))
# (assert (= 1 (fib 1)))
# (assert (forall (x Int) (=> (>= x 2) (= (fib x) (+ (fib (- x 1)) (fib (- x 2)))))))
# (assert (= 2 (fib 2)))
# """
sol = Solver()
max_n = 31
#
# Note: One has to set a max limit on fib
#
# https://stackoverflow.com/questions/6915227/can-z3-check-the-satisfiability-of-formulas-that-contain-recursive-functions
# Leonardo de Moura:
# """
# The models produced by Z3 assign an interpretation for each uninterpreted function symbol. The models can
# be viewed as functional programs. The current version does not produce recursive definitions.
# The first example [Fibonacci] is satisfiable, but Z3 fails to produce an interpretation for fib because
# it does not support recursive definitions. We have plans to extend Z3 in this direction.
# """
fib = Function("fib", IntSort(), IntSort())
x = Int("x")
# sol.add(fib(0) == 1)
# sol.add(fib(1) == 1)
# sol.add(ForAll(x, Implies(And(x >= 2, x <= max_n), fib(x) == fib(x-1) + fib(x-2))))
# Simpler:
sol.add(ForAll(x, If(And(x >= 2, x <= max_n), fib(x) == fib(x-1) + fib(x-2), fib(x) == 1)))
# sol.add(x == fib(2))
y = Int("y")
z = Int("z")
sol.add(y>0, y <= max_n, z >0, z <= max_n)
sol.add(10946 == fib(y))
sol.add(2178309 == fib(z))
print(sol)
if sol.check() == sat:
mod = sol.model()
# print("x:", mod.eval(x))
print("z:", mod.eval(z), "y:", mod.eval(y))
sol.add(z != mod.eval(z),y != mod.eval(y))
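# Illustrative extension (not in the original model): after blocking the first
# (y, z) pair above, re-check to see whether any other assignment satisfies the
# constraints within the bound max_n.
if sol.check() == sat:
    mod = sol.model()
    print("another solution: z:", mod.eval(z), "y:", mod.eval(y))
else:
    print("no further solutions for n <=", max_n)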
| 29.62069 | 122 | 0.621071 |
4a1c2009733199f5fc2f350cd792766f30b309a3
| 413 |
py
|
Python
|
luz/base/tests/test_home.py
|
HenriqueCCdA/contaDeLuz
|
c41433eac32f8ff0aca26d44f9cf1f6d227089cd
|
[
"MIT"
] | null | null | null |
luz/base/tests/test_home.py
|
HenriqueCCdA/contaDeLuz
|
c41433eac32f8ff0aca26d44f9cf1f6d227089cd
|
[
"MIT"
] | 21 |
2021-12-06T03:04:01.000Z
|
2022-01-30T20:13:57.000Z
|
luz/base/tests/test_home.py
|
HenriqueCCdA/contaDeLuz
|
c41433eac32f8ff0aca26d44f9cf1f6d227089cd
|
[
"MIT"
] | null | null | null |
import pytest
from http import HTTPStatus
from django.test import Client
from luz.base.django_assertions import assertion_contains
@pytest.fixture
def response(client: Client):
return client.get('/')
def test_home_status_code(response):
assert response.status_code == HTTPStatus.OK
def test_home_navbar(response):
assertion_contains(response, '<a class="nav-link active" href="/"> Home </a>')
| 20.65 | 82 | 0.762712 |
4a1c218d664b10cdd53d3be9e55885a536cf5d70
| 14,020 |
py
|
Python
|
clients/kratos/python/ory_kratos_client/model/authenticator_assurance_level.py
|
russelg/sdk
|
2515b35981784319bd7d58fcf0b5ab85b501b62f
|
[
"Apache-2.0"
] | 77 |
2020-02-14T17:27:36.000Z
|
2022-03-25T08:44:52.000Z
|
clients/kratos/python/ory_kratos_client/model/authenticator_assurance_level.py
|
russelg/sdk
|
2515b35981784319bd7d58fcf0b5ab85b501b62f
|
[
"Apache-2.0"
] | 125 |
2020-02-07T21:45:52.000Z
|
2022-03-31T12:54:24.000Z
|
clients/kratos/python/ory_kratos_client/model/authenticator_assurance_level.py
|
russelg/sdk
|
2515b35981784319bd7d58fcf0b5ab85b501b62f
|
[
"Apache-2.0"
] | 44 |
2020-01-31T22:05:47.000Z
|
2022-03-09T14:41:22.000Z
|
"""
Ory Kratos API
Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.8.2-alpha.1
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_kratos_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from ory_kratos_client.exceptions import ApiAttributeError
class AuthenticatorAssuranceLevel(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'AAL0': "aal0",
'AAL1': "aal1",
'AAL2': "aal2",
'AAL3': "aal3",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
read_only_vars = set()
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""AuthenticatorAssuranceLevel - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): The authenticator assurance level can be one of \"aal1\", \"aal2\", or \"aal3\". A higher number means that it is harder for an attacker to compromise the account. Generally, \"aal1\" implies that one authentication factor was used while AAL2 implies that two factors (e.g. password + TOTP) have been used. To learn more about these levels please head over to: https://www.ory.sh/kratos/docs/concepts/credentials., must be one of ["aal0", "aal1", "aal2", "aal3", ] # noqa: E501
Keyword Args:
value (str): The authenticator assurance level can be one of \"aal1\", \"aal2\", or \"aal3\". A higher number means that it is harder for an attacker to compromise the account. Generally, \"aal1\" implies that one authentication factor was used while AAL2 implies that two factors (e.g. password + TOTP) have been used. To learn more about these levels please head over to: https://www.ory.sh/kratos/docs/concepts/credentials., must be one of ["aal0", "aal1", "aal2", "aal3", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""AuthenticatorAssuranceLevel - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): The authenticator assurance level can be one of \"aal1\", \"aal2\", or \"aal3\". A higher number means that it is harder for an attacker to compromise the account. Generally, \"aal1\" implies that one authentication factor was used while AAL2 implies that two factors (e.g. password + TOTP) have been used. To learn more about these levels please head over to: https://www.ory.sh/kratos/docs/concepts/credentials., must be one of ["aal0", "aal1", "aal2", "aal3", ] # noqa: E501
Keyword Args:
value (str): The authenticator assurance level can be one of \"aal1\", \"aal2\", or \"aal3\". A higher number means that it is harder for an attacker to compromise the account. Generally, \"aal1\" implies that one authentication factor was used while AAL2 implies that two factors (e.g. password + TOTP) have been used. To learn more about these levels please head over to: https://www.ory.sh/kratos/docs/concepts/credentials., must be one of ["aal0", "aal1", "aal2", "aal3", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
self = super(OpenApiModel, cls).__new__(cls)
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
return self
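# Minimal usage sketch (an illustration, not part of the generated client): the
# class behaves like a validated wrapper around the allowed "aalN" strings, so
# constructing it with a valid level and reading `.value` back is assumed to work.
if __name__ == "__main__":
    level = AuthenticatorAssuranceLevel("aal2")
    print(level.value)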
| 48.013699 | 507 | 0.587803 |
4a1c221aa326bb1cfd7196fe3ed2ed598b4a93c2
| 858 |
py
|
Python
|
var/spack/repos/builtin/packages/py-isodate/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-isodate/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-isodate/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyIsodate(PythonPackage):
"""This module implements ISO 8601 date, time and duration parsing. The
implementation follows ISO8601:2004 standard, and implements only date/time
representations mentioned in the standard. If something is not mentioned
    there, then it is treated as non-existent, and not as an allowed option."""
homepage = "https://github.com/gweis/isodate/"
pypi = "isodate/isodate-0.6.0.tar.gz"
version('0.6.0', sha256='2e364a3d5759479cdb2d37cce6b9376ea504db2ff90252a2e5b7cc89cc9ff2d8')
depends_on('py-setuptools', type='build')
depends_on('py-six', type=('build', 'run'))
| 39 | 95 | 0.737762 |
4a1c22f022314c1dd0f2bd4f19e38547b62cb253
| 2,603 |
py
|
Python
|
utils/moments.py
|
victor5as/RieszLearning
|
cd6aa6a3736b4bf868770ef5002af8ee2f3c41fa
|
[
"MIT"
] | null | null | null |
utils/moments.py
|
victor5as/RieszLearning
|
cd6aa6a3736b4bf868770ef5002af8ee2f3c41fa
|
[
"MIT"
] | null | null | null |
utils/moments.py
|
victor5as/RieszLearning
|
cd6aa6a3736b4bf868770ef5002af8ee2f3c41fa
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
# Returns the moment for the ATE example, for each sample in x
def ate_moment_fn(x, test_fn, device):
if torch.is_tensor(x):
with torch.no_grad():
t1 = torch.cat([torch.ones((x.shape[0], 1)).to(device), x[:, 1:]], dim=1)
t0 = torch.cat([torch.zeros((x.shape[0], 1)).to(device), x[:, 1:]], dim=1)
else:
t1 = np.hstack([np.ones((x.shape[0], 1)), x[:, 1:]])
t0 = np.hstack([np.zeros((x.shape[0], 1)), x[:, 1:]])
return test_fn(t1) - test_fn(t0)
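# Hedged example (not part of the original module): with a known outcome model
# y = 2*T + x1, the ATE moment above differences the model at T=1 and T=0 and
# therefore returns the constant treatment effect 2 for every sample.
def _ate_moment_example():
    X = np.array([[1.0, 0.5, -1.0],
                  [0.0, 2.0, 0.3]])
    def linear_outcome(data):
        return 2 * data[:, 0] + data[:, 1]
    return ate_moment_fn(X, linear_outcome, device=None)  # -> array([2., 2.])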
def policy_moment_gen(policy):
def policy_moment_fn(x, test_fn, device):
with torch.no_grad():
if torch.is_tensor(x):
t1 = torch.cat([torch.ones((x.shape[0], 1)).to(device), x[:, 1:]], dim=1)
t0 = torch.cat([torch.zeros((x.shape[0], 1)).to(device), x[:, 1:]], dim=1)
else:
t1 = np.hstack([np.ones((x.shape[0], 1)), x[:, 1:]])
t0 = np.hstack([np.zeros((x.shape[0], 1)), x[:, 1:]])
p1 = policy(x)
out1 = test_fn(t1)
out0 = test_fn(t0)
if len(out1.shape) > 1:
p1 = p1.reshape(-1, 1)
return out1 * p1 + out0 * (1 - p1)
return policy_moment_fn
def trans_moment_gen(trans):
def trans_moment_fn(x, test_fn, device):
with torch.no_grad():
if torch.is_tensor(x):
tx = torch.cat([x[:, [0]], trans(x[:, [1]]), x[:, 2:]], dim=1)
else:
tx = np.hstack([x[:, [0]], trans(x[:, [1]]), x[:, 2:]])
return test_fn(tx) - test_fn(x)
return trans_moment_fn
def avg_der_moment_fn(x, test_fn, device):
if torch.is_tensor(x):
T = torch.autograd.Variable(x[:, [0]], requires_grad=True)
input = torch.cat([T, x[:, 1:]], dim=1)
output = test_fn(input)
gradients = torch.autograd.grad(outputs=output, inputs=T,
grad_outputs=torch.ones(output.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
else:
raise AttributeError('Not implemented')
return gradients
def avg_small_diff(x, test_fn, device):
epsilon = 0.01
if torch.is_tensor(x):
with torch.no_grad():
t1 = torch.cat([(x[:, [0]] + epsilon).to(device), x[:, 1:]], dim=1)
t0 = torch.cat([(x[:, [0]] - epsilon).to(device), x[:, 1:]], dim=1)
else:
t1 = np.hstack([x[:, [0]] + epsilon, x[:, 1:]])
t0 = np.hstack([x[:, [0]] - epsilon, x[:, 1:]])
return (test_fn(t1) - test_fn(t0)) / (2*epsilon)
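# Hedged example (not part of the original module): for f(t, x) = t**2 the central
# difference above recovers the derivative 2*t, since a quadratic has no
# higher-order finite-difference error.
def _avg_small_diff_example():
    X = np.array([[1.0, 0.0],
                  [2.0, 0.0]])
    def squared_treatment(data):
        return data[:, 0] ** 2
    return avg_small_diff(X, squared_treatment, device=None)  # ~ array([2., 4.])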
| 37.185714 | 90 | 0.519785 |
4a1c2464f18c688452a4ca68d2f631fe1788883e
| 3,221 |
py
|
Python
|
scout/parse/hgnc.py
|
mhkc/scout
|
a7162f28c0f3490c3f3376268118fa8e6072a9db
|
[
"BSD-3-Clause"
] | 111 |
2015-01-15T11:53:20.000Z
|
2022-03-26T19:55:24.000Z
|
scout/parse/hgnc.py
|
mhkc/scout
|
a7162f28c0f3490c3f3376268118fa8e6072a9db
|
[
"BSD-3-Clause"
] | 2,995 |
2015-01-15T16:14:20.000Z
|
2022-03-31T13:36:32.000Z
|
scout/parse/hgnc.py
|
mhkc/scout
|
a7162f28c0f3490c3f3376268118fa8e6072a9db
|
[
"BSD-3-Clause"
] | 55 |
2015-05-31T19:09:49.000Z
|
2021-11-01T10:50:31.000Z
|
import logging
from pprint import pprint as pp
logger = logging.getLogger(__name__)
def parse_hgnc_line(line, header):
"""Parse an hgnc formated line
Args:
line(list): A list with hgnc gene info
header(list): A list with the header info
Returns:
hgnc_info(dict): A dictionary with the relevant info
"""
hgnc_gene = {}
line = line.rstrip().split("\t")
raw_info = dict(zip(header, line))
# Skip all genes that have status withdrawn
if "Withdrawn" in raw_info["status"]:
return hgnc_gene
hgnc_symbol = raw_info["symbol"]
hgnc_gene["hgnc_symbol"] = hgnc_symbol
hgnc_gene["hgnc_id"] = int(raw_info["hgnc_id"].split(":")[-1])
hgnc_gene["description"] = raw_info["name"]
# We want to have the current symbol as an alias
aliases = set([hgnc_symbol, hgnc_symbol.upper()])
# We then need to add both the previous symbols and
# alias symbols
previous_names = raw_info["prev_symbol"]
if previous_names:
for alias in previous_names.strip('"').split("|"):
aliases.add(alias)
alias_symbols = raw_info["alias_symbol"]
if alias_symbols:
for alias in alias_symbols.strip('"').split("|"):
aliases.add(alias)
hgnc_gene["previous_symbols"] = list(aliases)
# We need the ensembl_gene_id to link the genes with ensembl
hgnc_gene["ensembl_gene_id"] = raw_info.get("ensembl_gene_id")
omim_id = raw_info.get("omim_id")
if omim_id:
hgnc_gene["omim_id"] = int(omim_id.strip('"').split("|")[0])
else:
hgnc_gene["omim_id"] = None
    entrez_id = raw_info.get("entrez_id")
if entrez_id:
hgnc_gene["entrez_id"] = int(entrez_id)
else:
hgnc_gene["entrez_id"] = None
# These are the primary transcripts according to HGNC
ref_seq = raw_info.get("refseq_accession")
if ref_seq:
hgnc_gene["ref_seq"] = ref_seq.strip('"').split("|")
else:
hgnc_gene["ref_seq"] = []
uniprot_ids = raw_info.get("uniprot_ids")
if uniprot_ids:
hgnc_gene["uniprot_ids"] = uniprot_ids.strip('""').split("|")
else:
hgnc_gene["uniprot_ids"] = []
ucsc_id = raw_info.get("ucsc_id")
if ucsc_id:
hgnc_gene["ucsc_id"] = ucsc_id
else:
hgnc_gene["ucsc_id"] = None
vega_id = raw_info.get("vega_id")
if vega_id:
hgnc_gene["vega_id"] = vega_id
else:
hgnc_gene["vega_id"] = None
return hgnc_gene
def parse_hgnc_genes(lines):
"""Parse lines with hgnc formated genes
This is designed to take a dump with genes from HGNC.
This is downloaded from:
ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/tsv/hgnc_complete_set.txt
Args:
        lines(iterable(str)): An iterable with HGNC formatted genes
Yields:
hgnc_gene(dict): A dictionary with the relevant information
"""
header = []
logger.info("Parsing hgnc genes...")
for index, line in enumerate(lines):
if index == 0:
header = line.split("\t")
elif len(line) > 1:
hgnc_gene = parse_hgnc_line(line=line, header=header)
if hgnc_gene:
yield hgnc_gene
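# Hedged usage sketch (not part of the original module): stream genes out of a
# local copy of the HGNC dump; the default file name below is an assumption.
def _example_parse_hgnc_file(path="hgnc_complete_set.txt"):
    with open(path) as handle:
        for gene in parse_hgnc_genes(handle):
            print(gene["hgnc_id"], gene["hgnc_symbol"])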
| 29.550459 | 77 | 0.631791 |
4a1c252599b7749eee7f86f7d803794f77871375
| 588 |
py
|
Python
|
nni/runtime/platform/__init__.py
|
qfyin/nni
|
59a1ccf8eba68b94974e84fc3834f38d851faf89
|
[
"MIT"
] | 1 |
2021-01-19T02:48:00.000Z
|
2021-01-19T02:48:00.000Z
|
nni/runtime/platform/__init__.py
|
qfyin/nni
|
59a1ccf8eba68b94974e84fc3834f38d851faf89
|
[
"MIT"
] | 1 |
2021-01-17T08:53:56.000Z
|
2021-01-17T08:53:56.000Z
|
nni/runtime/platform/__init__.py
|
qfyin/nni
|
59a1ccf8eba68b94974e84fc3834f38d851faf89
|
[
"MIT"
] | 1 |
2020-12-21T11:15:54.000Z
|
2020-12-21T11:15:54.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from ..env_vars import trial_env_vars, dispatcher_env_vars
assert dispatcher_env_vars.SDK_PROCESS != 'dispatcher'
if trial_env_vars.NNI_PLATFORM is None:
from .standalone import *
elif trial_env_vars.NNI_PLATFORM == 'unittest':
from .test import *
elif trial_env_vars.NNI_PLATFORM in ('local', 'remote', 'pai', 'kubeflow', 'frameworkcontroller', 'paiYarn', 'dlts', 'aml', 'adl', 'heterogeneous'):
from .local import *
else:
raise RuntimeError('Unknown platform %s' % trial_env_vars.NNI_PLATFORM)
| 36.75 | 148 | 0.748299 |
4a1c25abbcb576f5c2f9bcd6f418bfee58a4696e
| 50,196 |
py
|
Python
|
gplearn/tests/test_genetic.py
|
HengruiX/gplearn
|
922084857b1a333d4d9925bdca315af8be02673d
|
[
"BSD-3-Clause"
] | null | null | null |
gplearn/tests/test_genetic.py
|
HengruiX/gplearn
|
922084857b1a333d4d9925bdca315af8be02673d
|
[
"BSD-3-Clause"
] | null | null | null |
gplearn/tests/test_genetic.py
|
HengruiX/gplearn
|
922084857b1a333d4d9925bdca315af8be02673d
|
[
"BSD-3-Clause"
] | null | null | null |
"""Testing the Genetic Programming module's underlying datastructure
(gplearn.genetic._Program) as well as the classes that use it,
gplearn.genetic.SymbolicRegressor and gplearn.genetic.SymbolicTransformer."""
# Author: Trevor Stephens <trevorstephens.com>
#
# License: BSD 3 clause
import pickle
import sys
from io import StringIO
import numpy as np
from scipy.stats import pearsonr, spearmanr
from sklearn.datasets import load_boston, load_breast_cancer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import check_random_state
from gplearn.genetic import SymbolicClassifier, SymbolicRegressor
from gplearn.genetic import SymbolicTransformer
from gplearn.fitness import weighted_pearson, weighted_spearman
from gplearn._program import _Program
from gplearn.fitness import _fitness_map
from gplearn.functions import (add2, sub2, mul2, div2, sqrt1, log1, abs1, max2,
min2)
from gplearn.functions import _Function
from gplearn.tests.check_estimator import custom_check_estimator
from gplearn.tests.check_estimator import rewritten_check_estimator
# load the boston dataset and randomly permute it
rng = check_random_state(0)
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# load the breast cancer dataset and randomly permute it
rng = check_random_state(0)
cancer = load_breast_cancer()
perm = rng.permutation(cancer.target.size)
cancer.data = cancer.data[perm]
cancer.target = cancer.target[perm]
def test_sklearn_estimator_checks_regressor():
"""Run the sklearn estimator validation checks on SymbolicRegressor"""
check_estimator(SymbolicRegressor)
def test_sklearn_estimator_checks_classifier():
"""Run the sklearn estimator validation checks on SymbolicClassifier"""
custom_check_estimator(SymbolicClassifier)
def test_sklearn_estimator_checks_classifier_binary():
"""Run custom binary estimator validation checks on SymbolicClassifier"""
rewritten_check_estimator(SymbolicClassifier)
def test_sklearn_estimator_checks_transformer():
"""Run the sklearn estimator validation checks on SymbolicTransformer"""
check_estimator(SymbolicTransformer)
def test_weighted_correlations():
"""Check weighted Pearson correlation coefficient matches scipy"""
random_state = check_random_state(415)
x1 = random_state.uniform(size=500)
x2 = random_state.uniform(size=500)
w1 = np.ones(500)
w2 = random_state.uniform(size=500)
# Pearson's correlation coefficient
scipy_pearson = pearsonr(x1, x2)[0]
# Check with constant weights (should be equal)
gplearn_pearson = weighted_pearson(x1, x2, w1)
assert_almost_equal(scipy_pearson, gplearn_pearson)
# Check with irregular weights (should be different)
gplearn_pearson = weighted_pearson(x1, x2, w2)
assert_true(abs(scipy_pearson - gplearn_pearson) > 0.01)
# Spearman's correlation coefficient
scipy_spearman = spearmanr(x1, x2)[0]
# Check with constant weights (should be equal)
gplearn_spearman = weighted_spearman(x1, x2, w1)
assert_almost_equal(scipy_spearman, gplearn_spearman)
# Check with irregular weights (should be different)
    gplearn_spearman = weighted_spearman(x1, x2, w2)
assert_true(abs(scipy_spearman - gplearn_spearman) > 0.01)
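# Hedged illustration (not one of the original tests): with equal weights the
# weighted statistics reduce to their scipy counterparts, which is exactly what
# the constant-weight assertions above rely on.
def _demo_weighted_vs_scipy():
    rs = check_random_state(0)
    a, b = rs.uniform(size=100), rs.uniform(size=100)
    return pearsonr(a, b)[0], weighted_pearson(a, b, np.ones(100))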
def test_program_init_method():
"""Check 'full' creates longer and deeper programs than other methods"""
params = {'function_set': [add2, sub2, mul2, div2, sqrt1, log1, abs1, max2,
min2],
'arities': {1: [sqrt1, log1, abs1],
2: [add2, sub2, mul2, div2, max2, min2]},
'init_depth': (2, 6),
'n_features': 10,
'const_range': (-1.0, 1.0),
'metric': 'mean absolute error',
'p_point_replace': 0.05,
'parsimony_coefficient': 0.1}
random_state = check_random_state(415)
programs = []
for i in range(20):
programs.append(_Program(init_method='full',
random_state=random_state, **params))
full_length = np.mean([gp.length_ for gp in programs])
full_depth = np.mean([gp.depth_ for gp in programs])
programs = []
for i in range(20):
programs.append(_Program(init_method='half and half',
random_state=random_state, **params))
hnh_length = np.mean([gp.length_ for gp in programs])
hnh_depth = np.mean([gp.depth_ for gp in programs])
programs = []
for i in range(20):
programs.append(_Program(init_method='grow',
random_state=random_state, **params))
grow_length = np.mean([gp.length_ for gp in programs])
grow_depth = np.mean([gp.depth_ for gp in programs])
assert_greater(full_length, hnh_length)
assert_greater(hnh_length, grow_length)
assert_greater(full_depth, hnh_depth)
assert_greater(hnh_depth, grow_depth)
def test_program_init_depth():
"""Check 'full' creates constant depth programs for single depth limit"""
params = {'function_set': [add2, sub2, mul2, div2, sqrt1, log1, abs1, max2,
min2],
'arities': {1: [sqrt1, log1, abs1],
2: [add2, sub2, mul2, div2, max2, min2]},
'init_depth': (6, 6),
'n_features': 10,
'const_range': (-1.0, 1.0),
'metric': 'mean absolute error',
'p_point_replace': 0.05,
'parsimony_coefficient': 0.1}
random_state = check_random_state(415)
programs = []
for i in range(20):
programs.append(_Program(init_method='full',
random_state=random_state, **params))
full_depth = np.bincount([gp.depth_ for gp in programs])
programs = []
for _ in range(20):
programs.append(_Program(init_method='half and half',
random_state=random_state, **params))
hnh_depth = np.bincount([gp.depth_ for gp in programs])
programs = []
for i in range(20):
programs.append(_Program(init_method='grow',
random_state=random_state, **params))
grow_depth = np.bincount([gp.depth_ for gp in programs])
assert_true(full_depth[-1] == 20)
assert_false(hnh_depth[-1] == 20)
assert_false(grow_depth[-1] == 20)
def test_validate_program():
"""Check that valid programs are accepted & invalid ones raise error"""
function_set = [add2, sub2, mul2, div2, sqrt1, log1, abs1, max2, min2]
    arities = {1: [sqrt1, log1, abs1],
               2: [add2, sub2, mul2, div2, max2, min2]}
init_depth = (2, 6)
init_method = 'half and half'
n_features = 10
const_range = (-1.0, 1.0)
metric = 'mean absolute error'
p_point_replace = 0.05
parsimony_coefficient = 0.1
random_state = check_random_state(415)
test_gp = [sub2, abs1, sqrt1, log1, log1, sqrt1, 7, abs1, abs1, abs1, log1,
sqrt1, 2]
# This one should be fine
_ = _Program(function_set, arities, init_depth, init_method, n_features,
const_range, metric, p_point_replace, parsimony_coefficient,
random_state, program=test_gp)
# Now try a couple that shouldn't be
assert_raises(ValueError, _Program, function_set, arities, init_depth,
init_method, n_features, const_range, metric,
p_point_replace, parsimony_coefficient, random_state,
program=test_gp[:-1])
assert_raises(ValueError, _Program, function_set, arities, init_depth,
init_method, n_features, const_range, metric,
p_point_replace, parsimony_coefficient, random_state,
program=test_gp + [1])
def test_print_overloading():
"""Check that printing a program object results in 'pretty' output"""
params = {'function_set': [add2, sub2, mul2, div2],
'arities': {2: [add2, sub2, mul2, div2]},
'init_depth': (2, 6),
'init_method': 'half and half',
'n_features': 10,
'const_range': (-1.0, 1.0),
'metric': 'mean absolute error',
'p_point_replace': 0.05,
'parsimony_coefficient': 0.1}
random_state = check_random_state(415)
test_gp = [mul2, div2, 8, 1, sub2, 9, .5]
gp = _Program(random_state=random_state, program=test_gp, **params)
orig_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
print(gp)
output = out.getvalue().strip()
finally:
sys.stdout = orig_stdout
lisp = "mul(div(X8, X1), sub(X9, 0.500))"
assert_true(output == lisp)
# Test with feature names
params['feature_names'] = [str(n) for n in range(10)]
gp = _Program(random_state=random_state, program=test_gp, **params)
orig_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
print(gp)
output = out.getvalue().strip()
finally:
sys.stdout = orig_stdout
lisp = "mul(div(8, 1), sub(9, 0.500))"
assert_true(output == lisp)
def test_export_graphviz():
"""Check output of a simple program to Graphviz"""
params = {'function_set': [add2, sub2, mul2, div2],
'arities': {2: [add2, sub2, mul2, div2]},
'init_depth': (2, 6),
'init_method': 'half and half',
'n_features': 10,
'const_range': (-1.0, 1.0),
'metric': 'mean absolute error',
'p_point_replace': 0.05,
'parsimony_coefficient': 0.1}
random_state = check_random_state(415)
# Test for a small program
test_gp = [mul2, div2, 8, 1, sub2, 9, .5]
gp = _Program(random_state=random_state, program=test_gp, **params)
output = gp.export_graphviz()
tree = 'digraph program {\n' \
'node [style=filled]\n' \
'0 [label="mul", fillcolor="#136ed4"] ;\n' \
'1 [label="div", fillcolor="#136ed4"] ;\n' \
'2 [label="X8", fillcolor="#60a6f6"] ;\n' \
'3 [label="X1", fillcolor="#60a6f6"] ;\n' \
'1 -> 3 ;\n1 -> 2 ;\n' \
'4 [label="sub", fillcolor="#136ed4"] ;\n' \
'5 [label="X9", fillcolor="#60a6f6"] ;\n' \
'6 [label="0.500", fillcolor="#60a6f6"] ;\n' \
'4 -> 6 ;\n4 -> 5 ;\n0 -> 4 ;\n0 -> 1 ;\n}'
assert_true(output == tree)
# Test with feature names
params['feature_names'] = [str(n) for n in range(10)]
gp = _Program(random_state=random_state, program=test_gp, **params)
output = gp.export_graphviz()
tree = tree.replace('X', '')
assert_true(output == tree)
# Test with fade_nodes
params['feature_names'] = None
gp = _Program(random_state=random_state, program=test_gp, **params)
output = gp.export_graphviz(fade_nodes=[0, 1, 2, 3])
tree = 'digraph program {\n' \
'node [style=filled]\n' \
'0 [label="mul", fillcolor="#cecece"] ;\n' \
'1 [label="div", fillcolor="#cecece"] ;\n' \
'2 [label="X8", fillcolor="#cecece"] ;\n' \
'3 [label="X1", fillcolor="#cecece"] ;\n' \
'1 -> 3 ;\n1 -> 2 ;\n' \
'4 [label="sub", fillcolor="#136ed4"] ;\n' \
'5 [label="X9", fillcolor="#60a6f6"] ;\n' \
'6 [label="0.500", fillcolor="#60a6f6"] ;\n' \
'4 -> 6 ;\n4 -> 5 ;\n0 -> 4 ;\n0 -> 1 ;\n}'
assert_true(output == tree)
# Test a degenerative single-node program
test_gp = [1]
gp = _Program(random_state=random_state, program=test_gp, **params)
output = gp.export_graphviz()
tree = 'digraph program {\n' \
'node [style=filled]\n' \
'0 [label="X1", fillcolor="#60a6f6"] ;\n}'
assert_true(output == tree)
def test_invalid_feature_names():
"""Check invalid feature names raise errors"""
for Symbolic in (SymbolicRegressor, SymbolicTransformer):
# Check invalid length feature_names
est = Symbolic(feature_names=['foo', 'bar'])
assert_raises(ValueError, est.fit, boston.data, boston.target)
# Check invalid type feature_name
feature_names = [str(n) for n in range(12)] + [0]
est = Symbolic(feature_names=feature_names)
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_execute():
"""Check executing the program works"""
params = {'function_set': [add2, sub2, mul2, div2],
'arities': {2: [add2, sub2, mul2, div2]},
'init_depth': (2, 6),
'init_method': 'half and half',
'n_features': 10,
'const_range': (-1.0, 1.0),
'metric': 'mean absolute error',
'p_point_replace': 0.05,
'parsimony_coefficient': 0.1}
random_state = check_random_state(415)
# Test for a small program
test_gp = [mul2, div2, 8, 1, sub2, 9, .5]
X = np.reshape(random_state.uniform(size=50), (5, 10))
gp = _Program(random_state=random_state, program=test_gp, **params)
result = gp.execute(X)
expected = [-0.19656208, 0.78197782, -1.70123845, -0.60175969, -0.01082618]
assert_array_almost_equal(result, expected)
def test_all_metrics():
"""Check all supported metrics work"""
params = {'function_set': [add2, sub2, mul2, div2],
'arities': {2: [add2, sub2, mul2, div2]},
'init_depth': (2, 6),
'init_method': 'half and half',
'n_features': 10,
'const_range': (-1.0, 1.0),
'metric': 'mean absolute error',
'p_point_replace': 0.05,
'parsimony_coefficient': 0.1}
random_state = check_random_state(415)
# Test for a small program
test_gp = [mul2, div2, 8, 1, sub2, 9, .5]
gp = _Program(random_state=random_state, program=test_gp, **params)
X = np.reshape(random_state.uniform(size=50), (5, 10))
y = random_state.uniform(size=5)
sample_weight = np.ones(5)
expected = [1.48719809776, 1.82389179833, 1.76013763179, -0.2928200724,
-0.5]
result = []
for m in ['mean absolute error', 'mse', 'rmse', 'pearson', 'spearman']:
gp.metric = _fitness_map[m]
gp.raw_fitness_ = gp.raw_fitness(X, y, sample_weight)
result.append(gp.fitness())
assert_array_almost_equal(result, expected)
def test_get_subtree():
"""Check that get subtree does the same thing for self and new programs"""
params = {'function_set': [add2, sub2, mul2, div2],
'arities': {2: [add2, sub2, mul2, div2]},
'init_depth': (2, 6),
'init_method': 'half and half',
'n_features': 10,
'const_range': (-1.0, 1.0),
'metric': 'mean absolute error',
'p_point_replace': 0.05,
'parsimony_coefficient': 0.1}
random_state = check_random_state(415)
# Test for a small program
test_gp = [mul2, div2, 8, 1, sub2, 9, .5]
gp = _Program(random_state=random_state, program=test_gp, **params)
self_test = gp.get_subtree(check_random_state(0))
external_test = gp.get_subtree(check_random_state(0), test_gp)
assert_equal(self_test, external_test)
def test_genetic_operations():
"""Check all genetic operations are stable and don't change programs"""
params = {'function_set': [add2, sub2, mul2, div2],
'arities': {2: [add2, sub2, mul2, div2]},
'init_depth': (2, 6),
'init_method': 'half and half',
'n_features': 10,
'const_range': (-1.0, 1.0),
'metric': 'mean absolute error',
'p_point_replace': 0.05,
'parsimony_coefficient': 0.1}
random_state = check_random_state(415)
# Test for a small program
test_gp = [mul2, div2, 8, 1, sub2, 9, .5]
donor = [add2, 0.1, sub2, 2, 7]
gp = _Program(random_state=random_state, program=test_gp, **params)
assert_equal([f.name if isinstance(f, _Function) else f
for f in gp.reproduce()],
['mul', 'div', 8, 1, 'sub', 9, 0.5])
assert_equal(gp.program, test_gp)
assert_equal([f.name if isinstance(f, _Function) else f
for f in gp.crossover(donor, random_state)[0]],
['sub', 2, 7])
assert_equal(gp.program, test_gp)
assert_equal([f.name if isinstance(f, _Function) else f
for f in gp.subtree_mutation(random_state)[0]],
['mul', 'div', 8, 1, 'sub', 'sub', 3, 5, 'add', 6, 3])
assert_equal(gp.program, test_gp)
assert_equal([f.name if isinstance(f, _Function) else f
for f in gp.hoist_mutation(random_state)[0]],
['div', 8, 1])
assert_equal(gp.program, test_gp)
assert_equal([f.name if isinstance(f, _Function) else f
for f in gp.point_mutation(random_state)[0]],
['mul', 'div', 8, 1, 'sub', 9, 0.5])
assert_equal(gp.program, test_gp)
def test_program_input_validation():
"""Check that guarded input validation raises errors"""
for Symbolic in (SymbolicRegressor, SymbolicTransformer):
# Check too much proba
est = Symbolic(p_point_mutation=.5)
assert_raises(ValueError, est.fit, boston.data, boston.target)
# Check invalid init_method
est = Symbolic(init_method='ni')
assert_raises(ValueError, est.fit, boston.data, boston.target)
# Check invalid const_ranges
est = Symbolic(const_range=2)
assert_raises(ValueError, est.fit, boston.data, boston.target)
est = Symbolic(const_range=[2, 2])
assert_raises(ValueError, est.fit, boston.data, boston.target)
est = Symbolic(const_range=(2, 2, 2))
assert_raises(ValueError, est.fit, boston.data, boston.target)
est = Symbolic(const_range='ni')
assert_raises(ValueError, est.fit, boston.data, boston.target)
# And check acceptable, but strange, representations of const_range
est = Symbolic(generations=2, const_range=(2, 2))
est.fit(boston.data, boston.target)
est = Symbolic(generations=2, const_range=None)
est.fit(boston.data, boston.target)
est = Symbolic(generations=2, const_range=(4, 2))
est.fit(boston.data, boston.target)
# Check invalid init_depth
est = Symbolic(init_depth=2)
assert_raises(ValueError, est.fit, boston.data, boston.target)
est = Symbolic(init_depth=2)
assert_raises(ValueError, est.fit, boston.data, boston.target)
est = Symbolic(init_depth=[2, 2])
assert_raises(ValueError, est.fit, boston.data, boston.target)
est = Symbolic(init_depth=(2, 2, 2))
assert_raises(ValueError, est.fit, boston.data, boston.target)
est = Symbolic(init_depth='ni')
assert_raises(ValueError, est.fit, boston.data, boston.target)
est = Symbolic(init_depth=(4, 2))
assert_raises(ValueError, est.fit, boston.data, boston.target)
# And check acceptable, but strange, representations of init_depth
est = Symbolic(generations=2, init_depth=(2, 2))
est.fit(boston.data, boston.target)
# Check hall_of_fame and n_components for transformer
est = SymbolicTransformer(hall_of_fame=2000)
assert_raises(ValueError, est.fit, boston.data, boston.target)
est = SymbolicTransformer(n_components=2000)
assert_raises(ValueError, est.fit, boston.data, boston.target)
est = SymbolicTransformer(hall_of_fame=0)
assert_raises(ValueError, est.fit, boston.data, boston.target)
est = SymbolicTransformer(n_components=0)
assert_raises(ValueError, est.fit, boston.data, boston.target)
# Check regressor metrics
for m in ['mean absolute error', 'mse', 'rmse', 'pearson', 'spearman']:
est = SymbolicRegressor(generations=2, metric=m)
est.fit(boston.data, boston.target)
# And check a fake one
est = SymbolicRegressor(generations=2, metric='the larch')
assert_raises(ValueError, est.fit, boston.data, boston.target)
# Check transformer metrics
for m in ['pearson', 'spearman']:
est = SymbolicTransformer(generations=2, metric=m)
est.fit(boston.data, boston.target)
# And check the regressor metrics as well as a fake one
for m in ['mean absolute error', 'mse', 'rmse', 'the larch']:
est = SymbolicTransformer(generations=2, metric=m)
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_program_input_validation_classifier():
"""Check that guarded input validation raises errors"""
# Check too much proba
est = SymbolicClassifier(p_point_mutation=.5)
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
# Check invalid init_method
est = SymbolicClassifier(init_method='ni')
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
# Check invalid const_ranges
est = SymbolicClassifier(const_range=2)
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
est = SymbolicClassifier(const_range=[2, 2])
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
est = SymbolicClassifier(const_range=(2, 2, 2))
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
est = SymbolicClassifier(const_range='ni')
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
# And check acceptable, but strange, representations of const_range
est = SymbolicClassifier(generations=2, const_range=(2, 2))
est.fit(cancer.data, cancer.target)
est = SymbolicClassifier(generations=2, const_range=None)
est.fit(cancer.data, cancer.target)
est = SymbolicClassifier(generations=2, const_range=(4, 2))
est.fit(cancer.data, cancer.target)
# Check invalid init_depth
est = SymbolicClassifier(init_depth=2)
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
est = SymbolicClassifier(init_depth=2)
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
est = SymbolicClassifier(init_depth=[2, 2])
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
est = SymbolicClassifier(init_depth=(2, 2, 2))
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
est = SymbolicClassifier(init_depth='ni')
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
est = SymbolicClassifier(init_depth=(4, 2))
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
# And check acceptable, but strange, representations of init_depth
est = SymbolicClassifier(generations=2, init_depth=(2, 2))
est.fit(cancer.data, cancer.target)
# Check classifier metrics
for m in ['log loss']:
est = SymbolicClassifier(generations=2, metric=m)
est.fit(cancer.data, cancer.target)
# And check a fake one
est = SymbolicClassifier(generations=2, metric='the larch')
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
# Check classifier transformers
for t in ['sigmoid']:
est = SymbolicClassifier(generations=2, transformer=t)
est.fit(cancer.data, cancer.target)
# And check an incompatible one with wrong arity
est = SymbolicClassifier(generations=2, transformer=sub2)
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
# And check a fake one
est = SymbolicClassifier(generations=2, transformer='the larch')
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
def test_none_const_range():
"""Check that const_range=None produces no constants"""
# Check with None as const_range
est = SymbolicRegressor(const_range=None, generations=2)
est.fit(boston.data, boston.target)
float_count = 0
for generation in est._programs:
for program in generation:
if program is None:
continue
for element in program.program:
if type(element) == float:
float_count += 1
assert_true(float_count == 0)
# Check with default const_range
est = SymbolicRegressor(generations=2)
est.fit(boston.data, boston.target)
float_count = 0
for generation in est._programs:
for program in generation:
if program is None:
continue
for element in program.program:
if isinstance(element, float):
float_count += 1
assert_true(float_count > 1)
def test_sample_weight():
"""Check sample_weight param works"""
# Check constant sample_weight has no effect
sample_weight = np.ones(boston.target.shape[0])
est1 = SymbolicRegressor(generations=2, random_state=0)
est1.fit(boston.data, boston.target)
est2 = SymbolicRegressor(generations=2, random_state=0)
est2.fit(boston.data, boston.target, sample_weight=sample_weight)
# And again with a scaled sample_weight
est3 = SymbolicRegressor(generations=2, random_state=0)
est3.fit(boston.data, boston.target, sample_weight=sample_weight * 1.1)
assert_almost_equal(est1._program.fitness_, est2._program.fitness_)
assert_almost_equal(est1._program.fitness_, est3._program.fitness_)
# And again for the classifier
sample_weight = np.ones(cancer.target.shape[0])
est1 = SymbolicClassifier(generations=2, random_state=0)
est1.fit(cancer.data, cancer.target)
est2 = SymbolicClassifier(generations=2, random_state=0)
est2.fit(cancer.data, cancer.target, sample_weight=sample_weight)
# And again with a scaled sample_weight
est3 = SymbolicClassifier(generations=2, random_state=0)
est3.fit(cancer.data, cancer.target, sample_weight=sample_weight * 1.1)
assert_almost_equal(est1._program.fitness_, est2._program.fitness_)
assert_almost_equal(est1._program.fitness_, est3._program.fitness_)
# And again for the transformer
sample_weight = np.ones(boston.target.shape[0])
est1 = SymbolicTransformer(generations=2, random_state=0)
est1 = est1.fit_transform(boston.data, boston.target)
est2 = SymbolicTransformer(generations=2, random_state=0)
est2 = est2.fit_transform(boston.data, boston.target,
sample_weight=sample_weight)
assert_array_almost_equal(est1, est2)
def test_trigonometric():
"""Check that using trig functions work and that results differ"""
est1 = SymbolicRegressor(random_state=0)
est1.fit(boston.data[:400, :], boston.target[:400])
est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),
boston.target[400:])
est2 = SymbolicRegressor(function_set=['add', 'sub', 'mul', 'div',
'sin', 'cos', 'tan'],
random_state=0)
est2.fit(boston.data[:400, :], boston.target[:400])
est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),
boston.target[400:])
assert_true(abs(est1 - est2) > 0.01)
def test_subsample():
"""Check that subsample work and that results differ"""
est1 = SymbolicRegressor(max_samples=1.0, random_state=0)
est1.fit(boston.data[:400, :], boston.target[:400])
est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),
boston.target[400:])
est2 = SymbolicRegressor(max_samples=0.7, random_state=0)
est2.fit(boston.data[:400, :], boston.target[:400])
est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),
boston.target[400:])
assert_true(abs(est1 - est2) > 0.01)
def test_parsimony_coefficient():
"""Check that parsimony coefficients work and that results differ"""
est1 = SymbolicRegressor(parsimony_coefficient=0.001, random_state=0)
est1.fit(boston.data[:400, :], boston.target[:400])
est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),
boston.target[400:])
est2 = SymbolicRegressor(parsimony_coefficient=0.1, random_state=0)
est2.fit(boston.data[:400, :], boston.target[:400])
est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),
boston.target[400:])
est3 = SymbolicRegressor(parsimony_coefficient='auto', random_state=0)
est3.fit(boston.data[:400, :], boston.target[:400])
est3 = mean_absolute_error(est3.predict(boston.data[400:, :]),
boston.target[400:])
assert_true(abs(est1 - est2) > 0.01)
assert_true(abs(est1 - est3) > 0.01)
assert_true(abs(est2 - est3) > 0.01)
def test_early_stopping():
"""Check that early stopping works"""
est1 = SymbolicRegressor(stopping_criteria=10, random_state=0)
est1.fit(boston.data[:400, :], boston.target[:400])
assert_true(len(est1._programs) == 1)
est1 = SymbolicTransformer(stopping_criteria=0.5, random_state=0)
est1.fit(boston.data[:400, :], boston.target[:400])
assert_true(len(est1._programs) == 1)
est1 = SymbolicClassifier(stopping_criteria=.9, random_state=0)
est1.fit(cancer.data[:400, :], cancer.target[:400])
assert_true(len(est1._programs) == 1)
def test_verbose_output():
"""Check verbose=1 does not cause error"""
old_stdout = sys.stdout
sys.stdout = StringIO()
est = SymbolicRegressor(random_state=0, verbose=1)
est.fit(boston.data, boston.target)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header1 = verbose_output.readline().rstrip()
true_header = ' |{:^25}|{:^42}|'.format('Population Average',
'Best Individual')
assert_equal(true_header, header1)
header2 = verbose_output.readline().rstrip()
true_header = '-' * 4 + ' ' + '-' * 25 + ' ' + '-' * 42 + ' ' + '-' * 10
assert_equal(true_header, header2)
header3 = verbose_output.readline().rstrip()
line_format = '{:>4} {:>8} {:>16} {:>8} {:>16} {:>16} {:>10}'
true_header = line_format.format('Gen', 'Length', 'Fitness', 'Length',
'Fitness', 'OOB Fitness', 'Time Left')
assert_equal(true_header, header3)
n_lines = sum(1 for l in verbose_output.readlines())
assert_equal(20, n_lines)
def test_verbose_with_oob():
"""Check oob scoring for subsample does not cause error"""
old_stdout = sys.stdout
sys.stdout = StringIO()
est = SymbolicRegressor(max_samples=0.9, random_state=0, verbose=1)
est.fit(boston.data, boston.target)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
# Ignore header rows
_ = verbose_output.readline().rstrip()
_ = verbose_output.readline().rstrip()
_ = verbose_output.readline().rstrip()
n_lines = sum(1 for l in verbose_output.readlines())
assert_equal(20, n_lines)
def test_more_verbose_output():
"""Check verbose=2 does not cause error"""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = StringIO()
sys.stderr = StringIO()
est = SymbolicRegressor(random_state=0, verbose=2)
est.fit(boston.data, boston.target)
verbose_output = sys.stdout
joblib_output = sys.stderr
sys.stdout = old_stdout
sys.stderr = old_stderr
# check output
verbose_output.seek(0)
# Ignore header rows
_ = verbose_output.readline().rstrip()
_ = verbose_output.readline().rstrip()
_ = verbose_output.readline().rstrip()
n_lines = sum(1 for l in verbose_output.readlines())
assert_equal(20, n_lines)
joblib_output.seek(0)
n_lines = sum(1 for l in joblib_output.readlines())
    # Newer versions of joblib appear to write to sys.stderr
assert_equal(0, n_lines % 10)
def test_parallel_train():
"""Check predictions are the same for different n_jobs"""
# Check the regressor
ests = [
SymbolicRegressor(population_size=100, generations=4, n_jobs=n_jobs,
random_state=0).fit(boston.data[:100, :],
boston.target[:100])
for n_jobs in [1, 2, 3, 8, 16]
]
preds = [e.predict(boston.data[500:, :]) for e in ests]
for pred1, pred2 in zip(preds, preds[1:]):
assert_array_almost_equal(pred1, pred2)
lengths = np.array([[gp.length_ for gp in e._programs[-1]] for e in ests])
for len1, len2 in zip(lengths, lengths[1:]):
assert_array_almost_equal(len1, len2)
# Check the transformer
ests = [
SymbolicTransformer(population_size=100, hall_of_fame=50,
generations=4, n_jobs=n_jobs,
random_state=0).fit(boston.data[:100, :],
boston.target[:100])
for n_jobs in [1, 2, 3, 8, 16]
]
preds = [e.transform(boston.data[500:, :]) for e in ests]
for pred1, pred2 in zip(preds, preds[1:]):
assert_array_almost_equal(pred1, pred2)
lengths = np.array([[gp.length_ for gp in e._programs[-1]] for e in ests])
for len1, len2 in zip(lengths, lengths[1:]):
assert_array_almost_equal(len1, len2)
# Check the classifier
ests = [
SymbolicClassifier(population_size=100, generations=4, n_jobs=n_jobs,
random_state=0).fit(cancer.data[:100, :],
cancer.target[:100])
for n_jobs in [1, 2, 3, 8, 16]
]
preds = [e.predict(cancer.data[500:, :]) for e in ests]
for pred1, pred2 in zip(preds, preds[1:]):
assert_array_almost_equal(pred1, pred2)
lengths = np.array([[gp.length_ for gp in e._programs[-1]] for e in ests])
for len1, len2 in zip(lengths, lengths[1:]):
assert_array_almost_equal(len1, len2)
def test_pickle():
"""Check pickability"""
# Check the regressor
est = SymbolicRegressor(generations=2, random_state=0)
est.fit(boston.data[:100, :], boston.target[:100])
score = est.score(boston.data[500:, :], boston.target[500:])
pickle_object = pickle.dumps(est)
est2 = pickle.loads(pickle_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(boston.data[500:, :], boston.target[500:])
assert_equal(score, score2)
# Check the transformer
est = SymbolicTransformer(generations=2, random_state=0)
est.fit(boston.data[:100, :], boston.target[:100])
X_new = est.transform(boston.data[500:, :])
pickle_object = pickle.dumps(est)
est2 = pickle.loads(pickle_object)
assert_equal(type(est2), est.__class__)
X_new2 = est2.transform(boston.data[500:, :])
assert_array_almost_equal(X_new, X_new2)
# Check the classifier
est = SymbolicClassifier(generations=2, random_state=0)
est.fit(cancer.data[:100, :], cancer.target[:100])
score = est.score(cancer.data[500:, :], cancer.target[500:])
pickle_object = pickle.dumps(est)
est2 = pickle.loads(pickle_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(cancer.data[500:, :], cancer.target[500:])
assert_equal(score, score2)
def test_memory_layout():
"""Check that it works no matter the memory layout"""
for Symbolic in [SymbolicTransformer, SymbolicRegressor]:
for dtype in [np.float64, np.float32]:
est = Symbolic(generations=2, random_state=0)
# Nothing
X = np.asarray(boston.data, dtype=dtype)
y = boston.target
est.fit(X, y)
# C-order
X = np.asarray(boston.data, order="C", dtype=dtype)
y = boston.target
est.fit(X, y)
# F-order
X = np.asarray(boston.data, order="F", dtype=dtype)
y = boston.target
est.fit(X, y)
# Contiguous
X = np.ascontiguousarray(boston.data, dtype=dtype)
y = boston.target
est.fit(X, y)
# Strided
X = np.asarray(boston.data[::3], dtype=dtype)
y = boston.target[::3]
est.fit(X, y)
def test_input_shape():
"""Check changed dimensions cause failure"""
random_state = check_random_state(415)
X = np.reshape(random_state.uniform(size=50), (5, 10))
y = random_state.uniform(size=5)
yc = np.asarray(['foo', 'bar', 'foo', 'foo', 'bar'])
X2 = np.reshape(random_state.uniform(size=45), (5, 9))
# Check the regressor
est = SymbolicRegressor(generations=2, random_state=0)
est.fit(X, y)
assert_raises(ValueError, est.predict, X2)
# Check the transformer
est = SymbolicTransformer(generations=2, random_state=0)
est.fit(X, y)
assert_raises(ValueError, est.transform, X2)
# Check the classifier
est = SymbolicClassifier(generations=2, random_state=0)
est.fit(X, yc)
assert_raises(ValueError, est.predict, X2)
def test_output_shape():
"""Check output shape is as expected"""
random_state = check_random_state(415)
X = np.reshape(random_state.uniform(size=50), (5, 10))
y = random_state.uniform(size=5)
# Check the transformer
est = SymbolicTransformer(n_components=5, generations=2, random_state=0)
est.fit(X, y)
assert_true(est.transform(X).shape == (5, 5))
def test_gridsearch():
"""Check that SymbolicRegressor can be grid-searched"""
# Grid search parsimony_coefficient
parameters = {'parsimony_coefficient': [0.001, 0.1, 'auto']}
clf = SymbolicRegressor(population_size=50, generations=5,
tournament_size=5, random_state=0)
grid = GridSearchCV(clf, parameters, cv=3,
scoring='neg_mean_absolute_error')
grid.fit(boston.data, boston.target)
expected = {'parsimony_coefficient': 0.001}
assert_equal(grid.best_params_, expected)
def test_pipeline():
"""Check that SymbolicRegressor/Transformer can work in a pipeline"""
# Check the regressor
est = make_pipeline(StandardScaler(),
SymbolicRegressor(population_size=50,
generations=5,
tournament_size=5,
random_state=0))
est.fit(boston.data, boston.target)
assert_almost_equal(est.score(boston.data, boston.target), -4.00270923)
# Check the classifier
est = make_pipeline(StandardScaler(),
SymbolicClassifier(population_size=50,
generations=5,
tournament_size=5,
random_state=0))
est.fit(cancer.data, cancer.target)
assert_almost_equal(est.score(cancer.data, cancer.target), 0.934973637961)
# Check the transformer
est = make_pipeline(SymbolicTransformer(population_size=50,
hall_of_fame=20,
generations=5,
tournament_size=5,
random_state=0),
DecisionTreeRegressor())
est.fit(boston.data, boston.target)
assert_almost_equal(est.score(boston.data, boston.target), 1.0)
def test_transformer_iterable():
"""Check that the transformer is iterable"""
random_state = check_random_state(415)
X = np.reshape(random_state.uniform(size=50), (5, 10))
y = random_state.uniform(size=5)
function_set = ['add', 'sub', 'mul', 'div', 'sqrt', 'log', 'abs', 'neg',
'inv', 'max', 'min']
est = SymbolicTransformer(population_size=500, generations=2,
function_set=function_set, random_state=0)
# Check unfitted
unfitted_len = len(est)
unfitted_iter = [gp.length_ for gp in est]
expected_iter = []
assert_true(unfitted_len == 0)
assert_true(unfitted_iter == expected_iter)
# Check fitted
est.fit(X, y)
fitted_len = len(est)
fitted_iter = [gp.length_ for gp in est]
expected_iter = [8, 12, 2, 29, 9, 33, 9, 8, 4, 22]
assert_true(fitted_len == 10)
assert_true(fitted_iter == expected_iter)
# Check IndexError
assert_raises(IndexError, est.__getitem__, 10)
def test_print_overloading_estimator():
"""Check that printing a fitted estimator results in 'pretty' output"""
random_state = check_random_state(415)
X = np.reshape(random_state.uniform(size=50), (5, 10))
y = random_state.uniform(size=5)
# Check the regressor
est = SymbolicRegressor(generations=2, random_state=0)
# Unfitted
orig_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
print(est)
output_unfitted = out.getvalue().strip()
finally:
sys.stdout = orig_stdout
# Fitted
est.fit(X, y)
orig_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
print(est)
output_fitted = out.getvalue().strip()
finally:
sys.stdout = orig_stdout
orig_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
print(est._program)
output_program = out.getvalue().strip()
finally:
sys.stdout = orig_stdout
assert_true(output_unfitted != output_fitted)
assert_true(output_unfitted == est.__repr__())
assert_true(output_fitted == output_program)
# Check the transformer
est = SymbolicTransformer(generations=2, random_state=0)
# Unfitted
orig_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
print(est)
output_unfitted = out.getvalue().strip()
finally:
sys.stdout = orig_stdout
# Fitted
est.fit(X, y)
orig_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
print(est)
output_fitted = out.getvalue().strip()
finally:
sys.stdout = orig_stdout
orig_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
output = str([gp.__str__() for gp in est])
print(output.replace("',", ",\n").replace("'", ""))
output_program = out.getvalue().strip()
finally:
sys.stdout = orig_stdout
assert_true(output_unfitted != output_fitted)
assert_true(output_unfitted == est.__repr__())
assert_true(output_fitted == output_program)
# Check the classifier
y = (y > .5).astype(int)
est = SymbolicClassifier(generations=2, random_state=0)
# Unfitted
orig_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
print(est)
output_unfitted = out.getvalue().strip()
finally:
sys.stdout = orig_stdout
# Fitted
est.fit(X, y)
orig_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
print(est)
output_fitted = out.getvalue().strip()
finally:
sys.stdout = orig_stdout
orig_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
print(est._program)
output_program = out.getvalue().strip()
finally:
sys.stdout = orig_stdout
assert_true(output_unfitted != output_fitted)
assert_true(output_unfitted == est.__repr__())
assert_true(output_fitted == output_program)
def test_validate_functions():
"""Check that valid functions are accepted & invalid ones raise error"""
for Symbolic in (SymbolicRegressor, SymbolicTransformer):
# These should be fine
est = Symbolic(generations=2, random_state=0,
function_set=(add2, sub2, mul2, div2))
est.fit(boston.data, boston.target)
est = Symbolic(generations=2, random_state=0,
function_set=('add', 'sub', 'mul', div2))
est.fit(boston.data, boston.target)
# These should fail
est = Symbolic(generations=2, random_state=0,
function_set=('ni', 'sub', 'mul', div2))
assert_raises(ValueError, est.fit, boston.data, boston.target)
est = Symbolic(generations=2, random_state=0,
function_set=(7, 'sub', 'mul', div2))
assert_raises(ValueError, est.fit, boston.data, boston.target)
est = Symbolic(generations=2, random_state=0, function_set=())
assert_raises(ValueError, est.fit, boston.data, boston.target)
# Now for the classifier... These should be fine
est = SymbolicClassifier(generations=2, random_state=0,
function_set=(add2, sub2, mul2, div2))
est.fit(cancer.data, cancer.target)
est = SymbolicClassifier(generations=2, random_state=0,
function_set=('add', 'sub', 'mul', div2))
est.fit(cancer.data, cancer.target)
# These should fail
est = SymbolicClassifier(generations=2, random_state=0,
function_set=('ni', 'sub', 'mul', div2))
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
est = SymbolicClassifier(generations=2, random_state=0,
function_set=(7, 'sub', 'mul', div2))
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
est = SymbolicClassifier(generations=2, random_state=0, function_set=())
assert_raises(ValueError, est.fit, cancer.data, cancer.target)
def test_indices():
"""Check that indices are stable when generated on the fly."""
params = {'function_set': [add2, sub2, mul2, div2],
'arities': {2: [add2, sub2, mul2, div2]},
'init_depth': (2, 6),
'init_method': 'half and half',
'n_features': 10,
'const_range': (-1.0, 1.0),
'metric': 'mean absolute error',
'p_point_replace': 0.05,
'parsimony_coefficient': 0.1}
random_state = check_random_state(415)
test_gp = [mul2, div2, 8, 1, sub2, 9, .5]
gp = _Program(random_state=random_state, program=test_gp, **params)
assert_raises(ValueError, gp.get_all_indices)
assert_raises(ValueError, gp._indices)
def get_indices_property():
return gp.indices_
assert_raises(ValueError, get_indices_property)
indices, _ = gp.get_all_indices(10, 7, random_state)
assert_array_equal(indices, gp.get_all_indices()[0])
assert_array_equal(indices, gp._indices())
assert_array_equal(indices, gp.indices_)
def test_run_details():
"""Check the run_details_ attribute works as expected."""
est = SymbolicRegressor(generations=5, random_state=415)
est.fit(boston.data, boston.target)
# Check generations are indexed as expected without warm_start
assert_equal(est.run_details_['generation'], list(range(5)))
est.set_params(generations=10, warm_start=True)
est.fit(boston.data, boston.target)
# Check generations are indexed as expected with warm_start
assert_equal(est.run_details_['generation'], list(range(10)))
# Check all details have expected number of elements
for detail in est.run_details_:
assert_equal(len(est.run_details_[detail]), 10)
def test_warm_start():
"""Check the warm_start functionality works as expected."""
est = SymbolicRegressor(generations=20, random_state=415)
est.fit(boston.data, boston.target)
cold_fitness = est._program.fitness_
cold_program = est._program.__str__()
# Check fitting fewer generations raises error
est.set_params(generations=5, warm_start=True)
assert_raises(ValueError, est.fit, boston.data, boston.target)
# Check fitting the same number of generations warns
est.set_params(generations=20, warm_start=True)
assert_warns(UserWarning, est.fit, boston.data, boston.target)
# Check warm starts get the same result
est = SymbolicRegressor(generations=10, random_state=415)
est.fit(boston.data, boston.target)
est.set_params(generations=20, warm_start=True)
est.fit(boston.data, boston.target)
warm_fitness = est._program.fitness_
warm_program = est._program.__str__()
assert_almost_equal(cold_fitness, warm_fitness)
assert_equal(cold_program, warm_program)
def test_low_memory():
"""Check the low_memory functionality works as expected."""
est = SymbolicRegressor(generations=10,
random_state=56,
low_memory=True)
# Check there are no parents
est.fit(boston.data, boston.target)
assert_true(est._programs[-2] is None)
def test_low_memory_warm_start():
"""Check the warm_start functionality works as expected with low_memory."""
est = SymbolicRegressor(generations=20,
random_state=415,
low_memory=True)
est.fit(boston.data, boston.target)
cold_fitness = est._program.fitness_
cold_program = est._program.__str__()
# Check warm start with low memory gets the same result
est = SymbolicRegressor(generations=10,
random_state=415,
low_memory=True)
est.fit(boston.data, boston.target)
est.set_params(generations=20, warm_start=True)
est.fit(boston.data, boston.target)
warm_fitness = est._program.fitness_
warm_program = est._program.__str__()
assert_almost_equal(cold_fitness, warm_fitness)
assert_equal(cold_program, warm_program)
if __name__ == "__main__":
import nose
nose.runmodule()
| 37.571856 | 79 | 0.63768 |
4a1c25b674a2a04556106c45387c0c8a6dff17d2
| 1,784 |
py
|
Python
|
asyncio/asyncio03_api/class_with_async_methods/ex03_module_classmethod.py
|
levs72/pyneng-examples
|
d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9
|
[
"MIT"
] | 11 |
2021-04-05T09:30:23.000Z
|
2022-03-09T13:27:56.000Z
|
asyncio/asyncio03_api/class_with_async_methods/ex03_module_classmethod.py
|
levs72/pyneng-examples
|
d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9
|
[
"MIT"
] | null | null | null |
asyncio/asyncio03_api/class_with_async_methods/ex03_module_classmethod.py
|
levs72/pyneng-examples
|
d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9
|
[
"MIT"
] | 11 |
2021-04-06T03:44:35.000Z
|
2022-03-04T21:20:40.000Z
|
from pprint import pprint
import asyncio
from itertools import repeat
import re
import asyncssh
class CiscoSSH:
@classmethod
async def connect(cls, ip, username, password):
self = CiscoSSH()
self.ip = ip
self.username = username
self.password = password
print(f"Подключаюсь к {self.ip}")
ssh_coroutine = asyncssh.connect(
self.ip,
username=self.username,
password=self.password,
encryption_algs="+aes128-cbc,aes256-cbc",
)
self.ssh = await asyncio.wait_for(ssh_coroutine, timeout=10)
self.writer, self.reader, stderr = await self.ssh.open_session(
term_type="Dumb", term_size=(200, 24)
)
output = await self.reader.readuntil(">")
self.writer.write("enable\n")
output = await self.reader.readuntil("Password")
self.writer.write("cisco\n")
output = await self.reader.readuntil("#")
self.writer.write("terminal length 0\n")
output = await self.reader.readuntil("#")
return self
async def send_show_command(self, command):
print(f"Отправляю команду {command} на устройство {self.ip}")
self.writer.write(command + "\n")
output = await self.reader.readuntil([">", "#"])
return output
def parse_output(self, command_output):
m = re.search("(\S+)[#>]", command_output)
if m:
return m.group(1)
async def close(self):
self.ssh.close()
await self.ssh.wait_closed()
async def __aenter__(self):
print("__aenter__")
await self.connect()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
print("__aexit__")
await self.close()
| 29.733333 | 71 | 0.6037 |
4a1c260b56af501744c314feb81cb9df80115516
| 3,481 |
py
|
Python
|
src/experiment3.py
|
qwerty29544/Volume_nonStationary_acoustics
|
5b56e0417804b659f88364f7b8abe0f4ea11a68d
|
[
"Apache-2.0"
] | 1 |
2022-03-28T18:39:38.000Z
|
2022-03-28T18:39:38.000Z
|
src/experiment3.py
|
qwerty29544/Volume_nonStationary_acoustics
|
5b56e0417804b659f88364f7b8abe0f4ea11a68d
|
[
"Apache-2.0"
] | null | null | null |
src/experiment3.py
|
qwerty29544/Volume_nonStationary_acoustics
|
5b56e0417804b659f88364f7b8abe0f4ea11a68d
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import random
from typing import *
import matplotlib.pyplot as plt
from iterations_lib.python_inspectors import BiCGStab, TwoSGD, ThreeSGD, utils
import pandas as pd
def plot_iterations(z_history, iteration_space, color="#FF00FF"):
z_history = z_history @ z_history.T
z_history = z_history[:, 0]
plt.plot(iteration_space, z_history, c=color)
return True
def main():
    # Problem setup ---------------------------------
random.seed(123)
np.random.seed(123)
n = 2000
f = np.random.normal(0, 1, 2 * n) + 1j * np.random.normal(0, 1, 2*n)
half_diag = np.random.uniform(5, 10, n) + 1j * np.random.uniform(0, 2, n)
other_half = np.conj(half_diag)
diag_h = np.concatenate((half_diag, other_half), 0)
matrix_h = np.diag(diag_h)
z_0 = np.random.uniform(-1, 1, 2 * n)
plt.scatter(0, 0)
plt.scatter(np.real(diag_h), np.imag(diag_h))
plt.show()
print("Current equation:")
print("\nH matrix:")
print(matrix_h)
print("\nf vector:")
print(f)
print("\nSolve of the current equation is:")
print(np.linalg.solve(matrix_h, f))
    # BiCGStab solution ---------------------------
filename_BiCGStab = "ex3_BiCGStab.csv"
solve_BiCGStab, iter_space_BiCGStab, _, _, _, _, _, alpha, beta, omega, rho = BiCGStab.BiCGStab_solver(matrix_h, f, z_0)
Table_BiCGStab = pd.DataFrame(data=solve_BiCGStab)
Table_BiCGStab['matrix_multiplications'] = iter_space_BiCGStab
Table_BiCGStab['alpha'] = alpha
Table_BiCGStab['beta'] = beta
Table_BiCGStab['omega'] = omega
Table_BiCGStab['rho'] = rho
cols = Table_BiCGStab.columns.tolist()
cols = cols[-5:] + cols[:-5]
Table_BiCGStab = Table_BiCGStab[cols]
Table_BiCGStab.to_csv(filename_BiCGStab)
    # TwoSGD solution -----------------------------
filename_TwoSGD = "ex3_TwoSGD.csv"
solve_TwoSGD, it_space_TwoSGD, _, _, alpha_TwoSGD, gamma_TwoSGD = TwoSGD.TwoSGD_solver(matrix_h, f, z_0)
Table_TwoSGD = pd.DataFrame(data=solve_TwoSGD)
Table_TwoSGD['matrix_multiplications'] = it_space_TwoSGD
Table_TwoSGD['alpha'] = alpha_TwoSGD
Table_TwoSGD['gamma'] = gamma_TwoSGD
cols = Table_TwoSGD.columns.tolist()
cols = cols[-3:] + cols[:-3]
Table_TwoSGD = Table_TwoSGD[cols]
Table_TwoSGD.to_csv(filename_TwoSGD)
    # ThreeSGD solution ---------------------------
filename_ThreeSGD = "ex3_ThreeSGD.csv"
solve_ThreeSGD, it_space_ThreeSGD, _, _, _, _, alpha_ThreeSGD, beta_ThreeSGD, gamma_ThreeSGD = ThreeSGD.ThreeSGD_solver(matrix_h, f, z_0)
Table_ThreeSGD = pd.DataFrame(data=solve_ThreeSGD)
Table_ThreeSGD['matrix_multiplications'] = it_space_ThreeSGD
Table_ThreeSGD['alpha'] = alpha_ThreeSGD
Table_ThreeSGD['beta'] = beta_ThreeSGD
Table_ThreeSGD['gamma'] = gamma_ThreeSGD
cols = Table_ThreeSGD.columns.tolist()
cols = cols[-4:] + cols[:-4]
Table_ThreeSGD = Table_ThreeSGD[cols]
Table_ThreeSGD.to_csv(filename_ThreeSGD)
    # Plots --------------------------------------------
plt.figure(figsize=(9, 9), dpi=100)
plot_iterations(np.real(solve_BiCGStab), iter_space_BiCGStab, color="#FF0000")
plot_iterations(np.real(solve_TwoSGD), it_space_TwoSGD, color="#0000FF")
plot_iterations(np.real(solve_ThreeSGD), it_space_ThreeSGD, color="#FFFF00")
plt.xlabel("Количество умножений матрицы на вектор")
plt.ylabel("Норма вектора решения")
plt.show()
return 0
if __name__ == "__main__":
main()
| 36.642105 | 141 | 0.669922 |
4a1c263d637619294f021a67467b4c659bf91a5d
| 965 |
py
|
Python
|
tests/test_gini.py
|
dave452/pl_curves
|
7d102c1a4c2e0c86958037c906fa067dfb36601c
|
[
"MIT"
] | null | null | null |
tests/test_gini.py
|
dave452/pl_curves
|
7d102c1a4c2e0c86958037c906fa067dfb36601c
|
[
"MIT"
] | null | null | null |
tests/test_gini.py
|
dave452/pl_curves
|
7d102c1a4c2e0c86958037c906fa067dfb36601c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
'''
unit test for the gini coefficient function
'''
from pl_curve import calculate_gini
import pandas as pd
import math
def test_gini_empty():
'''test calculating a gini coefficient with an empty list
This will cause some warnings from python
'''
gini = calculate_gini(pd.Series([]))
assert math.isnan(gini) is True
def test_gini_single():
'''FIXME
test calculating a gini coefficient with a single item in the list
the coefficient should be zero as there's no variation
'''
gini = calculate_gini(pd.Series([1.0]))
assert gini == 0
def test_gini_four():
'''test calculating a gini coefficient with four different items'''
gini = calculate_gini(pd.Series([1.0, 2.0, 3.0, 4.0]))
assert gini == 0.25
def test_gini_four_even():
'''test calculating a gini coefficient with four identical items'''
gini = calculate_gini(pd.Series([1.0, 1.0, 1.0, 1.0]))
assert gini == 0.0
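# Worked example (added for illustration, assuming calculate_gini implements the
# usual definition of the Gini coefficient as half the relative mean absolute
# difference): for [1, 2, 3, 4] the sum of |x_i - x_j| over all ordered pairs is
# 20, n**2 = 16 and the mean is 2.5, so G = 20 / (2 * 16 * 2.5) = 0.25, matching
# test_gini_four above. Identical values give G = 0, as in test_gini_four_even.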
| 26.081081 | 71 | 0.68601 |
4a1c26ff0b24f1b4f7926f61b40b0855aed3bcec
| 11,492 |
py
|
Python
|
analysis/tables/table_iv.py
|
HansGiesen/hudson
|
7642e50279290bf1e4cc930c88eece5ce025b4ed
|
[
"MIT"
] | null | null | null |
analysis/tables/table_iv.py
|
HansGiesen/hudson
|
7642e50279290bf1e4cc930c88eece5ce025b4ed
|
[
"MIT"
] | null | null | null |
analysis/tables/table_iv.py
|
HansGiesen/hudson
|
7642e50279290bf1e4cc930c88eece5ce025b4ed
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import math
import numpy as np
import os
import pandas as pd
import sqlalchemy
import yaml
import zlib
from pickle import loads
resources = ['luts', 'regs', 'dsps', 'brams']
platforms = {'zcu102': 'ZCU102', 'ultra96': 'Ultra96-V2', 'pynq': 'Pynq-Z1'}
benchmarks = {'bnn': 'BNN', 'spam-filter': 'Spam filter', '3d-rendering': '3D rendering',
'digit-recognition': 'Digit recognition', 'optical-flow': 'Optical flow',
'face-detection': 'Face detection'}
def get_value(data, benchmark, platform, technique, column):
sel = data.loc[(data['benchmark'] == benchmark) &
(data['platform'] == platform) &
(data['technique'] == technique), column]
return sel.squeeze() if len(sel) > 0 else float('inf')
def get_core_map(data, benchmark, platform):
core_map = get_value(data, benchmark, platform, 'PipelinedMultiFidBayes', 'core_map')
return core_map if core_map != 'bnn' else '4x1.1x2.1x2'
def get_runtime(data, benchmark, platform, technique):
run_time = 1000.0 * get_value(data, benchmark, platform, technique, 'run_time')
return '-' if math.isinf(run_time) else ('%.1f' % run_time)
def get_speedup(data, benchmark, platform, technique):
    # Look up the untuned latency (in ms) for this benchmark/platform.
    sel = untuned_data.loc[(untuned_data['platform'] == platform) &
                           (untuned_data['benchmark'] == benchmark), 'run_time']
    run_time_untuned = 1000.0 * sel.squeeze()
run_time_bayes = 1000.0 * get_value(data, benchmark, platform, technique, 'run_time')
return run_time_untuned / run_time_bayes
def get_utilization(data, benchmark, platform, technique, resource):
usage = get_value(data, benchmark, platform, technique, resource)
max_usage = get_value(data, benchmark, platform, technique, 'max_' + resource)
utilization = 100.0 * usage / max_usage
return '%.0f\\%%' % utilization
def get_untuned_latency(platform, benchmark):
    sel = untuned_data.loc[(untuned_data['platform'] == platform) &
                           (untuned_data['benchmark'] == benchmark), 'run_time']
return ('%.1f' % (1000.0 * sel.squeeze())) if len(sel) > 0 else '-'
script_dir = os.path.dirname(os.path.realpath(__file__))
with open('../../cfg.yml', 'r') as cfg_file:
data = cfg_file.read()
tuner_cfg = yaml.safe_load(data)
database = tuner_cfg['database'].replace('mysql', 'mysql+pymysql')
engine = sqlalchemy.create_engine(database)
query = 'select platform.name as platform, program.name as benchmark, run_time, proc_freq from result' \
' inner join tuning_run on tuning_run.id = result.tuning_run_id' \
' inner join program_version on program_version.id = program_version_id' \
' inner join program on program.id = program_version.program_id' \
' inner join platform on platform.id = platform_id' \
' inner join test on test.id = test_id' \
' where test.name like "untuned_*"'
untuned_data = pd.read_sql_query(query, engine)
# Convert the latency to seconds.
untuned_data['run_time'] /= untuned_data['proc_freq']
untuned_data = untuned_data.drop(columns=['proc_freq'])
query = 'select program.name as benchmark, seed, args, proc_freq, tuning_run.start_date, collection_date,' \
' result.state, run_time, tuning_run.name, result.luts, result.regs, result.dsps, result.brams,' \
' platform.luts as max_luts, platform.regs as max_regs, platform.dsps as max_dsps,' \
' platform.brams as max_brams, platform.name as platform, fidelity from result' \
' inner join configuration on result.configuration_id = configuration.id' \
' inner join tuning_run on tuning_run.id = result.tuning_run_id' \
' inner join program_version on program_version.id = program_version_id' \
' inner join program on program.id = program_version.program_id' \
' inner join platform on platform.id = platform_id' \
' inner join test on test.id = test_id' \
' where test.name like "opentuner_%%" or test.name like "bayes_%%" or' \
' test.name like "pipe_%%" and (tuning_run.name not like "bnn_%%" or' \
' tuning_run.name like "bnn_4x1.1x2.1x2_%%")'
data = pd.read_sql_query(query, engine)
# Get command line arguments of each tuning run.
args = data['args'].transform(zlib.decompress)
args = args.transform(lambda field: loads(field, encoding='latin1'))
data = data.drop(columns=['args'])
# Get the search technique of each tuning run.
data['technique'] = args.transform(lambda field: field.technique[0])
# Extract the core assignment of each tuning_run.
data['core_map'] = args.transform(lambda field: (field.core_map if 'core_map' in field else ''))
data.loc[data['core_map'].isna(), 'core_map'] = ''
# Determine the maximum fidelity.
data.loc[data['technique'] == 'PipelinedMultiFidBayes', 'max_fidelity'] = 3
data.loc[data['technique'] == 'AUCBanditMetaTechniqueA', 'max_fidelity'] = 0
data.loc[data['technique'] == 'Bayes', 'max_fidelity'] = 0
# Extract the seed from the tuning run name.
data['seed'] = data['name'].str.extract(r'.*_(\d+)$').astype(int)
# Replace extremely large values with infinity.
data.loc[data['run_time'] > 1e30, 'run_time'] = float('inf')
# Set runtime of failed builds to infinity to make sure that they will be ignored later.
data.loc[data['state'] != 'OK', 'run_time'] = float('inf')
data = data.drop(columns=['state'])
# Set runtime of incomplete builds to infinity to make sure that they will be ignored later.
data.loc[data['fidelity'] != data['max_fidelity'], 'run_time'] = float('inf')
data = data.drop(columns=['fidelity', 'max_fidelity'])
# Set the resource usage of all failed or incomplete builds to NA.
data.loc[~np.isfinite(data['run_time']), resources] = None
# Convert the latency to seconds.
data['run_time'] = data['run_time'] / data['proc_freq']
data = data.drop(columns=['proc_freq'])
# Compute the tuning time in days.
data['time'] = (data['collection_date'] - data['start_date']).dt.total_seconds() / 86400.0
data = data.drop(columns=['start_date', 'collection_date'])
# Cut the tuning runs off after 1 day.
data_1 = data[data['time'] < 1]
# Find the shortest latency. We sort in case there are ties.
data_1 = data_1.sort_values('time')
data_1 = data_1.drop(columns=['time'])
rows = data_1.groupby(['benchmark', 'platform', 'seed', 'technique', 'core_map'])['run_time'].idxmin()
data_1 = data_1.loc[rows]
# Find average latency across seeds.
data_1 = data_1.groupby(['benchmark', 'platform', 'technique', 'core_map']).mean().reset_index()
data_1 = data_1.drop(columns=['seed'])
# Cut the tuning runs off after 3 days.
data = data[data['time'] < 3]
data = data.drop(columns=['time'])
# Find the shortest latency. We sort in case there are ties.
rows = data.groupby(['benchmark', 'platform', 'seed', 'technique', 'core_map'])['run_time'].idxmin()
data = data.loc[rows]
# Find average latency across seeds.
data = data.groupby(['benchmark', 'platform', 'technique', 'core_map']).mean().reset_index()
data = data.drop(columns=['seed'])
# Generate Latex table.
output = ''
min_speedup_1 = float('inf')
min_speedup_3 = float('inf')
max_speedup_1 = 0.0
max_speedup_3 = 0.0
speedups_bayes = []
speedups_pipe = []
for (benchmark, platform), group in data.groupby(['benchmark', 'platform']):
core_map = get_core_map(data, benchmark, platform)
core_map = '\\texttt{' + core_map.replace('.', ',') + '}'
speedup_bayes = get_speedup(data, benchmark, platform, 'Bayes')
speedup_1 = get_speedup(data_1, benchmark, platform, 'PipelinedMultiFidBayes')
speedup_3 = get_speedup(data, benchmark, platform, 'PipelinedMultiFidBayes')
output += benchmarks[benchmark] + ' & ' + platforms[platform] + ' & ' + core_map.replace('.', ',')
output += ' & ' + get_untuned_latency(platform, benchmark)
output += ' & ' + get_runtime(data_1, benchmark, platform, 'AUCBanditMetaTechniqueA')
output += ' & ' + get_runtime(data, benchmark, platform, 'AUCBanditMetaTechniqueA')
output += ' & ' + get_runtime(data_1, benchmark, platform, 'Bayes')
output += ' & ' + get_runtime(data, benchmark, platform, 'Bayes')
output += ' & ' + get_runtime(data_1, benchmark, platform, 'PipelinedMultiFidBayes')
output += ' & ' + get_runtime(data, benchmark, platform, 'PipelinedMultiFidBayes')
output += ' & ' + get_utilization(data, benchmark, platform, 'PipelinedMultiFidBayes', 'luts')
output += ' & ' + get_utilization(data, benchmark, platform, 'PipelinedMultiFidBayes', 'regs')
output += ' & ' + get_utilization(data, benchmark, platform, 'PipelinedMultiFidBayes', 'dsps')
output += ' & ' + get_utilization(data, benchmark, platform, 'PipelinedMultiFidBayes', 'brams') + ' \\\\\n'
if platform == 'zcu102':
min_speedup_1 = min(min_speedup_1, speedup_1)
min_speedup_3 = min(min_speedup_3, speedup_3)
max_speedup_1 = max(max_speedup_1, speedup_1)
max_speedup_3 = max(max_speedup_3, speedup_3)
speedups_bayes.append(speedup_bayes)
speedups_pipe.append(speedup_3)
# Show table.
print(output)
# Output table to file.
with open('run_time.tex', 'w') as output_file:
output_file.write(output)
# Compute the average runtime decrease after 1 day.
sel = data_1[data_1['platform'] == 'zcu102'].sort_values('benchmark')
data_opentuner = sel.loc[sel['technique'] == 'AUCBanditMetaTechniqueA', 'run_time'].reset_index(drop=True)
data_pipe = sel.loc[sel['technique'] == 'PipelinedMultiFidBayes', 'run_time'].reset_index(drop=True)
run_time_dec = 100.0 * (1.0 - (data_pipe / data_opentuner).mean())
# Compute the average runtime decrease after 3 days.
sel = data[data['platform'] == 'zcu102'].sort_values('benchmark')
data_opentuner = sel.loc[sel['technique'] == 'AUCBanditMetaTechniqueA', 'run_time'].reset_index(drop=True)
data_bayes = sel.loc[sel['technique'] == 'Bayes', 'run_time'].reset_index(drop=True)
data_pipe = sel.loc[sel['technique'] == 'PipelinedMultiFidBayes', 'run_time'].reset_index(drop=True)
run_time_dec_opent = 100.0 * (1.0 - (data_pipe / data_opentuner).mean())
run_time_dec_bayes = 100.0 * (1.0 - (data_pipe / data_bayes).mean())
# Compute the speedup decrease due to not using the multi-fidelity model.
speedup_dec = 100.0 * (1.0 - np.mean(speedups_bayes) / np.mean(speedups_pipe))
# Show the runtime decrease from using the pipeline vs OpenTuner.
print('Runtime decrease with the pipeline vs OpenTuner after 1 day: %f%%' % run_time_dec)
print('Runtime decrease with the pipeline vs Bayesian optimization after 3 days: %f%%' % run_time_dec_bayes)
print('Runtime decrease with the pipeline vs OpenTuner after 3 days: %f%%' % run_time_dec_opent)
# Show the speedups.
print('Speedup after 1 day: %f - %f' % (min_speedup_1, max_speedup_1))
print('Speedup after 3 days: %f - %f' % (min_speedup_3, max_speedup_3))
# Show the speedup decreases.
print('Speedup decrease: %f%%' % speedup_dec)
# Output callouts to file.
with open('../callouts/run_time.tex', 'w') as output_file:
output_file.write('\\def \\runtimedec {%.0f}\n' % run_time_dec)
output_file.write('\\def \\runtimedecbayes {%.0f}\n' % run_time_dec_bayes)
output_file.write('\\def \\runtimedecopent {%.0f}\n' % run_time_dec_opent)
output_file.write('\\def \\minspeedupone {{{:0.1f}}}\n'.format(min_speedup_1))
output_file.write('\\def \\maxspeedupone {{{:0.0f}}}\n'.format(max_speedup_1))
output_file.write('\\def \\minspeedupthree {{{:0.1f}}}\n'.format(min_speedup_3))
output_file.write('\\def \\maxspeedupthree {{{:0.0f}}}\n'.format(max_speedup_3))
output_file.write('\\def \\bayesspeedupdec {%.0f}\n' % speedup_dec)
| 49.748918 | 111 | 0.6937 |
4a1c2790cae052c37040388653d44d44cd7a6e29
| 10,337 |
py
|
Python
|
tests/test_chaum_pedersen.py
|
iadi7ya/electionguard-python
|
731e32c221850777c0dab45b2fe94f6af67e76e3
|
[
"MIT"
] | null | null | null |
tests/test_chaum_pedersen.py
|
iadi7ya/electionguard-python
|
731e32c221850777c0dab45b2fe94f6af67e76e3
|
[
"MIT"
] | null | null | null |
tests/test_chaum_pedersen.py
|
iadi7ya/electionguard-python
|
731e32c221850777c0dab45b2fe94f6af67e76e3
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from datetime import timedelta
from hypothesis import given, settings, HealthCheck
from hypothesis.strategies import integers
from electionguard.chaum_pedersen import (
ConstantChaumPedersenProof,
make_disjunctive_chaum_pedersen_zero,
make_disjunctive_chaum_pedersen_one,
make_chaum_pedersen,
make_constant_chaum_pedersen,
make_disjunctive_chaum_pedersen,
)
from electionguard.elgamal import (
ElGamalKeyPair,
elgamal_encrypt,
elgamal_keypair_from_secret,
)
from electionguard.group import ElementModQ, TWO_MOD_Q, ONE_MOD_Q, int_to_p
from electionguardtest.elgamal import elgamal_keypairs
from electionguardtest.group import elements_mod_q_no_zero, elements_mod_q
from electionguard.utils import get_optional
class TestDisjunctiveChaumPedersen(TestCase):
def test_djcp_proofs_simple(self):
# doesn't get any simpler than this
keypair = elgamal_keypair_from_secret(TWO_MOD_Q)
nonce = ONE_MOD_Q
seed = TWO_MOD_Q
message0 = get_optional(elgamal_encrypt(0, nonce, keypair.public_key))
proof0 = make_disjunctive_chaum_pedersen_zero(
message0, nonce, keypair.public_key, ONE_MOD_Q, seed
)
proof0bad = make_disjunctive_chaum_pedersen_one(
message0, nonce, keypair.public_key, ONE_MOD_Q, seed
)
self.assertTrue(proof0.is_valid(message0, keypair.public_key, ONE_MOD_Q))
self.assertFalse(proof0bad.is_valid(message0, keypair.public_key, ONE_MOD_Q))
message1 = get_optional(elgamal_encrypt(1, nonce, keypair.public_key))
proof1 = make_disjunctive_chaum_pedersen_one(
message1, nonce, keypair.public_key, ONE_MOD_Q, seed
)
proof1bad = make_disjunctive_chaum_pedersen_zero(
message1, nonce, keypair.public_key, ONE_MOD_Q, seed
)
self.assertTrue(proof1.is_valid(message1, keypair.public_key, ONE_MOD_Q))
self.assertFalse(proof1bad.is_valid(message1, keypair.public_key, ONE_MOD_Q))
def test_djcp_proof_invalid_inputs(self):
# this is here to push up our coverage
keypair = elgamal_keypair_from_secret(TWO_MOD_Q)
nonce = ONE_MOD_Q
seed = TWO_MOD_Q
message0 = get_optional(elgamal_encrypt(0, nonce, keypair.public_key))
self.assertRaises(
Exception,
make_disjunctive_chaum_pedersen,
message0,
nonce,
keypair.public_key,
seed,
3,
)
@settings(
deadline=timedelta(milliseconds=2000),
suppress_health_check=[HealthCheck.too_slow],
max_examples=10,
)
@given(elgamal_keypairs(), elements_mod_q_no_zero(), elements_mod_q())
def test_djcp_proof_zero(
self, keypair: ElGamalKeyPair, nonce: ElementModQ, seed: ElementModQ
):
message = get_optional(elgamal_encrypt(0, nonce, keypair.public_key))
proof = make_disjunctive_chaum_pedersen_zero(
message, nonce, keypair.public_key, ONE_MOD_Q, seed
)
proof_bad = make_disjunctive_chaum_pedersen_one(
message, nonce, keypair.public_key, ONE_MOD_Q, seed
)
self.assertTrue(proof.is_valid(message, keypair.public_key, ONE_MOD_Q))
self.assertFalse(proof_bad.is_valid(message, keypair.public_key, ONE_MOD_Q))
@settings(
deadline=timedelta(milliseconds=2000),
suppress_health_check=[HealthCheck.too_slow],
max_examples=10,
)
@given(elgamal_keypairs(), elements_mod_q_no_zero(), elements_mod_q())
def test_djcp_proof_one(
self, keypair: ElGamalKeyPair, nonce: ElementModQ, seed: ElementModQ
):
message = get_optional(elgamal_encrypt(1, nonce, keypair.public_key))
proof = make_disjunctive_chaum_pedersen_one(
message, nonce, keypair.public_key, ONE_MOD_Q, seed
)
proof_bad = make_disjunctive_chaum_pedersen_zero(
message, nonce, keypair.public_key, ONE_MOD_Q, seed
)
self.assertTrue(proof.is_valid(message, keypair.public_key, ONE_MOD_Q))
self.assertFalse(proof_bad.is_valid(message, keypair.public_key, ONE_MOD_Q))
@settings(
deadline=timedelta(milliseconds=2000),
suppress_health_check=[HealthCheck.too_slow],
max_examples=10,
)
@given(elgamal_keypairs(), elements_mod_q_no_zero(), elements_mod_q())
def test_djcp_proof_broken(
self, keypair: ElGamalKeyPair, nonce: ElementModQ, seed: ElementModQ
):
# verify two different ways to generate an invalid C-P proof.
message = get_optional(elgamal_encrypt(0, nonce, keypair.public_key))
message_bad = get_optional(elgamal_encrypt(2, nonce, keypair.public_key))
proof = make_disjunctive_chaum_pedersen_zero(
message, nonce, keypair.public_key, ONE_MOD_Q, seed
)
proof_bad = make_disjunctive_chaum_pedersen_zero(
message_bad, nonce, keypair.public_key, ONE_MOD_Q, seed
)
self.assertFalse(proof_bad.is_valid(message_bad, keypair.public_key, ONE_MOD_Q))
self.assertFalse(proof.is_valid(message_bad, keypair.public_key, ONE_MOD_Q))
class TestChaumPedersen(TestCase):
def test_cp_proofs_simple(self):
keypair = elgamal_keypair_from_secret(TWO_MOD_Q)
nonce = ONE_MOD_Q
seed = TWO_MOD_Q
message = get_optional(elgamal_encrypt(0, nonce, keypair.public_key))
decryption = message.partial_decrypt(keypair.secret_key)
proof = make_chaum_pedersen(
message, keypair.secret_key, decryption, seed, ONE_MOD_Q
)
bad_proof = make_chaum_pedersen(
message, keypair.secret_key, TWO_MOD_Q, seed, ONE_MOD_Q
)
self.assertTrue(
proof.is_valid(message, keypair.public_key, decryption, ONE_MOD_Q)
)
self.assertFalse(
bad_proof.is_valid(message, keypair.public_key, decryption, ONE_MOD_Q)
)
@settings(
deadline=timedelta(milliseconds=2000),
suppress_health_check=[HealthCheck.too_slow],
max_examples=10,
)
@given(
elgamal_keypairs(),
elements_mod_q_no_zero(),
elements_mod_q(),
integers(0, 100),
integers(0, 100),
)
def test_cp_proof(
self,
keypair: ElGamalKeyPair,
nonce: ElementModQ,
seed: ElementModQ,
constant: int,
bad_constant: int,
):
if constant == bad_constant:
bad_constant = constant + 1
message = get_optional(elgamal_encrypt(constant, nonce, keypair.public_key))
decryption = message.partial_decrypt(keypair.secret_key)
proof = make_chaum_pedersen(
message, keypair.secret_key, decryption, seed, ONE_MOD_Q
)
bad_proof = make_chaum_pedersen(
message, keypair.secret_key, int_to_p(bad_constant), seed, ONE_MOD_Q
)
self.assertTrue(
proof.is_valid(message, keypair.public_key, decryption, ONE_MOD_Q)
)
self.assertFalse(
bad_proof.is_valid(message, keypair.public_key, decryption, ONE_MOD_Q)
)
class TestConstantChaumPedersen(TestCase):
def test_ccp_proofs_simple_encryption_of_zero(self):
keypair = elgamal_keypair_from_secret(TWO_MOD_Q)
nonce = ONE_MOD_Q
seed = TWO_MOD_Q
message = get_optional(elgamal_encrypt(0, nonce, keypair.public_key))
proof = make_constant_chaum_pedersen(
message, 0, nonce, keypair.public_key, seed, ONE_MOD_Q
)
bad_proof = make_constant_chaum_pedersen(
message, 1, nonce, keypair.public_key, seed, ONE_MOD_Q
)
self.assertTrue(proof.is_valid(message, keypair.public_key, ONE_MOD_Q))
self.assertFalse(bad_proof.is_valid(message, keypair.public_key, ONE_MOD_Q))
def test_ccp_proofs_simple_encryption_of_one(self):
keypair = elgamal_keypair_from_secret(TWO_MOD_Q)
nonce = ONE_MOD_Q
seed = TWO_MOD_Q
message = get_optional(elgamal_encrypt(1, nonce, keypair.public_key))
proof = make_constant_chaum_pedersen(
message, 1, nonce, keypair.public_key, seed, ONE_MOD_Q
)
bad_proof = make_constant_chaum_pedersen(
message, 0, nonce, keypair.public_key, seed, ONE_MOD_Q
)
self.assertTrue(proof.is_valid(message, keypair.public_key, ONE_MOD_Q))
self.assertFalse(bad_proof.is_valid(message, keypair.public_key, ONE_MOD_Q))
@settings(
deadline=timedelta(milliseconds=2000),
suppress_health_check=[HealthCheck.too_slow],
max_examples=10,
)
@given(
elgamal_keypairs(),
elements_mod_q_no_zero(),
elements_mod_q(),
integers(0, 100),
integers(0, 100),
)
def test_ccp_proof(
self,
keypair: ElGamalKeyPair,
nonce: ElementModQ,
seed: ElementModQ,
constant: int,
bad_constant: int,
):
# assume() slows down the test-case generation
# so assume(constant != bad_constant)
if constant == bad_constant:
bad_constant = constant + 1
message = get_optional(elgamal_encrypt(constant, nonce, keypair.public_key))
message_bad = get_optional(
elgamal_encrypt(bad_constant, nonce, keypair.public_key)
)
proof = make_constant_chaum_pedersen(
message, constant, nonce, keypair.public_key, seed, ONE_MOD_Q
)
self.assertTrue(proof.is_valid(message, keypair.public_key, ONE_MOD_Q))
proof_bad1 = make_constant_chaum_pedersen(
message_bad, constant, nonce, keypair.public_key, seed, ONE_MOD_Q
)
self.assertFalse(
proof_bad1.is_valid(message_bad, keypair.public_key, ONE_MOD_Q)
)
proof_bad2 = make_constant_chaum_pedersen(
message, bad_constant, nonce, keypair.public_key, seed, ONE_MOD_Q
)
self.assertFalse(proof_bad2.is_valid(message, keypair.public_key, ONE_MOD_Q))
proof_bad3 = ConstantChaumPedersenProof(
proof.pad, proof.data, proof.challenge, proof.response, -1
)
self.assertFalse(proof_bad3.is_valid(message, keypair.public_key, ONE_MOD_Q))
| 38.570896 | 88 | 0.679791 |
4a1c28c4629c1ef051b6dc84ff8f1cad62851610
| 4,545 |
py
|
Python
|
ogc_search/wet/templatetags/search_extras.py
|
open-data/ogc_search
|
cd089ce6fa1113710989299fc0cc663c5ef40e3c
|
[
"MIT"
] | null | null | null |
ogc_search/wet/templatetags/search_extras.py
|
open-data/ogc_search
|
cd089ce6fa1113710989299fc0cc663c5ef40e3c
|
[
"MIT"
] | 30 |
2019-11-19T22:29:48.000Z
|
2022-03-31T15:37:24.000Z
|
ogc_search/wet/templatetags/search_extras.py
|
open-data/ogc_search
|
cd089ce6fa1113710989299fc0cc663c5ef40e3c
|
[
"MIT"
] | 1 |
2020-07-15T16:13:06.000Z
|
2020-07-15T16:13:06.000Z
|
# coding=utf-8
from babel.dates import format_date
import bleach
from django import template
from django.conf import settings
from django.utils.translation import gettext
from dateutil import parser
import json
import markdown2
from urlsafe import url_part_unescape
register = template.Library()
@register.filter('SwapLangCode', autoescape=True)
def other_lang_code(value):
if str(value).lower() == 'en':
return 'fr'
elif str(value).lower() == 'fr':
return 'en'
else:
return ''
@register.filter('SwapLangName', autoescape=True)
def other_lang(value):
if str(value) == 'en':
return 'Français'
elif str(value) == 'fr':
return 'English'
else:
return ''
@register.filter('EmptyFacetMessage', autoescape=True)
def search_facet_is_empty_message(value):
msg = ''
if type(value) is dict:
c = 0
for k,v in value.items():
c = c + v
if c == 0:
msg = gettext("There are no filters for this search")
return msg
@register.filter('ToMonth', autoescape=True)
def to_month(value):
months = [
'January',
'February',
'March',
'April',
'May',
'June',
'July',
'August',
'September',
'October',
'November',
'December'
]
month_int = 0
try:
month_int = int(value)
except ValueError:
pass
if month_int < 1 or month_int > 12:
return ''
else:
return gettext(months[month_int - 1])
@register.filter('isoDateTimeToDate')
def iso_date_time_to_date(value):
my_date = parser.parse(value)
return my_date.strftime("%Y-%m-%d")
@register.filter('service_standards_en')
def si_std_json_to_html_en(value):
std_obj = json.loads(value)
return "<strong>Standard: {0}</strong><br>{1}<br>".format(
std_obj['service_std_id'],
std_obj['service_std_en'])
@register.filter('service_standards_fr')
def si_std_json_to_html_fr(value):
std_obj = json.loads(value)
return "<strong>Norme : {0}</strong><br>{1}<br>".format(
std_obj['service_std_id'],
std_obj['service_std_fr'])
@register.filter('nap_status')
def nap_status_alert(value):
if value in ('Not started', 'Non commencé'):
return '<span class="label label-default">{0}</span>'.format(value)
elif value in ('Limited progress', 'Progrès limité'):
return '<span class="label label-warning">{0}</span>'.format(value)
elif value in ('Substantial progress', 'Progrès important'):
return '<span class="label label-info">{0}</span>'.format(value)
elif value in ('Complete', 'Réalisé'):
return '<span class="label label-success">{0}</span>'.format(value)
else:
return value
@register.filter('friendly_date_en')
def human_friendly_date_en(value: str):
if len(value) == 10:
my_date = parser.parse(value)
return format_date(my_date, 'medium', locale='en_CA')
else:
return ""
@register.filter('friendly_date_fr')
def human_friendly_date_fr(value: str):
if len(value) == 10:
my_date = parser.parse(value)
return format_date(my_date, 'medium', locale='fr_CA')
else:
return ""
@register.filter('trim_left')
def trim_left(value: str, arg: int):
if len(value) < arg:
return value
else:
return value[arg:]
@register.filter('friendly_reporting_period')
def friendly_reporting_period(value: str):
if len(value.split('-')) == 3:
rp = value.split('-')
return "{2} {0}-{1}".format(rp[0], rp[1], rp[2])
else:
return value
@register.filter('normalize_headings')
def normalize_headings(value: str):
headings = {
'</h4>': '</h6>',
'<h4>': '<h6>',
'</h3>': '</h5>',
'<h3>': '<h5>',
'</h2>': '</h4>',
'<h2>': '<h4>',
'</h1>': '</h3>',
'<h1>': '<h3>',
}
for key in headings:
if value.find(key) >= 0:
value = value.replace(key, headings[key])
return value
@register.filter('markdown_filter')
def markdown_filter(text):
text = markdown2.markdown(text, extras=settings.MARKDOWN_FILTER_EXTRAS)
html = bleach.clean(text, tags=settings.MARKDOWN_FILTER_WHITELIST_TAGS)
return bleach.linkify(html)
@register.filter('url_part_unescape')
def url_part_unescape_filter(value: str):
return url_part_unescape(value)
@register.filter('strip_whitespace')
def strip_whitespace(text):
return str(text).strip()
| 25.391061 | 75 | 0.618482 |
4a1c298ea2bb87deea8d7ea07faea8fb15a05bb6
| 433 |
py
|
Python
|
autopylot/__init__.py
|
mattcaldwell/autopylot
|
3825432c04451300318b2ec0afb637c5f7800a30
|
[
"MIT"
] | 2 |
2015-05-14T16:24:13.000Z
|
2015-11-05T18:02:16.000Z
|
autopylot/__init__.py
|
mattcaldwell/autopylot
|
3825432c04451300318b2ec0afb637c5f7800a30
|
[
"MIT"
] | null | null | null |
autopylot/__init__.py
|
mattcaldwell/autopylot
|
3825432c04451300318b2ec0afb637c5f7800a30
|
[
"MIT"
] | null | null | null |
from contextlib import contextmanager
from .enum import *
import urlparse
@contextmanager
def ignored(*exceptions):
''' inspired by http://hg.python.org/cpython/rev/406b47c64480'''
exceptions = exceptions or Exception
try:
yield
except exceptions:
pass
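# Usage sketch (illustrative only, not part of the original module): the
# exception classes and file name below are arbitrary examples.
#
#     import os
#     with ignored(OSError, IOError):
#         os.remove('some_temporary_file')
#
# If no exception types are given, ignored() falls back to suppressing any
# Exception, because of the `exceptions or Exception` default above.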
def formaturl(url):
parsed = list(urlparse.urlparse(url))
parsed[2] = parsed[2].replace('//', '/')
return urlparse.urlunparse(parsed)
| 24.055556 | 68 | 0.685912 |
4a1c2a3a993d7b4c10f2661aed4cbe34490069c4
| 359 |
py
|
Python
|
src/rl_agents/agents/__init__.py
|
mateuspontesm/RL-Agents
|
70274b2b4b95402f03857818733fc0d52aa3e385
|
[
"BSD-3-Clause"
] | 1 |
2020-10-30T15:40:01.000Z
|
2020-10-30T15:40:01.000Z
|
src/rl_agents/agents/__init__.py
|
mateuspontesm/RL-Agents
|
70274b2b4b95402f03857818733fc0d52aa3e385
|
[
"BSD-3-Clause"
] | null | null | null |
src/rl_agents/agents/__init__.py
|
mateuspontesm/RL-Agents
|
70274b2b4b95402f03857818733fc0d52aa3e385
|
[
"BSD-3-Clause"
] | null | null | null |
"""
The :mod:`rl_agents.agents` module includes the RL agents
classes and utilities. It includes the MAB variants,
tabular methods and Deep RL.
"""
import rl_agents.agents.mab
from rl_agents.agents.tabular import ( # isort:skip
QLearningAgent,
SarsaAgent,
ExpectedSarsaAgent,
)
__all__ = ["QLearningAgent", "SarsaAgent", "ExpectedSarsaAgent"]
| 22.4375 | 64 | 0.746518 |
4a1c2abc8cb8e060ff63c06b1f4fe23d8eb1f833
| 226 |
py
|
Python
|
tests/test_manage.py
|
weichecn/fancy-cron
|
878f0696b2b51a419c02d974e1a9be7da11a199b
|
[
"MIT"
] | 2 |
2016-05-20T10:29:25.000Z
|
2017-10-12T13:55:16.000Z
|
tests/test_manage.py
|
weichecn/fancy-cron
|
878f0696b2b51a419c02d974e1a9be7da11a199b
|
[
"MIT"
] | null | null | null |
tests/test_manage.py
|
weichecn/fancy-cron
|
878f0696b2b51a419c02d974e1a9be7da11a199b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from click.testing import CliRunner
import manage
runner = CliRunner()
def test_initdb():
runner.invoke(manage.initdb)
def test_destroy():
runner.invoke(manage.destroy)
| 13.294118 | 35 | 0.699115 |
4a1c2af6ea276808e2e4345df7130a40bd4d12ed
| 1,395 |
py
|
Python
|
deprecated/examples_robust/hci/enrico_unimodal_0_robust.py
|
kapikantzari/MultiBench
|
44ab6ea028682040a0c04de68239ce5cdf15123f
|
[
"MIT"
] | 148 |
2021-03-06T06:54:13.000Z
|
2022-03-29T19:27:21.000Z
|
deprecated/examples_robust/hci/enrico_unimodal_0_robust.py
|
kapikantzari/MultiBench
|
44ab6ea028682040a0c04de68239ce5cdf15123f
|
[
"MIT"
] | 10 |
2021-07-19T22:57:49.000Z
|
2022-02-04T03:12:29.000Z
|
deprecated/examples_robust/hci/enrico_unimodal_0_robust.py
|
kapikantzari/MultiBench
|
44ab6ea028682040a0c04de68239ce5cdf15123f
|
[
"MIT"
] | 18 |
2021-07-22T07:17:27.000Z
|
2022-03-27T16:11:40.000Z
|
from unimodals.common_models import VGG16, VGG16Slim, DAN, Linear, MLP, VGG11Slim, VGG11Pruned, VGG16Pruned
import torch
from memory_profiler import memory_usage
from robustness.all_in_one import general_train, general_test
from datasets.enrico.get_data_robust import get_dataloader_robust
from datasets.enrico.get_data import get_dataloader
from fusions.common_fusions import Concat
from training_structures.unimodal import train, test
import sys
import os
from torch import nn
sys.path.append(os.getcwd())
dls, weights = get_dataloader('datasets/enrico/dataset')
traindata, validdata, _ = dls
robustdata = get_dataloader_robust(
'datasets/enrico/dataset', wireframe_noise=False)
modalnum = 0
encoder = VGG11Slim(16, dropout=True, dropoutp=0.2,
freeze_features=True).cuda()
head = Linear(16, 20).cuda()
# head = MLP(16, 32, 20, dropout=False).cuda()
allmodules = [encoder, head]
def trainprocess(filename_encoder, filename_head):
train(encoder, head, traindata, validdata, 50, optimtype=torch.optim.Adam, lr=0.0001,
weight_decay=0, modalnum=modalnum, save_encoder=filename_encoder, save_head=filename_head)
filename = general_train(trainprocess, 'enrico_unimodal_0', encoder=True)
def testprocess(encoder, head, testdata):
return test(encoder, head, testdata, modalnum=modalnum)
general_test(testprocess, filename, robustdata, encoder=True)
| 34.02439 | 107 | 0.784229 |
4a1c2bcbcb15afce60596d083de508129e784da3
| 1,121 |
py
|
Python
|
rxsci/state/state_topology.py
|
maki-nage/rxsci
|
64c9956752cbdd4c65aa9f054b6b28318a056625
|
[
"MIT"
] | 3 |
2021-05-03T13:40:46.000Z
|
2022-03-06T07:59:30.000Z
|
rxsci/state/state_topology.py
|
maki-nage/rxsci
|
64c9956752cbdd4c65aa9f054b6b28318a056625
|
[
"MIT"
] | 9 |
2020-10-22T21:08:10.000Z
|
2021-08-05T09:01:26.000Z
|
rxsci/state/state_topology.py
|
maki-nage/rxsci
|
64c9956752cbdd4c65aa9f054b6b28318a056625
|
[
"MIT"
] | 2 |
2021-01-05T16:48:54.000Z
|
2021-08-07T12:51:01.000Z
|
from collections import namedtuple
ProbeStateTopology = namedtuple('ProbeStateTopology', ['topology'])
ProbeStateTopology.__doc__ = "Event sent to probe for stateful operators"
ProbeStateTopology.topology.__doc__ = "The state topology object to fill"
StateDef = namedtuple('StateDef', ['name', 'data_type', 'default_value'])
class StateTopology(object):
def __init__(self):
self.states = []
self.ids = {}
def create_mapper(self, name):
"""A mapper is a non-indexable state. Mappers are used in group_by
operator (where key is mapped to an index). They do not need to be
stored on persistent storage if no other states are used in the
applcation.
"""
return self.create_state(name, data_type='mapper')
def create_state(self, name, data_type, default_value=None):
if name in self.ids:
self.ids[name] += 1
else:
self.ids[name] = 0
unique_name = '{}-{}'.format(name, self.ids[name])
self.states.append(StateDef(unique_name, data_type, default_value))
return len(self.states) - 1
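# Minimal usage sketch (not part of the original module): each call to
# create_state/create_mapper appends a StateDef and returns its index;
# registering the same name again yields a new unique suffix.
if __name__ == '__main__':
    topology = StateTopology()
    assert topology.create_state('counter', data_type=int, default_value=0) == 0
    assert topology.create_mapper('group_by_key') == 1
    assert topology.create_state('counter', data_type=int) == 2
    print([s.name for s in topology.states])
    # ['counter-0', 'group_by_key-0', 'counter-1']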
| 36.16129 | 75 | 0.663693 |
4a1c2bd3fe8c88065ba858b2105ff176e816c266
| 67,972 |
py
|
Python
|
src/calmjs/parse/tests/test_es5_unparser.py
|
KristobalJunta/calmjs.parse
|
0ee6a497404a38670ada1ef029a20f8e6f4499e4
|
[
"MIT"
] | null | null | null |
src/calmjs/parse/tests/test_es5_unparser.py
|
KristobalJunta/calmjs.parse
|
0ee6a497404a38670ada1ef029a20f8e6f4499e4
|
[
"MIT"
] | null | null | null |
src/calmjs/parse/tests/test_es5_unparser.py
|
KristobalJunta/calmjs.parse
|
0ee6a497404a38670ada1ef029a20f8e6f4499e4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import textwrap
from functools import partial
from calmjs.parse import asttypes
from calmjs.parse import es5
from calmjs.parse.ruletypes import Declare
from calmjs.parse.ruletypes import Space
from calmjs.parse.ruletypes import RequiredSpace
from calmjs.parse.ruletypes import Text
from calmjs.parse.parsers.es5 import parse
from calmjs.parse.walkers import Walker
from calmjs.parse.handlers.core import layout_handler_space_drop
from calmjs.parse.handlers.core import default_rules
from calmjs.parse.handlers.core import minimum_rules
from calmjs.parse.handlers.indentation import indent
from calmjs.parse.unparsers.es5 import Unparser
from calmjs.parse.unparsers.es5 import definitions
from calmjs.parse.unparsers.es5 import pretty_print
from calmjs.parse.unparsers.es5 import minify_printer
from calmjs.parse.unparsers.es5 import minify_print
from calmjs.parse.testing.util import build_equality_testcase
def quad(items):
return [i[:4] for i in items]
class BaseVisitorTestCase(unittest.TestCase):
    # Many of these tests are here to show individual fixes that were
    # made to other classes in order to properly support the source map
    # feature.
def test_empty_program(self):
unparser = Unparser()
ast = parse('')
self.assertEqual(quad(unparser(ast)), [
])
def test_basic_integer(self):
unparser = Unparser()
ast = parse('0;')
self.assertEqual(quad(unparser(ast)), [
('0', 1, 1, None),
(';', 1, 2, None),
('\n', 0, 0, None),
])
def test_basic_var_space_standard(self):
unparser = Unparser()
ast = parse('var x = 0;')
self.assertEqual(quad(unparser(ast)), [
('var', 1, 1, None), (' ', 0, 0, None), ('x', 1, 5, None),
(' ', 0, 0, None), ('=', 1, 7, None), (' ', 0, 0, None),
('0', 1, 9, None), (';', 1, 10, None),
('\n', 0, 0, None),
])
def test_basic_var_decl(self):
declared_vars = []
def declare(dispatcher, node):
declared_vars.append(node.value)
unparser = Unparser(deferrable_handlers={
Declare: declare,
})
ast = parse('var x = 0;')
# just run through the ast
quad(unparser(ast))
self.assertEqual(['x'], declared_vars)
def test_basic_var_space_drop(self):
unparser = Unparser(layout_handlers={
Space: layout_handler_space_drop,
RequiredSpace: layout_handler_space_drop,
})
ast = parse('var x = 0;\nvar y = 0;')
self.assertEqual(quad(unparser(ast)), [
('var', 1, 1, None), (' ', None, None, None), ('x', 1, 5, None),
(' ', None, None, None), ('=', 1, 7, None),
(' ', None, None, None), ('0', 1, 9, None), (';', 1, 10, None),
('\n', 0, 0, None),
('var', 2, 1, None), (' ', None, None, None), ('y', 2, 5, None),
(' ', None, None, None), ('=', 2, 7, None),
(' ', None, None, None), ('0', 2, 9, None), (';', 2, 10, None),
('\n', 0, 0, None),
])
def test_force_handler_drop(self):
# if there are no rules provided, there will also be no layout
        # handlers - not very useful; note that there is now no
# separation between `var` and `x`.
unparser = Unparser(rules=())
ast = parse('var x = 0;')
self.assertEqual(quad(unparser(ast)), [
('var', 1, 1, None), ('x', 1, 5, None), ('=', 1, 7, None),
('0', 1, 9, None),
])
def test_simple_identifier(self):
unparser = Unparser()
ast = parse('this;')
self.assertEqual(quad(unparser(ast)), [
('this', 1, 1, None), (';', 1, 5, None), ('\n', 0, 0, None),
])
def test_simple_identifier_unmapped(self):
# if the definition contains unmapped entries
new_definitions = {}
new_definitions.update(definitions)
new_definitions['This'] = (Text(value='this', pos=None),)
unparser = Unparser(definitions=new_definitions)
ast = parse('this;')
self.assertEqual(quad(unparser(ast)), [
('this', None, None, None), (';', 1, 5, None), ('\n', 0, 0, None),
])
def test_empty_object(self):
unparser = Unparser()
ast = parse('thing = {};')
self.assertEqual(quad(unparser(ast)), [
('thing', 1, 1, None), (' ', 0, 0, None), ('=', 1, 7, None),
(' ', 0, 0, None), ('{', 1, 9, None), ('}', 1, 10, None),
(';', 1, 11, None), ('\n', 0, 0, None),
])
def test_simple_function_declare(self):
unparser = Unparser()
ast = parse('function foo(){}')
self.assertEqual(quad(unparser(ast)), [
('function', 1, 1, None), (' ', 0, 0, None), ('foo', 1, 10, None),
('(', 1, 13, None), (')', 1, 14, None), (' ', 0, 0, None),
('{', 1, 15, None), ('\n', 0, 0, None), ('}', 1, 16, None),
('\n', 0, 0, None),
])
def test_simple_function_invoke(self):
unparser = Unparser()
ast = parse('foo();')
self.assertEqual(quad(unparser(ast)), [
('foo', 1, 1, None), ('(', 1, 4, None), (')', 1, 5, None),
(';', 1, 6, None), ('\n', 0, 0, None),
])
def test_new_new(self):
unparser = Unparser()
ast = parse('new new T();')
self.assertEqual(quad(unparser(ast)), [
('new', 1, 1, None), (' ', 0, 0, None),
('new', 1, 5, None), (' ', 0, 0, None),
('T', 1, 9, None), ('(', 1, 10, None), (')', 1, 11, None),
(';', 1, 12, None), ('\n', 0, 0, None),
])
def test_getter(self):
unparser = Unparser()
ast = parse('x = {get p() {}};')
self.assertEqual(quad(unparser(ast)), [
('x', 1, 1, None), (' ', 0, 0, None), ('=', 1, 3, None),
(' ', 0, 0, None), ('{', 1, 5, None), ('\n', 0, 0, None),
('get', 1, 6, None), (' ', 0, 0, None), ('p', 1, 10, None),
('(', 1, 11, None), (')', 1, 12, None),
(' ', 0, 0, None), ('{', 1, 14, None), ('\n', 0, 0, None),
('}', 1, 15, None), ('\n', 0, 0, None),
('}', 1, 16, None), (';', 1, 17, None), ('\n', 0, 0, None),
])
def test_setter(self):
unparser = Unparser()
ast = parse('x = {set p(a) {}};')
self.assertEqual(quad(unparser(ast)), [
('x', 1, 1, None), (' ', 0, 0, None), ('=', 1, 3, None),
(' ', 0, 0, None), ('{', 1, 5, None), ('\n', 0, 0, None),
('set', 1, 6, None), (' ', 0, 0, None), ('p', 1, 10, None),
('(', 1, 11, None), ('a', 1, 12, None), (')', 1, 13, None),
(' ', 0, 0, None), ('{', 1, 15, None), ('\n', 0, 0, None),
('}', 1, 16, None), ('\n', 0, 0, None),
('}', 1, 17, None), (';', 1, 18, None), ('\n', 0, 0, None),
])
def test_switch_case_default_case(self):
unparser = Unparser()
ast = parse('switch (v) { case true: break; default: case false: }')
self.assertEqual(quad(unparser(ast)), [
('switch', 1, 1, None), (' ', 0, 0, None), ('(', 1, 8, None),
('v', 1, 9, None), (')', 1, 10, None), (' ', 0, 0, None),
('{', 1, 12, None),
('\n', 0, 0, None),
('case', 1, 14, None), (' ', 0, 0, None), ('true', 1, 19, None),
(':', 1, 23, None),
('\n', 0, 0, None),
('break', 1, 25, None), (';', 1, 30, None),
('\n', 0, 0, None),
('default', 1, 32, None), (':', 1, 39, None),
('\n', 0, 0, None),
('case', 1, 41, None), (' ', 0, 0, None), ('false', 1, 46, None),
(':', 1, 51, None),
('\n', 0, 0, None),
('}', 1, 53, None),
('\n', 0, 0, None),
])
def test_elision_0(self):
# basically empty list
unparser = Unparser()
ast = parse('[];')
self.assertEqual(quad(unparser(ast)), [
('[', 1, 1, None), (']', 1, 2, None),
(';', 1, 3, None), ('\n', 0, 0, None),
])
def test_elision_1(self):
unparser = Unparser()
ast = parse('[,];')
self.assertEqual(quad(unparser(ast)), [
('[', 1, 1, None), (',', 1, 2, None), (']', 1, 3, None),
(';', 1, 4, None), ('\n', 0, 0, None),
])
def test_elision_2(self):
unparser = Unparser()
ast = parse('[,,];')
self.assertEqual(quad(unparser(ast)), [
('[', 1, 1, None), (',,', 1, 2, None), (']', 1, 4, None),
(';', 1, 5, None), ('\n', 0, 0, None),
])
def test_elision_4(self):
unparser = Unparser()
ast = parse('[,,,,];')
self.assertEqual(quad(unparser(ast)), [
('[', 1, 1, None), (',,,,', 1, 2, None), (']', 1, 6, None),
(';', 1, 7, None), ('\n', 0, 0, None),
])
def test_elision_v3(self):
unparser = Unparser()
ast = parse('[1,,,,];')
self.assertEqual(quad(unparser(ast)), [
('[', 1, 1, None), ('1', 1, 2, None), (',', 0, 0, None),
(',,,', 1, 4, None), (']', 1, 7, None),
(';', 1, 8, None), ('\n', 0, 0, None),
])
def test_elision_vv3(self):
unparser = Unparser()
ast = parse('[1, 2,,,,];')
self.assertEqual(quad(unparser(ast)), [
('[', 1, 1, None),
('1', 1, 2, None), (',', 0, 0, None), (' ', 0, 0, None),
('2', 1, 5, None), (',', 0, 0, None), # ditto for this
(',,,', 1, 7, None), (']', 1, 10, None),
(';', 1, 11, None), ('\n', 0, 0, None),
])
def test_elision_v3v(self):
unparser = Unparser()
ast = parse('[1,,,, 1];')
self.assertEqual(quad(unparser(ast)), [
('[', 1, 1, None), ('1', 1, 2, None), (',', 0, 0, None),
(',,,', 1, 4, None),
(' ', 0, 0, None),
('1', 1, 8, None), (']', 1, 9, None),
(';', 1, 10, None), ('\n', 0, 0, None),
])
def test_elision_1v1(self):
unparser = Unparser()
ast = parse('[, 1,,];')
self.assertEqual(list(i[:4] for i in unparser(ast)), [
('[', 1, 1, None),
(',', 1, 2, None), (' ', 0, 0, None),
('1', 1, 4, None), (',', 0, 0, None),
(',', 1, 6, None), (']', 1, 7, None),
(';', 1, 8, None), ('\n', 0, 0, None),
])
def test_elision_splits_spaces(self):
unparser = Unparser()
# note the spaces and how they are ignored
ast = parse('[, 1, , 2 , ,,,,, 3,, ,,,, ,,,4,];')
self.assertEqual(list(i[:4] for i in unparser(ast)), [
('[', 1, 1, None), (',', 1, 2, None), (' ', 0, 0, None),
('1', 1, 4, None), (',', 0, 0, None),
(',', 1, 7, None), (' ', 0, 0, None),
('2', 1, 9, None), (',', 0, 0, None),
(',,,,,', 1, 13, None), (' ', 0, 0, None),
('3', 1, 19, None), (',', 0, 0, None),
# though rowcols of the starting comma are maintained.
(',,,,,,,,', 1, 21, None), (' ', 0, 0, None),
('4', 1, 31, None),
(']', 1, 33, None),
(';', 1, 34, None),
('\n', 0, 0, None),
])
def test_elision_splits_newlines(self):
# the newlines in this case will be completely dropped, but
# again as long as the first comma is not shifted, it will be
        # a syntactically accurate reconstruction, even though the
        # generated sourcemap does not completely reflect the original.
unparser = Unparser()
ast = parse(textwrap.dedent('''
[, 1,
, 2 , ,,
,,, 3,,
,,,,
,,,4,];
''').strip())
self.assertEqual(list(i[:4] for i in unparser(ast)), [
('[', 1, 1, None), (',', 1, 2, None), (' ', 0, 0, None),
('1', 1, 4, None), (',', 0, 0, None),
(',', 2, 1, None), (' ', 0, 0, None),
('2', 2, 3, None), (',', 0, 0, None), (',,,,,', 2, 7, None),
(' ', 0, 0, None),
('3', 3, 5, None),
(',', 0, 0, None),
(',,,,,,,,', 3, 7, None),
(' ', 0, 0, None),
('4', 5, 4, None),
(']', 5, 6, None),
(';', 5, 7, None),
('\n', 0, 0, None),
])
def test_if_else_block(self):
unparser = Unparser()
ast = parse('if (true) {} else {}')
self.assertEqual([tuple(t[:4]) for t in (unparser(ast))], [
('if', 1, 1, None),
(' ', 0, 0, None),
('(', 1, 4, None),
('true', 1, 5, None),
(')', 1, 9, None),
(' ', 0, 0, None),
('{', 1, 11, None),
('\n', 0, 0, None),
('}', 1, 12, None),
('\n', 0, 0, None),
('else', 1, 14, None),
(' ', 0, 0, None),
('{', 1, 19, None),
('\n', 0, 0, None),
('}', 1, 20, None),
('\n', 0, 0, None),
])
class OtherUsageTestCase(unittest.TestCase):
"""
Test out other forms of usage that are not part of the standard
chain of calls, e.g. calls that involve manual creation/modification
of Nodes within the AST.
"""
def test_manual_element(self):
# a form of possible manual replacement call.
ast = asttypes.ES5Program(children=[
asttypes.ExprStatement(asttypes.FunctionCall(
identifier=asttypes.Identifier('foo'),
args=asttypes.Arguments([asttypes.Identifier('x')]),
)),
])
unparser = Unparser()
self.assertEqual([tuple(t) for t in (unparser(ast))], [
('foo', None, None, None, NotImplemented),
('(', None, None, None, NotImplemented),
('x', None, None, None, NotImplemented),
(')', None, None, None, NotImplemented),
(';', None, None, None, None),
('\n', 0, 0, None, None),
])
def test_remap_function_call(self):
# a form of possible manual replacement call.
walker = Walker()
src = textwrap.dedent("""
(function(foo, bar, arg1, arg2) {
foo(arg1);
bar(arg2);
})(foo, bar, arg1, arg2);
""").strip()
ast = parse(src)
block = walker.extract(ast, lambda n: isinstance(n, asttypes.FuncExpr))
for stmt in block.elements:
fc = stmt.expr
stmt.expr = asttypes.FunctionCall(
args=fc.args, identifier=asttypes.DotAccessor(
node=asttypes.Identifier(value='window'),
identifier=fc.identifier))
# Now try to render.
unparser = Unparser()
self.assertEqual([tuple(t[:4]) for t in (unparser(ast))], [
('(', 1, 1, None),
('function', 1, 2, None),
('(', 1, 10, None),
('foo', 1, 11, None), (',', 0, 0, None), (' ', 0, 0, None),
('bar', 1, 16, None), (',', 0, 0, None), (' ', 0, 0, None),
('arg1', 1, 21, None), (',', 0, 0, None), (' ', 0, 0, None),
('arg2', 1, 27, None), (')', 1, 31, None),
(' ', 0, 0, None),
('{', 1, 33, None),
('\n', 0, 0, None),
# injected elements should have None for lineno/colno
('window', None, None, None), ('.', None, None, None),
('foo', 2, 5, None),
('(', 2, 8, None), ('arg1', 2, 9, None), (')', 2, 13, None),
(';', 2, 14, None),
('\n', 0, 0, None),
('window', None, None, None), ('.', None, None, None),
('bar', 3, 5, None),
('(', 3, 8, None), ('arg2', 3, 9, None), (')', 3, 13, None),
(';', 3, 14, None),
('\n', 0, 0, None),
('}', 4, 1, None),
(')', 4, 2, None),
('(', 4, 3, None), ('foo', 4, 4, None), (',', 0, 0, None),
(' ', 0, 0, None), ('bar', 4, 9, None), (',', 0, 0, None),
(' ', 0, 0, None), ('arg1', 4, 14, None), (',', 0, 0, None),
(' ', 0, 0, None), ('arg2', 4, 20, None),
(')', 4, 24, None), (';', 4, 25, None), ('\n', 0, 0, None),
])
def test_pretty_print(self):
# Simple test of the pretty_print function
src = textwrap.dedent("""
(function(foo, bar, arg1, arg2) {
foo(arg1);
bar(arg2);
})(foo, bar, arg1, arg2);
""").lstrip()
self.assertEqual(pretty_print(parse(src)), src)
def test_pretty_print_custom_indent(self):
# Simple test of the pretty_print function
src = textwrap.dedent("""
(function(foo, bar, arg1, arg2) {
foo(arg1);
bar(arg2);
})(foo, bar, arg1, arg2);
""").lstrip()
self.assertEqual(pretty_print(parse(src), indent_str=' '), src)
def test_minify_print_obfuscate_skip_keywords(self):
# test that the obfuscated minifier for es5 will skip the first
# reserved keyword symbol found, i.e. 'do'
# Note that while these identifiers can be easily generated by
# a simple for loop, it can also be done by iterating through
# an enumerated instance of the obfuscation.NameGenerator class
        # and stopping at the index where the target token (in this case
# 'do') is.
tree = parse(textwrap.dedent("""
(function() {
var i_0, i_1, i_2, i_3, i_4, i_5, i_6, i_7, i_8, i_9, i_10, i_11,
i_12, i_13, i_14, i_15, i_16, i_17, i_18, i_19, i_20, i_21, i_22,
i_23, i_24, i_25, i_26, i_27, i_28, i_29, i_30, i_31, i_32, i_33,
i_34, i_35, i_36, i_37, i_38, i_39, i_40, i_41, i_42, i_43, i_44,
i_45, i_46, i_47, i_48, i_49, i_50, i_51, i_52, i_53, i_54, i_55,
i_56, i_57, i_58, i_59, i_60, i_61, i_62, i_63, i_64, i_65, i_66,
i_67, i_68, i_69, i_70, i_71, i_72, i_73, i_74, i_75, i_76, i_77,
i_78, i_79, i_80, i_81, i_82, i_83, i_84, i_85, i_86, i_87, i_88,
i_89, i_90, i_91, i_92, i_93, i_94, i_95, i_96, i_97, i_98, i_99,
i_100, i_101, i_102, i_103, i_104, i_105, i_106, i_107, i_108,
i_109, i_110, i_111, i_112, i_113, i_114, i_115, i_116, i_117,
i_118, i_119, i_120, i_121, i_122, i_123, i_124, i_125, i_126,
i_127, i_128, i_129, i_130, i_131, i_132, i_133, i_134, i_135,
i_136, i_137, i_138, i_139, i_140, i_141, i_142, i_143, i_144,
i_145, i_146, i_147, i_148, i_149, i_150, i_151, i_152, i_153,
i_154, i_155, i_156, i_157, i_158, i_159, i_160, i_161, i_162,
i_163, i_164, i_165, i_166, i_167, i_168, i_169, i_170, i_171,
i_172, i_173, i_174, i_175, i_176, i_177, i_178, i_179, i_180,
i_181, i_182, i_183, i_184, i_185, i_186, i_187, i_188, i_189,
i_190, i_191, i_192, i_193, i_194, i_195, i_196, i_197, i_198,
i_199, i_200, i_201, i_202, i_203, i_204, i_205, i_206, i_207,
i_208, i_209, i_210, i_211, i_212, i_213, i_214, i_215, i_216,
i_217, i_218, i_219, i_220, i_221, i_222, i_223, i_224, i_225,
i_226 = 1;
})();
"""))
standard = minify_print(tree, obfuscate=False)
self.assertIn('i_10,i_11,i_12,i_13', standard)
minified = minify_print(tree, obfuscate=True)
        # we cannot directly test the output because the dict lookup is not
        # deterministic; however, we can ensure that the bracketing names
        # (dn and dp) are generated, and that 'do' is not present.
self.assertIn('dn', minified)
self.assertNotIn('do', minified)
self.assertIn('dp', minified)
def parse_to_sourcemap_tokens_pretty(text):
return quad(Unparser(rules=(
default_rules,
indent(),
))(parse(text, with_comments=True)))
def parse_to_sourcemap_tokens_min(text):
return quad(Unparser(rules=(
minimum_rules,
))(parse(text)))
ParsedNodeTypeSrcmapTokenPPTestCase = build_equality_testcase(
'ParsedNodeTypeSrcmapTokenPPTestCase', parse_to_sourcemap_tokens_pretty, ((
label,
textwrap.dedent(argument).strip(),
result,
) for label, argument, result in [(
'simple',
"""
0;
""", [
('0', 1, 1, None),
(';', 1, 2, None),
('\n', 0, 0, None),
],
), (
'block',
"""
{
var a = 5;
}
""", [
('{', 1, 1, None),
('\n', 0, 0, None),
(' ', None, None, None),
('var', 2, 3, None), (' ', 0, 0, None), ('a', 2, 7, None),
(' ', 0, 0, None), ('=', 2, 9, None), (' ', 0, 0, None),
('5', 2, 11, None), (';', 2, 12, None),
('\n', 0, 0, None),
('}', 3, 1, None),
('\n', 0, 0, None),
],
), (
'variable_statement',
"""
var a;
var b;
var a, b = 3;
var a = 1, b;
var a = 5, b = 7;
""", [
('var', 1, 1, None), (' ', 0, 0, None), ('a', 1, 5, None),
(';', 1, 6, None),
('\n', 0, 0, None),
('var', 2, 1, None), (' ', 0, 0, None), ('b', 2, 5, None),
(';', 2, 6, None),
('\n', 0, 0, None),
('var', 3, 1, None), (' ', 0, 0, None), ('a', 3, 5, None),
(',', 0, 0, None), (' ', 0, 0, None),
('b', 3, 8, None), (' ', 0, 0, None), ('=', 3, 10, None),
(' ', 0, 0, None), ('3', 3, 12, None), (';', 3, 13, None),
('\n', 0, 0, None),
('var', 4, 1, None), (' ', 0, 0, None), ('a', 4, 5, None),
(' ', 0, 0, None), ('=', 4, 7, None), (' ', 0, 0, None),
('1', 4, 9, None), (',', 0, 0, None), (' ', 0, 0, None),
('b', 4, 12, None), (';', 4, 13, None),
('\n', 0, 0, None),
('var', 5, 1, None), (' ', 0, 0, None), ('a', 5, 5, None),
(' ', 0, 0, None), ('=', 5, 7, None), (' ', 0, 0, None),
('5', 5, 9, None), (',', 0, 0, None), (' ', 0, 0, None),
('b', 5, 12, None), (' ', 0, 0, None), ('=', 5, 14, None),
(' ', 0, 0, None), ('7', 5, 16, None), (';', 5, 17, None),
('\n', 0, 0, None),
],
), (
'empty_statement',
"""
;
;
;
""",
[
(';', 1, 1, None),
('\n', 0, 0, None),
(';', 2, 1, None),
('\n', 0, 0, None),
(';', 3, 1, None),
('\n', 0, 0, None),
],
), (
'function_call_0',
"""
test();
""",
[
('test', 1, 1, None), ('(', 1, 5, None), (')', 1, 6, None),
(';', 1, 7, None),
('\n', 0, 0, None),
],
), (
'function_call_1',
"""
test(1);
""",
[
('test', 1, 1, None), ('(', 1, 5, None), ('1', 1, 6, None),
(')', 1, 7, None),
(';', 1, 8, None),
('\n', 0, 0, None),
],
), (
'function_call_2',
"""
test(1, 2);
""",
[
('test', 1, 1, None), ('(', 1, 5, None),
('1', 1, 6, None), (',', 0, 0, None), (' ', 0, 0, None),
('2', 1, 9, None),
(')', 1, 10, None),
(';', 1, 11, None),
('\n', 0, 0, None),
],
), (
'operator_1',
"""
i = 1 + 1;
""",
[
('i', 1, 1, None),
(' ', 0, 0, None),
('=', 1, 3, None),
(' ', 0, 0, None),
('1', 1, 5, None),
(' ', 0, 0, None),
('+', 1, 7, None),
(' ', 0, 0, None),
('1', 1, 9, None),
(';', 1, 10, None),
('\n', 0, 0, None),
],
), (
'unary_op',
"""
!true;
!1;
delete a;
delete(a);
++a;
a++;
""",
[
('!', 1, 1, None),
('true', 1, 2, None),
(';', 1, 6, None),
('\n', 0, 0, None),
('!', 2, 1, None),
('1', 2, 2, None),
(';', 2, 3, None),
('\n', 0, 0, None),
('delete', 3, 1, None),
(' ', 0, 0, None),
('a', 3, 8, None),
(';', 3, 9, None),
('\n', 0, 0, None),
('delete', 4, 1, None),
('(', 4, 7, None),
('a', 4, 8, None),
(')', 4, 9, None),
(';', 4, 10, None),
('\n', 0, 0, None),
('++', 5, 1, None),
('a', 5, 3, None),
(';', 5, 4, None),
('\n', 0, 0, None),
('a', 6, 1, None),
('++', 6, 2, None),
(';', 6, 4, None),
('\n', 0, 0, None),
],
), (
'object',
"""
var obj = {
x: 1,
y: 2,
}
""",
[
('var', 1, 1, None),
(' ', 0, 0, None),
('obj', 1, 5, None),
(' ', 0, 0, None),
('=', 1, 9, None),
(' ', 0, 0, None),
('{', 1, 11, None),
('\n', 0, 0, None),
(' ', None, None, None),
('x', 2, 5, None),
(':', 2, 6, None),
(' ', 0, 0, None),
('1', 2, 8, None),
(',', 3, 9, None),
('\n', 0, 0, None),
(' ', None, None, None),
('y', 3, 5, None),
(':', 3, 6, None),
(' ', 0, 0, None),
('2', 3, 8, None),
('\n', 0, 0, None),
('}', 4, 1, None),
(';', 0, 0, None),
('\n', 0, 0, None),
],
), (
'binop_prefixop',
"""
var a = i+ --i;
var b = i+ ++i;
var c = i+ -i;
""", [
('var', 1, 1, None), (' ', 0, 0, None), ('a', 1, 5, None),
(' ', 0, 0, None), ('=', 1, 7, None), (' ', 0, 0, None),
('i', 1, 9, None), (' ', 0, 0, None), ('+', 1, 10, None),
(' ', 0, 0, None), ('--', 1, 12, None), ('i', 1, 14, None),
(';', 1, 15, None),
('\n', 0, 0, None),
('var', 2, 1, None), (' ', 0, 0, None), ('b', 2, 5, None),
(' ', 0, 0, None), ('=', 2, 7, None), (' ', 0, 0, None),
('i', 2, 9, None), (' ', 0, 0, None), ('+', 2, 10, None),
(' ', 0, 0, None), ('++', 2, 12, None), ('i', 2, 14, None),
(';', 2, 15, None),
('\n', 0, 0, None),
('var', 3, 1, None), (' ', 0, 0, None), ('c', 3, 5, None),
(' ', 0, 0, None), ('=', 3, 7, None), (' ', 0, 0, None),
('i', 3, 9, None), (' ', 0, 0, None), ('+', 3, 10, None),
(' ', 0, 0, None), ('-', 3, 12, None), ('i', 3, 13, None),
(';', 3, 14, None),
('\n', 0, 0, None),
],
), (
'assorted_comments',
"""
// line
/* block
line 2
final */
// hrm
this;
/* more? */
this;
""", [
('// line', 1, 1, None),
('\n', 0, 0, None),
('/* block\n line 2\n final */', 2, 1, None),
('\n', 0, 0, None),
('// hrm', 5, 4, None),
('\n', 0, 0, None),
('this', 6, 4, None),
(';', 6, 8, None),
('\n', 0, 0, None),
('/* more? */', 7, 2, None),
('\n', 0, 0, None),
('this', 8, 1, None),
(';', 8, 5, None),
('\n', 0, 0, None),
],
)])
)
ParsedToMinimumTestcase = build_equality_testcase(
'ParsedToMinimumTestcase', parse_to_sourcemap_tokens_min, ((
label,
textwrap.dedent(argument).strip(),
result,
) for label, argument, result in [(
'simple',
"""
0;
""", [
('0', 1, 1, None),
(';', 1, 2, None),
],
), (
'block',
"""
{
var a = 5;
}
""", [
('{', 1, 1, None),
('var', 2, 3, None), (' ', 0, 0, None), ('a', 2, 7, None),
('=', 2, 9, None), ('5', 2, 11, None), (';', 2, 12, None),
('}', 3, 1, None),
],
), (
'variable_statement',
"""
var a;
var b;
var a, b = 3;
var a = 1, b;
var a = 5, b = 7;
""", [
('var', 1, 1, None), (' ', 0, 0, None), ('a', 1, 5, None),
(';', 1, 6, None),
('var', 2, 1, None), (' ', 0, 0, None), ('b', 2, 5, None),
(';', 2, 6, None),
('var', 3, 1, None), (' ', 0, 0, None), ('a', 3, 5, None),
(',', 0, 0, None),
('b', 3, 8, None), ('=', 3, 10, None), ('3', 3, 12, None),
(';', 3, 13, None),
('var', 4, 1, None), (' ', 0, 0, None), ('a', 4, 5, None),
('=', 4, 7, None), ('1', 4, 9, None), (',', 0, 0, None),
('b', 4, 12, None), (';', 4, 13, None),
('var', 5, 1, None), (' ', 0, 0, None), ('a', 5, 5, None),
('=', 5, 7, None), ('5', 5, 9, None), (',', 0, 0, None),
('b', 5, 12, None), ('=', 5, 14, None), ('7', 5, 16, None),
(';', 5, 17, None),
],
), (
'empty_statement',
"""
;
;
;
""",
[
(';', 1, 1, None),
(';', 2, 1, None),
(';', 3, 1, None),
],
), (
'function_call_0',
"""
test();
""",
[
('test', 1, 1, None), ('(', 1, 5, None), (')', 1, 6, None),
(';', 1, 7, None),
],
), (
'function_call_1',
"""
test(1);
""",
[
('test', 1, 1, None), ('(', 1, 5, None), ('1', 1, 6, None),
(')', 1, 7, None),
(';', 1, 8, None),
],
), (
'function_call_2',
"""
test(1, 2);
""",
[
('test', 1, 1, None), ('(', 1, 5, None),
('1', 1, 6, None), (',', 0, 0, None),
('2', 1, 9, None),
(')', 1, 10, None),
(';', 1, 11, None),
],
), (
'operator_1',
"""
i = 1 + 1;
""",
[
('i', 1, 1, None),
('=', 1, 3, None),
('1', 1, 5, None),
('+', 1, 7, None),
('1', 1, 9, None),
(';', 1, 10, None),
],
), (
'unary_op',
"""
!true;
!1;
delete a;
delete(a);
++a;
a++;
""",
[
('!', 1, 1, None),
('true', 1, 2, None),
(';', 1, 6, None),
('!', 2, 1, None),
('1', 2, 2, None),
(';', 2, 3, None),
('delete', 3, 1, None),
(' ', 0, 0, None),
('a', 3, 8, None),
(';', 3, 9, None),
('delete', 4, 1, None),
('(', 4, 7, None),
('a', 4, 8, None),
(')', 4, 9, None),
(';', 4, 10, None),
('++', 5, 1, None),
('a', 5, 3, None),
(';', 5, 4, None),
('a', 6, 1, None),
('++', 6, 2, None),
(';', 6, 4, None),
],
), (
'object',
"""
var obj = {
x: 1,
y: 2,
}
""",
[
('var', 1, 1, None),
(' ', 0, 0, None),
('obj', 1, 5, None),
('=', 1, 9, None),
('{', 1, 11, None),
('x', 2, 5, None),
(':', 2, 6, None),
('1', 2, 8, None),
(',', 3, 9, None),
('y', 3, 5, None),
(':', 3, 6, None),
('2', 3, 8, None),
('}', 4, 1, None),
(';', 0, 0, None),
],
), (
'binop_prefixop',
"""
var a = i+ --i;
var b = i+ ++i;
var c = i+ -i;
""", [
('var', 1, 1, None), (' ', 0, 0, None), ('a', 1, 5, None),
('=', 1, 7, None),
('i', 1, 9, None), ('+', 1, 10, None),
('--', 1, 12, None), ('i', 1, 14, None),
(';', 1, 15, None),
('var', 2, 1, None), (' ', 0, 0, None), ('b', 2, 5, None),
('=', 2, 7, None),
('i', 2, 9, None), ('+', 2, 10, None),
(' ', 0, 0, None), ('++', 2, 12, None), ('i', 2, 14, None),
(';', 2, 15, None),
('var', 3, 1, None), (' ', 0, 0, None), ('c', 3, 5, None),
('=', 3, 7, None),
('i', 3, 9, None), ('+', 3, 10, None),
('-', 3, 12, None), ('i', 3, 13, None),
(';', 3, 14, None),
],
)])
)
ES5IdentityTestCase = build_equality_testcase(
'ES5IdentityTestCase', es5.pretty_print, ((
label, value, value,
) for label, value in ((
label,
# using lstrip as the pretty printer produces a trailing newline
textwrap.dedent(value).lstrip(),
) for label, value in [(
'block',
"""
{
var a = 5;
}
""",
), (
'variable_statement',
"""
var a;
var b;
var a, b = 3;
var a = 1, b;
var a = 5, b = 7;
""",
), (
'empty_statement',
"""
;
;
;
""",
), (
'if_statement_inline',
"""
if (true) var x = 100;
""",
), (
'if_empty',
"""
if (true);
""",
), (
'if_else_empty',
"""
if (true);
else;
""",
), (
'if_statement_block',
"""
if (true) {
var x = 100;
var y = 200;
}
""",
), (
'if_else_inline',
"""
if (true) if (true) var x = 100;
else var y = 200;
""",
), (
'if_else_block',
"""
if (true) {
var x = 100;
}
else {
var y = 200;
}
""",
), (
'if_else_if_else_block_all_empty',
"""
if (true) {
}
else if (null) {
}
else {
}
""",
), (
'if_else_if_else_block_nested',
"""
if (true) {
}
else if (null) {
if (true) {
}
else if (null) {
}
else {
}
}
else {
}
""",
), (
'iteration_reuse',
"""
for (i = 0; i < 10; i++) {
x = 10 * i;
}
""",
), (
'iteration_var',
"""
for (var i = 0; i < 10; i++) {
x = 10 * i;
}
""",
), (
'iteration_multi_value',
"""
for (i = 0, j = 10; i < j && j < 15; i++, j++) {
x = i * j;
}
""",
), (
'iteration_multi_var_value',
"""
for (var i = 0, j = 10; i < j && j < 15; i++, j++) {
x = i * j;
}
""",
), (
'iteration_in',
"""
for (p in obj) {
}
""",
), (
# retain the semicolon in the initializer part of a 'for' statement
'iteration_conditional_initializer',
"""
for (Q || (Q = []); d < b;) {
d = 1;
}
""",
), (
'iteration_new_object',
"""
for (new Foo(); d < b;) {
d = 1;
}
""",
), (
'iteration_ternary_initializer',
"""
for (2 >> (foo ? 32 : 43) && 54; 21;) {
a = c;
}
""",
), (
'iteration_regex_initializer',
"""
for (/^.+/g; cond(); ++z) {
ev();
}
""",
), (
'iteration_var_in_obj',
"""
for (var p in obj) {
p = 1;
}
""",
), (
'iteration_do_while',
"""
do {
x += 1;
} while (true);
""",
), (
'while_loop',
"""
while (false) {
x = null;
}
""",
), (
'while_empty',
"""
while (false);
""",
), (
'while_loop_continue',
"""
while (true) {
continue;
s = 'I am not reachable';
}
""",
), (
'while_loop_continue_label',
"""
while (true) {
continue label1;
s = 'I am not reachable';
}
""",
), (
'while_loop_break',
"""
while (true) {
break;
s = 'I am not reachable';
}
""",
), (
'while_loop_break_label',
"""
while (true) {
break label1;
s = 'I am not reachable';
}
""",
), (
'return_empty',
"""
{
return;
}
""",
), (
'return_1',
"""
{
return 1;
}
""",
), (
'with_statement',
"""
with (x) {
var y = x * 2;
}
""",
), (
'labelled_statement',
"""
label: while (true) {
x *= 3;
}
""",
), (
'switch_statement',
"""
switch (day_of_week) {
case 6:
case 7:
x = 'Weekend';
break;
case 1:
x = 'Monday';
break;
default:
break;
}
""",
), (
'switch_statement_case_default_case',
"""
switch (result) {
case 'good':
do_good();
case 'pass':
do_pass();
break;
default:
log_unexpected_result();
case 'error':
handle_error();
}
""",
), (
'switch_statement_empty',
"""
switch (a) {
default:
case 1:
case 2:
}
""",
), (
'throw_statement',
"""
throw 'exc';
""",
), (
'debugger_statement',
"""
debugger;
""",
), (
'expression_statement',
"""
5 + 7 - 20 * 10;
++x;
--x;
x++;
x--;
x = 17 /= 3;
s = mot ? z : /x:3;x<5;y</g / i;
""",
), (
'try_catch_statement',
"""
try {
x = 3;
}
catch (exc) {
x = exc;
}
""",
), (
'try_finally_statement',
"""
try {
x = 3;
}
finally {
x = null;
}
""",
), (
'try_catch_finally_statement',
"""
try {
x = 5;
}
catch (exc) {
x = exc;
}
finally {
y = null;
}
""",
), (
'function_with_arguments',
"""
function foo(x, y) {
z = 10;
return x + y + z;
}
""",
), (
'function_without_arguments',
"""
function foo() {
return 10;
}
""",
), (
'var_function_without_arguments',
"""
var a = function() {
return 10;
};
""",
), (
'var_function_with_arguments',
"""
var a = function foo(x, y) {
return x + y;
};
""",
), (
# nested function declaration
'function_nested_declaration',
"""
function foo() {
function bar() {
}
}
""",
), (
'function_immediate_execution',
"""
var mult = function(x) {
return x * 10;
}();
""",
), (
'function_call',
"""
foo();
""",
), (
'function_call_argument',
"""
foo(x, 7);
""",
), (
'function_call_access_element',
"""
foo()[10];
""",
), (
'function_call_access_attribute',
"""
foo().foo;
""",
), (
'new_keyword',
"""
var foo = new Foo();
""",
), (
# dot accessor
'new_keyword_dot_accessor',
"""
var bar = new Foo.Bar();
""",
), (
# bracket accessor
'new_keyword_bracket_accessor',
"""
var bar = new Foo.Bar()[7];
""",
), (
# object literal
'object_literal_litearl_keys',
"""
var obj = {
foo: 10,
bar: 20
};
""",
), (
'object_literal_numeric_keys',
"""
var obj = {
1: 'a',
2: 'b'
};
""",
), (
'object_literal_string_keys',
"""
var obj = {
'a': 100,
'b': 200
};
""",
), (
'object_literal_empty',
"""
var obj = {};
""",
), (
# array
'array_create_access',
"""
var a = [1, 2, 3, 4, 5];
var res = a[3];
""",
), (
# elision
'elision_1',
"""
var a = [,,,];
""",
), (
'elision_2',
"""
var a = [1,,, 4];
""",
), (
'elision_3',
"""
var a = [1,, 3,, 5];
""",
), (
'function_definition',
r"""
String.prototype.foo = function(data) {
var tmpl = this.toString();
return tmpl.replace(/{{\s*(.*?)\s*}}/g, function(a, b) {
var node = data;
if (true) {
var value = true;
}
else {
var value = false;
}
$.each(n.split('.'), function(i, sym) {
node = node[sym];
});
return node;
});
};
""",
), (
'dot_accessor_integer',
"""
(0x25).toString();
(1e3).toString();
(25).toString();
""",
), (
'attr_accessor_integer',
"""
0x25["toString"]();
1e3["toString"]();
25["toString"]();
""",
), (
'parentheses_not_removed',
r"""
Expr.match[type].source + (/(?![^\[]*\])(?![^\(]*\))/.source);
""",
), (
'comparison',
"""
(options = arguments[i]) != null;
""",
), (
'regex_test',
r"""
return (/h\d/i).test(elem.nodeName);
""",
), (
# https://github.com/rspivak/slimit/issues/42
'slimit_issue_42',
"""
e.b(d) ? (a = [c.f(j[1])], e.fn.attr.call(a, d, !0)) : a = [k.f(j[1])];
""",
), (
'closure_scope',
"""
(function() {
x = 5;
}());
""",
), (
'return_statement_negation',
"""
return !(match === true || elem.getAttribute("classid") !== match);
""",
), (
'ternary_dot_accessor',
"""
var el = (elem ? elem.ownerDocument || elem : 0).documentElement;
""",
), (
'typeof',
"""
typeof second.length === "number";
""",
), (
'prepostfix',
"""
i++;
i--;
++i;
--i;
!i;
function foo() {
i++;
i--;
++i;
--i;
!i;
}
""",
), (
'shift_ops',
"""
x << y;
y >> x;
function foo() {
x << y;
y >> x;
}
""",
), (
'mul_ops',
"""
x * y;
y / x;
x % z;
function foo() {
x * y;
y / x;
x % z;
}
""",
), (
'various_ops',
"""
5 + 7 - 20 * 10;
++x;
--x;
x++;
x--;
x = 17 /= 3;
1 << 2;
foo = 2 << 3;
1 < 2;
bar = 1 < 2;
1 | 2;
bar = 1 & 2;
1 | 2 & 8;
bar = 1 & 2 | 8;
x ^ y;
bar = x ^ y;
x && y;
bar = x && y;
1, 2;
""",
), (
'regex_isolated',
"""
s = mot ? z : /x:3;x<5;y</g / i;
""",
), (
# function call in FOR init
'function_call_in_for_init',
"""
for (o(); i < 3; i++) {
}
""",
), (
# function call in FOR init
'function_call_various',
"""
a();
a()();
d()['test'];
d().test;
var i = a();
var i = a()();
var i = d()['test'];
var i = d().test;
""",
), (
# https://github.com/rspivak/slimit/issues/32
'slimit_issue_32',
"""
Name.prototype = {
get fullName() {
return this.first + " " + this.last;
},
set fullName(name) {
var names = name.split(" ");
this.first = names[0];
this.last = names[1];
}
};
""",
), (
'dot_accessor_on_integer',
"""
(0).toString();
""",
), (
'var_function_named',
"""
var x = function y() {
};
""",
), (
'new_expr_lhs',
"""
new T();
new T().derp;
""",
), (
'new_new_expr',
# var T = function(){ return function (){} }
"""
new new T();
var x = new new T();
""",
), (
# delete
'delete_keyword',
"""
var obj = {
foo: 1
};
delete obj.foo;
""",
), (
'object_various',
"""
var obj = {
foo: 1,
set bar(x) {
this._bar = x + 1;
},
get bar() {
return this._bar;
}
};
""",
), (
'void_keyword',
"""
void 0;
""",
), (
'instanceof',
"""
x instanceof y;
""",
), (
# membership
'membership_in',
"""
1 in s;
""",
), (
'for_various',
"""
for (;;);
for (o < (p < q);;);
for (o == (p == q);;);
for (o ^ (p ^ q);;);
for (o | (p | q);;);
for (o & (p & q);;);
for (a ? (b ? c : d) : false;;);
for (var x;;);
for (var x, y, z;;);
""",
), (
'forin_various',
"""
for (f in o < (p < q));
for (f in o == (p == q));
for (f in o ^ (p ^ q));
for (f in o | (p | q));
for (f in o & (p & q));
for (f in a ? (b ? c : d) : false);
for (f in x);
for (f in x, y, z);
""",
), (
'forin_initializer_noin',
"""
for (var x = foo() in (bah())) {
}
""",
), (
'dot_reserved_word',
"""
e.case;
""",
), (
'dot_reserved_word_nobf',
"""
for (x = e.case;;);
""",
), (
'logical_or_expr_nobf',
"""
(true || true) || (false && false);
""",
), (
'multiplicative_expr_nobf',
"""
!0 % 1;
""",
), (
'function_expr_1',
"""
(function(arg) {
});
""",
), (
'octal_slimit_issue_70',
r"""
var x = '\071[90m%s';
""",
), (
'special_array_char_slimit_issue_82',
r"""
var x = ['a', '\n'];
""",
), (
'special_string_slimit_issue_82',
r"""
var x = '\n';
""",
), (
'for_in_without_braces',
"""
for (index in [1, 2, 3]) index;
""",
), (
'for_loop_into_regex_slimit_issue_54',
"""
for (index in [1, 2, 3]) /^salign$/;
""",
), (
'line_continuation_string',
r"""
{
var a = "\
";
}
""",
), (
'var_non_word_char_separation',
"""
var $foo = bar;
""",
)]))
)
PrintWithCommentsTestCase = build_equality_testcase(
'PrintWithCommentsTestCase', pretty_print, ((
label, parse(value, with_comments=True), value,
) for label, value in ((
label,
# using lstrip as the pretty printer produces a trailing newline
textwrap.dedent(value).lstrip(),
) for label, value in [(
'this_keyword',
"""
// foo
// foo
/* foo */
this;
""",
), (
'before_function',
"""
/* a comment */
function foo() {
}
""",
), (
'before_if',
"""
/* a comment */
if (foo == bar) {
}
""",
), (
'not_quite_before_else_if',
"""
if (somecondition) {
}
else /* a comment */
if (foo == bar) {
// also this is indented
var baz = 1;
}
""",
), (
'for_loop',
"""
/* a comment */
// more comments
/* even more comments */
for (i = 0; i < 10; i++) {
var baz = 1;
}
""",
), (
'while_loop',
"""
/* an infinte loop */
while (true) {
// this is very pointless
var baz = 1;
}
""",
)]))
)
def parse_to_sourcemap_tokens_minify(text):
return quad(minify_printer(obfuscate=True)(parse(text)))
ParsedNodeTypeSrcmapTokenMPTestCase = build_equality_testcase(
'ParsedNodeTypeSrcmapTokenMPTestCase', parse_to_sourcemap_tokens_minify, ((
label,
textwrap.dedent(argument).strip(),
result,
) for label, argument, result in [(
'simple',
"""
0;
""", [
('0', 1, 1, None),
(';', 1, 2, None),
],
), (
'block',
"""
{
var a = 5;
}
""", [
('{', 1, 1, None),
('var', 2, 3, None), (' ', 0, 0, None), ('a', 2, 7, None),
('=', 2, 9, None),
('5', 2, 11, None), (';', 2, 12, None),
('}', 3, 1, None),
],
), (
'variable_statement',
"""
var a;
var b;
var a, b = 3;
var a = 1, b;
var a = 5, b = 7;
""", [
('var', 1, 1, None), (' ', 0, 0, None), ('a', 1, 5, None),
(';', 1, 6, None),
('var', 2, 1, None), (' ', 0, 0, None), ('b', 2, 5, None),
(';', 2, 6, None),
('var', 3, 1, None), (' ', 0, 0, None), ('a', 3, 5, None),
(',', 0, 0, None),
('b', 3, 8, None), ('=', 3, 10, None),
('3', 3, 12, None), (';', 3, 13, None),
('var', 4, 1, None), (' ', 0, 0, None), ('a', 4, 5, None),
('=', 4, 7, None),
('1', 4, 9, None), (',', 0, 0, None),
('b', 4, 12, None), (';', 4, 13, None),
('var', 5, 1, None), (' ', 0, 0, None), ('a', 5, 5, None),
('=', 5, 7, None),
('5', 5, 9, None), (',', 0, 0, None),
('b', 5, 12, None), ('=', 5, 14, None),
('7', 5, 16, None), (';', 5, 17, None),
],
), (
'empty_statement',
"""
;
;
;
""",
[
(';', 1, 1, None),
(';', 2, 1, None),
(';', 3, 1, None),
],
), (
'function_call_0',
"""
test();
""",
[
('test', 1, 1, None), ('(', 1, 5, None), (')', 1, 6, None),
(';', 1, 7, None),
],
), (
'function_call_1',
"""
test(1);
""",
[
('test', 1, 1, None), ('(', 1, 5, None), ('1', 1, 6, None),
(')', 1, 7, None),
(';', 1, 8, None),
],
), (
'function_call_2',
"""
test(1, 2);
""",
[
('test', 1, 1, None), ('(', 1, 5, None),
('1', 1, 6, None), (',', 0, 0, None),
('2', 1, 9, None),
(')', 1, 10, None),
(';', 1, 11, None),
],
), (
'operator_1',
"""
i = 1 + 1;
""",
[
('i', 1, 1, None),
('=', 1, 3, None),
('1', 1, 5, None),
('+', 1, 7, None),
('1', 1, 9, None),
(';', 1, 10, None),
],
), (
'unary_op',
"""
!true;
!1;
delete a;
delete(a);
++a;
a++;
""",
[
('!', 1, 1, None),
('true', 1, 2, None),
(';', 1, 6, None),
('!', 2, 1, None),
('1', 2, 2, None),
(';', 2, 3, None),
('delete', 3, 1, None),
(' ', 0, 0, None),
('a', 3, 8, None),
(';', 3, 9, None),
('delete', 4, 1, None),
('(', 4, 7, None),
('a', 4, 8, None),
(')', 4, 9, None),
(';', 4, 10, None),
('++', 5, 1, None),
('a', 5, 3, None),
(';', 5, 4, None),
('a', 6, 1, None),
('++', 6, 2, None),
(';', 6, 4, None),
],
), (
'object',
"""
var obj = {
x: 1,
y: 2,
}
""",
[
('var', 1, 1, None),
(' ', 0, 0, None),
('obj', 1, 5, None),
('=', 1, 9, None),
('{', 1, 11, None),
('x', 2, 5, None),
(':', 2, 6, None),
('1', 2, 8, None),
(',', 3, 9, None),
('y', 3, 5, None),
(':', 3, 6, None),
('2', 3, 8, None),
('}', 4, 1, None),
(';', 0, 0, None),
],
), (
'binop_prefixop',
"""
var a = i+ --i;
var b = i+ ++i;
var c = i+ -i;
""", [
('var', 1, 1, None), (' ', 0, 0, None), ('a', 1, 5, None),
('=', 1, 7, None),
('i', 1, 9, None), ('+', 1, 10, None),
('--', 1, 12, None), ('i', 1, 14, None),
(';', 1, 15, None),
('var', 2, 1, None), (' ', 0, 0, None), ('b', 2, 5, None),
('=', 2, 7, None),
('i', 2, 9, None), ('+', 2, 10, None),
(' ', 0, 0, None), ('++', 2, 12, None), ('i', 2, 14, None),
(';', 2, 15, None),
('var', 3, 1, None), (' ', 0, 0, None), ('c', 3, 5, None),
('=', 3, 7, None),
('i', 3, 9, None), ('+', 3, 10, None),
('-', 3, 12, None), ('i', 3, 13, None),
(';', 3, 14, None),
],
), (
'function_list',
"""
(function main(root) {
root.exports = [
(function(module, exports) {
module.exports = {};
}),
(function(module, exports) {
exports.fun = 1;
}),
];
})(this);
""", [
('(', 1, 1, None), ('function', 1, 2, None), (' ', 0, 0, None),
('main', 1, 11, None),
('(', 1, 15, None), ('a', 1, 16, 'root'), (')', 1, 20, None),
('{', 1, 22, None), ('a', 2, 3, 'root'), ('.', 2, 7, None),
('exports', 2, 8, None), ('=', 2, 16, None),
('[', 2, 18, None),
('(', 3, 5, None), ('function', 3, 6, None), ('(', 3, 14, None),
('a', 3, 15, 'module'), (',', 0, 0, None), ('b', 3, 23, 'exports'),
(')', 3, 30, None), ('{', 3, 32, None),
('a', 4, 7, 'module'), ('.', 4, 13, None),
('exports', 4, 14, None), ('=', 4, 22, None), ('{', 4, 24, None),
('}', 4, 25, None), (';', 4, 26, None), ('}', 5, 5, None),
(')', 5, 6, None),
(',', 0, 0, None),
('(', 6, 5, None), ('function', 6, 6, None), ('(', 6, 14, None),
('b', 6, 15, 'module'), (',', 0, 0, None), ('a', 6, 23, 'exports'),
(')', 6, 30, None), ('{', 6, 32, None),
('a', 7, 7, 'exports'), ('.', 7, 14, None),
('fun', 7, 15, None), ('=', 7, 19, None), ('1', 7, 21, None),
(';', 7, 22, None), ('}', 8, 5, None), (')', 8, 6, None),
(']', 9, 3, None), (';', 9, 4, None),
('}', 10, 1, None), (')', 10, 2, None),
('(', 10, 3, None), ('this', 10, 4, None), (')', 10, 8, None),
(';', 10, 9, None),
],
), (
'elision_function_list',
"""
(function main(root) {
root.exports = [
(function(module, exports) {
module.exports = {};
}),,,,,,,
(function(module, exports) {
exports.fun = 1;
}),,
];
})(this);
""", [
('(', 1, 1, None), ('function', 1, 2, None), (' ', 0, 0, None),
('main', 1, 11, None),
('(', 1, 15, None), ('a', 1, 16, 'root'), (')', 1, 20, None),
('{', 1, 22, None), ('a', 2, 3, 'root'), ('.', 2, 7, None),
('exports', 2, 8, None), ('=', 2, 16, None),
('[', 2, 18, None),
('(', 3, 5, None), ('function', 3, 6, None), ('(', 3, 14, None),
('a', 3, 15, 'module'), (',', 0, 0, None), ('b', 3, 23, 'exports'),
(')', 3, 30, None), ('{', 3, 32, None),
('a', 4, 7, 'module'), ('.', 4, 13, None),
('exports', 4, 14, None), ('=', 4, 22, None), ('{', 4, 24, None),
('}', 4, 25, None), (';', 4, 26, None), ('}', 5, 5, None),
(')', 5, 6, None),
(',', 0, 0, None), (',,,,,,', 5, 8, None),
('(', 6, 5, None), ('function', 6, 6, None), ('(', 6, 14, None),
('b', 6, 15, 'module'), (',', 0, 0, None), ('a', 6, 23, 'exports'),
(')', 6, 30, None), ('{', 6, 32, None),
('a', 7, 7, 'exports'), ('.', 7, 14, None),
('fun', 7, 15, None), ('=', 7, 19, None), ('1', 7, 21, None),
(';', 7, 22, None), ('}', 8, 5, None), (')', 8, 6, None),
(',', 0, 0, None), (',', 8, 8, None),
(']', 9, 3, None), (';', 9, 4, None),
('}', 10, 1, None), (')', 10, 2, None),
('(', 10, 3, None), ('this', 10, 4, None), (')', 10, 8, None),
(';', 10, 9, None),
],
)])
)
MinifyPrintTestCase = build_equality_testcase(
'MinifyPrintTestCase',
partial(minify_print, obfuscate=True, shadow_funcname=True), ((
label,
parse(textwrap.dedent(source).strip()),
answer,
) for label, source, answer in [(
'switch_statement',
"""
(function() {
var result;
switch (day_of_week) {
case 6:
case 7:
result = 'Weekend';
break;
case 1:
result = 'Monday';
break;
default:
break;
}
return result
})();
""",
"(function(){var a;switch(day_of_week){case 6:case 7:a='Weekend';"
"break;case 1:a='Monday';break;default:break;}return a;})();",
), (
'function_with_arguments',
"""
function foo(x, y) {
z = 10 + x;
return x + y + z;
}
""",
"function foo(a,b){z=10+a;return a+b+z;}",
), (
'plus_plusplus_split',
"""
var a = b+ ++c+d;
""",
"var a=b+ ++c+d;"
), (
'minus_plusplus_join',
"""
var a = b- ++c+d;
""",
"var a=b-++c+d;"
), (
'object_props',
"""
(function() {
Name.prototype = {
validated: function(key) {
return token.get(key + this.last);
},
get fullName() {
return this.first + ' ' + this.last;
},
set fullName(name) {
var names = name.split(' ');
this.first = names[0];
this.last = names[1];
}
};
})();
""",
"(function(){Name.prototype={validated:function(a){return token.get("
"a+this.last);},get fullName(){return this.first+' '+this.last;},"
"set fullName(b){var a=b.split(' ');this.first=a[0];this.last=a[1];}};"
"})();"
), (
'object_props_nonword',
"""
(function() {
Name.prototype = {
get $dollar() {
return this.money;
},
set $dollar(value) {
this.money = value;
}
};
})();
""",
"(function(){Name.prototype={get $dollar(){return this.money;},"
"set $dollar(a){this.money=a;}};})();"
), (
'try_catch_shadow',
"""
(function() {
var value = 1;
try {
console.log(value);
throw Error('welp');
}
catch (value) {
console.log(value);
}
})();
""",
"(function(){var a=1;try{console.log(a);throw Error('welp');}catch(a){"
"console.log(a);}})();"
), (
'for_in_a_block',
"""
if (true) {
for(;;);
}
""",
'if(true){for(;;);}',
), (
'function_dollar_sign',
"""
(function $() {
(function $() {
var foo = 1;
})()
})();
""",
'(function $(){(function a(){var a=1;})();})();',
), (
'line_continuation_string',
r"""
var a = "\
";
""",
'var a=" ";',
), (
'var_non_word_char_separation',
r"""
var $foo = bar;
""",
'var $foo=bar;',
), (
'return_string',
"""
return"foo";
""",
'return"foo";'
), (
'return_statement_negation',
"""
return !1;
""",
'return!1;'
), (
'return_nonword',
"""
return $foo;
""",
'return $foo;'
), (
'return_underscore',
"""
return _;
""",
'return _;'
), (
'dollar_instanceof_dollar',
"""
foo$ instanceof $bar;
""",
'foo$ instanceof $bar;'
), (
'while_loop_break_nonword_label',
"""
while (1) {
break $dollar;
}
""",
'while(1){break $dollar;}',
), (
'while_continue_nonword_label',
"""
while (1) {
continue $dollar;
}
""",
'while(1){continue $dollar;}',
), (
'iteration_in_nonword',
"""
for (p in $obj) {
}
""",
'for(p in $obj){}',
), (
'iteration_in_nonword_pre',
"""
for ($bling$ in $bling$bling$) {
}
""",
'for($bling$ in $bling$bling$){}',
), (
'iteration_in_str',
"""
for ($bling$ in"bingbling") {
console.log($bling$);
}
""",
'for($bling$ in"bingbling"){console.log($bling$);}',
), (
'case_various',
"""
switch (foo) {
case $dollar:
break;
case !1:
break;
case"foo":
break;
}
""",
'switch(foo){case $dollar:break;case!1:break;case"foo":break;}',
), (
'throw_various',
"""
throw $exc;
throw!1;
throw"exception";
""",
'throw $exc;throw!1;throw"exception";',
), (
'new_nonword',
"""
new $Money();
""",
'new $Money();',
)])
)
def minify_drop_semi_helper(tree):
result = minify_print(
tree, obfuscate=True, shadow_funcname=True, drop_semi=True)
# try to parse the result to ensure that it also is valid
new_tree = es5(result)
assert result == minify_print(
new_tree, obfuscate=True, shadow_funcname=True, drop_semi=True)
return result
MinifyDropSemiPrintTestCase = build_equality_testcase(
'MinifyDropSemiPrintTestCase',
minify_drop_semi_helper, ((
label,
parse(textwrap.dedent(source).strip()),
answer,
) for label, source, answer in [(
'switch_statement',
"""
(function() {
var result;
switch (day_of_week) {
case 6:
case 7:
result = 'Weekend';
break;
case 1:
result = 'Monday';
break;
default:
break;
}
return result
})();
""",
"(function(){var a;switch(day_of_week){case 6:case 7:a='Weekend';"
"break;case 1:a='Monday';break;default:break}return a})()",
), (
'function_with_arguments',
"""
function foo(x, y) {
z = 10 + x;
return x + y + z;
}
""",
"function foo(a,b){z=10+a;return a+b+z}",
), (
'plus_plusplus_split',
"""
var a = b+ ++c+d;
""",
"var a=b+ ++c+d"
), (
'minus_plusplus_join',
"""
var a = b- ++c+d;
""",
"var a=b-++c+d"
), (
'object_props',
"""
(function() {
Name.prototype = {
validated: function(key) {
return token.get(key + this.last);
},
get fullName() {
return this.first + ' ' + this.last;
},
set fullName(name) {
var names = name.split(' ');
this.first = names[0];
this.last = names[1];
}
};
})();
""",
"(function(){Name.prototype={validated:function(a){return token.get("
"a+this.last)},get fullName(){return this.first+' '+this.last},"
"set fullName(b){var a=b.split(' ');this.first=a[0];this.last=a[1]}}"
"})()"
), (
'try_catch_shadow',
"""
(function() {
var value = 1;
try {
console.log(value);
throw Error('welp');
}
catch (value) {
console.log(value);
}
})();
""",
"(function(){var a=1;try{console.log(a);throw Error('welp')}catch(a){"
"console.log(a)}})()"
), (
'for_in_a_block',
"""
if (true) {
for(;;);
}
""",
'if(true){for(;;);}',
), (
'function_dollar_sign',
"""
(function $() {
(function $() {
var foo = 1;
})()
})();
""",
'(function $(){(function a(){var a=1})()})()',
), (
'nested_return_function',
"""
v = function() {
return function() {
return function() {
};
};
};
""",
'v=function(){return function(){return function(){}}}',
)])
)
| 27.005165 | 79 | 0.354278 |
4a1c2c586703908fbc843b4858b011d7c3179e97
| 11,303 |
py
|
Python
|
cusim/aux.py
|
js1010/cusim
|
b37bd96f734c4ddf9ebc7f50ede7e2db3f31d592
|
[
"Apache-2.0"
] | 27 |
2021-02-20T16:43:19.000Z
|
2022-02-13T19:40:27.000Z
|
cusim/aux.py
|
js1010/cusim
|
b37bd96f734c4ddf9ebc7f50ede7e2db3f31d592
|
[
"Apache-2.0"
] | 4 |
2021-03-29T14:06:07.000Z
|
2021-10-18T08:37:34.000Z
|
cusim/aux.py
|
js1010/cusim
|
b37bd96f734c4ddf9ebc7f50ede7e2db3f31d592
|
[
"Apache-2.0"
] | 6 |
2021-02-23T07:47:49.000Z
|
2022-02-22T12:50:24.000Z
|
# Copyright (c) 2021 Jisang Yoon
# All rights reserved.
#
# This source code is licensed under the Apache 2.0 license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import sys
import json
import time
import logging
import logging.handlers
import numpy as np
import jsmin
from google.protobuf.json_format import Parse, MessageToDict
# get_logger and Option refer to
# https://github.com/kakao/buffalo/blob/
# 5f571c2c7d8227e6625c6e538da929e4db11b66d/buffalo/misc/aux.py
def get_logger(name=__file__, level=2):
if level == 1:
level = logging.WARNING
elif level == 2:
level = logging.INFO
elif level == 3:
level = logging.DEBUG
logger = logging.getLogger(name)
if logger.handlers:
return logger
logger.setLevel(level)
sh0 = logging.StreamHandler()
sh0.setLevel(level)
formatter = logging.Formatter('[%(levelname)-8s] %(asctime)s '
'[%(filename)s] [%(funcName)s:%(lineno)d]'
'%(message)s', '%Y-%m-%d %H:%M:%S')
sh0.setFormatter(formatter)
logger.addHandler(sh0)
return logger
# This function helps you to read non-standard JSON strings.
# - Handles JSON strings with C++ style inline comments
# - Handles JSON strings with trailing commas.
def load_json_string(cont):
  # (1) Removes comments.
# Refer to https://plus.google.com/+DouglasCrockfordEsq/posts/RK8qyGVaGSr
cont = jsmin.jsmin(cont)
  # (2) Removes trailing commas.
cont = re.sub(",[ \t\r\n]*}", "}", cont)
cont = re.sub(",[ \t\r\n]*" + r"\]", "]", cont)
return json.loads(cont)
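# Illustrative sketch (not part of the original module; the config keys below
# are made up): shows the kind of non-standard JSON this helper accepts.
def _load_json_string_example():
  cont = """
  {
    // C++ style comment, stripped by jsmin
    "epochs": 10,
    "layers": [64, 32,],
  }
  """
  return load_json_string(cont)  # -> {'epochs': 10, 'layers': [64, 32]}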
# Reads a JSON file given its filename.
def load_json_file(fname):
with open(fname, "r") as fin:
ret = load_json_string(fin.read())
return ret
# Use protobuf to restrict fields and types.
def get_opt_as_proto(raw, proto_type=None):
assert proto_type is not None
proto = proto_type()
# convert raw to proto
Parse(json.dumps(Option(raw)), proto)
err = []
assert proto.IsInitialized(err), \
f"some required fields are missing in proto {err}\n {proto}"
return proto
def proto_to_dict(proto):
return MessageToDict(proto, \
including_default_value_fields=True, \
preserving_proto_field_name=True)
def copy_proto(proto):
newproto = type(proto)()
Parse(json.dumps(proto_to_dict(proto)), newproto)
return newproto
class Option(dict):
def __init__(self, *args, **kwargs):
args = [arg if isinstance(arg, dict)
else load_json_file(arg) for arg in args]
super().__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, val in arg.items():
if isinstance(val, dict):
self[k] = Option(val)
else:
self[k] = val
if kwargs:
for k, val in kwargs.items():
if isinstance(val, dict):
self[k] = Option(val)
else:
self[k] = val
def __getattr__(self, attr):
return self.get(attr)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super().__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super().__delitem__(key)
del self.__dict__[key]
def __getstate__(self):
return vars(self)
def __setstate__(self, state):
vars(self).update(state)
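# Illustrative sketch (not part of the original module; the option values are
# made up): Option exposes nested dict keys as attributes and keeps dict
# access and attribute access in sync.
def _option_example():
  opt = Option({"model": {"dim": 128, "lr": 1e-3}}, seed=7)
  assert opt.model.dim == 128 and opt["model"]["lr"] == 1e-3
  opt.model.dim = 256  # attribute assignment also updates the dict view
  assert opt["model"]["dim"] == 256 and opt.seed == 7
  return opt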
# reference: https://github.com/tensorflow/tensorflow/blob/
# 85c8b2a817f95a3e979ecd1ed95bff1dc1335cff/tensorflow/python/
# keras/utils/generic_utils.py#L483
class Progbar:
# pylint: disable=too-many-branches,too-many-statements,invalid-name
# pylint: disable=blacklisted-name,no-else-return
"""Displays a progress bar.
Arguments:
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that should *not* be
averaged over time. Metrics in this list will be displayed as-is. All
others will be averaged by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
unit_name: Display name for step counts (usually "step" or "sample").
"""
def __init__(self,
target,
width=30,
verbose=1,
interval=0.05,
stateful_metrics=None,
unit_name='step'):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
self.unit_name = unit_name
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
sys.stdout.isatty()) or
'ipykernel' in sys.modules or
'posix' in sys.modules or
'PYCHARM_HOSTED' in os.environ)
self._total_width = 0
self._seen_so_far = 0
# We use a dict + list to avoid garbage collection
# issues found in OrderedDict
self._values = {}
self._values_order = []
self._start = time.time()
self._last_update = 0
self._time_after_first_step = None
def update(self, current, values=None, finalize=None):
"""Updates the progress bar.
Arguments:
current: Index of current step.
values: List of tuples: `(name, value_for_last_step)`. If `name` is in
`stateful_metrics`, `value_for_last_step` will be displayed as-is.
Else, an average of the metric over time will be displayed.
finalize: Whether this is the last update for the progress bar. If
`None`, defaults to `current >= self.target`.
"""
if finalize is None:
if self.target is None:
finalize = False
else:
finalize = current >= self.target
values = values or []
for k, v in values:
if k not in self._values_order:
self._values_order.append(k)
if k not in self.stateful_metrics:
        # In the case that the progress bar doesn't have a target value in the first
# epoch, both on_batch_end and on_epoch_end will be called, which will
# cause 'current' and 'self._seen_so_far' to have the same value. Force
# the minimal value to 1 here, otherwise stateful_metric will be 0s.
value_base = max(current - self._seen_so_far, 1)
if k not in self._values:
self._values[k] = [v * value_base, value_base]
else:
self._values[k][0] += v * value_base
self._values[k][1] += value_base
else:
# Stateful metrics output a numeric value. This representation
# means "take an average from a single value" but keeps the
# numeric formatting.
self._values[k] = [v, 1]
self._seen_so_far = current
now = time.time()
info = ' - %.0fs' % (now - self._start)
if self.verbose == 1:
if now - self._last_update < self.interval and not finalize:
return
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.log10(self.target)) + 1
bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '%7d/Unknown' % current
self._total_width = len(bar)
sys.stdout.write(bar)
time_per_unit = self._estimate_step_duration(current, now)
if self.target is None or finalize:
if time_per_unit >= 1 or time_per_unit == 0:
info += ' %.0fs/%s' % (time_per_unit, self.unit_name)
elif time_per_unit >= 1e-3:
info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name)
else:
info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name)
else:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = '%d:%02d:%02d' % (eta // 3600,
(eta % 3600) // 60, eta % 60)
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info = ' - ETA: %s' % eta_format
for k in self._values_order:
info += ' - %s:' % k
if isinstance(self._values[k], list):
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self._values[k]
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if finalize:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if finalize:
numdigits = int(np.log10(self.target)) + 1
count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)
info = count + info
for k in self._values_order:
info += ' - %s:' % k
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if avg > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def add(self, n, values=None):
self.update(self._seen_so_far + n, values)
def _estimate_step_duration(self, current, now):
"""Estimate the duration of a single step.
Given the step number `current` and the corresponding time `now`
this function returns an estimate for how long a single step
takes. If this is called before one step has been completed
(i.e. `current == 0`) then zero is given as an estimate. The duration
estimate ignores the duration of the (assumed to be non-representative)
first step for estimates when more steps are available (i.e. `current>1`).
Arguments:
current: Index of current step.
now: The current time.
Returns: Estimate of the duration of a single step.
"""
if current:
# there are a few special scenarios here:
# 1) somebody is calling the progress bar without ever supplying step 1
      #    2) somebody is calling the progress bar and supplies step one multiple
# times, e.g. as part of a finalizing call
# in these cases, we just fall back to the simple calculation
if self._time_after_first_step is not None and current > 1:
time_per_unit = (now - self._time_after_first_step) / (current - 1)
else:
time_per_unit = (now - self._start) / current
if current == 1:
self._time_after_first_step = now
return time_per_unit
else:
return 0
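# A tiny, self-contained demo of the bar above. The step count and the fake
# decaying 'loss' metric are illustrative assumptions, not part of the
# original utility.
def _progbar_demo(steps=200):
  """Drive the progress bar for `steps` updates with a fake loss metric."""
  bar = Progbar(target=steps, unit_name='step')
  for step in range(1, steps + 1):
    # Metrics passed here are averaged over time unless their names were
    # listed in `stateful_metrics` at construction.
    bar.update(step, values=[('loss', 1.0 / step)])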
| 33.440828 | 80 | 0.613112 |
4a1c2cf247cd98aa11052a1d3f911cd4f8e7761e
| 4,390 |
py
|
Python
|
core/domain/value_generators_domain.py
|
anubhavsinha98/oppia
|
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
|
[
"Apache-2.0"
] | 1 |
2019-08-31T17:06:41.000Z
|
2019-08-31T17:06:41.000Z
|
core/domain/value_generators_domain.py
|
anubhavsinha98/oppia
|
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
|
[
"Apache-2.0"
] | null | null | null |
core/domain/value_generators_domain.py
|
anubhavsinha98/oppia
|
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes relating to value generators."""
from __future__ import absolute_import # pylint: disable=import-only-modules
import copy
import inspect
import os
import pkgutil
import feconf
import python_utils
import utils
class BaseValueGenerator(python_utils.OBJECT):
"""Base value generator class.
A value generator is a class containing a function that takes in
customization args and uses them to generate a value. The generated values
are not typed, so if the caller wants strongly-typed values it would need
to normalize the output of each generator.
Each value generator should define a template file and an AngularJS
directive. The names of these two files should be [ClassName].html and
[ClassName].js respectively, where [ClassName] is the name of the value
generator class.
"""
@property
def id(self):
"""Returns the Class name as a string, i.e "BaseValueGenerator".
Returns:
str. Class name i.e "BaseValueGenerator".
"""
return self.__class__.__name__
@classmethod
def get_html_template(cls):
"""Returns the HTML template for the class.
Returns:
str. The HTML template corresponding to the class.
"""
return utils.get_file_contents(os.path.join(
os.getcwd(), feconf.VALUE_GENERATORS_DIR, 'templates',
'%s.html' % cls.__name__))
def generate_value(self, *args, **kwargs):
"""Generates a new value, using the given customization args.
The first arg should be context_params.
"""
raise NotImplementedError
class Registry(python_utils.OBJECT):
"""Maintains a registry of all the value generators.
Attributes:
value_generators_dict: dict(str : BaseValueGenerator). Dictionary
mapping value generator class names to their classes.
"""
value_generators_dict = {}
@classmethod
def _refresh_registry(cls):
"""Refreshes the dictionary mapping between generator_id and the
corresponding generator classes.
"""
cls.value_generators_dict.clear()
# Assemble all generators in
# extensions/value_generators/models/generators.py.
value_generator_paths = [os.path.join(
os.getcwd(), feconf.VALUE_GENERATORS_DIR, 'models')]
# Crawl the directories and add new generator instances to the
# registries.
for loader, name, _ in pkgutil.iter_modules(
path=value_generator_paths):
if name.endswith('_test'):
continue
module = loader.find_module(name).load_module(name)
for _, clazz in inspect.getmembers(
module, predicate=inspect.isclass):
if issubclass(clazz, BaseValueGenerator):
cls.value_generators_dict[clazz.__name__] = clazz
@classmethod
def get_all_generator_classes(cls):
"""Get the dict of all value generator classes."""
cls._refresh_registry()
return copy.deepcopy(cls.value_generators_dict)
@classmethod
def get_generator_class_by_id(cls, generator_id):
"""Gets a generator class by its id.
Refreshes once if the generator is not found; subsequently, throws an
error.
Args:
generator_id: str. An id corresponding to a generator class.
Returns:
class(BaseValueGenerator). A generator class mapping to the
generator id given.
Raises:
KeyError: The given generator_id is invalid.
"""
if generator_id not in cls.value_generators_dict:
cls._refresh_registry()
return cls.value_generators_dict[generator_id]
| 33.51145 | 78 | 0.673349 |
4a1c2eb7a9e4a9ba3506badffb34f60ff13468b4
| 352 |
py
|
Python
|
articles/admin.py
|
osama-mohamed/recipe_django
|
59fb0597b2903ded743b2843ec4d7291cdeddafa
|
[
"MIT"
] | 3 |
2018-05-02T20:37:11.000Z
|
2020-10-15T17:19:26.000Z
|
articles/admin.py
|
osama-mohamed/recipe_django
|
59fb0597b2903ded743b2843ec4d7291cdeddafa
|
[
"MIT"
] | 1 |
2019-06-10T21:35:13.000Z
|
2019-06-10T21:35:13.000Z
|
articles/admin.py
|
osama-mohamed/recipe_django
|
59fb0597b2903ded743b2843ec4d7291cdeddafa
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Article
# Register your models here.
class ArticleAdmin(admin.ModelAdmin):
list_display = ['id', 'title', 'slug', 'user', 'timestamp', 'updated']
list_display_links = ['id', 'title']
search_fields = ['title', 'content']
raw_id_fields = ['user']
admin.site.register(Article, ArticleAdmin)
| 27.076923 | 72 | 0.713068 |
4a1c2f25931cb97c5fb3c85708b2ca45fa546281
| 1,690 |
py
|
Python
|
hash-table.py
|
vector8188/AlgorithmAnalysisPython
|
026ca8bf846a504c5eae1677680306b0462b49b9
|
[
"MIT"
] | 1 |
2018-02-01T21:54:48.000Z
|
2018-02-01T21:54:48.000Z
|
hash-table.py
|
vector8188/AlgorithmAnalysisPython
|
026ca8bf846a504c5eae1677680306b0462b49b9
|
[
"MIT"
] | null | null | null |
hash-table.py
|
vector8188/AlgorithmAnalysisPython
|
026ca8bf846a504c5eae1677680306b0462b49b9
|
[
"MIT"
] | null | null | null |
class HashTable:
def __init__(self):
self.size = 11
self.slots = [None] * self.size
self.data = [None] * self.size
def put(self,key, data):
hashvalue = self.hashfunction(key,len(self.slots))
if self.slots[hashvalue] == None:
self.slots[hashvalue] = key
self.data[hashvalue] = data
else:
if self.slots[hashvalue] == key:
self.data[hashvalue] = data
else:
nextslot = self.rehash(hashvalue,len(self.slots))
while ( self.slots[nextslot] != None and
self.slots[nextslot] != key):
nextslot = self.rehash(nextslot,len(self.slots))
if self.slots[nextslot] == None:
self.slots[nextslot] = key
self.data[nextslot] = data
else:
self.data[nextslot] = data
    def hashfunction(self, key, size):
        # Simple remainder hash: map the key onto a slot index.
        return key % size
    def rehash(self, oldhash, size):
        # Linear probing: try the next slot, wrapping around the table.
        return (oldhash + 1) % size
def get(self,key):
startslot = self.hashfunction(key,len(self.slots))
data = None
stop = False
found = False
position = startslot
while (
not found and not stop and self.slots[position] != None
):
if self.slots[position] == key:
found = True
data = self.data[position]
else:
position = self.rehash(position, len(self.slots))
if position == startslot:
stop = True
return data
def __getitem__(self,key):
return self.get(key)
def __setitem__(self,key,data):
self.put(key,data)
H=HashTable()
H[54]="cat"
H[26]="dog"
H[93]="lion"
H[17]="tiger"
H[77]="bird"
H[31]="cow"
H[44]="goat"
H[55]="pig"
H[20]="chicken"
# Expected layout after the inserts above:
# H.slots -> [77, 44, 55, 20, 26, 93, 17, None, None, 31, 54]
# H.data  -> ['bird', 'goat', 'pig', 'chicken', 'dog', 'lion',
#             'tiger', None, None, 'cow', 'cat']
print (H[20])
print (H[17])
| 19.882353 | 58 | 0.632544 |
4a1c319ba07dd615eabb6cadca5e28472498e35e
| 97 |
py
|
Python
|
WeatherPy/api_keys.py
|
i-kkwk/PythonAPI_challenge
|
bd2924e35a64e7f37327f4573abd3ac403d9a019
|
[
"ADSL"
] | null | null | null |
WeatherPy/api_keys.py
|
i-kkwk/PythonAPI_challenge
|
bd2924e35a64e7f37327f4573abd3ac403d9a019
|
[
"ADSL"
] | null | null | null |
WeatherPy/api_keys.py
|
i-kkwk/PythonAPI_challenge
|
bd2924e35a64e7f37327f4573abd3ac403d9a019
|
[
"ADSL"
] | null | null | null |
# OpenWeatherMap API Key
api_key = "KEY HERE PLEASE"
# Google API Key
g_key = "KEY HERE PLEASE"
| 16.166667 | 27 | 0.721649 |
4a1c3369c121478c7144984348e3f65347dcafbe
| 4,509 |
py
|
Python
|
scape/modules/messages.py
|
toufikhary/BabangChen
|
a9332c2d0737cd0c6c777f0ad469fe84a16302ab
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
scape/modules/messages.py
|
toufikhary/BabangChen
|
a9332c2d0737cd0c6c777f0ad469fe84a16302ab
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
scape/modules/messages.py
|
toufikhary/BabangChen
|
a9332c2d0737cd0c6c777f0ad469fe84a16302ab
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" scape module for purging unneeded messages(usually spam or ot). """
from asyncio import sleep
from telethon.errors import rpcbaseerrors
from scape import BOTLOG, BOTLOG_CHATID, CMD_HELP
from scape.events import register
@register(outgoing=True, pattern="^.purge$")
async def fastpurger(purg):
""" For .purge command, purge all messages starting from the reply. """
chat = await purg.get_input_chat()
msgs = []
itermsg = purg.client.iter_messages(chat, min_id=purg.reply_to_msg_id)
count = 0
if purg.reply_to_msg_id is not None:
async for msg in itermsg:
msgs.append(msg)
count = count + 1
msgs.append(purg.reply_to_msg_id)
if len(msgs) == 100:
await purg.client.delete_messages(chat, msgs)
msgs = []
else:
        await purg.edit("`I need a message to start purging from.`")
return
if msgs:
await purg.client.delete_messages(chat, msgs)
done = await purg.client.send_message(
purg.chat_id, f"`Fast purge complete!`\
\nPurged {str(count)} messages")
if BOTLOG:
await purg.client.send_message(
BOTLOG_CHATID,
"Purge of " + str(count) + " messages done successfully.")
await sleep(2)
await done.delete()
@register(outgoing=True, pattern="^.purgeme")
async def purgeme(delme):
""" For .purgeme, delete x count of your latest message."""
message = delme.text
count = int(message[9:])
i = 1
async for message in delme.client.iter_messages(delme.chat_id,
from_user='me'):
if i > count + 1:
break
i = i + 1
await message.delete()
smsg = await delme.client.send_message(
delme.chat_id,
"`Purge complete!` Purged " + str(count) + " messages.",
)
if BOTLOG:
await delme.client.send_message(
BOTLOG_CHATID,
"Purge of " + str(count) + " messages done successfully.")
await sleep(2)
i = 1
await smsg.delete()
@register(outgoing=True, pattern="^.del$")
async def delete_it(delme):
""" For .del command, delete the replied message. """
msg_src = await delme.get_reply_message()
if delme.reply_to_msg_id:
try:
await msg_src.delete()
await delme.delete()
if BOTLOG:
await delme.client.send_message(
BOTLOG_CHATID, "Deletion of message was successful")
except rpcbaseerrors.BadRequestError:
if BOTLOG:
await delme.client.send_message(
BOTLOG_CHATID, "Well, I can't delete a message")
@register(outgoing=True, pattern="^.edit")
async def editer(edit):
""" For .editme command, edit your last message. """
message = edit.text
chat = await edit.get_input_chat()
self_id = await edit.client.get_peer_id('me')
string = str(message[6:])
i = 1
async for message in edit.client.iter_messages(chat, self_id):
if i == 2:
await message.edit(string)
await edit.delete()
break
i = i + 1
if BOTLOG:
await edit.client.send_message(BOTLOG_CHATID,
"Edit query was executed successfully")
@register(outgoing=True, pattern="^.sd")
async def selfdestruct(destroy):
""" For .sd command, make seflf-destructable messages. """
message = destroy.text
counter = int(message[4:6])
text = str(destroy.text[6:])
await destroy.delete()
smsg = await destroy.client.send_message(destroy.chat_id, text)
await sleep(counter)
await smsg.delete()
if BOTLOG:
await destroy.client.send_message(BOTLOG_CHATID,
"sd query done successfully")
CMD_HELP.update({
"messages":
"`.purge`\
\nUsage: Purges all messages starting from the reply.\
\n\n`.purgeme` <x>\
    \nUsage: Deletes your x latest messages.\
\n\n`.del`\
\nUsage: Deletes the message you replied to.\
\n\n`.edit`\
\nUsage: Replace your last message with <newmessage>.\
\n\n`.sd `<x> <message>\
\nUsage: Creates a message that selfdestructs in x seconds.\
\nKeep the seconds under 100 since it puts your bot to sleep"
})
| 31.753521 | 78 | 0.616323 |
4a1c35be8b03edcb689b3ee7d86d6a04a8a02241
| 1,439 |
py
|
Python
|
anomaly_detection/deep_geo/main.py
|
ninatu/anomaly_detection
|
6fa35f3fd35976ce2b857801d288e17f454241b9
|
[
"Apache-2.0"
] | 22 |
2020-10-21T07:59:33.000Z
|
2022-03-18T08:07:49.000Z
|
anomaly_detection/deep_geo/main.py
|
ninatu/anomaly_detection
|
6fa35f3fd35976ce2b857801d288e17f454241b9
|
[
"Apache-2.0"
] | 2 |
2020-10-26T05:19:39.000Z
|
2021-09-21T18:16:02.000Z
|
anomaly_detection/deep_geo/main.py
|
ninatu/anomaly_detection
|
6fa35f3fd35976ce2b857801d288e17f454241b9
|
[
"Apache-2.0"
] | 7 |
2020-11-19T12:32:29.000Z
|
2022-03-06T21:02:30.000Z
|
"""
Copyright (c) 2018 izikgo
Copyright (c) 2020 ninatu
This file is a modified file of the project: https://github.com/izikgo/AnomalyDetectionTransformations,
which was released under MIT License.
Go to https://github.com/izikgo/AnomalyDetectionTransformations/blob/master/LICENSE for full license details.
"""
import argparse
import yaml
import tensorflow as tf
from keras.backend import set_session
from anomaly_detection.deep_geo.train import train
from anomaly_detection.deep_geo.evaluate import evaluate
def main():
parser = argparse.ArgumentParser()
parser.add_argument('action', type=str, choices=['train', 'eval', 'train_eval'])
parser.add_argument('configs', type=str, nargs='*', help='Config paths')
args = parser.parse_args()
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
set_session(tf.Session(config=tf_config))
if args.action == 'train':
assert len(args.configs) == 1
train(_load_config(args.configs[0]))
elif args.action == 'eval':
assert len(args.configs) == 1
evaluate(_load_config(args.configs[0]))
else:
assert len(args.configs) == 2
train(_load_config(args.configs[0]))
evaluate(_load_config(args.configs[1]))
def _load_config(path):
with open(path, 'r') as stream:
config = yaml.load(stream, Loader=yaml.FullLoader)
return config
if __name__ == '__main__':
main()
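# Example invocations (the YAML paths are placeholders, not files shipped with
# this module); 'train_eval' expects a training config followed by an
# evaluation config, matching the asserts in main():
#   python main.py train configs/deep_geo/train.yaml
#   python main.py train_eval configs/deep_geo/train.yaml configs/deep_geo/eval.yaml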
| 28.78 | 109 | 0.707436 |
4a1c35f4d25ccb95c791f362d412443e46a7d075
| 2,855 |
py
|
Python
|
ding/utils/slurm_helper.py
|
davide97l/DI-engine
|
d48c93bcd5c07c29f2ce4ac1b7756b8bc255c423
|
[
"Apache-2.0"
] | 1 |
2022-03-21T16:15:39.000Z
|
2022-03-21T16:15:39.000Z
|
ding/utils/slurm_helper.py
|
jiaruonan/DI-engine
|
268d77db3cb54401b2cfc83e2bc3ec87c31e7b83
|
[
"Apache-2.0"
] | null | null | null |
ding/utils/slurm_helper.py
|
jiaruonan/DI-engine
|
268d77db3cb54401b2cfc83e2bc3ec87c31e7b83
|
[
"Apache-2.0"
] | null | null | null |
import os
import subprocess
from typing import Optional, Dict, Tuple
MANAGER_NODE_TABLE = {
'10.198.8': '10.198.8.31',
'10.198.6': '10.198.6.31',
'10.5.38': '10.5.38.31',
'10.5.39': '10.5.38.31',
'10.5.36': '10.5.36.31',
'10.5.37': '10.5.36.31',
'10.10.30': '10.10.30.91',
}
def get_ip() -> str:
assert os.environ.get('SLURMD_NODENAME'), 'not found SLURMD_NODENAME env variable'
# expecting nodename to be like: 'SH-IDC1-10-5-36-64'
nodename = os.environ.get('SLURMD_NODENAME', '')
myaddr = '.'.join(nodename.split('-')[-4:])
return myaddr
def get_manager_node_ip(node_ip: Optional[str] = None) -> str:
r"""
Overview:
Look up the manager node of the slurm cluster and return the node ip
"""
if 'SLURM_JOB_ID' not in os.environ:
from ditk import logging
logging.error(
'We are not running on slurm!, \'auto\' for manager_ip or '
'coordinator_ip is only intended for running on multiple slurm clusters'
)
return '127.0.0.1'
node_ip = node_ip or get_ip()
learner_manager_ip_prefix = '.'.join(node_ip.split('.')[0:3])
if learner_manager_ip_prefix in MANAGER_NODE_TABLE:
return MANAGER_NODE_TABLE[learner_manager_ip_prefix]
else:
raise KeyError("Cluster not found, please add it to the MANAGER_NODE_TABLE in {}".format(__file__))
# get all info of cluster
def get_cls_info() -> Dict[str, list]:
ret_dict = {}
info = subprocess.getoutput('sinfo -Nh').split('\n')
for line in info:
line = line.strip().split()
if len(line) != 4:
continue
node, _, partition, state = line
if partition not in ret_dict:
ret_dict[partition] = []
assert node not in ret_dict[partition]
if state in ['idle', 'mix']:
ret_dict[partition].append(node)
return ret_dict
def node_to_partition(target_node: str) -> str:
info = subprocess.getoutput('sinfo -Nh').split('\n')
for line in info:
line = line.strip().split()
if len(line) != 4:
continue
node, _, partition, state = line
if node == target_node:
return partition
raise RuntimeError("not found target_node: {}".format(target_node))
def node_to_host(node: str) -> str:
return '.'.join(node.split('-')[-4:])
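def _node_name_example():
    # Sketch only: slurm node names like 'SH-IDC1-10-5-36-64' (see get_ip above)
    # map to dotted IPs. The node name is a made-up example; the sinfo/srun based
    # helpers below need a real slurm cluster, so they are not exercised here.
    return node_to_host('SH-IDC1-10-5-36-64')  # -> '10.5.36.64'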
def find_free_port_slurm(node: str) -> int:
partition = node_to_partition(node)
if partition == 'spring_scheduler':
comment = '--comment=spring-submit'
else:
comment = ''
output = subprocess.getoutput(
"srun -p {} -w {} {} python -c \"from ding.utils import find_free_port; print('port' + str(find_free_port(0)))\"" # noqa
.format(partition, node, comment)
)
port = output.split('port')[-1]
return int(port)
| 31.373626 | 129 | 0.610858 |
4a1c36d9055f6a78ee9a38c31c44a46b8ac55922
| 9,885 |
py
|
Python
|
neuraxle_tensorflow/tensorflow_v1.py
|
Neuraxio/Neuraxle-TensorFlow
|
d4ef1b455ce9ba5bedd1d502498c5440bf1af962
|
[
"Apache-2.0"
] | 5 |
2019-12-31T16:51:44.000Z
|
2021-08-24T13:58:35.000Z
|
neuraxle_tensorflow/tensorflow_v1.py
|
Neuraxio/Neuraxle-TensorFlow
|
d4ef1b455ce9ba5bedd1d502498c5440bf1af962
|
[
"Apache-2.0"
] | 15 |
2019-12-23T00:33:05.000Z
|
2022-03-16T21:20:56.000Z
|
neuraxle_tensorflow/tensorflow_v1.py
|
Neuraxio/Neuraxle-TensorFlow
|
d4ef1b455ce9ba5bedd1d502498c5440bf1af962
|
[
"Apache-2.0"
] | 2 |
2020-08-27T20:16:29.000Z
|
2020-11-09T15:58:35.000Z
|
"""
Neuraxle Tensorflow V1 Utility classes
=========================================
Neuraxle utility classes for tensorflow v1.
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tensorflow as tf
from neuraxle.base import BaseSaver, BaseStep, ExecutionContext
from neuraxle.hyperparams.space import HyperparameterSamples, HyperparameterSpace
from neuraxle_tensorflow.tensorflow import BaseTensorflowModelStep
class TensorflowV1ModelStep(BaseTensorflowModelStep):
"""
Base class for tensorflow 1 steps.
It uses :class:`TensorflowV1StepSaver` for saving the model.
.. seealso::
`Using the saved model format <https://www.tensorflow.org/guide/checkpoint>`_,
:class:`~neuraxle.base.BaseStep`
"""
HYPERPARAMS = HyperparameterSamples({})
HYPERPARAMS_SPACE = HyperparameterSpace({})
def __init__(
self,
create_graph,
create_loss,
create_optimizer,
create_feed_dict=None,
data_inputs_dtype=None,
expected_outputs_dtype=None,
variable_scope=None,
has_expected_outputs=True,
print_loss=False,
print_func=None
):
BaseTensorflowModelStep.__init__(
self,
create_model=create_graph,
create_loss=create_loss,
create_optimizer=create_optimizer,
create_inputs=create_feed_dict,
data_inputs_dtype=data_inputs_dtype,
expected_outputs_dtype=expected_outputs_dtype,
step_saver=TensorflowV1StepSaver(),
print_loss=print_loss,
print_func=print_func
)
if variable_scope is None:
variable_scope = self.name
self.variable_scope = variable_scope
self.has_expected_outputs = has_expected_outputs
self.create_feed_dict = create_feed_dict
def setup(self, context: ExecutionContext) -> BaseStep:
"""
Setup tensorflow 1 graph, and session using a variable scope.
:return: self
:rtype: BaseStep
"""
if self.is_initialized:
return self
self.graph = tf.Graph()
with self.graph.as_default():
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True), graph=self.graph)
model = self.create_model(self, context)
if not isinstance(model, tuple):
tf.identity(model, name='output')
else:
tf.identity(model[0], name='output')
tf.identity(model[1], name='inference_output')
tf.identity(self.create_loss(self), name='loss')
self.create_optimizer(self, context).minimize(self['loss'], name='optimizer')
init = tf.global_variables_initializer()
self.session.run(init)
        self.is_initialized = True
        return self
def teardown(self) -> BaseStep:
"""
Close session on teardown.
:return:
"""
if self.session is not None:
self.session.close()
self.is_initialized = False
return self
def strip(self):
"""
Strip tensorflow 1 properties from to step to make the step serializable.
:return: stripped step
:rtype: BaseStep
"""
self.graph = None
self.session = None
return self
def fit(self, data_inputs, expected_outputs=None) -> 'BaseStep':
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
return self.fit_model(data_inputs, expected_outputs)
def fit_model(self, data_inputs, expected_outputs=None) -> BaseStep:
"""
Fit tensorflow model using the variable scope.
:param data_inputs: data inputs
:param expected_outputs: expected outputs to fit on
:return: fitted self
:rtype: BaseStep
"""
feed_dict = {
self['data_inputs']: data_inputs
}
if self.has_expected_outputs:
feed_dict.update({
self['expected_outputs']: expected_outputs
})
if self.create_inputs is not None:
additional_feed_dict_arguments = self.create_inputs(self, data_inputs, expected_outputs)
feed_dict.update(additional_feed_dict_arguments)
results = self.session.run([self['optimizer'], self['loss']], feed_dict=feed_dict)
loss = results[1]
self.add_new_loss(loss)
return self
def transform(self, data_inputs, expected_outputs=None) -> 'BaseStep':
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
return self.transform_model(data_inputs)
def transform_model(self, data_inputs):
"""
Transform tensorflow model using the variable scope.
:param data_inputs:
:return:
"""
inference_output_name = self._get_inference_output_name()
feed_dict = {
self['data_inputs']: data_inputs
}
results = self.session.run([self[inference_output_name], self['loss']], feed_dict=feed_dict)
self.add_new_loss(results[1], test_only=True)
return results[0]
def _get_inference_output_name(self):
"""
Return the output tensor name for inference (transform).
        In create_graph, the user can return a tuple of two elements: the output tensor for training, and the output tensor for inference.
:return:
"""
inference_output_name = 'output'
if len(self['inference_output'].get_shape().as_list()) > 0:
inference_output_name = 'inference_output'
return inference_output_name
def __getitem__(self, item):
"""
Get a graph tensor by name using get item.
:param item: tensor name
:type item: str
:return: tensor
:rtype: tf.Tensor
"""
if ":" in item:
split = item.split(":")
tensor_name = split[0]
device = split[1]
else:
tensor_name = item
device = "0"
try:
result = self.graph.get_tensor_by_name("{0}/{1}:{2}".format(self.variable_scope, tensor_name, device))
except KeyError:
result = None
if result is None:
try:
result = self.graph.get_operation_by_name("{0}/{1}".format(self.variable_scope, tensor_name))
except KeyError:
result = tf.get_variable(tensor_name, [])
return result
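# A minimal construction sketch for the step above. The placeholder shapes, the
# single dense layer and the Adam learning rate are illustrative assumptions;
# only the callable signatures and the 'data_inputs' / 'expected_outputs'
# tensor names are dictated by this module.
def _example_regression_step():
    def create_graph(step, context):
        data_inputs = tf.placeholder(tf.float32, [None, 4], name='data_inputs')
        tf.placeholder(tf.float32, [None, 1], name='expected_outputs')
        return tf.layers.dense(data_inputs, 1)
    def create_loss(step):
        return tf.reduce_mean(tf.square(step['output'] - step['expected_outputs']))
    def create_optimizer(step, context):
        return tf.train.AdamOptimizer(learning_rate=0.01)
    return TensorflowV1ModelStep(
        create_graph=create_graph,
        create_loss=create_loss,
        create_optimizer=create_optimizer)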
class TensorflowV1StepSaver(BaseSaver):
"""
Step saver for a tensorflow Session using tf.train.Saver().
It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.
.. seealso::
`Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,
:class:`~neuraxle.base.BaseSaver`
"""
def save_step(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext') -> 'BaseStep':
"""
Save a step that is using tf.train.Saver().
:param step: step to save
:type step: BaseStep
:param context: execution context to save from
:type context: ExecutionContext
:return: saved step
"""
with step.graph.as_default():
saver = tf.train.Saver()
saver.save(step.session, self._get_saved_model_path(context, step))
step.strip()
return step
def load_step(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext') -> 'BaseStep':
"""
Load a step that is using tensorflow using tf.train.Saver().
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
step.is_initialized = False
step.setup(context)
with step.graph.as_default():
saver = tf.train.Saver()
saver.restore(step.session, self._get_saved_model_path(context, step))
return step
def can_load(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext'):
"""
Returns whether or not we can load.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
meta_exists = os.path.exists(os.path.join(context.get_path(), "{0}.ckpt.meta".format(step.get_name())))
index_exists = os.path.exists(os.path.join(context.get_path(), "{0}.ckpt.index".format(step.get_name())))
return meta_exists and index_exists
def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):
"""
Returns the saved model path using the given execution context, and step name.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
return os.path.join(context.get_path(), "{0}.ckpt".format(step.get_name()))
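# For a step named 'tf_model' saved under an execution context rooted at
# '<cache>/pipeline/' (names are illustrative), the saver above writes the usual
# tf.train.Saver checkpoint files (plus the .data-* shards) that can_load()
# later looks for:
#   <cache>/pipeline/tf_model.ckpt.meta
#   <cache>/pipeline/tf_model.ckpt.index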
| 33.737201 | 139 | 0.621345 |
4a1c36ec5e97a412d9fa13bac236d2532bd4d8f6
| 2,889 |
py
|
Python
|
Config/setting.py
|
roperluo32/proxy_pool
|
d4ed10f00c56b05d82f5e655d55a0ccb72db3180
|
[
"MIT"
] | null | null | null |
Config/setting.py
|
roperluo32/proxy_pool
|
d4ed10f00c56b05d82f5e655d55a0ccb72db3180
|
[
"MIT"
] | null | null | null |
Config/setting.py
|
roperluo32/proxy_pool
|
d4ed10f00c56b05d82f5e655d55a0ccb72db3180
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: setting.py
   Description :  Configuration file
Author : JHao
date: 2019/2/15
-------------------------------------------------
Change Activity:
2019/2/15:
-------------------------------------------------
"""
import sys
from os import getenv
from logging import getLogger
log = getLogger(__name__)
HEADER = """
****************************************************************
*** ______ ********************* ______ *********** _ ********
*** | ___ \_ ******************** | ___ \ ********* | | ********
*** | |_/ / \__ __ __ _ __ _ | |_/ /___ * ___ | | ********
*** | __/| _// _ \ \ \/ /| | | || __// _ \ / _ \ | | ********
*** | | | | | (_) | > < \ |_| || | | (_) | (_) || |___ ****
*** \_| |_| \___/ /_/\_\ \__ |\_| \___/ \___/ \_____/ ****
**** __ / / *****
************************* /___ / *******************************
************************* ********************************
****************************************************************
"""
PY3 = sys.version_info >= (3,)
DB_TYPE = getenv('db_type', 'SSDB').upper()
DB_HOST = getenv('db_host', '127.0.0.1')
DB_PORT = getenv('db_port', 8888)
DB_PASSWORD = getenv('db_password', '')
if DB_TYPE == 'SSDB':
DB_HOST = getenv('ssdb_host', '10.0.0.13')
DB_PORT = getenv('ssdb_port', '6379')
DB_PASSWORD = getenv('ssdb_password', 'Peng3532869')
elif DB_TYPE == 'MONGODB':
DB_HOST = getenv('mongodb_host', '127.0.0.1')
DB_PORT = getenv('mongodb_host', '27017')
DB_PASSWORD = getenv('mongodb_password', '')
else:
raise ConfigError('Unknown database type, your environment variable `db_type` should be one of SSDB/MONGODB.')
""" 数据库配置 """
DATABASES = {
"default": {
"TYPE": DB_TYPE,
"HOST": DB_HOST,
"PORT": DB_PORT,
"NAME": "proxy",
"PASSWORD": DB_PASSWORD
}
}
# register the proxy getter function
PROXY_GETTER = [
'qingtingProxy',
]
""" API config http://127.0.0.1:5010 """
SERVER_API = {
"HOST": "0.0.0.0", # The ip specified which starting the web API
"PORT": 5010 # port number to which the server listens to
}
class ConfigError(BaseException):
pass
def checkConfig():
    if DB_TYPE not in ["SSDB", "MONGODB"]:
        raise ConfigError('db_type is not supported: %s, must be SSDB/MONGODB.' % DB_TYPE)
if type(DB_PORT) == str and not DB_PORT.isdigit():
raise ConfigError('if db_port is string, it must be digit, not %s' % DB_PORT)
from ProxyGetter import getFreeProxy
illegal_getter = list(filter(lambda key: not hasattr(getFreeProxy.GetFreeProxy, key), PROXY_GETTER))
if len(illegal_getter) > 0:
raise ConfigError("ProxyGetter: %s does not exists" % "/".join(illegal_getter))
checkConfig()
| 30.734043 | 114 | 0.471443 |
4a1c37dc06d7a9f482a22ad7ed3ce2faebc4a235
| 2,480 |
py
|
Python
|
main.py
|
cedar10b/travelapp
|
992b3a14c9d2f9487c09de637304d7aa0165f19e
|
[
"MIT"
] | 1 |
2016-07-19T09:27:48.000Z
|
2016-07-19T09:27:48.000Z
|
main.py
|
cedar10b/travelapp
|
992b3a14c9d2f9487c09de637304d7aa0165f19e
|
[
"MIT"
] | null | null | null |
main.py
|
cedar10b/travelapp
|
992b3a14c9d2f9487c09de637304d7aa0165f19e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sql_functions import *
# number of cities to include in search
N = 100
# number of cities to include in results
top = 10
# read and combine dataframes into one dataframe
cities = read_table('cities')
airfare = read_table('airfare')
weather = read_table('weather')
yelp = read_table('yelp')
safety = read_table('safety')
df = pd.merge(cities.ix[0:N-1, ['city', 'state_abbr', 'norm_popul']],
safety, on=['city', 'state_abbr'], how='left')
df = pd.merge(df, weather.ix[:, ['city', 'state_abbr', 'weather',
'mintemp', 'maxtemp', 'condition']],
on=['city', 'state_abbr'], how='left')
df = pd.merge(df, yelp.ix[:, ['city', 'state_abbr', 'bars', 'restaurants']],
on=['city', 'state_abbr'], how='left')
df = pd.merge(df, airfare.ix[:, ['city', 'state_abbr', 'airfare']],
on=['city', 'state_abbr'], how='left')
create_table(df, 'main')
def rank(df, norm_popul=5, safety=5, bars=5, weather=5, airfare=500):
    # compute a weighted average according to the user's input
    # and normalize from 0 to 10
scores = norm_popul*df.norm_popul + safety*df.safety + \
bars*df.bars + weather*df.weather
scores -= scores.min()
scores /= 0.1*scores.max() #normalize from 0 to 10
scores = np.where(df.airfare <= airfare, np.round(scores, 2), 0.0)
# visualize output
order = np.argsort(scores)[::-1]
final_scores = scores[order][0:top][::-1]
city = np.array([str(df.city[order[i]]) for i in range(top)])[::-1]
state = np.array([str(df.state_abbr[order[i]]) for i in range(top)])[::-1]
best = np.array([city[i] + ', ' + state[i] for i in range(top)])
sns.set_style("whitegrid")
plt.barh(np.arange(top), final_scores, align='center')
plt.ylim((-1, top))
plt.xlim(np.floor(final_scores[0]), np.ceil(final_scores[-1]))
plt.yticks(np.arange(top), best)
plt.xlabel('Score', fontsize=18)
plt.title('Top ten cities', fontsize=18)
plt.tick_params(labelsize=18)
#fig = plt.gcf()
#fig.set_size_inches(6,8)
#fig.savefig('fig2.png', bbox_inches='tight')
plt.show()
print 'Best match: ', best[-1]
print 'Cost of airfare: ', df.airfare[order[0]]
print 'Temperature from ', df.mintemp[order[0]], 'to ', df.maxtemp[order[0]], ' F'
print 'Weather forecast: ', df.condition[order[0]]
| 33.972603 | 84 | 0.621774 |
4a1c39128472e48dcf36c8127b63f3681fc56a8c
| 9,541 |
py
|
Python
|
test/test_oneview_uplink_set.py
|
bryansullins/baremetalesxi-hpesynergyoneview
|
e4541d02ce1c93bb9a98a07a3a483a9b2ac90bce
|
[
"MIT"
] | 1 |
2020-12-11T23:38:00.000Z
|
2020-12-11T23:38:00.000Z
|
test/test_oneview_uplink_set.py
|
bryansullins/baremetalesxi-hpesynergyoneview
|
e4541d02ce1c93bb9a98a07a3a483a9b2ac90bce
|
[
"MIT"
] | null | null | null |
test/test_oneview_uplink_set.py
|
bryansullins/baremetalesxi-hpesynergyoneview
|
e4541d02ce1c93bb9a98a07a3a483a9b2ac90bce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import mock
import pytest
from copy import deepcopy
from hpe_test_utils import OneViewBaseTest
from oneview_module_loader import UplinkSetModule
DEFAULT_UPLINK_NAME = 'Test Uplink Set'
RENAMED_UPLINK_SET = 'Renamed Uplink Set'
LOGICAL_INTERCONNECT = dict(uri="/rest/logical-interconnects/0de81de6-6652-4861-94f9-9c24b2fd0d66",
name='Name of the Logical Interconnect')
EXISTENT_UPLINK_SETS = [
dict(name=DEFAULT_UPLINK_NAME,
logicalInterconnectUri="/rest/logical-interconnects/c4ae6a56-a595-4b06-8c7a-405212df8b93"),
dict(name=DEFAULT_UPLINK_NAME,
status="OK",
logicalInterconnectUri=LOGICAL_INTERCONNECT['uri'],
networkUris=[
'/rest/ethernet-networks/9e8472ad-5ad1-4cbd-aab1-566b67ffc6a4',
'/rest/ethernet-networks/28ea7c1a-4930-4432-854b-30cf239226a2']),
dict(name=DEFAULT_UPLINK_NAME,
logicalInterconnectUri="/rest/logical-interconnects/c4ae6a56-a595-4b06-8c7a-405212df8b93")]
UPLINK_SET_FOUND_BY_KEY = EXISTENT_UPLINK_SETS[1]
PARAMS_FOR_PRESENT = dict(
config='config.json',
state='present',
data=dict(
name=DEFAULT_UPLINK_NAME,
logicalInterconnectUri=LOGICAL_INTERCONNECT['uri'],
networkUris=[
'/rest/ethernet-networks/9e8472ad-5ad1-4cbd-aab1-566b67ffc6a4',
'/rest/ethernet-networks/28ea7c1a-4930-4432-854b-30cf239226a2'
],
)
)
PARAMS_FOR_PRESENT_WITH_LI_NAME = dict(
config='config.json',
state='present',
data=dict(
name=DEFAULT_UPLINK_NAME,
logicalInterconnectName=LOGICAL_INTERCONNECT['name'],
networkUris=[
'/rest/ethernet-networks/9e8472ad-5ad1-4cbd-aab1-566b67ffc6a4',
'/rest/ethernet-networks/28ea7c1a-4930-4432-854b-30cf239226a2'
]
)
)
PARAMS_TO_RENAME = dict(
config='config.json',
state='present',
data=dict(name=DEFAULT_UPLINK_NAME,
logicalInterconnectUri=LOGICAL_INTERCONNECT['uri'],
newName=RENAMED_UPLINK_SET)
)
PARAMS_WITH_CHANGES = dict(
config='config.json',
state='present',
data=dict(
name=DEFAULT_UPLINK_NAME,
logicalInterconnectUri=LOGICAL_INTERCONNECT['uri'],
networkUris=[
'/rest/ethernet-networks/9e8472ad-5ad1-4cbd-aab1-566b67ffc6a4',
'/rest/ethernet-networks/28ea7c1a-4930-4432-854b-30cf239226a2',
'/rest/ethernet-networks/96g7df9g-6njb-n5jg-54um-fmsd879gdfgm'
],
)
)
PARAMS_FOR_ABSENT = dict(
config='config.json',
state='absent',
data=dict(name=DEFAULT_UPLINK_NAME,
logicalInterconnectUri=LOGICAL_INTERCONNECT['uri'])
)
PARAMS_FOR_ABSENT_WITH_LI_NAME = dict(
config='config.json',
state='absent',
data=dict(name=DEFAULT_UPLINK_NAME,
logicalInterconnectName=LOGICAL_INTERCONNECT['name'])
)
@pytest.mark.resource(TestUplinkSetModule='uplink_sets')
class TestUplinkSetModule(OneViewBaseTest):
def test_should_create(self):
self.resource.get_by.return_value = []
self.resource.create.return_value = UPLINK_SET_FOUND_BY_KEY
self.mock_ansible_module.params = deepcopy(PARAMS_FOR_PRESENT)
UplinkSetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=UplinkSetModule.MSG_CREATED,
ansible_facts=dict(uplink_set=UPLINK_SET_FOUND_BY_KEY)
)
def test_should_replace_logical_interconnect_name_by_uri(self):
self.resource.get_by.return_value = []
self.resource.create.return_value = UPLINK_SET_FOUND_BY_KEY
self.mock_ov_client.logical_interconnects.get_by_name.return_value = LOGICAL_INTERCONNECT
self.mock_ansible_module.params = deepcopy(PARAMS_FOR_PRESENT_WITH_LI_NAME)
UplinkSetModule().run()
self.mock_ov_client.logical_interconnects.get_by_name.assert_called_once_with(
'Name of the Logical Interconnect')
self.resource.create.assert_called_once_with(PARAMS_FOR_PRESENT['data'])
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=UplinkSetModule.MSG_CREATED,
ansible_facts=dict(uplink_set=UPLINK_SET_FOUND_BY_KEY)
)
def test_should_fail_when_logical_interconnect_not_found(self):
self.resource.get_by.return_value = []
self.mock_ov_client.logical_interconnects.get_by_name.return_value = None
self.mock_ansible_module.params = deepcopy(PARAMS_FOR_PRESENT_WITH_LI_NAME)
UplinkSetModule().run()
self.mock_ov_client.logical_interconnects.get_by_name.assert_called_once_with(
'Name of the Logical Interconnect')
self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=UplinkSetModule.MSG_LOGICAL_INTERCONNECT_NOT_FOUND)
def test_should_not_update_when_data_is_equals(self):
self.resource.get_by.return_value = EXISTENT_UPLINK_SETS
self.mock_ansible_module.params = deepcopy(PARAMS_FOR_PRESENT)
UplinkSetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=UplinkSetModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(uplink_set=UPLINK_SET_FOUND_BY_KEY)
)
def test_update_when_data_has_modified_attributes(self):
data_merged = UPLINK_SET_FOUND_BY_KEY.copy()
data_merged['description'] = 'New description'
self.resource.get_by.return_value = EXISTENT_UPLINK_SETS
self.resource.update.return_value = data_merged
self.mock_ansible_module.params = deepcopy(PARAMS_WITH_CHANGES)
UplinkSetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=UplinkSetModule.MSG_UPDATED,
ansible_facts=dict(uplink_set=data_merged)
)
def test_rename_when_resource_exists(self):
data_merged = UPLINK_SET_FOUND_BY_KEY.copy()
data_merged['name'] = RENAMED_UPLINK_SET
params_to_rename = deepcopy(PARAMS_TO_RENAME)
self.resource.get_by = mock.MagicMock(side_effect=[EXISTENT_UPLINK_SETS, []])
self.resource.update.return_value = data_merged
self.mock_ansible_module.params = params_to_rename
UplinkSetModule().run()
self.resource.update.assert_called_once_with(data_merged)
def test_should_delete(self):
self.resource.get_by.return_value = EXISTENT_UPLINK_SETS
self.mock_ansible_module.params = deepcopy(PARAMS_FOR_ABSENT)
UplinkSetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=UplinkSetModule.MSG_DELETED
)
def test_should_replace_logical_interconnect_name_by_uri_on_absent_state(self):
self.resource.get_by.return_value = EXISTENT_UPLINK_SETS
self.mock_ov_client.logical_interconnects.get_by_name.return_value = LOGICAL_INTERCONNECT
self.mock_ansible_module.params = deepcopy(PARAMS_FOR_ABSENT_WITH_LI_NAME)
UplinkSetModule().run()
self.mock_ov_client.logical_interconnects.get_by_name.assert_called_once_with(
'Name of the Logical Interconnect')
self.resource.delete.assert_called_once_with(UPLINK_SET_FOUND_BY_KEY)
def test_should_do_nothing_when_not_exist(self):
self.resource.get_by.return_value = []
self.mock_ansible_module.params = deepcopy(PARAMS_FOR_ABSENT)
UplinkSetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=UplinkSetModule.MSG_ALREADY_ABSENT
)
def test_should_fail_when_name_not_set(self):
params = deepcopy(PARAMS_FOR_ABSENT)
params['data'].pop('name')
self.mock_ansible_module.params = params
UplinkSetModule().run()
self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=UplinkSetModule.MSG_KEY_REQUIRED)
def test_should_fail_when_logical_interconnect_uri_not_set(self):
params = deepcopy(PARAMS_FOR_ABSENT)
params['data'].pop('logicalInterconnectUri')
self.mock_ansible_module.params = params
UplinkSetModule().run()
self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=UplinkSetModule.MSG_KEY_REQUIRED)
def test_should_fail_when_logical_interconnect_name_not_set(self):
params = deepcopy(PARAMS_FOR_ABSENT_WITH_LI_NAME)
params['data'].pop('logicalInterconnectName')
self.mock_ansible_module.params = params
UplinkSetModule().run()
self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=UplinkSetModule.MSG_KEY_REQUIRED)
if __name__ == '__main__':
pytest.main([__file__])
| 37.269531 | 142 | 0.723719 |
4a1c3a1e9e1c8909c820d718aa536a57f2da424f
| 93,272 |
py
|
Python
|
tests/test_iot/test_iot.py
|
chrisevett/moto
|
7d44df5266efa8d9406d9f6ed5ccdea49fb3e299
|
[
"Apache-2.0"
] | 2 |
2021-06-21T17:50:27.000Z
|
2021-06-21T19:14:21.000Z
|
tests/test_iot/test_iot.py
|
chrisevett/moto
|
7d44df5266efa8d9406d9f6ed5ccdea49fb3e299
|
[
"Apache-2.0"
] | 1 |
2021-01-26T13:46:32.000Z
|
2021-01-26T13:46:32.000Z
|
tests/test_iot/test_iot.py
|
chrisevett/moto
|
7d44df5266efa8d9406d9f6ed5ccdea49fb3e299
|
[
"Apache-2.0"
] | null | null | null |
import json
import sure # noqa # pylint: disable=unused-import
import boto3
from moto import mock_iot, mock_cognitoidentity
from botocore.exceptions import ClientError
import pytest
def generate_thing_group_tree(iot_client, tree_dict, _parent=None):
"""
Generates a thing group tree given the input tree structure.
:param iot_client: the iot client for boto3
:param tree_dict: dictionary with the key being the group_name, and the value being a sub tree.
tree_dict = {
"group_name_1a":{
"group_name_2a":{
"group_name_3a":{} or None
},
},
"group_name_1b":{}
}
:return: a dictionary of created groups, keyed by group name
"""
if tree_dict is None:
tree_dict = {}
created_dict = {}
for group_name in tree_dict.keys():
params = {"thingGroupName": group_name}
if _parent:
params["parentGroupName"] = _parent
created_group = iot_client.create_thing_group(**params)
created_dict[group_name] = created_group
subtree_dict = generate_thing_group_tree(
iot_client=iot_client, tree_dict=tree_dict[group_name], _parent=group_name
)
created_dict.update(created_dict)
created_dict.update(subtree_dict)
return created_dict
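def example_thing_group_tree(iot_client):
    # Illustrative sketch only (group names are made up): build a one-parent,
    # two-child tree and return the created groups keyed by name, i.e.
    # {'parent_group': ..., 'child_group_a': ..., 'child_group_b': ...}.
    return generate_thing_group_tree(
        iot_client, {"parent_group": {"child_group_a": {}, "child_group_b": {}}}
    )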
@mock_iot
def test_attach_policy():
client = boto3.client("iot", region_name="ap-northeast-1")
policy_name = "my-policy"
doc = "{}"
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert["certificateArn"]
client.create_policy(policyName=policy_name, policyDocument=doc)
client.attach_policy(policyName=policy_name, target=cert_arn)
res = client.list_attached_policies(target=cert_arn)
res.should.have.key("policies").which.should.have.length_of(1)
res["policies"][0]["policyName"].should.equal("my-policy")
@mock_iot
@mock_cognitoidentity
def test_attach_policy_to_identity():
region = "ap-northeast-1"
cognito_identity_client = boto3.client("cognito-identity", region_name=region)
identity_pool_name = "test_identity_pool"
identity_pool = cognito_identity_client.create_identity_pool(
IdentityPoolName=identity_pool_name, AllowUnauthenticatedIdentities=True
)
identity = cognito_identity_client.get_id(
AccountId="test", IdentityPoolId=identity_pool["IdentityPoolId"]
)
client = boto3.client("iot", region_name=region)
policy_name = "my-policy"
doc = "{}"
client.create_policy(policyName=policy_name, policyDocument=doc)
client.attach_policy(policyName=policy_name, target=identity["IdentityId"])
res = client.list_attached_policies(target=identity["IdentityId"])
res.should.have.key("policies").which.should.have.length_of(1)
res["policies"][0]["policyName"].should.equal(policy_name)
@mock_iot
def test_detach_policy():
client = boto3.client("iot", region_name="ap-northeast-1")
policy_name = "my-policy"
doc = "{}"
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert["certificateArn"]
client.create_policy(policyName=policy_name, policyDocument=doc)
client.attach_policy(policyName=policy_name, target=cert_arn)
res = client.list_attached_policies(target=cert_arn)
res.should.have.key("policies").which.should.have.length_of(1)
res["policies"][0]["policyName"].should.equal("my-policy")
client.detach_policy(policyName=policy_name, target=cert_arn)
res = client.list_attached_policies(target=cert_arn)
res.should.have.key("policies").which.should.be.empty
@mock_iot
def test_list_attached_policies():
client = boto3.client("iot", region_name="ap-northeast-1")
cert = client.create_keys_and_certificate(setAsActive=True)
policies = client.list_attached_policies(target=cert["certificateArn"])
policies["policies"].should.be.empty
@mock_iot
def test_policy_versions():
client = boto3.client("iot", region_name="ap-northeast-1")
policy_name = "my-policy"
doc = "{}"
policy = client.create_policy(policyName=policy_name, policyDocument=doc)
policy.should.have.key("policyName").which.should.equal(policy_name)
policy.should.have.key("policyArn").which.should_not.be.none
policy.should.have.key("policyDocument").which.should.equal(json.dumps({}))
policy.should.have.key("policyVersionId").which.should.equal("1")
policy = client.get_policy(policyName=policy_name)
policy.should.have.key("policyName").which.should.equal(policy_name)
policy.should.have.key("policyArn").which.should_not.be.none
policy.should.have.key("policyDocument").which.should.equal(json.dumps({}))
policy.should.have.key("defaultVersionId").which.should.equal(
policy["defaultVersionId"]
)
policy1 = client.create_policy_version(
policyName=policy_name,
policyDocument=json.dumps({"version": "version_1"}),
setAsDefault=True,
)
policy1.should.have.key("policyArn").which.should_not.be.none
policy1.should.have.key("policyDocument").which.should.equal(
json.dumps({"version": "version_1"})
)
policy1.should.have.key("policyVersionId").which.should.equal("2")
policy1.should.have.key("isDefaultVersion").which.should.equal(True)
policy2 = client.create_policy_version(
policyName=policy_name,
policyDocument=json.dumps({"version": "version_2"}),
setAsDefault=False,
)
policy2.should.have.key("policyArn").which.should_not.be.none
policy2.should.have.key("policyDocument").which.should.equal(
json.dumps({"version": "version_2"})
)
policy2.should.have.key("policyVersionId").which.should.equal("3")
policy2.should.have.key("isDefaultVersion").which.should.equal(False)
policy = client.get_policy(policyName=policy_name)
policy.should.have.key("policyName").which.should.equal(policy_name)
policy.should.have.key("policyArn").which.should_not.be.none
policy.should.have.key("policyDocument").which.should.equal(
json.dumps({"version": "version_1"})
)
policy.should.have.key("defaultVersionId").which.should.equal(
policy1["policyVersionId"]
)
policy3 = client.create_policy_version(
policyName=policy_name,
policyDocument=json.dumps({"version": "version_3"}),
setAsDefault=False,
)
policy3.should.have.key("policyArn").which.should_not.be.none
policy3.should.have.key("policyDocument").which.should.equal(
json.dumps({"version": "version_3"})
)
policy3.should.have.key("policyVersionId").which.should.equal("4")
policy3.should.have.key("isDefaultVersion").which.should.equal(False)
policy4 = client.create_policy_version(
policyName=policy_name,
policyDocument=json.dumps({"version": "version_4"}),
setAsDefault=False,
)
policy4.should.have.key("policyArn").which.should_not.be.none
policy4.should.have.key("policyDocument").which.should.equal(
json.dumps({"version": "version_4"})
)
policy4.should.have.key("policyVersionId").which.should.equal("5")
policy4.should.have.key("isDefaultVersion").which.should.equal(False)
policy_versions = client.list_policy_versions(policyName=policy_name)
policy_versions.should.have.key("policyVersions").which.should.have.length_of(5)
list(
map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])
).count(True).should.equal(1)
default_policy = list(
filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])
)
default_policy[0].should.have.key("versionId").should.equal(
policy1["policyVersionId"]
)
policy = client.get_policy(policyName=policy_name)
policy.should.have.key("policyName").which.should.equal(policy_name)
policy.should.have.key("policyArn").which.should_not.be.none
policy.should.have.key("policyDocument").which.should.equal(
json.dumps({"version": "version_1"})
)
policy.should.have.key("defaultVersionId").which.should.equal(
policy1["policyVersionId"]
)
client.set_default_policy_version(
policyName=policy_name, policyVersionId=policy4["policyVersionId"]
)
policy_versions = client.list_policy_versions(policyName=policy_name)
policy_versions.should.have.key("policyVersions").which.should.have.length_of(5)
list(
map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])
).count(True).should.equal(1)
default_policy = list(
filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])
)
default_policy[0].should.have.key("versionId").should.equal(
policy4["policyVersionId"]
)
policy = client.get_policy(policyName=policy_name)
policy.should.have.key("policyName").which.should.equal(policy_name)
policy.should.have.key("policyArn").which.should_not.be.none
policy.should.have.key("policyDocument").which.should.equal(
json.dumps({"version": "version_4"})
)
policy.should.have.key("defaultVersionId").which.should.equal(
policy4["policyVersionId"]
)
try:
client.create_policy_version(
policyName=policy_name,
policyDocument=json.dumps({"version": "version_5"}),
setAsDefault=False,
)
assert False, "Should have failed in previous call"
except Exception as exception:
exception.response["Error"]["Message"].should.equal(
"The policy %s already has the maximum number of versions (5)" % policy_name
)
client.delete_policy_version(policyName=policy_name, policyVersionId="1")
policy_versions = client.list_policy_versions(policyName=policy_name)
policy_versions.should.have.key("policyVersions").which.should.have.length_of(4)
client.delete_policy_version(
policyName=policy_name, policyVersionId=policy1["policyVersionId"]
)
policy_versions = client.list_policy_versions(policyName=policy_name)
policy_versions.should.have.key("policyVersions").which.should.have.length_of(3)
client.delete_policy_version(
policyName=policy_name, policyVersionId=policy2["policyVersionId"]
)
policy_versions = client.list_policy_versions(policyName=policy_name)
policy_versions.should.have.key("policyVersions").which.should.have.length_of(2)
client.delete_policy_version(
policyName=policy_name, policyVersionId=policy3["policyVersionId"]
)
policy_versions = client.list_policy_versions(policyName=policy_name)
policy_versions.should.have.key("policyVersions").which.should.have.length_of(1)
# should fail as it"s the default policy. Should use delete_policy instead
try:
client.delete_policy_version(
policyName=policy_name, policyVersionId=policy4["policyVersionId"]
)
assert False, "Should have failed in previous call"
except Exception as exception:
exception.response["Error"]["Message"].should.equal(
"Cannot delete the default version of a policy"
)
@mock_iot
def test_things():
client = boto3.client("iot", region_name="ap-northeast-1")
name = "my-thing"
type_name = "my-type-name"
# thing type
thing_type = client.create_thing_type(thingTypeName=type_name)
thing_type.should.have.key("thingTypeName").which.should.equal(type_name)
thing_type.should.have.key("thingTypeArn")
thing_type["thingTypeArn"].should.contain(type_name)
res = client.list_thing_types()
res.should.have.key("thingTypes").which.should.have.length_of(1)
for thing_type in res["thingTypes"]:
thing_type.should.have.key("thingTypeName").which.should_not.be.none
thing_type = client.describe_thing_type(thingTypeName=type_name)
thing_type.should.have.key("thingTypeName").which.should.equal(type_name)
thing_type.should.have.key("thingTypeProperties")
thing_type.should.have.key("thingTypeMetadata")
thing_type.should.have.key("thingTypeArn")
thing_type["thingTypeArn"].should.contain(type_name)
# thing
thing = client.create_thing(thingName=name, thingTypeName=type_name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
res = client.list_things()
res.should.have.key("things").which.should.have.length_of(1)
for thing in res["things"]:
thing.should.have.key("thingName").which.should_not.be.none
thing.should.have.key("thingArn").which.should_not.be.none
thing = client.update_thing(
thingName=name, attributePayload={"attributes": {"k1": "v1"}}
)
res = client.list_things()
res.should.have.key("things").which.should.have.length_of(1)
for thing in res["things"]:
thing.should.have.key("thingName").which.should_not.be.none
thing.should.have.key("thingArn").which.should_not.be.none
res["things"][0]["attributes"].should.have.key("k1").which.should.equal("v1")
thing = client.describe_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("defaultClientId")
thing.should.have.key("thingTypeName")
thing.should.have.key("attributes")
thing.should.have.key("version")
# delete thing
client.delete_thing(thingName=name)
res = client.list_things()
res.should.have.key("things").which.should.have.length_of(0)
# delete thing type
client.delete_thing_type(thingTypeName=type_name)
res = client.list_thing_types()
res.should.have.key("thingTypes").which.should.have.length_of(0)
@mock_iot
def test_list_thing_types():
client = boto3.client("iot", region_name="ap-northeast-1")
for i in range(0, 100):
client.create_thing_type(thingTypeName=str(i + 1))
thing_types = client.list_thing_types()
thing_types.should.have.key("nextToken")
thing_types.should.have.key("thingTypes").which.should.have.length_of(50)
thing_types["thingTypes"][0]["thingTypeName"].should.equal("1")
thing_types["thingTypes"][-1]["thingTypeName"].should.equal("50")
thing_types = client.list_thing_types(nextToken=thing_types["nextToken"])
thing_types.should.have.key("thingTypes").which.should.have.length_of(50)
thing_types.should_not.have.key("nextToken")
thing_types["thingTypes"][0]["thingTypeName"].should.equal("51")
thing_types["thingTypes"][-1]["thingTypeName"].should.equal("100")
@mock_iot
def test_list_thing_types_with_typename_filter():
client = boto3.client("iot", region_name="ap-northeast-1")
client.create_thing_type(thingTypeName="thing")
client.create_thing_type(thingTypeName="thingType")
client.create_thing_type(thingTypeName="thingTypeName")
client.create_thing_type(thingTypeName="thingTypeNameGroup")
client.create_thing_type(thingTypeName="shouldNotFind")
client.create_thing_type(thingTypeName="find me it shall not")
thing_types = client.list_thing_types(thingTypeName="thing")
thing_types.should_not.have.key("nextToken")
thing_types.should.have.key("thingTypes").which.should.have.length_of(4)
thing_types["thingTypes"][0]["thingTypeName"].should.equal("thing")
thing_types["thingTypes"][-1]["thingTypeName"].should.equal("thingTypeNameGroup")
thing_types = client.list_thing_types(thingTypeName="thingTypeName")
thing_types.should_not.have.key("nextToken")
thing_types.should.have.key("thingTypes").which.should.have.length_of(2)
thing_types["thingTypes"][0]["thingTypeName"].should.equal("thingTypeName")
thing_types["thingTypes"][-1]["thingTypeName"].should.equal("thingTypeNameGroup")
@mock_iot
def test_list_things_with_next_token():
client = boto3.client("iot", region_name="ap-northeast-1")
for i in range(0, 200):
client.create_thing(thingName=str(i + 1))
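    # list_things returns pages of 50; the ARN assertions below assume the
    # mock's placeholder account id of "1" in arn:aws:iot:<region>:1:thing/<name>.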
things = client.list_things()
things.should.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("1")
things["things"][0]["thingArn"].should.equal("arn:aws:iot:ap-northeast-1:1:thing/1")
things["things"][-1]["thingName"].should.equal("50")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/50"
)
things = client.list_things(nextToken=things["nextToken"])
things.should.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("51")
things["things"][0]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/51"
)
things["things"][-1]["thingName"].should.equal("100")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/100"
)
things = client.list_things(nextToken=things["nextToken"])
things.should.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("101")
things["things"][0]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/101"
)
things["things"][-1]["thingName"].should.equal("150")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/150"
)
things = client.list_things(nextToken=things["nextToken"])
things.should_not.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("151")
things["things"][0]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/151"
)
things["things"][-1]["thingName"].should.equal("200")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/200"
)
@mock_iot
def test_list_things_with_attribute_and_thing_type_filter_and_next_token():
client = boto3.client("iot", region_name="ap-northeast-1")
client.create_thing_type(thingTypeName="my-thing-type")
for i in range(0, 200):
if not (i + 1) % 3:
attribute_payload = {"attributes": {"foo": "bar"}}
elif not (i + 1) % 5:
attribute_payload = {"attributes": {"bar": "foo"}}
else:
attribute_payload = {}
if not (i + 1) % 2:
thing_type_name = "my-thing-type"
client.create_thing(
thingName=str(i + 1),
thingTypeName=thing_type_name,
attributePayload=attribute_payload,
)
else:
client.create_thing(
thingName=str(i + 1), attributePayload=attribute_payload
)
# Test filter for thingTypeName
things = client.list_things(thingTypeName=thing_type_name)
things.should.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("2")
things["things"][0]["thingArn"].should.equal("arn:aws:iot:ap-northeast-1:1:thing/2")
things["things"][-1]["thingName"].should.equal("100")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/100"
)
all(item["thingTypeName"] == thing_type_name for item in things["things"])
things = client.list_things(
nextToken=things["nextToken"], thingTypeName=thing_type_name
)
things.should_not.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("102")
things["things"][0]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/102"
)
things["things"][-1]["thingName"].should.equal("200")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/200"
)
all(item["thingTypeName"] == thing_type_name for item in things["things"])
# Test filter for attributes
things = client.list_things(attributeName="foo", attributeValue="bar")
things.should.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("3")
things["things"][0]["thingArn"].should.equal("arn:aws:iot:ap-northeast-1:1:thing/3")
things["things"][-1]["thingName"].should.equal("150")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/150"
)
all(item["attributes"] == {"foo": "bar"} for item in things["things"])
things = client.list_things(
nextToken=things["nextToken"], attributeName="foo", attributeValue="bar"
)
things.should_not.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(16)
things["things"][0]["thingName"].should.equal("153")
things["things"][0]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/153"
)
things["things"][-1]["thingName"].should.equal("198")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/198"
)
all(item["attributes"] == {"foo": "bar"} for item in things["things"])
# Test filter for attributes and thingTypeName
things = client.list_things(
thingTypeName=thing_type_name, attributeName="foo", attributeValue="bar"
)
things.should_not.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(33)
things["things"][0]["thingName"].should.equal("6")
things["things"][0]["thingArn"].should.equal("arn:aws:iot:ap-northeast-1:1:thing/6")
things["things"][-1]["thingName"].should.equal("198")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/198"
)
    assert all(
        item["attributes"] == {"foo": "bar"}
        and item["thingTypeName"] == thing_type_name
        for item in things["things"]
    )
@mock_iot
def test_endpoints():
region_name = "ap-northeast-1"
client = boto3.client("iot", region_name=region_name)
# iot:Data
endpoint = client.describe_endpoint(endpointType="iot:Data")
endpoint.should.have.key("endpointAddress").which.should_not.contain("ats")
endpoint.should.have.key("endpointAddress").which.should.contain(
"iot.{}.amazonaws.com".format(region_name)
)
# iot:Data-ATS
endpoint = client.describe_endpoint(endpointType="iot:Data-ATS")
endpoint.should.have.key("endpointAddress").which.should.contain(
"ats.iot.{}.amazonaws.com".format(region_name)
)
    # iot:CredentialProvider
endpoint = client.describe_endpoint(endpointType="iot:CredentialProvider")
endpoint.should.have.key("endpointAddress").which.should.contain(
"credentials.iot.{}.amazonaws.com".format(region_name)
)
    # iot:Jobs
endpoint = client.describe_endpoint(endpointType="iot:Jobs")
endpoint.should.have.key("endpointAddress").which.should.contain(
"jobs.iot.{}.amazonaws.com".format(region_name)
)
# raise InvalidRequestException
try:
client.describe_endpoint(endpointType="iot:Abc")
except client.exceptions.InvalidRequestException as exc:
error_code = exc.response["Error"]["Code"]
error_code.should.equal("InvalidRequestException")
else:
raise Exception("Should have raised error")
@mock_iot
def test_certificate_id_generation_deterministic():
# Creating the same certificate twice should result in the same certificate ID
client = boto3.client("iot", region_name="us-east-1")
cert1 = client.create_keys_and_certificate(setAsActive=False)
client.delete_certificate(certificateId=cert1["certificateId"])
cert2 = client.register_certificate(
certificatePem=cert1["certificatePem"], setAsActive=False
)
cert2.should.have.key("certificateId").which.should.equal(cert1["certificateId"])
client.delete_certificate(certificateId=cert2["certificateId"])
@mock_iot
def test_certs():
client = boto3.client("iot", region_name="us-east-1")
cert = client.create_keys_and_certificate(setAsActive=True)
cert.should.have.key("certificateArn").which.should_not.be.none
cert.should.have.key("certificateId").which.should_not.be.none
cert.should.have.key("certificatePem").which.should_not.be.none
cert.should.have.key("keyPair")
cert["keyPair"].should.have.key("PublicKey").which.should_not.be.none
cert["keyPair"].should.have.key("PrivateKey").which.should_not.be.none
cert_id = cert["certificateId"]
cert = client.describe_certificate(certificateId=cert_id)
cert.should.have.key("certificateDescription")
cert_desc = cert["certificateDescription"]
cert_desc.should.have.key("certificateArn").which.should_not.be.none
cert_desc.should.have.key("certificateId").which.should_not.be.none
cert_desc.should.have.key("certificatePem").which.should_not.be.none
cert_desc.should.have.key("validity").which.should_not.be.none
validity = cert_desc["validity"]
validity.should.have.key("notBefore").which.should_not.be.none
validity.should.have.key("notAfter").which.should_not.be.none
cert_desc.should.have.key("status").which.should.equal("ACTIVE")
cert_pem = cert_desc["certificatePem"]
res = client.list_certificates()
for cert in res["certificates"]:
cert.should.have.key("certificateArn").which.should_not.be.none
cert.should.have.key("certificateId").which.should_not.be.none
cert.should.have.key("status").which.should_not.be.none
cert.should.have.key("creationDate").which.should_not.be.none
client.update_certificate(certificateId=cert_id, newStatus="REVOKED")
cert = client.describe_certificate(certificateId=cert_id)
cert_desc = cert["certificateDescription"]
cert_desc.should.have.key("status").which.should.equal("REVOKED")
client.delete_certificate(certificateId=cert_id)
res = client.list_certificates()
res.should.have.key("certificates")
# Test register_certificate flow
cert = client.register_certificate(certificatePem=cert_pem, setAsActive=True)
cert.should.have.key("certificateId").which.should_not.be.none
cert.should.have.key("certificateArn").which.should_not.be.none
cert_id = cert["certificateId"]
res = client.list_certificates()
res.should.have.key("certificates").which.should.have.length_of(1)
for cert in res["certificates"]:
cert.should.have.key("certificateArn").which.should_not.be.none
cert.should.have.key("certificateId").which.should_not.be.none
cert.should.have.key("status").which.should_not.be.none
cert.should.have.key("creationDate").which.should_not.be.none
client.update_certificate(certificateId=cert_id, newStatus="REVOKED")
cert = client.describe_certificate(certificateId=cert_id)
cert_desc = cert["certificateDescription"]
cert_desc.should.have.key("status").which.should.equal("REVOKED")
client.delete_certificate(certificateId=cert_id)
res = client.list_certificates()
res.should.have.key("certificates")
# Test register_certificate without CA flow
cert = client.register_certificate_without_ca(
certificatePem=cert_pem, status="INACTIVE"
)
cert.should.have.key("certificateId").which.should_not.be.none
cert.should.have.key("certificateArn").which.should_not.be.none
cert_id = cert["certificateId"]
res = client.list_certificates()
res.should.have.key("certificates").which.should.have.length_of(1)
for cert in res["certificates"]:
cert.should.have.key("certificateArn").which.should_not.be.none
cert.should.have.key("certificateId").which.should_not.be.none
cert.should.have.key("status").which.should_not.be.none
cert.should.have.key("creationDate").which.should_not.be.none
client.delete_certificate(certificateId=cert_id)
res = client.list_certificates()
res.should.have.key("certificates")
@mock_iot
def test_create_certificate_validation():
# Test we can't create a cert that already exists
client = boto3.client("iot", region_name="us-east-1")
cert = client.create_keys_and_certificate(setAsActive=False)
with pytest.raises(ClientError) as e:
client.register_certificate(
certificatePem=cert["certificatePem"], setAsActive=False
)
e.value.response["Error"]["Message"].should.contain(
"The certificate is already provisioned or registered"
)
with pytest.raises(ClientError) as e:
client.register_certificate_without_ca(
certificatePem=cert["certificatePem"], status="ACTIVE"
)
e.value.response["Error"]["Message"].should.contain(
"The certificate is already provisioned or registered"
)
@mock_iot
def test_delete_policy_validation():
doc = """{
"Version": "2012-10-17",
"Statement":[
{
"Effect":"Allow",
"Action":[
"iot: *"
],
"Resource":"*"
}
]
}
"""
client = boto3.client("iot", region_name="ap-northeast-1")
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert["certificateArn"]
policy_name = "my-policy"
client.create_policy(policyName=policy_name, policyDocument=doc)
client.attach_principal_policy(policyName=policy_name, principal=cert_arn)
with pytest.raises(ClientError) as e:
client.delete_policy(policyName=policy_name)
e.value.response["Error"]["Message"].should.contain(
"The policy cannot be deleted as the policy is attached to one or more principals (name=%s)"
% policy_name
)
res = client.list_policies()
res.should.have.key("policies").which.should.have.length_of(1)
client.detach_principal_policy(policyName=policy_name, principal=cert_arn)
client.delete_policy(policyName=policy_name)
res = client.list_policies()
res.should.have.key("policies").which.should.have.length_of(0)
@mock_iot
def test_delete_certificate_validation():
doc = """{
"Version": "2012-10-17",
"Statement":[
{
"Effect":"Allow",
"Action":[
"iot: *"
],
"Resource":"*"
}
]
}
"""
client = boto3.client("iot", region_name="ap-northeast-1")
cert = client.create_keys_and_certificate(setAsActive=True)
cert_id = cert["certificateId"]
cert_arn = cert["certificateArn"]
policy_name = "my-policy"
thing_name = "thing-1"
client.create_policy(policyName=policy_name, policyDocument=doc)
client.attach_principal_policy(policyName=policy_name, principal=cert_arn)
client.create_thing(thingName=thing_name)
client.attach_thing_principal(thingName=thing_name, principal=cert_arn)
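    # delete_certificate is expected to fail three times, in this order: the
    # certificate is still ACTIVE, then a thing is still attached, then a policy
    # is still attached. Only after detaching everything should deletion succeed.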
with pytest.raises(ClientError) as e:
client.delete_certificate(certificateId=cert_id)
e.value.response["Error"]["Message"].should.contain(
"Certificate must be deactivated (not ACTIVE) before deletion."
)
res = client.list_certificates()
res.should.have.key("certificates").which.should.have.length_of(1)
client.update_certificate(certificateId=cert_id, newStatus="REVOKED")
with pytest.raises(ClientError) as e:
client.delete_certificate(certificateId=cert_id)
e.value.response["Error"]["Message"].should.contain(
"Things must be detached before deletion (arn: %s)" % cert_arn
)
res = client.list_certificates()
res.should.have.key("certificates").which.should.have.length_of(1)
client.detach_thing_principal(thingName=thing_name, principal=cert_arn)
with pytest.raises(ClientError) as e:
client.delete_certificate(certificateId=cert_id)
e.value.response["Error"]["Message"].should.contain(
"Certificate policies must be detached before deletion (arn: %s)" % cert_arn
)
res = client.list_certificates()
res.should.have.key("certificates").which.should.have.length_of(1)
client.detach_principal_policy(policyName=policy_name, principal=cert_arn)
client.delete_certificate(certificateId=cert_id)
res = client.list_certificates()
res.should.have.key("certificates").which.should.have.length_of(0)
@mock_iot
def test_certs_create_inactive():
client = boto3.client("iot", region_name="ap-northeast-1")
cert = client.create_keys_and_certificate(setAsActive=False)
cert_id = cert["certificateId"]
cert = client.describe_certificate(certificateId=cert_id)
cert.should.have.key("certificateDescription")
cert_desc = cert["certificateDescription"]
cert_desc.should.have.key("status").which.should.equal("INACTIVE")
client.update_certificate(certificateId=cert_id, newStatus="ACTIVE")
cert = client.describe_certificate(certificateId=cert_id)
cert.should.have.key("certificateDescription")
cert_desc = cert["certificateDescription"]
cert_desc.should.have.key("status").which.should.equal("ACTIVE")
@mock_iot
def test_policy():
client = boto3.client("iot", region_name="ap-northeast-1")
name = "my-policy"
doc = "{}"
policy = client.create_policy(policyName=name, policyDocument=doc)
policy.should.have.key("policyName").which.should.equal(name)
policy.should.have.key("policyArn").which.should_not.be.none
policy.should.have.key("policyDocument").which.should.equal(doc)
policy.should.have.key("policyVersionId").which.should.equal("1")
policy = client.get_policy(policyName=name)
policy.should.have.key("policyName").which.should.equal(name)
policy.should.have.key("policyArn").which.should_not.be.none
policy.should.have.key("policyDocument").which.should.equal(doc)
policy.should.have.key("defaultVersionId").which.should.equal("1")
res = client.list_policies()
res.should.have.key("policies").which.should.have.length_of(1)
for policy in res["policies"]:
policy.should.have.key("policyName").which.should_not.be.none
policy.should.have.key("policyArn").which.should_not.be.none
client.delete_policy(policyName=name)
res = client.list_policies()
res.should.have.key("policies").which.should.have.length_of(0)
@mock_iot
def test_principal_policy():
client = boto3.client("iot", region_name="ap-northeast-1")
policy_name = "my-policy"
doc = "{}"
client.create_policy(policyName=policy_name, policyDocument=doc)
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert["certificateArn"]
client.attach_policy(policyName=policy_name, target=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key("policies").which.should.have.length_of(1)
for policy in res["policies"]:
policy.should.have.key("policyName").which.should_not.be.none
policy.should.have.key("policyArn").which.should_not.be.none
    # attaching again is a no-op if the policy is already attached to the certificate
client.attach_policy(policyName=policy_name, target=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key("policies").which.should.have.length_of(1)
for policy in res["policies"]:
policy.should.have.key("policyName").which.should_not.be.none
policy.should.have.key("policyArn").which.should_not.be.none
res = client.list_policy_principals(policyName=policy_name)
res.should.have.key("principals").which.should.have.length_of(1)
for principal in res["principals"]:
principal.should_not.be.none
client.detach_policy(policyName=policy_name, target=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key("policies").which.should.have.length_of(0)
res = client.list_policy_principals(policyName=policy_name)
res.should.have.key("principals").which.should.have.length_of(0)
with pytest.raises(ClientError) as e:
client.detach_policy(policyName=policy_name, target=cert_arn)
e.value.response["Error"]["Code"].should.equal("ResourceNotFoundException")
@mock_iot
def test_principal_policy_deprecated():
client = boto3.client("iot", region_name="ap-northeast-1")
policy_name = "my-policy"
doc = "{}"
policy = client.create_policy(policyName=policy_name, policyDocument=doc)
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert["certificateArn"]
client.attach_principal_policy(policyName=policy_name, principal=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key("policies").which.should.have.length_of(1)
for policy in res["policies"]:
policy.should.have.key("policyName").which.should_not.be.none
policy.should.have.key("policyArn").which.should_not.be.none
res = client.list_policy_principals(policyName=policy_name)
res.should.have.key("principals").which.should.have.length_of(1)
for principal in res["principals"]:
principal.should_not.be.none
client.detach_principal_policy(policyName=policy_name, principal=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key("policies").which.should.have.length_of(0)
res = client.list_policy_principals(policyName=policy_name)
res.should.have.key("principals").which.should.have.length_of(0)
@mock_iot
def test_principal_thing():
client = boto3.client("iot", region_name="ap-northeast-1")
thing_name = "my-thing"
client.create_thing(thingName=thing_name)
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert["certificateArn"]
client.attach_thing_principal(thingName=thing_name, principal=cert_arn)
res = client.list_principal_things(principal=cert_arn)
res.should.have.key("things").which.should.have.length_of(1)
res["things"][0].should.equal(thing_name)
res = client.list_thing_principals(thingName=thing_name)
res.should.have.key("principals").which.should.have.length_of(1)
for principal in res["principals"]:
principal.should_not.be.none
client.detach_thing_principal(thingName=thing_name, principal=cert_arn)
res = client.list_principal_things(principal=cert_arn)
res.should.have.key("things").which.should.have.length_of(0)
res = client.list_thing_principals(thingName=thing_name)
res.should.have.key("principals").which.should.have.length_of(0)
with pytest.raises(ClientError) as e:
client.list_thing_principals(thingName="xxx")
e.value.response["Error"]["Code"].should.equal("ResourceNotFoundException")
e.value.response["Error"]["Message"].should.equal(
"Failed to list principals for thing xxx because the thing does not exist in your account"
)
@mock_iot
def test_delete_principal_thing():
client = boto3.client("iot", region_name="ap-northeast-1")
thing_name = "my-thing"
client.create_thing(thingName=thing_name)
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert["certificateArn"]
cert_id = cert["certificateId"]
client.attach_thing_principal(thingName=thing_name, principal=cert_arn)
client.delete_thing(thingName=thing_name)
res = client.list_principal_things(principal=cert_arn)
res.should.have.key("things").which.should.have.length_of(0)
client.update_certificate(certificateId=cert_id, newStatus="INACTIVE")
client.delete_certificate(certificateId=cert_id)
class TestListThingGroup:
group_name_1a = "my-group-name-1a"
group_name_1b = "my-group-name-1b"
group_name_2a = "my-group-name-2a"
group_name_2b = "my-group-name-2b"
group_name_3a = "my-group-name-3a"
group_name_3b = "my-group-name-3b"
group_name_3c = "my-group-name-3c"
group_name_3d = "my-group-name-3d"
tree_dict = {
group_name_1a: {
group_name_2a: {group_name_3a: {}, group_name_3b: {}},
group_name_2b: {group_name_3c: {}, group_name_3d: {}},
},
group_name_1b: {},
}
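    # The tree above contains 8 groups in total: two roots (1a, 1b), two children
    # of 1a (2a, 2b), and four leaf groups (3a-3d) split between 2a and 2b.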
@mock_iot
def test_should_list_all_groups(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups()
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(8)
@mock_iot
def test_should_list_all_groups_non_recursively(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(recursive=False)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
@mock_iot
def test_should_list_all_groups_filtered_by_parent(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(parentGroup=self.group_name_1a)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(6)
resp = client.list_thing_groups(parentGroup=self.group_name_2a)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(parentGroup=self.group_name_1b)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(0)
with pytest.raises(ClientError) as e:
client.list_thing_groups(parentGroup="inexistant-group-name")
e.value.response["Error"]["Code"].should.equal("ResourceNotFoundException")
@mock_iot
def test_should_list_all_groups_filtered_by_parent_non_recursively(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(parentGroup=self.group_name_1a, recursive=False)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(parentGroup=self.group_name_2a, recursive=False)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
@mock_iot
def test_should_list_all_groups_filtered_by_name_prefix(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(namePrefixFilter="my-group-name-1")
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(namePrefixFilter="my-group-name-3")
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(4)
resp = client.list_thing_groups(namePrefixFilter="prefix-which-doesn-not-match")
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(0)
@mock_iot
def test_should_list_all_groups_filtered_by_name_prefix_non_recursively(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(
namePrefixFilter="my-group-name-1", recursive=False
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(
namePrefixFilter="my-group-name-3", recursive=False
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(0)
@mock_iot
def test_should_list_all_groups_filtered_by_name_prefix_and_parent(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(
namePrefixFilter="my-group-name-2", parentGroup=self.group_name_1a
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(
namePrefixFilter="my-group-name-3", parentGroup=self.group_name_1a
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(4)
resp = client.list_thing_groups(
namePrefixFilter="prefix-which-doesn-not-match",
parentGroup=self.group_name_1a,
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(0)
@mock_iot
def test_delete_thing_group():
client = boto3.client("iot", region_name="ap-northeast-1")
group_name_1a = "my-group-name-1a"
group_name_2a = "my-group-name-2a"
tree_dict = {
        group_name_1a: {group_name_2a: {}},
}
generate_thing_group_tree(client, tree_dict)
# delete group with child
try:
client.delete_thing_group(thingGroupName=group_name_1a)
except client.exceptions.InvalidRequestException as exc:
error_code = exc.response["Error"]["Code"]
error_code.should.equal("InvalidRequestException")
else:
raise Exception("Should have raised error")
# delete child group
client.delete_thing_group(thingGroupName=group_name_2a)
res = client.list_thing_groups()
res.should.have.key("thingGroups").which.should.have.length_of(1)
res["thingGroups"].should_not.have.key(group_name_2a)
# now that there is no child group, we can delete the previous group safely
client.delete_thing_group(thingGroupName=group_name_1a)
res = client.list_thing_groups()
res.should.have.key("thingGroups").which.should.have.length_of(0)
    # Deleting a thing group that does not exist does not raise an error.
res = client.delete_thing_group(thingGroupName="non-existent-group-name")
res["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
@mock_iot
def test_describe_thing_group_metadata_hierarchy():
client = boto3.client("iot", region_name="ap-northeast-1")
group_name_1a = "my-group-name-1a"
group_name_1b = "my-group-name-1b"
group_name_2a = "my-group-name-2a"
group_name_2b = "my-group-name-2b"
group_name_3a = "my-group-name-3a"
group_name_3b = "my-group-name-3b"
group_name_3c = "my-group-name-3c"
group_name_3d = "my-group-name-3d"
tree_dict = {
group_name_1a: {
group_name_2a: {group_name_3a: {}, group_name_3b: {}},
group_name_2b: {group_name_3c: {}, group_name_3d: {}},
},
group_name_1b: {},
}
group_catalog = generate_thing_group_tree(client, tree_dict)
# describe groups
# groups level 1
# 1a
thing_group_description1a = client.describe_thing_group(
thingGroupName=group_name_1a
)
thing_group_description1a.should.have.key("thingGroupName").which.should.equal(
group_name_1a
)
thing_group_description1a.should.have.key("thingGroupProperties")
thing_group_description1a.should.have.key("thingGroupMetadata")
thing_group_description1a["thingGroupMetadata"].should.have.key("creationDate")
thing_group_description1a.should.have.key("version")
# 1b
thing_group_description1b = client.describe_thing_group(
thingGroupName=group_name_1b
)
thing_group_description1b.should.have.key("thingGroupName").which.should.equal(
group_name_1b
)
thing_group_description1b.should.have.key("thingGroupProperties")
thing_group_description1b.should.have.key("thingGroupMetadata")
thing_group_description1b["thingGroupMetadata"].should.have.length_of(1)
thing_group_description1b["thingGroupMetadata"].should.have.key("creationDate")
thing_group_description1b.should.have.key("version")
# groups level 2
# 2a
thing_group_description2a = client.describe_thing_group(
thingGroupName=group_name_2a
)
thing_group_description2a.should.have.key("thingGroupName").which.should.equal(
group_name_2a
)
thing_group_description2a.should.have.key("thingGroupProperties")
thing_group_description2a.should.have.key("thingGroupMetadata")
thing_group_description2a["thingGroupMetadata"].should.have.length_of(3)
thing_group_description2a["thingGroupMetadata"].should.have.key(
"parentGroupName"
).being.equal(group_name_1a)
thing_group_description2a["thingGroupMetadata"].should.have.key(
"rootToParentThingGroups"
)
thing_group_description2a["thingGroupMetadata"][
"rootToParentThingGroups"
].should.have.length_of(1)
thing_group_description2a["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupName"
].should.match(group_name_1a)
thing_group_description2a["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description2a.should.have.key("version")
# 2b
thing_group_description2b = client.describe_thing_group(
thingGroupName=group_name_2b
)
thing_group_description2b.should.have.key("thingGroupName").which.should.equal(
group_name_2b
)
thing_group_description2b.should.have.key("thingGroupProperties")
thing_group_description2b.should.have.key("thingGroupMetadata")
thing_group_description2b["thingGroupMetadata"].should.have.length_of(3)
thing_group_description2b["thingGroupMetadata"].should.have.key(
"parentGroupName"
).being.equal(group_name_1a)
thing_group_description2b["thingGroupMetadata"].should.have.key(
"rootToParentThingGroups"
)
thing_group_description2b["thingGroupMetadata"][
"rootToParentThingGroups"
].should.have.length_of(1)
thing_group_description2b["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupName"
].should.match(group_name_1a)
thing_group_description2b["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description2b.should.have.key("version")
# groups level 3
# 3a
thing_group_description3a = client.describe_thing_group(
thingGroupName=group_name_3a
)
thing_group_description3a.should.have.key("thingGroupName").which.should.equal(
group_name_3a
)
thing_group_description3a.should.have.key("thingGroupProperties")
thing_group_description3a.should.have.key("thingGroupMetadata")
thing_group_description3a["thingGroupMetadata"].should.have.length_of(3)
thing_group_description3a["thingGroupMetadata"].should.have.key(
"parentGroupName"
).being.equal(group_name_2a)
thing_group_description3a["thingGroupMetadata"].should.have.key(
"rootToParentThingGroups"
)
thing_group_description3a["thingGroupMetadata"][
"rootToParentThingGroups"
].should.have.length_of(2)
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupName"
].should.match(group_name_1a)
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2a)
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(group_catalog[group_name_2a]["thingGroupArn"])
thing_group_description3a.should.have.key("version")
# 3b
thing_group_description3b = client.describe_thing_group(
thingGroupName=group_name_3b
)
thing_group_description3b.should.have.key("thingGroupName").which.should.equal(
group_name_3b
)
thing_group_description3b.should.have.key("thingGroupProperties")
thing_group_description3b.should.have.key("thingGroupMetadata")
thing_group_description3b["thingGroupMetadata"].should.have.length_of(3)
thing_group_description3b["thingGroupMetadata"].should.have.key(
"parentGroupName"
).being.equal(group_name_2a)
thing_group_description3b["thingGroupMetadata"].should.have.key(
"rootToParentThingGroups"
)
thing_group_description3b["thingGroupMetadata"][
"rootToParentThingGroups"
].should.have.length_of(2)
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupName"
].should.match(group_name_1a)
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2a)
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(group_catalog[group_name_2a]["thingGroupArn"])
thing_group_description3b.should.have.key("version")
# 3c
thing_group_description3c = client.describe_thing_group(
thingGroupName=group_name_3c
)
thing_group_description3c.should.have.key("thingGroupName").which.should.equal(
group_name_3c
)
thing_group_description3c.should.have.key("thingGroupProperties")
thing_group_description3c.should.have.key("thingGroupMetadata")
thing_group_description3c["thingGroupMetadata"].should.have.length_of(3)
thing_group_description3c["thingGroupMetadata"].should.have.key(
"parentGroupName"
).being.equal(group_name_2b)
thing_group_description3c["thingGroupMetadata"].should.have.key(
"rootToParentThingGroups"
)
thing_group_description3c["thingGroupMetadata"][
"rootToParentThingGroups"
].should.have.length_of(2)
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupName"
].should.match(group_name_1a)
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2b)
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(group_catalog[group_name_2b]["thingGroupArn"])
thing_group_description3c.should.have.key("version")
# 3d
thing_group_description3d = client.describe_thing_group(
thingGroupName=group_name_3d
)
thing_group_description3d.should.have.key("thingGroupName").which.should.equal(
group_name_3d
)
thing_group_description3d.should.have.key("thingGroupProperties")
thing_group_description3d.should.have.key("thingGroupMetadata")
thing_group_description3d["thingGroupMetadata"].should.have.length_of(3)
thing_group_description3d["thingGroupMetadata"].should.have.key(
"parentGroupName"
).being.equal(group_name_2b)
thing_group_description3d["thingGroupMetadata"].should.have.key(
"rootToParentThingGroups"
)
thing_group_description3d["thingGroupMetadata"][
"rootToParentThingGroups"
].should.have.length_of(2)
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupName"
].should.match(group_name_1a)
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2b)
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(group_catalog[group_name_2b]["thingGroupArn"])
thing_group_description3d.should.have.key("version")
@mock_iot
def test_thing_groups():
client = boto3.client("iot", region_name="ap-northeast-1")
group_name = "my-group-name"
# thing group
thing_group = client.create_thing_group(thingGroupName=group_name)
thing_group.should.have.key("thingGroupName").which.should.equal(group_name)
thing_group.should.have.key("thingGroupArn")
thing_group["thingGroupArn"].should.contain(group_name)
res = client.list_thing_groups()
res.should.have.key("thingGroups").which.should.have.length_of(1)
for thing_group in res["thingGroups"]:
thing_group.should.have.key("groupName").which.should_not.be.none
thing_group.should.have.key("groupArn").which.should_not.be.none
thing_group = client.describe_thing_group(thingGroupName=group_name)
thing_group.should.have.key("thingGroupName").which.should.equal(group_name)
thing_group.should.have.key("thingGroupProperties")
thing_group.should.have.key("thingGroupMetadata")
thing_group.should.have.key("version")
thing_group.should.have.key("thingGroupArn")
thing_group["thingGroupArn"].should.contain(group_name)
# delete thing group
client.delete_thing_group(thingGroupName=group_name)
res = client.list_thing_groups()
res.should.have.key("thingGroups").which.should.have.length_of(0)
# props create test
props = {
"thingGroupDescription": "my first thing group",
"attributePayload": {"attributes": {"key1": "val01", "Key02": "VAL2"}},
}
thing_group = client.create_thing_group(
thingGroupName=group_name, thingGroupProperties=props
)
thing_group.should.have.key("thingGroupName").which.should.equal(group_name)
thing_group.should.have.key("thingGroupArn")
thing_group = client.describe_thing_group(thingGroupName=group_name)
thing_group.should.have.key("thingGroupProperties").which.should.have.key(
"attributePayload"
).which.should.have.key("attributes")
res_props = thing_group["thingGroupProperties"]["attributePayload"]["attributes"]
res_props.should.have.key("key1").which.should.equal("val01")
res_props.should.have.key("Key02").which.should.equal("VAL2")
    # props update test with merge: existing attributes should be preserved
new_props = {"attributePayload": {"attributes": {"k3": "v3"}, "merge": True}}
client.update_thing_group(thingGroupName=group_name, thingGroupProperties=new_props)
thing_group = client.describe_thing_group(thingGroupName=group_name)
thing_group.should.have.key("thingGroupProperties").which.should.have.key(
"attributePayload"
).which.should.have.key("attributes")
res_props = thing_group["thingGroupProperties"]["attributePayload"]["attributes"]
res_props.should.have.key("key1").which.should.equal("val01")
res_props.should.have.key("Key02").which.should.equal("VAL2")
res_props.should.have.key("k3").which.should.equal("v3")
    # props update test without merge: existing attributes should be replaced
new_props = {"attributePayload": {"attributes": {"k4": "v4"}}}
client.update_thing_group(thingGroupName=group_name, thingGroupProperties=new_props)
thing_group = client.describe_thing_group(thingGroupName=group_name)
thing_group.should.have.key("thingGroupProperties").which.should.have.key(
"attributePayload"
).which.should.have.key("attributes")
res_props = thing_group["thingGroupProperties"]["attributePayload"]["attributes"]
res_props.should.have.key("k4").which.should.equal("v4")
res_props.should_not.have.key("key1")
@mock_iot
def test_thing_group_relations():
client = boto3.client("iot", region_name="ap-northeast-1")
name = "my-thing"
group_name = "my-group-name"
# thing group
thing_group = client.create_thing_group(thingGroupName=group_name)
thing_group.should.have.key("thingGroupName").which.should.equal(group_name)
thing_group.should.have.key("thingGroupArn")
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
    # add the thing to the group using all 4 name/ARN argument combinations
client.add_thing_to_thing_group(thingGroupName=group_name, thingName=name)
client.add_thing_to_thing_group(
thingGroupArn=thing_group["thingGroupArn"], thingArn=thing["thingArn"]
)
client.add_thing_to_thing_group(
thingGroupName=group_name, thingArn=thing["thingArn"]
)
client.add_thing_to_thing_group(
thingGroupArn=thing_group["thingGroupArn"], thingName=name
)
things = client.list_things_in_thing_group(thingGroupName=group_name)
things.should.have.key("things")
things["things"].should.have.length_of(1)
thing_groups = client.list_thing_groups_for_thing(thingName=name)
thing_groups.should.have.key("thingGroups")
thing_groups["thingGroups"].should.have.length_of(1)
    # remove the thing from the group using all 4 name/ARN argument combinations
client.remove_thing_from_thing_group(thingGroupName=group_name, thingName=name)
client.remove_thing_from_thing_group(
thingGroupArn=thing_group["thingGroupArn"], thingArn=thing["thingArn"]
)
client.remove_thing_from_thing_group(
thingGroupName=group_name, thingArn=thing["thingArn"]
)
client.remove_thing_from_thing_group(
thingGroupArn=thing_group["thingGroupArn"], thingName=name
)
things = client.list_things_in_thing_group(thingGroupName=group_name)
things.should.have.key("things")
things["things"].should.have.length_of(0)
# update thing group for thing
client.update_thing_groups_for_thing(thingName=name, thingGroupsToAdd=[group_name])
things = client.list_things_in_thing_group(thingGroupName=group_name)
things.should.have.key("things")
things["things"].should.have.length_of(1)
client.update_thing_groups_for_thing(
thingName=name, thingGroupsToRemove=[group_name]
)
things = client.list_things_in_thing_group(thingGroupName=group_name)
things.should.have.key("things")
things["things"].should.have.length_of(0)
@mock_iot
def test_create_job():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
    # thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
# job document
job_document = {"field": "value"}
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
document=json.dumps(job_document),
description="Description",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job.should.have.key("description")
@mock_iot
def test_list_jobs():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
    # thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
# job document
job_document = {"field": "value"}
job1 = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
document=json.dumps(job_document),
description="Description",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job1.should.have.key("jobId").which.should.equal(job_id)
job1.should.have.key("jobArn")
job1.should.have.key("description")
job2 = client.create_job(
jobId=job_id + "1",
targets=[thing["thingArn"]],
document=json.dumps(job_document),
description="Description",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job2.should.have.key("jobId").which.should.equal(job_id + "1")
job2.should.have.key("jobArn")
job2.should.have.key("description")
jobs = client.list_jobs()
jobs.should.have.key("jobs")
jobs.should_not.have.key("nextToken")
jobs["jobs"][0].should.have.key("jobId").which.should.equal(job_id)
jobs["jobs"][1].should.have.key("jobId").which.should.equal(job_id + "1")
@mock_iot
def test_describe_job():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job = client.describe_job(jobId=job_id)
job.should.have.key("documentSource")
job.should.have.key("job")
job.should.have.key("job").which.should.have.key("jobArn")
job.should.have.key("job").which.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("job").which.should.have.key("targets")
job.should.have.key("job").which.should.have.key("jobProcessDetails")
job.should.have.key("job").which.should.have.key("lastUpdatedAt")
job.should.have.key("job").which.should.have.key("createdAt")
job.should.have.key("job").which.should.have.key("jobExecutionsRolloutConfig")
job.should.have.key("job").which.should.have.key(
"targetSelection"
).which.should.equal("CONTINUOUS")
job.should.have.key("job").which.should.have.key("presignedUrlConfig")
job.should.have.key("job").which.should.have.key(
"presignedUrlConfig"
).which.should.have.key("roleArn").which.should.equal(
"arn:aws:iam::1:role/service-role/iot_job_role"
)
job.should.have.key("job").which.should.have.key(
"presignedUrlConfig"
).which.should.have.key("expiresInSec").which.should.equal(123)
job.should.have.key("job").which.should.have.key(
"jobExecutionsRolloutConfig"
).which.should.have.key("maximumPerMinute").which.should.equal(10)
@mock_iot
def test_describe_job_1():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
# job document
job_document = {"field": "value"}
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
document=json.dumps(job_document),
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job = client.describe_job(jobId=job_id)
job.should.have.key("job")
job.should.have.key("job").which.should.have.key("jobArn")
job.should.have.key("job").which.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("job").which.should.have.key("targets")
job.should.have.key("job").which.should.have.key("jobProcessDetails")
job.should.have.key("job").which.should.have.key("lastUpdatedAt")
job.should.have.key("job").which.should.have.key("createdAt")
job.should.have.key("job").which.should.have.key("jobExecutionsRolloutConfig")
job.should.have.key("job").which.should.have.key(
"targetSelection"
).which.should.equal("CONTINUOUS")
job.should.have.key("job").which.should.have.key("presignedUrlConfig")
job.should.have.key("job").which.should.have.key(
"presignedUrlConfig"
).which.should.have.key("roleArn").which.should.equal(
"arn:aws:iam::1:role/service-role/iot_job_role"
)
job.should.have.key("job").which.should.have.key(
"presignedUrlConfig"
).which.should.have.key("expiresInSec").which.should.equal(123)
job.should.have.key("job").which.should.have.key(
"jobExecutionsRolloutConfig"
).which.should.have.key("maximumPerMinute").which.should.equal(10)
@mock_iot
def test_delete_job():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job = client.describe_job(jobId=job_id)
job.should.have.key("job")
job.should.have.key("job").which.should.have.key("jobId").which.should.equal(job_id)
client.delete_job(jobId=job_id)
client.list_jobs()["jobs"].should.have.length_of(0)
@mock_iot
def test_cancel_job():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job = client.describe_job(jobId=job_id)
job.should.have.key("job")
job.should.have.key("job").which.should.have.key("jobId").which.should.equal(job_id)
job = client.cancel_job(jobId=job_id, reasonCode="Because", comment="You are")
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job = client.describe_job(jobId=job_id)
job.should.have.key("job")
job.should.have.key("job").which.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("job").which.should.have.key("status").which.should.equal(
"CANCELED"
)
job.should.have.key("job").which.should.have.key(
"forceCanceled"
).which.should.equal(False)
job.should.have.key("job").which.should.have.key("reasonCode").which.should.equal(
"Because"
)
job.should.have.key("job").which.should.have.key("comment").which.should.equal(
"You are"
)
@mock_iot
def test_get_job_document_with_document_source():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job_document = client.get_job_document(jobId=job_id)
job_document.should.have.key("document").which.should.equal("")
@mock_iot
def test_get_job_document_with_document():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
# job document
job_document = {"field": "value"}
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
document=json.dumps(job_document),
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job_document = client.get_job_document(jobId=job_id)
job_document.should.have.key("document").which.should.equal('{"field": "value"}')
@mock_iot
def test_describe_job_execution():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
# job document
job_document = {"field": "value"}
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
document=json.dumps(job_document),
description="Description",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job.should.have.key("description")
job_execution = client.describe_job_execution(jobId=job_id, thingName=name)
job_execution.should.have.key("execution")
job_execution["execution"].should.have.key("jobId").which.should.equal(job_id)
job_execution["execution"].should.have.key("status").which.should.equal("QUEUED")
job_execution["execution"].should.have.key("forceCanceled").which.should.equal(
False
)
job_execution["execution"].should.have.key("statusDetails").which.should.equal(
{"detailsMap": {}}
)
job_execution["execution"].should.have.key("thingArn").which.should.equal(
thing["thingArn"]
)
job_execution["execution"].should.have.key("queuedAt")
job_execution["execution"].should.have.key("startedAt")
job_execution["execution"].should.have.key("lastUpdatedAt")
job_execution["execution"].should.have.key("executionNumber").which.should.equal(
123
)
job_execution["execution"].should.have.key("versionNumber").which.should.equal(123)
job_execution["execution"].should.have.key(
"approximateSecondsBeforeTimedOut"
).which.should.equal(123)
job_execution = client.describe_job_execution(
jobId=job_id, thingName=name, executionNumber=123
)
job_execution.should.have.key("execution")
job_execution["execution"].should.have.key("jobId").which.should.equal(job_id)
job_execution["execution"].should.have.key("status").which.should.equal("QUEUED")
job_execution["execution"].should.have.key("forceCanceled").which.should.equal(
False
)
job_execution["execution"].should.have.key("statusDetails").which.should.equal(
{"detailsMap": {}}
)
job_execution["execution"].should.have.key("thingArn").which.should.equal(
thing["thingArn"]
)
job_execution["execution"].should.have.key("queuedAt")
job_execution["execution"].should.have.key("startedAt")
job_execution["execution"].should.have.key("lastUpdatedAt")
job_execution["execution"].should.have.key("executionNumber").which.should.equal(
123
)
job_execution["execution"].should.have.key("versionNumber").which.should.equal(123)
job_execution["execution"].should.have.key(
"approximateSecondsBeforeTimedOut"
).which.should.equal(123)
try:
client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=456)
except ClientError as exc:
error_code = exc.response["Error"]["Code"]
error_code.should.equal("ResourceNotFoundException")
else:
raise Exception("Should have raised error")
@mock_iot
def test_cancel_job_execution():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
# job document
job_document = {"field": "value"}
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
document=json.dumps(job_document),
description="Description",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job.should.have.key("description")
client.cancel_job_execution(jobId=job_id, thingName=name)
job_execution = client.describe_job_execution(jobId=job_id, thingName=name)
job_execution.should.have.key("execution")
job_execution["execution"].should.have.key("status").which.should.equal("CANCELED")
@mock_iot
def test_delete_job_execution():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
# job document
job_document = {"field": "value"}
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
document=json.dumps(job_document),
description="Description",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job.should.have.key("description")
client.delete_job_execution(jobId=job_id, thingName=name, executionNumber=123)
try:
client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123)
except ClientError as exc:
error_code = exc.response["Error"]["Code"]
error_code.should.equal("ResourceNotFoundException")
else:
raise Exception("Should have raised error")
@mock_iot
def test_list_job_executions_for_job():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
# job document
job_document = {"field": "value"}
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
document=json.dumps(job_document),
description="Description",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job.should.have.key("description")
job_execution = client.list_job_executions_for_job(jobId=job_id)
job_execution.should.have.key("executionSummaries")
job_execution["executionSummaries"][0].should.have.key(
"thingArn"
).which.should.equal(thing["thingArn"])
job_execution = client.list_job_executions_for_job(jobId=job_id, status="QUEUED")
job_execution.should.have.key("executionSummaries")
job_execution["executionSummaries"][0].should.have.key(
"thingArn"
).which.should.equal(thing["thingArn"])
@mock_iot
def test_list_job_executions_for_thing():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
# job document
job_document = {"field": "value"}
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
document=json.dumps(job_document),
description="Description",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job.should.have.key("description")
job_execution = client.list_job_executions_for_thing(thingName=name)
job_execution.should.have.key("executionSummaries")
job_execution["executionSummaries"][0].should.have.key("jobId").which.should.equal(
job_id
)
job_execution = client.list_job_executions_for_thing(
thingName=name, status="QUEUED"
)
job_execution.should.have.key("executionSummaries")
job_execution["executionSummaries"][0].should.have.key("jobId").which.should.equal(
job_id
)
@mock_iot
def test_list_job_executions_for_thing_paginated():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
thing = client.create_thing(thingName=name)
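    # Create ten jobs targeting the same thing, then page through the execution
    # summaries with maxResults/nextToken: pages of 2, 1 and the remaining 7.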
for idx in range(0, 10):
client.create_job(
jobId=f"TestJob_{idx}",
targets=[thing["thingArn"]],
document=json.dumps({"field": "value"}),
)
res = client.list_job_executions_for_thing(thingName=name, maxResults=2)
executions = res["executionSummaries"]
executions.should.have.length_of(2)
res.should.have.key("nextToken")
res = client.list_job_executions_for_thing(
thingName=name, maxResults=1, nextToken=res["nextToken"]
)
executions = res["executionSummaries"]
executions.should.have.length_of(1)
res.should.have.key("nextToken")
res = client.list_job_executions_for_thing(
thingName=name, nextToken=res["nextToken"]
)
executions = res["executionSummaries"]
executions.should.have.length_of(7)
res.shouldnt.have.key("nextToken")
class TestTopicRules:
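    # Rule payload shared by the tests below: a simple SQL select with a
    # DynamoDBv2 put action and a republish error action.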
name = "my-rule"
payload = {
"sql": "SELECT * FROM 'topic/*' WHERE something > 0",
"actions": [
{"dynamoDBv2": {"putItem": {"tableName": "my-table"}, "roleArn": "my-role"}}
],
"errorAction": {
"republish": {"qos": 0, "roleArn": "my-role", "topic": "other-topic"}
},
"description": "my-description",
"ruleDisabled": False,
"awsIotSqlVersion": "2016-03-23",
}
@mock_iot
def test_topic_rule_create(self):
client = boto3.client("iot", region_name="ap-northeast-1")
client.create_topic_rule(ruleName=self.name, topicRulePayload=self.payload)
# duplicated rule name
with pytest.raises(ClientError) as ex:
client.create_topic_rule(ruleName=self.name, topicRulePayload=self.payload)
error_code = ex.value.response["Error"]["Code"]
error_code.should.equal("ResourceAlreadyExistsException")
@mock_iot
def test_topic_rule_list(self):
client = boto3.client("iot", region_name="ap-northeast-1")
# empty response
res = client.list_topic_rules()
res.should.have.key("rules").which.should.have.length_of(0)
client.create_topic_rule(ruleName=self.name, topicRulePayload=self.payload)
client.create_topic_rule(ruleName="my-rule-2", topicRulePayload=self.payload)
res = client.list_topic_rules()
res.should.have.key("rules").which.should.have.length_of(2)
for rule, name in zip(res["rules"], [self.name, "my-rule-2"]):
rule.should.have.key("ruleName").which.should.equal(name)
rule.should.have.key("createdAt").which.should_not.be.none
rule.should.have.key("ruleArn").which.should_not.be.none
rule.should.have.key("ruleDisabled").which.should.equal(
self.payload["ruleDisabled"]
)
rule.should.have.key("topicPattern").which.should.equal("topic/*")
@mock_iot
def test_topic_rule_get(self):
client = boto3.client("iot", region_name="ap-northeast-1")
# no such rule
with pytest.raises(ClientError) as ex:
client.get_topic_rule(ruleName=self.name)
error_code = ex.value.response["Error"]["Code"]
error_code.should.equal("ResourceNotFoundException")
client.create_topic_rule(ruleName=self.name, topicRulePayload=self.payload)
rule = client.get_topic_rule(ruleName=self.name)
rule.should.have.key("ruleArn").which.should_not.be.none
rule.should.have.key("rule")
rrule = rule["rule"]
rrule.should.have.key("actions").which.should.equal(self.payload["actions"])
rrule.should.have.key("awsIotSqlVersion").which.should.equal(
self.payload["awsIotSqlVersion"]
)
rrule.should.have.key("createdAt").which.should_not.be.none
rrule.should.have.key("description").which.should.equal(
self.payload["description"]
)
rrule.should.have.key("errorAction").which.should.equal(
self.payload["errorAction"]
)
rrule.should.have.key("ruleDisabled").which.should.equal(
self.payload["ruleDisabled"]
)
rrule.should.have.key("ruleName").which.should.equal(self.name)
rrule.should.have.key("sql").which.should.equal(self.payload["sql"])
@mock_iot
def test_topic_rule_replace(self):
client = boto3.client("iot", region_name="ap-northeast-1")
# no such rule
with pytest.raises(ClientError) as ex:
client.replace_topic_rule(ruleName=self.name, topicRulePayload=self.payload)
error_code = ex.value.response["Error"]["Code"]
error_code.should.equal("ResourceNotFoundException")
client.create_topic_rule(ruleName=self.name, topicRulePayload=self.payload)
payload = self.payload.copy()
payload["description"] = "new-description"
client.replace_topic_rule(
ruleName=self.name, topicRulePayload=payload,
)
rule = client.get_topic_rule(ruleName=self.name)
rule["rule"]["ruleName"].should.equal(self.name)
rule["rule"]["description"].should.equal(payload["description"])
@mock_iot
def test_topic_rule_disable(self):
client = boto3.client("iot", region_name="ap-northeast-1")
# no such rule
with pytest.raises(ClientError) as ex:
client.disable_topic_rule(ruleName=self.name)
error_code = ex.value.response["Error"]["Code"]
error_code.should.equal("ResourceNotFoundException")
client.create_topic_rule(ruleName=self.name, topicRulePayload=self.payload)
client.disable_topic_rule(ruleName=self.name)
rule = client.get_topic_rule(ruleName=self.name)
rule["rule"]["ruleName"].should.equal(self.name)
rule["rule"]["ruleDisabled"].should.equal(True)
@mock_iot
def test_topic_rule_enable(self):
client = boto3.client("iot", region_name="ap-northeast-1")
# no such rule
with pytest.raises(ClientError) as ex:
client.enable_topic_rule(ruleName=self.name)
error_code = ex.value.response["Error"]["Code"]
error_code.should.equal("ResourceNotFoundException")
payload = self.payload.copy()
payload["ruleDisabled"] = True
client.create_topic_rule(ruleName=self.name, topicRulePayload=payload)
client.enable_topic_rule(ruleName=self.name)
rule = client.get_topic_rule(ruleName=self.name)
rule["rule"]["ruleName"].should.equal(self.name)
rule["rule"]["ruleDisabled"].should.equal(False)
@mock_iot
def test_topic_rule_delete(self):
client = boto3.client("iot", region_name="ap-northeast-1")
# no such rule
with pytest.raises(ClientError) as ex:
client.delete_topic_rule(ruleName=self.name)
error_code = ex.value.response["Error"]["Code"]
error_code.should.equal("ResourceNotFoundException")
client.create_topic_rule(ruleName=self.name, topicRulePayload=self.payload)
client.enable_topic_rule(ruleName=self.name)
client.delete_topic_rule(ruleName=self.name)
res = client.list_topic_rules()
res.should.have.key("rules").which.should.have.length_of(0)
@mock_iot
def test_deprecate_undeprecate_thing_type(self):
client = boto3.client("iot", region_name="ap-northeast-1")
thing_type_name = "my-type-name"
client.create_thing_type(
thingTypeName=thing_type_name,
thingTypeProperties={"searchableAttributes": ["s1", "s2", "s3"]},
)
res = client.describe_thing_type(thingTypeName=thing_type_name)
res["thingTypeMetadata"]["deprecated"].should.equal(False)
client.deprecate_thing_type(thingTypeName=thing_type_name, undoDeprecate=False)
res = client.describe_thing_type(thingTypeName=thing_type_name)
res["thingTypeMetadata"]["deprecated"].should.equal(True)
client.deprecate_thing_type(thingTypeName=thing_type_name, undoDeprecate=True)
res = client.describe_thing_type(thingTypeName=thing_type_name)
res["thingTypeMetadata"]["deprecated"].should.equal(False)
@mock_iot
def test_deprecate_thing_type_not_exist(self):
client = boto3.client("iot", region_name="ap-northeast-1")
thing_type_name = "my-type-name"
with pytest.raises(client.exceptions.ResourceNotFoundException):
client.deprecate_thing_type(
thingTypeName=thing_type_name, undoDeprecate=False
)
@mock_iot
def test_create_thing_with_deprecated_type(self):
client = boto3.client("iot", region_name="ap-northeast-1")
thing_type_name = "my-type-name"
client.create_thing_type(
thingTypeName=thing_type_name,
thingTypeProperties={"searchableAttributes": ["s1", "s2", "s3"]},
)
client.deprecate_thing_type(thingTypeName=thing_type_name, undoDeprecate=False)
with pytest.raises(client.exceptions.InvalidRequestException):
client.create_thing(thingName="thing-name", thingTypeName=thing_type_name)
@mock_iot
def test_update_thing_with_deprecated_type(self):
client = boto3.client("iot", region_name="ap-northeast-1")
thing_type_name = "my-type-name"
thing_name = "thing-name"
client.create_thing_type(
thingTypeName=thing_type_name,
thingTypeProperties={"searchableAttributes": ["s1", "s2", "s3"]},
)
deprecated_thing_type_name = "my-type-name-deprecated"
client.create_thing_type(
thingTypeName=deprecated_thing_type_name,
thingTypeProperties={"searchableAttributes": ["s1", "s2", "s3"]},
)
client.deprecate_thing_type(
thingTypeName=deprecated_thing_type_name, undoDeprecate=False
)
client.create_thing(thingName=thing_name, thingTypeName=thing_type_name)
with pytest.raises(client.exceptions.InvalidRequestException):
client.update_thing(
thingName=thing_name, thingTypeName=deprecated_thing_type_name
)
| 39.707109 | 100 | 0.696115 |
4a1c3ae563c55fb1768fea5c4e3eb467237512bd
| 7,214 |
py
|
Python
|
tests/feature/test_gherkin_terminal_reporter.py
|
hristiy4n/pytest-bdd
|
76ed2ece2aa822cdf764b37a8d042227db9ff9c9
|
[
"MIT"
] | null | null | null |
tests/feature/test_gherkin_terminal_reporter.py
|
hristiy4n/pytest-bdd
|
76ed2ece2aa822cdf764b37a8d042227db9ff9c9
|
[
"MIT"
] | null | null | null |
tests/feature/test_gherkin_terminal_reporter.py
|
hristiy4n/pytest-bdd
|
76ed2ece2aa822cdf764b37a8d042227db9ff9c9
|
[
"MIT"
] | null | null | null |
import textwrap
import pytest
FEATURE = """\
Feature: Gherkin terminal output feature
Scenario: Scenario example 1
Given there is a bar
When the bar is accessed
Then world explodes
"""
TEST = """\
from pytest_bdd import given, when, then, scenario
@given('there is a bar')
def a_bar():
return 'bar'
@when('the bar is accessed')
def the_bar_is_accessed():
pass
@then('world explodes')
def world_explodes():
pass
@scenario('test.feature', 'Scenario example 1')
def test_scenario_1():
pass
"""
def test_default_output_should_be_the_same_as_regular_terminal_reporter(testdir):
testdir.makefile(".feature", test=FEATURE)
testdir.makepyfile(TEST)
regular = testdir.runpytest()
gherkin = testdir.runpytest("--gherkin-terminal-reporter")
regular.assert_outcomes(passed=1, failed=0)
gherkin.assert_outcomes(passed=1, failed=0)
def parse_lines(lines):
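        # Ignore the "=== ... ===" section separators so the two reports can be
        # compared line by line.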
return [line for line in lines if not line.startswith("===")]
assert all(l1 == l2 for l1, l2 in zip(parse_lines(regular.stdout.lines), parse_lines(gherkin.stdout.lines)))
def test_verbose_mode_should_display_feature_and_scenario_names_instead_of_test_names_in_a_single_line(testdir):
testdir.makefile(".feature", test=FEATURE)
testdir.makepyfile(TEST)
result = testdir.runpytest("--gherkin-terminal-reporter", "-v")
result.assert_outcomes(passed=1, failed=0)
result.stdout.fnmatch_lines("Feature: Gherkin terminal output feature")
result.stdout.fnmatch_lines("*Scenario: Scenario example 1 PASSED")
def test_verbose_mode_should_preserve_displaying_regular_tests_as_usual(testdir):
testdir.makepyfile(
textwrap.dedent(
"""\
def test_1():
pass
"""
)
)
regular = testdir.runpytest()
gherkin = testdir.runpytest("--gherkin-terminal-reporter", "-v")
regular.assert_outcomes(passed=1, failed=0)
gherkin.assert_outcomes(passed=1, failed=0)
regular.stdout.fnmatch_lines("test_verbose_mode_should_preserve_displaying_regular_tests_as_usual.py . [100%]")
gherkin.stdout.fnmatch_lines(
"test_verbose_mode_should_preserve_displaying_regular_tests_as_usual.py::test_1 PASSED [100%]"
)
def test_double_verbose_mode_should_display_full_scenario_description(testdir):
testdir.makefile(".feature", test=FEATURE)
testdir.makepyfile(TEST)
result = testdir.runpytest("--gherkin-terminal-reporter", "-vv")
result.assert_outcomes(passed=1, failed=0)
result.stdout.fnmatch_lines("*Scenario: Scenario example 1")
result.stdout.fnmatch_lines("*Given there is a bar")
result.stdout.fnmatch_lines("*When the bar is accessed")
result.stdout.fnmatch_lines("*Then world explodes")
result.stdout.fnmatch_lines("*PASSED")
@pytest.mark.parametrize("verbosity", ["", "-v", "-vv"])
def test_error_message_for_missing_steps(testdir, verbosity):
testdir.makefile(".feature", test=FEATURE)
testdir.makepyfile(
textwrap.dedent(
"""\
from pytest_bdd import scenarios
scenarios('.')
"""
)
)
result = testdir.runpytest("--gherkin-terminal-reporter", verbosity)
result.assert_outcomes(passed=0, failed=1)
result.stdout.fnmatch_lines(
"""*StepDefinitionNotFoundError: Step definition is not found: Given "there is a bar". """
"""Line 3 in scenario "Scenario example 1"*"""
)
@pytest.mark.parametrize("verbosity", ["", "-v", "-vv"])
def test_error_message_should_be_displayed(testdir, verbosity):
testdir.makefile(".feature", test=FEATURE)
testdir.makepyfile(
textwrap.dedent(
"""\
from pytest_bdd import given, when, then, scenario
@given('there is a bar')
def a_bar():
return 'bar'
@when('the bar is accessed')
def the_bar_is_accessed():
pass
@then('world explodes')
def world_explodes():
raise Exception("BIGBADABOOM")
@scenario('test.feature', 'Scenario example 1')
def test_scenario_1():
pass
"""
)
)
result = testdir.runpytest("--gherkin-terminal-reporter", verbosity)
result.assert_outcomes(passed=0, failed=1)
result.stdout.fnmatch_lines("E Exception: BIGBADABOOM")
result.stdout.fnmatch_lines("test_error_message_should_be_displayed.py:15: Exception")
def test_local_variables_should_be_displayed_when_showlocals_option_is_used(testdir):
testdir.makefile(".feature", test=FEATURE)
testdir.makepyfile(
textwrap.dedent(
"""\
from pytest_bdd import given, when, then, scenario
@given('there is a bar')
def a_bar():
return 'bar'
@when('the bar is accessed')
def the_bar_is_accessed():
pass
@then('world explodes')
def world_explodes():
local_var = "MULTIPASS"
raise Exception("BIGBADABOOM")
@scenario('test.feature', 'Scenario example 1')
def test_scenario_1():
pass
"""
)
)
result = testdir.runpytest("--gherkin-terminal-reporter", "--showlocals")
result.assert_outcomes(passed=0, failed=1)
result.stdout.fnmatch_lines("""request*=*<FixtureRequest for *""")
result.stdout.fnmatch_lines("""local_var*=*MULTIPASS*""")
def test_step_parameters_should_be_replaced_by_their_values(testdir):
example = {"start": 10, "eat": 3, "left": 7}
testdir.makefile(
".feature",
test=textwrap.dedent(
"""\
Feature: Gherkin terminal output feature
Scenario Outline: Scenario example 2
Given there are <start> cucumbers
When I eat <eat> cucumbers
Then I should have <left> cucumbers
Examples:
| start | eat | left |
|{start}|{eat}|{left}|
""".format(
**example
)
),
)
testdir.makepyfile(
test_gherkin=textwrap.dedent(
"""\
from pytest_bdd import given, when, scenario, then
@given('there are <start> cucumbers')
def start_cucumbers(start):
return start
@when('I eat <eat> cucumbers')
def eat_cucumbers(start_cucumbers, eat):
pass
@then('I should have <left> cucumbers')
def should_have_left_cucumbers(start_cucumbers, start, eat, left):
pass
@scenario('test.feature', 'Scenario example 2')
def test_scenario_2():
pass
"""
)
)
result = testdir.runpytest("--gherkin-terminal-reporter", "--gherkin-terminal-reporter-expanded", "-vv")
result.assert_outcomes(passed=1, failed=0)
result.stdout.fnmatch_lines("*Scenario: Scenario example 2")
result.stdout.fnmatch_lines("*Given there are {start} cucumbers".format(**example))
result.stdout.fnmatch_lines("*When I eat {eat} cucumbers".format(**example))
result.stdout.fnmatch_lines("*Then I should have {left} cucumbers".format(**example))
result.stdout.fnmatch_lines("*PASSED")
| 30.697872 | 115 | 0.64555 |
4a1c3bdf5468b4f818b0945e82064c49a9c403c2
| 17,742 |
py
|
Python
|
pysnmp/CISCO-LWAPP-LINKTEST-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11 |
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/CISCO-LWAPP-LINKTEST-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75 |
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/CISCO-LWAPP-LINKTEST-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10 |
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module CISCO-LWAPP-LINKTEST-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-LWAPP-LINKTEST-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:48:41 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
IpAddress, Unsigned32, Bits, Integer32, Counter32, Counter64, iso, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, MibIdentifier, NotificationType, TimeTicks, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "Unsigned32", "Bits", "Integer32", "Counter32", "Counter64", "iso", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "MibIdentifier", "NotificationType", "TimeTicks", "ModuleIdentity")
TruthValue, TextualConvention, TimeInterval, RowStatus, DisplayString, MacAddress = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "TimeInterval", "RowStatus", "DisplayString", "MacAddress")
ciscoLwappLinkTestMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 516))
ciscoLwappLinkTestMIB.setRevisions(('2006-04-06 00:00',))
if mibBuilder.loadTexts: ciscoLwappLinkTestMIB.setLastUpdated('200604060000Z')
if mibBuilder.loadTexts: ciscoLwappLinkTestMIB.setOrganization('Cisco Systems Inc.')
ciscoLwappLinkTestMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 516, 0))
ciscoLwappLinkTestMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 516, 1))
ciscoLwappLinkTestMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 516, 2))
ciscoLwappLinkTestConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 1))
ciscoLwappLinkTestRun = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2))
ciscoLwappLinkTestStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 3))
cLLtResponder = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 1, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLLtResponder.setStatus('current')
cLLtPacketSize = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 1500)).clone(50)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLLtPacketSize.setStatus('current')
cLLtNumberOfPackets = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 100)).clone(20)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLLtNumberOfPackets.setStatus('current')
cLLtTestPurgeTime = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(15, 1800)).clone(15)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cLLtTestPurgeTime.setStatus('current')
cLLtClientLinkTestTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 1), )
if mibBuilder.loadTexts: cLLtClientLinkTestTable.setStatus('current')
cLLtClientLinkTestEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 1, 1), ).setIndexNames((0, "CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtIndex"))
if mibBuilder.loadTexts: cLLtClientLinkTestEntry.setStatus('current')
cLLtClientLtIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 32)))
if mibBuilder.loadTexts: cLLtClientLtIndex.setStatus('current')
cLLtClientLtMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 1, 1, 2), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cLLtClientLtMacAddress.setStatus('current')
cLLtClientLtRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 1, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cLLtClientLtRowStatus.setStatus('current')
cLLtClientLTResultsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2), )
if mibBuilder.loadTexts: cLLtClientLTResultsTable.setStatus('current')
cLLtClientLTResultsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1), ).setIndexNames((0, "CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtIndex"))
if mibBuilder.loadTexts: cLLtClientLTResultsEntry.setStatus('current')
cLLtClientLtPacketsSent = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 1), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtPacketsSent.setStatus('current')
cLLtClientLtPacketsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 2), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtPacketsRx.setStatus('current')
cLLtClientLtTotalPacketsLost = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 3), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtTotalPacketsLost.setStatus('current')
cLLtClientLtApToClientPktsLost = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 4), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtApToClientPktsLost.setStatus('current')
cLLtClientLtClientToApPktsLost = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 5), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtClientToApPktsLost.setStatus('current')
cLLtClientLtMinRoundTripTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 6), TimeInterval()).setUnits('hundredths-seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtMinRoundTripTime.setStatus('current')
cLLtClientLtMaxRoundTripTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 7), TimeInterval()).setUnits('hundredths-seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtMaxRoundTripTime.setStatus('current')
cLLtClientLtAvgRoundTripTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 8), TimeInterval()).setUnits('hundredths-seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtAvgRoundTripTime.setStatus('current')
cLLtClientLtUplinkMinRSSI = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 9), Integer32()).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtUplinkMinRSSI.setStatus('current')
cLLtClientLtUplinkMaxRSSI = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 10), Integer32()).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtUplinkMaxRSSI.setStatus('current')
cLLtClientLtUplinkAvgRSSI = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 11), Integer32()).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtUplinkAvgRSSI.setStatus('current')
cLLtClientLtDownlinkMinRSSI = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 12), Integer32()).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtDownlinkMinRSSI.setStatus('current')
cLLtClientLtDownlinkMaxRSSI = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 13), Integer32()).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtDownlinkMaxRSSI.setStatus('current')
cLLtClientLtDownlinkAvgRSSI = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 14), Integer32()).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtDownlinkAvgRSSI.setStatus('current')
cLLtClientLtUplinkMinSNR = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 15), Integer32()).setUnits('dB').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtUplinkMinSNR.setStatus('current')
cLLtClientLtUplinkMaxSNR = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 16), Integer32()).setUnits('dB').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtUplinkMaxSNR.setStatus('current')
cLLtClientLtUplinkAvgSNR = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 17), Integer32()).setUnits('dB').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtUplinkAvgSNR.setStatus('current')
cLLtClientLtDownlinkMinSNR = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 18), Integer32()).setUnits('dB').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtDownlinkMinSNR.setStatus('current')
cLLtClientLtDownlinkMaxSNR = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 19), Integer32()).setUnits('dB').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtDownlinkMaxSNR.setStatus('current')
cLLtClientLtDownlinkAvgSNR = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 20), Integer32()).setUnits('dB').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtDownlinkAvgSNR.setStatus('current')
cLLtClientLtTotalTxRetriesAP = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 21), Counter32()).setUnits('retries').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtTotalTxRetriesAP.setStatus('current')
cLLtClientLtMaxTxRetriesAP = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 22), Counter32()).setUnits('retries').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtMaxTxRetriesAP.setStatus('current')
cLLtClientLtTotalTxRetriesClient = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 23), Counter32()).setUnits('retries').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtTotalTxRetriesClient.setStatus('current')
cLLtClientLtMaxTxRetriesClient = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 24), Counter32()).setUnits('retries').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtMaxTxRetriesClient.setStatus('current')
cLLtClientLtStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 2, 2, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("cLLtClientLtStatusFailed", 0), ("cLLtClientLtStatusCcxInProgress", 1), ("cLLtClientLtStatusPngInProgress", 2), ("cLLtClientLtStatusPingSuccess", 3), ("cLLtClientLtStatusCcxLtSuccess", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtStatus.setStatus('current')
cLLtClientLtDataRateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 3, 1), )
if mibBuilder.loadTexts: cLLtClientLtDataRateTable.setStatus('current')
cLLtClientLtDataRateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtIndex"), (0, "CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtDataRate"))
if mibBuilder.loadTexts: cLLtClientLtDataRateEntry.setStatus('current')
cLLtClientLtDataRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 3, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 255)))
if mibBuilder.loadTexts: cLLtClientLtDataRate.setStatus('current')
cLLtClientLtRateDownlinkPktsSent = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 3, 1, 1, 2), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtRateDownlinkPktsSent.setStatus('current')
cLLtClientLtRateUplinkPktsSent = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 516, 1, 3, 1, 1, 3), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cLLtClientLtRateUplinkPktsSent.setStatus('current')
ciscoLwappLinkTestMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 516, 2, 1))
ciscoLwappLinkTestMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 516, 2, 2))
ciscoLwappLinkTestMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 516, 2, 1, 1)).setObjects(("CISCO-LWAPP-LINKTEST-MIB", "cLLinkTestConfigGroup"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLinkTestRunsGroup"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLinkTestStatusGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoLwappLinkTestMIBCompliance = ciscoLwappLinkTestMIBCompliance.setStatus('current')
cLLinkTestConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 516, 2, 2, 1)).setObjects(("CISCO-LWAPP-LINKTEST-MIB", "cLLtResponder"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtPacketSize"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtNumberOfPackets"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtTestPurgeTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLLinkTestConfigGroup = cLLinkTestConfigGroup.setStatus('current')
cLLinkTestRunsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 516, 2, 2, 2)).setObjects(("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtMacAddress"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtPacketsSent"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtPacketsRx"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtTotalPacketsLost"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtApToClientPktsLost"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtClientToApPktsLost"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtMinRoundTripTime"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtMaxRoundTripTime"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtAvgRoundTripTime"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtUplinkMinRSSI"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtUplinkMaxRSSI"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtUplinkAvgRSSI"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtDownlinkMinRSSI"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtDownlinkMaxRSSI"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtDownlinkAvgRSSI"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtUplinkMinSNR"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtUplinkMaxSNR"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtUplinkAvgSNR"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtDownlinkMinSNR"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtDownlinkMaxSNR"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtDownlinkAvgSNR"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtTotalTxRetriesAP"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtMaxTxRetriesAP"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtTotalTxRetriesClient"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtMaxTxRetriesClient"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtStatus"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLLinkTestRunsGroup = cLLinkTestRunsGroup.setStatus('current')
cLLinkTestStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 516, 2, 2, 3)).setObjects(("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtRateDownlinkPktsSent"), ("CISCO-LWAPP-LINKTEST-MIB", "cLLtClientLtRateUplinkPktsSent"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cLLinkTestStatusGroup = cLLinkTestStatusGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-LWAPP-LINKTEST-MIB", cLLtClientLtRowStatus=cLLtClientLtRowStatus, cLLtClientLtTotalPacketsLost=cLLtClientLtTotalPacketsLost, cLLtClientLtUplinkMinRSSI=cLLtClientLtUplinkMinRSSI, cLLtClientLtDownlinkMaxRSSI=cLLtClientLtDownlinkMaxRSSI, cLLinkTestRunsGroup=cLLinkTestRunsGroup, cLLtClientLinkTestTable=cLLtClientLinkTestTable, ciscoLwappLinkTestConfig=ciscoLwappLinkTestConfig, ciscoLwappLinkTestMIBGroups=ciscoLwappLinkTestMIBGroups, cLLtPacketSize=cLLtPacketSize, cLLtClientLtDownlinkAvgSNR=cLLtClientLtDownlinkAvgSNR, cLLtNumberOfPackets=cLLtNumberOfPackets, cLLinkTestConfigGroup=cLLinkTestConfigGroup, cLLtClientLtDataRateTable=cLLtClientLtDataRateTable, cLLtResponder=cLLtResponder, ciscoLwappLinkTestMIBCompliance=ciscoLwappLinkTestMIBCompliance, cLLtClientLtDownlinkMaxSNR=cLLtClientLtDownlinkMaxSNR, cLLtClientLTResultsEntry=cLLtClientLTResultsEntry, cLLtClientLtDownlinkMinRSSI=cLLtClientLtDownlinkMinRSSI, cLLtClientLtStatus=cLLtClientLtStatus, ciscoLwappLinkTestMIBNotifs=ciscoLwappLinkTestMIBNotifs, cLLtTestPurgeTime=cLLtTestPurgeTime, cLLtClientLtAvgRoundTripTime=cLLtClientLtAvgRoundTripTime, cLLtClientLinkTestEntry=cLLtClientLinkTestEntry, cLLtClientLtTotalTxRetriesAP=cLLtClientLtTotalTxRetriesAP, cLLtClientLtIndex=cLLtClientLtIndex, ciscoLwappLinkTestMIB=ciscoLwappLinkTestMIB, ciscoLwappLinkTestMIBConform=ciscoLwappLinkTestMIBConform, cLLtClientLtPacketsSent=cLLtClientLtPacketsSent, cLLtClientLtUplinkAvgRSSI=cLLtClientLtUplinkAvgRSSI, cLLtClientLtDataRate=cLLtClientLtDataRate, cLLtClientLtMinRoundTripTime=cLLtClientLtMinRoundTripTime, cLLtClientLtDownlinkAvgRSSI=cLLtClientLtDownlinkAvgRSSI, cLLtClientLtClientToApPktsLost=cLLtClientLtClientToApPktsLost, ciscoLwappLinkTestRun=ciscoLwappLinkTestRun, cLLtClientLtUplinkMinSNR=cLLtClientLtUplinkMinSNR, cLLtClientLtDownlinkMinSNR=cLLtClientLtDownlinkMinSNR, ciscoLwappLinkTestMIBCompliances=ciscoLwappLinkTestMIBCompliances, cLLtClientLtMaxRoundTripTime=cLLtClientLtMaxRoundTripTime, cLLtClientLtUplinkAvgSNR=cLLtClientLtUplinkAvgSNR, cLLtClientLtMaxTxRetriesClient=cLLtClientLtMaxTxRetriesClient, cLLtClientLtApToClientPktsLost=cLLtClientLtApToClientPktsLost, cLLtClientLtUplinkMaxSNR=cLLtClientLtUplinkMaxSNR, cLLtClientLtTotalTxRetriesClient=cLLtClientLtTotalTxRetriesClient, ciscoLwappLinkTestMIBObjects=ciscoLwappLinkTestMIBObjects, cLLtClientLtUplinkMaxRSSI=cLLtClientLtUplinkMaxRSSI, ciscoLwappLinkTestStatus=ciscoLwappLinkTestStatus, cLLtClientLtPacketsRx=cLLtClientLtPacketsRx, cLLtClientLTResultsTable=cLLtClientLTResultsTable, cLLtClientLtRateUplinkPktsSent=cLLtClientLtRateUplinkPktsSent, cLLinkTestStatusGroup=cLLinkTestStatusGroup, cLLtClientLtMaxTxRetriesAP=cLLtClientLtMaxTxRetriesAP, cLLtClientLtDataRateEntry=cLLtClientLtDataRateEntry, cLLtClientLtMacAddress=cLLtClientLtMacAddress, cLLtClientLtRateDownlinkPktsSent=cLLtClientLtRateDownlinkPktsSent, PYSNMP_MODULE_ID=ciscoLwappLinkTestMIB)
| 144.243902 | 2,912 | 0.776294 |
4a1c3d8444d85547c8dd9dc83744f0e17ea348a8
| 848 |
py
|
Python
|
BotHandler.py
|
MrAlexandrKolesnikov/TelegramBot
|
a9189d6612dd089289273f7ebe35d2159886d2f6
|
[
"MIT"
] | null | null | null |
BotHandler.py
|
MrAlexandrKolesnikov/TelegramBot
|
a9189d6612dd089289273f7ebe35d2159886d2f6
|
[
"MIT"
] | null | null | null |
BotHandler.py
|
MrAlexandrKolesnikov/TelegramBot
|
a9189d6612dd089289273f7ebe35d2159886d2f6
|
[
"MIT"
] | null | null | null |
import requests
class BotHandler:
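    # Thin wrapper around the Telegram Bot API: long-polls getUpdates for
    # incoming messages and replies via sendMessage.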
def __init__(self, token):
self.token = token
self.api_url = "https://api.telegram.org/bot{}/".format(token)
def get_updates(self, offset=None, timeout=30):
method = 'getUpdates'
params = {'timeout': timeout, 'offset': offset}
resp = requests.get(self.api_url + method, params)
result_json = resp.json()['result']
return result_json
def send_message(self, chat_id, text):
params = {'chat_id': chat_id, 'text': text}
method = 'sendMessage'
resp = requests.post(self.api_url + method, params)
return resp
def get_last_update(self):
get_result = self.get_updates()
if len(get_result) > 0:
last_update = get_result[-1]
return True, last_update
return False, 0
| 29.241379 | 70 | 0.606132 |
4a1c3f3e98017cc49dc3b6aebfd42c0faab6306d
| 1,988 |
py
|
Python
|
aiotieba/tieba_protobuf/PbPageResIdl_pb2.py
|
Starry-OvO/Tieba-Manager
|
da50893edd1c4437f45f2ed50ca9f7a94aef9e11
|
[
"Unlicense"
] | 15 |
2022-02-04T14:36:39.000Z
|
2022-03-29T08:54:08.000Z
|
aiotieba/tieba_protobuf/PbPageResIdl_pb2.py
|
Starry-OvO/Tieba-Manager
|
da50893edd1c4437f45f2ed50ca9f7a94aef9e11
|
[
"Unlicense"
] | null | null | null |
aiotieba/tieba_protobuf/PbPageResIdl_pb2.py
|
Starry-OvO/Tieba-Manager
|
da50893edd1c4437f45f2ed50ca9f7a94aef9e11
|
[
"Unlicense"
] | 6 |
2022-03-16T05:01:45.000Z
|
2022-03-29T08:54:09.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: PbPageResIdl.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import SimpleForum_pb2 as SimpleForum__pb2
from . import Page_pb2 as Page__pb2
from . import Post_pb2 as Post__pb2
from . import ThreadInfo_pb2 as ThreadInfo__pb2
from . import User_pb2 as User__pb2
from . import SubPostList_pb2 as SubPostList__pb2
from . import Error_pb2 as Error__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12PbPageResIdl.proto\x1a\x11SimpleForum.proto\x1a\nPage.proto\x1a\nPost.proto\x1a\x10ThreadInfo.proto\x1a\nUser.proto\x1a\x11SubPostList.proto\x1a\x0b\x45rror.proto\"\x94\x02\n\x0cPbPageResIdl\x12\x15\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x06.Error\x12#\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x15.PbPageResIdl.DataRes\x1a\xc7\x01\n\x07\x44\x61taRes\x12\x1b\n\x05\x66orum\x18\x02 \x01(\x0b\x32\x0c.SimpleForum\x12\x13\n\x04page\x18\x03 \x01(\x0b\x32\x05.Page\x12\x18\n\tpost_list\x18\x06 \x03(\x0b\x32\x05.Post\x12\x1b\n\x06thread\x18\x08 \x01(\x0b\x32\x0b.ThreadInfo\x12\x18\n\tuser_list\x18\r \x03(\x0b\x32\x05.User\x12\x1f\n\rsub_post_list\x18\x0f \x01(\x0b\x32\x08.SubPost\x12\x18\n\x10has_fold_comment\x18\x44 \x01(\x05\x62\x06proto3')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'PbPageResIdl_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_PBPAGERESIDL._serialized_start=128
_PBPAGERESIDL._serialized_end=404
_PBPAGERESIDL_DATARES._serialized_start=205
_PBPAGERESIDL_DATARES._serialized_end=404
# @@protoc_insertion_point(module_scope)
| 56.8 | 800 | 0.810362 |
4a1c412ccf28101bab73b9bc5e6cc143ee68b907
| 6,028 |
py
|
Python
|
sira.py
|
gino2010/GICG
|
56f189e099ac09d20d5c15ef882cee779e2b1778
|
[
"MIT"
] | null | null | null |
sira.py
|
gino2010/GICG
|
56f189e099ac09d20d5c15ef882cee779e2b1778
|
[
"MIT"
] | null | null | null |
sira.py
|
gino2010/GICG
|
56f189e099ac09d20d5c15ef882cee779e2b1778
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Sort IP and Reverse to Address
import argparse
import re
import operator
import requests
from requests.packages import urllib3
urllib3.disable_warnings()
__author__ = 'gino'
IGNORE_IP = ['216.', ]
ONLY_IP = []
def filter_ip(ip):
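    # True means "skip this address": when ONLY_IP is non-empty it acts as a
    # whitelist of allowed prefixes, otherwise IGNORE_IP is a blacklist of
    # prefixes to drop.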
if ONLY_IP:
for item in ONLY_IP:
if ip.startswith(item):
return False
return True
else:
for item in IGNORE_IP:
if ip.startswith(item):
return True
return False
def sort_all_ip():
regex = re.compile("443/tcp open https|443/tcp filtered https")
match_lines = []
match_ips = {}
with open('raw_output', 'r') as fo:
# search 443 is opened
count = 0
for line in fo:
result = regex.search(line)
if result is not None and result.string == '443/tcp open https\n':
match_lines.append((count, 0))
elif result is not None and result.string == '443/tcp filtered https\n':
match_lines.append((count, 1))
count += 1
# get ips
fo.seek(0)
lines = fo.readlines()
for item in match_lines:
latency = 1.0
if item[1] == 0:
# latency less than 1S
temp = re.findall(r'0.\d+', lines[item[0] - 2])
if temp:
latency = temp[0]
else:
continue
elif item[1] == 1:
temp = re.findall(r'Host is up\.', lines[item[0] - 2])
if not temp:
continue
try:
ip_addresses = re.findall(r'[0-9]+(?:\.[0-9]+){3}', lines[item[0] - 3])
ip_address = ip_addresses[1] if len(ip_addresses) == 2 else ip_addresses[0]
if filter_ip(ip_address):
print('pass %s address' % ip_address)
continue
match_ips[ip_address] = float(latency)
except:
print('line %s error!' % item[0])
return sorted(match_ips.items(), key=operator.itemgetter(1))
# get address map ip
def reverse_address(rest_num, sorted_ips):
fot = open('timeout', 'w')
fca = open('collect_list', 'w')
output = []
list_add = []
with open('address_list', 'r') as fa:
for line in fa:
list_add.append(line.rstrip())
list_temp = list_add[:]
set_add = set()
outcount = 0
try:
for item in sorted_ips:
try:
add_ip = item[0]
requests.get('https://{}'.format(add_ip), timeout=1.5)
except requests.exceptions.SSLError as e:
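                # A certificate hostname-mismatch error lists the names the
                # certificate covers; parse them out of the error text to learn
                # which domains this IP can serve.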
                message = str(e)
pos = message.find('of')
rev_add_temp = []
if pos != -1:
rev_add_temp = message[message.find('of') + 4:-1].split("', '")
else:
rev_add_temp.append(message[message.find('match') + 7:-1])
# just collect site address
set_add = set_add.union(set(rev_add_temp))
fca.write('ip:{} address:{} \n'.format(add_ip, str(rev_add_temp)))
list_add = list_temp[:]
for str_temp in list_add:
if str_temp in rev_add_temp:
output.append(
'address=/{}/{}\n'.format(str_temp[2:] if str_temp.startswith('*.') else str_temp, add_ip))
# add a rule for ingress
if str_temp == '*.google.com':
output.append('address=/{}/{}\n'.format('m-dot-betaspike.appspot.com', add_ip))
list_temp.remove(str_temp)
print('{} is checked'.format(add_ip))
except requests.exceptions.ConnectTimeout:
fot.write(add_ip + ' is TIMEOUT \n')
print('{} is timeout'.format(add_ip))
except Exception as e:
fot.write(add_ip + ' is ERROR \n')
                print('{} is error, message:{}'.format(add_ip, e))
rest_num -= 1
if rest_num > 0:
print('left {} item(s) will be check.'.format(str(rest_num)))
else:
print('left {} address(es) need to check and already check {} address(es).'.format(len(list_temp),
0 - rest_num))
if len(list_temp) != outcount:
print(list_temp)
outcount = len(list_temp)
if not list_temp or not rest_num:
break
except KeyboardInterrupt:
print('abort scan...')
if list_temp:
print('Notice: %s not found ip' % str(list_temp))
for temp in list_temp:
output.append('address=/{}/{}\n'.format(temp[2:] if temp.startswith('*.') else temp, '0.0.0.0'))
else:
print('Total {} items have been checked'.format(0 - rest_num if rest_num < 0 else rest_num))
fot.close()
# output distinct address
collect_list = list(set_add)
collect_list.sort()
for item in collect_list:
fca.write(item + '\n')
fca.flush()
fca.close()
ffd = open('dnsmasq', 'w')
output.sort()
for line in output:
ffd.write(line)
ffd.flush()
ffd.close()
def parse_args():
parser = argparse.ArgumentParser(description='Scan and reverse address.')
parser.add_argument('integers', metavar='Num', type=int, nargs='?',
help='an integer for the number of scan', default=100)
return parser.parse_args().integers
if __name__ == '__main__':
source_num = parse_args()
print('Start to analyse file and sort ip records by latency\n')
sorted_ips = sort_all_ip()
print('Check top %d records and generate dnsmasq address list\n' % source_num)
reverse_address(source_num, sorted_ips)
| 34.25 | 119 | 0.519575 |
4a1c41d1c7fcf350175623d594bac7f8c1d4515f
| 3,205 |
py
|
Python
|
testing_suite/filtering/test_filter_talon_transcripts.py
|
kopardev/TALON
|
8014faed5f982e5e106ec05239e47d65878e76c3
|
[
"MIT"
] | 47 |
2020-03-31T19:56:11.000Z
|
2022-03-31T18:00:21.000Z
|
testing_suite/filtering/test_filter_talon_transcripts.py
|
kopardev/TALON
|
8014faed5f982e5e106ec05239e47d65878e76c3
|
[
"MIT"
] | 44 |
2020-03-23T02:15:08.000Z
|
2022-03-30T17:27:26.000Z
|
testing_suite/filtering/test_filter_talon_transcripts.py
|
kopardev/TALON
|
8014faed5f982e5e106ec05239e47d65878e76c3
|
[
"MIT"
] | 11 |
2020-05-13T18:41:23.000Z
|
2021-12-28T07:48:58.000Z
|
import os
import optparse_mock_filt as omf
from talon.post import filter_talon_transcripts as filt
def test_filter_lax():
""" Working with our mock dataset, the very lax settings
max_frac_A = 1;
min_count = 1;
min_datasets = 1 ;
allow_genomic = True;
should give us all of the original transcripts:
gene_ID transcript_ID
1 1
1 2
1 3
1 4
"""
database = "scratch/filter/test.db"
annot = "toy"
options = omf.OptParseMockFilt(database, annot, max_frac_A = 1,
min_count = 1, min_datasets = 1,
allow_genomic = True)
datasets = ["dataset_1", "dataset_2", "dataset_3", "dataset_4", "dataset_5"]
filtered = filt.filter_talon_transcripts(database, annot, datasets, options)
print(filtered)
assert len(filtered) == 4
assert list(filtered.iloc[0]) == [1, 1]
assert list(filtered.iloc[1]) == [1, 2]
assert list(filtered.iloc[2]) == [1, 3]
assert list(filtered.iloc[3]) == [1, 4]
def test_filter_keep_genomic():
""" Working with our mock dataset, these settings
max_frac_A = 0.5;
min_count = 1;
min_datasets = 2;
allow_genomic = True;
should give us the known transcripts (because they are known)
and the genomic transcript, but not the ISM:
gene_ID transcript_ID
            1          1
            1          2
            1          3
"""
database = "scratch/filter/test.db"
annot = "toy"
options = omf.OptParseMockFilt(database, annot, max_frac_A = 0.5,
min_count = 1, min_datasets = 2,
allow_genomic = True)
datasets = ["dataset_1", "dataset_2", "dataset_3", "dataset_4", "dataset_5"]
filtered = filt.filter_talon_transcripts(database, annot, datasets, options)
print(filtered)
assert len(filtered) == 3
assert list(filtered.iloc[0]) == [1, 1]
assert list(filtered.iloc[1]) == [1, 2]
assert list(filtered.iloc[2]) == [1, 3]
def test_filter_discard_genomic():
""" Working with our mock dataset, these settings
max_frac_A = 1;
min_count = 2;
            min_datasets = 1;
allow_genomic = False;
should give us the known transcripts (because they are known)
and ISM, but not the genomic:
gene_ID transcript_ID
            1          1
            1          2
            1          4
"""
database = "scratch/filter/test.db"
annot = "toy"
options = omf.OptParseMockFilt(database, annot, max_frac_A = 1,
min_count = 2, min_datasets = 1,
allow_genomic = False)
datasets = ["dataset_1", "dataset_2", "dataset_3", "dataset_4", "dataset_5"]
filtered = filt.filter_talon_transcripts(database, annot, datasets, options)
print(filtered)
assert len(filtered) == 3
assert list(filtered.iloc[0]) == [1, 1]
assert list(filtered.iloc[1]) == [1, 2]
assert list(filtered.iloc[2]) == [1, 4]
| 34.836957 | 80 | 0.554758 |
4a1c424922aeed2a4d51cc098ef63c2dc4857227
| 4,439 |
py
|
Python
|
lib/tracker_base.py
|
sumeshpremraj/1RM-Tracker
|
68650659661c7410d8303ee68b481184369e8b42
|
[
"MIT"
] | null | null | null |
lib/tracker_base.py
|
sumeshpremraj/1RM-Tracker
|
68650659661c7410d8303ee68b481184369e8b42
|
[
"MIT"
] | null | null | null |
lib/tracker_base.py
|
sumeshpremraj/1RM-Tracker
|
68650659661c7410d8303ee68b481184369e8b42
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import logging
import os
import matplotlib.pyplot as plt
from collections import OrderedDict
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
# If modifying these scopes, delete the file token.json
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler('tracker.log')
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# Example data structure (keys are dates, values are estimated one-rep maxes,
# matching what plot_data() builds below)
# stats =
# {
#     'Squat': {
#         '8/10/2018': 112.0,
#         '15/10/2018': 124.66666666666667,
#         '12/10/2018': 120.0
#     },
#     'Bench': {
#         '15/10/2018': 80.16666666666667,
#         '8/10/2018': 78.0,
#         '12/10/2018': 82.0
#     },
#     'Deadlift': {
#         '12/10/2018': 113.33333333333333,
#         '10/10/2018': 105.0
#     },
#     'Press': {
#         '10/10/2018': 57.0,
#         '12/10/2018': 66.66666666666666
#     }
# }
class TrackerBase(object):
def auth(self):
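        # Reuse the cached token.json credentials when still valid; otherwise
        # run the OAuth installed-app flow against credentials.json and cache
        # the result.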
logger.info("Initiating Sheets authorization")
store = file.Storage('token.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
creds = tools.run_flow(flow, store)
service = build('sheets', 'v4', http=creds.authorize(Http()))
return service
def get_data(self, spreadsheet_id='1mp0x8AV7cTM45BqHzQ5uO-ga9JN48OA-yfNedcsnRkQ',
ranges=['PR Sheet!A2:P4', 'PR Sheet!A6:P8', 'PR Sheet!A10:P12', 'PR Sheet!A14:P16']):
service = self.auth()
logger.info("Getting data from Sheets")
values = service.spreadsheets().values().batchGet(spreadsheetId=spreadsheet_id, ranges=ranges,
majorDimension='ROWS', valueRenderOption='UNFORMATTED_VALUE',
dateTimeRenderOption='FORMATTED_STRING').execute()
return values
def plot_data(self, values):
self.ensure_graph_dir()
stats = {}
for i in range(0, 4):
plt.xlabel("Date")
plt.ylabel("1RM")
plt.grid(True)
lift = ''
for reps, weights, dates in zip(values['valueRanges'][i]['values'][0], values['valueRanges'][i]['values'][1],
values['valueRanges'][i]['values'][2]):
if weights == 'Rep max':
# First column contains Squat/Rep Max/Date as the data
# This is to get the name of the lift
lift = reps
logger.debug("Lift: " + lift)
stats[lift] = {}
continue
elif weights not in (0, ''):
if not all([reps, weights, dates]):
logger.info("Missing data in the column, check spreadsheet")
continue
for weight, date in zip(str(weights).split(','), dates.split(',')):
# Epley formula
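                        # Estimated 1RM = weight * (1 + reps / 30);
                        # e.g. 100 kg x 5 reps -> ~116.7 kg (illustrative
                        # numbers, not taken from the sheet).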
rep_max = float(weight) * (1 + reps / 30)
logger.debug(date + " " + str(rep_max))
# TODO: Convert string to Python date objects OR check Sheets API for Date type
stats[lift][date] = rep_max
logger.info("Plotting " + lift)
x = OrderedDict(sorted(stats[lift].items(), key=lambda t: t[0]))
plt.title(lift)
plt.plot(list(x.keys()), list(x.values()), 'go-') # (x-axis: date, y-axis: 1RM)
plt.savefig(lift + '.png')
plt.close()
logger.debug("Stats: ")
logger.debug(stats)
# TODO: Make graph dir customizable
def ensure_graph_dir(self, graph_dir='graphs'):
if not os.path.exists(graph_dir):
logger.debug("./" + graph_dir + "does not exist, creating it")
os.makedirs(graph_dir)
logger.debug("Changing to ./" + graph_dir)
os.chdir(graph_dir)
| 38.267241 | 121 | 0.528272 |
4a1c42d1ef573ceca2cf055e53aa8ae53b1360b8
| 8,274 |
py
|
Python
|
skills/dff_gaming_skill/dialogflows/common/nlg.py
|
oserikov/dream
|
109ba2df799025dcdada1fddbb7380e1c03100eb
|
[
"Apache-2.0"
] | 34 |
2021-08-18T14:51:44.000Z
|
2022-03-10T14:14:48.000Z
|
skills/dff_gaming_skill/dialogflows/common/nlg.py
|
oserikov/dream
|
109ba2df799025dcdada1fddbb7380e1c03100eb
|
[
"Apache-2.0"
] | 27 |
2021-08-30T14:42:09.000Z
|
2022-03-17T22:11:45.000Z
|
skills/dff_gaming_skill/dialogflows/common/nlg.py
|
oserikov/dream
|
109ba2df799025dcdada1fddbb7380e1c03100eb
|
[
"Apache-2.0"
] | 40 |
2021-08-22T07:13:32.000Z
|
2022-03-29T11:45:32.000Z
|
import logging
import os
import string
from datetime import datetime, timedelta
from itertools import product
import sentry_sdk
from dateparser import parse
import common.constants as common_constants
import common.dialogflow_framework.utils.state as state_utils
import common.gaming as common_gaming
import dialogflows.common.shared_memory_ops as gaming_memory
sentry_sdk.init(dsn=os.getenv("SENTRY_DSN"))
logger = logging.getLogger(__name__)
CONF_1 = 1.0
CONF_092_CAN_CONTINUE = 0.92
CONF_09_DONT_UNDERSTAND_DONE = 0.9
CONF_0 = 0.0
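# Experience buckets; timedelta() is given days, so the thresholds below are
# roughly 0 days, 6 months (180), 2 years (730) and 6.5 years (2400).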
EXPERIENCE = {
"negative": timedelta(0),
"low": timedelta(180),
"moderate": timedelta(730),
"large": timedelta(2400),
}
def error_response(vars):
logger.info("exec error_response")
state_utils.set_confidence(vars, 0)
return "Sorry"
def error_handler(f):
def wrapper(*args, **kwargs):
try:
response = f(*args, **kwargs)
except Exception as exc:
logger.exception(exc)
sentry_sdk.capture_exception(exc)
if args:
vars = args[0]
else:
vars = kwargs["vars"]
response = error_response(vars)
return response
return wrapper
def get_theme_and_genre_groups(themes, genres):
themes = set(themes)
genres = set(genres)
groups = []
for group, genres_and_themes in common_gaming.genre_and_theme_groups.items():
if genres & set(genres_and_themes["genres"]) or themes & set(genres_and_themes["themes"]):
groups.append(group)
return groups
def get_all_relevant_linkto_responses_based_on_genres_and_themes(vars):
game = gaming_memory.get_current_igdb_game(vars, assert_not_empty=False)
candidate_responses = []
if game is not None:
themes, genres = game.get("themes", []), game.get("genres", [])
theme_and_genre_groups = get_theme_and_genre_groups(themes, genres)
for skill_links in [common_gaming.links_to_movies, common_gaming.links_to_books]:
for theme in themes:
candidate_responses += skill_links["theme"].get(theme, [])
for group in theme_and_genre_groups:
candidate_responses += skill_links["theme_genre_group"].get(group, [])
return candidate_responses
def get_new_linkto_response_based_on_genres_and_themes(vars):
linkto_responses_based_on_genres_and_themes = get_all_relevant_linkto_responses_based_on_genres_and_themes(vars)
result = None
if linkto_responses_based_on_genres_and_themes:
used_linkto_phrases_ids = gaming_memory.get_used_linkto_phrase_ids(vars)
for response in linkto_responses_based_on_genres_and_themes:
id_ = gaming_memory.LINKTO_RESPONSES_TO_LINKTO_IDS.get(response)
assert id_ is not None, (
f"Link phrases added to shared memory has to be from `common.gaming`. " f"Got: '{response}'"
)
if id_ not in used_linkto_phrases_ids:
result = response
break
return result
@error_handler
def link_to_other_skills_response(vars, prefix="Okay.", shared_memory_actions=None):
response = get_new_linkto_response_based_on_genres_and_themes(vars)
if response is None:
response = ""
state_utils.set_confidence(vars, confidence=CONF_0)
else:
response = " ".join([prefix, response])
state_utils.set_confidence(vars, confidence=CONF_09_DONT_UNDERSTAND_DONE)
if shared_memory_actions is not None:
for action in shared_memory_actions:
action(vars)
state_utils.set_can_continue(vars, continue_flag=common_constants.CAN_NOT_CONTINUE)
gaming_memory.mark_current_bot_utterance_as_link_to_other_skill(vars)
return response
def compose_strings_that_are_not_time():
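    # Build a set of number words, ordinals, bare digits and filler tokens that
    # should not be treated as times; extract_time_from_text() skips any
    # candidate substring found here before calling dateparser.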
result = {
"me",
"time",
"on",
"most",
"more",
"to",
"an",
"or",
"be",
"ago",
"a",
"to get",
"fan",
"i",
"sit",
"too",
"day",
"week",
"month",
"year",
"days",
"weeks",
"months",
"years",
}
digits = list(string.digits)
digit_words = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
ordinals = ["zeroth", "first", "second", "third", "fourth", "fifth", "sixth", "seventh", "eighth", "ninth"]
two_digit_number_words = [
"ten",
"eleven",
"twelve",
"thirteen",
"fourteen",
"fifteen",
"sixteen",
"seventeen",
"eighteen",
"nineteen",
]
    multiples_of_ten = ["twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
ordinals += ["tenth", "eleventh", "twelfth"] + [td + "th" for td in two_digit_number_words[3:]]
two_digit_number_words += [" ".join([mt, dw]) for mt in multiples_of_ten for dw in digit_words]
number_words = digit_words + two_digit_number_words
ordinals += [" ".join([mt, dw]) for mt in multiples_of_ten for dw in ordinals[1:10]]
ordinals += [mt[:-1] + "ieth" for mt in multiples_of_ten]
numbers = digits + ["".join(sd) for r in range(1, 4) for sd in product(digits, repeat=r)]
all_number_strings = number_words + ordinals + numbers
result.update(all_number_strings)
additional_strings = [
" month",
" months",
" week",
" weeks",
" year",
" years",
" hour",
" hours",
" minute",
" minutes",
" second",
" seconds",
",",
" of the",
" of",
", and the",
")",
]
result.update([ns + ad_s for ns in all_number_strings for ad_s in additional_strings])
result.update([s + "," for s in result])
return result
NOT_TIME_STRINGS = compose_strings_that_are_not_time()
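# Illustrative members of NOT_TIME_STRINGS, derived from the construction above (shown only as
# an example): "two", "second", "42", "three weeks", "twenty one," -- strings that look numeric
# or temporal but should not be accepted as standalone time expressions below.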
def extract_time_from_text(text):
result = []
tokens = text.split()
for num_tokens in range(6, 0, -1):
for start in range(0, len(tokens) - num_tokens + 1):
substr = " ".join(tokens[start : start + num_tokens])
if substr.lower() in NOT_TIME_STRINGS:
continue
parsed = parse(substr, languages=["en"])
if parsed is not None:
result.append((substr, parsed))
return result
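# Illustrative usage sketch (assumption: `parse` used above is dateparser.parse, imported
# earlier in this module):
#   extract_time_from_text("i bought the game two years ago")
# returns (substring, datetime) pairs such as ("two years ago", <datetime ~2 years before now>)
# for every token span dateparser can interpret, while spans listed in NOT_TIME_STRINGS are skipped.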
def compose_experience_comment(user_text):
extracted = extract_time_from_text(user_text)
if extracted:
time = extracted[0][1]
else:
time = None
now = datetime.now() + timedelta(1) # correction for possible effect of time zone
if time is None:
experience_comment = "Interesting."
else:
now = now.replace(tzinfo=None)
time = time.replace(tzinfo=None)
experience = now - time
if experience < EXPERIENCE["negative"]:
experience_comment = "It seems you came from the future. You probably know what I will say next."
elif experience < EXPERIENCE["low"]:
experience_comment = "Oh, you are a beginner like me!"
elif experience < EXPERIENCE["moderate"]:
experience_comment = "So you are more experienced than me!"
elif experience < EXPERIENCE["large"]:
experience_comment = "It looks like you have a lot of experience with the game."
else:
experience_comment = "Wow! You have probably seen everything in the game."
return experience_comment, time is not None
def maybe_set_confidence_and_continue_based_on_previous_bot_phrase(vars, bot_text, response_candidate):
result = True
logger.info(f"bot_text: {repr(bot_text)}")
if any([p.lower() in bot_text.lower() for p in common_gaming.CAN_NOT_CONTINUE_PHRASES]):
response_candidate = error_response(vars)
state_utils.set_confidence(vars, CONF_0)
state_utils.set_can_continue(vars, common_constants.CAN_NOT_CONTINUE)
elif any([p.lower() in bot_text.lower() for p in common_gaming.CAN_CONTINUE_PHRASES]):
state_utils.set_confidence(vars, CONF_092_CAN_CONTINUE)
state_utils.set_can_continue(vars, common_constants.CAN_CONTINUE_SCENARIO)
else:
result = False
return result, response_candidate
| 33.497976 | 116 | 0.641165 |
4a1c42d4705892696cacf53b55f43de6bc08677f
| 12,380 |
py
|
Python
|
Script/clf_final.py
|
ywu94/Tencent-Ads-Algo-Comp-2020
|
8f008fc1cc21c832e6bdb76056d12ad357da5475
|
[
"MIT"
] | 27 |
2020-06-09T18:33:45.000Z
|
2021-11-15T11:49:54.000Z
|
Script/clf_final.py
|
Wannaman/Tencent-Ads-Algo-Comp-2020
|
8f008fc1cc21c832e6bdb76056d12ad357da5475
|
[
"MIT"
] | 2 |
2020-06-21T01:58:56.000Z
|
2020-11-12T18:12:40.000Z
|
Script/clf_final.py
|
Wannaman/Tencent-Ads-Algo-Comp-2020
|
8f008fc1cc21c832e6bdb76056d12ad357da5475
|
[
"MIT"
] | 15 |
2020-06-07T14:19:57.000Z
|
2020-07-16T08:27:42.000Z
|
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
# ========================== #
# Utility Layer #
# ========================== #
class Kmax_Pooling_Layer(nn.Module):
def __init__(self, dim, k, **kwargs):
super(Kmax_Pooling_Layer, self).__init__(**kwargs)
self.dim = dim
self.k = k
def forward(self, inp):
index = inp.topk(self.k, dim=self.dim)[1].sort(dim=self.dim)[0]
return inp.gather(self.dim, index)
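# Illustrative behaviour sketch (not part of the original file): with dim=2 and k=2 the layer
# keeps the two largest values along the given axis while preserving their original order, e.g.
#   Kmax_Pooling_Layer(dim=2, k=2)(torch.tensor([[[3., 1., 4., 2.]]]))  # -> tensor([[[3., 4.]]])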
# ========================== #
# Output Layer #
# ========================== #
class Output_MLP(nn.Module):
def __init__(self, inp_size, out_size, dropout=0.5, **kwargs):
super(Output_MLP, self).__init__(**kwargs)
self.inp_size = inp_size
self.out_size = out_size
self.dropout = dropout
self.l1 = nn.Linear(inp_size, 1024)
self.l2 = nn.Linear(1024, 512)
self.l3 = nn.Linear(512, 256)
self.l4 = nn.Linear(256, out_size)
self.bn1 = nn.BatchNorm1d(1024)
self.bn2 = nn.BatchNorm1d(512)
self.bn3 = nn.BatchNorm1d(256)
self.dropout1 = nn.Dropout(p=dropout)
self.dropout2 = nn.Dropout(p=dropout)
self.dropout3 = nn.Dropout(p=dropout)
self._reset_weights()
def _reset_weights(self):
nn.init.kaiming_uniform_(self.l1.weight.data, nonlinearity='leaky_relu', a=0.01)
nn.init.zeros_(self.l1.bias.data)
nn.init.kaiming_uniform_(self.l2.weight.data, nonlinearity='leaky_relu', a=0.01)
nn.init.zeros_(self.l2.bias.data)
nn.init.kaiming_uniform_(self.l3.weight.data, nonlinearity='leaky_relu', a=0.01)
nn.init.zeros_(self.l3.bias.data)
nn.init.kaiming_uniform_(self.l4.weight.data, nonlinearity='leaky_relu', a=0.01)
nn.init.zeros_(self.l4.bias.data)
def forward(self, inp):
inp = self.dropout1(F.leaky_relu(self.bn1(self.l1(inp)), negative_slope=0.01))
inp = self.dropout2(F.leaky_relu(self.bn2(self.l2(inp)), negative_slope=0.01))
inp = self.dropout3(F.leaky_relu(self.bn3(self.l3(inp)), negative_slope=0.01))
inp = self.l4(inp)
return inp
# ========================== #
# Extraction Layer #
# ========================== #
class Extraction_ResGRU(nn.Module):
def __init__(self, embed_size, hidden_size, max_seq_len=100, dropout=0.2, **kwargs):
super(Extraction_ResGRU, self).__init__(**kwargs)
self.embed_size = embed_size
self.hidden_size = hidden_size
self.max_seq_len = max_seq_len
self.dropout = dropout
self.gru1 = nn.GRU(input_size=embed_size, hidden_size=hidden_size, batch_first=True, bidirectional=True)
self.gru2 = nn.GRU(input_size=2*hidden_size, hidden_size=2*hidden_size, batch_first=True)
self.gru3 = nn.GRU(input_size=2*hidden_size, hidden_size=2*hidden_size, batch_first=True)
self.ln1 = nn.LayerNorm(embed_size)
self.ln2 = nn.LayerNorm(2*hidden_size)
self.ln3 = nn.LayerNorm(2*hidden_size)
self.dropout1 = nn.Dropout(p=dropout)
self.dropout2 = nn.Dropout(p=dropout)
self.dropout3 = nn.Dropout(p=dropout)
def _pack(self, inp, inp_len):
return nn.utils.rnn.pack_padded_sequence(inp, batch_first=True, lengths=inp_len, enforce_sorted=False)
def _unpack(self, inp):
return nn.utils.rnn.pad_packed_sequence(inp, batch_first=True, total_length=self.max_seq_len)[0]
def forward(self, inp, inp_len):
inp = self._pack(self.dropout1(self.ln1(inp)), inp_len)
inp = self._unpack(self.gru1(inp)[0])
out = self._pack(self.dropout2(self.ln2(inp)), inp_len)
inp = inp + self.dropout2(self._unpack(self.gru2(out)[0]))
out = self._pack(self.ln3(inp), inp_len)
inp = inp + self.dropout3(self._unpack(self.gru3(out)[0]))
return inp
class Extraction_CNN(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
super(Extraction_CNN, self).__init__(**kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.conv1 = nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size)
self.bn1 = nn.BatchNorm1d(out_channels)
self.ac1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv1d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size)
self.bn2 = nn.BatchNorm1d(out_channels)
self.ac2 = nn.ReLU(inplace=True)
def forward(self, inp):
inp = self.ac1(self.bn1(self.conv1(inp)))
inp = self.ac2(self.bn2(self.conv2(inp)))
return inp
class PreLN_Transformer_Encoder(nn.Module):
def __init__(self, d_model, n_head, intermediate_size=2048, device=None, dropout=0.1, **kwargs):
super(PreLN_Transformer_Encoder, self).__init__(**kwargs)
self.d_model = d_model
self.n_head = n_head
self.intermediate_size = intermediate_size
self.device = device if device else torch.device('cpu')
self.dropout = dropout
self.mha = nn.MultiheadAttention(d_model, n_head, dropout=dropout)
self.ln1 = nn.LayerNorm(d_model)
self.ln2 = nn.LayerNorm(d_model)
self.l1 = nn.Linear(d_model, intermediate_size)
self.l2 = nn.Linear(intermediate_size, d_model)
self.attn_dropout = nn.Dropout(p=dropout)
self.dropout1 = nn.Dropout(p=dropout)
self.dropout2 = nn.Dropout(p=dropout)
self._reset_weights()
def _reset_weights(self):
nn.init.kaiming_uniform_(self.l1.weight.data, nonlinearity='relu')
nn.init.zeros_(self.l1.bias.data)
nn.init.kaiming_uniform_(self.l2.weight.data, nonlinearity='relu')
nn.init.zeros_(self.l2.bias.data)
def _get_padding_mask(self, batch_size, seq_len, inp_len):
padding_mask = np.ones((batch_size, seq_len))
for index, l in enumerate(inp_len):
padding_mask[index,:l] = 0
return torch.from_numpy(padding_mask).bool().to(self.device)
def forward(self, inp, inp_len):
batch_size, seq_len, _ = inp.shape
padding_mask = self._get_padding_mask(batch_size, seq_len, inp_len)
inp1 = self.ln1(inp).permute(1,0,2)
inp2 = self.mha(inp1, inp1, inp1, key_padding_mask=padding_mask)[0].permute(1,0,2)
inp = inp + self.attn_dropout(inp2)
inp1 = self.ln2(inp)
inp2 = self.l2(self.dropout1(F.relu(self.l1(inp1))))
inp = inp + self.dropout2(inp2)
return inp
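# Illustrative note (sketch) on the key_padding_mask built in _get_padding_mask above:
# for inp_len = [2, 4] and seq_len = 4 the mask is
#   [[False, False, True,  True ],
#    [False, False, False, False]]
# where True marks padded positions that nn.MultiheadAttention ignores.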
class Extraction_PreLN_Transformer(nn.Module):
def __init__(self, n_layer, d_model, n_head, intermediate_size=2048, device=None, dropout=0.1, **kwargs):
super(Extraction_PreLN_Transformer, self).__init__(**kwargs)
self.n_layer = n_layer
self.d_model = d_model
self.n_head = n_head
self.intermediate_size = intermediate_size
self.device = device if device else torch.device('cpu')
self.dropout = dropout
for index in range(n_layer):
setattr(self, 'pre_ln_tf_encoder_{}'.format(index),
PreLN_Transformer_Encoder(d_model, n_head, intermediate_size=intermediate_size, device=self.device, dropout=dropout))
def forward(self, inp, inp_len):
for index in range(self.n_layer):
inp = getattr(self, 'pre_ln_tf_encoder_{}'.format(index))(inp, inp_len)
return inp
# ========================== #
# Final Model - 1 #
# ========================== #
class Final_ResGRU(nn.Module):
def __init__(self, out_size, embed_size, hidden_size, max_seq_len=100, rnn_dropout=0.2, dnn_dropout=0.5, **kwargs):
super(Final_ResGRU, self).__init__(**kwargs)
self.out_size = out_size
self.embed_size = embed_size
self.hidden_size = hidden_size
self.max_seq_len = max_seq_len
self.rnn_dropout = rnn_dropout
self.dnn_dropout = dnn_dropout
self.mlp_inp_size = sum(map(lambda x:4*x, hidden_size))
for index, (e_size, h_size) in enumerate(zip(embed_size, hidden_size)):
setattr(self, 'ResGRU_{}'.format(index), Extraction_ResGRU(e_size, h_size, max_seq_len=max_seq_len, dropout=rnn_dropout))
self.bn = nn.BatchNorm1d(self.mlp_inp_size)
self.dropout = nn.Dropout(p=dnn_dropout)
self.MLP = Output_MLP(self.mlp_inp_size, out_size, dropout=dnn_dropout)
def forward(self, *args):
batch_size, inp_len, buf = args[0].shape[0], args[-1], []
for index, inp in enumerate(args[:-1]):
inp = getattr(self, 'ResGRU_{}'.format(index))(inp, inp_len)
out1 = inp[np.arange(len(inp_len)),inp_len-1,:]
out2 = torch.stack([torch.max(inp[index,:l,:], dim=0)[0] for index, l in enumerate(inp_len)], dim=0)
buf.append(torch.cat((out1, out2), dim=1))
out = torch.cat(buf, dim=1)
out = self.MLP(self.dropout(F.leaky_relu(self.bn(out), negative_slope=0.01)))
return out
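# Illustrative instantiation sketch (hypothetical sizes and tensor names, not taken from the
# original training script):
#   model = Final_ResGRU(out_size=10, embed_size=[128, 64], hidden_size=[256, 128])
#   logits = model(emb_a, emb_b, seq_len)
# where emb_a has shape (batch, 100, 128), emb_b has shape (batch, 100, 64) and seq_len holds
# the true sequence lengths used for packing and for selecting the last valid hidden state.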
# ========================== #
# Final Model - 2 #
# ========================== #
class Final_ResGRU_CNN(nn.Module):
def __init__(self, out_size, embed_size, hidden_size, conv_channel=64, kernel_size=3, top_k=2, max_seq_len=100, rnn_dropout=0.2, dnn_dropout=0.5, **kwargs):
super(Final_ResGRU_CNN, self).__init__(**kwargs)
self.out_size = out_size
self.embed_size = embed_size
self.hidden_size = hidden_size
self.conv_channel = conv_channel
self.kernel_size = kernel_size
self.top_k = top_k
self.max_seq_len = max_seq_len
self.rnn_dropout = rnn_dropout
self.dnn_dropout = dnn_dropout
self.mlp_inp_size = sum(map(lambda x:4*x+conv_channel*top_k, hidden_size))
for index, (e_size, h_size) in enumerate(zip(embed_size, hidden_size)):
setattr(self, 'ResGRU_{}'.format(index), Extraction_ResGRU(e_size, h_size, max_seq_len=max_seq_len, dropout=rnn_dropout))
setattr(self, 'CNN_{}'.format(index), Extraction_CNN(2*h_size, conv_channel, kernel_size))
self.Kmax = Kmax_Pooling_Layer(2, top_k)
self.bn = nn.BatchNorm1d(self.mlp_inp_size)
self.dropout = nn.Dropout(p=dnn_dropout)
self.MLP = Output_MLP(self.mlp_inp_size, out_size, dropout=dnn_dropout)
def forward(self, *args):
batch_size, inp_len, buf = args[0].shape[0], args[-1], []
for index, inp in enumerate(args[:-1]):
inp = getattr(self, 'ResGRU_{}'.format(index))(inp, inp_len)
out1 = inp[np.arange(len(inp_len)),inp_len-1,:]
out2 = torch.stack([torch.max(inp[index,:l,:], dim=0)[0] for index, l in enumerate(inp_len)], dim=0)
out3 = self.Kmax(getattr(self, 'CNN_{}'.format(index))(inp.permute(0,2,1))).view(batch_size, -1)
buf.append(torch.cat((out1, out2, out3), dim=1))
out = torch.cat(buf, dim=1)
out = self.MLP(self.dropout(F.leaky_relu(self.bn(out), negative_slope=0.01)))
return out
# ========================== #
# Final Model - 3 #
# ========================== #
class Final_PreLN_Transformer(nn.Module):
def __init__(self, out_size, embed_size, n_layer=1, n_head=4, intermediate_size=2048, device=None, max_seq_len=100, tf_dropout=0.1, rnn_dropout=0.2, dnn_dropout=0.5, **kwargs):
super(Final_PreLN_Transformer, self).__init__(**kwargs)
self.out_size = out_size
self.embed_size = embed_size
self.n_layer = n_layer
self.n_head = n_head
self.intermediate_size = intermediate_size
self.device = device if device else torch.device('cpu')
self.max_seq_len = max_seq_len
self.tf_dropout = tf_dropout
self.rnn_dropout = rnn_dropout
self.dnn_dropout = dnn_dropout
self.mlp_inp_size = sum(map(lambda x:2*x, embed_size))
for index, e_size in enumerate(embed_size):
setattr(self, 'PreLN_TF_{}'.format(index), Extraction_PreLN_Transformer(n_layer, e_size, n_head, intermediate_size=intermediate_size, dropout=tf_dropout, device=self.device))
setattr(self, 'LN_{}'.format(index), nn.LayerNorm(e_size))
setattr(self, 'LSTM_{}'.format(index), nn.LSTM(input_size=e_size, hidden_size=e_size, batch_first=True))
setattr(self, 'Dropout_{}'.format(index), nn.Dropout(p=rnn_dropout))
self.bn = nn.BatchNorm1d(self.mlp_inp_size)
self.dropout = nn.Dropout(p=dnn_dropout)
self.MLP = Output_MLP(self.mlp_inp_size, out_size, dropout=dnn_dropout)
def _pack(self, inp, inp_len):
return nn.utils.rnn.pack_padded_sequence(inp, batch_first=True, lengths=inp_len, enforce_sorted=False)
def _unpack(self, inp):
return nn.utils.rnn.pad_packed_sequence(inp, batch_first=True, total_length=self.max_seq_len)[0]
def forward(self, *args):
batch_size, inp_len, buf = args[0].shape[0], args[-1], []
for index, inp in enumerate(args[:-1]):
inp = getattr(self, 'LN_{}'.format(index))(getattr(self, 'PreLN_TF_{}'.format(index))(inp, inp_len))
inp = getattr(self, 'LSTM_{}'.format(index))(self._pack(inp, inp_len))[0]
inp = getattr(self, 'Dropout_{}'.format(index))(self._unpack(inp))
out1 = inp[np.arange(len(inp_len)),inp_len-1,:]
out2 = torch.stack([torch.max(inp[index,:l,:], dim=0)[0] for index, l in enumerate(inp_len)], dim=0)
buf.append(torch.cat((out1, out2), dim=1))
out = torch.cat(buf, dim=1)
out = self.MLP(self.dropout(F.leaky_relu(self.bn(out), negative_slope=0.01)))
return out
| 39.807074 | 177 | 0.706139 |
4a1c42fbf8fe30d66fe52b0cd3eea22798d99572
| 20,545 |
py
|
Python
|
flux_combined_high_binding/model_609.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_combined_high_binding/model_609.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_combined_high_binding/model_609.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 72500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
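# Illustrative simulation sketch (not part of the exported model; assumes PySB's standard
# ScipyOdeSimulator API is available):
#   from pysb.simulator import ScipyOdeSimulator
#   import numpy as np
#   tspan = np.linspace(0, 20000, 100)
#   result = ScipyOdeSimulator(model, tspan=tspan).run()
#   result.observables['ParpC_obs']  # e.g. the cleaved-PARP time course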
| 95.115741 | 798 | 0.804089 |
4a1c4352dc9152aa0f20a73eb26aa75d6ab13b37
| 33,932 |
py
|
Python
|
saleor/checkout/utils.py
|
qylove516/saleor
|
12b3901f1bc513b57bc0fb3e7e67d740fd77709b
|
[
"BSD-3-Clause"
] | 2 |
2019-02-21T01:54:54.000Z
|
2019-02-21T01:55:04.000Z
|
saleor/checkout/utils.py
|
qylove516/saleor
|
12b3901f1bc513b57bc0fb3e7e67d740fd77709b
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/checkout/utils.py
|
qylove516/saleor
|
12b3901f1bc513b57bc0fb3e7e67d740fd77709b
|
[
"BSD-3-Clause"
] | null | null | null |
"""Cart-related utility functions."""
from datetime import date, timedelta
from functools import wraps
from uuid import UUID
from django.conf import settings
from django.contrib import messages
from django.db import transaction
from django.db.models import Sum
from django.utils.encoding import smart_text
from django.utils.translation import get_language, pgettext, pgettext_lazy
from prices import TaxedMoneyRange
from . import AddressType, logger
from ..account.forms import get_address_form
from ..account.models import Address
from ..account.utils import store_user_address
from ..core.exceptions import InsufficientStock
from ..core.utils import to_local_currency
from ..core.utils.taxes import ZERO_MONEY, get_taxes_for_country
from ..discount import VoucherType
from ..discount.models import NotApplicable, Voucher
from ..discount.utils import (
get_products_voucher_discount, get_shipping_voucher_discount,
get_value_voucher_discount, increase_voucher_usage)
from ..order.models import Order
from ..shipping.models import ShippingMethod
from .forms import (
AddressChoiceForm, AnonymousUserBillingForm, AnonymousUserShippingForm,
BillingAddressChoiceForm)
from .models import Cart
COOKIE_NAME = 'cart'
def set_cart_cookie(simple_cart, response):
"""Update response with a cart token cookie."""
# FIXME: document why session is not used
max_age = int(timedelta(days=30).total_seconds())
response.set_signed_cookie(COOKIE_NAME, simple_cart.token, max_age=max_age)
def contains_unavailable_variants(cart):
"""Return `True` if cart contains any unfulfillable lines."""
try:
for line in cart:
line.variant.check_quantity(line.quantity)
except InsufficientStock:
return True
return False
def token_is_valid(token):
"""Validate a cart token."""
if token is None:
return False
if isinstance(token, UUID):
return True
try:
UUID(token)
except ValueError:
return False
return True
def remove_unavailable_variants(cart):
"""Remove any unavailable items from cart."""
for line in cart:
try:
add_variant_to_cart(
cart, line.variant, line.quantity, replace=True)
except InsufficientStock as e:
quantity = e.item.quantity_available
add_variant_to_cart(cart, line.variant, quantity, replace=True)
def get_variant_prices_from_lines(lines):
"""Get the price of each individual item within the lines."""
return [
line.variant.get_price()
for line in lines
for item in range(line.quantity)]
def get_prices_of_discounted_products(lines, discounted_products):
"""Get prices of variants belonging to the discounted products."""
# If there are no discounted_products,
# it means that all products are discounted
if discounted_products:
lines = (
line for line in lines
if line.variant.product in discounted_products)
return get_variant_prices_from_lines(lines)
def get_prices_of_products_in_discounted_collections(
lines, discounted_collections):
"""Get prices of variants belonging to the discounted collections."""
# If there are no discounted collections,
# it means that all of them are discounted
if discounted_collections:
discounted_collections = set(discounted_collections)
lines = (
line for line in lines
if line.variant and
set(line.variant.product.collections.all()).intersection(
discounted_collections))
return get_variant_prices_from_lines(lines)
def get_prices_of_products_in_discounted_categories(
lines, discounted_categories):
"""Get prices of variants belonging to the discounted categories.
The product must be assigned directly to the discounted category;
assigning it to a child category won't work.
"""
# If there are no discounted categories,
# it means that all of them are discounted
if discounted_categories:
discounted_categories = set(discounted_categories)
lines = (
line for line in lines
if line.variant and
line.variant.product.category in discounted_categories)
return get_variant_prices_from_lines(lines)
def check_product_availability_and_warn(request, cart):
"""Warn if cart contains any lines that cannot be fulfilled."""
if contains_unavailable_variants(cart):
msg = pgettext_lazy(
'Cart warning message',
'Sorry. We don\'t have that many items in stock. '
'Quantity was set to maximum available for now.')
messages.warning(request, msg)
remove_unavailable_variants(cart)
def find_and_assign_anonymous_cart(queryset=Cart.objects.all()):
"""Assign cart from cookie to request user."""
def get_cart(view):
@wraps(view)
def func(request, *args, **kwargs):
response = view(request, *args, **kwargs)
token = request.get_signed_cookie(COOKIE_NAME, default=None)
if not token_is_valid(token):
return response
cart = get_anonymous_cart_from_token(
token=token, cart_queryset=queryset)
if cart is None:
return response
if request.user.is_authenticated:
with transaction.atomic():
change_cart_user(cart, request.user)
carts_to_close = Cart.objects.filter(user=request.user)
carts_to_close = carts_to_close.exclude(token=token)
carts_to_close.delete()
response.delete_cookie(COOKIE_NAME)
return response
return func
return get_cart
def get_or_create_anonymous_cart_from_token(
token, cart_queryset=Cart.objects.all()):
"""Return an open unassigned cart with given token or create a new one."""
return cart_queryset.filter(token=token, user=None).get_or_create(
defaults={'user': None})[0]
def get_or_create_user_cart(user, cart_queryset=Cart.objects.all()):
"""Return an open cart for given user or create a new one."""
defaults = {
'shipping_address': user.default_shipping_address,
'billing_address': user.default_billing_address}
return cart_queryset.get_or_create(user=user, defaults=defaults)[0]
def get_anonymous_cart_from_token(token, cart_queryset=Cart.objects.all()):
"""Return an open unassigned cart with given token if any."""
return cart_queryset.filter(token=token, user=None).first()
def get_user_cart(user, cart_queryset=Cart.objects.all()):
"""Return an open cart for given user if any."""
return cart_queryset.filter(user=user).first()
def get_or_create_cart_from_request(request, cart_queryset=Cart.objects.all()):
"""Fetch cart from database or create a new one based on cookie."""
if request.user.is_authenticated:
return get_or_create_user_cart(request.user, cart_queryset)
token = request.get_signed_cookie(COOKIE_NAME, default=None)
return get_or_create_anonymous_cart_from_token(token, cart_queryset)
def get_cart_from_request(request, cart_queryset=Cart.objects.all()):
"""Fetch cart from database or return a new instance based on cookie."""
if request.user.is_authenticated:
cart = get_user_cart(request.user, cart_queryset)
user = request.user
else:
token = request.get_signed_cookie(COOKIE_NAME, default=None)
cart = get_anonymous_cart_from_token(token, cart_queryset)
user = None
if cart is not None:
return cart
if user:
return Cart(user=user)
return Cart()
def get_or_create_db_cart(cart_queryset=Cart.objects.all()):
"""Decorate view to always receive a saved cart instance.
Changes the view signature from `func(request, ...)` to
`func(request, cart, ...)`.
If no matching cart is found, one will be created and a cookie will be set
for users who are not logged in.
"""
# FIXME: behave like middleware and assign cart to request instead
def get_cart(view):
@wraps(view)
def func(request, *args, **kwargs):
cart = get_or_create_cart_from_request(request, cart_queryset)
response = view(request, cart, *args, **kwargs)
if not request.user.is_authenticated:
set_cart_cookie(cart, response)
return response
return func
return get_cart
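# Illustrative usage sketch (hypothetical view function, not part of this module):
#   @get_or_create_db_cart()
#   def cart_index(request, cart):
#       ...  # `cart` is always a saved Cart; a cart cookie is set for anonymous users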
def get_or_empty_db_cart(cart_queryset=Cart.objects.all()):
"""Decorate view to receive a cart if one exists.
Changes the view signature from `func(request, ...)` to
`func(request, cart, ...)`.
If no matching cart is found, an unsaved `Cart` instance will be used.
"""
# FIXME: behave like middleware and assign cart to request instead
def get_cart(view):
@wraps(view)
def func(request, *args, **kwargs):
cart = get_cart_from_request(request, cart_queryset)
return view(request, cart, *args, **kwargs)
return func
return get_cart
def get_cart_data(cart, shipping_range, currency, discounts, taxes):
"""Return a JSON-serializable representation of the cart."""
cart_total = None
local_cart_total = None
shipping_required = False
total_with_shipping = None
local_total_with_shipping = None
if cart:
cart_total = cart.get_subtotal(discounts, taxes)
local_cart_total = to_local_currency(cart_total, currency)
shipping_required = cart.is_shipping_required()
total_with_shipping = TaxedMoneyRange(
start=cart_total, stop=cart_total)
if shipping_required and shipping_range:
total_with_shipping = shipping_range + cart_total
local_total_with_shipping = to_local_currency(
total_with_shipping, currency)
return {
'cart_total': cart_total,
'local_cart_total': local_cart_total,
'shipping_required': shipping_required,
'total_with_shipping': total_with_shipping,
'local_total_with_shipping': local_total_with_shipping}
def find_open_cart_for_user(user):
"""Find an open cart for the given user."""
carts = user.carts.all()
open_cart = carts.first()
if len(carts) > 1:
logger.warning('%s has more than one open basket', user)
carts.exclude(token=open_cart.token).delete()
return open_cart
def change_cart_user(cart, user):
"""Assign cart to a user.
If the user already has an open cart assigned, cancel it.
"""
open_cart = find_open_cart_for_user(user)
if open_cart is not None:
open_cart.delete()
cart.user = user
cart.shipping_address = user.default_shipping_address
cart.billing_address = user.default_billing_address
cart.save(update_fields=['user', 'shipping_address', 'billing_address'])
def update_cart_quantity(cart):
"""Update the total quantity in cart."""
total_lines = cart.lines.aggregate(
total_quantity=Sum('quantity'))['total_quantity']
if not total_lines:
total_lines = 0
cart.quantity = total_lines
cart.save(update_fields=['quantity'])
def add_variant_to_cart(
cart, variant, quantity=1, replace=False, check_quantity=True):
"""Add a product variant to cart.
The line's `data` field may be used to differentiate between items with
different customization options.
If `replace` is truthy then any previous quantity is discarded instead
of added to.
"""
line, _ = cart.lines.get_or_create(
variant=variant, defaults={'quantity': 0, 'data': {}})
new_quantity = quantity if replace else (quantity + line.quantity)
if new_quantity < 0:
raise ValueError('%r is not a valid quantity (results in %r)' % (
quantity, new_quantity))
if new_quantity == 0:
line.delete()
else:
if check_quantity:
variant.check_quantity(new_quantity)
line.quantity = new_quantity
line.save(update_fields=['quantity'])
update_cart_quantity(cart)
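# Illustrative usage sketch (`cart` and `variant` are assumed to be existing model instances):
#   add_variant_to_cart(cart, variant, quantity=2)                # add two units to the line
#   add_variant_to_cart(cart, variant, quantity=5, replace=True)  # set the line to exactly five
#   add_variant_to_cart(cart, variant, quantity=0, replace=True)  # remove the line entirely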
def get_shipping_address_forms(cart, user_addresses, data, country):
"""Forms initialized with data depending on shipping address in cart."""
shipping_address = (
cart.shipping_address or cart.user.default_shipping_address)
if shipping_address and shipping_address in user_addresses:
address_form, preview = get_address_form(
data, country_code=country.code,
initial={'country': country})
addresses_form = AddressChoiceForm(
data, addresses=user_addresses,
initial={'address': shipping_address.id})
elif shipping_address:
address_form, preview = get_address_form(
data, country_code=shipping_address.country.code,
instance=shipping_address)
addresses_form = AddressChoiceForm(
data, addresses=user_addresses)
else:
address_form, preview = get_address_form(
data, country_code=country.code,
initial={'country': country})
addresses_form = AddressChoiceForm(
data, addresses=user_addresses)
return address_form, addresses_form, preview
def update_shipping_address_in_cart(cart, user_addresses, data, country):
"""Return shipping address choice forms and whether an address was updated."""
address_form, addresses_form, preview = (
get_shipping_address_forms(cart, user_addresses, data, country))
updated = False
if addresses_form.is_valid() and not preview:
use_existing_address = (
addresses_form.cleaned_data['address'] !=
AddressChoiceForm.NEW_ADDRESS)
if use_existing_address:
address_id = addresses_form.cleaned_data['address']
address = Address.objects.get(id=address_id)
change_shipping_address_in_cart(cart, address)
updated = True
elif address_form.is_valid():
address = address_form.save()
change_shipping_address_in_cart(cart, address)
updated = True
return addresses_form, address_form, updated
def update_shipping_address_in_anonymous_cart(cart, data, country):
"""Return shipping address choice forms and whether an address was updated."""
address_form, preview = get_address_form(
data, country_code=country.code,
autocomplete_type='shipping',
instance=cart.shipping_address,
initial={'country': country})
user_form = AnonymousUserShippingForm(
data if not preview else None, instance=cart)
updated = False
if user_form.is_valid() and address_form.is_valid():
user_form.save()
address = address_form.save()
change_shipping_address_in_cart(cart, address)
updated = True
return user_form, address_form, updated
def get_billing_forms_with_shipping(cart, data, user_addresses, country):
"""Get billing forms based on the current billing and shipping data."""
shipping_address = cart.shipping_address
billing_address = cart.billing_address or Address(country=country)
if not billing_address.id or billing_address == shipping_address:
address_form, preview = get_address_form(
data, country_code=shipping_address.country.code,
autocomplete_type='billing',
initial={'country': shipping_address.country})
addresses_form = BillingAddressChoiceForm(
data, addresses=user_addresses, initial={
'address': BillingAddressChoiceForm.SHIPPING_ADDRESS})
elif billing_address in user_addresses:
address_form, preview = get_address_form(
data, country_code=billing_address.country.code,
autocomplete_type='billing',
initial={'country': billing_address.country})
addresses_form = BillingAddressChoiceForm(
data, addresses=user_addresses, initial={
'address': billing_address.id})
else:
address_form, preview = get_address_form(
data, country_code=billing_address.country.code,
autocomplete_type='billing',
initial={'country': billing_address.country},
instance=billing_address)
addresses_form = BillingAddressChoiceForm(
data, addresses=user_addresses, initial={
'address': BillingAddressChoiceForm.NEW_ADDRESS})
return address_form, addresses_form, preview
def update_billing_address_in_cart_with_shipping(
cart, user_addresses, data, country):
"""Return billing address choice forms and whether an address was updated."""
address_form, addresses_form, preview = get_billing_forms_with_shipping(
cart, data, user_addresses, country)
updated = False
if addresses_form.is_valid() and not preview:
address = None
address_id = addresses_form.cleaned_data['address']
if address_id == BillingAddressChoiceForm.SHIPPING_ADDRESS:
if cart.user and cart.shipping_address in user_addresses:
address = cart.shipping_address
else:
address = cart.shipping_address.get_copy()
elif address_id != BillingAddressChoiceForm.NEW_ADDRESS:
address = user_addresses.get(id=address_id)
elif address_form.is_valid():
address = address_form.save()
if address:
change_billing_address_in_cart(cart, address)
updated = True
return addresses_form, address_form, updated
def get_anonymous_summary_without_shipping_forms(cart, data, country):
"""Forms initialized with data depending on addresses in cart."""
billing_address = cart.billing_address
if billing_address:
address_form, preview = get_address_form(
data, country_code=billing_address.country.code,
autocomplete_type='billing', instance=billing_address)
else:
address_form, preview = get_address_form(
data, country_code=country.code,
autocomplete_type='billing', initial={'country': country})
return address_form, preview
def update_billing_address_in_anonymous_cart(cart, data, country):
"""Return billing address choice forms and whether an address was updated."""
address_form, preview = get_anonymous_summary_without_shipping_forms(
cart, data, country)
user_form = AnonymousUserBillingForm(data, instance=cart)
updated = False
if user_form.is_valid() and address_form.is_valid() and not preview:
user_form.save()
address = address_form.save()
change_billing_address_in_cart(cart, address)
updated = True
return user_form, address_form, updated
def get_summary_without_shipping_forms(cart, user_addresses, data, country):
"""Forms initialized with data depending on addresses in cart."""
billing_address = cart.billing_address
if billing_address and billing_address in user_addresses:
address_form, preview = get_address_form(
data,
autocomplete_type='billing',
country_code=billing_address.country.code,
initial={'country': billing_address.country})
initial_address = billing_address.id
elif billing_address:
address_form, preview = get_address_form(
data,
autocomplete_type='billing',
country_code=billing_address.country.code,
initial={'country': billing_address.country},
instance=billing_address)
initial_address = AddressChoiceForm.NEW_ADDRESS
else:
address_form, preview = get_address_form(
data,
autocomplete_type='billing',
country_code=country.code,
initial={'country': country})
if cart.user and cart.user.default_billing_address:
initial_address = cart.user.default_billing_address.id
else:
initial_address = AddressChoiceForm.NEW_ADDRESS
addresses_form = AddressChoiceForm(
data, addresses=user_addresses, initial={'address': initial_address})
return address_form, addresses_form, preview
def update_billing_address_in_cart(cart, user_addresses, data, country):
"""Return billing address choice forms and whether an address was updated."""
address_form, addresses_form, preview = (
get_summary_without_shipping_forms(
cart, user_addresses, data, country))
updated = False
if addresses_form.is_valid():
use_existing_address = (
addresses_form.cleaned_data['address'] !=
AddressChoiceForm.NEW_ADDRESS)
if use_existing_address:
address_id = addresses_form.cleaned_data['address']
address = Address.objects.get(id=address_id)
change_billing_address_in_cart(cart, address)
updated = True
elif address_form.is_valid():
address = address_form.save()
change_billing_address_in_cart(cart, address)
updated = True
return addresses_form, address_form, updated
def _check_new_cart_address(cart, address, address_type):
"""Check if an address in the cart has changed and whether to remove the old one."""
if address_type == AddressType.BILLING:
old_address = cart.billing_address
else:
old_address = cart.shipping_address
has_address_changed = any([
not address and old_address,
address and not old_address,
address and old_address and address != old_address])
remove_old_address = (
has_address_changed and
old_address is not None and
(not cart.user or old_address not in cart.user.addresses.all()))
return has_address_changed, remove_old_address
def change_billing_address_in_cart(cart, address):
"""Save billing address in cart if changed.
Remove previously saved address if not connected to any user.
"""
changed, remove = _check_new_cart_address(
cart, address, AddressType.BILLING)
if changed:
if remove:
cart.billing_address.delete()
cart.billing_address = address
cart.save(update_fields=['billing_address'])
def change_shipping_address_in_cart(cart, address):
"""Save shipping address in cart if changed.
Remove previously saved address if not connected to any user.
"""
changed, remove = _check_new_cart_address(
cart, address, AddressType.SHIPPING)
if changed:
if remove:
cart.shipping_address.delete()
cart.shipping_address = address
cart.save(update_fields=['shipping_address'])
def get_cart_data_for_checkout(cart, discounts, taxes):
"""Data shared between views in checkout process."""
lines = [(line, line.get_total(discounts, taxes)) for line in cart]
subtotal = cart.get_subtotal(discounts, taxes)
total = cart.get_total(discounts, taxes)
shipping_price = cart.get_shipping_price(taxes)
return {
'cart': cart,
'cart_are_taxes_handled': bool(taxes),
'cart_lines': lines,
'cart_shipping_price': shipping_price,
'cart_subtotal': subtotal,
'cart_total': total}
def _get_shipping_voucher_discount_for_cart(voucher, cart):
"""Calculate discount value for a voucher of shipping type."""
if not cart.is_shipping_required():
msg = pgettext(
'Voucher not applicable',
'Your order does not require shipping.')
raise NotApplicable(msg)
shipping_method = cart.shipping_method
if not shipping_method:
msg = pgettext(
'Voucher not applicable',
'Please select a shipping method first.')
raise NotApplicable(msg)
# check if voucher is limited to specified countries
shipping_country = cart.shipping_address.country
if voucher.countries and shipping_country.code not in voucher.countries:
msg = pgettext(
'Voucher not applicable',
'This offer is not valid in your country.')
raise NotApplicable(msg)
return get_shipping_voucher_discount(
voucher, cart.get_subtotal(), shipping_method.get_total())
def _get_products_voucher_discount(order_or_cart, voucher):
"""Calculate products discount value for a voucher, depending on its type.
"""
if voucher.type == VoucherType.PRODUCT:
prices = get_prices_of_discounted_products(
order_or_cart.lines.all(), voucher.products.all())
elif voucher.type == VoucherType.COLLECTION:
prices = get_prices_of_products_in_discounted_collections(
order_or_cart.lines.all(), voucher.collections.all())
elif voucher.type == VoucherType.CATEGORY:
prices = get_prices_of_products_in_discounted_categories(
order_or_cart.lines.all(), voucher.categories.all())
if not prices:
msg = pgettext(
'Voucher not applicable',
'This offer is only valid for selected items.')
raise NotApplicable(msg)
return get_products_voucher_discount(voucher, prices)
def get_voucher_discount_for_cart(voucher, cart):
"""Calculate discount value depending on voucher and discount types.
Raise NotApplicable if voucher of given type cannot be applied.
"""
if voucher.type == VoucherType.VALUE:
return get_value_voucher_discount(voucher, cart.get_subtotal())
if voucher.type == VoucherType.SHIPPING:
return _get_shipping_voucher_discount_for_cart(voucher, cart)
if voucher.type in (
VoucherType.PRODUCT, VoucherType.COLLECTION, VoucherType.CATEGORY):
return _get_products_voucher_discount(cart, voucher)
raise NotImplementedError('Unknown discount type')
def get_voucher_for_cart(cart, vouchers=None):
"""Return voucher with voucher code saved in cart if active or None."""
if cart.voucher_code is not None:
if vouchers is None:
vouchers = Voucher.objects.active(date=date.today())
try:
return vouchers.get(code=cart.voucher_code)
except Voucher.DoesNotExist:
return None
return None
def recalculate_cart_discount(cart, discounts, taxes):
"""Recalculate `cart.discount` based on the voucher.
Will clear both voucher and discount if the discount is no longer
applicable.
"""
voucher = get_voucher_for_cart(cart)
if voucher is not None:
try:
discount = get_voucher_discount_for_cart(voucher, cart)
except NotApplicable:
remove_voucher_from_cart(cart)
else:
subtotal = cart.get_subtotal(discounts, taxes).gross
cart.discount_amount = min(discount, subtotal)
cart.discount_name = str(voucher)
cart.translated_discount_name = (
voucher.translated.name
if voucher.translated.name != voucher.name else '')
cart.save(
update_fields=[
'translated_discount_name',
'discount_amount', 'discount_name'])
else:
remove_voucher_from_cart(cart)
def add_voucher_to_cart(voucher, cart):
"""Add voucher data to cart.
Raise NotApplicable if voucher of given type cannot be applied."""
discount_amount = get_voucher_discount_for_cart(voucher, cart)
cart.voucher_code = voucher.code
cart.discount_name = voucher.name
cart.translated_discount_name = (
voucher.translated.name
if voucher.translated.name != voucher.name else '')
cart.discount_amount = discount_amount
cart.save(
update_fields=[
'voucher_code', 'discount_name', 'translated_discount_name',
'discount_amount'])
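# A usage sketch (not part of the original module): how a checkout view might
# apply a voucher code entered by the customer using the helpers above. The
# helper name and the error message are illustrative assumptions.
def _apply_voucher_code_example(cart, code):
    vouchers = Voucher.objects.active(date=date.today())
    try:
        voucher = vouchers.get(code=code)
    except Voucher.DoesNotExist:
        return False, 'Invalid voucher code.'
    try:
        add_voucher_to_cart(voucher, cart)
    except NotApplicable as exc:
        return False, str(exc)
    return True, None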
def remove_voucher_from_cart(cart):
"""Remove voucher data from cart."""
cart.voucher_code = None
cart.discount_name = None
cart.translated_discount_name = None
cart.discount_amount = ZERO_MONEY
cart.save(
update_fields=[
'voucher_code', 'discount_name', 'translated_discount_name',
'discount_amount'])
def get_taxes_for_cart(cart, default_taxes):
"""Return taxes (if handled) due to shipping address or default one."""
if not settings.VATLAYER_ACCESS_KEY:
return None
if cart.shipping_address:
return get_taxes_for_country(cart.shipping_address.country)
return default_taxes
def is_valid_shipping_method(cart, taxes, discounts):
"""Check if shipping method is valid and remove (if not)."""
if not cart.shipping_method:
return False
valid_methods = ShippingMethod.objects.applicable_shipping_methods(
price=cart.get_subtotal(discounts, taxes).gross,
weight=cart.get_total_weight(),
country_code=cart.shipping_address.country.code)
if cart.shipping_method not in valid_methods:
clear_shipping_method(cart)
return False
return True
def clear_shipping_method(cart):
cart.shipping_method = None
cart.save(update_fields=['shipping_method'])
def _process_voucher_data_for_order(cart):
"""Fetch, process and return voucher/discount data from cart."""
vouchers = Voucher.objects.active(date=date.today()).select_for_update()
voucher = get_voucher_for_cart(cart, vouchers)
if cart.voucher_code and not voucher:
msg = pgettext(
'Voucher not applicable',
'Voucher expired in meantime. Order placement aborted.')
raise NotApplicable(msg)
if not voucher:
return {}
increase_voucher_usage(voucher)
return {
'voucher': voucher,
'discount_amount': cart.discount_amount,
'discount_name': cart.discount_name,
'translated_discount_name': cart.translated_discount_name}
def _process_shipping_data_for_order(cart, taxes):
"""Fetch, process and return shipping data from cart."""
if not cart.is_shipping_required():
return {}
shipping_address = cart.shipping_address
if cart.user:
store_user_address(cart.user, shipping_address, AddressType.SHIPPING)
if cart.user.addresses.filter(pk=shipping_address.pk).exists():
shipping_address = shipping_address.get_copy()
return {
'shipping_address': shipping_address,
'shipping_method': cart.shipping_method,
'shipping_method_name': smart_text(cart.shipping_method),
'shipping_price': cart.get_shipping_price(taxes),
'weight': cart.get_total_weight()}
def _process_user_data_for_order(cart):
"""Fetch, process and return shipping data from cart."""
billing_address = cart.billing_address
if cart.user:
store_user_address(cart.user, billing_address, AddressType.BILLING)
if cart.user.addresses.filter(pk=billing_address.pk).exists():
billing_address = billing_address.get_copy()
return {
'user': cart.user,
'user_email': cart.user.email if cart.user else cart.email,
'billing_address': billing_address}
def _fill_order_with_cart_data(order, cart, discounts, taxes):
"""Fill an order with data (variants, note) from cart."""
from ..order.utils import add_variant_to_order
for line in cart:
add_variant_to_order(
order, line.variant, line.quantity, discounts, taxes)
cart.payments.update(order=order)
if cart.note:
order.customer_note = cart.note
order.save(update_fields=['customer_note'])
@transaction.atomic
def create_order(cart, tracking_code, discounts, taxes):
"""Create an order from the cart.
Each order will get a private copy of both the billing and the shipping
address (if shipping).
If any of the addresses is new and the user is logged in the address
will also get saved to that user's address book.
Current user's language is saved in the order so we can later determine
which language to use when sending email.
"""
order = Order.objects.filter(checkout_token=cart.token).first()
if order is not None:
return order
# FIXME: save locale along with the language
try:
order_data = _process_voucher_data_for_order(cart)
except NotApplicable:
return None
order_data.update(_process_shipping_data_for_order(cart, taxes))
order_data.update(_process_user_data_for_order(cart))
order_data.update({
'language_code': get_language(),
'tracking_client_id': tracking_code,
'total': cart.get_total(discounts, taxes)})
order = Order.objects.create(**order_data, checkout_token=cart.token)
_fill_order_with_cart_data(order, cart, discounts, taxes)
return order
def is_fully_paid(cart: Cart, taxes, discounts):
"""Check if checkout is fully paid."""
payments = [
payment for payment in cart.payments.all() if payment.is_active]
total_paid = sum([p.total for p in payments])
cart_total = cart.get_total(discounts=discounts, taxes=taxes).gross.amount
return total_paid >= cart_total
def ready_to_place_order(cart: Cart, taxes, discounts):
"""Check if checkout can be completed."""
if cart.is_shipping_required():
if not cart.shipping_method:
return False, pgettext_lazy(
'order placement_error', 'Shipping method is not set')
if not cart.shipping_address:
return False, pgettext_lazy(
'order placement error', 'Shipping address is not set')
if not is_valid_shipping_method(cart, taxes, discounts):
return False, pgettext_lazy(
'order placement error',
'Shipping method is not valid for your shipping address')
if not is_fully_paid(cart, taxes, discounts):
return False, pgettext_lazy(
'order placement error', 'Checkout is not fully paid')
return True, None
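# A usage sketch (an assumption about calling code, not part of the original
# module): an order-placement view would typically refresh the discount,
# validate the checkout and only then create the order from the cart.
def _place_order_example(cart, tracking_code, discounts, taxes):
    recalculate_cart_discount(cart, discounts, taxes)
    ready, error = ready_to_place_order(cart, taxes, discounts)
    if not ready:
        return None, error
    order = create_order(cart, tracking_code, discounts, taxes)
    if order is None:
        return None, 'Voucher became invalid while placing the order.'
    return order, None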
| 36.329764 | 79 | 0.688318 |
4a1c43f716739e661d8a5ef3da8a82e46db68eb5
| 18,427 |
py
|
Python
|
code/default/python27/1.0/lib/noarch/front_base/http2_connection.py
|
wuyongwen/XX-Net
|
313aefd862b8f230f7c61dc29db1b2b93a17e6ab
|
[
"BSD-2-Clause"
] | null | null | null |
code/default/python27/1.0/lib/noarch/front_base/http2_connection.py
|
wuyongwen/XX-Net
|
313aefd862b8f230f7c61dc29db1b2b93a17e6ab
|
[
"BSD-2-Clause"
] | null | null | null |
code/default/python27/1.0/lib/noarch/front_base/http2_connection.py
|
wuyongwen/XX-Net
|
313aefd862b8f230f7c61dc29db1b2b93a17e6ab
|
[
"BSD-2-Clause"
] | null | null | null |
import Queue
import threading
import socket
import errno
import struct
from http_common import *
from hyper.common.bufsocket import BufferedSocket
from hyper.packages.hyperframe.frame import (
FRAMES, DataFrame, HeadersFrame, PushPromiseFrame, RstStreamFrame,
SettingsFrame, Frame, WindowUpdateFrame, GoAwayFrame, PingFrame,
BlockedFrame, FRAME_MAX_ALLOWED_LEN, FRAME_MAX_LEN
)
from http2_stream import Stream
from hyper.http20.window import BaseFlowControlManager
from hyper.packages.hpack import Encoder, Decoder
# this is defined in rfc7540
# default window size 64k
DEFAULT_WINDOW_SIZE = 65535
# default max frame is 16k, defined in rfc7540
DEFAULT_MAX_FRAME = FRAME_MAX_LEN
class FlowControlManager(BaseFlowControlManager):
"""
``hyper``'s default flow control manager.
This implements hyper's flow control algorithms. This algorithm attempts to
reduce the number of WINDOWUPDATE frames we send without blocking the remote
endpoint behind the flow control window.
This algorithm will become more complicated over time. In the current form,
the algorithm is very simple:
- When the flow control window gets less than 3/4 of the maximum size,
increment back to the maximum.
- Otherwise, if the flow control window gets to less than 1kB, increment
back to the maximum.
"""
def increase_window_size(self, frame_size):
future_window_size = self.window_size - frame_size
if ((future_window_size < (self.initial_window_size * 3 / 4)) or
(future_window_size < 1000)):
return self.initial_window_size - future_window_size
return 0
def blocked(self):
return self.initial_window_size - self.window_size
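# A worked example of the policy above (a sketch, not part of the original
# module): on a fresh default-sized window, a 20 kB frame leaves
# 65535 - 20480 = 45055 bytes, below 3/4 of 65535, so an increment back to
# the full window is requested; a 1 kB frame leaves the window above both
# thresholds, so no WINDOWUPDATE is requested.
def _window_policy_example():
    fc = FlowControlManager(DEFAULT_WINDOW_SIZE)
    big = fc.increase_window_size(20 * 1024)   # returns 20480
    small = fc.increase_window_size(1024)      # returns 0
    return big, small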
class RawFrame(object):
def __init__(self, dat):
self.dat = dat
def serialize(self):
return self.dat
def __repr__(self):
out_str = "{type}".format(type=type(self).__name__)
return out_str
class Http2Worker(HttpWorker):
version = "2"
def __init__(self, logger, ip_manager, config, ssl_sock, close_cb, retry_task_cb, idle_cb, log_debug_data):
super(Http2Worker, self).__init__(
logger, ip_manager, config, ssl_sock, close_cb, retry_task_cb, idle_cb, log_debug_data)
self.network_buffer_size = 65535
# Google http/2 time out is 4 mins.
self.ssl_sock.settimeout(240)
self._sock = BufferedSocket(ssl_sock, self.network_buffer_size)
self.next_stream_id = 1
self.streams = {}
self.last_ping_time = time.time()
self.last_active_time = self.ssl_sock.create_time - 1
self.continue_timeout = 0
        # count of PING frames sent but not yet ACKed:
        # increased when a PING is sent,
        # decreased when a PING ACK is received.
        # While this is not 0, don't accept new requests.
self.ping_on_way = 0
self.accept_task = False
# request_lock
self.request_lock = threading.Lock()
        # every frame to be sent must be put on this queue, then send_loop sends it.
        # Every frame put on this queue must already be allowed by the stream
        # window and the connection window; any data frame blocked by the
        # connection window goes into self.blocked_send_frames instead.
self.send_queue = Queue.Queue()
self.encoder = Encoder()
self.decoder = Decoder()
        # keep blocked data frames in this buffer: allowed by the stream window
        # but blocked by the connection window.
        # They will be sent once the connection window opens.
self.blocked_send_frames = []
        # Values for the settings used on an HTTP/2 connection.
        # They will be sent to the remote end in a SETTINGS frame.
self.local_settings = {
SettingsFrame.INITIAL_WINDOW_SIZE: 16 * 1024 * 1024,
SettingsFrame.SETTINGS_MAX_FRAME_SIZE: 256 * 1024
}
self.local_connection_initial_windows = 32 * 1024 * 1024
self.local_window_manager = FlowControlManager(self.local_connection_initial_windows)
        # changed by the server via SETTINGS frames
self.remote_settings = {
SettingsFrame.INITIAL_WINDOW_SIZE: DEFAULT_WINDOW_SIZE,
SettingsFrame.SETTINGS_MAX_FRAME_SIZE: DEFAULT_MAX_FRAME,
SettingsFrame.MAX_CONCURRENT_STREAMS: 100
}
#self.remote_window_size = DEFAULT_WINDOW_SIZE
self.remote_window_size = 32 * 1024 * 1024
# send Setting frame before accept task.
self._send_preamble()
threading.Thread(target=self.send_loop).start()
threading.Thread(target=self.recv_loop).start()
# export api
def request(self, task):
if not self.keep_running:
# race condition
self.retry_task_cb(task)
return
if len(self.streams) > self.config.http2_max_concurrent:
self.accept_task = False
task.set_state("h2_req")
self.request_task(task)
def encode_header(self, headers):
return self.encoder.encode(headers)
def request_task(self, task):
with self.request_lock:
# create stream to process task
stream_id = self.next_stream_id
# http/2 client use odd stream_id
self.next_stream_id += 2
stream = Stream(self.logger, self.config, self, self.ip, stream_id, task,
self._send_cb, self._close_stream_cb, self.encode_header, self.decoder,
FlowControlManager(self.local_settings[SettingsFrame.INITIAL_WINDOW_SIZE]),
self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE],
self.remote_settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE])
self.streams[stream_id] = stream
stream.start_request()
def send_loop(self):
while self.keep_running:
frame = self.send_queue.get(True)
if not frame:
                # a None frame means exit
break
# self.logger.debug("%s Send:%s", self.ip, str(frame))
data = frame.serialize()
try:
self._sock.send(data, flush=False)
                # don't flush small packets individually,
                # to reduce the number of send() calls
if self.send_queue._qsize():
continue
# wait for payload frame
time.sleep(0.01)
# combine header and payload in one tcp package.
if not self.send_queue._qsize():
self._sock.flush()
except socket.error as e:
if e.errno not in (errno.EPIPE, errno.ECONNRESET):
self.logger.warn("%s http2 send fail:%r", self.ip, e)
else:
self.logger.exception("send error:%r", e)
self.close("send fail:%r" % e)
except Exception as e:
self.logger.debug("http2 %s send error:%r", self.ip, e)
self.close("send fail:%r" % e)
def recv_loop(self):
while self.keep_running:
try:
self._consume_single_frame()
except Exception as e:
self.logger.exception("recv fail:%r", e)
self.close("recv fail:%r" % e)
def get_rtt_rate(self):
return self.rtt + len(self.streams) * 3000
def close(self, reason="conn close"):
self.keep_running = False
self.accept_task = False
        # Notify the send loop to exit.
        # This function may also be called from outside http2,
        # e.g. when gae_proxy finds the appid or IP is wrong.
self.send_queue.put(None)
for stream in self.streams.values():
if stream.task.responsed:
                # the response has already been sent to the client,
                # so the task can't be retried
stream.close(reason=reason)
else:
self.retry_task_cb(stream.task)
self.streams = {}
super(Http2Worker, self).close(reason)
def send_ping(self):
p = PingFrame(0)
p.opaque_data = struct.pack("!d", time.time())
self.send_queue.put(p)
self.last_ping_time = time.time()
self.ping_on_way += 1
def _send_preamble(self):
self.send_queue.put(RawFrame(b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n'))
f = SettingsFrame(0)
f.settings[SettingsFrame.ENABLE_PUSH] = 0
f.settings[SettingsFrame.INITIAL_WINDOW_SIZE] = self.local_settings[SettingsFrame.INITIAL_WINDOW_SIZE]
f.settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE] = self.local_settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE]
self._send_cb(f)
# update local connection windows size
f = WindowUpdateFrame(0)
f.window_increment = self.local_connection_initial_windows - DEFAULT_WINDOW_SIZE
self._send_cb(f)
def increase_remote_window_size(self, inc_size):
        # check and send blocked frames if the window allows
self.remote_window_size += inc_size
#self.logger.debug("%s increase send win:%d result:%d", self.ip, inc_size, self.remote_window_size)
while len(self.blocked_send_frames):
frame = self.blocked_send_frames[0]
if len(frame.data) > self.remote_window_size:
return
self.remote_window_size -= len(frame.data)
self.send_queue.put(frame)
self.blocked_send_frames.pop(0)
if self.keep_running and \
self.accept_task == False and \
len(self.streams) < self.config.http2_max_concurrent and \
self.remote_window_size > 10000:
self.accept_task = True
self.idle_cb()
def _send_cb(self, frame):
        # can be called by a stream.
        # Data frames go to blocked_send_frames if the connection window does not allow them.
if frame.type == DataFrame.type:
if len(frame.data) > self.remote_window_size:
self.blocked_send_frames.append(frame)
self.accept_task = False
return
else:
self.remote_window_size -= len(frame.data)
self.send_queue.put(frame)
else:
self.send_queue.put(frame)
def _close_stream_cb(self, stream_id, reason):
        # called by a stream to remove itself from the streams list
# self.logger.debug("%s close stream:%d %s", self.ssl_sock.ip, stream_id, reason)
try:
del self.streams[stream_id]
except KeyError:
pass
if self.keep_running and \
len(self.streams) < self.config.http2_max_concurrent and \
self.remote_window_size > 10000:
self.accept_task = True
self.idle_cb()
self.processed_tasks += 1
def _consume_single_frame(self):
try:
header = self._sock.recv(9)
except Exception as e:
self.logger.debug("%s _consume_single_frame:%r, inactive time:%d", self.ip, e, time.time()-self.last_active_time)
self.close("disconnect:%r" % e)
return
# Parse the header. We can use the returned memoryview directly here.
frame, length = Frame.parse_frame_header(header)
if length > FRAME_MAX_ALLOWED_LEN:
self.logger.error("%s Frame size exceeded on stream %d (received: %d, max: %d)",
                self.ip, frame.stream_id, length, FRAME_MAX_ALLOWED_LEN)
# self._send_rst_frame(frame.stream_id, 6) # 6 = FRAME_SIZE_ERROR
data = self._recv_payload(length)
self.last_active_time = time.time()
self._consume_frame_payload(frame, data)
def _recv_payload(self, length):
if not length:
return memoryview(b'')
buffer = bytearray(length)
buffer_view = memoryview(buffer)
index = 0
data_length = -1
        # _sock.recv(length) might not read all the data when the given length
        # is very large, so we read from the socket repeatedly.
while length and data_length:
data = self._sock.recv(length)
data_length = len(data)
end = index + data_length
buffer_view[index:end] = data[:]
length -= data_length
index = end
return buffer_view[:end]
def _consume_frame_payload(self, frame, data):
frame.parse_body(data)
# self.logger.debug("%s Recv:%s", self.ip, str(frame))
# Maintain our flow control window. We do this by delegating to the
# chosen WindowManager.
if frame.type == DataFrame.type:
size = frame.flow_controlled_length
increment = self.local_window_manager._handle_frame(size)
if increment < 0:
self.logger.warn("increment:%d", increment)
elif increment:
#self.logger.debug("%s frame size:%d increase win:%d", self.ip, size, increment)
w = WindowUpdateFrame(0)
w.window_increment = increment
self._send_cb(w)
elif frame.type == PushPromiseFrame.type:
self.logger.error("%s receive push frame", self.ip,)
# Work out to whom this frame should go.
if frame.stream_id != 0:
try:
stream = self.streams[frame.stream_id]
stream.receive_frame(frame)
except KeyError as e:
if frame.type not in [WindowUpdateFrame.type]:
self.logger.exception("%s Unexpected stream identifier %d, frame.type:%s e:%r",
self.ip, frame.stream_id, frame, e)
else:
self.receive_frame(frame)
def receive_frame(self, frame):
if frame.type == WindowUpdateFrame.type:
# self.logger.debug("WindowUpdateFrame %d", frame.window_increment)
self.increase_remote_window_size(frame.window_increment)
elif frame.type == PingFrame.type:
if 'ACK' in frame.flags:
ping_time = struct.unpack("!d", frame.opaque_data)[0]
time_now = time.time()
rtt = (time_now - ping_time) * 1000
if rtt < 0:
self.logger.error("rtt:%f ping_time:%f now:%f", rtt, ping_time, time_now)
self.rtt = rtt
self.ping_on_way -= 1
#self.logger.debug("RTT:%d, on_way:%d", self.rtt, self.ping_on_way)
if self.keep_running and self.ping_on_way == 0:
self.accept_task = True
else:
# The spec requires us to reply with PING+ACK and identical data.
p = PingFrame(0)
p.flags.add('ACK')
p.opaque_data = frame.opaque_data
self._send_cb(p)
# self.last_active_time = time.time()
elif frame.type == SettingsFrame.type:
if 'ACK' not in frame.flags:
# send ACK as soon as possible
f = SettingsFrame(0)
f.flags.add('ACK')
self._send_cb(f)
                # this may trigger sending DataFrames that were blocked by the remote window
self._update_settings(frame)
else:
self.accept_task = True
self.idle_cb()
elif frame.type == GoAwayFrame.type:
            # A GOAWAY with error code zero means a graceful shutdown and all
            # is well; otherwise log the error and close the connection.
            # If an error occurred, read the error description from the code
            # registry, otherwise fall back to the frame's additional data.
error_string = frame._extra_info()
time_cost = time.time() - self.last_active_time
if frame.additional_data != "session_timed_out":
self.logger.warn("goaway:%s, t:%d", error_string, time_cost)
self.close("GoAway:%s inactive time:%d" % (error_string, time_cost))
elif frame.type == BlockedFrame.type:
self.logger.warn("%s get BlockedFrame", self.ip)
elif frame.type in FRAMES:
# This frame isn't valid at this point.
#raise ValueError("Unexpected frame %s." % frame)
self.logger.error("%s Unexpected frame %s.", self.ip, frame)
else: # pragma: no cover
# Unexpected frames belong to extensions. Just drop it on the
# floor, but log so that users know that something happened.
self.logger.error("%s Received unknown frame, type %d", self.ip, frame.type)
def _update_settings(self, frame):
if SettingsFrame.HEADER_TABLE_SIZE in frame.settings:
new_size = frame.settings[SettingsFrame.HEADER_TABLE_SIZE]
self.remote_settings[SettingsFrame.HEADER_TABLE_SIZE] = new_size
#self.encoder.header_table_size = new_size
if SettingsFrame.INITIAL_WINDOW_SIZE in frame.settings:
newsize = frame.settings[SettingsFrame.INITIAL_WINDOW_SIZE]
oldsize = self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE]
delta = newsize - oldsize
for stream in self.streams.values():
stream.remote_window_size += delta
self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE] = newsize
if SettingsFrame.SETTINGS_MAX_FRAME_SIZE in frame.settings:
new_size = frame.settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE]
if not (FRAME_MAX_LEN <= new_size <= FRAME_MAX_ALLOWED_LEN):
self.logger.error("%s Frame size %d is outside of allowed range", self.ip, new_size)
# Tear the connection down with error code PROTOCOL_ERROR
self.close("bad max frame size")
#error_string = ("Advertised frame size %d is outside of range" % (new_size))
#raise ConnectionError(error_string)
return
self.remote_settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE] = new_size
for stream in self.streams.values():
stream.max_frame_size += new_size
def get_trace(self):
out_list = []
out_list.append(" continue_timeout:%d" % self.continue_timeout)
out_list.append(" processed:%d" % self.processed_tasks)
out_list.append(" h2.stream_num:%d" % len(self.streams))
out_list.append(" sni:%s, host:%s" % (self.ssl_sock.sni, self.ssl_sock.host))
return ",".join(out_list)
| 38.309771 | 125 | 0.612688 |
4a1c44707caf17cd72ecf16f7c6aa6274cd2c30d
| 2,720 |
py
|
Python
|
hiddenymouz.py
|
aliya02/hiddenymouz
|
bebdda7671315ef8e6a9c3d11eccb8200be0d637
|
[
"MIT"
] | 5 |
2018-09-19T19:16:07.000Z
|
2020-11-29T22:06:45.000Z
|
hiddenymouz.py
|
aliya02/hiddenymouz
|
bebdda7671315ef8e6a9c3d11eccb8200be0d637
|
[
"MIT"
] | null | null | null |
hiddenymouz.py
|
aliya02/hiddenymouz
|
bebdda7671315ef8e6a9c3d11eccb8200be0d637
|
[
"MIT"
] | 3 |
2018-09-20T16:52:57.000Z
|
2020-09-27T15:26:19.000Z
|
#!/usr/bin/env python
import os
import sys
import base64
import argparse
str_decryptor = '<?php /* Mr-Gandrunx - Hiddenymouz */ error_reporting(0); define("__LOCALFILE__",__FILE__); goto RSA; TWOFISH: extract($TripleDES); goto AES; RSA: $TripleDES["\\x69\\x73"] = "\\147\\172\\x69\\x6e\\146\\x6c\\x61\\x74\\145"; goto BLOWFISH; BLOWFISH: $TripleDES["\\x65\\x76\\151\\x6c"] = "\\x62\\141\\163\\x65\\66\\x34\\x5f\\144\\145\\143\\x6f\\x64\\145"; goto TWOFISH; AES: eval($is($evil("nVTrcppAGH2VDL1Bm2bUqGmGiNiEznSaaNQuRkAoK7utU8siYtxATKeRljx2F0LQ6eVHu7oD3+Wc881ZAPk+8S0fecQPJu5HviSIjzulTrYaiHpT4iCee8Tt4skU8Z6PPqbNU3vMsrLB7z1P92OZ2+W4Xcs67Ry3Tt+8PVUsSxD0Ei1la5Rx3q8GXrjjYEJc/iHVEaL09l6R40RM/PtalnjgEIvU0Tzwp2gbv6m9eCFEE7yBP8nhlUZBtKW21xh/8nniOxsuvYCOhJc55FgQV2g6R/8MVBhw5aNg4bs7BVZcZQbnP50zyuUDo17K/3R/+45W8cPmRo3Cw18Isg7WnRNtXbY4GJ5JVSsGrZeNcn2fXWusXme7atAaZrUq2yzHauUaw9azmBMRDXx7HPBbooLITGbHMPHInC8el8157256O4VFaRfPGa32inTP4sGybZBlQ0WodUWu4XrWPbsF/eAGAzN0r+mt24InoBvFAMCEREritNSQACXxAF3jZP59AL8uMIDRF9QKB101cKTYHEk93byiuukoAVjGLIcvPvRh4CRLBQAv9sDJwg61cz1pj6GkAMvRBvqyHc5UM0FIkwZdc0wi+t0FsG86wxvWd+22zBj0e+/tu6/32OXZFVTVGwLi0OxDOE7U2ayHjh113homqg96RwsQrn9oPW82hCRheVl34tdaBFk8ktMYNVlMewxHJKfXlTQbFPONrmITsrltaa6Avqp/aHr5nH/Ubaa6WqoHUz5MQXi0MOyzZaozzOboMV10l8aas5a1MH49vDNZL4wy7fdpTzZb1oPDdDYzjZuAMo9lRXK3/Ura58ybE3YuMkLrlQroHfMt9CL6bdB5+44TBGHn6dOd/3s6BvYydT1w5ODSVc0AycotAfQbxmDFTjxjF6L8lUKX9pTnmhK391fC39TLI0aQv84LdzpxP/OWlX+wxCKx/RkTnQnin11YF88EkR6U0EHFriG0jzE+LNUOYal+eFgdOxXnVaVW3hdXPwE="))); __halt_compiler();#'
def fn_decrypt(fname):
with open(fname, 'rb') as fp:
s = fp.read()
try:
a = s.split('#')[1]
print base64.b64decode(''.join(chr(ord(a[i]) - (0x0c if ((i % 2) == 0) else 0x0e)) for i in range(len(a))))
except:
print '[-] error: invalid data!'
def fn_encrypt(fname):
with open(fname, 'r') as fp:
s = base64.b64encode(fp.read())
fnew = os.path.splitext(os.path.basename(fname))[0]
with open(fnew + '-hidden.php', 'wb') as fp:
fp.write(str_decryptor + ''.join(chr(ord(s[i]) + (0x0c if ((i % 2) == 0) else 0x0e)) for i in range(len(s))))
print '[+] encoded file: {}-hidden.php'.format(fnew)
def parse_cmd():
p = argparse.ArgumentParser()
p.add_argument('-f', '--file', dest='filename', help='Input filename')
g = p.add_mutually_exclusive_group(required=True)
g.add_argument('-e', '--encode', action='store_true', dest='e')
g.add_argument('-d', '--decode', action='store_true', dest='d')
a = p.parse_args()
if a.e and a.filename:
fn_encrypt(a.filename)
elif a.d and a.filename:
fn_decrypt(a.filename)
else:
sys.exit()
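# A worked round trip of the obfuscation used above (a sketch, not part of the
# original script): the payload is base64-encoded, then each character is
# shifted up by 0x0c at even positions and 0x0e at odd positions; decoding
# reverses the shifts and then the base64 step.
def fn_roundtrip_example(payload):
    s = base64.b64encode(payload)
    enc = ''.join(chr(ord(s[i]) + (0x0c if ((i % 2) == 0) else 0x0e)) for i in range(len(s)))
    dec = base64.b64decode(''.join(chr(ord(enc[i]) - (0x0c if ((i % 2) == 0) else 0x0e)) for i in range(len(enc))))
    return dec == payload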
if __name__ == '__main__':
parse_cmd()
| 54.4 | 1,443 | 0.741544 |