content stringlengths 39–9.28k | sha1 stringlengths 40–40 | id int64 8–710k |
---|---|---|
def group_by_xs(datasets):
"""
Return datasets grouped by polarization cross section.
"""
cross_sections = {}
for data in datasets:
cross_sections.setdefault(data.polarization, []).append(data)
#print("datasets", [":".join((d.name, d.entry, d.polarization, d.intent)) for d in datasets])
#print("xs", cross_sections)
return cross_sections
|
768d044a23836e0ece52950145621c5f5f15d50a
| 625,768 |
def get_sparse_memory_usage(matrix):
"""
    Return the number of bytes needed to store a sparse matrix
"""
return matrix.data.nbytes + matrix.indices.nbytes + matrix.indptr.nbytes
|
e96e3db5fb78cf29b5d28ceb355e218d783f461a
| 478,087 |
def strip_html_comments(text):
"""Strip HTML comments from a unicode string."""
lines = text.splitlines(True) # preserve line endings.
# Remove HTML comments (which we only allow to take a special form).
new_lines = [line for line in lines if not line.startswith("<!--")]
return "".join(new_lines)
|
289ab694a1fa2a6c9a1f60e0ead8b13e62a0bff0
| 679,297 |
def get_all_rate(reviewer):
"""Function that returns all the rates of a viewer."""
#All the rates and put them into a list.
rates = list(reviewer['Review Rating'])
#Return this list.
return rates
|
81de4dd087e8ef37ba0df6ce49d6c3c7dc800c53
| 422,184 |
def findreplace_prefix_cols(df, old_prefix, new_prefix):
"""List columns names with old prefix replaced by new prefix."""
sel = [colname.startswith(old_prefix) for colname in df.columns]
old_cols = df.columns[sel]
new_cols = [old_col.replace(old_prefix, new_prefix, 1)
for old_col in old_cols]
return new_cols
|
0de6636e892a8ad4ddb7b8fed87151f72fb329da
| 434,117 |
def get_longest_parent(task):
"""
Returns the parent of the given task which has the longest execution time
Args:
task (Node)
Returns:
Node
"""
longest_parent = task.get_parents()[0]
for parent in task.get_parents():
if longest_parent.get_exec_time() < parent.get_exec_time():
longest_parent = parent
return longest_parent
|
8e41984c1e287f8a2d457150d332f868660c4eef
| 697,634 |
def list_journals(config):
"""List the journals specified in the configuration file"""
sep = "\n"
journal_list = sep.join(config['journals'])
return journal_list
|
3c4ec8e93b896022ccd934bc3052b76f72f251f8
| 67,876 |
from typing import List
from typing import Tuple
def extract_sound_mappings(config_file_name: str) -> List[Tuple[str, str]]:
"""
Extract a list of mappings from characters/symbols to sounds.
:param config_file_name: the path to the file to extract the mappings from
:return: a list of tuples consisting containing (symbol, sound_equivalent)
"""
sound_mappings = []
with open(config_file_name, "r", encoding='utf-8') as config_file:
for line in config_file.readlines():
if line[0] == '#':
continue
mapping = list(filter(None, line.strip().split(' ', 1)))
if len(mapping) > 1:
sound_mappings.append((mapping[0], mapping[1]))
return sound_mappings
|
616684c06c3e8c184e1d2694aefe505ec17812c2
| 305,522 |
def flip_keypoint(keypoint, size, y_flip=False, x_flip=False):
"""Modify keypoints according to image flips.
Args:
keypoint (~numpy.ndarray): Keypoints in the image.
The shape of this array is :math:`(K, 2)`. :math:`K` is the number
of keypoints in the image.
The last dimension is composed of :math:`y` and :math:`x`
coordinates of the keypoints.
size (tuple): A tuple of length 2. The height and the width
of the image which is associated with the keypoints.
y_flip (bool): Modify keypoints according to a vertical flip of
an image.
x_flip (bool): Modify keypoints according to a horizontal flip of
an image.
Returns:
~numpy.ndarray:
Keypoints modified according to image flips.
"""
H, W = size
keypoint = keypoint.copy()
if y_flip:
keypoint[:, 0] = H - 1 - keypoint[:, 0]
if x_flip:
keypoint[:, 1] = W - 1 - keypoint[:, 1]
return keypoint
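
# A small illustrative sketch (not part of the original snippet; it assumes
# numpy, which the docstring above already requires): flipping two keypoints
# vertically in an image of height 4.
import numpy as np

_kp = np.array([[0, 0], [2, 3]])
_flipped = flip_keypoint(_kp, (4, 5), y_flip=True)
# _flipped is array([[3, 0],
#                    [1, 3]]) -- only the y coordinates change.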
|
a76542afced772cee70f091752a96e244e02362f
| 240,723 |
from typing import Optional
def _convert_tag_plain2bio(tag: str, previous: Optional[str] = None) -> str:
"""
add bio prefix to plain tag, depending on previous tag
Args:
tag: e.g. 'ORG'
previous: e.g. 'ORG'
Returns:
bio_tag: e.g. 'I-ORG'
"""
if tag == "O" or tag.startswith("["):
return tag
elif previous is None:
return f"B-{tag}"
elif tag != previous:
return f"B-{tag}"
else:
return f"I-{tag}"
|
3ba3ab5a63ae59d08a74f716bd7f22b8a97adbff
| 149,225 |
def _look_before(index_sentence, context):
    """Generate the look-before context pairs, starting at the given sentence
    index and going back no further than the first sentence."""
    context_pairs = []
    for i in range(1, context + 1):
        s_index = index_sentence - i
        if s_index >= 0:
            context_pairs.append((s_index, index_sentence))
    return context_pairs
|
ce48596b37be997fa9c7176b9c0f18b2c6a78e4b
| 302,910 |
import re
def does_text_contain_section(pagetext: str, section: str) -> bool:
"""
Determine whether the page text contains the given section title.
It does not care whether a section string may contain spaces or
underlines. Both will match.
If a section parameter contains an internal link, it will match the
section with or without a preceding colon which is required for a
text link e.g. for categories and files.
:param pagetext: The wikitext of a page
:param section: a section of a page including wikitext markups
"""
# match preceding colon for text links
section = re.sub(r'\\\[\\\[(\\?:)?', r'\[\[\:?', re.escape(section))
# match underscores and white spaces
section = re.sub(r'\\?[ _]', '[ _]', section)
m = re.search("=+[ ']*{}[ ']*=+".format(section), pagetext)
return bool(m)
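
# Illustrative check (hypothetical wikitext): spaces and underscores in the
# section argument are interchangeable, as the docstring states.
_pagetext = "Intro.\n== Foo bar ==\nSection body.\n"
assert does_text_contain_section(_pagetext, "Foo_bar") is True
assert does_text_contain_section(_pagetext, "Missing") is False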
|
fd46b7f57d060aebcc141b7f306140ebec7b36e8
| 93,795 |
def get_model_name(obj):
""" returns the model name of an object """
return type(obj).__name__
|
c38245a02f58967b3be848cd383493d83c107557
| 524,932 |
import socket
def is_port_available(port, host='127.0.0.1'):
"""check whether a port is in use return True if the port is available else
False."""
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((host, int(port)))
return True
except socket.error:
return False
finally:
if s:
s.close()
|
0b83e68cee1873bbbe45817ef25a066c0e39fa4b
| 484,860 |
def compare_settings(local_config_vars, remote_config_vars):
"""Compare local and remote settings and return the diff.
This function takes two dictionaries, and compares the two. Any given
setting will have one of the following statuses:
'=' In both, and same value for both (no action required)
'!' In both, but with different values (an 'update' to a known setting)
    '+' In local, but not in remote (a 'new' setting to be applied)
    '?' In remote, but not in local (reference only - these are generally
        Heroku add-on specific settings that do not need to be captured
        locally; they are managed through the Heroku CLI / website.)
NB This function will convert all local settings values to strings
before comparing - as the environment settings on Heroku are string.
This means that, e.g. if a bool setting is 'true' on Heroku and True
locally, they will **not** match.
Returns a list of 4-tuples that contains:
(setting name, local value, remote value, status)
The status value is one of '=', '!', '+', '?', as described above.
"""
diff = []
for k, v in local_config_vars.items():
if k in remote_config_vars:
if str(remote_config_vars[k]) == str(v):
diff.append((k, v, remote_config_vars[k], '='))
else:
diff.append((k, v, remote_config_vars[k], '!'))
else:
diff.append((k, v, None, '+'))
# that's the local settings done - now for the remote settings.
for k, v in remote_config_vars.items():
if k not in local_config_vars:
diff.append((k, None, v, '?'))
return sorted(diff)
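
# Illustrative diff (hypothetical settings): the local bool True is stringified
# to 'True' before comparison, so it matches the remote string 'True'.
# compare_settings({'DEBUG': True, 'NEW_FLAG': 1},
#                  {'DEBUG': 'True', 'DATABASE_URL': 'postgres://...'})
# == [('DATABASE_URL', None, 'postgres://...', '?'),
#     ('DEBUG', True, 'True', '='),
#     ('NEW_FLAG', 1, None, '+')]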
|
d8f66968f7daa2d573f2e40d17640b883f408bad
| 322,098 |
def round_to_quarter(value):
"""
This function is used to round a value to the nearest quarter.
Examples:
3.82 >> 3.75
6.91 >> 7.0
5.23 >> 5.25
2.11 >> 2.0
"""
return round(value*4)/4
|
01ba27a76f53cc464fa804d8a4556844c86ac316
| 337,082 |
def make_msg(code, data=None, to_slave_addr=2, from_host_addr=1):
"""Makes a ccTalk message from a ccTalk code and data to be sent with this packet.
Parameters
----------
code : int
ccTalk code for this message.
data : list of integers
Data to be sent in this message.
to_slave_addr : int
Address of slave to be sent to. Defaults to 2.
from_host_addr : int
Address of host that is sending the message. Defaults to 1.
Returns
-------
packet : list of integers
An integer equivalent of the ccTalk packet.
This needs to be converted to a byte packet prior to sending.
"""
if not data:
seq = [to_slave_addr, 0, from_host_addr, code]
else:
seq = [to_slave_addr, len(data), from_host_addr, code] + data
packet_sum = 0
for i in seq:
packet_sum += i
    end_byte = (256 - (packet_sum % 256)) % 256  # keep the checksum within a single byte
packet = seq + [end_byte]
return packet
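
# Worked example (254 is, by assumption here, the ccTalk "simple poll" header):
# with the default addresses the packet sums to 0 modulo 256 once the checksum
# byte is appended.
assert make_msg(254) == [2, 0, 1, 254, 255]
assert make_msg(231, [1]) == [2, 1, 1, 231, 1, 20]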
|
c9f7fcbb6567bcff1d38941135c2ce41a5879ada
| 270,468 |
import pathlib
from typing import List
def _find_python_modules(folder: pathlib.Path) -> List[str]:
"""Find modules in a directory.
:param folder: The directory to look for Python modules in.
:return: A list of Python module names.
"""
module_names = []
for child in folder.iterdir():
if child.is_file() and child.suffix in (".py", ".so"):
module_names.append(child.stem)
elif child.is_dir() and not child.stem.startswith("__"):
module_names.append(child.stem)
return sorted(module_names)
|
0a01a789e8fa28b31c997920e277c7d26e2a87c4
| 232,619 |
def get_is_tors_list(mode_dict):
"""
    Return a list of booleans, one per normal mode, indicating whether
    the mode is an internal rotation (torsion) or not.
"""
is_tors_list = []
for mode in sorted(mode_dict.keys()):
is_tors = True if mode_dict[mode]['mode'] == 'tors' else False
is_tors_list.append(is_tors)
return is_tors_list
|
cfbf984c78800c4ce3b789a65ffedff822753156
| 614,864 |
def fixed_lr(global_step, learning_rate):
"""Fixed learning rate policy.
This function always returns ``learning_rate``, ignoring ``global_step``
value.
Args:
global_step: global step TensorFlow tensor (ignored for this policy).
learning_rate (float): fixed learning rate to use.
Returns:
learning rate at step ``global_step``.
"""
return learning_rate
|
7a59059205fe18627ca9d26d33341305f80591f3
| 145,283 |
def filterType(sequence, object_type):
""" Get the sequence items matching the type supplied
:param sequence: Sequence of things to check
:type sequence: iter
:param object_type: class name to match against
:type object_type: any
:return: filter instance of matched sequence items
:rtype: filter
"""
return filter(lambda o: type(o) == object_type, sequence)
|
2f48ec941f8a1f29a72b41455df8b2421828558c
| 468,416 |
def parse_hgtector(input_f):
""" Parse output of HGTector version 0.2.1.
Parameters
----------
input_f: string
file descriptor for HGTector output results
Returns
-------
output: string
one putative HGT-derived gene per line
columns: query_id, donor_taxid, donor_species, donor_lineage, pct_id,
pct_coverage
"""
hgts = []
for line in input_f:
x = line.strip('\r\n').split('\t')
if (len(x) == 15) and (x[7] == '1'):
hgt = '\t'.join((x[0], x[12], x[13], x[14], x[10], x[11]))
hgts.append(hgt)
return '\n'.join(hgts)
|
b7eacd2eed2467d107fb67795378339dc20aef27
| 82,327 |
import requests
from bs4 import BeautifulSoup
def getContent(url: str):
"""
    Gets the content of a given url on the web
    Arguments:
        url {str} -- [
            The url of the webpage to scrape
        ]
    Returns:
        BeautifulSoup -- [
            The content as a BeautifulSoup object
        ]
"""
print(f"Now scrapping {url}")
response = requests.get(url, timeout=5)
content = BeautifulSoup(response.content, "html.parser")
return content
|
b56c7069fcc58707ab2d73f1584fbee464e08899
| 325,366 |
def get_axes(self, *args):
"""Returns the list of axes.
Parameters
----------
self: Data
a Data object
args : list
list of axes names
Returns
-------
axes_list : list of axes (Data)
"""
axes = self.axes
if len(args) > 0:
axes_list = []
for name in args:
for axis in axes:
if axis.name == name:
axes_list.append(axis)
else:
axes_list = axes
return axes_list
|
3b0a5089bf0baa92f8ef963acb3fadd70e35d2ee
| 275,208 |
import typing
def address_to_str(address: typing.Tuple[str, int]) -> str:
"""Converts a ``(host, port)`` tuple into a ``host:port`` string."""
return "{}:{}".format(*address)
|
c6908e57f9e88c18bd41fe580432e24c9a2f6bfe
| 440,639 |
def option_rep(optionname, optiondef):
"""Returns a textual representation of an option.
option_rep('IndentCaseLabels', ('bool', []))
=> 'IndentCaseLabels bool'
option_rep('PointerAlignment', ('PointerAlignmentStyle',
[u'Left', u'Right', u'Middle']))
=> 'PointerAlignment PointerAlignmentStyle
Left
Right
Middle'
"""
optiontype, configs = optiondef
fragments = [optionname + ' ' + optiontype]
for c in configs:
fragments.append(" " * 8 + c)
rep = "\n".join(fragments)
return rep
|
005462286006469ce333ca9973c4fb1ee056be9e
| 77,698 |
import threading
def run_in_thread(fn):
"""
Decorator to run a function in a thread.
>>> 1 + 1
2
>>> @run_in_thread
... def threaded_sleep(seconds):
... from time import sleep
... sleep(seconds)
>>> thread = threaded_sleep(0.1)
>>> type(thread)
<class 'threading.Thread'>
>>> thread.is_alive()
True
>>> thread.join()
>>> thread.is_alive()
False
"""
def run(*k, **kw):
t = threading.Thread(target=fn, args=k, kwargs=kw)
t.start()
return t
return run
|
b0ae9245e2f7f401a6458064d7122d8b72f3932a
| 447,981 |
import re
def alphanum_key(s):
""" Key func for sorting strings according to numerical value. """
return [int(c) if c.isdigit() else c for c in re.split('([0-9]+)', s)]
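
# Typical use (illustrative): natural sorting of strings with embedded numbers.
assert sorted(['img12', 'img2', 'img1'], key=alphanum_key) == ['img1', 'img2', 'img12']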
|
c9147a41cad775700db280e92efe500fb6d8469e
| 35,719 |
def _get_deploy_iso_name(node):
"""Returns the deploy ISO file name for a given node.
:param node: the node for which ISO file name is to be provided.
"""
return "deploy-%s.iso" % node.name
|
cae34990e5bc0c94408151b69b8562dc5d4c2432
| 630,242 |
def digest_line(line, name):
"""
Interprets a line of input to a point-tuple.
"""
x, _, y = line.partition(",")
x = int(x.strip())
y = int(y.strip())
return (x, y, name)
|
9b7908287e6735312bae59129dedc8d1ff520c6b
| 490,706 |
import base64
def file_to_base64(filepath):
"""
Returns the content of a file as a Base64 encoded string.
:param filepath: Path to the file.
:type filepath: str
:return: The file content, Base64 encoded.
:rtype: str
"""
with open(filepath, 'rb') as f:
encoded_str = base64.b64encode(f.read())
return encoded_str.decode('utf-8')
|
5baa42c50c91ef03c7bdbc12f79a652744975458
| 653,875 |
import inspect
def iscoroutinefunction(func):
"""Checks if a function is a coroutine-function, like:
* ``async def f(): ...`` (since Python 3.5)
* ``@asyncio.coroutine def f(): ...`` (since Python3)
.. note:: Compatibility helper
Avoids to import :mod:`asyncio` module directly (since Python3),
which in turns initializes the :mod:`logging` module as side-effect.
:param func: Function to check.
:return: True, if function is a coroutine function.
False, otherwise.
"""
# -- NOTE: inspect.iscoroutinefunction() is available since Python 3.5
# Checks also if @asyncio.coroutine decorator is not used.
# pylint: disable=no-member
return (getattr(func, "_is_coroutine", False) or
(hasattr(inspect, "iscoroutinefunction") and
inspect.iscoroutinefunction(func)))
|
e819334b7078c3cf490acea24cc136fffd499d5a
| 308,228 |
def convert_string(x):
"""
    Convert the string to lower case and strip all non [a-z0-9 _-] characters
:param str x: the string to convert
:return: the converted string
:rtype: str
"""
# we define the things to keep this way, just for clarity and in case we want to add other things.
wanted = set()
# lower case letters
wanted.update(set(range(97, 123)))
# numbers
wanted.update(set(range(48, 58)))
# - and _
wanted.update({45, 95})
# space
wanted.add(32)
s = ''
for c in x:
if ord(c) in wanted:
s += c
elif 65 <= ord(c) <= 90:
s += chr(ord(c) + 32)
return s
|
e43a5da3815aac5a59bbb91a97727e257e831f14
| 25,504 |
def traverse_xsyn(element, on_element):
"""
    Traverse ``element`` depth-first and return the concatenated lists
    produced by calling ``on_element`` on each node.
    :param element: element in DOM.
    :param on_element: callback on element that returns a list of values.
"""
res = on_element(element)
for child in element.childNodes:
child_results = traverse_xsyn(child, on_element)
res += child_results
return res
|
ca4f27b477b62a62245718ec1283e50badebfd87
| 439,656 |
def _process_opt(opt):
"""
Helper function that extracts certain fields from the opt dict and assembles the processed dict
"""
return {'gelfhttp': opt.get('gelfhttp'),
'port': str(opt.get('port', '12022')),
'custom_fields': opt.get('custom_fields', []),
'sourcetype': opt.get('sourcetype_nebula', 'hubble_osquery'),
'gelfhttp_ssl': opt.get('gelfhttp_ssl', True),
'proxy': opt.get('proxy', {}),
'timeout': opt.get('timeout', 9.05)}
|
ffbcd27be0c2def8c79679edd29e35a21027cf44
| 449,953 |
def ctrl_next(W, pg, player):
"""Find controlled predecessor set.
These are the nodes that in one step:
- can reach W because player controls them
- will reach W because opponent controls them,
but has no other choice than next(W)
"""
cnext = set()
for node in W:
for pred in pg.predecessors_iter(node):
if pg.node[pred]['player'] == player:
print('controlled by player, good')
elif len(pg.succ[pred]) == 1:
print('controlled by opponent, bad only 1 outgoing')
else:
print('not in CNext')
continue
cnext.add(pred)
return cnext
|
683a5cfcb05734087eec449a50909d61917ac078
| 409,974 |
def get_cpp_bool(bool):
"""
Simply converts a Python boolean value to a string representation of a C++ bool
"""
return "true" if bool else "false"
|
5f73a83dd753a236ded313b2e6e67554a2762888
| 523,684 |
def first(iterable):
"""Returns the first element of an iterable"""
for element in iterable:
return element
|
20eb077082ecbe6c073b507000bf148164f81503
| 83,686 |
def any_none(*args):
"""
Returns a boolean indicating if any argument is None.
"""
return any(arg is None for arg in args)
|
ce2c7a650877f0cc67e45f54f7e4cf6d8d1d5fce
| 533,312 |
def save_line(fout, path, y, array):
"""
Save a line into `fout`. The line contains
path y array
Parameters:
-----------
    fout : file object
instance of the output file
path : string
path to the image
y : int
true label
array : array_like
list containing features, softmax or other values
"""
line = ''
for el in array:
line += str(el)+' '
fout.write('%s %d %s\n' % (path, y, line[:-1]))
return fout
|
3ec1bc7e784ac6a9613875437aa9cb0ad091a26c
| 262,116 |
from datetime import datetime
def all_time_containing(dt=None):
"""Returns a half-open interval of all time."""
return datetime(year=1900, month=1, day=1), datetime.max
|
f6b0a8b711f552fbe5ab428a30d51abd0fcaad15
| 239,943 |
def dup_abs(f, K):
"""
Make all coefficients positive in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_abs(x**2 - 1)
x**2 + 1
"""
return [K.abs(coeff) for coeff in f]
|
88e076726aa4c4a9fee4977e5b268f33ff810610
| 170,330 |
def math_answer_valid(answer):
"""The purpose of this function is to prevent errors within the
honeybee_math() function by ensuring answers to the math questions
are integers. If the user does not provide a valid input, this function
will prompt the user until they enter a valid integer input. Once the
user enters a valid integer value, the value is returned."""
# A try statement tests to ensure the user input is an integer (all math
# question answers are integers)
try:
# If the user input is an integer, it is returned in integer form
valid_answer = int(answer)
return int(valid_answer)
# If the user input cannot be converted into an integer class,
# the function restarts with an error reminding the user the answer
# needs to be an integer. The program asks for a new answer to pass
# to the function before restarting.
except ValueError:
print("\nOops! Make sure you enter an integer.")
new_answer = input("Enter your new answer: ")
        return math_answer_valid(new_answer)
|
52e935b2c854b46771790b57505ea76e38063695
| 566,888 |
import math
def image_normalize(image):
"""normalize a 3d numpy array simiiar to tf.image.per_image_standardization"""
mean = image.mean()
stddev = image.std()
adjusted_stddev = max(stddev, 1.0/math.sqrt(image.size))
standardized_image = (image - mean) / adjusted_stddev
return standardized_image
|
0ec4288bd8f5bbf93f4c2bd06bc8a69efafb86c8
| 129,401 |
def edge_to_string(t, is_head=False):
"""
Converts the token to an edge string representation
    :param t: the token
    :param is_head: whether the token is the head (its dependency label is then emitted as 'ROOT')
"""
return '/'.join([t.lemma_.strip().lower(), t.pos_, t.dep_ if t.dep_ != '' and not is_head else 'ROOT'])
|
abd73c14fa4f12297ad6ac91b303dcd4064ef78f
| 403,879 |
def _iscomment(line):
"""
Determine if a line is a comment line. A valid line contains at least three
words, with the first two being integers. Note that Python 2 and 3 deal
with strings differently.
"""
if line.isspace():
return True
elif len(line.split()) >= 3:
try: # python 3 str
if line.split()[0].isdecimal() and line.split()[1].isdecimal():
return False
except: # python 2 str
if (line.decode().split()[0].isdecimal() and
line.split()[1].decode().isdecimal()):
return False
return True
else:
return True
|
7021fd3bb449dc9a6b682487d1a37c4cdbd18dfe
| 315,918 |
def compute_indentation(props):
"""
Compute the indentation in inches from the properties of a paragraph style.
"""
res = 0
for k, v in props.items():
if k in ['margin-left', 'text-indent']:
try:
res += float(v.replace('in', ''))
except:
pass
return res
|
d422421560de9d7eff6831773a23c6fb5a823cca
| 658,101 |
import math
def is_int(value):
"""Tests whether value is an integer value (numeric type that represents an integer value)"""
return value == math.floor(value)
|
7846bf81f41bdf661210544fec2a714017f6026d
| 370,427 |
def is_tune_grid_search(obj):
"""Checks if obj is a dictionary returned by tune.grid_search.
Returns bool.
"""
return isinstance(
obj, dict) and len(obj) == 1 and "grid_search" in obj and isinstance(
obj["grid_search"], list)
|
45864bccf9573e409ee7c4078b9c0f3fa5530e34
| 225,343 |
def getCaseClass(test_case):
"""
Returns test case class name.
"""
return test_case.__class__.__name__
|
5a5a4ccb38cef6a05fb9a4315e000b67949fdb49
| 154,537 |
def _IsListOfType(elements, element_type):
"""Determines whether or not elements is a list of unique elements of type."""
if not elements or not isinstance(elements, list):
return False
return all(isinstance(s, element_type) for s in elements)
|
ba2224b00b01cf36415aad380e4a67c905a613bf
| 387,958 |
def rename_img(path, number):
"""Renames a file path of an image
Returns a new name of the file path for the resized images
Parameters
----------
path: a path (str)
a string that leads to an existing file path
number: a number (int)
        a number that is used in the concatenation for the image rename
"""
output_path = path + '/' + str(number) + '.jpg'
return output_path
|
73c1ac1d02bc0d568aa036a128ee61093dfd05a2
| 471,736 |
import torch
def softmax(logits, dim=1):
"""
Softmax.
:param logits: logits
:type logits: torch.Tensor
:param dim: dimension
:type dim: int
:return: softmax
:rtype: torch.Tensor
"""
if logits.size()[1] > 1:
return torch.nn.functional.softmax(logits, dim=dim)
else:
probabilities = torch.nn.functional.sigmoid(logits)
return torch.cat((1 - probabilities, probabilities), dim=dim)
|
6978f741d338a257c9962f8f3450f5fab6ed4cda
| 399,200 |
def make_player_team_map_for_game(boxscore_data):
"""
Creates a dict mapping player id to team id for a game
    :param list boxscore_data: list of dicts with boxscore data for a game
:return: player id team id dict
:rtype: dict
"""
player_game_team_map = {
player["PLAYER_ID"]: player["TEAM_ID"] for player in boxscore_data
}
return player_game_team_map
|
234232273521ad5fd773209217b51ac204a8997d
| 325,747 |
def pull_pid(t_cmd):
"""Function will attempt to pull PID from CMD output, if output is not present it'll return -1"""
t_data = t_cmd.split()
try:
return t_data[7]
except IndexError:
return -1
|
4107da2c4ef5f51820cf50146c1b08dbe3fea154
| 358,832 |
def is_rule(fun):
""" Returns whether something is a rule or not """
is_callable = hasattr(fun, '__call__')
return is_callable and hasattr(fun, "is_rule") and fun.is_rule
|
b6f6d2650d03734286c67ad8f6d6bfd14f47d15d
| 532,873 |
def pathsplit(path):
"""Split a /-delimited path into a directory part and a basename.
:param path: The path to split.
:return: Tuple with directory name and basename
"""
try:
(dirname, basename) = path.rsplit(b"/", 1)
except ValueError:
return (b"", path)
else:
return (dirname, basename)
|
df5beffc3897f5efa38f0f22c7cc251587773cae
| 580,728 |
import re
def is_valid_email(email):
"""
Validate email parameter is a valid formatted email address
:param email: string containing email address
:return: True if email is valid otherwise False
"""
if not email:
return False
return bool(re.match("^.+@(\[?)[a-zA-Z0-9-.]+.([a-zA-Z]{2,3}|[0-9]{1,3})(]?)$", email))
|
15b40326334292ce8e7d1278a3747e4fbc34feb6
| 260,240 |
import math
def humanise_bytes(num_bytes, si=False):
"""
Make a human-readable string for a number of bytes
>>> humanise_bytes(689275)
    '673.1 KiB'
Taken from https://programming.guide/worlds-most-copied-so-snippet.html
:param int num_bytes:
:param int si: Whether to use SI units. Defaults to False.
"""
unit = 1000 if si else 1024
abs_bytes = abs(num_bytes)
if abs_bytes < unit:
return "{} B".format(num_bytes)
exp = int(math.log(abs_bytes) / math.log(unit))
thresh = int(math.pow(unit, exp) * (unit - 0.05))
if exp < 6 and abs_bytes >= thresh - (52 if (thresh & 0xfff) == 0xd00 else 0):
exp += 1
pre = ("kMGTPE" if si else "KMGTPE")[exp - 1] + ("" if si else "i")
if exp > 4:
num_bytes /= unit
exp -= 1
return "{:.1f} {}B".format(num_bytes / math.pow(unit, exp), pre)
|
9c8eb4aeda64b46ab04710a621d32a0a4f505a73
| 58,210 |
def subset_answer_choices(answer):
"""
If a user changes their answers multiple times, an iOS device will have redundant answers at the beginning
and end of the list, so we remove them.
Args:
answer(list):
List of changed answers
Returns:
answer(list):
List of changed answers with redundant answers removed
"""
if isinstance(answer[0], float):
answer = answer[1:]
if len(answer) > 1:
if answer[-1] == answer[-2]:
answer = answer[:-1]
return answer
|
e3a3facbaacef20ce2ccce34b400118902d7b61f
| 613,572 |
def _Q(x):
"""Helper function.
returns True when x has a certain message and origin_name
"""
return x.message == "event A" and x.origin_name == "b.log"
|
4da99d194b6c6a2d772522571564f465cf05bde5
| 386,556 |
def path_from_root(gltf, vnode_id):
"""Returns the ids of all vnodes from the root to vnode_id."""
path = []
while vnode_id is not None:
path.append(vnode_id)
vnode_id = gltf.vnodes[vnode_id].parent
path.reverse()
return path
|
dfa5204e6ee3edd0ab745da8cfe9decb7eb1e18e
| 452,813 |
import math
def computeStartsOfInterval(maxVal: int, intervalLength=1024, min_overlap_part=0.33):
"""
Divide the [0; maxVal] interval into a uniform distribution with at least min_overlap_part of overlapping
:param maxVal: end of the base interval
:param intervalLength: length of the new intervals
:param min_overlap_part: min overlapping part of intervals, if less, adds intervals with length / 2 offset
:return: list of starting coordinates for the new intervals
"""
if maxVal <= intervalLength:
return [0]
nbDiv = math.ceil(maxVal / intervalLength)
# Computing gap to get something that tends to a uniform distribution
gap = (nbDiv * intervalLength - maxVal) / (nbDiv - 1)
coordinates = []
for i in range(nbDiv):
coordinate = round(i * (intervalLength - gap))
if i == nbDiv - 1:
# Should not be useful but acting as a security
coordinates.append(maxVal - intervalLength)
else:
coordinates.append(coordinate)
# If gap is not enough, we add division with a intervalLength / 2 offset
if gap < intervalLength * min_overlap_part:
coordinates.append(coordinate + intervalLength // 2)
return coordinates
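
# Worked example (illustrative numbers): a length-1500 axis split into 1024-long
# windows; the last start is pulled back so the final window ends exactly at 1500.
assert computeStartsOfInterval(1500, intervalLength=1024) == [0, 476]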
|
baea88e231439b5d0cb57eb433126f000301b445
| 640,958 |
def rescale(volume, min, max):
"""Rescale the values of a volume between min and max."""
factor = float(max - min) / float(volume.max() - volume.min())
return ((volume - volume.min()) * factor) + min
|
d15ecf01591f90daf9d196cf3beb31295eccaaa2
| 48,478 |
def convert_float_to_uint_audio(audio):
"""
Converts a numpy array of double [-1,1] audio samples to properly scaled, 14-bit audio samples
:param audio: numpy array of double [-1,1] audio samples
:return: uint16 numpy array of scaled 14-bit audio samples
"""
if audio.max() > 1 or audio.min() < -1:
raise ValueError('Audio out of bounds!')
max_reading = 0x3FFF
audio = audio + 1
audio = audio / 2
audio = audio * max_reading
audio = audio.astype('uint16')
return audio
|
a3830a9eed37241494c4aa76d767f9c1178e6b79
| 258,638 |
def get_fname(config):
"""
Parameters
----------
config : dict
A dictionary with all the arguments and flags.
Returns
-------
fname : str
The filename for the saved model.
"""
hidden_dims_str = '_'.join([str(x) for x in config['hidden_dims']])
num_heads_str = '_'.join([str(x) for x in config['num_heads']])
batch_size = config['batch_size']
epochs = config['epochs']
lr = config['lr']
weight_decay = config['weight_decay']
dropout = config['dropout']
transductive = str(config['transductive'])
fname = 'gat_hidden_dims_{}_num_heads_{}_batch_size_{}_epochs_{}_lr_{}_weight_decay_{}_dropout_{}_transductive_{}.pth'.format(
hidden_dims_str, num_heads_str, batch_size, epochs, lr,
weight_decay, dropout, transductive)
return fname
|
51ba0e3fdeda03e9e8b7d13fd9806ab629c547f1
| 590,402 |
def get_genefiles(prefix, genes):
"""
Create a list of genes files for the given patient.
:param prefix: prefix
:param genes: KO genes
:return: List of names to be processed
"""
genefiles = []
for gene in genes:
if gene != "":
name = prefix + "_personalized__" + gene + "_ko"
else:
name = prefix + "_personalized"
genefiles.append(name)
return genefiles
|
755ef4b651a8bde7148943f3716a1a5939ab5a9a
| 263,552 |
import traceback
def system_hook_format(type, value, tb, optional_info: str= '') -> str:
"""
    Intended to be assigned to sys.excepthook as a hook.
Gives programmer opportunity to do something useful with info from uncaught exceptions.
Parameters
type: Exception type
value: Exception's value
tb: Exception's traceback
optional_info: additional information we want to log or show
"""
# NOTE: because format() is returning a list of string,
# I'm going to join them into a single string, separating each with a new line
traceback_details = '\n'.join(traceback.extract_tb(tb).format())
error_msg = "Uncaught exception:\n" \
f"{optional_info}\n" \
f"Type: {type}\n" \
f"Value: {value}\n" \
f"Traceback: {traceback_details}"
return error_msg
|
65ebd028cd950e9c5891eb340eb49f6c640e7b15
| 464,149 |
def is_number(value):
"""
Checks whether the value is a number
:param value:
:return: bool
"""
try:
complex(value)
except ValueError:
return False
return True
|
abeff66f6109143bb4b38f001e599eaa1f32280c
| 147,972 |
def daterange_to_yaml(daterange):
"""Converts a tuple into a dict with 'Start' and optional 'End' keys"""
if daterange[1] is None:
return {'Start': daterange[0].date()}
else:
return {'Start': daterange[0].date(), 'End': daterange[1].date()}
|
86be1a79514fddd6240f0f17e230b885c2709032
| 547,702 |
def splitblocks(lst, limit):
"""Split list lst in blocks of max. limit entries. Return list of blocks."""
res = []
start = 0
while start < len(lst):
res.append(lst[start:start + limit])
start += limit
return res
|
3189355235cfada1cd0548e3362b501fccfef0eb
| 653,487 |
def has_required_spacing(string, required_spacing):
"""
Checks if a string has the required amount of spaces
on each side.
:param string: The string which is to be checked.
:param required_spacing: The number of spaces expected on both
sides of the string.
:return: True if the string has the required
number of spaces on both sides, False
otherwise.
>>> has_required_spacing(" foo ", 2)
True
>>> has_required_spacing(" foo", 1)
False
"""
leading_spaces = len(string) - len(string.lstrip(' '))
trailing_spaces = len(string) - len(string.rstrip(' '))
return (
leading_spaces == required_spacing
and trailing_spaces == required_spacing)
|
4e303ecf74b22245212d9573479b72682446d818
| 309,931 |
def read_ccloud_config(config_file):
"""Read Confluent Cloud configuration for librdkafka clients"""
conf = {}
with open(config_file) as fh:
for line in fh:
line = line.strip()
if line[0] != "#" and len(line) != 0:
parameter, value = line.strip().split('=', 1)
conf[parameter] = value.strip()
return conf
|
86ec495ad4c1d2b552b79d001add8aebf3cfe8a5
| 676,987 |
def line_with_unit_test_header(line):
"""Check if the given string represents unit test header."""
return line.startswith("Name ") and line.endswith("Stmts Miss Cover Missing")
|
7e0e0d0e89b087e1ea1da66ce2107573c3e4ca50
| 126,071 |
def get_sort_params(value, default_key=None):
"""Parse a string into a list of sort_keys and a list of sort_dirs.
:param value: A string that contains the sorting parameters.
:param default_key: An optional key set as the default sorting key when
no sorting option value is specified.
:return: A list of sorting keys and a list of sorting dirs.
"""
keys = []
dirs = []
if value:
for s in value.split(','):
s_key, _s, s_dir = s.partition(':')
keys.append(s_key)
s_dir = s_dir or 'asc'
nulls_appendix = 'nullsfirst' if s_dir == 'asc' else 'nullslast'
sort_dir = '-'.join([s_dir, nulls_appendix])
dirs.append(sort_dir)
elif default_key:
# use default if specified
return [default_key, 'id'], ['asc-nullsfirst', 'asc']
if 'id' not in keys:
keys.append('id')
dirs.append('asc')
return keys, dirs
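
# Illustrative call: a trailing 'id' key is always appended as a tiebreaker.
_keys, _dirs = get_sort_params('name:desc,created_at')
# _keys == ['name', 'created_at', 'id']
# _dirs == ['desc-nullslast', 'asc-nullsfirst', 'asc']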
|
f47d2c6e3c6d5f527bc2dedfecdba36e18283e44
| 609,347 |
import json
def load_services(file_name="./json/amtrak-trip.json"):
"""Load a json file with parsed services from an amtrak itinerary."""
with open(file_name) as f:
return json.loads(f.read())
|
e3378086a937b6138248e214af2cc20761eaba2d
| 601,606 |
def set_label(termtype, timeperiod):
""" Sets label based on the radio buttons selected"""
label = 'Graph the following concepts (comma-separated, using yearly frequencies):' if termtype == 'Noun phrases' and timeperiod == 'yearly' \
else 'Graph the following comma-separated noun phrases (monthly frequencies):' if termtype == 'Noun phrases' and timeperiod == 'monthly' \
else 'Graph the following comma-separated entities (yearly frequencies):' if termtype == 'Wikipedia entities' and timeperiod == 'yearly' \
else 'Graph the following comma-separated entities (monthly frequencies):' if termtype == 'Wikipedia entities' and timeperiod == 'monthly' \
else 'Enter a phrase and show its cluster together with its other concepts:'
return label
|
985d0893c37bbeeb9f259c7e68dc3d1e79f9d92a
| 329,387 |
def depth(self):
"""
Computes the depth of a diagram by foliating it.
>>> from discopy.monoidal import *
>>> x, y = Ty('x'), Ty('y')
>>> f, g = Box('f', x, y), Box('g', y, x)
>>> assert Id(x @ y).depth() == 0
>>> assert f.depth() == 1
>>> assert (f @ g).depth() == 1
>>> assert (f >> g).depth() == 2
"""
*_, slices = self.foliate(yield_slices=True)
return len(slices)
|
8a4df4633503de4dfdd1a57d6c6ff105a114a111
| 179,166 |
from typing import Sequence
import re
def parse_log(log_file: str, fields: Sequence[str]):
"""parse fields from the log file
Args:
log_file (str): the log file path.
fields (Sequence[str]): The wanted fields.
Returns: dict. key is the field name and value is the parsed values.
"""
with open(log_file, "r") as f:
lines = f.readlines()
res = {}
for field in fields:
res[field] = []
for line in lines:
matches = re.findall(r"(\w+): ([0-9.]*[0-9])", line)
for (k, v) in matches:
if k in fields:
res[k].append(float(v))
return res
|
6165b4aeaafa3e3eecb2fcae39a6542cafc1653c
| 53,043 |
def write_xml(filename, element_tree):
""" Write data to XML or GML file
:param filename: name of XML or GML file to write data to
:type filename: str
:param element_tree: data as ElementTree object
:type element_tree: xmlElementTree
"""
return element_tree.write(filename)
# this will write declaration tag in first line:
# return element_tree.write(filename, encoding='utf-8', xml_declaration=True)
|
b7da10953df3c58beff18907ad52f0fe5313d6a1
| 150,262 |
def _gen_event_lat_lon(catalog):
""" Create dictonary of event latitude and longitude using
event.resource_id.id as key"""
event_lat = dict()
event_lon = dict()
for event in catalog.events:
eve_id = event.resource_id.id
event_lat[eve_id] = event.origins[0].latitude
event_lon[eve_id] = event.origins[0].longitude
return event_lat, event_lon
|
97137474728785e2f1b57b76ff7a8d4a2a007ff8
| 160,371 |
def pixels_to_EMU(value):
"""1 pixel = 9525 EMUs"""
return int(value * 9525)
|
c9be7deacae47819ab30d5589dbae555124d6409
| 50,124 |
def _format_collider_string(colliders):
""" Write the string for the bath gas collider and their efficiencies
for the Lindemann and Troe functional expressions:
:param colliders: the {collider: efficiency} dct
:type colliders: dct {str: float}
:return: collider_str: Chemkin string with colliders and efficiencies
:rtype: str
"""
collider_str = ' ' # name_buffer
collider_str += ''.join(
('{0:s}/{1:4.3f}/ '.format(collider, efficiency)
for collider, efficiency in colliders.items()))
collider_str += '\n'
return collider_str
|
4e4aa8ae46dfcf05f00222b4cf5c95384977d651
| 48,788 |
def delanguageTag(obj):
"""
Function to take a language-tagged list of dicts and return an untagged
string.
:param obj: list of language-tagged dict
:type obj: list
:returns: string
"""
if not isinstance(obj, list):
return(obj)
data = (obj if len(obj) else [{}])[-1]
return data['@value'] if data.get('@value', '') else data.get('@id', '')
|
0bec893fe3fe02061147ff6fa4e8ed8878bd7378
| 56,207 |
def from_roman(roman_string):
""" Converts roman number to integer. """
NUMBERS = {
'I' : 1,
'V' : 5,
'X' : 10,
'L' : 50,
'C' : 100,
'D' : 500,
'M' : 1000,
}
result = 0
prev_value = 0
for c in roman_string.upper():
value = NUMBERS[c]
if prev_value > 0 and prev_value < value:
result -= prev_value * 2
result += value
prev_value = value
return result
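
# Quick checks of the subtractive rule (hand-computed):
assert from_roman('MCMXCIV') == 1994
assert from_roman('xiv') == 14  # lower case is accepted via .upper()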
|
076bca27f5657faa097117e6eea78d51179923b9
| 319,209 |
def take_data(data, mask):
"""Selects correlation(difference) data at given masked indices.
Parameters
----------
data : tuple of ndarrays
Data tuple as returned by `ccorr` and `acorr` functions
mask : ndarray
A boolean frame mask array
Returns
-------
out : tuple
Same data structure as input data, but with all arrays in data masked
with the provided mask array.
"""
def _mask(i,data):
if i !=1:
data = data[...,mask,:] if data is not None else None
return data
return tuple((_mask(i,d) for (i,d) in enumerate(data)))
|
907b23ca8c15f254d65dad94c789143ceea234fa
| 84,522 |
import re
def convert_to_prettyprint(xmlstr):
"""
Convert XML to pretty print for older python versions (< 2.7).
:param xmlstr: input XML string
:return: XML string (pretty printed)
"""
text_re = re.compile(r'>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) # Python 3 (added r)
return text_re.sub(r'>\g<1></', xmlstr)
|
1f3a9a9d0a92d46c9dafe111f1c71746f1a1ad0c
| 125,895 |
def parse_user_assign_sync_agent_data(json):
"""
Parse the incoming statement for a sync agent event concerning a user role assignment.
:param json: A statement concerning a user role assignment that needs to be synced.
:type json: dict(str, NoneType)
:return: A dictionary containing all user information, a course id, and its role for that course.
:rtype: dict(str, str)
"""
statement = json['statement']
role = statement['object']['definition']['name']['en']
course_id = statement['object']['id'].split("=")[-1]
user_info = statement['context']['extensions']['http://id.tincanapi.com/activitytype/role']
sync_user_role_data = {
'role': role,
'course_id': course_id,
}
sync_user_role_data.update(user_info)
return sync_user_role_data
|
f5856987729b9644858b1409feb1271c99058a61
| 244,844 |
def _check_dict_keys(mapping, valid_keys,
key_description, valid_key_source):
"""Check that the keys in dictionary are valid against a set list.
Return the input dictionary if it is valid,
otherwise raise a ValueError with a readable error message.
Parameters
----------
mapping : dict
The user-provided dict whose keys we want to check.
valid_keys : iterable
The valid keys.
key_description : str
Description of the keys in ``mapping``, e.g., "channel name(s)" or
"annotation(s)".
valid_key_source : str
Description of the ``valid_keys`` source, e.g., "info dict" or
"annotations in the data".
Returns
-------
mapping
If all keys are valid the input dict is returned unmodified.
"""
missing = set(mapping) - set(valid_keys)
if len(missing):
_is = 'are' if len(missing) > 1 else 'is'
msg = (f'Invalid {key_description} {missing} {_is} not present in '
f'{valid_key_source}')
raise ValueError(msg)
return mapping
|
5d137fecfc20161460b8d4c289471bf053a91063
| 608,916 |
def __get_xml_text(node):
"""Finds all child text nodes of the given xml node and returns them
concatenated as a string
"""
result = ""
for node in node.childNodes:
if node.nodeType == node.TEXT_NODE:
result += node.data
return result
|
87dca2ed0b590511518009c3aa0d173e2386f2aa
| 541,720 |
def get_error(msg, prefix, default_value='Error:'):
""" Generate an error message given the
actual message (msg) and a prefix (prefix)
>>> get_error('Coredumped','Error:')
'Error: Coredumped'
"""
    if prefix != default_value:
prefix = default_value + prefix
error_msg = prefix + ' ' + msg
return error_msg
|
02d367a09f3bbdc851815f9a2d786ddb9e0f6454
| 501,993 |
def measurement_exists(cur, project_key, id):
"""
Check whether the measurement exists in the database in the given project.
"""
measurement = cur.execute(
"SELECT * FROM measurements WHERE id=? AND projects_key=?", (id, project_key)
).fetchone()
if not measurement:
return False
return True
|
d5b8392813960a50364640692aeb12d7b694a44b
| 627,634 |
def tosym(name):
"""Replace unsupported characters in ASN.1 symbol names"""
return str(name).replace(' ', '').replace('-', '_')
|
0ef619960e64c21aa422733708fb9b4ce5e890e9
| 447,573 |
def truncate_directory(directory: str) -> str:
"""Simple truncation of a given directory path as string."""
if len(directory) > 50:
split_directory = directory.split('/')
prefix_truncated = '/'.join(directory.split('/')[len(split_directory)-5:])
return '.../' + prefix_truncated
else:
return directory
|
a9d4c1958096be181b67983711907b0d15cfa2c7
| 72,451 |
def update_operators_cache(sdk):
"""
Update the global element id->name and name->id caches.
:param sdk: Logged in CloudGenix SDK Constructor
:return: True or False, expected to fail if no access.
"""
global OPERATORS_ID2N
operators_resp = sdk.get.operators_t()
if not operators_resp.cgx_status:
# user may not have access. Return failure.
return False
# update cache
operators_items = sdk.extract_items(operators_resp)
OPERATORS_ID2N = sdk.build_lookup_dict(operators_items, key_val='id', value_val='email')
# return success
return True
|
3758db43e770d9bcfa882e9846a888ff5b413c37
| 160,711 |
import re
def str_split(string, split_length=1):
"""Method splits string to substrings
Args:
string (str): original string
split_length (int): substrin length
Returns:
list: list of strings
"""
return list(filter(None, re.split('(.{1,%d})' % split_length, string)))
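
# Example (illustrative): the trailing chunk may be shorter than split_length.
assert str_split('abcdefg', 3) == ['abc', 'def', 'g']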
|
65dd325fb7fda7ac1af2b18840e42f567d1b971d
| 679,733 |
def quote_unident(val):
"""
This method returns a new string replacing "" with ",
and removing the " at the start and end of the string.
"""
if val != None and len(val) > 0:
val = val.replace('""', '"')
if val != None and len(val) > 1 and val[0] == '"' and val[-1] == '"':
val = val[1:-1]
return val
|
5decf3d2402d2701e5fcebc069a8cfb773243f52
| 360,142 |
def parse_results(result_lines):
"""
Parses results from the format:
['Dimension name1 = 123',
'Long dimension name2 = 23.45']
Into a dict {'Dimension name1': 123.0, 'Dimension name2': 23.45}
"""
result_dict = dict()
for line in result_lines:
line_splits = line.split('=')
metric = line_splits[0].strip()
value = float(line_splits[1].strip())
result_dict[metric] = value
return result_dict
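
# Illustrative input/output (hypothetical metric names):
assert parse_results(['Accuracy = 0.91', 'Macro F1 = 0.88']) == {'Accuracy': 0.91, 'Macro F1': 0.88}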
|
a28d1978928cc3d90052745b9710503eda30bfc6
| 242,248 |
def env_str_to_bool(varname, val):
"""Convert the boolean environment value string `val` to a Python bool
on behalf of environment variable `varname`.
"""
if val in ["False", "false", "FALSE", "F", "f", "0", False, 0]:
rval = False
elif val in ["True", "true", "TRUE", "T", "t", "1", True, 1]:
rval = True
else:
raise ValueError("Invalid value " + repr(val) +
" for boolean env var " + repr(varname))
return rval
|
6572a4368f492510b6287a49ef38be2ef614dca0
| 109,414 |
from math import pow
def minkowski_distance(x, y, p=2):
"""
Calculates the minkowski distance between two points.
:param x: the first point
:param y: the second point
    :param p: the order of the Minkowski algorithm. If *p=1* it is equal
        to the Manhattan distance, if *p=2* it is equal to the Euclidean
        distance. The higher the order, the closer it converges to the
Chebyshev distance, which has *p=infinity*.
"""
assert len(y) == len(x)
assert len(x) >= 1
sum = 0
for i in range(len(x)):
sum += abs(x[i] - y[i]) ** p
return pow(sum, 1.0 / float(p))
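
# Sanity checks (hand-computed): p=1 gives the Manhattan distance, p=2 the Euclidean one.
assert minkowski_distance([0, 0], [3, 4], p=1) == 7.0
assert minkowski_distance([0, 0], [3, 4], p=2) == 5.0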
|
ffa688e780a2253184b9bd1a502de74234e07d06
| 505,379 |