content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
import gzip
def gzip_file(file, file_name):
    """Gzip the contents of ``file`` into ``<file_name>.gz``.

    :param file file: an iterable of byte strings (e.g. an open binary
        file) whose items are written to the archive
    :param str file_name: base name for the output; ``.gz`` is appended
    :return: the gzipped file
    :rtype: gzip file
    """
    # writelines streams the source iterable straight into the compressed
    # stream without building one big buffer.
    with gzip.open(file_name + '.gz', 'wb') as gzipped_file:
        gzipped_file.writelines(file)
    # NOTE(review): the handle is already closed when the with-block exits,
    # so the returned object is only useful for metadata such as ``.name``.
    return gzipped_file
|
d1569b3ef8ebed46eb7a3fd128479d3999fb228c
| 31,282 |
import json
def to_review_json(s):
    """Wrap a non-empty string in a review-comment JSON object.

    :param s: A string
    :return: ``'{"reviewComment": ...}'`` for truthy input, else ``None``
    """
    # Falsy input (empty string / None) yields no JSON at all.
    if not s:
        return None
    return json.dumps({"reviewComment": s})
|
384f8e4f8d625081fa858e184ae7a559f590a420
| 31,288 |
def _build_index_vcf_command_str(bgzipped_vcf):
"""Generate command string to index vcf file."""
command = " ".join([
'tabix -p vcf', bgzipped_vcf
])
return command
|
dbc1a370b2d97ffa5726d625135a4b484f41b5c6
| 31,293 |
def is_from(category, symbol):
    """Checks if the symbol is from the category given.

    Args:
        category (dict): The dictionary of the category to check.
        symbol (str): The symbol or word given to analyze.
    Returns:
        bool: Whether the symbol is part of the category given.
    """
    try:
        # Membership test avoids the exception-per-miss of the old
        # ``category[symbol]`` + bare ``except`` pattern.
        return symbol in category
    except TypeError:
        # Unhashable symbols can never be dict keys.
        return False
|
69a2e9905a86d149aac6aff215e5f07542b3f8ab
| 31,295 |
def varassign(v, X, E, rho, argument):
    """!
    Assigns input to specified gate set variables

    Parameters
    -------
    v : numpy array
        New set of variables
    X : numpy array
        Current gate estimate
    E : numpy array
        Current POVM estimate
    rho : numpy array
        Current initial state estimate
    argument : {"X", "K", "E", "rho"}
        Which part of the gate set is updated

    Returns
    -------
    [.,.,.]: 3 element list
        List in the order [X,E,rho] where either X, E or rho is replaced by v,
        depending on the input to the "argument" variable

    Raises
    ------
    ValueError
        If ``argument`` is not one of the recognized names (the old version
        silently returned ``None`` here).
    """
    if argument in ("X", "K"):
        return [v, E, rho]
    if argument == "E":
        return [X, v, rho]
    if argument == "rho":
        return [X, E, v]
    raise ValueError("argument must be 'X', 'K', 'E' or 'rho', got {!r}".format(argument))
|
ee71ca488883e12fb036acc31a1ec96f184bc4f0
| 31,302 |
def P_to_a(P, Mstar):
    """Convert orbital periods to semimajor axes via Kepler's third law.

    Parameters
    ----------
    P : array-like
        orbital periods [days]
    Mstar : float
        stellar mass [solar masses]

    Returns
    -------
    a : array-like
        semi-major axis [stellar radii]
    """
    P_EARTH = 365.24   # Earth's orbital period [days]
    A_EARTH = 215.05   # 1 AU expressed in solar radii
    scaled = (P / P_EARTH) ** 2 * (1 / Mstar)
    return A_EARTH * scaled ** (1 / 3)
|
0596930a7f84679db0e050974a44ae8dfa437e10
| 31,304 |
def html_spam_guard(addr, entities_only=False):
    """Return a spam-protected version of email ADDR that renders the
    same in HTML as the original address.  If ENTITIES_ONLY, use a less
    thorough mangling scheme involving entities only, avoiding the use
    of tags."""
    if entities_only:
        template = "&#{};"
    else:
        template = "<span>&#{};</span>"
    # Every character becomes its numeric character reference.
    return "".join(template.format(ord(ch)) for ch in addr)
|
239115425ab9a39fdea1a70ba07e648a63e91d2b
| 31,305 |
def person_image_file(person):
    """Finds primary person image file name.
    Scans INDI's OBJE records and finds "best" FILE record from those.
    Parameters
    ----------
    person : `ged4py.model.Individual`
        INDI record representation.
    Returns
    -------
    file_name : `str` or ``None``
        String with file name or ``None``.
    Notes
    -----
    OBJE record contains one (in 5.5) or few (in 5.5.1) related multimedia
    files. In 5.5 file contents can be embedded as BLOB record though we do
    not support this. In 5.5.1 file name is stored in a record.
    In 5.5.1 OBJE record is supposed to have structure::
        OBJE
          +1 FILE <MULTIMEDIA_FILE_REFN> {1:M}
            +2 FORM <MULTIMEDIA_FORMAT> {1:1}
              +3 MEDI <SOURCE_MEDIA_TYPE> {0:1}
          +1 TITL <DESCRIPTIVE_TITLE> {0:1}
          +1 _PRIM {Y|N} {0:1}
    Some applications which claim to be 5.5.1 version still store OBJE
    record in 5.5-like format::
        OBJE
          +1 FILE <MULTIMEDIA_FILE_REFN> {1:1}
          +1 FORM <MULTIMEDIA_FORMAT> {1:1}
          +1 TITL <DESCRIPTIVE_TITLE> {0:1}
          +1 _PRIM {Y|N} {0:1}
    This method returns the name of the FILE corresponding to _PRIM=Y, or if
    there is no _PRIM record then the first FILE record. Potentially we also
    need to look at MEDI record to only chose image type, but I have not seen
    examples of MEDI use yet, so for now I only select FORM which correspond
    to images.
    """
    # Fallback: first acceptable FILE seen if no _PRIM=Y record exists.
    first = None
    for obje in person.sub_tags('OBJE'):
        # assume by default it is some image format
        objform = obje.sub_tag("FORM")
        objform = objform.value if objform else 'jpg'
        # _PRIM marks the preferred multimedia object for this person.
        primary = obje.sub_tag("_PRIM")
        primary = primary.value == 'Y' if primary is not None else False
        files = obje.sub_tags("FILE")
        for file in files:
            # A per-FILE FORM (5.5.1 layout) overrides the OBJE-level one.
            form = file.sub_tag("FORM")
            form = form.value if form is not None else objform
            if form.lower() in ('jpg', 'gif', 'tif', 'bmp'):
                if primary:
                    # Primary image wins immediately.
                    return file.value
                elif not first:
                    first = file.value
    return first
|
fb8682f66a938ae14690486880d62c6ec4e6995f
| 31,307 |
def entity(reference):
    """Return a numeric (&#reference;) or symbolic (&reference;) entity,
    depending on the reference's type.
    """
    numeric_form = '&#{0:d};'
    symbolic_form = '&{0};'
    try:
        # Integer-like references format with :d; anything else raises
        # ValueError and falls through to the symbolic form.
        return numeric_form.format(reference)
    except ValueError:
        return symbolic_form.format(reference)
#
|
1b547a9506badd9fc3ddd575c3e67acebe6af539
| 31,312 |
def remove_added_loadgen(net_t, loadorgen):
    """
    Removes the load or sgen named 'Cap test' from the network.

    INPUT
        net_t (PP net) - Pandapower net
        loadorgen (str) - 'sgen' or 'load' for generation or load for additional capacity connected

    OUTPUT
        net_t (PP net) - Updated Pandapower net (any other value of
        ``loadorgen`` returns the net unchanged)
    """
    # Drop by matching the 'name' column; net_t.load / net_t.sgen are
    # pandas DataFrames in pandapower.
    if loadorgen == "load":
        net_t.load = net_t.load.drop(net_t.load[net_t.load.name == 'Cap test'].index)
    elif loadorgen == "sgen":
        net_t.sgen = net_t.sgen.drop(net_t.sgen[net_t.sgen.name == 'Cap test'].index)
    return net_t
|
73ccf198c096454fef4676a18355c6359a076b2c
| 31,317 |
from typing import Optional
def _term_converter(term: Optional[int]) -> Optional[int]:
"""
Converter function for ``term`` in :class:`~.BaseProduct``.
:param term: The number of months that a product lasts for if it is fixed length.
"""
if term is None:
return None
else:
return int(term)
|
65277a31fe4666d410c9aa46a8b01fe59dbcb4fc
| 31,322 |
def generate_block_data(data, index, blocktype):
    """Generate the largest block possible starting at *index* in *data*.

    Returns (output, new_index) where output is the generated block and
    new_index points to where the next block in data would be extracted.
    Payloads are capped at 0xff bytes per block.
    """
    payload_len = min(len(data) - index, 0xff)
    block = bytearray(b'\x3c')          # block marker
    block.append(blocktype)
    block.append(payload_len)
    block += data[index:index + payload_len]
    # Checksum covers everything after the marker byte, truncated to 8 bits.
    block.append(sum(block[1:]) & 0xff)
    return (block, index + payload_len)
|
ddf6af222b22be7e1c73ee98c3ebd982ea23fe5b
| 31,325 |
from warnings import warn
def pc2in(picas):
    """
    Converts picas to inches (6 picas per inch).

    Parameters
    ----------
    picas : int or float
        dimension in picas

    Returns
    -------
    inches : float
        dimensions in inches
    """
    standard_ams_widths = (19, 27, 33, 39, 68)
    if picas not in standard_ams_widths:
        warn("Not a standard AMS width")
    return picas / 6.
|
4fee36743e00b263f0f46fca90bf07edca083002
| 31,329 |
def support_count(itemset, transactions):
    """
    Count the number of transactions containing every item of *itemset*.

    :param itemset: items to measure support count for
    :param transactions: list of sets (all transactions)
    >>> simple_transactions = ['ABC', 'BC', 'BD', 'D']
    >>> [support_count(item, simple_transactions) for item in 'ABCDE']
    [1, 3, 2, 2, 0]
    >>> some_transactions = [set(['beer', 'bread', 'milk']), set(['beer']), set(['milk'])]
    >>> support_count(set(['beer']), some_transactions)
    2
    """
    wanted = set(itemset)
    return sum(1 for row in transactions if wanted <= set(row))
|
2d533a81a646b1973980386c9b85998d8fb65be0
| 31,332 |
from typing import Counter
def total_useful_clusters(model):
    """A useful cluster here is defined as being a cluster with > 1 member."""
    member_counts = Counter(model.labels_)
    return sum(1 for size in member_counts.values() if size > 1)
|
93df3354c37f5f252f2071b50b7937c61ce81946
| 31,333 |
def hours(time_str):
    """
    Convert an "hh:mm:ss" string to fractional hours.

    time_str: str (hh:mm:ss)
    """
    h, m, s = map(int, time_str.split(':'))
    return h + m / 60.0 + s / 3600.0
|
c5f95ac4bed1198eba616f959595d6ce7a9c79fd
| 31,335 |
import re
def get_nom_val(atrv):
"""Given a string containing a nominal type, returns a tuple of the
possible values.
A nominal type is defined as something framed between braces ({}).
Parameters
----------
atrv : str
Nominal type definition
Returns
-------
poss_vals : tuple
possible values
Examples
--------
>>> get_nom_val("{floup, bouga, fl, ratata}")
('floup', 'bouga', 'fl', 'ratata')
"""
r_nominal = re.compile('{(.+)}')
m = r_nominal.match(atrv)
if m:
return tuple(i.strip() for i in m.group(1).split(','))
else:
raise ValueError("This does not look like a nominal string")
|
4b059ff48779ce631ed95c4bc9471eafdf617ecb
| 31,341 |
import re
import string
from collections import Counter
def count_words(phrase):
    """
    Return a Counter mapping each word in *phrase* (lowercased) to its
    number of occurrences.
    """
    words = re.findall(r"[0-9a-zA-Z\']+", phrase.lower())
    # Strip punctuation (e.g. leading/trailing apostrophes) per token.
    return Counter(word.strip(string.punctuation) for word in words)
|
b8b7abaa7330906335ed38c59fdc58a6b47dc48b
| 31,344 |
def IsTrivialAttrSpec(attr):
    """Determines whether a given attr only has its name field set.

    Args:
      attr: an AttrSpec instance (protobuf message).
    Returns:
      true iff the only field that is set is the name field.
    """
    # ListFields() returns only the fields that are actually set, so a
    # length of 1 together with HasField('name') means name is the sole
    # populated field.  The full_name check guards against other message
    # types being passed in.
    return (attr.DESCRIPTOR.full_name == 'amp.validator.AttrSpec' and
            attr.HasField('name') and len(attr.ListFields()) == 1)
|
1ff22c6f7cb5d6457430a8d4220fb6caa798cc1f
| 31,349 |
import traceback
def format_traceback_the_way_python_does(type, exc, tb):
    """
    Returns a traceback that looks like the one python gives you in the shell, e.g.

    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
    NameError: name 'name' is not defined
    """
    frames = ''.join(traceback.format_tb(tb))
    header = 'Traceback (most recent call last):'
    return f'{header}\n{frames}{type.__name__}: {exc}'
|
e11783b2215bcd22cd08f7e8657d6ff7a5fc7f35
| 31,352 |
def _get_xml_declaration(version='1.0', encoding='UTF-8'):
"""Gets XML declaration (for the specified version and encoding).
:param version: XML version
:param encoding: encoding
:return: XML declaration
:rtype: str
"""
return '<?xml version="' + version + '" encoding="' + encoding + '"?>'
|
2f9125ca02624cd9c74a80fe9668a6801220f898
| 31,353 |
def get_study_collections(studies, irods_backend):
    """Return a list of all study collection names."""
    paths = []
    for study in studies:
        paths.append(irods_backend.get_path(study))
    return paths
|
53ff6851d12612467d045604c676ea7ed6b0081c
| 31,354 |
import logging
def get_logger(logger_name: str) -> logging.Logger:
    """Return the logger with the name specified by logger_name arg.

    Args:
        logger_name: The name of logger.

    Returns:
        Logger reformatted for this package.
    """
    logger = logging.getLogger(logger_name)
    logger.propagate = False
    # Only attach the handler once: loggers are process-wide singletons, so
    # the old unconditional addHandler duplicated output on repeated calls.
    if not logger.handlers:
        ch = logging.StreamHandler()
        ch.setFormatter(logging.Formatter("[%(name)s] [%(levelname)s] %(message)s"))
        logger.addHandler(ch)
    return logger
|
505f6c89c46dd95c86a5b9193389b64142c31d81
| 31,358 |
import math
def wien(x):
    """
    Wien's displacement constant is defined by b = h * c / (k_B * x), where
    x is described by this nonlinear equation.
    """
    exponential_term = 5 * math.exp(-x)
    return exponential_term + x - 5
|
6db7749651dd72fbaee1971bc940b59944a52db3
| 31,362 |
def asfolder(folder):
    """
    Add "/" at the end of the folder if not present.

    :param folder: the folder name
    :type folder: str
    :return: folder name with / at the end ("/" for the empty string,
        where the old index-based check raised IndexError)
    :rtype: str
    """
    # endswith handles the empty string safely, unlike folder[-1].
    if folder.endswith("/"):
        return folder
    return folder + "/"
|
07340a8c9b21bcc1d7be210bc7f9fa80378cb3a8
| 31,364 |
import torch
def split(value, num_or_size_splits, axis=0):
    """
    Splits a tensor into sub tensors (thin wrapper over ``torch.split``).

    Parameters
    ----------
    value : tensor
        The Tensor to split.
    num_or_size_splits : list
        Either an integer indicating the size of each chunk along split_dim,
        or a 1-D integer Tensor or Python list containing the sizes of each
        output tensor along split_dim.
    axis : int
        The dimension along which to split. Must be in the range
        [-rank(value), rank(value)). Defaults to 0.

    Returns
    -------
    Tuple of Tensor objects resulting from splitting value.
    """
    return torch.split(value, num_or_size_splits, dim=axis)
|
b997285da46db1e20ca92916f46ebc51b8840786
| 31,366 |
import requests
def parse_configdb(configdb_address='http://configdb.lco.gtn/sites/'):
    """
    Parse the contents of the configdb.

    Parameters
    ----------
    configdb_address : str
        URL of the configdb, must be inside LCOGT VPN

    Returns
    -------
    sites : list of dicts
        each site dictionary contains a code and a timezone.
    cameras : list of dicts
        each camera dictionary contains a site, instrument code, camera type
        and schedulable flag.
    """
    # Blocking HTTP GET; raises on network failure and KeyError if the
    # response JSON lacks the expected 'results' structure.
    results = requests.get(configdb_address).json()['results']
    cameras = []
    sites = []
    for site in results:
        sites.append({'code': site['code'], 'timezone': site['timezone']})
        # Walk the enclosure -> telescope -> instrument hierarchy.
        for enc in site['enclosure_set']:
            for tel in enc['telescope_set']:
                for ins in tel['instrument_set']:
                    # Instruments without a science camera are skipped.
                    sci_cam = ins.get('science_camera')
                    if sci_cam is not None:
                        cameras.append({'site': site['code'],
                                        'instrument': sci_cam['code'],
                                        'camera_type': sci_cam['camera_type']['code'],
                                        'schedulable': ins['state'] == 'SCHEDULABLE'})
    return sites, cameras
|
501f9217901f0db30e2bddfebb0a85fbb266a766
| 31,371 |
import random
def sample_with_replacement(population, k):
""" Sample <k> items from iterable <population> with replacement """
n = len(population)
pop = list(population)
return [pop[int(n * random.random())] for _ in range(k)]
|
4114a49faae9981dc2f850db4465ec1ccdc7101c
| 31,381 |
def vertical_velocity_from_pass_diagnostics(diagnostic):
    """Compute vertical velocity from a chain_pass diagnostic row.

    Not defined for pass chains of a single pass (returns None).
    vertical velocity = vertical distance / cumulative time
    """
    vertical_distance = diagnostic[5]
    pass_count = diagnostic[7]
    elapsed_time = diagnostic[9]
    if pass_count <= 1:
        return None
    return float(vertical_distance) / elapsed_time
|
8d9c0d49d9efd97870ad188c0ca8ad144a2ceb40
| 31,390 |
def is_pkcs7_padded(binary_data):
    """Returns whether the data is PKCS 7 padded."""
    # The last byte declares how many padding bytes there should be.
    declared = binary_data[-1]
    padding = binary_data[-declared:]
    # Valid iff every padding byte equals the padding length.
    return all(byte == len(padding) for byte in padding)
|
6b462f979e4bb1ae2a4176d4c3e9d8378bd52e6d
| 31,394 |
from typing import Sequence
from typing import List
def _rescale(val: Sequence[float], low: float, high: float) -> List[float]:
"""
Rescales a list of confidence value between 0 and 1 to an interval [low,
high].
Args:
val (float): List of values in interval (0,1)
low (float): Lower bound of rescaling interval
high (float): Upper bound of rescaling interval
Returns:
Rescaled value (float).
"""
return [(high - low) * x + low for x in val]
|
60722f288fac88bae035aec7bc3cc73b96c282bb
| 31,395 |
def get_handler_name(message):
    """
    Looks at a message, checks it has a sensible type, and returns the
    handler name for that type.
    """
    # A missing or underscore-prefixed type is rejected up front.
    if "type" not in message:
        raise ValueError("Incoming message has no 'type' attribute")
    msg_type = message["type"]
    if msg_type.startswith("_"):
        raise ValueError("Malformed type in message (leading underscore)")
    # Handler names use underscores where the type uses dots.
    return msg_type.replace(".", "_")
|
1e0e19db61de993df4c01466551daf075a7fe60a
| 31,397 |
import hashlib
import pickle
def input_data_fingerprint(input_data):
    """Fingerprint of the input data. Will be used in duplicate detection.

    Returns the MD5 hex digest of the pickled input.
    """
    return hashlib.md5(pickle.dumps(input_data)).hexdigest()
|
d841e7d1ecbba938ada996294769ab7c6743f480
| 31,401 |
def _fractional_tune(tune: float) -> float:
"""
Return only the fractional part of a tune value.
Args:
tune (float): tune value.
Returns:
The fractional part.
"""
return tune - int(tune)
|
42b2cc45c3fa071c0ea78343fb3d6152ecacbbf1
| 31,405 |
def select_random_from_category(conn, cat):
    """
    Obtains a random fortune from a given category.

    :param conn: A handle to the database connection; conn[1] is presumably
        a DB-API cursor — TODO confirm against the caller.
    :param cat: The category ID.
    :return: The text of the fortune.
    """
    # ORDER BY RANDOM() LIMIT 1 picks one row uniformly (SQLite syntax);
    # the category is bound as a string parameter.
    conn[1].execute("SELECT data FROM fortunes WHERE category = ? ORDER BY "
                    + "RANDOM() LIMIT 1", (str(cat),))
    # fetchall()[0][0] raises IndexError when the category has no fortunes.
    return conn[1].fetchall()[0][0]
|
622de6c17c718df1d60d124f98003eefdaeefdba
| 31,407 |
def get_prefix(n, factor=1024, prefixes=None):
    """Get magnitude prefix for number.

    Returns (scaled_value, prefix) where the value has been divided by
    *factor* once per prefix step.
    """
    if prefixes is None:
        prefixes = ('',) + tuple('kMGTPEZY')
    value = n
    index = 0
    # Iterative form of the original recursion: divide until the value
    # fits under the factor or we run out of prefixes.
    while abs(value) >= factor and index < len(prefixes) - 1:
        value = value / factor
        index += 1
    return value, prefixes[index]
|
6d66bbe1642b8711484489f3be878196c763607e
| 31,410 |
from typing import Tuple
def process_arguments(arguments: str) -> Tuple[str, str, int]:
"""
Process the arguments given to !weather, dividing them into state, location and future
Uses default of QLD, Brisbane and 0 if not given
"""
args = arguments.split(" ") if arguments else []
if args and args[-1].lstrip('-+').isnumeric():
future = int(args.pop())
else:
future = 0
# get location
if args:
if args[0].upper() in ["NSW", "ACT", "NT", "QLD", "SA", "TAS", "VIC", "WA"]:
state = args.pop(0).upper()
else:
state = "QLD"
location = " ".join(args)
else:
state = "QLD"
location = "Brisbane"
return state, location, future
|
8bd887bab7d3bbc002973e4a327610933130021a
| 31,411 |
def read_input_file(filename: str):
    """
    Read a tab-separated NER file into sentences and a label index.

    Each line holds a word and its label (named entity) separated by a tab;
    sentences are separated by empty lines.  Each sentence becomes a list of
    (word, label) tuples, and every label seen is assigned an index, with
    'O' fixed at 0.

    :param filename: Name of the file
    :return: List of sentences, map of label to index
    """
    sentences = []
    current = []
    label2idx = {'O': 0}
    next_idx = 1
    with open(filename, 'r', encoding='utf-8') as handle:
        for raw_line in handle:
            stripped = raw_line.strip()
            if not stripped:
                # Blank line closes the current sentence (if any).
                if current:
                    sentences.append(current)
                    current = []
                continue
            columns = stripped.split('\t')
            word = columns[0]
            label = columns[1]
            current.append((word, label))
            if label not in label2idx:
                label2idx[label] = next_idx
                next_idx += 1
    # Flush a trailing sentence not followed by a blank line.
    if current:
        sentences.append(current)
    return sentences, label2idx
|
c44e19aafb8b2e1a58b96275bd794cb270b9ad76
| 31,412 |
def bsc(n):
    """ count the bits set in n"""
    count = 0
    # Examine each bit position up to the number's bit length.
    for shift in range(n.bit_length()):
        if (n >> shift) & 1:
            count += 1
    return count
|
d44dfc3495a293d5f98f053c279813318cb8906a
| 31,414 |
def group_months(x):
    """ A binning function to reduce the number of months patterns are released. """
    # Two-month bins starting below 3; 9-13 share bin 5; values of 13 or
    # more pass through unchanged (matching the original behavior).
    if x < 3:
        return 1
    if x < 5:
        return 2
    if x < 7:
        return 3
    if x < 9:
        return 4
    if x < 13:
        return 5
    return x
|
b4bb4a86a3403e3ce897c2d1a49209873279c11f
| 31,415 |
def isWinner(board):
    """Looks at `board` and returns either '1' or '2' if there is a winner or
    'tie' or 'no winner' if there isn't. The game ends when a player has 24 or
    more seeds in their mancala or one side of the board has 0 seeds in each
    pocket."""
    score1 = board['1']
    score2 = board['2']
    side_one_empty = sum(board[pocket] for pocket in 'ABCDEF') == 0
    side_two_empty = sum(board[pocket] for pocket in 'GHIJKL') == 0
    game_over = score1 >= 24 or score2 >= 24 or side_one_empty or side_two_empty
    if not game_over:
        return 'no winner'
    # Game is over, find player with largest score.
    if score1 > score2:
        return '1'
    if score2 > score1:
        return '2'
    return 'tie'
|
ff0b34f3e4072e72533482600124d4be0ad0ba31
| 31,419 |
from typing import Any
def _strip_db_indexing_key(obj: dict[str, Any]):
"""
Strip MongoDB's ObjectId key `_id`.
"""
return {key: obj[key] for key in obj if key != "_id"}
|
5c63742bb4f8e2bd8a3844eb549468a33dd5df7c
| 31,422 |
def is_ambiguous_align(tags, multi_align_tag):
    """Returns whether the read aligns to multiple locations. The
    multi_align_tag depends on mapper. For bowtie2 it is XS."""
    return any(tag[0] == multi_align_tag for tag in tags)
|
fa70ff74d57215b74ccd7afd10ca381f7a1c5762
| 31,423 |
def html_table(table, cellspacing="10", style='"width:500px"'):
    """
    Create html code for a table.

    :param table: A list of rows of list of cells (text)
    :param cellspacing: value for the table's cellspacing attribute
    :param style: Html style for the table, e.g. "width:300px"; accepted for
        backward compatibility but currently unused (cellspacing is emitted
        instead).
    :return: html code for the table

    e.g. html_table([['hello', 'world'], ['test', 'test4']]) yields
    <table cellspacing="10" >
    <tr>
        <td>hello</td>
        <td>world</td>
    </tr>
    ...
    </table>
    """
    rows_html = ""
    for row in table:
        cells = "".join('\t<td>{}</td>\n'.format(cell) for cell in row)
        # Bug fix: the row previously closed with '<tr>' instead of '</tr>',
        # producing invalid HTML.
        rows_html = "".join([rows_html, '<tr>\n', cells, '\n</tr>\n'])
    return "".join(['<table cellspacing="', str(cellspacing), '" >\n', rows_html, '\n</table>'])
|
b3404a43b7ca20fe83f97892595d53de90a8987d
| 31,424 |
import math
def entropy(ps):
    """Calculates the entropy (log 2) of the distribution given by ps.

    Zero probabilities contribute nothing (they are skipped rather than
    passed to log).
    """
    return -sum(p * math.log(p, 2) for p in ps if p != 0)
|
609e0e2f03579c8ce39f274116f69ca177eabccc
| 31,425 |
def get_ray_index_for_grid_point(ray, grid_idx, n_depth_pts):
    """Given a ray and a particular point on the physical grid, return the
    index along that ray corresponding to that point.

    Rays with negative mu index the grid directly; others index it from the
    far end.
    """
    if ray.mu < 0:
        return grid_idx
    return n_depth_pts - (grid_idx + 1)
|
9d1573812c4d66ef53cf0640a8c1e6602b170772
| 31,428 |
def cB_to_zipf(cB):
    """
    Convert a word frequency from centibels to the Zipf scale
    (see `zipf_to_freq`).

    The Zipf scale is related to centibels, the logarithmic unit that wordfreq
    uses internally, because the Zipf unit is simply the bel, with a different
    zero point. To convert centibels to Zipf, add 900 and divide by 100.
    """
    zipf = (cB + 900) / 100
    return zipf
|
c71a2a7b7417480cc00855852cf447e4741317b0
| 31,429 |
def bayes_factor_pass(bayes_factor, bf_filter):
    """
    Check which of a list of bayes factors pass the filter check:
    1 if pass, 0 if no pass.
    """
    # A bare float is treated as a one-element list.
    if isinstance(bayes_factor, float):
        bayes_factor = [bayes_factor]
    return [0 if abs(bf) < bf_filter else 1 for bf in bayes_factor]
|
067b929cdc52502baaf14b7532dbfa69e495551e
| 31,434 |
def get_wavelengths(snirf):
    """Returns a flat float array of the channel wavelengths in the SNIRF file."""
    raw = snirf["nirs"]["probe"]["wavelengths"][:]
    return raw.flatten().astype(float)
|
eca371732c0faf0d8f6bbcad0049601b6b889f1a
| 31,435 |
def make_catalog_sources(catalog_roi_model, source_names):
    """Construct and return dictionary of sources that are a subset of sources
    in catalog_roi_model.

    Parameters
    ----------
    catalog_roi_model : dict or `fermipy.roi_model.ROIModel`
        Input set of sources
    source_names : list
        Names of sources to extract

    Returns dict mapping source_name to `fermipy.roi_model.Source` object
    """
    return {name: catalog_roi_model[name] for name in source_names}
|
1939e8d1819b3b6823edf1c4cd1635d461bfe189
| 31,436 |
from typing import Match
import re
def find_raw_string_literal_end(file_src: str, m: Match[str]) -> int:
"""Returns the pos just beyond the raw string literal that starts with m."""
if not m.group(0).endswith('R"'):
raise AssertionError(f'Expected start of raw string literal: {m.group()}')
# We've matched the start of a Raw String mcucore::Literal. Determine the delimiter,
# then search for the end of the string.
regexp = re.compile(r'[^()\\ \f\n\r\t\v]{0,16}\(')
m2 = regexp.match(file_src, pos=m.end())
if not m2:
raise AssertionError(
'Unable to locate opening delimiter of the Raw String mcucore::Literal '
f'starting at {m.start()}: {file_src[m.start():m.start()+32]!r}')
needle = ')' + m2.group()[0:-1] + '"'
pos1 = file_src.find(needle, m2.end())
if pos1 < 0:
raise AssertionError(
'Unable to locate closing delimiter of the Raw String mcucore::Literal '
f'starting at {m.start()}: {file_src[m.start():m.start()+32]!r}')
pos2 = pos1 + len(needle)
return pos2
|
1b5523d7a4185bc857e4e7c44b1a5334f56efcb2
| 31,439 |
def notas(*notas, status=False):
    """
    Analyze a student's grades and situation.

    :param notas: the student's grades [required].
    :param status: include the student's situation [optional], default False.
    :return: the student's information (dict).
    """
    aluno = {f"nota{pos + 1}": grade for pos, grade in enumerate(notas)}
    media = sum(notas) / len(notas)
    aluno['media'] = media
    aluno['total'] = len(notas)
    if status:
        if media >= 7:
            aluno["status"] = 'Boa'
        elif media >= 5:
            aluno["status"] = 'Razoavel'
        else:
            aluno["status"] = 'Ruim'
    return aluno
|
75323654a2e68895b4b73004803a9a17fc497bc5
| 31,440 |
def can_contain(parent_type, child_type):
    """ Returns true if parent block can contain child block.
    """
    if parent_type in ('Document', 'BlockQuote', 'ListItem'):
        return True
    return parent_type == 'List' and child_type == 'ListItem'
|
c8cef3515b3306f779525c59486b526654649433
| 31,441 |
def test_callback(container, text=''):
"""
A callback used for basic testing.
"""
return {
'actions':
[
{
'action': 'chat.postMessage',
'kwargs': {
'text': '{}'.format(text)
}
}
]
}
|
7e83d55ce00b176a1aa06da7ad5faea5ec58e1b7
| 31,448 |
def interpret_as_slice(column):
    """Interprets the 'column' argument of loadFitnessHistory into a slice().

    * ``None`` -> everything
    * ``int`` -> that single column
    * pair of ints -> slice(start, stop)

    Raises:
        ValueError: when 'column' matches none of the accepted formats
            (was a bare ``Exception``; ValueError is a subclass, so existing
            ``except Exception`` callers still work).
    """
    if column is None:  # No specific column is requested, return everything
        return slice(None)
    if isinstance(column, int):  # One specific column is requested
        return slice(column, column + 1)
    if len(column) == 2 and all(isinstance(val, int) for val in column):
        return slice(*column)
    raise ValueError("Invalid format for 'column': {col}".format(col=column))
|
081a9634169a0752ecd311c82b22e3bd498048d8
| 31,451 |
import pickle
def read_pickle(path):
"""Read serialized pickle file.
Parameters
----------
path : str
Path of the file to read.
Returns
-------
data = pandas.DataFrame or np.ndarray
Data store in the pickle file (an image or coordinates with labels and
metadata).
"""
# open the file and read it
with open(path, mode='rb') as f:
data = pickle.load(f)
return data
|
e66fc4bc3d3047a420ce5e5751fe36b178f602d5
| 31,458 |
def calculate_sequence_distance(seq1, seq2, case_insensitive=True):
    """Calculate the number of nucleotide differences between two sequences.

    The sequences must be the same length.  Positions where either base is
    outside the allowed alphabet (e.g. 'N') are ignored.

    Args:
        seq1 : DNA string 1
        seq2 : DNA string 2
        case_insensitive : optional flag for case insensitive compare,
            defaults to True

    Returns:
        int number of differences
    """
    if case_insensitive:
        allowed_bases = frozenset('ACGT')
        seq1 = seq1.upper()
        seq2 = seq2.upper()
    else:
        allowed_bases = frozenset('ACGTacgt')
    mismatches = 0
    for pos in range(len(seq1)):
        base1, base2 = seq1[pos], seq2[pos]
        if base1 in allowed_bases and base2 in allowed_bases and base1 != base2:
            mismatches += 1
    return mismatches
|
cbbd5b3528dcd1b4cbd89f0d4e020aa7d829f203
| 31,462 |
import json
def json_loads(resp):
    """Handle parsing json from an HTTP response for both Python 2 and Python 3."""
    try:
        # Python 2 httplib message API; default to utf8 when unspecified.
        charset = resp.headers.getparam('charset') or 'utf8'
    except AttributeError:
        # Python 3 email.message API.
        charset = resp.headers.get_content_charset()
    return json.loads(resp.read().decode(charset))
|
41781a2b55287b5c31439346d8d37f72b481823c
| 31,463 |
import torch
def max_neg_value(tensor):
"""Returns the maximum negative value that can be represented by the data type of the tensor"""
return -torch.finfo(tensor.dtype).max
|
a9ad008ba712d2ac8e7f94e81c20a36f5eebbf8c
| 31,465 |
def remove_consecutive_dups(lst):
    """ return a copy of lst with consecutive duplicates of elements eliminated. For example, for lst = [a, a, a, a, b, c, c, a, a, d, e, e, e, e], the returned list is [a, b, c, a, d, e]. """
    deduped = []
    for item in lst:
        # Keep an item only when it differs from the last one kept.
        if not deduped or item != deduped[-1]:
            deduped.append(item)
    return deduped
|
7d791c87c5c51c37c7ca5ffa01d04e48b3de0286
| 31,466 |
def make_arguments(**params):
    """
    Render keyword parameters as a "--key 'value'" script argument string.
    """
    return ' '.join("--{} '{}'".format(key, value) for key, value in params.items())
|
297247bebb1705d3bba04421864bbfa8509b7e15
| 31,468 |
def read_file(file_name, verbose=False):
    """Read a file and return its lines as a list, with optional verbosity.

    :param file_name: path to file to be read
    :param verbose: run with extra logging
    :returns lines: list of strings representing lines in the file
    """
    if verbose:
        print('Reading file: <' + file_name + '>')
    with open(file_name, 'r+') as handle:
        lines = handle.readlines()
    if verbose:
        print('Lines read: <' + str(len(lines)) + '>')
    return lines
|
3f689a78d61b7d1d4eb35f0c232fa945ee123074
| 31,469 |
def merge_results(x, y):
    """
    Given two dicts, x and y, merge them into a new dict as a shallow copy.

    The result only differs from `x.update(y)` in the way that it handles list
    values when both x and y have list values for the same key. In which case
    the returned dictionary, z, has a value according to:
    z[key] = x[key] + y[key]

    Neither input dict (nor any list inside x) is mutated.

    :param x: The first dictionary
    :type x: :py:class:`dict`
    :param y: The second dictionary
    :type y: :py:class:`dict`
    :returns: The merged dictionary
    :rtype: :py:class:`dict`
    """
    z = x.copy()
    for key, value in y.items():
        if isinstance(value, list) and isinstance(z.get(key), list):
            # Build a fresh list: the old `z[key] += value` extended x's
            # list in place through the shallow copy.
            z[key] = z[key] + value
        else:
            z[key] = value
    return z
|
8e0f301bd1840381b2ff1a5ef64c142caa4e21a3
| 31,470 |
def getABMN(scheme, idx):
    """ Get coordinates of four-point cfg with id `idx` from DataContainerERT
    `scheme`.

    Returns a dict mapping each electrode letter ('a', 'b', 'm', 'n') to its
    (x, y) position.
    """
    coords = {}
    for elec in "abmn":
        # scheme(elec)[idx] yields the electrode id for this measurement;
        # sensorPosition resolves it to a position object with .x()/.y()
        # accessors (pygimli DataContainerERT API — TODO confirm version).
        elec_id = int(scheme(elec)[idx])
        elec_pos = scheme.sensorPosition(elec_id)
        coords[elec] = elec_pos.x(), elec_pos.y()
    return coords
|
219dfc01b94e277fbe37e76e1076f94b9ae354cf
| 31,472 |
def _ec2Instance_tag_dict(ec2_object):
"""Given an tagable ec2_object, return dictionary of existing tags."""
tag_dict = {}
if ec2_object.tags is None:
return tag_dict
for tag in ec2_object.tags:
tag_dict[tag['Key']] = tag['Value']
return tag_dict
|
b63a38faf15d839a3081b35b377d5329c3a9e796
| 31,474 |
import math
def build_lr(total_steps, lr_init=0.0, lr_end=0.0, lr_max=0.1, warmup_steps=0, decay_type='cosine'):
    """
    Generate a learning-rate schedule: linear warmup followed by decay.

    Args:
        total_steps(int): all steps in training.
        lr_init(float): init learning rate (start of warmup).
        lr_end(float): end learning rate (floor of the decay).
        lr_max(float): max learning rate (end of warmup).
        warmup_steps(int): all steps in warmup epochs.
        decay_type(str): 'cosine', 'square', or anything else for a
            constant lr_max after warmup.

    Returns:
        list, learning rate per step.
    """
    lr_init, lr_end, lr_max = float(lr_init), float(lr_end), float(lr_max)
    decay_steps = total_steps - warmup_steps
    warmup_inc = (lr_max - lr_init) / warmup_steps if warmup_steps else 0
    rates = []
    for step in range(total_steps):
        if step < warmup_steps:
            rates.append(lr_init + warmup_inc * (step + 1))
        elif decay_type == 'cosine':
            # Half-cosine from lr_max down to lr_end over the decay phase.
            progress = (step - warmup_steps) / decay_steps
            rates.append((lr_max - lr_end) * 0.5 * (1 + math.cos(math.pi * progress)) + lr_end)
        elif decay_type == 'square':
            remaining = 1.0 - (step - warmup_steps) / (total_steps - warmup_steps)
            rates.append((lr_max - lr_end) * (remaining * remaining) + lr_end)
        else:
            rates.append(lr_max)
    return rates
|
488e75e661cf4397a67bb2ae21c8882ab795c739
| 31,482 |
def format_anime_status(media_status: str) -> str:
    """
    Formats the anime status code into a human-readable label.
    Raises KeyError for unknown status codes.
    """
    status_labels = {
        "FINISHED": "Finished",
        "RELEASING": "Currently Airing",
        "NOT_YET_RELEASED": "Not Yet Aired",
        "CANCELLED": "Cancelled",
    }
    return status_labels[media_status]
|
66f64596c02f095a9010295fab9506e647282599
| 31,487 |
def checksum(buffer, checkA, checkB):
    """
    Verify a packet with the 8-bit Fletcher checksum used by the UBX
    protocol (see the u-blox spec, section 32.4 "UBX Checksum").

    Inputs:
        buffer - Byte buffer the checksum is computed over.
        checkA - First reference checksum byte to compare against.
        checkB - Second reference checksum byte to compare against.
    Outputs:
        valid - True when the computed checksum matches the reference.
        buffer_checkA - First checksum byte computed from the buffer.
        buffer_checkB - Second checksum byte computed from the buffer.
    """
    sum_a = 0
    sum_b = 0
    for value in buffer:
        sum_a += value
        sum_b += sum_a
    # Reduce both running sums to a single byte each.
    sum_a &= 0xFF
    sum_b &= 0xFF
    valid = (sum_a == checkA) and (sum_b == checkB)
    return valid, sum_a, sum_b
|
b6fb80603c03e96cbe7c4f34760c71b0b75113f8
| 31,488 |
def leiaOperacao(text):
    """
    Read and validate user input: only an integer from 1 to 5 is accepted.

    The prompt is repeated until a valid option is entered, so the function
    always returns a value in the range 1-5.

    Fixes over the previous version: the inner validation loop printed the
    error forever without re-reading input; a non-numeric entry hit a
    ``finally: break`` and returned 0; and negative numbers were accepted.

    :param text: Prompt text shown to the user.
    :return: The validated integer typed by the user (1-5).
    """
    while True:
        try:
            operation = int(input(text))
        except (ValueError, TypeError):
            # Non-numeric input: warn and prompt again.
            print("\33[31mVocê não digitou um valor válido. Tente novamente!\33[m")
            continue
        if 1 <= operation <= 5:
            return operation
        # Out-of-range number (<= 0 or > 5): warn and prompt again.
        print("\33[31mVocê não digitou um valor válido. Tente novamente!\33[m")
|
667667c8e5d945a7d25f1400010fca8cc9e6da1e
| 31,490 |
def convert_to_int(s):
    """
    Filter converting a (possibly padded) string to an int; ``None`` is
    passed through unchanged.
    """
    return int(s.strip()) if s is not None else None
|
8cb0a12b107644f969c54603d59ebf3086fdba22
| 31,492 |
def _build_selpand(item, attributes):
"""
This method builds an expand or select term for an STA Query
:param item: string either expand or select
:param attributes: a list of strings that has to be expanded / selected
:return: the resulting select or expand-term
"""
selector = item + "="
if not isinstance(attributes, list):
attributes = [attributes]
for i, attribute in enumerate(attributes):
if i != 0:
selector += ","
selector += str(attribute)
return selector
|
a9eb52ba6107411f48f140033fc4227906228a0e
| 31,494 |
def sumMatchesAndMismatches(segment):
    """
    Total matches/mismatches of an aligned segment, i.e. the summed lengths
    of the CIGAR 'M' operations (op code 0).

    BAM CIGAR op codes for reference:
        M=0 I=1 D=2 N=3 S=4 H=5 P=6 ==7 X=8 B=9
    """
    total = 0
    for code, length in segment.cigartuples:
        if code == 0:
            total += length
    return total
|
101b50d859c949e18563e981b2c419a224e3de68
| 31,495 |
def linear_combinaison(alpha=1.0, m1=None,
                       beta=None, m2=None):
    """
    Return the linear combination m = alpha * m1 + beta * m2.

    Only the keys of ``m1`` appear in the result; keys missing from ``m2``
    contribute 0. When ``m2`` is empty or omitted the result degenerates to
    ``alpha * m1`` (``beta`` is forced to 0).

    :param alpha: scalar multiplier for ``m1``.
    :param m1: mapping of name -> value (defaults to empty).
    :param beta: scalar multiplier for ``m2``.
    :param m2: mapping of name -> value (defaults to empty).
    :return: dict of name -> combined value.
    """
    # None sentinels instead of mutable {} defaults (shared across calls).
    if m1 is None:
        m1 = {}
    if m2 is None:
        m2 = {}
    if not m2:
        # No second operand: combination is simply alpha * m1.
        m2 = m1
        beta = 0.0
    return {name: alpha * value + beta * m2.get(name, 0.0)
            for name, value in m1.items()}
|
4bb76e76e0e905e29135f9d5d00c9e16edb7920d
| 31,499 |
def monitor_cb(ud, msg):
    """
    Monitor-state callback; arguments are the userdata and the message.

    Returning False tells the monitor state to terminate, in which case it
    returns 'invalid'. This callback always requests termination.
    """
    return False
|
1b1b210e94de0bcf2fdae9ecb884be945d9ead01
| 31,501 |
def _hide_num_nodes(shape):
"""Set the first dimension as unknown
"""
shape = list(shape)
shape[0] = None
return shape
|
ea0a8bb452752c9efdce4ae75d9a37cf301d4217
| 31,502 |
from typing import Dict
from typing import Any
def summed_frequencies(freq: Dict[Any, int]):
    """Return the total of all counts in the given frequency mapping."""
    total = 0
    for count in freq.values():
        total += count
    return total
|
d2dc9b873aab42c8c4739b350545b58bd60620a3
| 31,503 |
import json
def dictify_json_loads(text: str):
    """
    Parse JSON like ``json.loads`` but return an empty dict when *text* is
    empty or whitespace-only.

    :param text: JSON document, possibly blank.
    :return: the parsed object, or ``{}`` for blank input.
    """
    stripped = text.strip()
    return json.loads(stripped) if stripped else {}
|
30479d493c1215ac2595d2dc75aae7023ea17bf5
| 31,506 |
def tostr(value):
    """Cast value to str, passing None through unchanged.

    value[in] Value to be cast to str

    Returns value as a str instance, or None.
    """
    if value is None:
        return None
    return str(value)
|
252dca23a22a13ad6b58d310cc1dae5196792fc8
| 31,508 |
def _contains_op(meta_graphdef, op_name):
"""Returns true if the graph def contains the given op."""
# Check the main graph
if any(node.op == op_name for node in meta_graphdef.graph_def.node):
return True
# Check the graph genederated from user defined functions
for func in meta_graphdef.graph_def.library.function:
for node in func.node_def:
if node.op == op_name:
return True
return False
|
bdcc4150877796594a261d93320382a415cf230a
| 31,512 |
import json
def prettify_json(dictionary, sort_keys=True, indent=4):
    """
    Serialize *dictionary* to a neatly formatted JSON string.

    :param dictionary: object to serialize (dict)
    :param sort_keys: whether keys are emitted in sorted order (bool)
    :param indent: number of spaces used for indentation (int)
    :return: pretty-printed JSON (str)
    """
    return json.dumps(
        dictionary,
        sort_keys=sort_keys,
        indent=indent,
        separators=(',', ': '),
    )
|
885db2075ef5041fe431863b9b8b607faece4e5c
| 31,518 |
def icon_to_pixmap(icon, width, height=None):
    """Converts a given icon to a pixmap. Automatically adjusts to high-DPI scaling.

    :param icon: Icon to convert.
    :param int width: Target point width.
    :param int height: Target point height; falls back to *width* (square)
        when omitted or falsy.
    :return: ``QPixmap`` instance.
    """
    if not height:
        height = width
    return icon.pixmap(width, height)
|
d7e38ec7fc0efda5751cf301a935a71f9ca9589c
| 31,520 |
from typing import Optional
from typing import Any
def safe_list_get(list_: list, index: int, default: Optional[Any] = None) -> Any:
    """
    Fetch ``list_[index]``; when the index is out of range, fall back to
    *default* (``None`` unless given) instead of raising ``IndexError``.
    """
    try:
        value = list_[index]
    except IndexError:
        value = default
    return value
|
6e1ed71fa7204412dbc20141543d29f1e0c5a9bc
| 31,521 |
import re
def unescape(text):
    """Unescape a few HTML entities (``&gt;`` and ``&#39;``).

    Only the two entities handled by the previous implementation are
    covered; reach for ``html.unescape`` if full entity support is needed.

    :param text: string possibly containing ``&gt;`` / ``&#39;``.
    :return: string with those entities replaced by ``>`` and ``'``.
    """
    # The patterns are plain literals, so str.replace is simpler and faster
    # than re.sub; replacement order matches the original.
    return text.replace("&gt;", ">").replace("&#39;", "'")
|
ed92ebf78faceae979f1aad26e2e722dfc502346
| 31,530 |
def add_time(df, stim_dict, stimulus_timestamps):
    """
    add_time(df, stim_dict, stimulus_timestamps)

    Updates dataframe with time columns.

    Adds "start_time_sec", "stop_time_sec" and "duration_sec" columns
    derived from the stimulus frame indices, after sorting rows by
    "start_frame_twop".

    Arguments:
        df (pandas): stimulus dataframe. Must contain "start_frame_twop",
            "start_frame_stim" and "stop_frame_stim" columns.
        stim_dict (dict): experiment stim dictionary, loaded from pickle.
            Only the "fps" key is read here.
        stimulus_timestamps (1D array): time stamps for each stimulus frame,
            indexed by stimulus frame number.

    Returns:
        df (pandas): updated stimulus table with time columns.
    """
    # Work on a sorted copy with a fresh 0..n-1 index so positional row
    # labels used below are reliable.
    df = df.copy()
    df = df.sort_values("start_frame_twop").reset_index(drop=True)
    # Start time of every row: timestamp of its start frame.
    df["start_time_sec"] = stimulus_timestamps[
        df["start_frame_stim"].values.astype(int)
    ]
    # Stop times for every row except the last, looked up from each row's
    # own stop frame.
    non_final = range(0, len(df) - 1)
    df.loc[non_final, "stop_time_sec"] = stimulus_timestamps[
        df["stop_frame_stim"].values[:-1].astype(int)
    ]
    # Last row: its stop frame may lie past the end of the timestamps, so
    # derive the stop time from the frame span and the stimulus frame rate
    # instead of indexing into stimulus_timestamps.
    final_row = len(df) - 1
    last_duration = (
        df.loc[final_row, "stop_frame_stim"] -
        df.loc[final_row, "start_frame_stim"]
    ) / stim_dict["fps"]
    df.loc[final_row, "stop_time_sec"] = \
        df.loc[final_row, "start_time_sec"] + last_duration
    df["duration_sec"] = df["stop_time_sec"] - df["start_time_sec"]
    return df
|
58c0ac6cf4544b04737317a08115a814a737d98c
| 31,531 |
from typing import Tuple
from typing import List
from typing import Any
def get_column_key(label: Tuple[str, ...], metrics: List[str]) -> Tuple[Any, ...]:
    """
    Build a sort key for combining metric columns.

    MultiIndex labels carry the metric name as their last element; it is
    replaced by its position in *metrics* so columns sort in the requested
    metric order.
    """
    parts = list(label)
    parts[-1] = metrics.index(parts[-1])
    return tuple(parts)
|
007b0979bfd6db653dd21f3e4827668183d79529
| 31,534 |
def normalize_axes(df, sample_axis, feature_axis):
    """Tests and transposes DataFrame to sample * feature format.

    Validates the user's axis arguments and flips the DataFrame so samples
    end up in the index and features in the columns.

    Arguments:
        df: dataset
        sample_axis: axis containing samples (0 or 1)
        feature_axis: axis containing features (0 or 1)

    Returns:
        DataFrame as samples * features

    Raises:
        Exception: if the axes are not 0 and 1 in some order.
    """
    if sample_axis == 0 and feature_axis == 1:
        # Already samples * features.
        return df
    if sample_axis == 1 and feature_axis == 0:
        return df.T
    # Rejects identical axes such as (0, 0) / (1, 1) as well as
    # out-of-range values, which the previous check let through silently.
    raise Exception('Invalid axis! Should be 0 or 1')
|
6c0fe26b5d79dfba90a8e395edecd08eb15e4649
| 31,536 |
def drop(self, parameters, inplace=True):
    """
    Remove input parameters (and their *_QC companions) from WaterFrame.data.

    Parameters
    ----------
    parameters: str, list of str
        Parameters of WaterFrame.data.
    inplace: bool
        If True, drop in place and return True. If False, return a copy of
        the WaterFrame without the input parameters.

    Returns
    -------
    True or new_wf: bool or WaterFrame
    """
    # Normalize to a flat list of column names, pairing every parameter
    # with its quality-control column. The previous version tested
    # `isinstance(keys, list)` (always True) instead of `parameters`.
    if isinstance(parameters, str):
        parameters = [parameters]
    keys = []
    for parameter in parameters:
        keys.append(parameter)
        keys.append(parameter + '_QC')
    if inplace:
        self.data.drop(keys, axis=1, inplace=True)
        for key in keys:
            self.vocabulary.pop(key)
        return True
    new_wf = self.copy()
    new_wf.vocabulary = self.vocabulary.copy()
    new_wf.metadata = self.metadata.copy()
    new_wf.data = self.data.drop(keys, axis=1)
    for key in keys:
        new_wf.vocabulary.pop(key)
    return new_wf
|
6bd8e1c4414ca98d061cc661832746c40be6abdf
| 31,544 |
def GetDeprecatedTagWarning(models):
    """Returns a warning string iff any device model is marked deprecated."""
    has_deprecated = any(
        'deprecated' in tag for model in models for tag in model.tags)
    if has_deprecated:
        return ('Some devices are deprecated. Learn more at https://firebase.'
                'google.com/docs/test-lab/available-testing-devices#deprecated')
    return None
|
0c5cc18597d7835c6df9c2efd84bb0bb4518c9e9
| 31,546 |
import requests
import json
def send_message(port, msg):
    """
    POST a JSON-encoded message to the node listening on localhost:*port*
    and return the ``requests`` response object (the node's reply).
    """
    url = "http://localhost:{}/".format(port)
    payload = json.dumps(msg)
    return requests.post(url, payload,
                         headers={'content-type': 'application/json'})
|
b830e9da3d22092fc78fc8f8b3ef804982bc3343
| 31,547 |
def nunique(ls):
    """Return the number of distinct values in *ls*.

    Uses a set for O(n) counting when all values are hashable; falls back
    to the original equality scan for unhashable values (e.g. lists), which
    preserves the previous behavior for those inputs.
    """
    try:
        return len(set(ls))
    except TypeError:
        # Unhashable elements: O(n^2) comparison by equality.
        seen = []
        for value in ls:
            if value not in seen:
                seen.append(value)
        return len(seen)
|
6f4a6ebdcc1b13f291b003be10da78bc403c3f81
| 31,553 |
def get_retcode(ret):
    """
    Determine a retcode for a given return value.

    A dict reports its own non-zero "retcode" entry; a False boolean maps
    to 1; anything else counts as success (0).
    """
    if isinstance(ret, dict):
        code = ret.get("retcode", 0)
        if code != 0:
            return code
    elif isinstance(ret, bool) and ret is False:
        return 1
    return 0
|
d04f27231e2708f6e4d23ab81fa8c186649c767c
| 31,555 |
def get_app_fullname(app):
    """
    Returns the full python name of an app - e.g. django.contrib.auth

    Strips the last 11 characters of the module's ``__name__`` (presumably
    the ".migrations" suffix, which is 11 characters -- verify against
    callers).
    """
    module_name = app.__name__
    return module_name[:-11]
|
c90310bbdd082023c01c25cc0736984c60b60a79
| 31,559 |
def is_summary(number):
    """
    Label a sentence as part of the summary ('yes') or not ('no'):
    any non-zero number counts as summary.
    """
    return 'yes' if number != 0 else 'no'
|
753e3d698e484976c7cb42a85c4a6966c5bcc9c8
| 31,560 |
import itertools
def complete_inds(n, d):
    """
    All exponent combinations of an n-variable, degree-d complete
    polynomial, including the all-zero (constant) term.

    Parameters
    ----------
    n : int
        The number of parameters in the polynomial
    d : int
        The degree of the complete polynomials

    Returns
    -------
    inds : filter
        Lazy iterator (filter object) over the exponent tuples
    """
    all_powers = itertools.product(range(d + 1), repeat=n)
    return filter(lambda powers: sum(powers) <= d, all_powers)
|
cfbc90ab7810fdbe6a8982b40adf5e38b8054208
| 31,561 |
def _get(entry):
"""A helper to get the value, be it a callable or callable with args, or value
"""
if isinstance(entry, (tuple, list)):
func, args = entry
return func(*args)
elif callable(entry):
return entry()
else:
return entry
|
40742e0f86ea1a89b05e0354912c64683a9b9160
| 31,562 |
def redshiftToLOS(redshift, H):
    """
    Convert redshifts to apparent line-of-sight distances, ignoring
    particle velocities.

    Input: redshifts and Hubble constant.
    Output: line-of-sight distances (Mpc).
    """
    speed_of_light = 3.0e5  # km/s
    return redshift * speed_of_light / H
|
09f6c287dbc43267dfd239ac835394e41760d242
| 31,563 |
import operator
def difference(vec1, vec2):
    """Return the element-wise difference vec1 - vec2 as a list.

    >>> v1 = [1, 2, 3, 4]
    >>> v2 = [5, 6, 7, 8]
    >>> v3 = [0, 0, 0, 0]
    >>> difference(v2, v1)
    [4, 4, 4, 4]
    >>> difference(v2, v3)
    [5, 6, 7, 8]
    """
    # list(...) so the doctests above hold on Python 3, where map() returns
    # a lazy iterator rather than a list.
    return list(map(operator.sub, vec1, vec2))
|
1dcf334f856232d1b5213ed835a34ab1ec20dc9f
| 31,565 |
import colorsys
def map_colour(x, centre, start_hue, end_hue, day):
    """Return an (r, g, b) tuple for the 'colour' of a time of day.

    Given an x coordinate and a centre point, a start and end hue (in
    degrees), and a day/night flag (True for day), the hue is interpolated
    between start and end while brightness peaks at the centre.
    """
    # Rescale hues from degrees to the 0-1 range colorsys expects.
    hue_lo = start_hue / 360
    hue_hi = end_hue / 360
    saturation = 1.0
    # Brightness fades linearly from the centre out to the edges.
    brightness = 1 - (abs(centre - x) / (2 * centre))
    # Mirror positions past the centre so the hue ramps up then back down.
    if x > centre:
        x = (2 * centre) - x
    hue = hue_lo + (x / centre) * (hue_hi - hue_lo)
    if not day:
        # Night: flip towards purple/blue hues and invert the dimming.
        hue = 1 - hue
        brightness = 1 - brightness
    red, green, blue = colorsys.hsv_to_rgb(hue, saturation, brightness)
    return (int(red * 255), int(green * 255), int(blue * 255))
|
4699b630f30a06a3421bb5d18155edbffa70362d
| 31,568 |
def gadgetMultipleFiles(rootName, fileIndex):
    """ Name of gadget binary file number `fileIndex` when a snapshot is split
    across multiple files. Takes the common root name of the files and the
    requested file number (0 to GadgetHeader.num_files-1)."""
    return "%s%i" % (rootName, fileIndex)
|
4dac2d23c6cba7600472cc84fa2384ef66f5f204
| 31,569 |
def normalize_index(index, length):
    """
    Normalize *index* per sequence indexing: negatives count from the end,
    and anything out of range raises IndexError.

    >>> normalize_index(0, 10)
    0
    >>> normalize_index(9, 10)
    9
    >>> normalize_index(-2, 10)
    8
    """
    idx = int(index)
    shifted = idx + length if idx < 0 else idx
    if 0 <= shifted < length:
        return shifted
    # Report the (int-converted) original index, not the shifted one.
    raise IndexError("index out of range: {}".format(idx))
|
bc9be3a3ef554ca95217f93d2d698934c2f1096f
| 31,570 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.