content (string, 39 to 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 to 710k)
---|---|---|
def get_parent_language_code(parent_object):
"""
.. versionadded:: 1.0
Return the parent object language code.
Tries to access ``get_current_language()`` and ``language_code`` attributes on the parent object.
"""
if parent_object is None:
return None
try:
# django-parler uses this attribute
return parent_object.get_current_language()
except AttributeError:
pass
try:
# E.g. ContentItem.language_code
return parent_object.language_code
except AttributeError:
pass
return None
|
c7a503b975466354139ba8fddf5fa4d190df646f
| 153,570 |
def get_service_state_name(state):
"""
Translate a Windows service run state number to a friendly service run state name.
"""
return {
1: 'Stopped',
2: 'Start Pending',
3: 'Stop Pending',
4: 'Running',
5: 'Continue Pending',
6: 'Pause Pending',
7: 'Paused'
}.get(state, 'Unknown')
|
153bfb340ca20335aa476021d31a9918fb00f1f4
| 40,856 |
import random
def getRandomBytes(size):
"""
Returns an array of `size` random integers representing bytes
:param size: The size of the array
:return: The array of random integers
"""
random.seed()
a = []
for i in range(0, size):
a.append(random.randint(0, 255))
return a
|
3562bea22fb9e7d2f4988163c77ee10fa48773be
| 483,213 |
def truncate_string(string, truncation, message = ''):
"""
Truncate a string to a given length. Optionally add a message at the end
explaining the truncation
:param string: A string
:param truncation: An int
:param message: A message, e.g. '...<truncated>'
:return: A new string no longer than truncation
"""
if truncation is None:
return string
assert isinstance(truncation, int)
if len(string)>truncation:
return string[:truncation-len(message)]+message
else:
return string
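# Added usage sketch (illustrative, not part of the original entry): the message
# is counted against the budget, so the result never exceeds `truncation`.
_truncated = truncate_string('a' * 50, 20, message='...<truncated>')
assert _truncated == 'a' * 6 + '...<truncated>' and len(_truncated) == 20
assert truncate_string('short', 20) == 'short'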
|
45cce7bf6dec02c0a6fac2cf22da8f217a717948
| 22,807 |
def efficency_poly(cappa):
    """Calculate efficiency for a given pump's typical number.
    The polynomial has been calculated by applying curve fitting at the nodes
cappa .2 .3 .4 .5 .6 .7 .8 .9 1.0 1.1 1.2
eta .700 .850 .900 .916 .923 .928 .931 .932 .933 .935 .932
weights ones(cappa)
n 5
:param cappa (float): typical number
    :return eta (float): efficiency
"""
coef = [-0.171, 7.400, -19.717, 25.671, -16.239, 3.990]
eta = sum([val * cappa**idx for idx, val in enumerate(coef)])
return eta
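# Added check (illustrative, not part of the original entry): evaluating the
# polynomial near a fitted node should roughly reproduce the docstring table,
# e.g. cappa = 0.5 gives about 0.918 against the tabulated eta of 0.916.
assert abs(efficency_poly(0.5) - 0.916) < 5e-3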
|
465005d47d7650a395108ed831f97011817fbafe
| 541,653 |
import math
def is_nan(val):
"""Test if a value is NaN
Parameters
----------
val : obj
A value
Returns
-------
bool
Is it NaN?
"""
try:
float(val)
if math.isnan(float(val)):
return True
except (ValueError, TypeError):
pass
return False
|
253fff265e62e65d944f8b30e766fbb275b58d5b
| 457,348 |
def update_speed_limit(intersection, new_speed):
"""
Updates the speed limit of the intersection
:param intersection: intersection
:param new_speed: new speed value
:type intersection: Intersection
:type new_speed: int
:return: updated intersection
"""
return intersection.update_speed_limit(new_speed)
|
a531ff9152611299499d347cb0639faba559d153
| 672,116 |
import torch
def centralize_gradient(x: torch.Tensor, gc_conv_only: bool = False) -> torch.Tensor:
"""Gradient Centralization (GC)
:param x: torch.Tensor. gradient
:param gc_conv_only: bool. 'False' for both conv & fc layers
:return: torch.Tensor. GC-ed gradient
"""
size: int = x.dim()
if (gc_conv_only and size > 3) or (not gc_conv_only and size > 1):
x.add_(-x.mean(dim=tuple(range(1, size)), keepdim=True))
return x
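# Added usage sketch (illustrative, not part of the original entry): GC subtracts
# the mean over all but the first dimension, so each per-filter slice of a
# conv-style gradient ends up with (approximately) zero mean. Note the tensor is
# modified in place via add_().
_grad = torch.randn(8, 3, 3, 3)
centralize_gradient(_grad)
assert torch.allclose(_grad.mean(dim=(1, 2, 3)), torch.zeros(8), atol=1e-5)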
|
7de961c5182a39b5e9c1aba6a71dc2203147bdd3
| 396,930 |
import gzip
def _compression_safe_opener(fname):
"""Determine whether to use *open* or *gzip.open* to read
the input file, depending on whether or not the file is compressed.
"""
f = gzip.open(fname, "r")
try:
f.read(1)
opener = gzip.open
except IOError:
opener = open
finally:
f.close()
return opener
|
4c44da2ae15c63ccd6467e6e893a3c590c20a7e9
| 706,854 |
from typing import List
from typing import Optional
def distance(v1: List[int], v2: List[int]) -> Optional[int]:
"""
Determine the distance between two vectors, i.e. the number of positions in which their value differs.
:param v1: first vector
:param v2: second vector
:return: distance as described above
>>> distance([], [])
0
>>> distance([0, 1, 1], [1, 0, 1])
2
"""
if len(v1) != len(v2):
return None
return sum(1 if v1_n != v2_n else 0 for v1_n, v2_n in zip(v1, v2))
|
253a27d9df7edd5d281854fd9b24b0b498876cc5
| 624,611 |
def get_param_dict(self):
    """Get the parameter dict for the ELUT of a PMSM at the operational temperature and frequency
Parameters
----------
self : ELUT
an ELUT_PMSM object
Returns
----------
param_dict : dict
a Dict object
"""
# getting parameters of the abstract class ELUT (stator parameters)
param_dict = super(type(self), self).get_param_dict()
param_dict["R2"] = self.R2
param_dict["L2"] = self.L2
param_dict["T2_ref"] = self.T2_ref
param_dict["Phi_m"] = self.Phi_m
param_dict["I_m"] = self.I_m
return param_dict
|
28da77a0bdc35af4920719972d2c528bcccc1510
| 676,307 |
def to_comma_sep(list_values):
"""
Removes any None, False or blank items from a list and then converts the
list to a string of comma separated values.
"""
default = '--'
if list_values:
actual_vals = [item for item in list_values if item]
unique_vals = list(set(actual_vals))
# remove duplicates and return
if unique_vals:
return ', '.join(unique_vals)
else:
return default
else:
return default
|
2f5d10d0dc76e57a7a7255145c4d43ee53df32cb
| 269,685 |
def phase_sync(frequency, phase, elapsed_time):
"""
Calculate what the phase should be for a carrier at the given frequency
after the given time has elapsed.
"""
return phase + (elapsed_time * frequency * 2)
|
861af29c048f2c2b25f728af7e82dfaae562cdda
| 436,832 |
def rateTCan24(params: dict, states: dict) -> float:
"""
    Rate of change of the 24 hour mean canopy temperature, approximated by a first-order filter.
Parameters
----------
params : dict
tau: float
Time constant of the process.
k: float
Gain of the process.
states : dict
TCan: float
TCan is the simulated or measured canopy temperature. [oC]
TCan24: float
The 24 hour mean canopy temperature [oC s-1]
Returns
-------
float
24 h mean canopy temperature, approximated by
a first order differential equation. [oC s-1]
"""
tCan24_ = (1/params["tau"])*(params["k"]*states["TCan"] - states["TCan24"])
return tCan24_
|
e529392f2ed5ed1779fa7a01e5694e2a5322d37d
| 206,244 |
def _find_all_groups(group_by, md_list):
"""
Find all possible values for group_by
:param group_by: name of the metadata to find values on
:type group_by: str
:param md_list: list of metadata to find into
:type md_list: dict
:return: the list of all unique possible values for this group
:rtype: list
"""
# List of all possible values encountered for the group by
groups_iterators = []
# Find groups list
for ts in md_list:
if group_by not in md_list[ts]:
            raise ValueError("Group by '%s' is not found, at least for ts %s" % (group_by, ts))
groups_iterators.append(md_list[ts][group_by])
# Remove duplicates
return list(set(groups_iterators))
|
8a63b536665327518707068157442c5f5e08dbbe
| 561,551 |
def to_css_length(x):
"""
Return the standard length string of css.
It's compatible with number values in old versions.
:param x:
:return:
"""
if isinstance(x, (int, float)):
return '{}px'.format(x)
else:
return x
|
bb4dde6bb8288f3b7c11e89f56b1413f881abb86
| 633,674 |
from typing import List
from typing import Dict
def has_valid_format(data: List[Dict]) -> bool:
"""
Check data format, usually, from plugin results and return True if data
format is value. False otherwise.
"""
format_keys = ("library", "version", "severity", "summary", "advisory")
# Check data is list instance
if not hasattr(data, "append"):
return False
for x in data:
# Check x is a dictionary
if not hasattr(x, "keys"):
return False
if not len(set(format_keys).intersection(x.keys())) == len(format_keys):
return False
return True
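# Added usage sketch (illustrative, not part of the original entry): every dict
# must carry all of the expected keys, and the container itself must be a list.
_ok = [{"library": "example-lib", "version": "1.0", "severity": "low",
        "summary": "short text", "advisory": "some advisory"}]
assert has_valid_format(_ok) is True
assert has_valid_format([{"library": "example-lib"}]) is False  # missing keys
assert has_valid_format({"library": "example-lib"}) is False    # not a list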
|
8c9a00d3c7c091c61e29291a0701aa63fdf90057
| 177,695 |
def count_lines(file_name):
"""
Counts number of non-empty lines in the file `file_name`
Args:
file_name (str): Path to file
Returns:
int: Number of non-empty lines
list(str): List of strings, one str for each line
"""
with open(file_name) as f:
lines = [line.rstrip() for line in f.readlines()]
nonblanklines = [line for line in lines if line]
return len(nonblanklines), nonblanklines
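# Added usage sketch (illustrative, not part of the original entry): blank lines
# are excluded from the count and trailing whitespace is stripped from the rest.
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as _tmp:
    _tmp.write('first\n\nsecond  \n')
_n, _lines = count_lines(_tmp.name)
assert _n == 2 and _lines == ['first', 'second']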
|
a7dfafcc567192200cf50e31034914fe3a187fae
| 232,003 |
def getAlerts(fc, dt):
"""
Count the number of alerts for a given date/time (dt)
"""
res = 0
if "alerts" in fc:
if len(fc["alerts"]) > 0:
for i in range(0, len(fc["alerts"])):
alert = fc["alerts"][i]
if dt >= alert["start"] and dt <= alert["end"]:
res = res + 1
return res
|
2c5b5ccab75b06bf3aa2efa6cea67c932096bcba
| 150,976 |
def read_atom_file(file):
    """Read a file with atom labels
    Args:
        file (str): Name of the file with atom labels
    Returns:
        list: atoms labeled as <Residue Name> <Atom Name>
    """
    with open(file, "r") as handle:
        atoms = [line.rstrip('\n').split() for line in handle]
atoms = [[a[0] + " " + aa for aa in a[1::]] for a in atoms]
return atoms
|
3d85dff55f7165d1c9b747eb75d395ec03f5b3ce
| 41,554 |
def find_branch(lst, i):
"""Finds the whole branch that is rooted from the given node.
Args:
lst: a list that contains a pre-order traversal of a free-tree
i: the index of the actual node
Returns:
int: the given i value
int: the index of the end of the branch + 1
"""
pi = i
i += 1
while i < len(lst):
if lst[i] <= lst[pi]:
break
i += 1
return pi, i
|
4de15d0f1f798bf8a489c6348f1d887c05c2c590
| 561,794 |
def obter_pos_c(p):
"""
    Column selector.
    Returns the column component c of position p.
    :param p: tuple, position on the game board.
    :return: string, column of the position.
"""
return p[0]
|
8bf1ff4ae5a98d4385dae3cbdab1829b08916813
| 270,975 |
def uniq(seq, key=None):
"""
Removes duplicate elements from a list while preserving the order of the rest.
The value of the optional `key` parameter should be a function that
takes a single argument and returns a key to test the uniqueness.
"""
key = key or (lambda x: x)
seen = set()
uniq_list = []
for value in seq:
uniq_value = key(value)
if uniq_value in seen:
continue
seen.add(uniq_value)
uniq_list.append(value)
return uniq_list
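# Added usage sketch (illustrative, not part of the original entry): the key
# function controls what counts as a duplicate while the original order is kept.
assert uniq([3, 1, 3, 2, 1]) == [3, 1, 2]
assert uniq(['Apple', 'apple', 'Banana'], key=str.lower) == ['Apple', 'Banana']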
|
6188dcf1f88d98252d9867daa9d8fb6e60392ba2
| 465,113 |
def remove_duplicates(package_leaflets):
"""
Keep only leaflets with unique product_name.
If there is already the leaflet with same product_name - skip this leaflet
:param package_leaflets: array of processed leaflets
:return: array of unique leaflets
"""
# save only leaflets with unique product_name
package_leaflets_unique = []
# keep track of unique product names observed so far
unique_product_names = set()
COUNT_DUPLICATE_PRODUCT_NAME = 0
for leaflet in package_leaflets:
if leaflet.product_name not in unique_product_names:
unique_product_names.add(leaflet.product_name)
# save unique leaflet separately
package_leaflets_unique.append(leaflet)
# if leaflet.product_name is in unique_product_names - then it is duplicate - do not save
else:
COUNT_DUPLICATE_PRODUCT_NAME += 1
print("Number of *unique* leaflets: ", len(package_leaflets_unique))
print("Number of *duplicate* leaflets (by product names): ", COUNT_DUPLICATE_PRODUCT_NAME)
return package_leaflets_unique
|
f141b458d98ff497b659fb12eadfe34608d2146c
| 110,540 |
from datetime import datetime
def filter_future_amendments(versions):
""" Take a list of amendments, and only return a list of those that are in
the future. """
today = datetime.today()
amendments = [v for v in versions if v['by_date'] > today]
amendments.sort(key=lambda v: v['by_date'])
return amendments
|
262cbf654bcf54f160872f0c879e6b1524eea469
| 425,197 |
def growthClass(gLevel):
"""Determine growth class based on growth level"""
if gLevel >= 0.75:
return "+++"
elif gLevel >= 0.50:
return "++"
elif gLevel >= 0.35:
return "+"
elif gLevel >= 0.25:
return "-"
else:
return "--"
|
fdaa4f9bd823bbd499fdb8f68cc355fd06a2f86e
| 480,153 |
def garrisonkimmel(x):
"""Garrison-Kimmel 2014 Stellar-Halo-Mass function at redshift zero.
MNRAS 438, 2578–2596 (2014)
Equation 4
Keyword arguments:
x -- array of peak halo masses
"""
tr = 3.0e6 * (x/1.0e10)**1.92
return tr
|
0ad11694551e5e57e3f2c75ec2fd5f075fe9f97c
| 201,887 |
def resolved(issue):
"""Is the issue resolved?"""
return issue.fields.status.name in ['Done', 'Resolved', 'Fertig', 'Closed']
|
b4de64097e8f0280d0c3250d1013ff312b87e4b9
| 614,683 |
def deep_tuple(array_like):
"""convert nested tuple/list mixtures to pure nested tuple"""
if isinstance(array_like, (list, tuple)):
return tuple(map(deep_tuple, array_like))
return array_like
|
4546a89ba2d54a36fad973deaf291fc2cca880fc
| 604,616 |
def generate_module_cmd(module, input_json, output_json):
"""Generates a command string to use for subprocess calling
Parameters
----------
module: str
The current module being run
input_json: str
The path of the input for the module
output_json: str
The path of the output for the module
Returns
-------
    module_cmd: list of str
        the command, as an argument list to be passed to subprocess
"""
module_cmd = ["python", "-W", "ignore", "-m", module,
"--input_json", input_json,
"--output_json", output_json]
return module_cmd
|
9313fbea1bf7432c90932f96dbc6bd8292dc549f
| 572,007 |
def create_segment_allele_counts(segment_data, allele_data):
""" Create a table of total and allele specific segment counts
Args:
segment_data (pandas.DataFrame): counts of reads in segments
allele_data (pandas.DataFrame): counts of reads in segment haplotype blocks with phasing
Returns:
pandas.DataFrame: output segment data
Input segment_counts table is expected to have columns 'chromosome', 'start', 'end', 'readcount'.
Input phased_allele_counts table is expected to have columns 'chromosome', 'start', 'end',
'hap_label', 'is_allele_a', 'readcount'.
Output table will have columns 'chromosome', 'start', 'end', 'readcount', 'major_readcount',
'minor_readcount', 'major_is_allele_a'
"""
# Calculate allele a/b readcounts
allele_data = (
allele_data
.set_index(['chromosome', 'start', 'end', 'hap_label', 'is_allele_a'])['readcount']
.unstack(fill_value=0)
.reindex(columns=[0, 1])
.fillna(0.0)
.astype(int)
.rename(columns={0: 'allele_b_readcount', 1: 'allele_a_readcount'})
)
# Merge haplotype blocks contained within the same segment
allele_data = allele_data.groupby(level=[0, 1, 2])[['allele_a_readcount', 'allele_b_readcount']].sum()
# Reindex and fill with 0
allele_data = allele_data.reindex(segment_data.set_index(['chromosome', 'start', 'end']).index, fill_value=0)
# Calculate major and minor readcounts, and relationship to allele a/b
allele_data['major_readcount'] = allele_data[['allele_a_readcount', 'allele_b_readcount']].apply(max, axis=1)
allele_data['minor_readcount'] = allele_data[['allele_a_readcount', 'allele_b_readcount']].apply(min, axis=1)
allele_data['major_is_allele_a'] = (allele_data['major_readcount'] == allele_data['allele_a_readcount']) * 1
# Merge allele data with segment data
segment_data = segment_data.merge(allele_data, left_on=['chromosome', 'start', 'end'], right_index=True)
return segment_data
|
f27b8e925d58ea70806c90ad2d3d5144e7690812
| 23,691 |
def sec2hms(seconds):
"""Converts seconds to hours, minutes, seconds
:param int seconds: number of seconds
:return: (*tuple*) -- first element is number of hour(s), second is number
of minutes(s) and third is number of second(s)
:raises TypeError: if argument is not an integer.
"""
if not isinstance(seconds, int):
raise TypeError("seconds must be an integer")
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return hours, minutes, seconds
|
4a0de4f407e6228a56feddf1e1ca84b38644dc26
| 512,166 |
def dict_value_add(dict1, dict2):
"""Add values with same keys from two dictionaries."""
result = {key: dict1.get(key, 0) + dict2.get(key, 0)
for key in set(dict1) | set(dict2)}
'''
# This has an issue of only keeping track of >0 values!
from collections import Counter
result = dict(Counter(dict1) + Counter(dict2))
'''
return result
|
6f6774aef73edd5afae6552fe5681531e56bfae5
| 522,076 |
def load_shefcodefirst_python_members(file_path):
"""Given the `file_path` to the file containing a list of #ShefCodeFirst Python
course members, this function loads them into a list and return such list back."""
# Explicitly open file with the UTF-8 encoding to ensure non-standard English characters
# show up properly too!
with open(file_path, encoding="utf-8") as input_file:
python_course_members = [member.strip() for member in input_file]
return python_course_members
|
a40f492935f6ccd74df3f8fabe43e650f08dbecc
| 596,157 |
def get_fmriprep_outlier_volumes_from_confounds(confounds_df):
"""extract which volume numbers are outliers from the fmriprep confounds df.
Returns:
bad_volumes: list
eg [34, 35, 100, 150]
"""
# get the motion columns
motion = confounds_df.filter(regex='motion')
# find any rows with values above 0
return_df = motion[(motion > 0).any(1)]
# return the index (row names) of this df
return list(return_df.index)
|
b62d833ec2b7f000584354ca6470863acb33682c
| 18,777 |
def setup_and_run_hass(edge, top_process=False):
"""
Setup HASS and run. Block until stopped. Will assume it is running in a
subprocess unless top_process is set to true.
"""
edge.start()
# exit_code = int(edge.block_till_stopped())
# if not top_process:
# sys.exit(exit_code)
return 0
|
b4c4f4736fe153c5daaf8fc40050b29faede1a49
| 338,347 |
def shell_sort(array):
"""
Sorts the given array of integers using the Shell Sort algorithm
Time Complexity : O((len(array))^2)
Space Complexity : O(len(array))
:param array: A List of integers.
:return: returns the array sorted
"""
# calculate the gap using Knuth's formula
gap = 1
while gap < len(array) // 3:
gap = (gap * 3) + 1
while gap > 0:
# using this gap, exchange elements while you can
for idx in range(gap, len(array)):
val_to_insert = array[idx]
candidate_idx = idx
# shift all bigger elements to the right, creating a hole
while candidate_idx > gap - 1 and array[candidate_idx - gap] > val_to_insert:
array[candidate_idx] = array[candidate_idx - gap]
candidate_idx -= gap
# insert our element at the hole
array[candidate_idx] = val_to_insert
# decrease gap, math alert
gap = (gap - 1) // 3
return array
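# Added usage sketch (illustrative, not part of the original entry): the sort is
# performed in place and the same list object is also returned.
_data = [5, 2, 9, 1, 5, 6]
assert shell_sort(_data) == [1, 2, 5, 5, 6, 9]
assert _data == [1, 2, 5, 5, 6, 9]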
|
c1b3e9dc8f7a1e59ee32f647bd04ffd1f6d99383
| 205,890 |
def assert_errors(result, expected_errors):
"""Assert that result errors match expected errors
    Uses substring matching to correlate expected to actual errors.
Raise if any expected error is not matched or if any actual
errors are found that were not matched by an expected error.
This function has O(n**2) complexity on the number of errors.
"""
def find_and_remove(expected_error):
for i, actual_error in enumerate(actual_errors):
if expected_error in actual_error:
del actual_errors[i]
return True
return False
actual_errors = list(result.errors)
missing_errors = [e for e in expected_errors if not find_and_remove(e)]
errors = []
if missing_errors:
errors.append("missing expected errors:")
errors.extend(missing_errors)
if actual_errors:
if errors:
errors.append("")
errors.append("unexpected errors:")
errors.extend(actual_errors)
assert not errors, "\n".join(errors)
|
6923c4edbc27c0c81ef884aabcaaad06ff4e317c
| 39,817 |
def by_key(key):
"""
Returns a function that gets an item by the specified key.
Example:
(
Slinkie(items)
.sort(by_key('date'))
)
"""
return lambda items: items[key]
|
1916c75ee4a1e1139d06c73522192d065798917d
| 483,330 |
def _no_stop_codon(aa_seq):
""" Returns True if a sequence does not contain a stop codon,
otherwise returns False
"""
if '*' not in aa_seq:
return True
else:
return False
|
b48d3169d6403bc69782dede504af3962ee7d6bf
| 668,272 |
def area(coords):
""" calculate area of a polygon
:param coords: vertexes of polygon e.g. [[1, 1], [3, 1], [2, 2]]
:returns: area of polygon
"""
w = 0.0
for i in range(len(coords)):
j = (i + 1) % len(coords)
w += (coords[j][0] + coords[i][0]) * (coords[j][1] - coords[i][1])
    return abs(w) / 2.0
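# Added check (illustrative, not part of the original entry): the shoelace
# formula on a 3-4-5 right triangle gives the expected area of 6.
assert area([[0, 0], [4, 0], [0, 3]]) == 6.0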
|
d232655bf2bce835227aa53bd226ffcdc95552ee
| 516,416 |
def _ProcessCcSD(fmt):
"""Convert a 'cc' sort directive into SQL."""
# Note: derived cc's are included automatically.
# Note: This sorts on the best Cc, not all Cc addresses.
# Being more exact might require GROUP BY and GROUP_CONCAT().
left_joins = [
(fmt('Issue2Cc AS {alias} ON Issue.id = {alias}.issue_id '
'LEFT JOIN User AS {alias}_user '
'ON {alias}.cc_id = {alias}_user.user_id'), [])]
order_by = [
(fmt('ISNULL({alias}_user.email) {sort_dir}'), []),
(fmt('{alias}_user.email {sort_dir}'), [])]
return left_joins, order_by
|
91c1182b419e21e3ce4ea4f5bfc5affba7dfc349
| 244,629 |
def get_bit(data: bytes, num: int) -> int:
"""
Get specified bit from bytes.
:param bytes data: data
:param int num: bit position
:return: selected bit value
:rtype: int
"""
base = int(num // 8)
shift = 7 - int(num % 8)
return (data[base] >> shift) & 0x1
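# Added usage sketch (illustrative, not part of the original entry): bit
# positions are counted from the most significant bit of the first byte.
assert get_bit(b'\x80', 0) == 1       # 0b10000000, bit 0 is the MSB
assert get_bit(b'\x80', 7) == 0
assert get_bit(b'\x00\x01', 15) == 1  # least significant bit of the second byte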
|
f51fb1d1ac5094ec91ff98e0862edf73079199ee
| 559,575 |
def mix(x, y, ratio):
"""Convex combination of two numbers"""
return (1 - ratio)*x + ratio*y
|
62658fefa9a2f50a358ff9e31260e66437dc7607
| 505,936 |
def import_dg_updates(file_path='/Users/reedd/Documents/projects/PaleoCore/projects/mlp/data_cleaining_170412/DG_updates.txt'):
"""
Function to read data from a delimited text file
:return: list of header values, list of row data lists
"""
dbfile = open(file_path)
data = dbfile.readlines()
dbfile.close()
data_list = []
header_list = data[0][:-1].split('\t') # list of column headers
# populate data list
for row in data[1:]: # skip header row
data_list.append(row[:-1].split('\t')) # remove newlines and split by delimiter
#data_list.append(row.split('\t')) # remove newlines and split by delimiter
print('Importing data from {}'.format(file_path))
return header_list, data_list
|
ce09d8af7eb6ee3be093b8025ba14717fd67c6ac
| 394,254 |
def media_url(context, path, safe=None):
"""
Returns the media url given a partial path.
"""
return context['site'].media_url(path, safe)
|
9c68d8835e36f565a5112084d4ee0cb1ea037aa2
| 325,682 |
def next_instruction_is_function_or_class(lines):
"""Is the first non-empty, non-commented line of the cell either a function or a class?"""
for i, line in enumerate(lines):
if not line.strip(): # empty line
if i > 0 and not lines[i - 1].strip():
return False
continue
if line.startswith('def ') or line.startswith('class '):
return True
if line.startswith(('#', '@', ' ')):
continue
return False
return False
|
b4a840f7b33f4b6f1a29351de000a93ce35286b6
| 466,208 |
def contains(bbox, point):
"""Checks if point is within bbox.
bbox: [[x0, x1], [y0, y1]]
point: [x, y]
-> bool
"""
return (bbox[0][0] <= point[0] <= bbox[0][1] and
bbox[1][0] <= point[1] <= bbox[1][1])
|
8f34ea22eebceeb219bb20f451c8e782d93ed844
| 474,250 |
def is_digit(value: str) -> bool:
"""Return if ``value`` is number (decimal or whole)"""
if value.count('.') == 1 and value.replace('.', '').isdigit() or value.isdigit():
return True
return False
|
e5e646d4b8960df2e9e6c7287bc8f0caffd3a08d
| 348,412 |
def get_ms(delta):
"""Convert a datetime.timedelta into the corresponding milliseconds.
>>> from datetime import timedelta
>>> get_ms(timedelta(1, 1, 1, 1))
86401001.001
>>> get_ms(timedelta(days=1))
86400000.0
>>> get_ms(timedelta(seconds=15))
15000.0
>>> get_ms(timedelta(milliseconds=15, microseconds=222))
15.222
"""
return delta.days * 24 * 60 * 60 * 1000 + delta.seconds * 1000 + delta.microseconds / 1000
|
f280526f1727c4409c4a807184c18970e9903276
| 201,918 |
def KtoC(T_K):
"""Converts Kelvin to Celsius."""
return T_K - 273.15
|
aec2f7ecbf22da4d36e538a1211d6abe36a8c385
| 247,784 |
from typing import Optional
import re
def get_server_id(data: str) -> Optional[int]:
"""
Extracts six digit id of server.
:param data: Input data.
:return: Server id number.
"""
server_id = re.findall(r"#(\d{6})", data)
if server_id:
return int(server_id[0])
else:
return None
|
0e819c14e2489b9579a958f9cd698661e335540e
| 218,699 |
def clean_address(data):
"""Clean the address feature."""
    # use literal (non-regex) replacement so '[' and ']' are not treated as regex metacharacters
    data['Indirizzo'] = (data['Indirizzo'].str.replace('[', '', regex=False)
                         .str.replace(']', '', regex=False)
                         .str.replace('\'', '', regex=False))
return data
|
21e20751a2a0fde0fe00ee914e36da7b496f7fe7
| 260,838 |
def is_keys_str_decimals(dictionary: dict):
"""
Checks if the keys are string decimals
Args:
dictionary: Dictionary object to check
Returns:
True if keys are numerical strings
"""
keys = dictionary.keys()
are_decimals = [isinstance(k, str) and k.isdecimal() for k in keys]
return all(are_decimals)
|
affd1d55652e3859e7fdb3b5354ec4661ff7b467
| 194,812 |
def tolerance(dimension_mm, itg):
"""
    Returns the tolerance in micrometers, following the ISO standard tolerance
    formula, for the given dimension and IT grade (itg) number
"""
return 10 ** ((itg - 1) / 5) * (0.45 * dimension_mm ** (1 / 3) + dimension_mm / 1000)
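# Added check (illustrative, not part of the original entry): for a 25 mm
# dimension and IT grade 7 the formula gives roughly 21 micrometers, close to
# the commonly tabulated ISO value for the 18-30 mm range.
assert abs(tolerance(25, 7) - 21) < 1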
|
c862974eb83270ea170f1f660d19dc4dea0e5507
| 641,086 |
def to_list(data_in):
"""Convert the data into a list. Does not pack lists into a new one.
If your input is, for example, a string or a list of strings, or a
tuple filled with strings, you have, in general, a problem:
- just iterate through the object will fail because it iterates through the
characters of the string.
- using list(obj) converts the tuple, leaves the list but splits the strings
characters into single elements of a new list.
- using [obj] creates a list containing a string, but also a list containing
a list or a tuple, which you did not want to.
Solution: use to_list(obj), which creates a new list in case the object is
a single object (a string is a single object in this sence) or converts
to a list if the object is already a container for several objects.
Parameters
----------
data_in : any obj
So far, any object can be entered.
Returns
-------
out : list
Return a list containing the object or the object converted to a list.
"""
if isinstance(data_in, (str, int, float)):
data_in = [data_in]
data_in = list(data_in)
return data_in
|
ba82f8d7cf385474e2b2bcf403c3c7c78e06f942
| 192,830 |
def compile_trace(destination):
"""
Given a destination list, returns a maintenance function that appends a trace
string to the given list every time it is called.
"""
def trace(net, task, result):
nonlocal destination
destination.append("Task {}: {}".format(task.name, result.__name__))
return trace
|
f1bc92b2c8f8f4a11ab7d7f86ac9fd239a7e4740
| 248,756 |
import math
def solve_tilted_rectangle(xend, yend, length, width, angle, padding=0.0,
pad_upper=True):
"""
Given a rectangle of a certain length, width and orientation,
knowing the coordinates of the centre of one end of the
rectangle, return the coordinates of the corners.
:Parameters:
xend: float
X coordinate of the centre of the upper edge (at the extremity
of the length) of the rectangle.
yend: float
Y coordinate of the centre of the same edge of the rectangle.
length: float
Length of the rectangle
width: float
Width of the rectangle
angle: float
Angle of the rectangle (radians).
padding: float (optional)
An optional padding to be applied to the edges of the
rectangle, increasing the length and the width by
2 * padding. This parameter can be used to determine the
corners of a new rectangle which avoids the edges of the
original rectangle by at least this amount.
By default the padding is zero and the corners of the
original rectangle are returned.
pad_upper: boolean (optional)
Set True (the default) to pad the upper edge of the rectangle.
Setting this to False allows one end of the rectangle to
have a much smaller padding.
:Returns:
quad: tuple of ((x1,y1),(x2,y2),(x3,y3),(x4,y4))
The corner coordinates of the rectangle
"""
assert float(length) > 0.0
assert float(width) > 0.0
# The coordinates of the other edge of the rectangle can be calculated
# from the length and orientation.
xlength = length * math.cos(angle)
ylength = length * math.sin(angle)
xother = xend - xlength
yother = yend - ylength
# The X and Y increments of the corners from these ends depend on
# the width and orientation
xwidth2 = width * math.sin(angle) / 2.0
ywidth2 = width * math.cos(angle) / 2.0
x1 = xother + xwidth2
y1 = yother - ywidth2
x2 = xother - xwidth2
y2 = yother + ywidth2
x3 = xend - xwidth2
y3 = yend + ywidth2
x4 = xend + xwidth2
y4 = yend - ywidth2
# If required, apply a padding to the corner coordinates.
if padding > 0.0:
xlength_pad = padding * math.cos(angle)
ylength_pad = padding * math.sin(angle)
xwidth_pad = padding * math.sin(angle)
ywidth_pad = padding * math.cos(angle)
x1 = x1 - xlength_pad + xwidth_pad
y1 = y1 - ylength_pad - ywidth_pad
x2 = x2 - xlength_pad - xwidth_pad
y2 = y2 - ylength_pad + ywidth_pad
if pad_upper:
x3 = x3 + xlength_pad - xwidth_pad
y3 = y3 + ylength_pad + ywidth_pad
x4 = x4 + xlength_pad + xwidth_pad
y4 = y4 + ylength_pad - ywidth_pad
else:
# Only pad the width at the upper end of the rectangle
x3 = x3 - xwidth_pad
y3 = y3 + ywidth_pad
x4 = x4 + xwidth_pad
y4 = y4 - ywidth_pad
quad = [(x1,y1), (x2,y2), (x3,y3), (x4,y4)]
return quad
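# Added check (illustrative, not part of the original entry): an axis-aligned
# rectangle (angle 0) of length 4 and width 2 whose upper edge is centred at
# (4, 0) has corners spanning x = 0..4 and y = -1..1.
assert solve_tilted_rectangle(4.0, 0.0, 4.0, 2.0, 0.0) == [
    (0.0, -1.0), (0.0, 1.0), (4.0, 1.0), (4.0, -1.0)]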
|
7208f27aa51aaf1abe81bd9f5180a63fede7f1f8
| 52,491 |
def formula2components(formula):
"""Convert a glass/any composition formula to component dictionary.
Parameters
----------
formula : string
        Glass/any composition, e.g. 50Na2O-50SiO2.
Returns
-------
dictionary
Dictionary where keys are components of glass/any composition and values are their ratio.
"""
dict1 = {}
parts = formula.split('-')
if len(parts)==1:
dict1[parts[0]] = 1.0
else:
for i in parts:
k = ''
p = ''
for ind, j in enumerate(i):
try:
float(j)
p += j
except:
if j == '.':
p += j
else:
k = i[ind:]
break
dict1[k] = float(p)
if sum(dict1.values())==100:
for k,v in dict1.items():
dict1[k] = dict1[k]/100
return dict1
elif sum(dict1.values())==1.0:
return dict1
else:
try:
raise Exception("Invalid Formula: {}.".format(formula))
except Exception as e:
print(e)
raise
|
b6931b9cd470c8d2e378635d287c8e7be9b91f6e
| 128,681 |
def c_0(z: float) -> float:
"""Calculates the orography factor, taken as 1,0
Args:
z (float): vertical distance
Returns:
float: orography factor
"""
return 1.0
|
6580885fd5a7241e26a50a70206f10ea1293d027
| 567,555 |
import inspect
def get_init_params(obj):
"""
Returns a list of the parameters entering the `__init__` method of the given object `obj`.
Parameters
----------
obj: Serializable
Returns
-------
list of str
"""
init_params = list(inspect.signature(obj.__init__).parameters.keys())
if 'self' in init_params:
init_params.remove('self')
if 'kwargs' in init_params:
init_params.remove('kwargs')
return init_params
|
5bbe66f9d4300c4f164f134c3b27e37325b0c1bc
| 105,515 |
from typing import Dict
from typing import Any
def purge_none(d: Dict[Any, Any]) -> Dict[Any, Any]:
"""Purge None entries from a dictionary."""
return {k: v for k, v in d.items() if v is not None}
|
2f23c0b43863dae7f9fe5cc5c43650a6fc1b99e1
| 74,804 |
def check_args(args):
"""Checks to see if at least one of the character options was set"""
    # "or" matches the docstring: at least one character option must be set
    if not (args.uppercase_ascii or args.lowercase_ascii or args.numerical or args.punctuation):
return False
return True
|
a63fd932c0b4e109d995d0d172d0b95428680470
| 234,562 |
def sample_statistics(image, geom, numPixels, scale):
""" Sample within a region of a single band float image,
returning a dictionary of statistics:
accessed as ["values"]["<stat>"]
Note this is an estimate"""
bn = image.bandNames().getInfo()[0]
fc = image.sample(region=geom, numPixels=numPixels, scale=scale, tileScale=16, dropNulls=True)
return fc.aggregate_stats(bn)
|
4633628091e78ff2b2e2eb7576af2d450b1399a5
| 220,904 |
import re
def _normalize_samples(samples):
"""Normalizes the samples in the given function body.
Normalization just means that we redact inlined function names. This is
done so that a bit of templating doesn't make two function bodies look
distinct. Namely:
template <typename T>
__attribute__((noinline))
int getNumber() { return 1; }
template <typename T>
__attribute__((noinline))
int getNumberIndirectly() { return getNumber<T>(); }
int main() {
return getNumber<int>() + getNumber<float>();
}
If the profile has the mangled name for getNumber<float> in
getNumberIndirectly<float> (and similar for <int>), we'll consider them to
be distinct when they're not.
"""
# I'm not actually sure if this ends up being an issue in practice, but it's
# simple enough to guard against.
inlined_re = re.compile(r'(^\s*\d+): [^:]+:(\s*\d+)\s*$')
result = []
for s in samples:
m = inlined_re.match(s)
if m:
result.append('%s: __REDACTED__:%s' % m.groups())
else:
result.append(s)
return tuple(result)
|
64607d8b94e16818f49b9d909664d973c6a4ec74
| 465,706 |
def get_bin_list(n, nmax):
"""
return a list of digits of the binary representation of n
nmax is the maximum theoretical value of n, used to add 0 at the front of the list if necessary
"""
if n == 0:
return [0 for _ in range(len(bin(nmax))-3)]
n = bin(n)
digits = []
for i in range(len(n)-2):
digits.append(int(n[i+2]))
while len(digits) < len(bin(nmax))-3:
digits.insert(0,0)
return digits
|
3fa8d1a7e7e025f24ccca4fc287f5a96add937fe
| 630,197 |
import math
def rotate(v1, angle):
"""
rotates the vector by an angle around the z axis
:param v1: The vector that will be rotated
:param angle: The angle to rotate the vector by
:return: The vector rotated around the z axis by the angle
"""
x, y, z = v1
# Rotation transformation
v = (x*math.cos(angle) - y*math.sin(angle),
x*math.sin(angle) + y*math.cos(angle),
z)
return v
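# Added check (illustrative, not part of the original entry): rotating the x unit
# vector by 90 degrees about z gives (approximately) the y unit vector.
_vx, _vy, _vz = rotate((1.0, 0.0, 0.0), math.pi / 2)
assert abs(_vx) < 1e-12 and abs(_vy - 1.0) < 1e-12 and abs(_vz) < 1e-12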
|
7d71f8c7126122f13ea99b3cde4a03d6a0605757
| 652,437 |
def has_prim_rou(modulus: int, degree: int) -> bool:
"""
Test whether Z/qZ has a primitive 2d-th root of unity.
"""
return modulus % (2 * degree) == 1
|
43781a3aad3e714e0632175dc2d326dc6ff73c18
| 359,042 |
def normalize_option_name(name):
"""Use '-' as default instead of '_' for option as it is easier to type."""
if name.startswith("--"):
name = name.replace("_", "-")
return name
|
5f971b9c925486843ce4b050be048f0fb3efbdf1
| 97,558 |
def get_tag2idx(df):
"""
Returns tags maps from a given dataframe df
Outputs:
tag2idx: map from tag to idx
idx2tag: map from idx to tag
"""
tag_values = list(df["tag"].unique()) + ['PAD']
tag2idx = {tag:idx for idx, tag in enumerate(tag_values)}
idx2tag = {idx:tag for tag, idx in tag2idx.items()}
return tag2idx, idx2tag
|
4c0d15bf567f47c82779798a0a69bb3966e65e65
| 56,294 |
def tag_to_rtsip(tag):
"""Convert tag to relation type, sense, id, and part."""
rel_type, rel_sense, rel_id, rel_part = tag.split(":")
return rel_type, rel_sense, int(rel_id), rel_part
|
38237836cd16d34a594296c5077eee062600d899
| 21,519 |
def to_str(message):
"""
Returns the string representation of an arbitrary message.
"""
if isinstance(message, str):
return message
else:
return message.decode('utf-8')
|
166eb23cb883543c5fa7d7859a2f0e9845f3f7c5
| 123,278 |
def to_number(number):
"""
Converts a string to a int if possible, else a float.
:param number: The string to convert to a number
:type number: string
:return: The converted number
:rtype: : int or float depending on the format of the string
"""
try:
return int(number)
except ValueError:
return float(number)
|
cd1b77d925dbb215033c631fe1e96580e380f94b
| 534,839 |
def is_palindromic(n):
"""Return True iff the given integer is palindromic."""
xs = str(n)
return xs == "".join(reversed(xs))
|
8af5edd115d1f785c82356852ffc70cc770ffc71
| 111,239 |
def strput(a,inp,pos):
""" Put a substring into a string."""
temp = list(a)
temp[pos:pos+len(inp)] = list(inp)
return ''.join(temp)
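# Added usage sketch (illustrative, not part of the original entry): the
# substring overwrites characters starting at the given position.
assert strput('hello world', 'HELLO', 0) == 'HELLO world'
assert strput('abcdef', 'XY', 2) == 'abXYef'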
|
79c00c62651bba289200f03c4bdca9ab429e5f2c
| 553,500 |
def get_images_type(image_file):
"""Returns 'images_multiview' or 'images_multiview_frontOnly."""
return image_file.split('/')[-5]
|
18101c2a0470882ec1ada9bca26d5c82859edb1f
| 602,236 |
def to_DetectedObject_list(dos):
"""Get a list of the DetectedObjects in a Kwiver DetectedObjectSet"""
return list(dos)
|
c6dce7cf31817fe2fc0ec3b1b4e3b655dc7d830d
| 558,700 |
import ctypes
def destructor(cfunc):
"""
Make a C function a destructor.
Destructors accept pointers to void pointers as argument. They are also wrapped as a staticmethod for usage in
classes.
:param cfunc: The C function as imported by ctypes.
:return: The configured destructor.
"""
cfunc.argtypes = [ctypes.POINTER(ctypes.c_void_p)]
cfunc.restype = None
return staticmethod(cfunc)
|
05abd181649a2178d4dce704ef93f61eb5418092
| 707,796 |
from typing import Tuple
def parse_yearmon(yearmon: str) -> Tuple[int, int]:
"""
Parse a YYYYMM string and return a (year, month) integer tuple
"""
return int(yearmon[:4]), int(yearmon[4:])
|
82f54c5074f7823c51236a6eebf22cbd843ea3b5
| 191,108 |
from typing import Union
from typing import List
import gzip
def basic_gzip_load(file_path: str, max_lines: Union[int, None] = 100_000) -> List[str]:
"""
Load sentences from specified gzipped text file.
    It is expected that the archived file is going to be large, thus a maximum
amount of lines to load may be specified (100_000 by default).
:param file_path: path to gzip archive
:param max_lines: stop loading if this number is reached
:return: list of loaded sentences
"""
result = []
with gzip.open(file_path) as file:
for index, line in enumerate(file):
result.append(line.decode('utf-8').strip('\n'))
if max_lines and index >= max_lines - 1:
break
return result
|
49a8910885e8a25667492900992c180ecd60c4ac
| 552,394 |
def applyBandOffset(C, height, bandName, lines, inter=False):
"""Produce bands from a list of lines.
Bands are defined relative to lines by means of offsets of the top
and bottom heights of the lines.
Bands may also be interlinear: defined between the bottom of one line and the top
of the next line.
Parameters
----------
C: object
Configuration settings
height:
The height of the page or block
bandName: string
The name of the bands
lines: tuple
The lines relative to which the bands have to be determined.
Lines are given as a tuple of tuples of top and bottom heights.
inter: boolean, optional `False`
        Whether the bands are relative to the lines, or relative to the interlinear spaces.
Returns
-------
tuple
For each line the band named bandName specified by top and bottom heights.
"""
offsetBand = C.offsetBand
(top, bottom) = offsetBand[bandName]
def offset(x, off):
x += off
return 0 if x < 0 else height if x > height else x
return tuple(
(offset(up, top), offset(lo, bottom))
for (up, lo) in (
zip((x[1] for x in lines), (x[0] for x in lines[1:])) if inter else lines
)
)
|
90f5db0f04b30be774f2f87310baf90ac5f4962d
| 44,225 |
def _TransformGuestPolicyDescription(resource):
"""Returns a length-limited guest policy description."""
max_len = 30 # Show only the first 30 characters if description is long.
description = resource.get('description', '')
return (description[:max_len] +
'...') if len(description) > max_len else description
|
641fa73ea39544ee4d7833f3df8d3ebd5856fa7f
| 400,953 |
def make_callback_dictionary(param_dict, status_flag, status_msg):
"""Utility function that adds extra return values to parameter dictionary."""
callback_dict = param_dict.copy()
callback_dict['status_flag'] = status_flag
callback_dict['status_msg'] = status_msg
return callback_dict
|
c2fcb7d3637955359ae362ddfd5e4334aa5c2af7
| 654,262 |
import requests
def system_check(gc):
"""Check DSA connection with the girder client
Args:
gc: girder client
Returns:
int: 0 for successful connection, 1 otherwise
"""
try:
_ = gc.get("/system/check")
except requests.exceptions.HTTPError as err:
        print("Please check your host or credentials")
print(err)
return 1
print("Successfully connected to DSA")
return 0
|
f20feae1c3daab345a17e696b03ce7b96d6e29ec
| 363,103 |
from typing import BinaryIO
def _bytes_match(fd: BinaryIO, expected: bytes) -> bool:
"""Peeks at the next bytes to see if they match the expected."""
try:
offset = fd.tell()
data = fd.read(len(expected))
fd.seek(offset)
return data == expected
except IOError:
return False
|
69a1063cb46203db78d362d04f731ac98d283ffa
| 649,203 |
def topk_accuracy(outputs, labels, recalls=(1, 5)):
"""Return @recall accuracies for the given recalls."""
_, num_classes = outputs.size()
maxk = min(max(recalls), num_classes)
_, pred = outputs.topk(maxk, dim=1, largest=True, sorted=True)
correct = (pred == labels[:,None].expand_as(pred)).float()
topk_accuracy = []
for recall in recalls:
topk_accuracy.append(100 * correct[:, :recall].sum(1).mean())
return topk_accuracy
|
0f4e42581a833de7e7554f33aed037511904bb88
| 303,361 |
def convert_to_bool(bool_string):
"""
because bool(str) will check if len(str) > 0 or 0
:param bool_string: "True"/"False"
:return: True or False, and raise Value error on fail
"""
if bool_string.lower() == "true":
return True
elif bool_string.lower() == "false":
return False
raise ValueError("Failed to convert str bool to bool")
|
0e27efe3d37af7f266776517e24041b8b0e9ad33
| 274,365 |
def min_cmp(L, cmp=None):
"""
Returns the smallest item of a list (or iterable) with respect to
a comparison function.
INPUT:
``L`` -- an iterable
``cmp`` -- an optional comparison function.
``cmp(x, y)`` should return a negative value if `x < y`, `0` if
`x == y`, and a positive value if `x > y`.
OUTPUT: the smallest item of ``L`` with respect to ``cmp``.
EXAMPLES::
sage: from sage.misc.sage_itertools import min_cmp
sage: L = [1,-1,3,-1,3,2]
sage: min_cmp(L)
-1
sage: def mycmp(x,y): return y - x
sage: min_cmp(L, mycmp)
3
The input can be any iterable::
sage: min_cmp( (x^2 for x in L) )
1
sage: min_cmp( (x^2 for x in L), mycmp)
9
    Computing the min of an empty iterable raises an error::
sage: min_cmp([])
Traceback (most recent call last):
...
ValueError: min() arg is an empty sequence
sage: min_cmp([], mycmp)
Traceback (most recent call last):
...
ValueError: min_cmp() arg is an empty sequence
"""
if cmp is None:
return min(L) # Resort to Python's standard min
iterator = iter(L)
try:
m = next(iterator)
except StopIteration:
raise ValueError("min_cmp() arg is an empty sequence")
for item in iterator:
if cmp(item, m) < 0:
m = item
return m
|
fef9200f1b4ed4f663b93895fc4e4d6103b576a1
| 204,280 |
def scalar2str(arg, title=''):
""" Return a single li element with the value and possibly the title of the item.
Handles strings, ints, and floats.
"""
assert not (isinstance(arg, dict) or isinstance(arg, list) or isinstance(arg, tuple))
if title == '':
title_str = ''
else:
title_str = f'<strong>{title}:</strong> '
if isinstance(arg, str):
return f'{title_str}{arg}'
if isinstance(arg, int):
return f'{title_str}{arg:,}'
if isinstance(arg, float):
return f'{title_str}{arg:0.2f}'
return 'unexpected'
|
e9cc771cbcaaa924f0f8641efd53569954e14030
| 511,215 |
import re
def get_first_stacktrace(stderr_data):
"""If |stderr_data| contains stack traces, only returns the first one.
Otherwise returns the entire string."""
# Use question mark after .+ for non-greedy, otherwise it will match more
# than one stack trace.
sanitizer_stacktrace_regex = r'ERROR: [A-z]+Sanitizer: .*\n(.|\n)+?ABORTING'
match = re.search(sanitizer_stacktrace_regex, stderr_data)
# If we can't find the first stacktrace, return the whole thing.
if match is None:
return stderr_data
return stderr_data[:match.end()]
|
080ec4a56e7fd0c1377936fb881d72f09388d8ac
| 690,146 |
def check_pv(pv):
"""
Check input argument `pv`.
Parameters
----------
pv : int
Percentile of the pairwise distance distribution at which to truncate
during variogram fitting.
Returns
-------
int
Raises
------
ValueError : `pv` lies outside range (0, 100]
"""
try:
pv = int(pv)
except ValueError:
raise ValueError("parameter 'pv' must be an integer in (0,100]")
if pv <= 0 or pv > 100:
raise ValueError("parameter 'pv' must be in (0,100]")
return pv
|
9ff48c9af1a8dceae335dfcd37aab5a736332bfa
| 213,778 |
def tolerance(a, b, e):
"""Return if a-b is within tolerance e"""
d = a - b
if d < 0:
d = -d
if a != 0:
e = e * a
if e < 0:
e = -e
return d <= e
|
dddc8b641aae67102e4a2ec437f3106360d627d1
| 604,805 |
import requests
from typing import Optional
def is_successful_response(response: requests.models.Response) -> bool:
"""
Checks status code of response for success.
:param response: The response to check.
:type response: requests.models.Response
:return: Was response successful?
:rtype: bool
"""
accepted_status_code = str(response.status_code).startswith("2")
content_type: Optional[str] = response.headers.get("content-type")
accepted_content_type: bool = False
if content_type is not None:
accepted_content_type = content_type.startswith("text/html")
return accepted_status_code and accepted_content_type
|
668ec5fd019f98e71fda6433fdcca610fb5833ab
| 407,772 |
def genQrelStr(queryId, docId, relGrade):
"""Produces a string representing one QREL entry
:param queryId: question/query ID
    :param docId: relevant document/answer ID
:param relGrade: relevance grade
:return: a string representing one QREL entry
"""
return f'{queryId} 0 {docId} {relGrade}'
|
d06ab6cb40138b7714f473db7cae429926a1880d
| 409,293 |
def in_group(user, groups):
    """Returns a boolean indicating whether the user is in the given group, or in a
    comma-separated list of groups.
Usage::
{% if user|in_group:"Friends" %}
...
{% endif %}
or::
{% if user|in_group:"Friends,Enemies" %}
...
{% endif %}
"""
    group_list = groups.split(',')
try:
# print group_list, user.groups.filter(), user.groups.values('name'), user
return bool(user.groups.filter(name__in=group_list).values('name'))
except:
return False
|
e1b040f4e3534bd7198a5de69be21e6a94cbad4f
| 692,302 |
def _swap_labelmap_dict(labelmap_dict):
    """Swaps keys and values in a labelmap.
Args:
labelmap_dict: Input dictionary.
Returns:
A dictionary mapping class name to class numerical id.
"""
return {v:k for k, v in labelmap_dict.items()}
|
4ba570610c76bd8b6d024d54745985fe688e78c4
| 364,050 |
import time
import random
import math
def slow_prime_check(number):
"""This function is an intentionally slow and simple prime number check."""
# average wait time ~50ms.
time.sleep(100 / 1000 * random.random())
for i in range(2, int(math.sqrt(number)+1)):
if number % i == 0:
return False
return True
|
2440ce2d07eed954ce30b69f2bf29230b19f3506
| 74,025 |
def compare(x,y):
"""compares two numbers and returns 1 if x>y, 0 if x=y, and -1 if x<y"""
if x>y:
return 1
if x == y:
return 0
if x<y:
return -1
return "wtf"
|
16a8e3de683212ae5b431a74e75d8aa8a0dc3f40
| 430,841 |
def linear_system(x, K):
""" A linear system which scales a signal by a factor"""
return K * x
|
4d28803ed023f815fea51477fff9039ecfe2dc64
| 467,857 |
def get_outdir_simupara(fp, line):
"""
get output directory and simulation parameters (in format: time steps)
:param fp: opened file
:param line: current line
:return: output directory and simulation parameters
Note: output directory is '' if not specified and
list of simulation parameters is empty if not specified
"""
outdir = ''
simupara = []
while line:
line = fp.readline()
if not line:
break
if line == '--\n':
continue
line = line.strip('\n')
line = line.split(' ')
if len(line) == 1:
outdir = str(line[0])
else:
simupara = line
return outdir, simupara
|
22da06da0466657a5905ba3333b270787cf98a58
| 17,854 |