content (string, lengths 39–9.28k) | sha1 (string, length 40) | id (int64, 8–710k) |
---|---|---|
def make_sharded_tf_record_path(base_path, num_shards):
"""Makes a sharded tf.Record path with the given number of shards.
Args:
base_path: a path like "/path/to/tf.record"
num_shards: the desired number of shards
Returns:
a sharded path like "/path/to/tf.record-????-of-1000"
"""
num_shards_str = str(num_shards)
num_digits = len(num_shards_str)
return base_path + "-" + "?" * num_digits + "-of-" + num_shards_str
|
17c766aa982c5335a78d4227d5ea90cf4eea7fce
| 85,944 |
from typing import List
import torch
def collate_fn(batch_items: List[dict]):
"""
Collate and pad fields in dataset items
"""
x, y = [], []
input_specs_lengths, output_text_lengths = [], []
texts = []
audios = []
sample_rates = []
for i in range(len(batch_items)):
x.append(batch_items[i]["spectrogram"].squeeze(0).t())
y.append(batch_items[i]["text_encoded"].squeeze(0))
input_specs_lengths.append(x[-1].shape[0])
output_text_lengths.append(y[-1].shape[0])
texts.append(batch_items[i]["text"])
audios.append(batch_items[i]["audio"])
sample_rates.append(batch_items[i]["sample_rate"])
x = torch.nn.utils.rnn.pad_sequence(x, batch_first=True, padding_value=0.)
y = torch.nn.utils.rnn.pad_sequence(y, batch_first=True)
return {"spectrogram": x, "text_encoded": y, "spectrogram_length":
torch.tensor(input_specs_lengths, dtype=torch.int32),
"text_encoded_length": torch.tensor(output_text_lengths,
dtype=torch.int32), "text":
texts, "audio": audios, "sample_rate": sample_rates}
|
cfb36a8ad0033f435d8b8ffa369d3cf85e7b27ca
| 73,839 |
def nl_to_break( text ):
"""
Text may have newlines, which we want to convert to <br />
when formatting for HTML display
"""
text=text.replace("<", "<") # To avoid HTML insertion
text=text.replace("\r", "")
text=text.replace("\n", "<br />")
return text
|
d2baf1c19fae686ae2c4571416b4cad8be065474
| 709,912 |
def human_time(seconds):
"""Returns a human-friendly representation of the number of seconds."""
assert seconds >= 0
hours = seconds // (60 * 60)
minutes = (seconds // 60) % 60
seconds = seconds % 60
return '%02d:%02d:%02d' % (hours, minutes, seconds)
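# Usage sketch (illustrative value): 3661 seconds is 1 hour, 1 minute, 1 second.
assert human_time(3661) == '01:01:01'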
|
ee339ab652a3a715a15514b2d40a75eb46f05e1f
| 665,001 |
import gc
import logging
def train_classifier(classifier, X_train, y_train):
"""
Fits a classifier on the given data.
:param classifier: classifier
:param X_train: training attribute values
:param y_train: training classes
:return: a fitted model
"""
gc.collect()
logging.info("Training...")
# Experimental - to reduce memory consumption
# y_train, y_train_filename = mmap(y_train, 'y_train')
classifier.fit(X_train, y_train)
# Experimental - to reduce memory consumption
# X_train, X_train_filename = mmap(X_train, 'X_train')
gc.collect()
return classifier
|
eafeb18fd1c3a2f6d0bc7a849b3814ca4d4dd4fb
| 316,255 |
def _convert_from_boto3_format(record):
"""Convert From Boto3 Kinesis client record format to Kinesis Stream record format.
record - Raw Boto3 Kinesis client record to deaggregate. (dict)
return value - A single record in Kinesis Stream format. (dict)"""
new_record = {
'kinesis': {
'kinesisSchemaVersion': '1.0',
'sequenceNumber': record['SequenceNumber'],
'partitionKey': record['PartitionKey'],
'approximateArrivalTimestamp': record['ApproximateArrivalTimestamp'],
'data': record['Data']
}
}
return new_record
|
997dfbebb86163c683c45bdb6c9530230bfe0855
| 434,148 |
import pickle
def load_classifier(file_path):
"""Load Classifier from pickle"""
with open(file_path, "rb") as in_file:
classifier = pickle.load(in_file)
return classifier
|
3ef1d426ca77d70555ab5b24d0faddc4f368cca0
| 368,114 |
def _interval_sv_overlap(xs, x1, x2):
"""Return the indices of subvoxels that overlap with interval [x1, x2].
Parameters
----------
xs : numpy.ndarray
Array of subvoxel boundaries.
x1 : float
Start/end point of the interval.
x2 : float
End/start point of the interval.
Returns
-------
ll : int
Lowest subvoxel index of the overlapping subvoxels.
ul : int
Highest subvoxel index of the overlapping subvoxels.
"""
xmin = min(x1, x2)
xmax = max(x1, x2)
if xmin <= xs[0]:
ll = 0
elif xmin >= xs[-1]:
ll = len(xs) - 1
else:
ll = 0
for i, x in enumerate(xs):
if x > xmin:
ll = i - 1
break
if xmax >= xs[-1]:
ul = len(xs) - 1
elif xmax <= xs[0]:
ul = 0
else:
ul = len(xs) - 1
for i, x in enumerate(xs):
if not x < xmax:
ul = i
break
if ll != ul:
return ll, ul
else:
if ll != len(xs) - 1:
return ll, ul + 1
else:
return ll - 1, ul
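# Usage sketch (toy boundaries; a plain list behaves like a numpy array here):
# subvoxels span [0,1), [1,2), [2,3); the interval [0.5, 2.5] overlaps all three.
assert _interval_sv_overlap([0.0, 1.0, 2.0, 3.0], 0.5, 2.5) == (0, 3)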
|
8617486aa3061193c83d3893af3b0e152b8b7ae8
| 435,391 |
def pulse(time, start, duration):
"""
Implements vensim's PULSE function.
Parameters
----------
time: function
Function that returns the current time.
start: float
Starting time of the pulse.
duration: float
Duration of the pulse.
Returns
-------
float:
- In range [-inf, start):
returns 0
- In range [start, start + duration):
returns 1
- In range [start + duration, +inf]:
returns 0
"""
t = time()
return 1 if start <= t < start + duration else 0
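# Usage sketch (illustrative clock values passed as zero-argument callables):
assert pulse(lambda: 7.0, start=5, duration=3) == 1   # 5 <= 7 < 8
assert pulse(lambda: 9.0, start=5, duration=3) == 0   # outside the pulse window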
|
1034bd8a4da4bf53468b0c5a2e41d97631f89198
| 281,703 |
def item_location_tags(ptree, location):
"""Return a tags at item location
Parameters
----------
ptree : PageTree
location : ItemLocation
Returns
-------
List[HtmlTag]
"""
last = location[-1]
end = max(last + 1, ptree.match[last])
if end >= len(ptree.index):
end = ptree.index[-1]
else:
end = ptree.index[end]
tags = ptree.page.parsed_body[ptree.index[location[0]]:end]
return tags
|
cc87649d833917eab43e6bbef60a8cf538be61f4
| 496,311 |
import json
def make_control_event(control_type, timestamp):
"""Make a control event."""
return {
'event': 'message',
'data': json.dumps({
'id':'TVUsxaabHs:0:0',
'clientId':'pri:MzM0ODI1MTkxMw==',
'timestamp': timestamp,
'encoding':'json',
'channel':'[?occupancy=metrics.publishers]control_pri',
'data': json.dumps({
'type': 'CONTROL',
'controlType': control_type,
})
})
}
|
845bf1b28a92ea37f5e3d4d1dfc25c739f1c27cf
| 55,970 |
import string
def alpha_enum(value):
"""Convert integer to ordinal letter code (a, b, c, ... z, aa, bb, ...)."""
max_len = len(string.ascii_lowercase)
return str(string.ascii_lowercase[value % max_len] * (value // max_len + 1))
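# Usage sketch (illustrative values):
assert alpha_enum(0) == 'a'
assert alpha_enum(25) == 'z'
assert alpha_enum(27) == 'bb'   # wraps around and repeats the letter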
|
ec106ef8ad22aced160521bfaad8f7e17e0cd848
| 595,699 |
def parse_anything(text, match=None, match_start=0):
"""
Provides a generic type converter that accepts anything and returns
the text (unchanged).
:param text: Text to convert (as string).
:return: Same text (as string).
"""
# pylint: disable=unused-argument
return text
|
c1b8362bbe052ef57e798ab5a44a11892784811e
| 445,456 |
def to_camel_case(snake_str):
"""
Transforms a snake_case string into camelCase.
Parameters
----------
snake_str : str
Returns
-------
str
"""
parts = snake_str.split("_")
# We capitalize the first letter of each component except the first one
# with the 'title' method and join them together.
return parts[0] + "".join(x.title() for x in parts[1:])
|
02e28889da2a92fc5e085ad955b11519b5069dc4
| 689,093 |
def _is_command(line: str) -> bool:
"""
Return whether the given line is a command.
Returns False for comments and empty lines.
Empty lines behave similarly to comments,
for example continuing a multi-line command.
"""
stripped = line.strip()
return bool(stripped) and not stripped.startswith("#")
|
50d01053fec019b34aad033f2df371dd1ad158a4
| 264,485 |
def reverse_string_recursive(s):
"""
Returns the reverse of the input string
Time complexity: O(n^2) = O(n) slice * O(n) recursive call stack
Space complexity: O(n)
"""
if len(s) < 2:
return s
# String slicing is O(n) operation
return reverse_string_recursive(s[1:]) + s[0]
|
a8d22e88b1506c56693aa1a8cd346695e2b160b2
| 9,932 |
def u_global(name, prefix='u_', suffix=''):
"""Returns the uncertainty corresponding to a column. For example, the
column ('roi_atoms', 'Fz', '', '') has uncertainties in the column
('roi_atoms', 'u_Fz', '', '')
"""
if type(name) == tuple:
i = len(name)-1
while not name[i]:
i -= 1
t = name[:i]
t += (prefix + name[i] + suffix,)
t += name[i+1:]
return t
elif type(name) == str:
return prefix + name + suffix
else:
return None
|
047ef38d2031b98c5f759481c006e92ab47d4523
| 60,103 |
def get_one_hot_index_from_ints(tonic, quality, num_qualities=3):
"""
Get the one-hot index of a chord with the given tonic and quality.
Parameters
==========
tonic : int
The tonic of the chord, where 0 is C. None represents a chord with no tonic.
quality : int
The quality of the chord, where 0 is major, 1 is minor, and 2 is diminished.
num_qualities : int
The number of allowable qualities in this set of chords. Defaults to 3.
Returns
=======
index : int
The one-hot index of the given chord and quality. A chord with no tonic returns
an index of 36.
"""
try:
return tonic + 12 * quality
except TypeError:
return 12 * num_qualities
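# Usage sketch (illustrative chords): D minor is tonic 2, quality 1; no tonic falls back to 36.
assert get_one_hot_index_from_ints(2, 1) == 14
assert get_one_hot_index_from_ints(None, 0) == 36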
|
bdd96249b11379d03a11986ef08e1fca1323a64a
| 162,151 |
def abs2rel_name(domain, rsetname):
"""convert rsetname from absolute form foo.bar.tld. to the name
relative to the domain. For IPA, if domain is rsetname, then use
"@" as the relative name. If rsetname does not end with a subset
of the domain, then just return the raw rsetname
"""
if rsetname.endswith(domain):
idx = rsetname.rfind(domain)
if idx == 0:
rsetname = "@"
elif idx > 0:
rsetname = rsetname[:idx].rstrip(".")
return rsetname
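# Usage sketch (hypothetical zone names):
assert abs2rel_name("example.com.", "www.example.com.") == "www"
assert abs2rel_name("example.com.", "example.com.") == "@"
assert abs2rel_name("example.com.", "other.net.") == "other.net."   # unrelated name returned as-is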
|
a76601f844a6f6af0686e5a1f4a203fb4981e291
| 312,178 |
import math
def include_revision(revision_num, skip_factor=1.1):
"""Decide whether to include a revision.
If the number of revisions is large, we exclude some revisions to avoid
a quadratic blowup in runtime, since the article is likely also large.
We make the ratio between consecutive included revision numbers
approximately equal to "factor".
Args:
revision_num: an integer
skip_factor: a floating point number >= 1.0
Returns:
a boolean
"""
if skip_factor <= 1.0:
return True
return (int(math.log1p(revision_num) / math.log(skip_factor)) != int(
math.log(revision_num + 2.0) / math.log(skip_factor)))
|
791789624887940bbb8db5de1157b69ededd0988
| 603,371 |
import re
def make_system_call_noninteractive( line ):
"""
Make calls to os.system non-interactive. Return the non-interactive line.
Some shell commands called via os.system require user interaction, such as
more. Make these shell calls noninteractive.
Replacements:
1) Replace more with cat,
2) [Add other replacements here...]
* line = a python statement
"""
command = ''
newCommand = ''
# Extract the system command from the os.system statement
pattern = r'''os.system\ *\(\ *(['"])(.*)\1\ *\)'''
matchObj = re.match( pattern, line )
if matchObj:
command = matchObj.group(2)
command = command.strip()
# Replace more with cat
pattern2 = r'''^more\ '''
newCommand = re.sub( pattern2, 'cat ', command )
# Add additional substitutions here...
newLine = line.replace( command, newCommand, 1 )
return newLine
|
3d8b0dfc3b8683a3181474dafa08c46e118e56a0
| 254,363 |
import torch
def isPD(B):
"""Check whether a matrix is positive definite.
Args:
B ([torch.Tensor]): [Input matrix.]
Returns:
[bool]: [Returns True if matrix is positive definite, otherwise False.]
"""
try:
_ = torch.cholesky(B)
return True
except RuntimeError:
return False
|
c51dc4f6f48ac7417f49ef41b81f3b04816b9279
| 703,950 |
def get_kp(frame,do,c,s):
"""
Returns the keypoints for a single frame of video.
Parameters
----------
frame : numpy.ndarray
The frame of video to be analysed.
do : function
Function provided by Hourglass code for inference.
c : tuple of int
The coordinates for the centre of the image.
s : float
The scale of the image such that height/scale = 200.
Returns
-------
keypoints : numpy.ndarray
The keypoints of the frame consisting of an x, y and confidence value
for each keypoint.
"""
pred = do(frame, c, s)
kps = pred[0]["keypoints"]
return kps
|
6a422e794ff9da9d8f4bafa712f4ce6f1124c255
| 426,115 |
import six
def _normalize_channels(*channels):
"""
Converts a list of types, strings, or whatever else into a list of strings.
>>> _normalize_channels()
[]
>>> _normalize_channels('')
[]
>>> _normalize_channels(' ')
[]
>>> _normalize_channels(None)
[]
>>> _normalize_channels('topic-one', 'topic-two')
['topic-one', 'topic-two']
>>> _normalize_channels('repeated-topic', 'repeated-topic')
['repeated-topic']
>>> _normalize_channels('topic-left', None, 'topic-right', None)
['topic-left', 'topic-right']
>>> _normalize_channels('', 'topic-left', '', 'topic-right')
['topic-left', 'topic-right']
>>> _normalize_channels(' ', ' topic-padded-left', ' ', 'topic-padded-right ', ' ')
['topic-padded-left', 'topic-padded-right']
>>> _normalize_channels(type)
['type']
>>> _normalize_channels(dict)
['dict']
>>> _normalize_channels(dict(foo="bar"))
["{'foo': 'bar'}"]
"""
normalized_channels = []
for topic in channels:
if topic is not None:
if isinstance(topic, type):
normalized_channels.append(topic.__name__)
elif isinstance(topic, six.string_types):
topic = topic.strip()
if topic != '':
normalized_channels.append(topic)
else:
normalized_channels.append(str(topic))
return list(dict.fromkeys(normalized_channels))  # dedupe while preserving insertion order, as the doctests expect
|
b4b46def9ed0ce7017c7749e2ecd23b42b63e76b
| 562,539 |
def get_full_job_description_text(job):
"""Returns the job_description as a continuous text, concatenated from the separate lines."""
job_text = ""
for line in job['description']:
job_text += (line + " ") # EOL " " to help date extraction
return job_text.replace(" ", " ")
|
0a13db6de3e63ba1ff4e57134a03d0557c525fae
| 148,066 |
def sub2ind(shape, row_sub, col_sub):
"""
Return the linear index equivalents to the row and column subscripts for
given matrix shape.
:param shape: Preferred matrix shape for subscripts conversion.
:type shape: `tuple`
:param row_sub: Row subscripts.
:type row_sub: `list`
:param col_sub: Column subscripts.
:type col_sub: `list`
"""
assert len(row_sub) == len(
col_sub), "Row and column subscripts do not match."
res = [j * shape[0] + i for i, j in zip(row_sub, col_sub)]
return res
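# Usage sketch (illustrative 3x4 shape): linear index = column * n_rows + row (column-major).
assert sub2ind((3, 4), [0, 1], [0, 2]) == [0, 7]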
|
bd3e640171f23f80c21a66c92e4d5e3292cddea7
| 73,776 |
import csv
def loadcsv(filename):
"""
Reads an input CSV file.
Args:
filename (str): input file path.
Returns:
List of all non-empty rows from the CSV file, excluding the header row.
"""
with open(filename, "r", encoding="utf-8") as f:
return list(filter(None, list(csv.reader(f))[1:]))
|
442d0fdf7bcc160e98c83d7c848ec9477cf757fe
| 12,919 |
def genome_dic(g_file):
"""
Make a dictionary of chromosome sizes from a .chrom.sizes file
e.g. {chrom_name:chrom_size}
"""
gdict = {}
with open(g_file) as ifile:
for i in ifile:
i = i.split()
gdict[i[0]] = int(i[1])
return gdict
|
1289b5da4f31d5847d35926e08f196b078b4ee8e
| 318,691 |
def calculate_expected_wins(win_rate, num_games):
"""Calculate current expected wins"""
expected_wins = win_rate * float(num_games)
result = int(round(expected_wins, 0))
return result
|
5f979a861a0fae1e0fe654c9cc4c9ccf6583b35e
| 678,484 |
def _GetAttr(obj, attr, default_value):
"""Gets a specified attribute of an object (if exist).
Args:
obj: Object.
attr: Attribute name (string).
default_value: Default value to return if object does not have attribute
specified.
Returns:
Specified attribute value if exists, else default value.
"""
if hasattr(obj, attr):
return obj.__getattribute__(attr)
else:
return default_value
|
7d45b62523ad4d15f525463d46ae320f6741d8f8
| 478,505 |
def _get_atoms_from_blocks(blocks, master_blocks):
"""Get list of atoms in a set of blocks"""
atoms_nested = [master_blocks[blk] for blk in blocks]
atoms = [at for sublist in atoms_nested for at in sublist]
return atoms
|
ec6d032e7aa4d71618c5510fac60c44beec950bd
| 363,208 |
def auth_set(hashed_sks, auth_set_indices, height, hashfun):
"""Return the authentication set defined by the given indices.
Keyword arguments:
hashed_sks -- the hased secret key components which form the
leaves of the tree
auth_set_indices -- A list of tuples (h, i) defining the height and index of
the nodes that should end up in the authentication set
height -- the height of the binary tree
hashfun -- a hash function of 2n -> n that is used to produce the
parent node from its two child nodes
Returns:
A list containing tuples ((h, i), hash), the height, index, and hash of the
node in the authentication set. The order of the nodes in the returned list
is equal to the order of the nodes in auth_set_indices.
"""
tree = [None] * height
tree[0] = hashed_sks # the leaves
for current_height in range(1, height): # We don't need to compute the root,
# otherwise this would be off by one
num_nodes = 2**(height - current_height)
tree[current_height] = [None] * num_nodes
for index in range(0, num_nodes):
left = tree[current_height - 1][2*index]
right = tree[current_height - 1][2*index + 1]
tree[current_height][index] = hashfun(left, right)
return [((h, i), tree[h][i]) for h, i in auth_set_indices]
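# Usage sketch (toy tree of height 2 with string "hashes"; a real hashfun would map 2n -> n bytes):
toy_leaves = ["a", "b", "c", "d"]
toy_hash = lambda left, right: left + right
assert auth_set(toy_leaves, [(0, 1), (1, 1)], 2, toy_hash) == [((0, 1), "b"), ((1, 1), "cd")]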
|
0f4565d4aa399ab534a962b6f3ec89fb3325cdb5
| 29,725 |
def eastPlaceCheck(shipSize, xIndexStart, yStart, xAxis):
"""Given a start position on the x index and a ship size will check if
if the y position is a valid location.
Args:
shipSize (INT): Ship size taken from Ship Class
xIndexStart (INT): Start index location
yStart (STR): The start position on the Y Axis, is a letter
yAxis ([INT]): The x x axis range in a list
Returns:
[List]: If range is a valid location for the y axis start will return
the valid list, else it will return false.
"""
eastShipPlace = []
tempLastIndex = xIndexStart + shipSize
if tempLastIndex >= 0 and tempLastIndex <= 10:
for i in range(xIndexStart, tempLastIndex):
eastShipPlace.append(f'{yStart}:{xAxis[i]}')
return eastShipPlace
else:
return False
|
838681e92636246742df52e85983f04d20701731
| 489,613 |
def double_day(bday_1, bday_2, n=2):
"""Return the day when one person is twice as old as the other,
given their birthdays. Optionally find when one person is n times older.
bday_1, bday_2: date (or datetime) objects. Must be the same type.
n: number, > 1
"""
# Double day will be when the younger person's age is the
# same as their difference in age.
# The n-1 factor will just be 1 and have no effect if we're
# doubling (n=2).
# younger = older - diff. So if older = n * younger,
# younger = n*younger - diff ==> younger = diff/(n-1).
# So we just need the date when younger's age is diff/(n-1).
diff = abs(bday_2 - bday_1)
return max(bday_1, bday_2) + diff / (n-1)
|
95510f8edba658e9a7f532edb057fc9ffc75545e
| 59,031 |
def lambda_handler(event, context):
"""
Expecting: [ Input, [ O1, ..., On-1 ], [On, ... On+r ] ]
Returns: [ Input, [ O1, ... On+r ] ]
"""
return [ event[0], event[1] + event[2] ]
|
c6b5686aeeb0bdedb627a6d6bd854a3b0786b975
| 378,097 |
def __clean_key(key):
"""
modify name of the key in order to be usable in file store
:param key: key of the data
:return: key with ':' replaced by '__'
"""
return str(key).replace(':', '__')
|
5dfcb181c271ececad7b195db0a6027bffab97a9
| 117,429 |
def get_guess(guesses):
"""
This function will get the user's guess.
"""
# Get the user's guess.
guess = input("------\nGuess a letter: ")
# Check if the user has already guessed the letter.
if guess in guesses:
print("You've already guessed that letter.")
return get_guess(guesses)
# Return the guess.
return guess
|
04b559d3850421ef91fa1ce5d9850b2f4852f917
| 8,180 |
def quoteList(strns):
"""Given a list of strings, return a single string like '"string1", "string2",...'
Note: in SQLite, double quotes are for escaping table and column names;
single quotes are for string literals.
"""
return ','.join(['"'+s+'"' for s in strns])
|
82b56c96e822e80524a1b14aae033d5c7a78685a
| 105,169 |
def unwrap(text):
"""Unwrap a hard-wrapped paragraph of text.
"""
return ' '.join(text.splitlines())
|
ad84e184524f1f77a6ce3b490d0471f916199cac
| 59,001 |
def reverse_bit(value):
"""Calculate the value of the reverse binary representation of the given integer."""
result = 0
while value:
result = (result << 1) + (value & 1)
value >>= 1
return result
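# Usage sketch: 0b1011 reversed is 0b1101; leading zeros are dropped (0b100 -> 0b001).
assert reverse_bit(0b1011) == 0b1101
assert reverse_bit(0b100) == 0b001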
|
6e09b7eede8a8ee4d93355bf93993a87992bd230
| 470,245 |
def genapp_second_customer_response() -> bytes:
"""Returns a matching customer lookup result"""
return b'{"LGCMAREA":{"CA_REQUEST_SPECIFIC":"Scott Tracey 1965-09-30Tracey Island 1 TB14TV 000 001 911911 REFROOM@TBHOLDINGS.COM","CA_CUSTOMER_REQUEST":{"CA_DOB":"1965-09-30","CA_FIRST_NAME":"Scott","CA_POLICY_DATA":" 001 911911 REFROOM@TBHOLDINGS.COM","CA_LAST_NAME":"Tracey","CA_HOUSE_NAME":"Tracey Island","CA_NUM_POLICIES":0,"CA_HOUSE_NUM":"1","CA_POSTCODE":"TB14TV"},"CA_RETURN_CODE":0,"CA_CUSTOMER_NUM":2}}'
|
389d818ca0ceb51a9083e3d9ee6515741781f074
| 223,888 |
def split_task_parameters(line):
""" Split a string of comma separated words."""
if line is None:
result = []
else:
result = [parameter.strip() for parameter in line.split(",")]
return result
|
edbd778c496464ba2e86d8efb00f4a6c78da8fe4
| 544,278 |
from typing import Dict
def add_resources(dict1: Dict[str, float],
dict2: Dict[str, float]) -> Dict[str, float]:
"""Add the values in two dictionaries.
Returns:
dict: A new dictionary (inputs remain unmodified).
"""
new_dict = dict1.copy()
for k, v in dict2.items():
new_dict[k] = v + new_dict.get(k, 0)
return new_dict
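# Usage sketch (illustrative resource names):
assert add_resources({"CPU": 1.0}, {"CPU": 2.0, "GPU": 1.0}) == {"CPU": 3.0, "GPU": 1.0}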
|
501a30b9e71049d9418dd6daf7bd98d96ea8fe0f
| 58,723 |
def return_list_of(cls, data):
"""
Returns a list of instances of the class type found in the response.
:param cls: class
The class type we want to instantiate
:param data: list
The data containing the list of the dictionaries we want to return as objects
:return:
A list of objects of the type specified found in the response
"""
return [cls(x) for x in data]
|
ab8491202c02f1a10c8bb9e32eb5b9f268a1b976
| 136,707 |
import pytz
from datetime import datetime
def unixepoch2datetime(unixepoch):
"""Given a timestamp in milliseconds, it returns a datetime object.
Parameters
----------
unixepoch : int
Timestamp at millisecond precision
Returns
-------
time : datetime
Datetime object corresponding to the original timestamp
"""
unixepoch = float(unixepoch) / 1000.0
time = pytz.UTC.localize(datetime.utcfromtimestamp(unixepoch))
return time
|
c1e227a10d5e2e4e772ba1ea080d0f617e9c244c
| 272,612 |
import math
def _remove_edge_blobs(blobs, image_shape):
"""Remove blobs detected around the edge of the image.
:param blobs: list of blobs detected from the image
:param image_shape: shape of the image. Provides the bounds to check.
:returns: list of filtered blobs
"""
img_height, img_width = image_shape
def check_within_image(blob):
y, x, r = blob
r = math.ceil(r)
return not ((x - r < 0 or x + r >= img_width) or
(y - r < 0 or y + r >= img_height))
return filter(check_within_image, blobs)
|
15bb099c13102a138bb43def8fb00c8c944cdb90
| 599,766 |
import torch
def cross_entropy(input, target, size_average=True):
""" Cross entropy that accepts soft targets
Args:
input: predictions from the neural network
target: targets, can be soft
size_average: if false, sum is returned instead of mean
Examples::
input = torch.FloatTensor([[1.1, 2.8, 1.3], [1.1, 2.1, 4.8]])
input = torch.autograd.Variable(input, requires_grad=True)
target = torch.FloatTensor([[0.05, 0.9, 0.05], [0.05, 0.05, 0.9]])
target = torch.autograd.Variable(target)
loss = cross_entropy(input, target)
loss.backward()
"""
logsoftmax = torch.nn.LogSoftmax(dim=1)
if size_average:
return torch.mean(torch.sum(-target * logsoftmax(input), dim=1))
else:
return torch.sum(torch.sum(-target * logsoftmax(input), dim=1))
|
b82f02b5d16ac6a572a724ae494802cfd4860a2c
| 545,194 |
def log_stability(x):
"""Log-stability for computing loss
:param x: Input value
:type x: float
:return: Scaled value where :math:`\\hat{x} \\in (0, 1)`
:rtype: float
"""
if x == 0:
return 10e-9
elif x == 1:
return 1.0-10e-9
else:
return x
|
df653857b7ce620bcfc16c686e7a65b63d639195
| 441,826 |
from io import StringIO
import traceback
def _get_traceback_text(exc_type, value, tb) -> str:
"""return the text that would be printed by
traceback.print_exception.
"""
sio = StringIO()
traceback.print_exception(exc_type, value, tb, file=sio)
return sio.getvalue()
|
fbddac64f88335b1e19e3e6690dacac2a1bedf8d
| 436,938 |
def read_data(filename):
"""
Reads and parses data from a txt file with a map data. The format that the
function expects is the one followed in the uwaterloo TSP web archive
(math.uwaterloo.ca/tsp/world/countries.html), ignoring the first
description lines that have to be manually removed. The method searches for
the filename in the assets folder.
:param filename: the path to the file to be parsed
:return: the cities as a list of (x, y) coordinates
"""
cities = []
path = 'assets/{}.txt'.format(filename)
with open(path, 'r') as f:
for line in f:
city = list(map(float, line.split()[1:]))
cities.append((city[1], city[0]))
return cities
|
86267ac4cfd9069c87764bb63db8031671cc1e46
| 402,579 |
import random
def random_system(tree):
"""Returns random system node.
"""
systems = tree.xpath('.//system')
idx = random.randrange(0,len(systems))
return systems[idx]
|
97d16be47082fb7cf56ac8a3ba9dc2b31298854c
| 661,216 |
def get_keys_of_max_n(dict_obj, n):
"""Returns the keys that maps to the top n max values in the given dict.
Example:
--------
>>> dict_obj = {'a':2, 'b':1, 'c':5}
>>> get_keys_of_max_n(dict_obj, 2)
['a', 'c']
"""
return sorted([
item[0]
for item in sorted(
dict_obj.items(), key=lambda item: item[1], reverse=True
)[:n]
])
|
16b51090ac086a708deccf6876201857e06ef0fc
| 639,000 |
def value_to_zero_ten(confidence_value):
"""
This method will transform an integer value into the 0-10 scale string
representation.
The scale for this confidence representation is the following:
.. list-table:: STIX Confidence to 0-10
:header-rows: 1
* - Range of Values
- 0-10 Scale
* - 0-4
- 0
* - 5-14
- 1
* - 15-24
- 2
* - 25-34
- 3
* - 35-44
- 4
* - 45-54
- 5
* - 55-64
- 6
* - 65-74
- 7
* - 75-84
- 8
* - 85-94
- 9
* - 95-100
- 10
Args:
confidence_value (int): An integer value between 0 and 100.
Returns:
str: A string corresponding to the 0-10 scale.
Raises:
ValueError: If `confidence_value` is out of bounds.
"""
if 4 >= confidence_value >= 0:
return '0'
elif 14 >= confidence_value >= 5:
return '1'
elif 24 >= confidence_value >= 15:
return '2'
elif 34 >= confidence_value >= 25:
return '3'
elif 44 >= confidence_value >= 35:
return '4'
elif 54 >= confidence_value >= 45:
return '5'
elif 64 >= confidence_value >= 55:
return '6'
elif 74 >= confidence_value >= 65:
return '7'
elif 84 >= confidence_value >= 75:
return '8'
elif 94 >= confidence_value >= 85:
return '9'
elif 100 >= confidence_value >= 95:
return '10'
else:
raise ValueError("Range of values out of bounds: %s" % confidence_value)
|
130b26d5c367b7300566044ea2befb3ad888bb56
| 171,267 |
def onedim_index(index, axis, ndim):
"""Return an index from a 1-dimensional index along the given axis
>>> index = onedim_index(np.index_exp[1,2], 1, 3)
>>> A=np.eye(3)
>>> A[index]
array([[ 0., 0.],
[ 1., 0.],
[ 0., 1.]])
"""
return (slice(None),) * (axis % ndim) + (index,)
|
763cce64b8b1d3583ac395cd5e3308edcfc3cc1b
| 438,864 |
def dict_slice(Dict, *keys):
"""Returns a shallow copy of the subset of a given dict (or a dict-like
object) with a given set of keys.
The return object is a dict.
No keys may be missing from Dict.
Example: if d = {'abc': 12, 'def': 7, 'ghi': 32, 'jkl': 98 }
then dict_slice(d, 'abc', 'ghi') will yield {'abc': 12, 'ghi': 32 }
"""
return dict([ (k, Dict[k]) for k in keys ])
|
4c5fc3a548b2e46853810487fe77655d00584761
| 57,953 |
def filter_by_length(lst, length):
"""
This function removes from a list of strings
all those elements which are shorter than or
as long as the specified length
Arguments
lst : a list of strings
length : (int) min length of the strings to keep
Returns
filtered list
"""
return list(filter(lambda x: len(str(x)) > length, lst))
|
ba21175ae55ceb591a3c985897fc0d04aa520112
| 468,334 |
def build_array(text):
"""Returns an array of parsed lines from the input text
Array elements are in the format:
(min, max, character, string)
"""
array = []
with open(text, 'r') as f:
for line in f:
_range, char, s = line.strip().split()
n, m = _range.split('-')
array.append((int(n), int(m), char[0], s))
return array
|
2a46b5f09bb08bc146c56d94b175a9c0fbc407ab
| 75,837 |
import base64
def _get_creds_from_token(token):
"""
Decode ecr token into username and password.
"""
cred_string = base64.b64decode(token).decode("ascii")
username, password = str(cred_string).split(":")
return username, password
|
46f1457c9eb3df5d21fa5b2d414b44ed6f5ae7aa
| 144,041 |
def find_in_string(sub, string):
"""Check if a substring is contained in another string.
:param sub: Substring
:type: str
:param string: String which is checked if the substring is contained
:type: str
:return: 1 if either string is contained in the other, else 0
:rtype: int
"""
sub_l = sub.lower()
string_l = string.lower()
if sub_l in string_l or string_l in sub_l:
return 1
else:
return 0
|
e16567e39f29771d98b2a8280fb0a066d21db20f
| 606,661 |
def part_decode(part):
"""Decode a part of a JSON pointer.
>>> part_decode("foo")
'foo'
>>> part_decode("~0foo")
'~foo'
>>> part_decode("foo~0")
'foo~'
>>> part_decode("~1foo")
'/foo'
>>> part_decode("foo~1")
'foo/'
>>> part_decode("f~1o~0o")
'f/o~o'
>>> part_decode("~00")
'~0'
>>> part_decode("~01")
'~1'
>>> part_decode("0")
'0'
"""
return part.replace("~1", "/").replace("~0", "~")
|
ccb527f2e8e671dca08894089dbd8f86ce52c39a
| 356,119 |
import re
def parse_stylesheet_header(css):
"""Get WordPress theme data from CSS file contents via the comment header.
:param css: The contents of the CSS file
:type css: string
:return: Theme info. See https://codex.wordpress.org/File_Header
:rtype: dict
"""
headers = [
'Theme Name',
'Theme URI',
'Description',
'Author',
'Author URI',
'Version',
'Template',
'Template Version',
'Status',
'Tags',
'Text Domain',
'Domain Path'
]
result = {}
for header in headers:
regex = re.escape(header + ':') + r'(.*)'
match = re.search(regex, css, flags=re.IGNORECASE)
if match:
result[header.lower().replace(' ', '_')] = match.group(1).strip()
return result
|
115a3bd734ee122dfc6e4b5f76cfad0d655138a3
| 86,148 |
def custom_input_number(prompt=''):
""" Take in user input. This deals with real
number only. See below for a function that deals with
purely alphabetic strings.
:param prompt: a customizable prompt
:return:
no: a real number
"""
cF = True
while cF:
try:
no = float(input(prompt))
return no
except ValueError:
print('Cannot be converted to a number')
continue
|
bd93ccfe432496110511fd9148169e1e1f7f9a74
| 211,420 |
def sort_dict(dictionary):
"""utility function that from a dictionary returns a list of tuples ordered by dictionary values"""
d = dict(dictionary)
l = []
for key in d.keys():
l.append((d[key], key))
l.sort(reverse=True)
return l
|
5d0125d8d90b61a4d8a9c4f611fa58103a6b181f
| 464,773 |
def get_uid_warning(uid: str) -> str:
"""
Returns the templated message for the warning of existing `UID`,
while creating a `Student`
"""
uid_href = f"/students/{uid}/#:~:text={uid}"
msg = f"Student with UID <a class=\"alert-link text-secondary font-weight-medium text-decoration-none\"\
href=\"{uid_href}\" target=\"_blank\">{uid}</a> already exists.. maybe you mistyped!"
return msg
|
a701b6d42ca78e93121903251d5887155fb73835
| 632,963 |
def champion_dictionary(obj):
"""Converts champion.json to a dictionary that is useful to us."""
champions = {}
for champion in obj["data"]:
name = champion.lower()
champions[int(obj["data"][champion]["key"])] = name
return champions
|
13e0177f23adb557e2db3b4e3967311765504fd3
| 329,862 |
def max_dict(d):
"""Return dictionary key with the maximum value"""
if d:
return max(d, key=lambda key: d[key])
else:
return None
|
446c185a2e986c8a58672d0571f0f100340be7e4
| 26,758 |
import time
def gen_time(s):
"""
Takes a string in YYYY/MM/DD hh:mm format and converts it to
a float of seconds since the epoch.
For example:
>>> gen_time("2007/02/14 14:14")
1171480440.0
"""
return time.mktime(time.strptime(s, "%Y/%m/%d %H:%M"))
|
6d0bc96cfc525756e059a6ad1635fe0543a39b45
| 245,281 |
def combine_dicts(new_dict, old_dict):
"""
returns a dictionary with all key, value pairs from new_dict.
also returns key, value pairs from old_dict, if that key does not exist in new_dict.
if a key is present in both new_dict and old_dict, the new_dict value will take precedence.
"""
old_data_keys = list(old_dict.keys())
new_data_keys = list(new_dict.keys())
all_keys = set(old_data_keys).union(new_data_keys)
combined_data_dict = {}
for k in all_keys:
try:
combined_data_dict[k] = new_dict[k]
except KeyError:
combined_data_dict[k] = old_dict[k]
return combined_data_dict
|
fe3a619235e30d801aa8d75585dcbd80eab21512
| 66,821 |
import requests
import time
def render_analysis(response, operation_url, headers):
"""
This function waits for the analysis of the response image,
so it runs a loop until the text detection is complete
Arguments:
response {Object} -- The response passed
operation_url {String} -- The Operation url obtained from response
headers {dict} -- Request headers used when polling the Operation url
Returns:
[json] : The analysis json containing the detected text
"""
analysis = {}
poll = True
while (poll):
response_final = requests.get(
response.headers["Operation-Location"], headers=headers)
analysis = response_final.json()
time.sleep(1)
if ("recognitionResult" in analysis):
poll = False
if ("status" in analysis and analysis['status'] == 'Failed'):
poll = False
return(analysis)
|
b80210cede55ac2482c054a3e5dda830fa55a5bd
| 443,761 |
def has_common_column(a, b):
"""
Returns:
True if A & B have at least one common column
"""
columns_a = [(c.name, c.data_type) for c in a.columns.columns]
columns_b = [(c.name, c.data_type) for c in b.columns.columns]
if list(set(columns_a) & set(columns_b)):
return True
return False
|
d560e4b994315fc81ee9eecbce9f8a1d8aac40d6
| 152,611 |
def euler_problem_26(n=1000):
"""
A unit fraction contains 1 in the numerator. The decimal representation of the unit fractions with denominators 2 to 10 are given:
1/2 = 0.5
1/3 = 0.(3)
1/4 = 0.25
1/5 = 0.2
1/6 = 0.1(6)
1/7 = 0.(142857)
1/8 = 0.125
1/9 = 0.(1)
1/10= 0.1
Where 0.1(6) means 0.166666..., and has a 1-digit recurring cycle. It can be seen that 1/7 has a 6-digit recurring cycle.
Find the value of d < 1000 for which 1/d contains the longest recurring cycle in its decimal fraction part.
"""
# a bit of mathematical insight is helpful here.
# if d is divisible by 2 or 5, d has the same number of digits in the 1/d recurring cycle as (d/2) or (d/5) respectively.
# assuming that d is not divisible by 2 or 5, then the smallest m with (10^m - 1) divisible by d gives the length of the recurring cycle.
def remove_2_5_factors(num):
"""
Divide a number by 2 and 5 until it becomes coprime with 2 and 5.
"""
if num % 2 == 0:
return remove_2_5_factors(num // 2)
if num % 5 == 0:
return remove_2_5_factors(num // 5)
return num
cache_cycle_length = {}
for d in range(2, n):
d_equivalent = remove_2_5_factors(d)
# base case: d has no prime factors other than 2 and 5
if d_equivalent == 1:
cache_cycle_length[d] = 0
# cached case: d is divisible by 2 or 5 and shares its cycle length with d_equivalent
elif d_equivalent in cache_cycle_length.keys():
cache_cycle_length[d] = cache_cycle_length[d_equivalent]
# base case: d is not divisible by 2 or 5
else:
# one should be alerted if the for loop fails to update the cycle length.
cache_cycle_length[d] = -1
for m in range(1, 1000):
if (10 ** m - 1) % d == 0:
cache_cycle_length[d] = m
break
if min(cache_cycle_length.values()) < 0:
print("Warning: some number has longer cycle length than we screened for.")
d_to_return = max(cache_cycle_length.keys(), key=lambda x: cache_cycle_length[x])
return d_to_return, cache_cycle_length[d_to_return]
|
b85723cbb35303f383ac19dd6d2d94fe9b60bc74
| 78,764 |
def no_client_alias_in_test_cases(filename, logical_line):
"""Check that test cases don't use "self.client" to define a client.
P103
"""
if "patrole_tempest_plugin/tests/api" in filename:
if "self.client" in logical_line or "cls.client" in logical_line:
return 0, "Do not use 'self.client' as a service client alias"
|
f43c8c4a01c6ab5a52aa8b37cfb9b6ea03605d91
| 203,425 |
def get_permission_for_model(model, action):
"""
Resolve the named permission for a given model (or instance) and action (e.g. view or add).
:param model: A model or instance
:param action: View, add, change, or delete (string)
"""
if action not in ('view', 'add', 'change', 'delete'):
raise ValueError(f"Unsupported action: {action}")
return '{}.{}_{}'.format(
model._meta.app_label,
action,
model._meta.model_name
)
|
efc889a981bfb528095697b50d1b12a18e3daee8
| 483,335 |
def _path_to_str(path):
"""Converts a path (tuple of strings) to a printable string."""
return ":".join(str(field) for field in path)
|
b290d53a8bfb1f34c2ce4ec01083475bf217ed59
| 394,925 |
def diff2uni(l_v, r_v, L, R):
"""
Convert differential to unicycle
:param l_v: left velocity
:param r_v: right velocity
:param L: Wheel base (or track) width
:param R: Wheel radius
:return: Linear/Angular velocity
"""
v = (l_v + r_v) * (R / 2)
try:
w = (r_v - l_v) * (R / L)
except ZeroDivisionError:
v, w = 0, 0
return v, w
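# Usage sketch (illustrative wheel speeds in rad/s, with a 0.5 m track and 0.1 m wheel radius):
# diff2uni(l_v=1.0, r_v=3.0, L=0.5, R=0.1) -> (v, w) of roughly (0.2, 0.4)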
|
85cc9498ed7579a077c94e2f395df7ad108acf10
| 303,256 |
import socket
def dnsResolve(host):
"""
Resolves the given DNS hostname into an IP address, and returns it in the dot separated format as a string.
Returns an empty string if there is an error
:param str host: hostname to resolve
:return: Resolved IP address, or empty string if resolution failed.
:rtype: str
"""
try:
return socket.gethostbyname(host)
except socket.gaierror:
return ''
|
b8046c14919c5de59d262e9a6b34eb97d570d640
| 317,714 |
def copy_of(board):
"""
Returns a copy of a board
"""
copy = []
for row_num in range(10):
row = []
for col_num in range(10):
row.append(board[row_num][col_num])
copy.append(row)
return copy
|
dc4f383455165f7b2cc9f7d7ec900485d4cf539e
| 636,028 |
from typing import Dict
def split_entry(entry: str) -> Dict[str, str]:
"""
Splits a given entry into a dictionary of parameters
:param entry: An entry as a formatted string
:return: A dictionary of key value pairs
"""
entries = entry.split(" ")
return {key: value for key, value in [entry.split(":") for entry in entries]}
|
4dfe41869a5a8b3191a17d9140f0b0da450823d7
| 662,650 |
def chunks(seq, size):
"""Breaks a sequence of bytes into chunks of provided size
:param seq: sequence of bytes
:param size: chunk size
:return: generator that yields tuples of sequence chunk and boolean that indicates if chunk is
the last one
"""
length = len(seq)
return ((seq[pos:pos + size], (pos + size < length))
for pos in range(0, length, size))
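# Usage sketch (illustrative byte string): the boolean flags whether more chunks follow.
assert list(chunks(b"abcdef", 4)) == [(b"abcd", True), (b"ef", False)]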
|
25abe8afdb032af0e2b10dbd3c03b369d862813f
| 702,342 |
import re
def normalize_double_n(str):
"""
Normalize double n.
"""
# Replace double n with n'
str = re.sub("nn", "n'", str)
# Remove unnecessary apostrophes
str = re.sub("n'(?=[^aiueoyn]|$)", "n", str)
return str
|
75c09aa64c8c3d1ab65364ef1a0ceabeef5a50e7
| 337,528 |
def replace_string(text, to_find, replacement):
"""Search for items in strings and replace.
Search for characters or sequences of charaters in a string and replace
them i.e. replace all commas.
Args:
text (str): The string to be searched.
to_find (str): The character or sequence of characters to replace.
replacement (str): The string to replace the character(s) with.
Returns:
String with characters replaced.
"""
return text.replace(to_find, replacement)
|
1d52e3c461768400474307571a97f6081d5a1612
| 543,712 |
def is_null_str(value):
"""
Indicate if a string is None or 'None' or 'N/A'
:param value: A string value
:return: True if a string is None or 'None' or 'N/A'
"""
return not value or value == str(None) or value == 'N/A'
|
e8edb22c77ddf712a039f92529d453b7a4947173
| 23,298 |
def get_picam(c):
""" Return a dict of the attributes in a PiCamera"""
d = {}
if type(c).__name__ == "PiCamera":
all_settings = ['analog_gain', 'awb_mode', 'awb_gains', 'contrast', 'drc_strength', 'digital_gain',
'exposure_mode', 'exposure_speed', 'framerate', 'hflip', 'iso',
'meter_mode', 'rotation', 'sharpness',
'shutter_speed', 'sensor_mode', 'saturation',
'still_stats', 'zoom', 'vflip']
for s in all_settings:
d[s] = getattr(c, s)
return d
|
f24b87e81fe094dc0df874585ef890d76453a7f7
| 668,434 |
def New_Dataframe(old_df,indicator_name):
""" create a new dataframe that is composed of only one indicator
Args:
old_df (dataframe): general dataframe from which we extract the new one
indicator_name (string): Name of the indicator that will compose the new dataframe
Returns:
(dataframe): dataframe composed of only the chosen indicator
"""
return old_df.loc[old_df.Indicator == indicator_name]
|
5ccd394a01a70b39b64d2a12ed0aac6f39296a0a
| 6,866 |
def helper(A, k, left, right):
"""binary search of k in A[left:right], return True if found, False otherwise"""
print(f'so far ==> left={left}, right={right}, k={k}, A={A}')
#1- base case
if left > right:
# if empty list there is nothing to search
return False
#2- solve the subproblems
mid = (right - left)//2 + left
if A[mid] < k:
left = mid + 1 # -> search right
return helper(A, k, left, right)
elif A[mid] > k:
right = mid - 1 # -> search left
return helper(A, k, left, right)
else:
return True
#3- combine the sub-solutions
# nothing to combine --> tail recursion (stay tuned for more)
|
3df455e6bf232b7f427c2e0763de1e848e7245e5
| 674,560 |
def subsumes(paradigm_cell, morpheme):
"""Check if a vocabulary item subsumes the features in a paradigm cell.
>>> cell = [['Nom', '+1', '-pl'], ['Acc', '+3', '-pl']]
>>> subsumes(cell, [['+1', '-pl']])
True
>>> subsumes(cell, [['+3', '-pl']])
True
>>> subsumes(cell, [['+3'], ['+1']])
True
>>> subsumes(cell, [['+1', '+3']])
False
>>> subsumes(cell, [['+3'], ['+2']])
False
>>> subsumes([['+intr', '+1', '-2', '-3', '-sg', '+pl']],
... [['+1', '+pl', '+intr']])
True
"""
for mstruct in morpheme:
if not any(set(mstruct) <= set(pstruct) for pstruct in paradigm_cell):
return False
return True
|
f4c7c9c0ad6744ace3eb83f0f30e20ae89dcdce3
| 521,551 |
def hammingDistance(str1, str2):
""" Returns the number of `i`th characters in `str1` that don't match the `i`th character in `str2`.
Args
---
`str1 : string` The first string
`str2 : string` The second string
Returns
---
`differences : int` The differences between `str1` and `str2`
"""
# Convert strings to arrays
a = list(str1)
b = list(str2)
# Determine what n equals
if len(a) < len(b):
n = len(a)
else:
n = len(b)
# Increment the number of distances for each difference
differences = 0
for i in range(n):
if a[i] != b[i]:
differences += 1
return differences
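# Usage sketch (classic example pair): three positions differ.
assert hammingDistance("karolin", "kathrin") == 3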
|
ad35cc79f89171c75a16a13ec6862a2a4abdbd61
| 8,032 |
def trapezoid_area(height, top, bottom):
"""
Computes the area of a trapezoid with the given height and top/bottom
lengths.
"""
return (
# triangle based on longer - shorter of the top/bottom
0.5 * abs(top - bottom) * height
# plus parallelogram based on shorter edge
+ min(top, bottom) * height
)
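# Usage sketch: matches the standard formula (top + bottom) / 2 * height.
assert trapezoid_area(height=2, top=3, bottom=5) == 8.0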
|
c5d654806a8c6aaab41c7fe4014293ebd26da303
| 258,348 |
from typing import Any
def decode_answer(response_data: dict) -> Any:
"""
Decode the answer described by *response_data*, a substructure of an
enrollment document.
Returns a string, number, tuple of strings, or None.
"""
answer = response_data["answer"]
if answer["type"] in ["String", "Number"]:
return answer["value"]
elif answer["type"] == "Option":
chosen_options = map(int, answer["chosenOptions"])
option_tokens = [
option["token"]
for option in response_data["options"] ]
return tuple(
option_tokens[chosen]
for chosen in chosen_options)
elif answer["type"] == "Declined":
return None
else:
raise ValueError(f"Unknown response answer type {answer['type']}")
|
e0c88644f09412a95ab6df929f691d6936140fc3
| 661,425 |
def wf_levenshtein(string_1, string_2):
"""
Calculates the Levenshtein distance between two strings.
This version uses the Wagner-Fischer algorithm.
Usage::
>>> wf_levenshtein('kitten', 'sitting')
3
>>> wf_levenshtein('kitten', 'kitten')
0
>>> wf_levenshtein('', '')
0
"""
len_1 = len(string_1) + 1
len_2 = len(string_2) + 1
d = [0] * (len_1 * len_2)
for i in range(len_1):
d[i] = i
for j in range(len_2):
d[j * len_1] = j
for j in range(1, len_2):
for i in range(1, len_1):
if string_1[i - 1] == string_2[j - 1]:
d[i + j * len_1] = d[i - 1 + (j - 1) * len_1]
else:
d[i + j * len_1] = min(
d[i - 1 + j * len_1] + 1, # deletion
d[i + (j - 1) * len_1] + 1, # insertion
d[i - 1 + (j - 1) * len_1] + 1, # substitution
)
return d[-1]
|
1e92ea31bc284732251989bc46303017c87015e8
| 549,947 |
def is_numeric(series, max_unique=16):
"""Flag if series is numeric."""
if len(set(series.values[:3000])) > max_unique:
return True
return False
|
8f3b492b8eb7a230f8dd01d628b12b39a517332a
| 661,719 |
def number(number: str, fsep: str, tsep: str) -> str:
"""
Format a number using the provided float and thousands separators.
>>> number("1 000 000 000 000", ",", " ")
'1 000 000 000 000'
>>> number("1000000", ",", " ")
'1 000 000'
>>> number("1000000", ".", "")
'1000000'
>>> number("1000000", ".", ",")
'1,000,000'
>>> number("-1000000", ",", " ")
'β1 000 000'
>>> number("-1000000", "", "")
'β1000000'
>>> number("β1000000", ".", ",")
'β1,000,000'
>>> number("4.54609", "," , " ")
'4,54609'
>>> number("4.54609", "." , ",")
'4.54609'
>>> number("22905", "," , ".")
'22.905'
"""
# Remove superfluous spaces
number = number.replace(" ", "")
# Handle unicode minus U+2212 character before doing the conversion
number = number.replace("β", "-")
# Convert
try:
# Integer
res = f"{int(number):,}"
except ValueError:
# Float
res = f"{float(number):,}"
# Replace the current thousands separator with "|";
# then replace the dot with the float separator;
# and lastly replace the "|" with the deisred thousands separator.
# This 3-steps-replacement is needed for when separators are replacing each other.
res = res.replace(",", "|").replace(".", fsep).replace("|", tsep)
# Always return unicode minus U+2212 character for negative numbers
return res.replace("-", "β")
|
eb68d3e7fc344ed3eab94ccb0ccaf9a493c89d27
| 99,921 |
import logging
def identity(image):
"""Return the image as is."""
logging.info("identity({})".format(repr(image)))
return image
|
a82c2d2b70d8c7de14568b3a34f503086f50c072
| 604,491 |
def box(text, gen_text=None):
"""Create an HTML box of text"""
if gen_text:
raw_html = '<div style="padding:8px;font-size:28px;margin-top:28px;margin-bottom:14px;">' + str(
text) + '<span style="color: red">' + str(gen_text) + '</div>'
else:
raw_html = '<div style="border-bottom:1px inset black;border-top:1px inset black;padding:8px;font-size: 28px;">' + str(
text) + '</div>'
return raw_html
|
bed47c8431e9e37a774f53704e33765a0b354a6e
| 539,546 |
import math
def get_col_row(length: int) -> tuple:
"""
The size of the rectangle (r x c) should be decided
by the length of the message, such that c >= r and
c - r <= 1, where c is the number of columns and r
is the number of rows.
:param length:
:return:
"""
r = int(math.sqrt(length))
c = int(math.ceil(math.sqrt(length)))
if r * c < length:
r += 1
return c, r
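# Usage sketch: a 12-character message fits in a 3 x 4 rectangle, returned as (c, r).
assert get_col_row(12) == (4, 3)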
|
f6615c3781706ddd73f78f072c9f7b3669a6460a
| 622,133 |
def find_valid_edges(components, valid_edges):
"""Find all edges between two components in a complete undirected graph.
Args:
components: A [V]-shaped array of boolean component ids. This assumes
there are exactly two nonemtpy components.
valid_edges: An uninitialized array where output is written. On return,
the subarray valid_edges[:end] will contain edge ids k for all
valid edges.
Returns:
The number of valid edges found.
"""
k = 0
end = 0
for v2, c2 in enumerate(components):
for v1 in range(v2):
if c2 ^ components[v1]:
valid_edges[end] = k
end += 1
k += 1
return end
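# Usage sketch (three vertices, components {0, 2} and {1}); edge ids follow the
# (v1, v2) enumeration order (0,1)=0, (0,2)=1, (1,2)=2, so the crossing edges are 0 and 2.
toy_components = [True, False, True]
toy_valid_edges = [0] * 3
toy_end = find_valid_edges(toy_components, toy_valid_edges)
assert (toy_end, toy_valid_edges[:toy_end]) == (2, [0, 2])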
|
83d6a8237c45c74bd695d2f54371b19ae81e493e
| 682,644 |
def parse_alias(alias, quote_char):
"""
Extract the alias if available.
:param alias:
antlr context
:param quote_char:
which string quote character to use
"""
if alias:
alias = alias.ID().getText().strip(quote_char)
else:
alias = None
return alias
|
dae32ca0f9409d60a0711950c5cf527f53e8cf98
| 478,586 |
from typing import List
def is_image(file_name: str, ext_list_image: List[str]) -> bool:
"""Determine if file is image based on known file name extensions."""
ext_list_lower = [ext.lower() for ext in ext_list_image]
fn_lower = file_name.lower()
is_image = fn_lower.endswith(tuple(ext_list_lower))
return is_image
|
959938bdc60bc5ee6304c3c8578cc60a6a1a00ff
| 558,747 |
def etcd_unpack(obj):
"""Take a JSON response object (as a dict) from etcd, and transform
into a dict without the associated etcd cruft.
>>> etcd_unpack({})
{}
>>> etcd_unpack({'node': { 'key': 'a', 'value': 'AA'}})
{'a': 'AA'}
>>> etcd_unpack({'node': {'nodes': [{'value': 'a', 'key': 'A'}, {'value': 'B', 'key': 'b'}], 'dir': True, 'key': 'pa'}})
{'pa': {'A': 'a', 'b': 'B'}}
>>> etcd_unpack(
...     {'node': {'nodes': [{'nodes': [{'value': 'a', 'key': 'A'}], 'dir': True, 'key': 'pa'}], 'dir': True, 'key': 'paa'}}
... )
{'paa': {'pa': {'A': 'a'}}}
>>> etcd_unpack({'node': {'dir': True, 'key': '/resource/flow'}})
{'/resource/flow': {}}
"""
def _unpack_lst(n):
rv = {}
for v in n:
if 'dir' not in v:
rv[v['key']] = v['value']
elif 'nodes' in v:
rv[v['key']] = _unpack_lst(v['nodes'])
else:
rv[v['key']] = {}
return rv
if 'node' not in obj:
return {}
cn = obj['node'] # current node
pn = None
if 'prevNode' in obj:
pn = obj['prevNode'] # previous node
retVal = {}
for n in [cn, pn]:
pc = ''
if n is cn:
pc = 'node'
if n is pn:
pc = 'prevNode'
if n:
if not n['key'] in retVal:
retVal[n['key']] = {}
if 'dir' not in n and 'value' in n:
retVal[n['key']][pc] = n['value']
elif 'dir' not in n and 'value' not in n:
retVal[n['key']][pc] = '{}'
elif 'nodes' in n:
retVal[n['key']][pc] = _unpack_lst(n['nodes'])
else:
retVal[n['key']][pc] = '{}'
return retVal
|
05f6f313574d9b8578045c5946ed674bd1877f72
| 193,382 |
def get_shapefile_srs_from_ds(datasource):
"""Get the spatial reference object of the shapefile
:param datasource: The shapefile datasource
:return: The spatial reference object
"""
layer = datasource.GetLayerByIndex(0)
spatial_ref = layer.GetSpatialRef()
return spatial_ref
|
44e35868fa2ee230dc9e3e3f2f14dbf0eaf99d3e
| 155,158 |