code | docstring | source
---|---|---|
def get(self, *, search, limit=0, headers=None):
return self.transport.forward_request(method='GET', path=self.path, params={'search': search, 'limit': limit}, headers=headers)
|
Retrieves the assets that match a given text search string.
Args:
search (str): Text search string.
limit (int): Limit the number of returned documents. Defaults to
zero meaning that it returns all the matching assets.
headers (dict): Optional headers to pass to the request.
Returns:
:obj:`list` of :obj:`dict`: List of assets that match the query.
|
codesearchnet
|
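A usage sketch for the asset text-search endpoint shown above; the driver class, host and field names are assumptions for illustration only.
# Hypothetical usage of the search endpoint above (assumed client package).
from bigchaindb_driver import BigchainDB

bdb = BigchainDB('https://example.com:9984')
# limit=0 (the default) would return every matching asset.
for asset in bdb.assets.get(search='bicycle', limit=5):
    print(asset['id'], asset['data'])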
def CopyTextToLabel(cls, text, prefix=''):
text = '{0:s}{1:s}'.format(prefix, text)
return cls._INVALID_LABEL_CHARACTERS_REGEX.sub('_', text)
|
Copies a string to a label.
A label only supports a limited set of characters therefore
unsupported characters are replaced with an underscore.
Args:
text (str): label text.
prefix (Optional[str]): label prefix.
Returns:
str: label.
|
codesearchnet
|
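A standalone sketch of the label-sanitizing technique above; the exact pattern behind `_INVALID_LABEL_CHARACTERS_REGEX` is an assumption here (anything outside `[A-Za-z0-9_]`).
import re

# Assumed stand-in for the class-level _INVALID_LABEL_CHARACTERS_REGEX.
_INVALID_LABEL_CHARACTERS_REGEX = re.compile(r'[^A-Za-z0-9_]')

def copy_text_to_label(text, prefix=''):
    # Replace every unsupported character with an underscore.
    return _INVALID_LABEL_CHARACTERS_REGEX.sub('_', '{0:s}{1:s}'.format(prefix, text))

print(copy_text_to_label('event log: c:\\windows', prefix='src_'))  # src_event_log__c__windows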
def _maybe_track_assets(self, graph_def):
asset_tracker = {}
for node in graph_def.node:
if node.name.startswith('FileIdentity'):
asset_tracker[node.input[0]] = None
if not asset_tracker:
return {}
for node in graph_def.node:
if node.name in asset_tracker:
tensor_proto = node.attr['value'].tensor
with context.eager_mode(), ops.device('CPU'):
node_value = gen_parsing_ops.parse_tensor(tensor_proto.SerializeToString(), dtypes.string).numpy()
asset_tracker[node.name] = [self._track_trackable(asset.Asset(n), name=node.name + '_' + str(i), overwrite=True) for i, n in enumerate(node_value)]
return asset_tracker
|
Finds and tracks nodes in `graph_def` that refer to asset files.
Args:
graph_def: Serialized graph representation of this dataset.
Returns:
A dictionary mapping the node name of an asset constant to a tracked
`asset.Asset` object.
|
github-repos
|
def fulfill_order(self, order_number, site_code=None, email_opt_in=False):
max_fulfillment_retries = get_configuration('MAX_FULFILLMENT_RETRIES', site_code=site_code)
api = get_ecommerce_client(site_code=site_code)
try:
logger.info('Requesting fulfillment of order [%s].', order_number)
api.orders(order_number).fulfill.put(email_opt_in=email_opt_in)
except exceptions.HttpClientError as exc:
status_code = exc.response.status_code
if status_code == 406:
logger.info('Order [%s] has already been fulfilled. Ignoring.', order_number)
raise Ignore()
else:
logger.warning(
'Fulfillment of order [%s] failed because of HttpClientError. Retrying',
order_number,
exc_info=True
)
_retry_order(self, exc, max_fulfillment_retries, order_number)
except (exceptions.HttpServerError, exceptions.Timeout, SSLError) as exc:
_retry_order(self, exc, max_fulfillment_retries, order_number)
|
Fulfills an order.
Arguments:
order_number (str): Order number indicating which order to fulfill.
Returns:
None
|
juraj-google-style
|
def replace_name_with_id(cls, name):
try:
int(name)
return name
except ValueError:
pass
if (name.split('-')[0] in Meta._MODEL_ABBREVS):
return int(name.split('-', 1)[1])
try:
result = cls.ES.get_record_by_name(cls.ES_INDEX_NAME, name)
if result:
return result['id']
except pulsarpy.elasticsearch_utils.MultipleHitsException as e:
raise
raise RecordNotFound("Name '{}' for model '{}' not found.".format(name, cls.__name__))
|
Used to replace a foreign key reference using a name with an ID. Works by searching the
record in Pulsar and expects to find exactly one hit. First, will check if the foreign key
reference is an integer value and if so, returns that as it is presumed to be the foreign key.
Raises:
`pulsarpy.elasticsearch_utils.MultipleHitsException`: Multiple hits were returned from the name search.
`pulsarpy.models.RecordNotFound`: No results were produced from the name search.
|
codesearchnet
|
def refresh(self) -> bool:
with self._lock:
min_pending_timestamp = WatermarkManager.WATERMARK_POS_INF
has_pending_elements = False
for input_bundle in self._pending:
for wv in input_bundle.get_elements_iterable():
has_pending_elements = True
if wv.timestamp < min_pending_timestamp:
min_pending_timestamp = wv.timestamp
pending_holder = WatermarkManager.WATERMARK_POS_INF
if has_pending_elements:
pending_holder = min_pending_timestamp - TIME_GRANULARITY
input_watermarks = [tw.output_watermark for tw in self._input_transform_watermarks]
input_watermarks.append(WatermarkManager.WATERMARK_POS_INF)
producer_watermark = min(input_watermarks)
self._input_watermark = max(self._input_watermark, min(pending_holder, producer_watermark))
earliest_hold = WatermarkManager.WATERMARK_POS_INF
for hold in self._keyed_earliest_holds.values():
if hold < earliest_hold:
earliest_hold = hold
new_output_watermark = min(self._input_watermark, earliest_hold)
advanced = new_output_watermark > self._output_watermark
self._output_watermark = new_output_watermark
return advanced
|
Refresh the watermark for a given transform.
This method looks at the watermark coming from all input PTransforms, and
the timestamp of the minimum element, as well as any watermark holds.
Returns:
True if the watermark has advanced, and False if it has not.
|
github-repos
|
def downsample_bottleneck(x, output_channels, dim='2d', stride=1, scope='h'):
conv = CONFIG[dim]['conv']
with tf.variable_scope(scope):
x = conv(x, output_channels, 1, strides=stride, padding='SAME', activation=None)
return x
|
Downsamples 'x' by `stride` using a 1x1 convolution filter.
Args:
x: input tensor of size [N, H, W, C]
output_channels: Desired number of output channels.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: What stride to use. Usually 1 or 2.
scope: Optional variable scope.
Returns:
A downsampled tensor of size [N, H/2, W/2, output_channels] if stride
is 2, else returns a tensor of size [N, H, W, output_channels] if
stride is 1.
|
codesearchnet
|
def compile_regex_from_str(self, ft_str):
sequence = []
for m in re.finditer('\\[([^]]+)\\]', ft_str):
ft_mask = fts(m.group(1))
segs = self.all_segs_matching_fts(ft_mask)
sub_pat = '({})'.format('|'.join(segs))
sequence.append(sub_pat)
pattern = ''.join(sequence)
regex = re.compile(pattern)
return regex
|
Given a string describing features masks for a sequence of segments,
return a regex matching the corresponding strings.
Args:
ft_str (str): feature masks, each enclosed in square brackets, in
which the features are delimited by any standard delimiter.
Returns:
Pattern: regular expression pattern equivalent to `ft_str`
|
codesearchnet
|
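A reduced sketch of the same idea: each bracketed feature mask becomes an alternation over the segments that match it. The feature-to-segments table is a made-up stand-in for `all_segs_matching_fts`.
import re

SEGMENTS_BY_FEATURE = {'+voi': ['b', 'd', 'g'], '-voi': ['p', 't', 'k']}  # assumed toy data

def compile_regex_from_str(ft_str):
    sequence = []
    for m in re.finditer(r'\[([^]]+)\]', ft_str):
        segs = SEGMENTS_BY_FEATURE[m.group(1)]
        sequence.append('({})'.format('|'.join(segs)))
    return re.compile(''.join(sequence))

pattern = compile_regex_from_str('[+voi][-voi]')
print(bool(pattern.match('bt')))  # True: a voiced segment followed by a voiceless one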
def get_doc_sources(api_name):
if api_name == tf_export.TENSORFLOW_API_NAME:
return _TENSORFLOW_DOC_SOURCES
if api_name == tf_export.KERAS_API_NAME:
return _KERAS_DOC_SOURCES
return {}
|
Get a map from module to a DocSource object.
Args:
api_name: API you want to generate (e.g. `tensorflow` or `estimator`).
Returns:
Map from module name to DocSource object.
|
github-repos
|
def add_server(self, name, prefer=False):
if not name or re.match(r'^[\s]+$', name):
raise ValueError('ntp server name must be specified')
if prefer:
name = '%s prefer' % name
cmd = self.command_builder('ntp server', value=name)
return self.configure(cmd)
|
Add or update an NTP server entry to the node config
Args:
name (string): The IP address or FQDN of the NTP server.
prefer (bool): Sets the NTP server entry as preferred if True.
Returns:
True if the operation succeeds, otherwise False.
|
juraj-google-style
|
def size(self):
return sum((len(self._dump_tensor_data[device_name]) for device_name in self._dump_tensor_data))
|
Total number of dumped tensors in the dump root directory.
Returns:
(`int`) The total number of dumped tensors in the dump root directory.
|
github-repos
|
def _dict_to_tensor(self, x, k1, k2, k3):
return array_ops_stack.stack([array_ops_stack.stack([array_ops_stack.stack([x[i, j, k] for k in range(k3)]) for j in range(k2)]) for i in range(k1)])
|
Convert a dictionary to a tensor.
Args:
x: A k1 * k2 * k3 dictionary.
k1: First dimension of x.
k2: Second dimension of x.
k3: Third dimension of x.
Returns:
A k1 * k2 * k3 tensor.
|
github-repos
|
def local_attention_1d(q, k, v, length_dim, key_dim, value_dim, autoregressive=True, length_dim_num_splits=1, radius=128, sequence_id=1, attention_kwargs=None):
length_per_split = length_dim.size // length_dim_num_splits
block_length = max(radius, 128)
while ((length_per_split % block_length) != 0):
block_length -= 1
query_block_length = mtf.Dimension('query_block_length', block_length)
memory_block_length = mtf.Dimension('memory_block_length', block_length)
num_blocks = mtf.Dimension(length_dim.name, length_dim.size // block_length)
def _reshape_query(x):
return mtf.replace_dimensions(x, length_dim, [num_blocks, query_block_length])
def _reshape_memory(x):
x = mtf.replace_dimensions(x, length_dim, [num_blocks, memory_block_length])
return (mtf.left_halo_exchange if autoregressive else mtf.halo_exchange)(x, num_blocks, memory_block_length, radius)
q = _reshape_query(q)
k = _reshape_memory(k)
if v:
v = _reshape_memory(v)
else:
v = k
if (sequence_id is None):
sequence_id = 1
if ((not isinstance(sequence_id, mtf.Tensor)) or (length_dim not in sequence_id.shape.dims)):
sequence_id += mtf.zeros(q.mesh, [length_dim], tf.int32)
q_sequence_id = _reshape_query(sequence_id)
m_sequence_id = _reshape_memory(sequence_id)
pos = mtf.range(q.mesh, length_dim, dtype=tf.int32)
q_pos = _reshape_query(pos)
m_pos = _reshape_memory(pos)
padded_memory_block_length = mtf.Dimension('memory_block_length', (((1 if autoregressive else 2) * radius) + block_length))
relative_position = (m_pos - q_pos)
illegal = mtf.not_equal(q_sequence_id, m_sequence_id)
illegal = mtf.logical_or(illegal, mtf.less_equal(relative_position, (- radius)))
illegal = mtf.logical_or(illegal, mtf.greater(relative_position, (0 if autoregressive else radius)))
mask = (mtf.cast(illegal, q.dtype) * (- 1000000000.0))
o = attention(q, k, v, padded_memory_block_length, key_dim, value_dim, mask, **attention_kwargs)
return mtf.replace_dimensions(o, [num_blocks, query_block_length], length_dim)
|
Attention to a neighborhood around the source.
If autoregressive, then query position p can only see memory positions
in the range (p - radius, p].
If not autoregressive, then query position p can only see memory positions
in the range (p - window_size, p + radius].
Args:
q: a Tensor containing length_dim
k: a Tensor containing length_dim
v: an optional Tensor containing length_dim. If none then uses v=k.
length_dim: a Dimension
key_dim: a Dimension (the channels dimension of q and k)
value_dim: a Dimension (the channels dimension of v)
autoregressive: a boolean
length_dim_num_splits: an optional integer indicating how many ways the
length dimension is split
radius: an integer
sequence_id: a Tensor or an integer
attention_kwargs: optional keyword arguments for attention()
Returns:
a Tensor with the shape x.shape - key_dim + value_dim
Raises:
ValueError: if channels or depth don't match.
|
codesearchnet
|
def server_url_for_websocket_url(url):
if url.startswith("ws:"):
reprotocoled = "http" + url[2:]
elif url.startswith("wss:"):
reprotocoled = "https" + url[3:]
else:
raise ValueError("URL has non-websocket protocol " + url)
if not reprotocoled.endswith("/ws"):
raise ValueError("websocket URL does not end in /ws")
return reprotocoled[:-2]
|
Convert a ``ws(s)`` URL for a Bokeh server into the appropriate
``http(s)`` URL for the websocket endpoint.
Args:
url (str):
An ``ws(s)`` URL ending in ``/ws``
Returns:
str:
The corresponding ``http(s)`` URL.
Raises:
ValueError:
If the input URL is not of the proper form.
|
juraj-google-style
|
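Two quick checks of the conversion above, assuming the function is in scope:
print(server_url_for_websocket_url("ws://localhost:5006/ws"))   # http://localhost:5006/
print(server_url_for_websocket_url("wss://demo.bokeh.org/ws"))  # https://demo.bokeh.org/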
def bind_rows(df, other, join='outer', ignore_index=False):
df = pd.concat([df, other], join=join, ignore_index=ignore_index, axis=0)
return df
|
Binds DataFrames "vertically", stacking them together. This is equivalent
to `pd.concat` with `axis=0`.
Args:
df (pandas.DataFrame): Top DataFrame (passed in via pipe).
other (pandas.DataFrame): Bottom DataFrame.
Kwargs:
join (str): One of `"outer"` or `"inner"`. Outer join will preserve
columns not present in both DataFrames, whereas inner joining will
drop them.
ignore_index (bool): Indicates whether to consider pandas indices as
part of the concatenation (defaults to `False`).
|
codesearchnet
|
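A worked example of the vertical concatenation above, assuming `bind_rows` is importable; column names are illustrative.
import pandas as pd

top = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
bottom = pd.DataFrame({'a': [5], 'c': [6]})

# Outer join keeps 'a', 'b' and 'c'; cells missing from one frame become NaN.
print(bind_rows(top, bottom, join='outer', ignore_index=True))
# Inner join keeps only the shared column 'a'.
print(bind_rows(top, bottom, join='inner', ignore_index=True))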
def learn_one(self, x: beam.Row) -> None:
if len(x.__dict__) != 1:
raise ValueError('RobustZScore.learn_one expected univariate input, but got %s' % str(x))
v = next(iter(x))
self._mad_tracker.push(v)
|
Updates the `MadTracker` with a new data point.
Args:
x: A `beam.Row` containing a single numerical value.
|
github-repos
|
def tables(self):
select = ('SELECT name FROM sqlite_master',)
query = self.execute(*select)
result = query.fetchall()
return [row[0] for row in result]
|
Returns a list of table names.
Example:
>>> db.tables
["bar", "foo"]
Returns:
list of str: One string for each table name.
|
codesearchnet
|
def get_all_publications(return_namedtuples=True):
sources = [
ben_cz.get_publications,
grada_cz.get_publications,
cpress_cz.get_publications,
zonerpress_cz.get_publications,
]
publications = []
for source in sources:
publications.extend(
filters.filter_publications(source())
)
if return_namedtuples:
publications = map(lambda x: x.to_namedtuple(), publications)
return publications
|
Get a list of publications from all available sources.
Args:
return_namedtuples (bool, default True): Convert :class:`.Publication`
structures to namedtuples (used in AMQP
communication).
Returns:
list: List of :class:`.Publication` structures converted to namedtuple.
|
juraj-google-style
|
def unwrap_model(model: nn.Module, recursive: bool=False) -> nn.Module:
if is_accelerate_available():
kwargs = {}
if recursive:
if not is_accelerate_available('0.29.0'):
raise RuntimeError('Setting `recursive=True` to `unwrap_model` requires `accelerate` v0.29.0. Please upgrade your version of accelerate')
else:
kwargs['recursive'] = recursive
return extract_model_from_parallel(model, **kwargs)
elif hasattr(model, 'module'):
return unwrap_model(model.module)
else:
return model
|
Recursively unwraps a model from potential containers (as used in distributed training).
Args:
model (`torch.nn.Module`): The model to unwrap.
recursive (`bool`, *optional*, defaults to `False`):
Whether to recursively extract all cases of `module.module` from `model` as well as unwrap child sublayers
recursively, not just the top-level distributed containers.
|
github-repos
|
def finalize_options(self):
self.cwd = os.path.abspath(os.path.dirname(__file__))
self.test_dir = os.path.join(self.cwd, 'tests')
|
Finalizes the command's options.
Args:
self (CoverageCommand): the ``CoverageCommand`` instance
Returns:
``None``
|
juraj-google-style
|
def plot(self, tag, mpl_plt, step=None, close_plot=True):
if step is None:
step = self._step
else:
self._step = step
fig = mpl_plt.get_current_fig_manager()
img_w, img_h = fig.canvas.get_width_height()
image_buf = io.BytesIO()
mpl_plt.savefig(image_buf, format='png')
image_summary = Summary.Image(
encoded_image_string=image_buf.getvalue(),
colorspace=4,
height=img_h,
width=img_w)
summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
self.add_summary(summary, step)
if close_plot:
mpl_plt.close()
|
Saves matplotlib plot output to summary image.
Args:
tag: str: label for this data
mpl_plt: matplotlib stateful pyplot object with prepared plotting state
step: int: training step
close_plot: bool: automatically closes plot
|
juraj-google-style
|
def __init__(self, mutate_fn, throttle_rampup=True, hint_num_workers=_DEFAULT_HINT_NUM_WORKERS):
self._mutate_fn = mutate_fn
self._throttle_rampup = throttle_rampup
self._hint_num_workers = hint_num_workers
|
Initializes a Mutate transform.
Args:
mutate_fn: Instance of `DatastoreMutateFn` to use.
throttle_rampup: Whether to enforce a gradual ramp-up.
hint_num_workers: A hint for the expected number of workers, used to
estimate appropriate limits during ramp-up throttling.
|
github-repos
|
def __call__(self, fn):
def fail(app, *args, **kwargs):
if isinstance(self.enable, bool):
enabled = self.enable
app.tcex.log.debug('Fail on input is ({}).'.format(self.enable))
else:
enabled = getattr(app.args, self.enable)
app.tcex.log.debug('Fail on input is ({}) for ({}).'.format(enabled, self.enable))
if not isinstance(enabled, bool):
app.tcex.playbook.exit(
1, 'The enable value must be a boolean for fail on input.'
)
if enabled is True:
if self.arg is None:
arg_name = 'input'
conditional_value = app.tcex.playbook.read(list(args)[0], embedded=False)
else:
arg_name = self.arg
conditional_value = app.tcex.playbook.read(
getattr(app.args, self.arg), embedded=False
)
if conditional_value in self.values:
app.tcex.log.error(
'Invalid value ({}) provided for ({}).'.format(conditional_value, arg_name)
)
app.tcex.exit(1, self.msg)
return fn(app, *args, **kwargs)
return fail
|
Implement __call__ function for decorator.
Args:
fn (function): The decorated function.
Returns:
function: The custom decorator function.
|
juraj-google-style
|
def get_names(file_dir, files):
total_list = []
name_list = []
get_sub = False
for path, subdir, dir_files in os.walk(file_dir):
if not get_sub:
total_list = subdir[:]
get_sub = True
else:
break
for user in total_list:
has_file = True
for f in files:
file_path = file_dir + user + "/" + f + ".txt"
if not os.path.exists(file_path):
has_file = False
break
if has_file:
name_list.append(user)
if len(name_list) == 0:
print("********Error: Cannot find any user who completes the files*************", file=ERROR_LOG)
return name_list
|
Get the annotator name list based on a list of files
Args:
file_dir: AMR file folder
files: a list of AMR names, e.g. nw_wsj_0001_1
Returns:
a list of user names who annotate all the files
|
juraj-google-style
|
def shot_noise(x, severity=1):
c = [60, 25, 12, 5, 3][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255
return around_and_astype(x_clip)
|
Shot noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added shot noise.
|
juraj-google-style
|
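The shot-noise recipe above on a toy image: scale to [0, 1], draw Poisson counts at a rate set by the severity, then rescale and clip back to [0, 255].
import numpy as np

x = np.full((2, 2), 128, dtype=np.uint8)  # uniform grey toy "image"
c = 25                                    # severity 2 in the table above
noisy = np.clip(np.random.poisson(x / 255. * c) / float(c), 0, 1) * 255
print(noisy.round().astype(np.uint8))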
def export_obj(filename, cutout, level=0):
if ".obj" not in filename:
filename = filename + ".obj"
vs, fs = mcubes.marching_cubes(cutout, level)
mcubes.export_obj(vs, fs, filename)
|
Converts a dense annotation to an OBJ file, using Marching Cubes (PyMCubes).
Arguments:
filename (str): The filename to write out to
cutout (numpy.ndarray): The dense annotation
level (int): The level at which to run mcubes
Returns:
boolean success
|
juraj-google-style
|
def _GetNextLogCountPerToken(token):
global _log_counter_per_token
_log_counter_per_token[token] = (1 + _log_counter_per_token.get(token, (- 1)))
return _log_counter_per_token[token]
|
Wrapper for _log_counter_per_token.
Args:
token: The token for which to look up the count.
Returns:
The number of times this function has been called with
*token* as an argument (starting at 0)
|
codesearchnet
|
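A minimal sketch of how the per-token counter above supports a "log only every N occurrences" helper; the surrounding names are illustrative.
_log_counter_per_token = {}

def _get_next_log_count_per_token(token):
    # First call for a token returns 0, then 1, 2, ...
    _log_counter_per_token[token] = 1 + _log_counter_per_token.get(token, -1)
    return _log_counter_per_token[token]

def log_every_n(msg, n=3):
    if _get_next_log_count_per_token(msg) % n == 0:
        print(msg)

for _ in range(7):
    log_every_n('slow path taken')  # printed on the 1st, 4th and 7th call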
def complete_multipart_upload(self, request):
parts = {'Parts': request.parts}
try:
self.client.complete_multipart_upload(Bucket=request.bucket, Key=request.object, UploadId=request.upload_id, MultipartUpload=parts)
except Exception as e:
raise messages.S3ClientError(str(e), get_http_error_code(e))
|
Completes a multipart upload to S3
Args:
request: (UploadPartRequest) input message
Returns:
(Void) The response message.
|
github-repos
|
def remat(f):
return jax.checkpoint(f)
|
Implementation of rematerialization.
Args:
f: The function or operation to rematerialize.
Returns:
A function wrapping f that defines a custom gradient, which
recomputes f on the backwards pass of a gradient call.
|
github-repos
|
async def inspect(self, task_id: str) -> Mapping[(str, Any)]:
response = (await self.docker._query_json('tasks/{task_id}'.format(task_id=task_id), method='GET'))
return response
|
Return info about a task
Args:
task_id: is ID of the task
|
codesearchnet
|
def __init__(self, batch_url=None, retryable_codes=None,
response_encoding=None):
self.api_requests = []
self.retryable_codes = retryable_codes or []
self.batch_url = batch_url or 'https://www.googleapis.com/batch'
self.response_encoding = response_encoding
|
Initialize a batch API request object.
Args:
batch_url: Base URL for batch API calls.
retryable_codes: A list of integer HTTP codes that can be retried.
response_encoding: The encoding type of response content.
|
juraj-google-style
|
def variable_product(variables: list[cfg.Variable]) -> Iterable[tuple[cfg.Binding, ...]]:
return itertools.product(*(v.bindings for v in variables))
|
Take the Cartesian product of a number of Variables.
Args:
variables: A sequence of Variables.
Returns:
A list of lists of Values, where each sublist has one element from each
of the given Variables.
|
github-repos
|
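The same Cartesian-product idea with plain lists standing in for `cfg.Variable` bindings, since the `cfg` types are not available here:
import itertools

bindings_a = ['int', 'str']    # stand-in for one Variable's bindings
bindings_b = ['list', 'dict']  # stand-in for another Variable's bindings

for combo in itertools.product(bindings_a, bindings_b):
    print(combo)  # ('int', 'list'), ('int', 'dict'), ('str', 'list'), ('str', 'dict')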
def get_dataset(self):
package_id = self.data.get('package_id')
if (package_id is None):
raise HDXError('Resource has no package id!')
return hdx.data.dataset.Dataset.read_from_hdx(package_id)
|
Return dataset containing this resource
Returns:
hdx.data.dataset.Dataset: Dataset containing this resource
|
codesearchnet
|
def get_reduced_symbols(symbols):
reduced_symbols = []
for ss in symbols:
if (not (ss in reduced_symbols)):
reduced_symbols.append(ss)
return reduced_symbols
|
Reduces expanded list of symbols.
Args:
symbols: list containing any chemical symbols as often as
the atom appears in the structure
Returns:
reduced_symbols: list in which each symbol appears only once
|
codesearchnet
|
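The same order-preserving de-duplication can also be written with `dict.fromkeys`, which relies on dictionaries keeping insertion order (Python 3.7+):
symbols = ['Si', 'O', 'O', 'Si', 'H']
print(list(dict.fromkeys(symbols)))  # ['Si', 'O', 'H']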
def sign(allocate_quota_request):
if not isinstance(allocate_quota_request, sc_messages.AllocateQuotaRequest):
raise ValueError(u'Invalid request')
op = allocate_quota_request.allocateOperation
if op is None or op.methodName is None or op.consumerId is None:
logging.error(u'Bad %s: not initialized => not signed', allocate_quota_request)
raise ValueError(u'allocate_quota request must be initialized with an operation')
md5 = hashlib.md5()
md5.update(op.methodName.encode('utf-8'))
md5.update(b'\x00')
md5.update(op.consumerId.encode('utf-8'))
if op.labels:
signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))
for value_set in op.quotaMetrics:
md5.update(b'\x00')
md5.update(value_set.metricName.encode('utf-8'))
for mv in value_set.metricValues:
metric_value.update_hash(md5, mv)
md5.update(b'\x00')
return md5.digest()
|
Obtains a signature for an operation in an `AllocateQuotaRequest`
Args:
op (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`): an
operation used in a `AllocateQuotaRequest`
Returns:
string: a secure hash generated from the operation
|
juraj-google-style
|
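A stripped-down sketch of the signing scheme above: hash the identifying fields of an operation, separated by NUL bytes, to obtain a stable digest. The field values are illustrative.
import hashlib

def sign_operation(method_name: str, consumer_id: str) -> bytes:
    md5 = hashlib.md5()
    md5.update(method_name.encode('utf-8'))
    md5.update(b'\x00')  # NUL separator between fields, as in the full version
    md5.update(consumer_id.encode('utf-8'))
    return md5.digest()

print(sign_operation('library.Books.List', 'project:example').hex())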
def extend(self, name, opts, info):
tifo = self.info.copy()
tifo.update(info)
topt = self.opts.copy()
topt.update(opts)
tobj = self.__class__(self.modl, name, tifo, topt)
tobj.subof = self.name
return tobj
|
Extend this type to construct a sub-type.
Args:
name (str): The name of the new sub-type.
opts (dict): The type options for the sub-type.
info (dict): The type info for the sub-type.
Returns:
(synapse.types.Type): A new sub-type instance.
|
juraj-google-style
|
def to_bqm(self, model):
linear = ((v, float(model.get_py_value(bias)))
for v, bias in self.linear.items())
quadratic = ((u, v, float(model.get_py_value(bias)))
for (u, v), bias in self.quadratic.items())
offset = float(model.get_py_value(self.offset))
return dimod.BinaryQuadraticModel(linear, quadratic, offset, dimod.SPIN)
|
Given a pysmt model, return a bqm.
Adds the values of the biases as determined by the SMT solver to a bqm.
Args:
model: A pysmt model.
Returns:
:obj:`dimod.BinaryQuadraticModel`
|
juraj-google-style
|
def flush(self, force=False):
super(GCSRecordsPool, self).flush()
if force:
extra_padding = self._buf_size % self._GCS_BLOCK_SIZE
if extra_padding > 0:
self._write("\x00" * (self._GCS_BLOCK_SIZE - extra_padding))
self._filehandle.flush()
|
Flush pool contents.
Args:
force: Inserts additional padding to achieve the minimum block size
required for GCS.
|
juraj-google-style
|
def destroy_dns(app='', env='dev', **_):
client = boto3.Session(profile_name=env).client('route53')
generated = get_details(app=app, env=env)
record = generated.dns_elb()
zone_ids = get_dns_zone_ids(env=env, facing='external')
for zone_id in zone_ids:
record_sets = client.list_resource_record_sets(HostedZoneId=zone_id, StartRecordName=record, StartRecordType='CNAME', MaxItems='1')
for found_record in record_sets['ResourceRecordSets']:
assert destroy_record(client=client, found_record=found_record, record=record, zone_id=zone_id)
return True
|
Destroy DNS records.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment.
regions (str): AWS region.
Returns:
bool: True upon successful completion.
|
codesearchnet
|
def __send_notification(self, message, title, title_link='', color='good',
fields='', log_level=LogLv.INFO):
if log_level < self.log_level:
return None
payload = self.__build_payload(message, title, title_link, color, fields)
try:
response = self.__post(payload)
except Exception:
raise Exception(traceback.format_exc())
return response
|
Send a message to a channel.
Args:
title: Message title.
title_link: Link of the message title.
message: Message body.
color: Message line color on Slack. This parameter should be one of the following values: 'good', 'warning',
'danger' or any hex color code.
Returns:
response: Response of Slack API.
Raises:
Exception:
|
juraj-google-style
|
def reward_scope(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor],
next_state: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:
scope = {}
scope.update(self.non_fluents_scope())
scope.update(self.state_scope(state))
scope.update(self.action_scope(action))
scope.update(self.next_state_scope(next_state))
return scope
|
Returns the complete reward fluent scope for the
current `state`, `action` fluents, and `next_state` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
next_state (Sequence[tf.Tensor]): The next state fluents.
Returns:
A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
|
juraj-google-style
|
def __set_unkown_effect(self, hgvs_string):
unknown_effect_list = ['?', '(=)', '=']
if (hgvs_string in unknown_effect_list):
self.unknown_effect = True
elif ('(' in hgvs_string):
self.unknown_effect = True
else:
self.unknown_effect = False
if ('?' in hgvs_string):
self.is_missing_info = True
else:
self.is_missing_info = False
|
Sets a flag for unknown effect according to HGVS syntax. The
COSMIC database also uses unconventional question marks to denote
missing information.
Args:
hgvs_string (str): hgvs syntax with "p." removed
|
codesearchnet
|
def get_type_from_api_entity(self, api_entity):
merged = self.group_types_data.copy()
merged.update(self.indicator_types_data)
print(merged)
for (key, value) in merged.items():
if (value.get('apiEntity') == api_entity):
return key
return None
|
Returns the object type as a string given an API entity.
Args:
api_entity:
Returns:
|
codesearchnet
|
def post_slack_message(message=None, channel=None, username=None, icon_emoji=None):
LOG.debug('Slack Channel: %s\nSlack Message: %s', channel, message)
slack = slacker.Slacker(SLACK_TOKEN)
try:
slack.chat.post_message(channel=channel, text=message, username=username, icon_emoji=icon_emoji)
LOG.info('Message posted to %s', channel)
except slacker.Error:
LOG.info("error posted message to %s", channel)
|
Format the message and post to the appropriate slack channel.
Args:
message (str): Message to post to slack
channel (str): Desired channel. Must start with #
|
juraj-google-style
|
def dates_in_range(start_date, end_date):
return [
start_date + timedelta(n)
for n in range(int((end_date - start_date).days))
]
|
Returns all dates between two dates.
Inclusive of the start date but not the end date.
Args:
start_date (datetime.date)
end_date (datetime.date)
Returns:
(list) of datetime.date objects
|
juraj-google-style
|
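A quick check of the half-open behaviour above, assuming `dates_in_range` is in scope:
from datetime import date

print(dates_in_range(date(2021, 1, 1), date(2021, 1, 4)))
# [datetime.date(2021, 1, 1), datetime.date(2021, 1, 2), datetime.date(2021, 1, 3)] -- end date excluded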
def create_position_ids_from_input_ids(self, input_ids, padding_idx):
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
|
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
symbols are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: torch.Tensor
padding_idx: int
Returns: torch.Tensor
|
github-repos
|
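A worked example of the position-id trick above: padding tokens keep `padding_idx`, real tokens count up from `padding_idx + 1`.
import torch

input_ids = torch.tensor([[5, 7, 9, 1, 1]])  # 1 is the padding id here
padding_idx = 1
mask = input_ids.ne(padding_idx).int()                        # [[1, 1, 1, 0, 0]]
incremental = torch.cumsum(mask, dim=1).type_as(mask) * mask  # [[1, 2, 3, 0, 0]]
print(incremental.long() + padding_idx)                       # [[2, 3, 4, 1, 1]]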
def _is_ready(self, as_of):
if self.is_one_off():
return (self.initial_billing_cycle.date_range.lower <= as_of)
else:
return True
|
Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
|
codesearchnet
|
def update_service(name, service_map):
if name in service_map:
service = service_map[name]
data = service.update()
if not data:
logger.warning('no data received for service: %s', name)
else:
data['service_name'] = service.service_name
CACHE[name] = dict(data=data, updated=datetime.now())
else:
logger.warning('service not found: %s', name)
if name in CACHE:
return add_time(CACHE[name])
return {}
|
Get an update from the specified service.
Arguments:
name (:py:class:`str`): The name of the service.
service_map (:py:class:`dict`): A mapping of service names to
:py:class:`flash.service.core.Service` instances.
Returns:
:py:class:`dict`: The updated data.
|
juraj-google-style
|
def get_history(self, filters=(), pagesize=15, offset=0):
response = None
try:
response = requests.get(urls.history(self._giid), headers={'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}, params={'offset': int(offset), 'pagesize': int(pagesize), 'notificationCategories': filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
|
codesearchnet
|
def authenticate(self, request, username=None, password=None):
if not hasattr(settings, 'MASTER_PASSWORD'):
logging.debug("Master password not set.")
return None
if check_password(password, settings.MASTER_PASSWORD):
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
if settings.MASTER_NOTIFY:
logger.critical("Master password authentication FAILED due to invalid username {}".format(username))
logger.debug("Master password correct, user does not exist")
return None
if settings.MASTER_NOTIFY:
logger.critical("Master password authentication SUCCEEDED with username {}".format(username))
logger.debug("Authentication with master password successful")
return user
logger.debug("Master password authentication failed")
return None
|
Authenticate a username-password pair.
Creates a new user if one is not already in the database.
Args:
username
The username of the `User` to authenticate.
password
The master password.
Returns:
`User`
|
juraj-google-style
|
def link_asset_content_key(access_token, asset_id, encryptionkey_id, ams_redirected_rest_endpoint):
path = '/Assets'
full_path = ''.join([path, "('", asset_id, "')", '/$links/ContentKeys'])
full_path_encoded = urllib.parse.quote(full_path, safe='')
endpoint = ''.join([ams_rest_endpoint, full_path_encoded])
uri = ''.join([ams_redirected_rest_endpoint, 'ContentKeys', "('", encryptionkey_id, "')"])
body = (('{"uri": "' + uri) + '"}')
return do_ams_post(endpoint, full_path_encoded, body, access_token)
|
Link Media Service Asset and Content Key.
Args:
access_token (str): A valid Azure authentication token.
asset_id (str): A Media Service Asset ID.
encryption_id (str): A Media Service Encryption ID.
ams_redirected_rest_endpoint (str): A Media Service Redirected Endpoint.
Returns:
HTTP response. JSON body.
|
codesearchnet
|
def __init__(self, model_layers, *args, **kwargs):
inputs = kwargs.pop('input_tensor', None)
super(_SubclassModel, self).__init__(*args, **kwargs)
for i, layer in enumerate(model_layers):
setattr(self, self._layer_name_for_i(i), layer)
self.num_layers = len(model_layers)
if inputs is not None:
self._set_inputs(inputs)
|
Instantiate a model.
Args:
model_layers: a list of layers to be added to the model.
*args: Model's args
**kwargs: Model's keyword args, at most one of input_tensor -> the input
tensor required for ragged/sparse input.
|
github-repos
|
def __getitem__(self, item):
if item not in self._declarations:
raise self.UndeclaredKeyError('Configuration key not declared', item)
if item in self._flag_values:
if item in self._loaded_values:
self._logger.warning(
'Overriding loaded value for %s (%s) with flag value: %s',
item, self._loaded_values[item], self._flag_values[item])
return self._flag_values[item]
if item in self._loaded_values:
return self._loaded_values[item]
if self._declarations[item].has_default:
return self._declarations[item].default_value
raise self.UnsetKeyError(
'Configuration value not set and has no default', item)
|
Get a config value via item access.
Order of precedence is:
- Value provided via --config-value flag.
- Value loaded via load*() methods.
- Default value as declared with conf.declare()
Args:
item: Config key name to get.
|
juraj-google-style
|
def broadcast(self, tensor, destinations):
validate_destinations(destinations)
return self.broadcast_implementation(tensor, destinations)
|
Broadcast `tensor` to `destinations`.
This can only be called in the cross-replica context.
Args:
tensor: a `tf.Tensor` like object. The value to broadcast.
destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a
`tf.Tensor` alike object, or a device string. It specifies the devices
to broadcast to. Note that if it's a `tf.Variable`, the value is
broadcasted to the devices of that variable, this method doesn't update
the variable.
Returns:
A `tf.Tensor` or `tf.distribute.DistributedValues`.
|
github-repos
|
def CalculateHashes(self, base_path_specs, output_writer):
for base_path_spec in base_path_specs:
file_system = resolver.Resolver.OpenFileSystem(base_path_spec)
file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)
if file_entry is None:
logging.warning('Unable to open base path specification:\n{0:s}'.format(
base_path_spec.comparable))
continue
self._CalculateHashesFileEntry(file_system, file_entry, '', output_writer)
|
Recursively calculates hashes starting with the base path specifications.
Args:
base_path_specs (list[dfvfs.PathSpec]): source path specifications.
output_writer (StdoutWriter): output writer.
|
juraj-google-style
|
def l2_regression_sq_loss(y, target, name=None):
with tf.name_scope(name, 'l2_regression_sq', [y, target]) as scope:
y = tf.convert_to_tensor(y, name='y')
target = tf.convert_to_tensor(target, name='target')
return reduce_batch_sum(tf.square(y - target), name=scope)
|
Calculates the sum of squared errors between y and target.
Args:
y: the calculated values.
target: the desired values.
name: the name for this op, defaults to l2_regression
Returns:
A tensorflow op.
|
juraj-google-style
|
def delete_as(access_token, subscription_id, resource_group, as_name):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/availabilitySets/', as_name,
'?api-version=', COMP_API])
return do_delete(endpoint, access_token)
|
Delete availability set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
as_name (str): Name of the availability set.
Returns:
HTTP response.
|
juraj-google-style
|
def __init__(self, step, metric, labels=None):
self.step = step
self.metric = metric
self.labels = labels if labels else {}
|
Initializes ``MetricKey``.
Args:
step: A string with the step this metric cell is part of.
metric: A ``MetricName`` namespace+name that identifies a metric.
labels: An arbitrary set of labels that also identifies the metric.
|
github-repos
|
def verify_callback(
self,
origin_authorization,
url,
body,
content_type='application/x-www-form-urlencoded'):
token = self.token_of_request(url, body, content_type)
authorization = 'QBox {0}'.format(token)
return origin_authorization == authorization
|
Callback verification.
Args:
origin_authorization: the Authorization header value from the callback request
url: the URL of the callback request
body: the body of the callback request
content_type: the Content-Type of the callback request body
Returns:
True if verification succeeds, False otherwise
|
juraj-google-style
|
def update_exit_code(self, code: int):
if code:
if self._exit_code:
self._exit_code = min(self._exit_code, code)
else:
self._exit_code = code
|
Set the exit code if it is more serious than before.
Args:
code: The exit code.
|
juraj-google-style
|
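The "keep the most serious code" rule above, tried in isolation; here lower non-zero codes are treated as more serious and zero never overrides an existing error.
class ExitTracker:
    def __init__(self):
        self._exit_code = 0

    def update_exit_code(self, code: int):
        if code:
            if self._exit_code:
                self._exit_code = min(self._exit_code, code)
            else:
                self._exit_code = code

tracker = ExitTracker()
for code in (0, 4, 2, 8):
    tracker.update_exit_code(code)
print(tracker._exit_code)  # 2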
def set_shutdown(self, name, default=False, disable=True):
commands = [('interface %s' % name)]
commands.append(self.command_builder('shutdown', value=True, default=default, disable=disable))
return self.configure(commands)
|
Configures the interface shutdown state
Default configuration for set_shutdown is disable=True, meaning
'no shutdown'. Setting both default and disable to False will
effectively enable shutdown on the interface.
Args:
name (string): The interface identifier. It must be a full
interface name (ie Ethernet, not Et)
default (boolean): Specifies to default the interface shutdown
disable (boolean): Specifies to disable interface shutdown, i.e.
disable=True => no shutdown
Returns:
True if the operation succeeds otherwise False is returned
|
codesearchnet
|
def create_multipart_upload(self, request):
try:
boto_response = self.client.create_multipart_upload(Bucket=request.bucket, Key=request.object, ContentType=request.mime_type)
response = messages.UploadResponse(boto_response['UploadId'])
except Exception as e:
raise messages.S3ClientError(str(e), get_http_error_code(e))
return response
|
Initiates a multipart upload to S3 for a given object
Args:
request: (UploadRequest) input message
Returns:
(UploadResponse) The response message.
|
github-repos
|
def _device_assignments(self) -> list[traceable_stack.TraceableObject]:
return self._device_code_locations or []
|
Code locations for device context managers active at op creation.
This property will return a list of traceable_stack.TraceableObject
instances where .obj is a string representing the assigned device
(or information about the function that would be applied to this op
to compute the desired device) and the filename and lineno members
record the location of the relevant device context manager.
For example, suppose file_a contained these lines:
file_a.py:
15: with tf.device('/gpu:0'):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the device context manager
would have these member values:
t_obj.obj -> '/gpu:0'
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._device_assignments would return the list [t_obj].
Returns:
[str: traceable_stack.TraceableObject, ...] as per this method's
description, above.
|
github-repos
|
def convert_selu(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting selu ...')
if (names == 'short'):
tf_name = ('SELU' + random_string(4))
elif (names == 'keep'):
tf_name = w_name
else:
tf_name = (w_name + str(random.random()))
selu = keras.layers.Activation('selu', name=tf_name)
layers[scope_name] = selu(layers[inputs[0]])
|
Convert selu layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
codesearchnet
|
def create_report(self, uri, timeout=-1):
logger.debug('Creating Report (uri = %s)', uri)
task, _ = self._connection.post(uri, {})
if not task:
raise exceptions.HPOneViewException(RESOURCE_CLIENT_TASK_EXPECTED)
task = self._task_monitor.get_completed_task(task, timeout)
return task['taskOutput']
|
Creates a report and returns the output.
Args:
uri: URI
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
list:
|
juraj-google-style
|
def from_input(cls, input, workdir=None, manager=None):
return cls(input, workdir=workdir, manager=manager)
|
Create an instance of `AbinitTask` from an ABINIT input.
Args:
ainput: `AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
|
juraj-google-style
|
def distribute_variable(value, layout):
return distribute_tensor(value, layout)
|
Create a distributed variable for JAX.
Since JAX doesn't have a variable class, this will just return a `jax.Array`
with the corresponding layout/sharding specified.
Note that this function should be used in eager context, not in jitted
function.
Args:
value: the initial value of the variable.
layout: `TensorLayout` for the created variable, or a
JAX-supported layout instance
(e.g. `jax.experimental.layout.Layout`, `jax.sharding.Sharding`).
Returns:
jax.Array which is the distributed variable.
|
github-repos
|
def line_count(fn):
with open(fn) as f:
for i, l in enumerate(f):
pass
return i + 1
|
Get line count of file
Args:
fn (str): Path to file
Return:
Number of lines in file (int)
|
juraj-google-style
|
def abspath(cur_file, parent=0) -> str:
file_path = os.path.abspath(cur_file).replace('\\', '/')
if os.path.isdir(file_path) and parent == 0: return file_path
adj = 1 - os.path.isdir(file_path)
return '/'.join(file_path.split('/')[:-(parent + adj)])
|
Absolute path
Args:
cur_file: __file__ or file or path str
parent: level of parent to look for
Returns:
str
|
juraj-google-style
|
def format_earning(data: pd.DataFrame, header: pd.DataFrame) -> pd.DataFrame:
if data.dropna(subset=['value']).empty:
return pd.DataFrame()
res = pd.concat([grp.loc[:, ['value']].set_index(header.value) for (_, grp) in data.groupby(data.position)], axis=1)
res.index.name = None
res.columns = res.iloc[0]
res = res.iloc[1:].transpose().reset_index().apply(pd.to_numeric, downcast='float', errors='ignore')
res.rename(columns=(lambda vv: '_'.join(vv.lower().split()).replace('fy_', 'fy')), inplace=True)
years = res.columns[res.columns.str.startswith('fy')]
lvl_1 = (res.level == 1)
for yr in years:
res.loc[:, yr] = res.loc[:, yr].round(1)
pct = f'{yr}_pct'
res.loc[:, pct] = 0.0
res.loc[lvl_1, pct] = res.loc[lvl_1, pct].astype(float).round(1)
res.loc[lvl_1, pct] = ((res.loc[lvl_1, yr] / res.loc[lvl_1, yr].sum()) * 100)
sub_pct = []
for (_, snap) in res[::(- 1)].iterrows():
if (snap.level > 2):
continue
if (snap.level == 1):
if (len(sub_pct) == 0):
continue
sub = pd.concat(sub_pct, axis=1).transpose()
res.loc[sub.index, pct] = ((res.loc[sub.index, yr] / res.loc[sub.index, yr].sum()) * 100)
sub_pct = []
if (snap.level == 2):
sub_pct.append(snap)
res.set_index('segment_name', inplace=True)
res.index.name = None
return res
|
Standardized earning outputs and add percentage by each blocks
Args:
data: earning data block
header: earning headers
Returns:
pd.DataFrame
Examples:
>>> format_earning(
... data=pd.read_pickle('xbbg/tests/data/sample_earning.pkl'),
... header=pd.read_pickle('xbbg/tests/data/sample_earning_header.pkl')
... ).round(2)
level fy2017 fy2017_pct
Asia-Pacific 1.0 3540.0 66.43
China 2.0 1747.0 49.35
Japan 2.0 1242.0 35.08
Singapore 2.0 551.0 15.56
United States 1.0 1364.0 25.60
Europe 1.0 263.0 4.94
Other Countries 1.0 162.0 3.04
|
codesearchnet
|
def count_de_novos_per_transcript(ensembl, gene_id, de_novos=[]):
transcripts = get_transcript_ids(ensembl, gene_id)
if len(transcripts) == 0:
raise IndexError("{0} lacks coding transcripts".format(gene_id))
counts = {}
for key in transcripts:
try:
gene = construct_gene_object(ensembl, key)
total = len(get_de_novos_in_transcript(gene, de_novos))
if total > 0:
counts[key] = {}
counts[key]["n"] = total
counts[key]["len"] = transcripts[key]
except ValueError:
pass
return counts
|
count de novos in transcripts for a gene.
Args:
ensembl: EnsemblRequest object to request data from ensembl
gene_id: HGNC symbol for gene
de_novos: list of de novo positions, so we can check they all fit in
the gene transcript
Returns:
dictionary of lengths and de novo counts, indexed by transcript IDs.
|
juraj-google-style
|
def imdirect_open(fp):
img = pil_open(fp, 'r')
if (img.format == 'JPEG'):
if isinstance(fp, string_types):
exif = piexif.load(text_type_to_use(fp))
else:
fp.seek(0)
exif = piexif.load(fp.read())
orientation_value = exif.get('0th', {}).get(piexif.ImageIFD.Orientation)
if ((orientation_value is None) or (orientation_value == 1)):
return img
img_rot = autorotate(img)
exif = update_exif_for_rotated_image(exif)
with io.BytesIO() as bio:
img_rot.save(bio, format='jpeg', exif=piexif.dump(exif))
bio.seek(0)
img_rot_new = pil_open(bio, 'r')
img_rot_new.load()
img = img_rot_new
return img
|
Opens, identifies the given image file, and rotates it if it is a JPEG.
Note that this method does NOT employ the lazy loading methodology that
the PIL Images otherwise use. This is done to avoid having to save new files when rotating.
Args:
fp: A filename (string), pathlib.Path object or a file-like object.
Returns:
The image as an :py:class:`~PIL.Image.Image` object.
Raises:
IOError: If the file cannot be found, or the image cannot be
opened and identified.
|
codesearchnet
|
def __getitem__(self, indices):
return self.array[indices]
|
Select elements in the 0th dimension.
Args:
indices: the indices to select. Only needs to support one dimension,
the 0th dimension. Should support a `slice` or a list, tuple,
`np.array` or 1D tensor.
Returns: A slice of `self.array`.
|
github-repos
|
def _on_receive(self, client, userdata, message):
topic = message.topic
encoded = message.payload
try:
packet = json.loads(encoded)
except ValueError:
self._logger.warn('Could not decode json packet: %s', encoded)
return
try:
seq = packet['sequence']
message_data = packet['message']
except KeyError:
self._logger.warn('Message received did not have required sequence and message keys: %s', packet)
return
if (topic not in self.queues):
found = False
for (_, regex, callback, ordered) in self.wildcard_queues:
if regex.match(topic):
self.queues[topic] = PacketQueue(0, callback, ordered)
found = True
break
if (not found):
self._logger.warn('Received message for unknown topic: %s', topic)
return
self.queues[topic].receive(seq, [seq, topic, message_data])
|
Callback called whenever we receive a message on a subscribed topic
Args:
client (string): The client id of the client receiving the message
userdata (string): Any user data set with the underlying MQTT client
message (object): The mesage with a topic and payload.
|
codesearchnet
|
def parse(self, key, value):
if value is not None:
try:
return self._parser(value)
except Exception:
raise ParsingError("Error parsing {}".format(key))
elif self._default is not SENTINAL:
return self._default
else:
raise KeyError(key)
|
Parse the environment value for a given key against the schema.
Args:
key: The name of the environment variable.
value: The value to be parsed.
|
juraj-google-style
|
def get_flat(self):
self._check_sess()
return np.concatenate([v.eval(session=self.sess).flatten() for v in self.variables.values()])
|
Gets the weights and returns them as a flat array.
Returns:
1D Array containing the flattened weights.
|
codesearchnet
|
def set_continue(self, name, action, seqno, value=None, default=False, disable=False):
commands = [('route-map %s %s %s' % (name, action, seqno))]
if default:
commands.append('default continue')
elif disable:
commands.append('no continue')
else:
if ((not str(value).isdigit()) or (value < 1)):
raise ValueError('seqno must be a positive integer unless default or disable is specified')
commands.append(('continue %s' % value))
return self.configure(commands)
|
Configures the routemap continue value
Args:
name (string): The full name of the routemap.
action (string): The action to take for this routemap clause.
seqno (integer): The sequence number for the routemap clause.
value (integer): The value to configure for the routemap continue
default (bool): Specifies to default the routemap continue value
disable (bool): Specifies to negate the routemap continue value
Returns:
True if the operation succeeds otherwise False is returned
|
codesearchnet
|
def from_moy(cls, moy, leap_year=False):
if (not leap_year):
num_of_minutes_until_month = (0, 44640, 84960, 129600, 172800, 217440, 260640, 305280, 349920, 393120, 437760, 480960, 525600)
else:
num_of_minutes_until_month = (0, 44640, (84960 + 1440), (129600 + 1440), (172800 + 1440), (217440 + 1440), (260640 + 1440), (305280 + 1440), (349920 + 1440), (393120 + 1440), (437760 + 1440), (480960 + 1440), (525600 + 1440))
for monthCount in range(12):
if (int(moy) < num_of_minutes_until_month[(monthCount + 1)]):
month = (monthCount + 1)
break
try:
day = (int(((moy - num_of_minutes_until_month[(month - 1)]) / (60 * 24))) + 1)
except UnboundLocalError:
raise ValueError(('moy must be positive and smaller than 525600. Invalid input %d' % moy))
else:
hour = int(((moy / 60) % 24))
minute = int((moy % 60))
return cls(month, day, hour, minute, leap_year)
|
Create Ladybug Datetime from a minute of the year.
Args:
moy: An integer value 0 <= and < 525600
|
codesearchnet
|
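The minute-of-year arithmetic above, worked by hand for a non-leap year; minute 44640 is the first minute of February (31 days * 24 h * 60 min).
moy = 44640 + 2 * 24 * 60 + 9 * 60 + 30   # February 3rd, 09:30
feb_start = 44640                          # minutes elapsed before February

day = int((moy - feb_start) / (60 * 24)) + 1
hour = int((moy / 60) % 24)
minute = int(moy % 60)
print(day, hour, minute)  # 3 9 30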
def __init__(self, graph, resolver, namespace, scope, closure_types):
super(Analyzer, self).__init__(graph)
self.resolver = resolver
self.namespace = namespace
self.scope = scope
self.closure_types = closure_types
context_types = {n: t for n, t in closure_types.items() if n not in scope.bound}
if context_types:
self.context_types = _TypeMap()
self.context_types.types = context_types
else:
self.context_types = None
|
Creates a new analyzer.
Args:
graph: cfg.Graph
resolver: Resolver
namespace: Dict[str, Any]
scope: activity.Scope
closure_types: Dict[QN, Set]
|
github-repos
|
def _GetTripSequence(self, schedule=None):
if schedule is None:
schedule = getattr(self, "_schedule", None)
if schedule is None:
warnings.warn("No longer supported. _schedule attribute is used to get "
"stop_times table", DeprecationWarning)
cursor = schedule._connection.cursor()
cursor.execute("SELECT trip_id,stop_sequence FROM stop_times "
"WHERE stop_id=?",
(self.stop_id, ))
return [(schedule.GetTrip(row[0]), row[1]) for row in cursor]
|
Return a list of (trip, stop_sequence) for all trips visiting this stop.
A trip may be in the list multiple times with different index.
stop_sequence is an integer.
Args:
schedule: Deprecated, do not use.
|
juraj-google-style
|
def remove(self, annotation):
if (annotation.id in self._annotations):
del self._annotations[annotation.id]
self._dirty = True
|
Removes an annotation.
Args:
annotation (gkeepapi.node.Annotation): An Annotation object.
Returns:
gkeepapi.node.Annotation: The Annotation.
|
codesearchnet
|
def autodecode(b):
import warnings
import chardet
try:
return b.decode()
except UnicodeError:
result = chardet.detect(b)
if (result['confidence'] < 0.95):
warnings.warn(('autodecode failed with utf-8; guessing %s' % result['encoding']))
return b.decode(result['encoding'])
|
Try to decode ``bytes`` to text - try default encoding first, otherwise try to autodetect
Args:
b (bytes): byte string
Returns:
str: decoded text string
|
codesearchnet
|
def evaluate(self, tensors):
sess = ops.get_default_session() or self.cached_session()
return sess.run(tensors)
|
Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
|
github-repos
|
def load_structure_path(self, structure_path, file_type):
if (not file_type):
raise ValueError('File type must be specified')
self.file_type = file_type
self.structure_dir = op.dirname(structure_path)
self.structure_file = op.basename(structure_path)
|
Load a structure file and provide pointers to its location
Args:
structure_path (str): Path to structure file
file_type (str): Type of structure file
|
codesearchnet
|
def add_function_def(self, fdef):
self.ensure_initialized()
if is_oss:
fdef_string = fdef.SerializeToString()
pywrap_tfe.TFE_ContextAddFunctionDef(self._handle, fdef_string, len(fdef_string))
else:
pywrap_tfe.TFE_ContextAddFunctionDefNoSerialization(self._handle, fdef)
|
Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fdef: A FunctionDef protocol buffer message.
|
github-repos
|
def unpack_dosdate(self, offset):
try:
o = self._offset + offset
return dosdate(self._buf[o:o + 2], self._buf[o + 2:o + 4])
except struct.error:
raise OverrunBufferException(o, len(self._buf))
|
Returns a datetime from the DOSDATE and DOSTIME starting at
the relative offset.
Arguments:
- `offset`: The relative offset from the start of the block.
Throws:
- `OverrunBufferException`
|
juraj-google-style
|
def setup_ui(uifile, base_instance=None):
ui = QtCompat.loadUi(uifile)
if not base_instance:
return ui
else:
for member in dir(ui):
if not member.startswith('__') and \
member != 'staticMetaObject':
setattr(base_instance, member, getattr(ui, member))
return ui
|
Load a Qt Designer .ui file and returns an instance of the user interface
Args:
uifile (str): Absolute path to .ui file
base_instance (QWidget): The widget into which UI widgets are loaded
Returns:
QWidget: the base instance
|
juraj-google-style
|
def name_to_vector(name):
if (not isinstance(name, unicode)):
name = name.decode('utf-8')
name = name.lower()
name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')
name = ''.join(filter((lambda x: (x.isalpha() or (x == ' '))), list(name)))
return sorted(name.split(), key=(lambda x: len(x)), reverse=True)
|
Convert `name` to the ASCII vector.
Example:
>>> name_to_vector("ing. Franta Putšálek")
['putsalek', 'franta', 'ing']
Args:
name (str): Name which will be vectorized.
Returns:
list: Vector created from name.
|
codesearchnet
|
def draw(self, time: float, frametime: float, target: moderngl.Framebuffer):
raise NotImplementedError("draw() is not implemented")
|
Draw function called by the system every frame when the effect is active.
This method raises ``NotImplementedError`` unless implemented.
Args:
time (float): The current time in seconds.
frametime (float): The time the previous frame used to render in seconds.
target (``moderngl.Framebuffer``): The target FBO for the effect.
|
juraj-google-style
|
def check_type(obj: Any, candidate_type: Any, reltype: str='invariant') -> bool:
if (reltype not in ['invariant', 'covariant', 'contravariant']):
raise ValueError(f' Variadic type {reltype} is unknown')
if ((type(candidate_type) == type) and (reltype in ['invariant'])):
return isinstance(obj, candidate_type)
if ((type(candidate_type) == type) and (reltype in ['covariant'])):
return issubclass(obj.__class__, candidate_type)
if ((type(candidate_type) == type) and (reltype in ['contravariant'])):
return issubclass(candidate_type, obj.__class__)
if (type(candidate_type) == type(Any)):
return True
if (type(candidate_type) == type(Union)):
return any((check_type(obj, t, reltype) for t in candidate_type.__args__))
if ((type(candidate_type) == type(Tuple)) and (tuple in candidate_type.__bases__)):
if (not hasattr(obj, '__len__')):
return False
if (len(candidate_type.__args__) != len(obj)):
return False
return all((check_type(o, t, reltype) for (o, t) in zip(obj, candidate_type.__args__)))
if ((type(candidate_type) == type(Dict)) and (dict in candidate_type.__bases__)):
if (type(obj) != dict):
return False
return all(((check_type(k, candidate_type.__args__[0], reltype) and check_type(v, candidate_type.__args__[1], reltype)) for (k, v) in obj.items()))
if ((type(candidate_type) == type(List)) and ((list in candidate_type.__bases__) or (set in candidate_type.__bases__))):
if (not hasattr(obj, '__len__')):
return False
return all((check_type(o, candidate_type.__args__[0], reltype) for o in obj))
if (type(candidate_type) == TypeVar):
if (not candidate_type.__constraints__):
return True
if (not (candidate_type.__covariant__ or candidate_type.__contravariant__)):
return any((check_type(obj, t) for t in candidate_type.__constraints__))
if (type(candidate_type) == type(Type)):
return check_type(obj, candidate_type.__args__[0], reltype='covariant')
if (inspect.isclass(candidate_type) and (reltype in ['invariant'])):
return isinstance(obj, candidate_type)
raise ValueError(f'Cannot check against {reltype} type {candidate_type}')
|
Tell whether a value corresponds to a type,
optionally specifying the type as contravariant or covariant.
Args:
obj (Any): The value to check.
candidate_type (Any): The type to check the object against.
reltype (:obj:`str`, optional): Variance of the type, can be contravariant,
covariant or invariant. By default is invariant.
Returns:
bool: True if the type is fine, False otherwise
Raises:
ValueError: When the variance or the type are not among the ones the function can manage.
|
codesearchnet
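A few illustrative calls (a sketch only; the results assume the typing internals this implementation targets, e.g. that `List[int].__bases__` contains `list`):
from typing import Dict, List, Tuple, Union

check_type(3, int)                          # invariant: plain isinstance check
check_type("hi", Union[int, str])           # True if any branch of the Union matches
check_type((1, "x"), Tuple[int, str])       # length and per-slot types are checked
check_type({"a": 1}, Dict[str, int])        # keys and values are checked recursively
check_type(True, int, reltype="covariant")  # subclass check: bool is a subclass of int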
|
def Patch(self, request, global_params=None):
config = self.GetMethodConfig('Patch')
return self._RunMethod(config, request, global_params=global_params)
|
Update an association between a GCP project and a GitHub Enterprise server.
Args:
request: (CloudbuildProjectsLocationsGithubEnterpriseConfigsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
|
github-repos
|
def make_processor(self, name, mappings, processor_type, **kwargs):
from .processor import Processor
if self.processors.get(name):
raise LookupError('processor has already been created')
if isinstance(mappings, list):
mappings = [self.get_rml(item) for item in mappings]
else:
mappings = [self.get_rml(mappings)]
self.processors[name] = Processor[processor_type](mappings, **kwargs)
self.processors[name].name = name
return self.processors[name]
|
Instantiates an RML Processor and registers it in the manager.
Args:
-----
name: the name under which to register the processor
mappings: the list of RML mapping definitions to use
processor_type: the name of the RML processor to use
|
codesearchnet
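A hypothetical usage sketch; the manager instance, mapping file names and processor type below are illustrative only.
manager = RmlManager()                       # assumed manager exposing make_processor()/get_rml()
proc = manager.make_processor(
    name='bibframe_to_schema',               # registry key; reusing it raises LookupError
    mappings=['bibframe_map.ttl', 'schema_map.ttl'],
    processor_type='SPARQLBatchProcessor',   # key into the Processor class registry
)
assert manager.processors['bibframe_to_schema'] is proc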
|
def filter_out_spontaneous_genes(genes, custom_spont_id=None):
new_genes = DictList()
for gene in genes:
if (not is_spontaneous(gene, custom_id=custom_spont_id)):
new_genes.append(gene)
return new_genes
|
Return the DictList of genes that are not spontaneous in a model.
Args:
genes (DictList): Genes DictList
custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
DictList: genes excluding ones that are spontaneous
|
codesearchnet
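A short sketch, assuming a cobrapy model is available (the bundled textbook E. coli model ships the spontaneous gene s0001):
from cobra.io import load_model

model = load_model('textbook')                           # any model whose .genes is a DictList
structural = filter_out_spontaneous_genes(model.genes)
print(len(model.genes) - len(structural), 'spontaneous gene(s) removed')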
|
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: torch.device=None, dtype: torch.float=None) -> Tensor:
if dtype is None:
dtype = self.dtype
if not (attention_mask.dim() == 2 and self.config.is_decoder):
if device is not None:
warnings.warn('The `device` argument is deprecated and will be removed in v5 of Transformers.', FutureWarning)
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
if self.config.is_decoder:
extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder(input_shape, attention_mask, device)
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(f'Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})')
extended_attention_mask = extended_attention_mask.to(dtype=dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min
return extended_attention_mask
|
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (`Tuple[int]`):
The shape of the input to the model.
Returns:
`torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
|
github-repos
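A small sketch of what the transformation does for a plain 2D padding mask on an encoder-style model (no causal mask involved):
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])           # one sequence; last position is padding
dtype = torch.float32
extended = attention_mask[:, None, None, :].to(dtype)   # broadcastable shape (batch, 1, 1, seq_len)
extended = (1.0 - extended) * torch.finfo(dtype).min    # 0.0 where attended, a huge negative where masked
print(extended.shape)                                   # torch.Size([1, 1, 1, 4])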
|
def element_if_exists(self, using, value):
try:
self._execute(Command.FIND_ELEMENT, {
'using': using,
'value': value
})
return True
except:
return False
|
Check if an element exists in the current context.
Support:
Android iOS Web(WebView)
Args:
using(str): The element location strategy.
value(str): The value of the location strategy.
Returns:
Return True if the element exists and False otherwise.
Raises:
WebDriverException.
|
juraj-google-style
|
def _verify_structure_compatible(input_name, spec_name, input_, spec):
try:
nest.assert_same_structure(input_, spec, expand_composites=True)
except (ValueError, TypeError) as e:
raise TypeError('{} must have the same element structure as {}.\n\n{}'.format(input_name, spec_name, str(e))) from e
nest.map_structure(functools.partial(_verify_spec_compatible, input_name, spec_name), input_, spec)
|
Verifies that a possibly-structured symbol has types compatible with another.
See _verify_spec_compatible for a more concrete meaning of "compatible".
Unlike _verify_spec_compatible, which handles singular Tensor-spec objects,
_verify_structure_compatible can process structures recognized by tf.nest.
Args:
input_name: A name to use for `input_` in error messages.
spec_name: A name to use for `spec` in error messages.
input_: Any, value to verify. May, but doesn't need to, be a structure.
spec: Any, value that `input_` must be compatible with. May, but doesn't
need to, be a structure.
Raises:
ValueError if the two types have been determined not to be compatible.
|
github-repos
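A sketch of the structural half of the check using the public tf.nest API (the helper itself is internal to autograph):
import tensorflow as tf

value = {'x': tf.constant([1, 2]), 'y': tf.constant(1.0)}
spec = {'x': tf.TensorSpec([2], tf.int32), 'y': tf.TensorSpec([], tf.float32)}
tf.nest.assert_same_structure(value, spec, expand_composites=True)  # same nesting -> passes
# A spec with different nesting, e.g. {'x': ...} alone, would raise ValueError/TypeError.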
|
def UpdatePreprocessor(self, line):
if Match('^\\s*#\\s*(if|ifdef|ifndef)\\b', line):
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match('^\\s*#\\s*(else|elif)\\b', line):
if self.pp_stack:
if (not self.pp_stack[(- 1)].seen_else):
self.pp_stack[(- 1)].seen_else = True
self.pp_stack[(- 1)].stack_before_else = copy.deepcopy(self.stack)
self.stack = copy.deepcopy(self.pp_stack[(- 1)].stack_before_if)
else:
pass
elif Match('^\\s*#\\s*endif\\b', line):
if self.pp_stack:
if self.pp_stack[(- 1)].seen_else:
self.stack = self.pp_stack[(- 1)].stack_before_else
self.pp_stack.pop()
else:
pass
|
Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
|
codesearchnet
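The directive-matching patterns above were reconstructed from context; a quick sanity check of what they accept, with Match written here as a stand-in for cpplint's anchored re.match helper:
import re

def Match(pattern, s):
    return re.match(pattern, s)   # cpplint caches compiled patterns; the anchoring behaviour is the same

bool(Match('^\\s*#\\s*(if|ifdef|ifndef)\\b', '#ifdef SWIG'))   # True
bool(Match('^\\s*#\\s*(else|elif)\\b', '  #else'))             # True
bool(Match('^\\s*#\\s*endif\\b', '#endif  // SWIG'))           # True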
|
def __replaceSpecialValues(self, decisions):
error = []
for row, line in enumerate(decisions):
if '.' in line:
for i, element in enumerate(line):
if row == 0:
error.append(
"Row: {}colume: {}==> don't have parent value".format(str(row).ljust(4), str(i).ljust(4)))
if element == self.__parentSymbol:
if decisions[row - 1][i] == '.':
error.append("Row: {}Colume: {}==> don't have parent value".format(str(row).ljust(4),
str(i).ljust(4)))
decisions[row][i] = decisions[row - 1][i]
if error:
view.Tli.showErrors('ReplaceSpecialValuesError', error)
else:
return decisions
|
Replaces special values in the decisions array.
Args:
decisions (array of array of str): Standard decision array format.
Raises:
ValueError: A row element doesn't have a parent value.
Returns:
New decision array with updated values.
|
juraj-google-style
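A conceptual before/after for the parent-value substitution; the parent symbol (shown here as '^') and the '.' missing-value marker are assumptions read off the code above, not values taken from the library.
decisions = [
    ['A', 'B', 'C'],
    ['^', 'D', '^'],   # '^' stands for self.__parentSymbol: inherit the value from the row above
]
# conceptually, after replacement the second row reads ['A', 'D', 'C']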
|
def compile_reward(self, scope: Dict[(str, TensorFluent)]) -> TensorFluent:
reward_expr = self.rddl.domain.reward
with self.graph.as_default():
with tf.name_scope('reward'):
return self._compile_expression(reward_expr, scope)
|
Compiles the reward function given the fluent `scope`.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for reward evaluation.
Returns:
A :obj:`rddl2tf.fluent.TensorFluent` representing the reward function.
|
codesearchnet
|
def RegisterUtility(utility_name, version_mapping=None):
def IsFunctionOrMethod(member):
"Determines if given member is a function or method.\n\n These two are used in combination to ensure that inspect finds all of a\n given utility class's methods in both Python 2 and 3.\n\n Args:\n member: object that is a member of a class, to be determined whether it is\n a function or method.\n\n Returns:\n A boolean that is True if the provided member is a function or method, or\n False if it isn't.\n "
return (inspect.isfunction(member) or inspect.ismethod(member))
def MethodDecorator(utility_method, version):
'Decorates a method in the utility class.'
registry_name = (('%s/%s' % (utility_name, version)) if version else utility_name)
@wraps(utility_method)
def Wrapper(*args, **kwargs):
AddToUtilityRegistry(registry_name)
return utility_method(*args, **kwargs)
return Wrapper
def ClassDecorator(cls):
'Decorates a utility class.'
for (name, method) in inspect.getmembers(cls, predicate=IsFunctionOrMethod):
if (not name.startswith('_')):
if (not getattr(method, '__self__', None)):
setattr(cls, name, MethodDecorator(method, (version_mapping.get(name) if version_mapping else None)))
return cls
return ClassDecorator
|
Decorator that registers a class with the given utility name.
This will only register the utilities being used if the UtilityRegistry is
enabled. Note that only the utility class's public methods will cause the
utility name to be added to the registry.
Args:
utility_name: A str specifying the utility name associated with the class.
version_mapping: A dict containing optional version strings to append to the
utility string for individual methods; where the key is the method name and
the value is the text to be appended as the version.
Returns:
The decorated class.
|
codesearchnet
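A hypothetical usage sketch (the utility name, version string and class are illustrative; AddToUtilityRegistry is assumed to be defined in the same module):
@RegisterUtility('report_downloader', version_mapping={'DownloadReport': 'v202408'})
class ReportDownloader(object):

    def DownloadReport(self, definition):
        return 'report-contents'

    def _BuildRequest(self, definition):   # leading underscore: left unwrapped, never registered
        return definition

ReportDownloader().DownloadReport({})      # records 'report_downloader/v202408' in the registry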
|