code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (3 classes) |
---|---|---|
def index_sample(self, md5, index_name):
generator = self.stream_sample(md5)
for row in generator:
self.indexer.index_data(row, index_name)
|
Index a stored sample with the Indexer.
Args:
md5: the md5 of the sample
index_name: the name of the index
Returns:
Nothing
|
juraj-google-style
|
def easeInOutElastic(n, amplitude=1, period=0.5):
_checkRange(n)
n *= 2
if (n < 1):
return (easeInElastic(n, amplitude=amplitude, period=period) / 2)
else:
return ((easeOutElastic((n - 1), amplitude=amplitude, period=period) / 2) + 0.5)
|
An elastic tween function wobbles towards the midpoint.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
|
codesearchnet
|
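A minimal usage sketch for the tween above, assuming `easeInOutElastic` (and its `easeInElastic`/`easeOutElastic` helpers) are importable from the same module; values may briefly overshoot 1.0 while the wobble settles:

```python
# Sample the tween at evenly spaced time steps (illustrative only).
steps = 10
for i in range(steps + 1):
    t = i / steps                 # time progress in [0.0, 1.0]
    p = easeInOutElastic(t)       # line progress; can overshoot past 1.0 near the ends
    print(f"t={t:.1f} -> {p:+.3f}")
```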
def _get_weights(max_length):
weights = [1]
for i in range(1, max_length):
weights.append(weights[i-1] * len(_ALPHABET) + 1)
weights.reverse()
return weights
|
Get weights for each offset in str of certain max length.
Args:
max_length: max length of the strings.
Returns:
A list of ints as weights.
Example:
If max_length is 2 and alphabet is "ab", then we have order "", "a", "aa",
"ab", "b", "ba", "bb". So the weight for the first char is 3.
|
juraj-google-style
|
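A worked run of the weighting scheme described in the docstring above, assuming `_ALPHABET = "ab"` as in the example:

```python
# Stand-alone reproduction of _get_weights with a two-letter alphabet.
_ALPHABET = "ab"

def _get_weights(max_length):
    weights = [1]
    for i in range(1, max_length):
        weights.append(weights[i - 1] * len(_ALPHABET) + 1)
    weights.reverse()
    return weights

# For max_length=2 the order is "", "a", "aa", "ab", "b", "ba", "bb":
# advancing the first char skips 3 strings, advancing the last char skips 1.
print(_get_weights(2))  # [3, 1]
```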
async def delete(
self, name: str, *, force: bool = False, noprune: bool = False
) -> List:
params = {"force": force, "noprune": noprune}
response = await self.docker._query_json(
"images/{name}".format(name=name), "DELETE", params=params
)
return response
|
Remove an image along with any untagged parent
images that were referenced by that image
Args:
name: name/id of the image to delete
force: remove the image even if it is being used
by stopped containers or has other tags
noprune: don't delete untagged parent images
Returns:
List of deleted images
|
juraj-google-style
|
def scrape(text, ptype=None):
for (ruletype, rule, info) in scrape_types:
if (ptype and (ptype != ruletype)):
continue
regx = regexes.get(ruletype)
for valu in regx.findall(text):
(yield (ruletype, valu))
|
Scrape types from a blob of text and return node tuples.
Args:
text (str): Text to scrape.
ptype (str): Optional ptype to scrape. If present, only scrape rules which match the provided type.
Returns:
(str, str): Yield tuples of type, valu strings.
|
codesearchnet
|
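A usage sketch for the generator above; the exact rule-type strings come from the module's `scrape_types` table, so the `ptype` value shown is illustrative:

```python
# Iterate over every (ruletype, valu) pair found in a blob of text.
text = "contact admin@example.com or see https://example.com/report"
for ruletype, valu in scrape(text):
    print(ruletype, valu)

# Restrict scraping to a single rule type (the type name here is hypothetical):
hits = list(scrape(text, ptype="inet:url"))
```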
def adjust_target_dtype(self, torch_dtype: 'torch.dtype') -> 'torch.dtype':
return torch_dtype
|
Override this method if you want to adjust the `target_dtype` variable used in `from_pretrained`
to compute the device_map in case the device_map is a `str`. E.g. for bitsandbytes we force-set `target_dtype`
to `torch.int8` and for 4-bit we pass a custom enum `accelerate.CustomDtype.int4`.
Args:
torch_dtype (`torch.dtype`, *optional*):
The torch_dtype that is used to compute the device_map.
|
github-repos
|
def get_oneformer_resize_output_image_size(image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], max_size: Optional[int]=None, default_to_square: bool=True, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> tuple:
output_size = get_resize_output_image_size(input_image=image, size=size, default_to_square=default_to_square, max_size=max_size, input_data_format=input_data_format)
return output_size
|
Computes the output size given the desired size.
Args:
image (`np.ndarray`):
The input image.
size (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`):
The size of the output image.
max_size (`int`, *optional*):
The maximum size of the output image.
default_to_square (`bool`, *optional*, defaults to `True`):
Whether to default to square if no size is provided.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If unset, will use the inferred format from the input.
Returns:
`Tuple[int, int]`: The output size.
|
github-repos
|
def exec_iteration(self, counter, context, step_method):
logger.debug('starting')
context['whileCounter'] = counter
logger.info(f'while: running step with counter {counter}')
step_method(context)
logger.debug(f'while: done step {counter}')
result = False
if self.stop:
result = context.get_formatted_as_type(self.stop, out_type=bool)
logger.debug('done')
return result
|
Run a single loop iteration.
This method abides by the signature invoked by poll.while_until_true,
which is to say (counter, *args, **kwargs). In a normal execution
chain, this method's args are passed by self.while_loop, where context
and step_method are set. while_until_true injects counter as the 1st arg.
Args:
counter (int): loop counter, i.e. which iteration this is.
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
step_method: (method/function) This is the method/function that
will execute on every loop iteration. Signature is:
function(context)
Returns:
bool. True if self.stop evaluates to True after step execution,
False otherwise.
|
codesearchnet
|
def top_stories(self, raw=False, limit=None):
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
|
Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
|
codesearchnet
|
def import_laid_out_tensor(mesh, laid_out_tensor, shape, name=None):
return ImportLaidOutTensorOperation(
mesh, laid_out_tensor, convert_to_shape(shape), name=name).outputs[0]
|
Import a laid_out_tensor.
For expert users.
The input must be laid out appropriately given the eventual MeshImpl,
and layout.
Args:
mesh: a Mesh
laid_out_tensor: a LaidOutTensor
shape: a mtf.Shape
name: an optional string
Returns:
a mtf.Tensor
|
juraj-google-style
|
def validate(self):
if (self.value is not None):
if (type(self.value) not in six.integer_types):
raise TypeError('expected (one of): {0}, observed: {1}'.format(six.integer_types, type(self.value)))
elif (self.value > Interval.MAX):
raise ValueError('interval value greater than accepted max')
elif (self.value < Interval.MIN):
raise ValueError('interval value less than accepted min')
|
Verify that the value of the Interval is valid.
Raises:
TypeError: if the value is not of type int or long
ValueError: if the value cannot be represented by an unsigned
32-bit integer
|
codesearchnet
|
def compute_area_key(features, max_area_width, max_area_height=1, height=1, mode='mean', training=True, name=None):
tf.logging.info('area_attention mode=%s', mode)
(area_mean, area_std, _, area_heights, area_widths) = compute_area_features(features, max_area_width=max_area_width, max_area_height=max_area_height, height=height)
if (mode == 'mean'):
return area_mean
elif (mode == 'max'):
(area_max, _, _) = basic_pool(features, max_area_width=max_area_width, max_area_height=max_area_height, height=height)
return area_max
elif (mode == 'sample'):
if training:
area_mean += (area_std * tf.random_normal(tf.shape(area_std)))
return area_mean
with tf.variable_scope(name, default_name='combine_area_features', values=[area_mean, area_std, area_heights, area_widths]):
depth = common_layers.shape_list(area_mean)[(- 1)]
height_embed = tf.nn.embedding_lookup(params=tf.get_variable('area_height_emb', [max_area_height, (depth // 2)]), ids=(area_heights[:, :, 0] - 1))
width_embed = tf.nn.embedding_lookup(params=tf.get_variable('area_width_emb', [max_area_width, (depth // 2)]), ids=(area_widths[:, :, 0] - 1))
size_embed = tf.concat([height_embed, width_embed], (- 1))
if (mode == 'concat'):
feature_concat = tf.concat([area_mean, area_std, size_embed], (- 1))
elif (mode == 'max_concat'):
(area_max, _, _) = basic_pool(features, max_area_width=max_area_width, max_area_height=max_area_height, height=height)
feature_concat = tf.concat([area_max, size_embed], (- 1))
elif (mode == 'sum'):
feature_concat = ((size_embed + area_mean) + area_std)
elif (mode == 'sample_concat'):
if training:
area_mean += (area_std * tf.random_normal(tf.shape(area_std)))
feature_concat = tf.concat([area_mean, size_embed], (- 1))
elif (mode == 'sample_sum'):
if training:
area_mean += (area_std * tf.random_normal(tf.shape(area_std)))
feature_concat = (area_mean + size_embed)
else:
raise ValueError(('Unsupported area key mode=%s' % mode))
feature_hidden = tf.layers.dense(inputs=feature_concat, units=depth, activation=tf.nn.relu)
area_key = tf.layers.dense(feature_hidden, units=depth)
return area_key
|
Computes the key for each area.
Args:
features: a Tensor in a shape of [batch_size, height * width, depth].
max_area_width: the max width allowed for an area.
max_area_height: the max height allowed for an area.
height: the height of the image.
mode: whether to combine different area features or only use
the vector mean of each area, which can be "mean", "concat", "sum",
"sample_concat", and "sample_sum".
training: indicating if it is in the training mode.
name: the name for setting the variable scope.
Returns:
area_key: a Tensor in the shape of [batch_size, num_areas, depth]
|
codesearchnet
|
def del_method(self, m):
if (isinstance(m, types.FunctionType) and (not iscoroutinefunction(m))):
wrkey = ('function', id(m))
else:
(f, obj) = get_method_vars(m)
wrkey = (f, id(obj))
if (wrkey in self):
del self[wrkey]
|
Remove an instance method or function if it exists
Args:
m: The instance method or function to remove
|
codesearchnet
|
def to_dict(self) -> dict[str, Any]:
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, 'model_type'):
output['model_type'] = self.__class__.model_type
output['transformers_version'] = __version__
for key, value in output.items():
if isinstance(value, PretrainedConfig):
value = value.to_dict()
del value['transformers_version']
output[key] = value
self._remove_keys_not_serialized(output)
if hasattr(self, 'quantization_config'):
output['quantization_config'] = self.quantization_config.to_dict() if not isinstance(self.quantization_config, dict) else self.quantization_config
self.dict_torch_dtype_to_str(output)
return output
|
Serializes this instance to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
|
github-repos
|
class TFDebertaV2XSoftmax(keras.layers.Layer):
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.axis = axis
def call(self, inputs: tf.Tensor, mask: tf.Tensor):
rmask = tf.logical_not(tf.cast(mask, tf.bool))
output = tf.where(rmask, tf.cast(float('-inf'), dtype=self.compute_dtype), inputs)
output = stable_softmax(tf.cast(output, dtype=tf.float32), self.axis)
output = tf.where(rmask, 0.0, output)
return output
|
Masked Softmax which is optimized for saving memory
Args:
input (`tf.Tensor`): The input tensor that will apply softmax.
mask (`tf.Tensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.
dim (int): The dimension that will apply softmax
|
github-repos
|
def _build_nccl_hybrid(input_tensors, red_op, upper_level_f):
input_tensors, shape = _flatten_tensors(input_tensors)
devices = [t.device for t in input_tensors]
per_worker_devices, per_worker_values = _split_by_task(devices, input_tensors)
num_workers = len(per_worker_devices)
up_values = [None for w in range(0, num_workers)]
up_devices = up_values[:]
down_values = up_values[:]
for w in range(0, num_workers):
worker_values = build_nccl_all_reduce(per_worker_values[w], red_op)
with ops.control_dependencies(worker_values):
with ops.device(worker_values[0].device):
up_values[w] = array_ops.identity(worker_values[0])
up_devices[w] = per_worker_devices[w][0]
level_2_output = upper_level_f(up_values)
for w in range(0, num_workers):
dst_tensors = []
with ops.device(per_worker_devices[w][0]):
broadcast_src = nccl_ops.broadcast(array_ops.identity(level_2_output[w]))
for d in per_worker_devices[w]:
with ops.device(d):
dst_tensors.append(array_ops.identity(broadcast_src))
down_values[w] = dst_tensors
output_tensors = [v for sublist in down_values for v in sublist]
if len(shape) != 1:
output_tensors = _reshape_tensors(output_tensors, shape)
return output_tensors
|
Construct a subgraph for NCCL hybrid all-reduce.
Args:
input_tensors: list of `tf.Tensor` of same-shape and type values to
be reduced.
red_op: binary elementwise reduction operator.
upper_level_f: function for reducing one value per worker, across
workers.
Returns:
list of `tf.Tensor` of reduced values.
Raises:
ValueError: inputs not well-formed.
|
github-repos
|
def _unary_assert_doc(sym, sym_name):
def _decorator(func):
opname = func.__name__
cap_sym_name = sym_name.capitalize()
func.__doc__ = '\n Assert the condition `x {sym}` holds element-wise.\n\n When running in graph mode, you should add a dependency on this operation\n to ensure that it runs. Example of adding a dependency to an operation:\n\n ```python\n with tf.control_dependencies([tf.debugging.{opname}(x, y)]):\n output = tf.reduce_sum(x)\n ```\n\n {sym_name} means, for every element `x[i]` of `x`, we have `x[i] {sym}`.\n If `x` is empty this is trivially satisfied.\n\n Args:\n x: Numeric `Tensor`.\n data: The tensors to print out if the condition is False. Defaults to\n error message and first few entries of `x`.\n summarize: Print this many entries of each tensor.\n message: A string to prefix to the default message.\n name: A name for this operation (optional). Defaults to "{opname}".\n\n Returns:\n Op that raises `InvalidArgumentError` if `x {sym}` is False.\n @compatibility(eager)\n returns None\n @end_compatibility\n\n Raises:\n InvalidArgumentError: if the check can be performed immediately and\n `x {sym}` is False. The check can be performed immediately during\n eager execution or if `x` is statically known.\n '.format(sym=sym, sym_name=cap_sym_name, opname=opname)
return func
return _decorator
|
Common docstring for assert_* ops that evaluate a unary predicate over every element of a tensor.
Args:
sym: Mathematical symbol for the check performed on each element, i.e. "> 0"
sym_name: English-language name for the op described by sym
Returns:
Decorator that adds the appropriate docstring to the function for symbol
`sym`.
|
github-repos
|
def get_tensor_mtf_dimension_names(self, tensor_name):
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, mtf.Tensor):
return tensor.shape.dimension_names
else:
return []
|
The Mesh TensorFlow dimensions associated with a tensor.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a [string], the names of Mesh TensorFlow dimensions.
|
codesearchnet
|
def register_peer(self, connection_id, endpoint):
with self._lock:
if (len(self._peers) < self._maximum_peer_connectivity):
self._peers[connection_id] = endpoint
self._topology.set_connection_status(connection_id, PeerStatus.PEER)
LOGGER.debug('Added connection_id %s with endpoint %s, connected identities are now %s', connection_id, endpoint, self._peers)
else:
raise PeeringException('At maximum configured number of peers: {} Rejecting peering request from {}.'.format(self._maximum_peer_connectivity, endpoint))
public_key = self.peer_to_public_key(connection_id)
if public_key:
self._consensus_notifier.notify_peer_connected(public_key)
|
Registers a connected connection_id.
Args:
connection_id (str): A unique identifier which identifies an
connection on the network server socket.
endpoint (str): The publicly reachable endpoint of the new
peer
|
codesearchnet
|
def __init__(self, pfor: 'PFor', op: ops.Operation, inputs):
self.pfor = pfor
self._op = op
self._inputs = inputs
|
Creates a _PforInput object.
Args:
pfor: PFor converter object.
op: the Operation object that is being converted.
inputs: list of WrappedTensor objects representing converted values of the
inputs of `op`.
|
github-repos
|
def search_messages(self, *, query: str, **kwargs) -> SlackResponse:
self._validate_xoxp_token()
kwargs.update({'query': query})
return self.api_call('search.messages', http_verb='GET', params=kwargs)
|
Searches for messages matching a query.
Args:
query (str): Search query. May contain booleans, etc.
e.g. 'pickleface'
|
codesearchnet
|
def construct_end_message(self):
app_count = self.dfk.task_count
site_count = len([x for x in self.dfk.config.executors if x.managed])
app_fails = len([t for t in self.dfk.tasks if (self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES)])
message = {'uuid': self.uuid, 'end': time.time(), 't_apps': app_count, 'sites': site_count, 'c_time': None, 'failed': app_fails, 'test': self.test_mode}
return json.dumps(message)
|
Collect the final run information at the time of DFK cleanup.
Returns:
- Message dict dumped as json string, ready for UDP
|
codesearchnet
|
def bounded_trie(namespace: Union[Type, str], name: str) -> 'Metrics.DelegatingBoundedTrie':
namespace = Metrics.get_namespace(namespace)
return Metrics.DelegatingBoundedTrie(MetricName(namespace, name))
|
Obtains or creates a Bounded Trie metric.
Args:
namespace: A class or string that gives the namespace to a metric
name: A string that gives a unique name to a metric
Returns:
A BoundedTrie object.
|
github-repos
|
def recode_sam_reads(sam_fn, fastq_rnf_fo, fai_fo, genome_id, number_of_read_tuples=(10 ** 9), simulator_name=None, allow_unmapped=False):
fai_index = rnftools.utils.FaIdx(fai_fo)
read_tuple_id_width = len(format(number_of_read_tuples, 'x'))
fq_creator = rnftools.rnfformat.FqCreator(fastq_fo=fastq_rnf_fo, read_tuple_id_width=read_tuple_id_width, genome_id_width=2, chr_id_width=fai_index.chr_id_width, coor_width=fai_index.coor_width, info_reads_in_tuple=True, info_simulator=simulator_name)
cigar_reg_shift = re.compile('([0-9]+)([MDNP=X])')
reverse_complement_dict = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N'}
read_tuple_id = 0
last_read_tuple_name = None
with pysam.AlignmentFile(sam_fn, check_header=False) as samfile:
for alignment in samfile:
if ((alignment.query_name != last_read_tuple_name) and (last_read_tuple_name is not None)):
read_tuple_id += 1
last_read_tuple_name = alignment.query_name
if alignment.is_unmapped:
rnftools.utils.error("SAM files used for conversion should not contain unaligned segments. This condition is broken by read tuple '{}' in file '{}'.".format(alignment.query_name, sam_fn), program='RNFtools', subprogram='MIShmash', exception=NotImplementedError)
if alignment.is_reverse:
direction = 'R'
bases = ''.join([reverse_complement_dict[nucl] for nucl in alignment.seq[::(- 1)]])
qualities = str(alignment.qual[::(- 1)])
else:
direction = 'F'
bases = alignment.seq[:]
qualities = str(alignment.qual[:])
if (fai_index.dict_chr_ids != {}):
chr_id = fai_index.dict_chr_ids[samfile.getrname(alignment.reference_id)]
else:
chr_id = '0'
left = (int(alignment.reference_start) + 1)
right = (left - 1)
for (steps, operation) in cigar_reg_shift.findall(alignment.cigarstring):
right += int(steps)
segment = rnftools.rnfformat.Segment(genome_id=genome_id, chr_id=chr_id, direction=direction, left=left, right=right)
fq_creator.add_read(read_tuple_id=read_tuple_id, bases=bases, qualities=qualities, segments=[segment])
fq_creator.flush_read_tuple()
|
Transform a SAM file to RNF-compatible FASTQ.
Args:
sam_fn (str): SAM/BAM file - file name.
fastq_rnf_fo (str): Output FASTQ file - file object.
fai_fo (str): FAI index of the reference genome - file object.
genome_id (int): Genome ID for RNF.
number_of_read_tuples (int): Expected number of read tuples (to set width of read tuple id).
simulator_name (str): Name of the simulator. Used for comment in read tuple name.
allow_unmapped (bool): Allow unmapped reads.
Raises:
NotImplementedError
|
codesearchnet
|
def name_scope(name):
return ops.name_scope_v2(name)
|
A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
def my_op(a):
  with tf.name_scope("MyOp") as scope:
    a = tf.convert_to_tensor(a, name="a")
    # Define some computation that uses `a`.
    return foo_op(..., name=scope)
When executed, the Tensor `a` will have the name `MyOp/a`.
Args:
name: The prefix to use on all names created within the name scope.
Returns:
Name scope context manager.
|
github-repos
|
def apply_step(self, variables, deltas):
if len(variables) != len(deltas):
raise TensorForceError("Invalid variables and deltas lists.")
return tf.group(
*(tf.assign_add(ref=variable, value=delta) for variable, delta in zip(variables, deltas))
)
|
Applies the given (and already calculated) step deltas to the variable values.
Args:
variables: List of variables.
deltas: List of deltas of same length.
Returns:
The step-applied operation. A tf.group of tf.assign_add ops.
|
juraj-google-style
|
def cancelHistoricalData(self, bars: BarDataList):
self.client.cancelHistoricalData(bars.reqId)
self.wrapper.endSubscription(bars)
|
Cancel the update subscription for the historical bars.
Args:
bars: The bar list that was obtained from ``reqHistoricalData``
with a keepUpToDate subscription.
|
juraj-google-style
|
def find_all_hinted_output_nodes(session=None, graph_def=None):
if session is not None and graph_def is not None:
raise ValueError('Provide only one of session and graph_def.')
hinted_outputs_nodes = []
if session is not None:
hints = _find_all_hints_in_nodes(session.graph_def.node)
elif graph_def is not None:
hints = _find_all_hints_in_nodes(graph_def.node)
for hint in hints.values():
_, output_nodes = hint.flattened_inputs_and_outputs()
hinted_outputs_nodes.extend(output_nodes)
return hinted_outputs_nodes
|
Find all Ophints output nodes in the graph.
This is used to get all the output nodes that are ophinted; it is important
for operations like convert_variables_to_constants to keep the whole ophint structure.
Note: only one of session or graph_def should be used, not both.
Why is this useful? Some TensorFlow ops (e.g. bidirectional rnn) can
generate multiple outputs for an unfused subgraph. If not all output nodes are
consumed, graph optimization can potentially drop the unused nodes and leave
the ophints in an invalid state (due to missing ophinted output nodes). So it's
important for us to find all those hinted output nodes and make sure they're
not discarded.
Args:
session: A TensorFlow session that contains the graph to convert.
graph_def: A graph def that we should convert.
Returns:
A list of OpHints output nodes.
Raises:
ValueError: If both session and graph_def are provided.
|
github-repos
|
def _ParseRegisteredDLLs(self, parser_mediator, registry_key):
notify_key = registry_key.GetSubkeyByName('Notify')
if (not notify_key):
return
for subkey in notify_key.GetSubkeys():
for trigger in self._TRIGGERS:
handler_value = subkey.GetValueByName(trigger)
if (not handler_value):
continue
values_dict = {'Application': subkey.name, 'Handler': handler_value.GetDataAsObject(), 'Trigger': trigger}
command_value = subkey.GetValueByName('DllName')
if command_value:
values_dict['Command'] = command_value.GetDataAsObject()
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = subkey.path
event_data.offset = subkey.offset
event_data.regvalue = values_dict
event_data.source_append = ': Winlogon'
event = time_events.DateTimeValuesEvent(subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses the registered DLLs that receive event notifications.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
|
codesearchnet
|
def alt40fms(msg):
d = hex2bin(data(msg))
if (d[13] == '0'):
return None
alt = (bin2int(d[14:26]) * 16)
return alt
|
Selected altitude, FMS
Args:
msg (String): 28 bytes hexadecimal message (BDS40) string
Returns:
int: altitude in feet
|
codesearchnet
|
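A small arithmetic sketch of the BDS4,0 selected-altitude field used above; the field value is hypothetical, not decoded from a real message:

```python
# The 12-bit FMS selected-altitude field is scaled by 16 ft per unit.
field_value = 2200               # hypothetical 12-bit value (bits 14-25 of the payload)
altitude_ft = field_value * 16
print(altitude_ft)               # 35200
```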
def optimize(self, init_method='default', inference=None, n_times=10, perturb=False, pertSize=0.001, verbose=None):
assert (init_method in ['default', 'random', None]), 'VarianceDecomposition: specified init_method not valid'
if (init_method == 'default'):
self._init_params_default()
if ((init_method != 'random') and (not perturb)):
n_times = 1
if (inference is None):
inference = self._det_inference()
else:
self._check_inference(inference)
self._inference = inference
if (self.gp is None):
self._initGP()
params0 = self.gp.getParams()
for i in range(n_times):
if (init_method == 'random'):
params = {'covar': sp.randn(params0['covar'].shape[0])}
self.gp.setParams(params)
elif perturb:
params = {'covar': (params0['covar'] + (pertSize * sp.randn(params0['covar'].shape[0])))}
self.gp.setParams(params)
(conv, info) = self.gp.optimize()
if conv:
break
if verbose:
if (conv == False):
print('No local minimum found for the tested initialization points')
else:
print(('Local minimum found at iteration %d' % i))
return conv
|
Train the model using the specified initialization strategy
Args:
init_method: initialization strategy:
'default': variance is equally split across the different random effect terms. For multi-trait models the empirical covariance between traits is used
'random': variance component parameters (scales) are sampled from a normal distribution with mean 0 and std 1,
None: no initialization is considered. Initial parameters can be specified by using the single covariance getTraitCovarfun()
inference: inference gp method; by default an algebraically efficient inference (i.e., gp2kronSum, gp2KronSumLR, gp3KronSumLR) will be used when possible.
Otherwise a standard inference scheme (gp_base) will be used.
n_times: number of restarts to converge
perturb: if true, the initial point (if random initialization is not being used) is perturbed with gaussian noise for each restart (default, False)
pertSize: std of the gaussian noise used to perturb the initial point
verbose: print whether convergence is achieved and how many restarts were needed
|
codesearchnet
|
def docker_list(registry_pass):
registry = conf.get('docker.registry', None)
if registry is None:
log.err("You must define docker.registry conf variable to list images")
sys.exit(-1)
registry_user = conf.get('docker.registry_user', None)
if registry_user is None:
registry_user = click.prompt("Username")
rc = client.RegistryClient(registry, registry_user, registry_pass)
images = {x: rc.list_tags(x) for x in rc.list_images()}
shell.cprint("<32>Images in <34>{} <32>registry:", registry)
for image, tags in images.items():
shell.cprint(' <92>{}', image)
for tag in tags:
shell.cprint(' <90>{}:<35>{}', image, tag)
|
List docker images stored in the remote registry.
Args:
registry_pass (str):
Remote docker registry password.
|
juraj-google-style
|
def DownloadFile(file_obj, target_path, buffer_size=BUFFER_SIZE):
logging.info(u"Downloading: %s to: %s", file_obj.urn, target_path)
target_file = open(target_path, "wb")
file_obj.Seek(0)
count = 0
data_buffer = file_obj.Read(buffer_size)
while data_buffer:
target_file.write(data_buffer)
data_buffer = file_obj.Read(buffer_size)
count += 1
if not count % 3:
logging.debug(u"Downloading: %s: %s done", file_obj.urn,
utils.FormatNumberAsString(count * buffer_size))
target_file.close()
|
Download an aff4 file to the local filesystem overwriting it if it exists.
Args:
file_obj: An aff4 object that supports the file interface (Read, Seek)
target_path: Full path of file to write to.
buffer_size: Read in chunks this size.
|
juraj-google-style
|
def should_generate_summaries():
name_scope = tf.contrib.framework.get_name_scope()
if (name_scope and ('while/' in name_scope)):
return False
if tf.get_variable_scope().reuse:
return False
return True
|
Is this an appropriate context to generate summaries.
Returns:
a boolean
|
codesearchnet
|
def _ifft(self, x):
x_complex = _to_complex(x)
return _IFFT_OP[self.block_depth](x_complex)
|
IFFT along the last self.block_depth dimensions of x.
Args:
x: `Tensor` with floating or complex dtype. Should be in the form
returned by self._vectorize_then_blockify.
Returns:
`Tensor` with `dtype` `complex64`.
|
github-repos
|
async def retry_create_artifact(*args, **kwargs):
await retry_async(
create_artifact,
retry_exceptions=(
ScriptWorkerRetryException,
aiohttp.ClientError
),
args=args,
kwargs=kwargs
)
|
Retry create_artifact() calls.
Args:
*args: the args to pass on to create_artifact
**kwargs: the args to pass on to create_artifact
|
juraj-google-style
|
def Read(f):
try:
yaml_data = yaml.load(f)
except yaml.YAMLError as e:
raise ParseError('%s' % e)
except IOError as e:
raise YAMLLoadError('%s' % e)
_CheckData(yaml_data)
try:
return Config(
yaml_data.get('blacklist', ()),
yaml_data.get('whitelist', ('*')))
except UnicodeDecodeError as e:
raise YAMLLoadError('%s' % e)
|
Reads and returns Config data from a yaml file.
Args:
f: Yaml file to parse.
Returns:
Config object as defined in this file.
Raises:
Error (some subclass): If there is a problem loading or parsing the file.
|
juraj-google-style
|
def __init__(self, issuers_to_provider_ids, jwks_supplier, cache_capacity=200):
self._issuers_to_provider_ids = issuers_to_provider_ids
self._jwks_supplier = jwks_supplier
arguments = {u"capacity": cache_capacity}
expiration_time = datetime.timedelta(minutes=5)
self._cache = cache.make_region().configure(u"lru_cache",
arguments=arguments,
expiration_time=expiration_time)
|
Construct an instance of AuthTokenDecoder.
Args:
issuers_to_provider_ids: a dictionary mapping from issuers to provider
IDs defined in the service configuration.
jwks_supplier: an instance of JwksSupplier that supplies JWKS based on
issuer.
cache_capacity: the cache_capacity with default value of 200.
|
juraj-google-style
|
def list_insights_components(access_token, subscription_id, resource_group):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/microsoft.insights/', '/components?api-version=', INSIGHTS_COMPONENTS_API])
return do_get(endpoint, access_token)
|
List the Microsoft Insights components in a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
Returns:
HTTP response. JSON body of components.
|
codesearchnet
|
def get_dict_to_print(field_to_obs):
def compressed_steps(steps):
return {'num_steps': len(set(steps)),
'min_step': min(steps),
'max_step': max(steps),
'last_step': steps[-1],
'first_step': steps[0],
'outoforder_steps': get_out_of_order(steps)}
def full_steps(steps):
return {'steps': steps, 'outoforder_steps': get_out_of_order(steps)}
output = {}
for field, observations in field_to_obs.items():
if not observations:
output[field] = None
continue
steps = [x['step'] for x in observations]
if field in SHORT_FIELDS:
output[field] = compressed_steps(steps)
if field in LONG_FIELDS:
output[field] = full_steps(steps)
return output
|
Transform the field-to-obs mapping into a printable dictionary.
Args:
field_to_obs: Dict that maps string field to `Observation` list.
Returns:
A dict with the keys and values to print to console.
|
juraj-google-style
|
def get_metadata_as_dict(self, user_id=None, source=None):
if ((self.metadata is None) or (self.metadata == '')):
return {}
metadata_dict = (self.metadata if isinstance(self.metadata, dict) else json.loads(self.metadata))
metadata_keys = [m.lower() for m in metadata_dict]
if ((user_id is not None) and ('user_id' not in metadata_keys)):
metadata_dict['user_id'] = six.text_type(user_id)
if ((source is not None) and ('source' not in metadata_keys)):
metadata_dict['source'] = six.text_type(source)
return {k: six.text_type(v) for (k, v) in metadata_dict.items()}
|
Convert a metadata json string into a dictionary.
Args:
user_id (int): Optional: Insert user_id into the metadata if specified
source (string): Optional: Insert source (the name of the app typically) into the metadata if necessary.
Returns:
dict: The metadata as a Python dictionary
|
codesearchnet
|
def do_get_next(endpoint, access_token):
headers = {'Authorization': ('Bearer ' + access_token)}
headers['User-Agent'] = get_user_agent()
looping = True
value_list = []
vm_dict = {}
while looping:
get_return = requests.get(endpoint, headers=headers).json()
if (not ('value' in get_return)):
return get_return
if (not ('nextLink' in get_return)):
looping = False
else:
endpoint = get_return['nextLink']
value_list += get_return['value']
vm_dict['value'] = value_list
return vm_dict
|
Do an HTTP GET request, follow the nextLink chain and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
|
codesearchnet
|
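A hypothetical call sketch showing how the nextLink-following helper might be used; the resource path, api-version, and `access_token` below are placeholders, not values from the snippet:

```python
subscription_id = "00000000-0000-0000-0000-000000000000"
endpoint = ("https://management.azure.com/subscriptions/"
            f"{subscription_id}/providers/Microsoft.Compute/virtualMachines"
            "?api-version=2023-07-01")
access_token = "<valid Azure bearer token>"

result = do_get_next(endpoint, access_token)
for vm in result.get("value", []):      # all pages are merged under 'value'
    print(vm.get("name"))
```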
def infeed_dequeue(dtype, shape, name=None):
if dtype not in _SUPPORTED_INFEED_DTYPES:
raise TypeError("Operation '{}' has type {} which is not a supported TPU infeed type. Supported types are: {}".format(name, dtype, list(_SUPPORTED_INFEED_DTYPES)))
return gen_tpu_ops.infeed_dequeue(dtype, shape, name=name)
|
A placeholder op for a value that will be fed into the computation.
Args:
dtype: A `tf.DType`. The type of elements in the tensor.
shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
A tensor that will be provided using the infeed mechanism.
Raises:
TypeError: If `dtype` is not a supported infeed type.
|
github-repos
|
def dict_product(*d, **kwargs):
d = dict(dict_merge(*d), **kwargs)
holdout = {k: d[k] for k in d if not isinstance(d[k], list)}
d = {k: d[k] for k in d if k not in holdout}
items = d.items()
if len(items) == 0:
dicts = [{}]
else:
keys, values = zip(*items)
dicts = [dict_filter_none(dict(zip(keys, v))) for v in product(*values)]
for d in dicts:
d.update(holdout)
return dicts
|
cartesian product of dict whose values are lists
Args:
d: dictionary to take product of. multiple dictionaries will first
be merged by dict_merge
kwargs: additional kwargs for convenience
Returns:
a list of dictionaries with the same keys as d and kwargs
|
juraj-google-style
|
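An illustrative input/output pair for the cartesian product described above, assuming the `dict_merge`/`dict_filter_none` helpers it relies on; the ordering shown assumes insertion-ordered dicts:

```python
combos = dict_product({"lr": [0.1, 0.01], "layers": [2, 3]}, optimizer="sgd")
# Non-list values ("optimizer") are held out and copied into every result:
# [{'lr': 0.1,  'layers': 2, 'optimizer': 'sgd'},
#  {'lr': 0.1,  'layers': 3, 'optimizer': 'sgd'},
#  {'lr': 0.01, 'layers': 2, 'optimizer': 'sgd'},
#  {'lr': 0.01, 'layers': 3, 'optimizer': 'sgd'}]
```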
def gradient_helper(optimizer, loss, var_list=None):
if var_list is None:
var_list = tf.compat.v1.trainable_variables()
grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)
grads = [pair[0] for pair in grads_and_vars]
return grads, optimizer.apply_gradients(grads_and_vars)
|
A helper to get the gradients out at each step.
Args:
optimizer: the optimizer op.
loss: the op that computes your loss value.
Returns: the gradient tensors and the train_step op.
|
juraj-google-style
|
def gen_pypirc(username=None, password=None):
path = join(conf.getenv('HOME'), '.pypirc')
username = username or conf.getenv('PYPI_USER', None)
password = password or conf.getenv('PYPI_PASS', None)
if username is None or password is None:
log.err("You must provide $PYPI_USER and $PYPI_PASS")
sys.exit(1)
log.info("Generating <94>{}".format(path))
# The template literal was lost in extraction; a representative .pypirc body is restored below.
fs.write_file(path, util.remove_indent('''
[distutils]
index-servers = pypi

[pypi]
username: {username}
password: {password}
'''.format(
username=username,
password=password
)))
|
Generate ~/.pypirc with the given credentials.
Useful for CI builds. Can also get credentials through env variables
``PYPI_USER`` and ``PYPI_PASS``.
Args:
username (str):
pypi username. If not given it will try to take it from the
``PYPI_USER`` env variable.
password (str):
pypi password. If not given it will try to take it from the
``PYPI_PASS`` env variable.
|
juraj-google-style
|
def VerifyConfiguration(conf, nsswitch_filename=FILE_NSSWITCH):
warnings, errors = (0, 0)
if not conf.maps:
logging.error('No maps are configured.')
errors += 1
nsswitch = ParseNSSwitchConf(nsswitch_filename)
for configured_map in conf.maps:
if configured_map == 'sshkey':
continue
if conf.options[configured_map].cache['name'] == 'nssdb':
logging.error('nsscache no longer supports nssdb cache')
errors += 1
if conf.options[configured_map].cache['name'] == 'files':
nss_module_name = 'files'
if 'cache_filename_suffix' in conf.options[configured_map].cache and conf.options[configured_map].cache['cache_filename_suffix'] == 'cache':
nss_module_name = 'cache'
else:
nss_module_name = 'cache'
if nss_module_name not in nsswitch[configured_map]:
logging.warning('nsscache is configured to build maps for %r, but NSS is not configured (in %r) to use it', configured_map, nsswitch_filename)
warnings += 1
return (warnings, errors)
|
Verify that the system configuration matches the nsscache configuration.
Checks that NSS configuration has the cache listed for each map that
is configured in the nsscache configuration, i.e. that the system is
configured to use the maps we are building.
Args:
conf: a Configuration
nsswitch_filename: optionally the name of the file to parse
Returns:
(warnings, errors) a tuple counting the number of warnings and
errors detected
|
github-repos
|
def setMaxDemandPeriod(self, period, password='00000000'):
result = False
self.setContext('setMaxDemandPeriod')
try:
if ((period < 1) or (period > 3)):
self.writeCmdMsg('Correct parameter: 1 = 15 minute, 2 = 30 minute, 3 = hour')
self.setContext('')
return result
if (not self.request(False)):
self.writeCmdMsg('Bad read CRC on setting')
elif (not self.serialCmdPwdAuth(password)):
self.writeCmdMsg('Password failure')
else:
req_str = (('015731023030353028' + binascii.hexlify(str(period)).zfill(2)) + '2903')
req_str += self.calc_crc16(req_str[2:].decode('hex'))
self.m_serial_port.write(req_str.decode('hex'))
if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):
self.writeCmdMsg('Success(setMaxDemandPeriod): 06 returned.')
result = True
self.serialPostEnd()
except:
ekm_log(traceback.format_exc(sys.exc_info()))
self.setContext('')
return result
|
Serial call to set max demand period.
Args:
period (int): 1 = 15 minute, 2 = 30 minute, 3 = hour.
password (str): Optional password.
Returns:
bool: True on completion with ACK.
|
codesearchnet
|
def parse_content(self, content):
self.active_lines_unparsed = (get_active_lines(content) if (content is not None) else [])
self.active_settings = (split_kv_pairs(content, use_partition=False) if (content is not None) else [])
|
Main parsing class method which stores all interesting data from the content.
Args:
content (context.content): Parser context content
|
codesearchnet
|
def Add(self, category, label, age):
now = rdfvalue.RDFDatetime.Now()
category = utils.SmartUnicode(category)
for active_time in self.active_days:
self.categories[active_time].setdefault(label, {})
if ((now - age).seconds < (((active_time * 24) * 60) * 60)):
self.categories[active_time][label][category] = (self.categories[active_time][label].get(category, 0) + 1)
|
Adds another instance of this category into the active_days counter.
We automatically count the event towards all relevant active_days. For
example, if the category "Windows" was seen 8 days ago it will be counted
towards the 30 day active, 14 day active but not against the 7 and 1 day
actives.
Args:
category: The category name to account this instance against.
label: Client label to which this should be applied.
age: When this instance occurred.
|
codesearchnet
|
def altitude(msg):
tc = typecode(msg)
if tc<5 or tc==19 or tc>22:
raise RuntimeError("%s: Not a position message" % msg)
if tc>=5 and tc<=8:
return 0
msgbin = common.hex2bin(msg)
q = msgbin[47]
if q:
n = common.bin2int(msgbin[40:47]+msgbin[48:52])
alt = n * 25 - 1000
return alt
else:
return None
|
Decode aircraft altitude
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: altitude in feet
|
juraj-google-style
|
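A worked arithmetic example for the Q-bit = 1 branch above; the 11-bit value is hypothetical rather than extracted from a real message:

```python
# With the Q bit set, altitude is encoded in 25 ft increments with a -1000 ft offset.
n = 1400                      # hypothetical 11-bit field (bits 41-47 plus 49-52)
altitude_ft = n * 25 - 1000
print(altitude_ft)            # 34000
```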
def get(self, statediag, accepted=None):
count = 0
statesmap = {}
newstatediag = {}
for state in statediag:
if statediag[state].id not in statesmap:
statesmap[statediag[state].id] = count
mapped = count
count = count + 1
else:
mapped = statesmap[statediag[state].id]
transitions = {}
for nextstate in statediag[state].trans:
if nextstate not in statesmap:
statesmap[nextstate] = count
transmapped = count
count = count + 1
else:
transmapped = statesmap[nextstate]
transitions[transmapped] = statediag[state].trans[nextstate]
newstate = PDAState()
newstate.id = mapped
newstate.type = statediag[state].type
newstate.sym = statediag[state].sym
newstate.trans = transitions
newstatediag[mapped] = newstate
newaccepted = None
if accepted is not None:
newaccepted = []
for accepted_state in accepted :
if (0, accepted_state) in statesmap:
newaccepted.append(statesmap[(0, accepted_state)])
return newstatediag, count, newaccepted
|
Replaces the complex state IDs generated by the product operation with
simple sequential numbers. A dictionary is maintained in order
to map the existing IDs.
Args:
statediag (list): The states of the PDA
accepted (list): the list of DFA accepted states
Returns:
tuple: the renumbered states dict, the number of states, and the renumbered accepted states
|
juraj-google-style
|
def add_observer(self, o, component_type=ComponentType):
self.observers[component_type].add(o)
|
Add a callback that will get invoked after each component is called.
Args:
o (func): the callback function
Keyword Args:
component_type (ComponentType): the :class:`ComponentType` to observe.
The callback will fire any time an instance of the class or its
subclasses is invoked.
The callback should look like this:
.. code-block:: python
def callback(comp, broker):
    value = broker.get(comp)
    # do something with value
    pass
|
codesearchnet
|
def to_unicode(self, s):
if isinstance(s, unicode):
return s
if isinstance(s, str):
return unicode(s, errors='ignore')
return s
|
Convert an elementary datatype to unicode.
Args:
s: the datatype to be unicoded.
Returns:
Unicoded data.
|
juraj-google-style
|
def match(pattern, name):
try:
re_pat = _PATTERN_CACHE[(pattern, True)]
except KeyError:
res = (('(?ms)' + _translate(pattern)) + '\\Z')
_PATTERN_CACHE[(pattern, True)] = re_pat = re.compile(res)
return (re_pat.match(name) is not None)
|
Test whether a name matches a wildcard pattern.
Arguments:
pattern (str): A wildcard pattern, e.g. ``"*.py"``.
name (str): A filename.
Returns:
bool: `True` if the filename matches the pattern.
|
codesearchnet
|
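A quick usage sketch for the cached wildcard matcher above; it assumes `_translate` follows fnmatch-style semantics for `*` and `?`:

```python
print(match("*.py", "module.py"))            # True
print(match("*.py", "notes.txt"))            # False
print(match("data_??.csv", "data_01.csv"))   # True, if '?' matches a single character
```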
def locale(self, value):
if value == self._defaults['ai.device.locale'] and 'ai.device.locale' in self._values:
del self._values['ai.device.locale']
else:
self._values['ai.device.locale'] = value
|
The locale property.
Args:
value (string): the property value.
|
juraj-google-style
|
def get_tr(self, derivatives=False, **selectors):
selectors.update(suffix='bold', datatype='func')
scope = None if derivatives else 'raw'
images = self.get(extensions=['.nii', '.nii.gz'], scope=scope,
**selectors)
if not images:
raise ValueError("No functional images that match criteria found.")
all_trs = set()
for img in images:
md = self.get_metadata(img.path, suffix='bold', full_search=True)
all_trs.add(round(float(md['RepetitionTime']), 5))
if len(all_trs) > 1:
raise ValueError("Unique TR cannot be found given selectors {!r}"
.format(selectors))
return all_trs.pop()
|
Returns the scanning repetition time (TR) for one or more runs.
Args:
derivatives (bool): If True, also checks derivatives images.
selectors: Optional keywords used to constrain the selected runs.
Can be any arguments valid for a .get call (e.g., BIDS entities
or JSON sidecar keys).
Returns: A single float.
Notes: Raises an exception if more than one unique TR is found.
|
juraj-google-style
|
def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, decoder_position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, decoder_head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: Optional[TFBaseModelOutput]=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSeq2SeqSequenceClassifierOutput, Tuple[tf.Tensor]]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(f'Passing input embeddings is currently not supported for {self.__class__.__name__}')
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
last_hidden_state = outputs[0]
eos_mask = tf.equal(input_ids, self.config.eos_token_id)
self_masked = tf.reshape(tf.boolean_mask(eos_mask, eos_mask), (tf.shape(input_ids)[0], -1))
tf.Assert(tf.reduce_all(self_masked[:, -1]), ['All examples must have the same number of <eos> tokens.'])
masked = tf.reshape(tf.boolean_mask(last_hidden_state, eos_mask), (tf.shape(input_ids)[0], tf.shape(self_masked)[1], tf.shape(last_hidden_state)[-1]))
sentence_representation = masked[:, -1, :]
logits = self.classification_head(sentence_representation)
loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return TFSeq2SeqSequenceClassifierOutput(loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
|
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
|
github-repos
|
def _InsertEvent(self, event, force_flush=False):
if event:
event_document = {'index': {'_index': self._index_name, '_type': self._document_type}}
event_values = self._GetSanitizedEventValues(event)
self._event_documents.append(event_document)
self._event_documents.append(event_values)
self._number_of_buffered_events += 1
if (force_flush or (self._number_of_buffered_events > self._flush_interval)):
self._FlushEvents()
|
Inserts an event.
Events are buffered in the form of documents and inserted to Elasticsearch
when either forced to flush or when the flush interval (threshold) has been
reached.
Args:
event (EventObject): event.
force_flush (bool): True if buffered event documents should be inserted
into Elasticsearch.
|
codesearchnet
|
def List(self, request, global_params=None):
config = self.GetMethodConfig('List')
return self._RunMethod(config, request, global_params=global_params)
|
Lists all datasets in the specified project to which you have been granted the READER dataset role.
Args:
request: (BigqueryDatasetsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DatasetList) The response message.
|
github-repos
|
def set_computer_name(name):
if six.PY2:
name = _to_unicode(name)
if windll.kernel32.SetComputerNameExW(
win32con.ComputerNamePhysicalDnsHostname, name):
ret = {'Computer Name': {'Current': get_computer_name()}}
pending = get_pending_computer_name()
if pending not in (None, False):
ret['Computer Name']['Pending'] = pending
return ret
return False
|
Set the Windows computer name
Args:
name (str):
The new name to give the computer. Requires a reboot to take effect.
Returns:
dict:
Returns a dictionary containing the old and new names if successful.
``False`` if not.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_computer_name 'DavesComputer'
|
juraj-google-style
|
def overlap(ival0, ival1):
(min0, max0) = ival0
(min1, max1) = ival1
return (max(0, (min(max0, max1) - max(min0, min1))) > 0)
|
Determine if two interval tuples have overlap.
Args:
ival0 ((int,int)): An interval tuple
ival1 ((int,int)): An interval tuple
Returns:
(bool): True if the intervals overlap, otherwise False
|
codesearchnet
|
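Two quick checks of the interval-overlap helper above; note that merely touching endpoints does not count as overlap:

```python
print(overlap((0, 10), (5, 15)))    # True  -> shared span is 5..10
print(overlap((0, 10), (10, 20)))   # False -> zero-length intersection at 10
```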
def _create_none_optionals(func_graph, n):
with func_graph.as_default():
return [gen_optional_ops.optional_none() for _ in range(n)]
|
Creates `n` `None` optionals in func_graph.
Args:
func_graph: FuncGraph.
n: `int` the number of `None` optionals to make.
Returns:
A list of tensors in func_graph.
|
github-repos
|
def broadcast(cls, shape1: 'TensorFluentShape', shape2: 'TensorFluentShape') -> Tuple[(Reshaping, Reshaping)]:
(reshape_1, reshape_2) = (None, None)
if (not (shape1._batch or shape2._batch)):
return (reshape_1, reshape_2)
(size_1, size_2) = (shape1.fluent_size, shape2.fluent_size)
size_diff = abs((size_1 - size_2))
if (size_diff == 0):
return (reshape_1, reshape_2)
if ((size_2 > size_1) and (not ((size_1 == 0) and (not shape1._batch)))):
reshape_1 = (([1] * size_diff) + list(shape1.fluent_shape))
if shape1._batch:
reshape_1 = ([shape1.batch_size] + reshape_1)
elif ((size_1 > size_2) and (not ((size_2 == 0) and (not shape2._batch)))):
reshape_2 = (([1] * size_diff) + list(shape2.fluent_shape))
if shape2._batch:
reshape_2 = ([shape2.batch_size] + reshape_2)
return (reshape_1, reshape_2)
|
It broadcasts the fluent shapes if any input is in batch mode.
It handles input shapes in different modes, expanding its
dimensions if necessary. It outputs a tuple with new shapes.
If no input shape is in batch mode, return (None, None).
If an input shape does not need to be changed, return None.
Args:
shape1: A fluent's shape.
shape2: A fluent's shape.
Returns:
A pair of new shapes.
|
codesearchnet
|
def create_tar_file(source_files, target=None):
if target:
filename = target
else:
(_, filename) = tempfile.mkstemp()
with tarfile.open(filename, mode='w:gz') as t:
for sf in source_files:
t.add(sf, arcname=os.path.basename(sf))
return filename
|
Create a tar file containing all the source_files
Args:
source_files (List[str]): List of file paths that will be contained in the tar file
Returns:
(str): path to created tar file
|
codesearchnet
|
def setReplicationStatus(
self, pid, nodeRef, status, dataoneError=None, vendorSpecific=None
):
response = self.setReplicationStatusResponse(
pid, nodeRef, status, dataoneError, vendorSpecific
)
return self._read_boolean_response(response)
|
See Also: setReplicationStatusResponse()
Args:
pid:
nodeRef:
status:
dataoneError:
vendorSpecific:
Returns:
|
juraj-google-style
|
def loop_until_true_else_raise(timeout_s, function, invert=False, message=None, sleep_s=1):
def validate(x):
return (bool(x) != invert)
result = loop_until_timeout_or_valid(timeout_s, function, validate, sleep_s=sleep_s)
if validate(result):
return result
if (message is not None):
raise RuntimeError(message)
name = '(unknown)'
if hasattr(function, '__name__'):
name = function.__name__
elif (isinstance(function, functools.partial) and hasattr(function.func, '__name__')):
name = function.func.__name__
raise RuntimeError(('Function %s failed to return %s within %d seconds.' % (name, ('falsey' if invert else 'truthy'), timeout_s)))
|
Repeatedly call the given function until truthy, or raise on a timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
invert: If True, wait for the callable to return falsey instead of truthy.
message: Optional custom error message to use on a timeout.
sleep_s: Seconds to sleep between call attempts.
Returns:
The final return value of the function.
Raises:
RuntimeError if the timeout is reached before the function returns truthy.
|
codesearchnet
|
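A hypothetical usage sketch: poll a readiness check for up to 30 seconds; `check_status` is an illustrative stand-in, not part of the snippet:

```python
def device_ready():
    return check_status() == "READY"    # check_status is a placeholder

loop_until_true_else_raise(
    30, device_ready,
    message="Device never reported READY",
    sleep_s=2)
```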
def setPollingValues(self, max_waits, wait_sleep):
self.m_max_waits = max_waits
self.m_wait_sleep = wait_sleep
|
Optional polling loop control
Args:
max_waits (int): waits
wait_sleep (int): ms per wait
|
juraj-google-style
|
def predict_proba(self, L):
Y_pf = LabelModel.predict_proba(self, L)
n, k = Y_pf.shape
Y_p = [np.zeros((n, k_t)) for k_t in self.task_graph.K]
for yi, y in enumerate(self.task_graph.feasible_set()):
for t in range(self.t):
k_t = int(y[t])
Y_p[t][:, k_t - 1] += Y_pf[:, yi]
return Y_p
|
Returns the task marginals estimated by the model: a t-length list of
[n,k_t] matrices where the (i,j) entry of the sth matrix represents the
estimated P((Y_i)_s | \lambda_j(x_i))
Args:
L: A t-length list of [n,m] scipy.sparse label matrices with values
in {0,1,...,k}
|
juraj-google-style
|
def on(self, *qubits: Qid) -> 'gate_operation.GateOperation':
from cirq.ops import gate_operation
return gate_operation.GateOperation(self, list(qubits))
|
Returns an application of this gate to the given qubits.
Args:
*qubits: The collection of qubits to potentially apply the gate to.
|
juraj-google-style
|
def fingerprints(data):
Hashes = namedtuple('Hashes', 'md5 sha1 sha256 sha512')
if six.PY2:
if (not isinstance(data, str)):
data = data.encode('utf-8')
elif six.PY3:
if (not isinstance(data, bytes)):
data = data.encode('utf-8')
md5 = hashlib.md5()
md5.update(data)
md5 = md5.hexdigest()
sha1 = hashlib.sha1()
sha1.update(data)
sha1 = sha1.hexdigest()
sha256 = hashlib.sha256()
sha256.update(data)
sha256 = sha256.hexdigest()
sha512 = hashlib.sha512()
sha512.update(data)
sha512 = sha512.hexdigest()
return Hashes(md5, sha1, sha256, sha512)
|
This function returns the fingerprints of data.
Args:
data (string): raw data
Returns:
namedtuple: fingerprints md5, sha1, sha256, sha512
|
codesearchnet
|
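A usage sketch for the hashing helper above; the digests shown are the standard MD5/SHA-256 values for the string "hello world":

```python
hashes = fingerprints("hello world")
print(hashes.md5)     # 5eb63bbbe01eeed093cb22bb8f5acdc3
print(hashes.sha256)  # b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9
```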
def sgn_prod(p1, p2):
phase = Pauli._prod_phase(p1, p2)
new_pauli = p1 * p2
return new_pauli, phase
|
r"""
Multiply two Paulis and track the phase.
$P_3 = P_1 \otimes P_2$: X*Y
Args:
p1 (Pauli): pauli 1
p2 (Pauli): pauli 2
Returns:
Pauli: the multiplied pauli
complex: the sign of the multiplication, 1, -1, 1j or -1j
|
juraj-google-style
|
def _parse_domain_id(self, config):
match = re.search(r'domain-id (.+)$', config)
value = match.group(1) if match else None
return dict(domain_id=value)
|
Scans the config block and parses the domain-id value
Args:
config (str): The config block to scan
Returns:
dict: A dict object that is intended to be merged into the
resource dict
|
juraj-google-style
|
def modify_job_state(self, job_id, new_state):
if new_state == 'JOB_STATE_DONE':
new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_DONE
elif new_state == 'JOB_STATE_CANCELLED':
new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_CANCELLED
elif new_state == 'JOB_STATE_DRAINING':
new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_DRAINING
else:
return False
request = dataflow.DataflowProjectsLocationsJobsUpdateRequest()
request.jobId = job_id
request.projectId = self.google_cloud_options.project
request.location = self.google_cloud_options.region
request.job = dataflow.Job(requestedState=new_state)
self._client.projects_locations_jobs.Update(request)
return True
|
Modify the run state of the job.
Args:
job_id: The id of the job.
new_state: A string representing the new desired state. It could be set to
either 'JOB_STATE_DONE', 'JOB_STATE_CANCELLED' or 'JOB_STATE_DRAINING'.
Returns:
True if the job was modified successfully.
|
github-repos
|
def get_messages(module):
answer = collections.OrderedDict()
for name in dir(module):
candidate = getattr(module, name)
if inspect.isclass(candidate) and issubclass(candidate, message.Message):
answer[name] = candidate
return answer
|
Discovers all protobuf Message classes in a given import module.
Args:
module (module): A Python module; :func:`dir` will be run against this
module to find Message subclasses.
Returns:
dict[str, google.protobuf.message.Message]: A dictionary with the
Message class names as keys, and the Message subclasses themselves
as values.
|
juraj-google-style
|
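A usage sketch assuming a generated protobuf module is available; `timestamp_pb2` is just a convenient well-known example:

```python
from google.protobuf import timestamp_pb2

messages = get_messages(timestamp_pb2)
print(list(messages))          # e.g. ['Timestamp']
print(messages["Timestamp"])   # the Message subclass itself
```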
def _add_unitary_two(self, gate, qubit0, qubit1):
indexes = einsum_vecmul_index([qubit0, qubit1], self._number_of_qubits)
gate_tensor = np.reshape(np.array(gate, dtype=complex), 4 * [2])
self._statevector = np.einsum(indexes, gate_tensor,
self._statevector,
dtype=complex,
casting='no')
|
Apply a two-qubit unitary matrix.
Args:
gate (matrix_like): the two-qubit gate matrix
qubit0 (int): gate qubit-0
qubit1 (int): gate qubit-1
|
juraj-google-style
|
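The same contraction idea in a standalone numpy sketch; the tensor ordering here is illustrative and may differ from the index string produced by einsum_vecmul_index, but the result matches a plain matrix-vector product:
import numpy as np

cnot = np.array([[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 0]], dtype=complex)

state = np.zeros(4, dtype=complex)
state[2] = 1.0                                   # basis state with index 2

gate_tensor = cnot.reshape(2, 2, 2, 2)           # (out0, out1, in0, in1)
state_tensor = state.reshape(2, 2)               # (qubit0, qubit1) in this sketch's ordering

new_state = np.einsum('abcd,cd->ab', gate_tensor, state_tensor).reshape(4)
assert np.allclose(new_state, cnot @ state)      # same as applying the 4x4 matrix directly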
def set_hostname(self, value=None, default=False, disable=False):
cmd = self.command_builder('hostname', value=value, default=default, disable=disable)
return self.configure(cmd)
|
Configures the global system hostname setting
EosVersion:
4.13.7M
Args:
value (str): The hostname value
default (bool): Controls use of the default keyword
disable (bool): Controls the use of the no keyword
Returns:
bool: True if the commands are completed successfully
|
codesearchnet
|
def add(self, other):
if not isinstance(other, Operator):
other = Operator(other)
if self.dim != other.dim:
raise QiskitError("other operator has different dimensions.")
return Operator(self.data + other.data, self.input_dims(),
self.output_dims())
|
Return the operator self + other.
Args:
other (Operator): an operator object.
Returns:
Operator: the operator self + other.
Raises:
QiskitError: if other is not an operator, or has incompatible
dimensions.
|
juraj-google-style
|
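A hedged usage sketch, assuming qiskit.quantum_info.Operator exposes the add method shown above (newer Qiskit releases may prefer the + operator instead):
import numpy as np
from qiskit.quantum_info import Operator

x = Operator(np.array([[0, 1], [1, 0]], dtype=complex))
z = Operator(np.array([[1, 0], [0, -1]], dtype=complex))

summed = x.add(z)        # element-wise sum of the underlying matrices
print(summed.data)       # [[ 1.+0.j  1.+0.j]
                         #  [ 1.+0.j -1.+0.j]]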
def usufyToOdsExport(d, fPath):
from pyexcel_ods import get_data
try:
oldData = {"OSRFramework": get_data(fPath) }
except Exception:  # fall back to an empty workbook when the file is missing or unreadable
oldData = {"OSRFramework":[]}
tabularData = _generateTabularData(d, oldData)
from pyexcel_ods import save_data
save_data(fPath, tabularData)
|
Workaround to export to a .ods file.
Args:
-----
d: Data to export.
fPath: File path for the output file.
|
juraj-google-style
|
def __getitem__(self, key):
getitem = self._class_to_mock.__dict__.get('__getitem__', None)
if getitem is None:
raise TypeError('unsubscriptable object')
if self._replay_mode:
return MockMethod('__getitem__', self._expected_calls_queue,
self._replay_mode)(key)
return self._CreateMockMethod('__getitem__')(key)
|
Provide custom logic for mocking classes that are subscriptable.
Args:
key: Key to return the value for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__getitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not subscriptable.
UnexpectedMethodCallError if the object does not expect the call to
__getitem__.
|
juraj-google-style
|
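A hedged sketch of how the subscript support is typically exercised with classic mox (record, replay, verify); method names may differ between mox forks:
import mox

m = mox.Mox()
mock_cache = m.CreateMock(dict)         # dict defines __getitem__, so it is mockable

mock_cache['answer'].AndReturn(42)      # record the expected __getitem__ call
m.ReplayAll()

assert mock_cache['answer'] == 42       # replay the recorded call
m.VerifyAll()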
def _einsum_equation(input_shapes, output_shape):
ret = []
next_letter = ord("a")
dim_to_letter = {}
for shape_num, shape in enumerate(input_shapes + [output_shape]):
if shape_num == len(input_shapes):
ret.append("->")
elif shape_num > 0:
ret.append(",")
for d in shape.dims:
if d not in dim_to_letter:
dim_to_letter[d] = chr(next_letter)
next_letter += 1
ret.append(dim_to_letter[d])
return "".join(ret)
|
Turn shapes into an einsum equation.
e.g. "ij,jk->ik"
Args:
input_shapes: a list of Shapes
output_shape: a Shape
Returns:
a string
|
juraj-google-style
|
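The same letter-assignment idea in a standalone form, using plain tuples of dimension names instead of mesh-tensorflow Shape objects (a simplified sketch):
def einsum_equation(input_dims, output_dims):
    parts, letters = [], {}
    next_letter = ord('a')
    for num, dims in enumerate(input_dims + [output_dims]):
        if num == len(input_dims):
            parts.append('->')
        elif num > 0:
            parts.append(',')
        for d in dims:
            if d not in letters:
                letters[d] = chr(next_letter)
                next_letter += 1
            parts.append(letters[d])
    return ''.join(parts)

print(einsum_equation([('i', 'j'), ('j', 'k')], ('i', 'k')))   # ab,bc->ac (same structure as ij,jk->ik)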
def model_fn(features, labels, mode, params, config):
del labels, config
logit_concentration = tf.compat.v1.get_variable(
"logit_concentration",
shape=[1, params["num_topics"]],
initializer=tf.compat.v1.initializers.constant(
_softplus_inverse(params["prior_initial_value"])))
concentration = _clip_dirichlet_parameters(
tf.nn.softplus(logit_concentration))
num_words = features.shape[1]
topics_words_logits = tf.compat.v1.get_variable(
"topics_words_logits",
shape=[params["num_topics"], num_words],
initializer=tf.compat.v1.glorot_normal_initializer())
topics_words = tf.nn.softmax(topics_words_logits, axis=-1)
lda_variational = make_lda_variational(
params["activation"],
params["num_topics"],
params["layer_sizes"])
with ed.tape() as variational_tape:
_ = lda_variational(features)
with ed.tape() as model_tape:
with ed.interception(
make_value_setter(topics=variational_tape["topics_posterior"])):
posterior_predictive = latent_dirichlet_allocation(concentration,
topics_words)
log_likelihood = posterior_predictive.distribution.log_prob(features)
tf.compat.v1.summary.scalar("log_likelihood",
tf.reduce_mean(input_tensor=log_likelihood))
kl = variational_tape["topics_posterior"].distribution.kl_divergence(
model_tape["topics"].distribution)
tf.compat.v1.summary.scalar("kl", tf.reduce_mean(input_tensor=kl))
with tf.control_dependencies(
[tf.compat.v1.assert_greater(kl, -1e-3, message="kl")]):
kl = tf.identity(kl)
elbo = log_likelihood - kl
avg_elbo = tf.reduce_mean(input_tensor=elbo)
tf.compat.v1.summary.scalar("elbo", avg_elbo)
loss = -avg_elbo
global_step = tf.compat.v1.train.get_or_create_global_step()
optimizer = tf.compat.v1.train.AdamOptimizer(params["learning_rate"])
grads_and_vars = optimizer.compute_gradients(loss)
grads_and_vars_except_prior = [
x for x in grads_and_vars if x[1] != logit_concentration]
def train_op_except_prior():
return optimizer.apply_gradients(
grads_and_vars_except_prior,
global_step=global_step)
def train_op_all():
return optimizer.apply_gradients(
grads_and_vars,
global_step=global_step)
train_op = tf.cond(
pred=global_step < params["prior_burn_in_steps"],
true_fn=train_op_except_prior,
false_fn=train_op_all)
words_per_document = tf.reduce_sum(input_tensor=features, axis=1)
log_perplexity = -elbo / words_per_document
tf.compat.v1.summary.scalar(
"perplexity", tf.exp(tf.reduce_mean(input_tensor=log_perplexity)))
(log_perplexity_tensor,
log_perplexity_update) = tf.compat.v1.metrics.mean(log_perplexity)
perplexity_tensor = tf.exp(log_perplexity_tensor)
topics = tf.compat.v1.py_func(
functools.partial(get_topics_strings, vocabulary=params["vocabulary"]),
[topics_words, concentration],
tf.string,
stateful=False)
tf.compat.v1.summary.text("topics", topics)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops={
"elbo": tf.compat.v1.metrics.mean(elbo),
"log_likelihood": tf.compat.v1.metrics.mean(log_likelihood),
"kl": tf.compat.v1.metrics.mean(kl),
"perplexity": (perplexity_tensor, log_perplexity_update),
"topics": (topics, tf.no_op()),
},
)
|
Builds the model function for use in an Estimator.
Arguments:
features: The input features for the Estimator.
labels: The labels, unused here.
mode: Signifies whether it is train or test or predict.
params: Some hyperparameters as a dictionary.
config: The RunConfig, unused here.
Returns:
EstimatorSpec: A tf.estimator.EstimatorSpec instance.
|
juraj-google-style
|
def get_artifact_filename(self, package_name, artifact_name):
project_name = self.packages.normalize(package_name)
return self.records.get((project_name, artifact_name))
|
Similar to pkg_resources.resource_filename; however, this works with the
information cached in this registry instance, and the arguments are not
quite the same.
Arguments:
package_name
The name of the package to get the artifact from
artifact_name
The exact name of the artifact.
Returns the path where the artifact should be if it has been
declared, otherwise None.
|
juraj-google-style
|
def _ParseValueData(self, parser_mediator, registry_key, registry_value):
value_data = registry_value.data
value_data_size = len(value_data)
if value_data_size < 4:
return
header_map = self._GetDataTypeMap('programscache_header')
try:
header = self._ReadStructureFromByteStream(
value_data, 0, header_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse header value with error: {0!s}'.format(
exception))
return
if header.format_version not in (1, 9, 12, 19):
parser_mediator.ProduceExtractionWarning(
'unsupported format version: {0:d}'.format(header.format_version))
return
known_folder_identifier = None
if header.format_version == 1:
value_data_offset = 8
elif header.format_version == 9:
value_data_offset = 6
elif header.format_version in (12, 19):
known_folder_identifier = uuid.UUID(bytes_le=value_data[4:20])
value_data_offset = 20
entry_header_map = self._GetDataTypeMap('programscache_entry_header')
entry_footer_map = self._GetDataTypeMap('programscache_entry_footer')
sentinel = 0
if header.format_version != 9:
try:
entry_footer = self._ReadStructureFromByteStream(
value_data[value_data_offset:], value_data_offset, entry_footer_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse sentinel at offset: 0x{0:08x} '
'with error: {1!s}').format(value_data_offset, exception))
return
value_data_offset += entry_footer_map.GetByteSize()
sentinel = entry_footer.sentinel
link_targets = []
while sentinel in (0x00, 0x01):
if value_data_offset >= value_data_size:
break
try:
entry_header = self._ReadStructureFromByteStream(
value_data[value_data_offset:], value_data_offset, entry_header_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse entry header at offset: 0x{0:08x} '
'with error: {1!s}').format(value_data_offset, exception))
break
value_data_offset += entry_header_map.GetByteSize()
display_name = '{0:s} {1:s}'.format(
registry_key.path, registry_value.name)
shell_items_parser = shell_items.ShellItemsParser(display_name)
shell_items_parser.ParseByteStream(
parser_mediator, value_data[value_data_offset:],
codepage=parser_mediator.codepage)
link_target = shell_items_parser.CopyToPath()
link_targets.append(link_target)
value_data_offset += entry_header.data_size
try:
entry_footer = self._ReadStructureFromByteStream(
value_data[value_data_offset:], value_data_offset, entry_footer_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse entry footer at offset: 0x{0:08x} '
'with error: {1!s}').format(value_data_offset, exception))
return
value_data_offset += entry_footer_map.GetByteSize()
sentinel = entry_footer.sentinel
if known_folder_identifier:
known_folder_identifier = '{0!s}'.format(known_folder_identifier)
event_data = windows_events.WindowsRegistryListEventData()
event_data.key_path = registry_key.path
event_data.known_folder_identifier = known_folder_identifier
event_data.list_name = registry_value.name
event_data.list_values = ' '.join([
'{0:d}: {1:s}'.format(index, link_target)
for index, link_target in enumerate(link_targets)])
event_data.value_name = registry_value.name
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extracts event objects from an Explorer ProgramsCache value data.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
registry_value (dfwinreg.WinRegistryValue): Windows Registry value.
Raises:
ParseError: if the value data could not be parsed.
|
juraj-google-style
|
def get_all(self, uids: Iterable[int]) -> Mapping[int, Record]:
return {uid: self._records[uid] for uid in uids
if uid in self._records}
|
Get records by a set of UIDs.
Args:
uids: The message UIDs.
Returns:
    Mapping of UID to record for every requested UID present in the store.
|
juraj-google-style
|
def Set(self, name, value):
if self.writeback is None:
logging.warning("Attempting to modify a read only config object for %s.",
name)
if name in self.constants:
raise ConstModificationError(
"Attempting to modify constant value %s" % name)
writeback_data = self.writeback_data
if value is not None:
if isinstance(value, Text):
value = self.EscapeString(value)
writeback_data[name] = value
self.FlushCache()
|
Update the configuration option with a new value.
Note that this forces the value to be set for all contexts. The value is
written to the writeback location if Save() is later called.
Args:
name: The name of the parameter to set.
value: The value to set it to. The value will be validated against the
option's type descriptor.
Raises:
ConstModificationError: When attempting to change a constant option.
|
juraj-google-style
|
def number_text_lines(text):
numbered_linelist = [
''.join((('%2d' % (count + 1)), ' >>> ', line))
for count, line in enumerate(text.splitlines())
]
text_with_lineno = '\n'.join(numbered_linelist)
return text_with_lineno
|
Args:
text (str):
Returns:
str: text_with_lineno - string with numbered lines
|
juraj-google-style
|
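A quick usage sketch of the helper above (the snippet string is hypothetical):
sample = 'def f(x):\n    return x + 1'
print(number_text_lines(sample))
#  1 >>> def f(x):
#  2 >>>     return x + 1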
def add_file_recursive(self, filename, trim=False):
assert (not self.final), 'Trying to mutate a final graph.'
self.add_source_file(filename)
queue = collections.deque([filename])
seen = set()
while queue:
filename = queue.popleft()
self.graph.add_node(filename)
try:
(deps, broken) = self.get_file_deps(filename)
except parsepy.ParseError:
if filename.endswith('.py'):
self.unreadable_files.add(filename)
else:
self.graph.remove_node(filename)
continue
for f in broken:
self.broken_deps[filename].add(f)
for f in deps:
if self.follow_file(f, seen, trim):
queue.append(f)
seen.add(f)
self.graph.add_node(f)
self.graph.add_edge(filename, f)
|
Add a file and all its recursive dependencies to the graph.
Args:
filename: The name of the file.
trim: Whether to trim the dependencies of builtin and system files.
|
codesearchnet
|
def patch_with_options(request, options, parent_queue_item=None):
request.auth = copy.deepcopy(options.identity.auth)
request.cookies = copy.deepcopy(options.identity.cookies)
request.headers = copy.deepcopy(options.identity.headers)
request.proxies = copy.deepcopy(options.identity.proxies)
request.timeout = copy.copy(options.performance.request_timeout)
if parent_queue_item is not None:
for cookie in parent_queue_item.request.cookies:
request.cookies.set(cookie.name, cookie.value, domain=cookie.domain, path=cookie.path)
for cookie in parent_queue_item.response.cookies:
request.cookies.set(cookie.name, cookie.value, domain=cookie.domain, path=cookie.path)
if options.misc.verify_ssl_certificates and options.misc.trusted_certificates:
request.verify = options.misc.trusted_certificates
else:
request.verify = options.misc.verify_ssl_certificates
|
Patch the given request with the given options (e.g. user agent).
Args:
request (:class:`nyawc.http.Request`): The request to patch.
options (:class:`nyawc.Options`): The options to patch the request with.
parent_queue_item (:class:`nyawc.QueueItem`): The parent queue item object (request/response pair), if it exists.
|
juraj-google-style
|
def visit_statements(self, nodes):
for node in nodes:
if isinstance(node, gast.AST):
self.to_prepend.append(deque())
self.to_append.append(deque())
node = self.visit(node)
self.visit_statements(self.to_prepend.pop())
if isinstance(node, gast.AST):
self.to_insert[(- 1)].append(node)
elif node:
self.to_insert[(- 1)].extend(node)
self.visit_statements(self.to_append.pop())
else:
self.to_insert[(- 1)].append(node)
return self.to_insert[(- 1)]
|
Visit a series of nodes in a node body.
This function is factored out so that it can be called recursively on
statements that are appended or prepended. This allows e.g. a nested
expression to prepend a statement, and that statement can prepend a
statement again, etc.
Args:
nodes: A list of statements.
Returns:
A list of transformed statements.
|
codesearchnet
|
def FindFieldByName(self, full_name):
full_name = _NormalizeFullyQualifiedName(full_name)
(message_name, _, field_name) = full_name.rpartition('.')
message_descriptor = self.FindMessageTypeByName(message_name)
return message_descriptor.fields_by_name[field_name]
|
Loads the named field descriptor from the pool.
Args:
full_name: The full name of the field descriptor to load.
Returns:
The field descriptor for the named field.
Raises:
KeyError: if the field cannot be found in the pool.
|
codesearchnet
|
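A hedged usage sketch against protobuf's default descriptor pool (importing timestamp_pb2 registers the Timestamp descriptors it looks up):
from google.protobuf import descriptor_pool
from google.protobuf import timestamp_pb2  # noqa: F401  (registers Timestamp in the default pool)

pool = descriptor_pool.Default()
field = pool.FindFieldByName('google.protobuf.Timestamp.seconds')
print(field.full_name)   # google.protobuf.Timestamp.seconds
print(field.number)      # 1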
def getmtime(self, path=None, client_kwargs=None, header=None):
return self._getmtime_from_header(self.head(path, client_kwargs, header))
|
Return the time of last modification of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
float: The number of seconds since the epoch
(see the time module).
|
codesearchnet
|
def _audience_condition_deserializer(obj_dict):
return [
obj_dict.get('name'),
obj_dict.get('value'),
obj_dict.get('type'),
obj_dict.get('match')
]
|
Deserializer defining how dict objects need to be decoded for audience conditions.
Args:
obj_dict: Dict representing one audience condition.
Returns:
List consisting of condition key with corresponding value, type and match.
|
juraj-google-style
|
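Because the deserializer is a pure function over one dict, a usage sketch is straightforward (the condition values are hypothetical):
condition = {
    'name': 'device_type',
    'value': 'iphone',
    'type': 'custom_attribute',
    'match': 'exact',
}
print(_audience_condition_deserializer(condition))
# ['device_type', 'iphone', 'custom_attribute', 'exact']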
def _create_disk(self, name, spec, template_repo=None, template_store=None):
LOGGER.debug(('Spec: %s' % spec))
with LogTask(('Create disk %s' % spec['name'])):
disk_metadata = {}
if (spec['type'] == 'template'):
(disk_path, disk_metadata) = self._handle_template(host_name=name, template_spec=spec, template_repo=template_repo, template_store=template_store)
elif (spec['type'] == 'empty'):
(disk_path, disk_metadata) = self._handle_empty_disk(host_name=name, disk_spec=spec)
elif (spec['type'] == 'file'):
(disk_path, disk_metadata) = self._handle_file_disk(disk_spec=spec)
else:
raise RuntimeError(('Unknown drive spec %s' % str(spec)))
return (disk_path, disk_metadata)
|
Creates a disk with the given name from the given repo or store
Args:
name (str): Name of the domain to create the disk for
spec (dict): Specification of the disk to create
template_repo (TemplateRepository or None): template repo instance
to use
template_store (TemplateStore or None): template store instance to
use
Returns:
Tuple(str, dict): Path to the disk and disk metadata
Raises:
RuntimeError: If the type of the disk is not supported or failed to
create the disk
|
codesearchnet
|
def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.LongTensor):
image_tokens = self.get_image_tokens(pixel_values, image_sizes)
split_sizes = [(height // self.vqmodel.vision_spatial_factor) * (width // self.vqmodel.vision_spatial_factor + 1) for height, width in image_sizes]  # assumed reconstruction of the per-image token count
image_features = self.get_input_embeddings()(image_tokens)
image_features = torch.split(image_features, split_sizes)
return image_features
|
Tokenizes images into discrete tokens with VQGAN module and embeds
them with text embeddings layer
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
    The tensors corresponding to the input images.
image_sizes (`torch.LongTensor`):
    The (height, width) of each input image, used to split the embedded tokens back into per-image chunks.
|
github-repos
|
def __ne__(self, other):
if not isinstance(other, DateTimeValues):
return True
normalized_timestamp = self._GetNormalizedTimestamp()
other_normalized_timestamp = other._GetNormalizedTimestamp()
if normalized_timestamp is None and other_normalized_timestamp is not None:
return True
if normalized_timestamp is not None and other_normalized_timestamp is None:
return True
return normalized_timestamp != other_normalized_timestamp
|
Determines if the date time values are not equal to other.
Args:
other (DateTimeValues): date time values to compare against.
Returns:
bool: True if the date time values are not equal to other.
|
juraj-google-style
|
def cherry_pick(self, branch, **kwargs):
path = ('%s/%s/cherry_pick' % (self.manager.path, self.get_id()))
post_data = {'branch': branch}
self.manager.gitlab.http_post(path, post_data=post_data, **kwargs)
|
Cherry-pick a commit into a branch.
Args:
branch (str): Name of target branch
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCherryPickError: If the cherry-pick could not be performed
|
codesearchnet
|
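A hedged usage sketch with python-gitlab; the URL, token, project id and commit SHA below are placeholders:
import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
project = gl.projects.get(42)
commit = project.commits.get('6b2257ea')      # any reachable commit SHA
commit.cherry_pick(branch='release-1.0')      # raises GitlabCherryPickError on conflict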
def has_platform(self, platform):
if (platform and (not isinstance(platform, dict))):
parts = platform.split('/')
if ((len(parts) > 3) or (len(parts) < 1)):
raise InvalidArgument('"{0}" is not a valid platform descriptor'.format(platform))
platform = {'os': parts[0]}
if (len(parts) > 2):
platform['variant'] = parts[2]
if (len(parts) > 1):
platform['architecture'] = parts[1]
return (normalize_platform(platform, self.client.version()) in self.attrs['Platforms'])
|
Check whether the given platform identifier is available for this
digest.
Args:
platform (str or dict): A string using the ``os[/arch[/variant]]``
format, or a platform dictionary.
Returns:
(bool): ``True`` if the platform is recognized as available,
``False`` otherwise.
Raises:
:py:class:`docker.errors.InvalidArgument`
If the platform argument is not a valid descriptor.
|
codesearchnet
|
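A hedged usage sketch with docker-py; it needs a reachable Docker daemon and registry access, and the image name is illustrative:
import docker

client = docker.from_env()
regdata = client.images.get_registry_data('alpine:latest')
print(regdata.has_platform('linux/amd64'))
print(regdata.has_platform('linux/arm64/v8'))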
def VerifySignature(self, message, signature, public_key, unhex=True):
return Crypto.VerifySignature(message, signature, public_key, unhex=unhex)
|
Verify the integrity of the message.
Args:
message (str): the message to verify.
signature (bytearray): the signature belonging to the message.
public_key (ECPoint): the public key to use for verifying the signature.
unhex (bool): whether the message should be unhexlified before verifying
Returns:
bool: True if verification passes. False otherwise.
|
juraj-google-style
|
def _PromptUserForInput(self, input_text):
self._output_writer.Write('{0:s}: '.format(input_text))
return self._input_reader.Read()
|
Prompts user for an input.
Args:
input_text (str): text used for prompting the user for input.
Returns:
str: input read from the user.
|
juraj-google-style
|