code: string (lengths 20–4.93k)
docstring: string (lengths 33–1.27k)
source: string (3 classes)
def starts_when(iterable, condition): if (not callable(condition)): cond_value = condition def condition(x): return (x == cond_value) return itertools.dropwhile((lambda x: (not condition(x))), iterable)
Start yielding items when a condition arises. Args: iterable: the iterable to filter. condition: if the callable returns True once, start yielding items. If it's not a callable, it will be converted to one as `lambda item: item == condition`. Example: >>> list(starts_when(range(10), lambda x: x > 5)) [6, 7, 8, 9] >>> list(starts_when(range(10), 7)) [7, 8, 9]
codesearchnet
def _SetRow(self, new_values, row=0): if (not row): row = self._row_index if (row > self.size): raise TableError(('Entry %s beyond table size %s.' % (row, self.size))) self._table[row].values = new_values
Sets the current row to new list. Args: new_values: List|dict of new values to insert into row. row: int, Row to insert values into. Raises: TableError: If number of new values is not equal to row size.
codesearchnet
def prerequisite_check(): if (sys.version_info < (3, 6)): version_str = ('%s.%s.%s' % sys.version_info[:3]) search_url = build_search_query((_('install') + ' Python 3.7')) return _('EH Forwarder Bot requires a minimum of Python 3.6 to run. You are currently using Python {version}. \n\nYou may want to try:\n{url}').format(version=version_str, url=search_url) modules_err = _('You may want to visit the modules repository to find a list of available modules to install.\nhttps:') try: next(pkg_resources.iter_entry_points('ehforwarderbot.master')) except StopIteration: return ((_('No master channel detected. EH Forwarder Bot requires at least one master channel installed to run.') + '\n\n') + modules_err) try: next(pkg_resources.iter_entry_points('ehforwarderbot.slave')) except StopIteration: return ((_('No slave channel detected. EH Forwarder Bot requires at least one slave channel installed to run.') + '\n\n') + modules_err)
Check prerequisites of the framework, including Python version, installation of modules, etc. Returns: Optional[str]: If the check is not passed, return error message regarding failed test case. None is returned otherwise.
codesearchnet
def update(table, columns, values): rows = len(values) cells = len(columns) * len(values) return _Mutator(mutation=Mutation(update=batch._make_write_pb(table, columns, values)), operation=WriteMutation._OPERATION_UPDATE, rows=rows, cells=cells, kwargs={'table': table, 'columns': columns, 'values': values})
Update one or more existing table rows. Args: table: Name of the table to be modified. columns: Name of the table columns to be modified. values: Values to be modified.
github-repos
def _isbn_cleaner(fn): @wraps(fn) def wrapper(isbn): return fn(_clean_isbn(isbn)) return wrapper
Decorator for calling other functions from this module. The purpose of this decorator is to clean the ISBN string of garbage and pass the resulting list of digits to the wrapped function. Args: fn (function): function whose ISBN argument will be wrapped in a :func:`_clean_isbn` call.
codesearchnet
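A minimal sketch of how a cleaning decorator like the one above is typically applied. The `_clean_isbn` helper and the `count_digits` function here are hypothetical stand-ins for the module's own code, used only to show the calling pattern.

```python
from functools import wraps

def _clean_isbn(isbn):
    # Hypothetical cleaner: keep digits and a trailing 'X' check character.
    return [c for c in isbn.upper() if c.isdigit() or c == "X"]

def _isbn_cleaner(fn):
    @wraps(fn)
    def wrapper(isbn):
        return fn(_clean_isbn(isbn))
    return wrapper

@_isbn_cleaner
def count_digits(isbn):
    # Receives the cleaned list of characters, not the raw string.
    return len(isbn)

print(count_digits("ISBN 978-80-7169-860-0"))  # 13
```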
def Close(self, abort=False): if not self._closed_event or not self._terminate_event: raise RuntimeError('Missing closed or terminate event.') if not abort and self._closed_event.is_set(): raise errors.QueueAlreadyClosed() self._closed_event.set() if abort: if not self._closed_event.is_set(): logger.warning( '{0:s} queue aborting. Contents may be lost.'.format(self.name)) self._linger_seconds = 0 self._terminate_event.set() else: logger.debug( '{0:s} queue closing, will linger for up to {1:d} seconds'.format( self.name, self._linger_seconds))
Closes the queue. Args: abort (Optional[bool]): whether the Close is the result of an abort condition. If True, queue contents may be lost. Raises: QueueAlreadyClosed: if the queue is not started, or has already been closed. RuntimeError: if closed or terminate event is missing.
juraj-google-style
def remove(self, iterable, data=None): return self.root.remove(iterable, data=data)
Used to remove from the root node Args: iterable(hashable): index or key used to identify item to remove data: data to be paired with the key
codesearchnet
def send(self, cumulative_counters=None, gauges=None, counters=None): if not gauges and not cumulative_counters and not counters: return data = { 'cumulative_counter': cumulative_counters, 'gauge': gauges, 'counter': counters, } _logger.debug('Sending datapoints to SignalFx: %s', data) for metric_type, datapoints in data.items(): if not datapoints: continue if not isinstance(datapoints, list): raise TypeError('Datapoints not of type list %s', datapoints) for datapoint in datapoints: self._add_extra_dimensions(datapoint) self._add_to_queue(metric_type, datapoint) self._start_thread()
Send the given metrics to SignalFx. Args: cumulative_counters (list): a list of dictionaries representing the cumulative counters to report. gauges (list): a list of dictionaries representing the gauges to report. counters (list): a list of dictionaries representing the counters to report.
juraj-google-style
def GetFileEntryByPathSpec(self, path_spec): if not self.FileEntryExistsByPathSpec(path_spec): return None location = getattr(path_spec, 'location', None) if len(location) == 1: return zip_file_entry.ZipFileEntry( self._resolver_context, self, path_spec, is_root=True, is_virtual=True) kwargs = {} try: kwargs['zip_info'] = self._zip_file.getinfo(location[1:]) except KeyError: kwargs['is_virtual'] = True return zip_file_entry.ZipFileEntry( self._resolver_context, self, path_spec, **kwargs)
Retrieves a file entry for a path specification. Args: path_spec (PathSpec): path specification of the file entry. Returns: ZipFileEntry: a file entry or None.
juraj-google-style
def create(self, model_name): body = {'name': model_name} parent = 'projects/' + self._project_id return self._api.projects().models().create(body=body, parent=parent).execute()
Create a model. Args: model_name: the short name of the model, such as "iris". Returns: If successful, returns information about the model, such as {u'regions': [u'us-central1'], u'name': u'projects/myproject/models/mymodel'} Raises: If the model creation failed.
juraj-google-style
def render_template(self, template_name, out_path=None): return render_template(template_name, self.to_dict(), out_path=out_path)
Render a template based on this TileBus Block. The template has access to all of the attributes of this block as a dictionary (the result of calling self.to_dict()). You can optionally render to a file by passing out_path. Args: template_name (str): The name of the template to load. This must be a file in config/templates inside this package out_path (str): An optional path of where to save the output file, otherwise it is just returned as a string. Returns: string: The rendered template data.
codesearchnet
def auth_proxy(self, method): def _proxy(*args, **kwargs): return method(self.session, *args, **kwargs) return _proxy
Authentication proxy for API requests. This is required because the API objects are naive of ``HelpScout``, so they would otherwise be unauthenticated. Args: method (callable): A method call that should be authenticated. It should accept a ``requests.Session`` as its first parameter, which should be used for the actual API call. Returns: mixed: The results of the authenticated callable.
juraj-google-style
def plot_zt_mu(self, temp=600, output='eig', relaxation_time=1e-14, xlim=None): import matplotlib.pyplot as plt plt.figure(figsize=(9, 7)) zt = self._bz.get_zt(relaxation_time=relaxation_time, output=output, doping_levels=False)[temp] plt.plot(self._bz.mu_steps, zt, linewidth=3.0) self._plot_bg_limits() self._plot_doping(temp) if output == 'eig': plt.legend(['ZT$_1$', 'ZT$_2$', 'ZT$_3$']) if xlim is None: plt.xlim(-0.5, self._bz.gap + 0.5) else: plt.xlim(xlim) plt.ylabel("ZT", fontsize=30.0) plt.xlabel("E-E$_f$ (eV)", fontsize=30.0) plt.xticks(fontsize=25) plt.yticks(fontsize=25) plt.tight_layout() return plt
Plot ZT as a function of the Fermi level. Args: temp: the temperature output: 'eig' (default) plots the three eigenvalue components of ZT relaxation_time: a relaxation time in s (1e-14 by default) xlim: a list of min and max Fermi energy; by default (-0.5, band gap + 0.5) Returns: a matplotlib object
juraj-google-style
def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]: return self._val if self._is_some else callback()
Returns the contained value or computes it from ``callback``. Args: callback: The default callback. Returns: The contained value if the :py:class:`Option` is ``Some``, otherwise ``callback()``. Examples: >>> Some(0).unwrap_or_else(lambda: 111) 0 >>> NONE.unwrap_or_else(lambda: 'ha') 'ha'
juraj-google-style
def get(self, object_id): if object_id is None: return return self._obj_ids_to_obj.get(object_id)
Given a shared object ID, returns a previously instantiated object. Args: object_id: shared object ID to use when attempting to find already-loaded object. Returns: The object, if we've seen this ID before. Else, `None`.
github-repos
def get_file_tracebacks(self, file_path): if (file_path not in self._source_file_content): raise ValueError(('Source file of path "%s" has not been received by this instance of SourceManager.' % file_path)) lineno_to_op_names_and_stack_position = dict() for op_log_entry in self._graph_traceback.log_entries: for (stack_pos, trace) in enumerate(op_log_entry.code_def.traces): if (self._graph_traceback.id_to_string[trace.file_id] == file_path): if (trace.lineno not in lineno_to_op_names_and_stack_position): lineno_to_op_names_and_stack_position[trace.lineno] = [] lineno_to_op_names_and_stack_position[trace.lineno].append((op_log_entry.name, stack_pos)) return lineno_to_op_names_and_stack_position
Get the lists of ops created at lines of a specified source file. Args: file_path: Path to the source file. Returns: A dict mapping line number to a list of 2-tuples, `(op_name, stack_position)`. `op_name` is the name of the op whose creation traceback includes the line. `stack_position` is the position of the line in the op's creation traceback, represented as a 0-based integer. Raises: ValueError: If `file_path` does not point to a source file that has been received by this instance of `SourceManager`.
codesearchnet
def _reshape_tensors(tensors, shape): reshaped = [] for t in tensors: with ops.colocate_with(t): reshaped.append(array_ops.reshape(t, shape)) return reshaped
Reshape tensors flattened by _flatten_tensors. Args: tensors: list of identical-length 1-D `tf.Tensor`s. shape: list of integers describing the desired shape. Product of the elements must equal the length of each tensor. Returns: list of `tf.Tensor` which are the reshaped inputs.
github-repos
async def get(self, uid: int, cached_msg: CachedMessage = None, requirement: FetchRequirement = FetchRequirement.METADATA) \ -> Optional[MessageT]: ...
Return the message with the given UID. Args: uid: The message UID. cached_msg: The last known cached message. requirement: The data required from each message. Raises: IndexError: The UID is not valid in the mailbox.
juraj-google-style
def sparse_segment_sqrt_n(data, indices, segment_ids, name=None, num_segments=None, sparse_gradient=False): if num_segments is not None: return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(data=data, indices=indices, segment_ids=segment_ids, num_segments=num_segments, name=name, sparse_gradient=sparse_gradient) else: return gen_math_ops.sparse_segment_sqrt_n(data=data, indices=indices, segment_ids=segment_ids, name=name, sparse_gradient=sparse_gradient)
Computes the sum along sparse segments of a tensor divided by the sqrt(N). `N` is the size of the segment being reduced. Args: data: A `Tensor` with data that will be assembled in the output. indices: A 1-D `Tensor` with indices into `data`. Has same rank as `segment_ids`. segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values should be sorted and can be repeated. name: A name for the operation (optional). num_segments: An optional int32 scalar. Indicates the size of the output `Tensor`. sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the gradient of this function will be sparse (IndexedSlices) instead of dense (Tensor). Returns: A `Tensor` of the same shape as `data`, except for dimension 0, which has size `k`, the number of segments specified via `num_segments` or inferred from the last element in `segment_ids`.
github-repos
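A small hedged example of how this op is usually reached through the public `tf.sparse.segment_sqrt_n` wrapper (assuming that alias is available in the installed TensorFlow). Segment 0 gathers two rows, so their element-wise sum is divided by sqrt(2).

```python
import tensorflow as tf

data = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
indices = tf.constant([0, 1])      # rows of `data` to gather
segment_ids = tf.constant([0, 0])  # both gathered rows fall into segment 0

# Segment 0 contains two rows, so the result is their sum divided by sqrt(2):
# [[(1 + 3) / sqrt(2), (2 + 4) / sqrt(2)]]
result = tf.sparse.segment_sqrt_n(data, indices, segment_ids)
print(result)
```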
def encode_dataset(dataset, vocabulary): def encode(features): return {k: vocabulary.encode_tf(v) for k, v in features.items()} return dataset.map(encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
Encode from strings to token ids. Args: dataset: a tf.data.Dataset with string values. vocabulary: a mesh_tensorflow.transformer.Vocabulary Returns: a tf.data.Dataset with integer-vector values ending in EOS=1
juraj-google-style
def get_imap_capabilities(server): capabilities = list(map(str, list(server.capabilities()))) for i in range(len(capabilities)): capabilities[i] = str(capabilities[i]).replace("b'", '').replace("'", '') logger.debug('IMAP server supports: {0}'.format(capabilities)) return capabilities
Returns a list of an IMAP server's capabilities Args: server (imapclient.IMAPClient): An instance of imapclient.IMAPClient Returns (list): A list of capabilities
codesearchnet
def __init__(self, in_features, out_features, mlp_dim=128): super().__init__() self.conv1 = nn.Conv2d(in_features, mlp_dim, 1, 1, 0) self.act = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(mlp_dim, out_features, 1, 1, 0)
Projector MLP. Args: in_features (`int`): Number of input channels. out_features (`int`): Number of output channels. mlp_dim (`int`, *optional*, defaults to 128): Hidden dimension.
github-repos
def apply_gradients(self, grads_and_vars, global_step=None, name=None): if not grads_and_vars: raise ValueError('Must supply at least one variable') if global_step is None: raise ValueError('Global step is required to check staleness') self._global_step = global_step train_ops = [] aggregated_grad = [] var_list = [] local_anchor = control_flow_ops.no_op() distribution_strategy = distribute_lib.get_strategy() with distribution_strategy.extended.colocate_vars_with(local_anchor): self._local_step = variable_v1.VariableV1(initial_value=0, trainable=False, collections=[ops.GraphKeys.LOCAL_VARIABLES], dtype=global_step.dtype.base_dtype, name='sync_rep_local_step') self.local_step_init_op = state_ops.assign(self._local_step, global_step) chief_init_ops = [self.local_step_init_op] self.ready_for_local_init_op = variables.report_uninitialized_variables(variables.global_variables()) with ops.name_scope(None, self._name): for grad, var in grads_and_vars: var_list.append(var) with ops.device(var.device): if grad is None: aggregated_grad.append(None) continue elif isinstance(grad, tensor.Tensor): grad_accum = data_flow_ops.ConditionalAccumulator(grad.dtype, shape=var.get_shape(), shared_name=var.name + '/grad_accum') train_ops.append(grad_accum.apply_grad(grad, local_step=self._local_step)) aggregated_grad.append(grad_accum.take_grad(self._replicas_to_aggregate)) else: if not isinstance(grad, indexed_slices.IndexedSlices): raise ValueError('Unknown grad type!') grad_accum = data_flow_ops.SparseConditionalAccumulator(grad.dtype, shape=(), shared_name=var.name + '/grad_accum') train_ops.append(grad_accum.apply_indexed_slices_grad(grad, local_step=self._local_step)) aggregated_grad.append(grad_accum.take_indexed_slices_grad(self._replicas_to_aggregate)) self._accumulator_list.append((grad_accum, var.device)) aggregated_grads_and_vars = zip(aggregated_grad, var_list) with ops.device(global_step.device), ops.name_scope(''): update_op = self._opt.apply_gradients(aggregated_grads_and_vars, global_step) with ops.device(global_step.device), ops.name_scope(''): sync_token_queue = data_flow_ops.FIFOQueue(-1, global_step.dtype.base_dtype, shapes=(), name='sync_token_q', shared_name='sync_token_q') self._sync_token_queue = sync_token_queue with ops.device(global_step.device), ops.name_scope(''): with ops.control_dependencies(train_ops): token = sync_token_queue.dequeue() train_op = state_ops.assign(self._local_step, token) with ops.control_dependencies([update_op]): tokens = array_ops.fill([self._tokens_per_step], global_step) sync_op = sync_token_queue.enqueue_many((tokens,)) if self._variable_averages is not None: with ops.control_dependencies([sync_op]), ops.name_scope(''): sync_op = self._variable_averages.apply(self._variables_to_average) self._chief_queue_runner = queue_runner.QueueRunner(sync_token_queue, [sync_op]) for accum, dev in self._accumulator_list: with ops.device(dev): chief_init_ops.append(accum.set_global_step(global_step, name='SetGlobalStep')) self.chief_init_op = control_flow_ops.group(*chief_init_ops) self._gradients_applied = True return train_op
Apply gradients to variables. This contains most of the synchronization implementation and also wraps the apply_gradients() from the real optimizer. Args: grads_and_vars: List of (gradient, variable) pairs as returned by compute_gradients(). global_step: Optional Variable to increment by one after the variables have been updated. name: Optional name for the returned operation. Default to the name passed to the Optimizer constructor. Returns: train_op: The op to dequeue a token so the replicas can exit this batch and start the next one. This is executed by each replica. Raises: ValueError: If the grads_and_vars is empty. ValueError: If global step is not provided, the staleness cannot be checked.
github-repos
def _sort_or_argsort(values, axis, direction, return_argsort): if direction not in _SORT_IMPL: valid_directions = ', '.join(sorted(_SORT_IMPL.keys())) raise ValueError(f'Argument `direction` should be one of {valid_directions}. Received: direction={direction}') axis = framework_ops.convert_to_tensor(axis, name='axis') axis_static = tensor_util.constant_value(axis) if axis.shape.ndims not in (None, 0) or axis_static is None: raise ValueError(f'Argument `axis` must be a constant scalar. Received: axis={axis}.') axis_static = int(axis_static) values = framework_ops.convert_to_tensor(values, name='values') return _SORT_IMPL[direction](values, axis_static, return_argsort)
Internal sort/argsort implementation. Args: values: The input values. axis: The axis along which to sort. direction: 'ASCENDING' or 'DESCENDING'. return_argsort: Whether to return the argsort result. Returns: Either the sorted values, or the indices of the sorted values in the original tensor. See the `sort` and `argsort` docstrings. Raises: ValueError: If axis is not a constant scalar, or the direction is invalid.
github-repos
def call(self, x): with tf.name_scope('embedding'): embeddings = tf.gather(self.shared_weights, x) embeddings *= (self.hidden_size ** 0.5) padding = model_utils.get_padding(x) embeddings *= tf.expand_dims((1 - padding), (- 1)) return embeddings
Get token embeddings of x. Args: x: An int64 tensor with shape [batch_size, length] Returns: embeddings: float32 tensor with shape [batch_size, length, embedding_size] padding: float32 tensor with shape [batch_size, length] indicating the locations of the padding tokens in x.
codesearchnet
def configTestMesh(device_type_mesh_map: typing.Dict[typing.Text, layout_lib.Mesh]) -> layout_lib.Mesh: reset_context() def get_mesh(device_type): mesh = device_type_mesh_map.get(device_type, None) if mesh is None: raise ValueError('Requires a %s mesh to run test on %s.' % (device_type, device_type)) return mesh mesh = None if is_tpu_present(): mesh = get_mesh('TPU') reset_context() accelerator_util.initialize_accelerator_system('TPU') elif tf_config.list_physical_devices('GPU'): mesh = get_mesh('GPU') reset_logical_devices('GPU', np.prod(mesh.shape())) accelerator_util.initialize_accelerator_system('GPU') else: mesh = get_mesh('CPU') reset_logical_devices('CPU', np.prod(mesh.shape())) accelerator_util.initialize_accelerator_system('CPU') return mesh
Configures the corresponding mesh for the given test context. If it runs on a CPU mesh, sets virtual devices on CPU. If it runs on a GPU mesh, sets virtual devices on GPU with proper memory limits. If it runs on a TPU mesh, initializes the TPU system. Args: device_type_mesh_map: A dictionary containing a device_type -> mesh mapping. Returns: A properly configured mesh for use in the test.
github-repos
def extractBests(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0, limit=5): best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff) return (heapq.nlargest(limit, best_list, key=(lambda i: i[1])) if (limit is not None) else sorted(best_list, key=(lambda i: i[1]), reverse=True))
Get a list of the best matches to a collection of choices. Convenience function for getting the choices with best scores. Args: query: A string to match against choices: A list or dictionary of choices, suitable for use with extract(). processor: Optional function for transforming choices before matching. See extract(). scorer: Scoring function for extract(). score_cutoff: Optional argument for score threshold. No matches with a score less than this number will be returned. Defaults to 0. limit: Optional maximum for the number of elements returned. Defaults to 5. Returns: A list of (match, score) tuples.
codesearchnet
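A brief usage sketch, assuming this is the `extractBests` helper exposed by a fuzzy-matching package such as fuzzywuzzy (or its thefuzz successor). The scores shown in the comment are illustrative only, since they depend on the scorer in use.

```python
from fuzzywuzzy import process  # or: from thefuzz import process

choices = ["New York Jets", "New York Giants", "Dallas Cowboys"]

# Returns up to `limit` (match, score) tuples, best scores first,
# dropping anything below `score_cutoff`.
best = process.extractBests("new york", choices, score_cutoff=50, limit=2)
print(best)  # e.g. [('New York Jets', 90), ('New York Giants', 86)]
```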
def removeRow(self, triggered): if triggered: model = self.tableView.model() selection = self.tableView.selectedIndexes() rows = [index.row() for index in selection] model.removeDataFrameRows(set(rows)) self.sender().setChecked(False)
Removes a row from the model. This method is also a slot. Args: triggered (bool): If the corresponding button was activated, the selected row will be removed from the model.
codesearchnet
def export_msdt(self, filename): fmt = "csv" if filename.lower().endswith(".csv") else "dat" delimiter = ", " if fmt == "csv" else " " with open(filename, "wt") as f: if fmt == "dat": f.write("# ") f.write(delimiter.join(["t", "MSD", "MSD_a", "MSD_b", "MSD_c", "MSCD"])) f.write("\n") for dt, msd, msdc, mscd in zip(self.dt, self.msd, self.msd_components, self.mscd): f.write(delimiter.join(["%s" % v for v in [dt, msd] + list( msdc) + [mscd]])) f.write("\n")
Writes MSD data to a csv file that can be easily plotted in other software. Args: filename (str): Filename. Supported formats are csv and dat. If the extension is csv, a csv file is written. Otherwise, a dat format is assumed.
juraj-google-style
def campaign(self, name, owner=None, **kwargs): return Campaign(self.tcex, name, owner=owner, **kwargs)
Create the Campaign TI object. Args: name: The name of the Campaign. owner: The name of the ThreatConnect owner (optional). **kwargs: Additional keyword arguments passed to the Campaign object. Returns: A Campaign TI object.
juraj-google-style
def text(name, data, step=None, description=None): summary_metadata = metadata.create_summary_metadata(display_name=None, description=description) summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None) or tf.summary.summary_scope) with summary_scope(name, 'text_summary', values=[data, step]) as (tag, _): tf.debugging.assert_type(data, tf.string) return tf.summary.write(tag=tag, tensor=data, step=step, metadata=summary_metadata)
Write a text summary. Arguments: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A UTF-8 string tensor value. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. Returns: True on success, or false if no summary was emitted because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None.
codesearchnet
def __init__(self, xid=None, flags=None, miss_send_len=None): super().__init__(xid) self.flags = flags self.miss_send_len = miss_send_len
Create a SwitchConfig with the optional parameters below. Args: xid (int): xid to be used on the message header. flags (ConfigFlag): OFPC_* flags. miss_send_len (int): UBInt16 max bytes of new flow that the datapath should send to the controller.
juraj-google-style
def Delete(self, request, global_params=None): config = self.GetMethodConfig('Delete') return self._RunMethod(config, request, global_params=global_params)
Deletes the table specified by tableId from the dataset. If the table contains data, all the data will be deleted. Args: request: (BigqueryTablesDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (BigqueryTablesDeleteResponse) The response message.
github-repos
def diff(f, s): if (isinstance(f, base.Root) or (f._yang_type in ('container', None))): result = _diff_root(f, s) elif (f._yang_type in ('list',)): result = _diff_list(f, s) else: result = {} first = '{}'.format(f) second = '{}'.format(s) if (first != second): result = {'first': first, 'second': second} return result
Given two models, return the difference between them. Args: f (Pybindbase): First element. s (Pybindbase): Second element. Returns: dict: A dictionary highlighting the differences. Examples: >>> diff = napalm_yang.utils.diff(candidate, running) >>> pretty_print(diff) >>> { >>> "interfaces": { >>> "interface": { >>> "both": { >>> "Port-Channel1": { >>> "config": { >>> "mtu": { >>> "first": "0", >>> "second": "9000" >>> } >>> } >>> } >>> }, >>> "first_only": [ >>> "Loopback0" >>> ], >>> "second_only": [ >>> "Loopback1" >>> ] >>> } >>> } >>> }
codesearchnet
def populate_conversion_metadata(model_object, metadata): try: metadata_builder = flatbuffers.Builder(0) metadata_builder.Finish(metadata.Pack(metadata_builder)) buffer_field = schema_fb.BufferT() buffer_field.data = metadata_builder.Output() if not model_object.metadata: model_object.metadata = [] else: for meta in model_object.metadata: if meta.name.decode('utf-8') == CONVERSION_METADATA_FIELD_NAME: model_object.buffers[meta.buffer] = buffer_field return model_object if not model_object.buffers: model_object.buffers = [] model_object.buffers.append(buffer_field) metadata_field = schema_fb.MetadataT() metadata_field.name = CONVERSION_METADATA_FIELD_NAME metadata_field.buffer = len(model_object.buffers) - 1 model_object.metadata.append(metadata_field) return model_object except Exception: return model_object
Add or update conversion metadata to a tflite model. Args: model_object: A tflite model in object form. metadata: The conversion metadata. Returns: A tflite model object with embedded conversion metadata.
github-repos
def _Check(self): success = True for path in self._paths: if not os.path.isfile(path): logging.error('No such file: %s', path) success = False elif not os.access(path, os.R_OK): logging.error('No read access: %s', path) success = False elif not FLAGS.output and (not os.access(path, os.W_OK)): logging.error('No write access: %s', path) success = False return success
Verifies the existence and read+write access to all paths. Returns: Boolean, True if all paths are OK, otherwise False.
github-repos
def parameterized_codec(raw, b64): if isinstance(raw, bytes): raw = raw.decode('utf-8') result = _parameterize_string(raw) return Base64(result.data) if b64 else result
Parameterize a string, possibly encoding it as Base64 afterwards Args: raw (`str` | `bytes`): String to be processed. Byte strings will be interpreted as UTF-8. b64 (`bool`): Whether to wrap the output in a Base64 CloudFormation call Returns: :class:`troposphere.AWSHelperFn`: output to be included in a CloudFormation template.
juraj-google-style
async def find(self, seq_set: SequenceSet, selected: SelectedMailbox, requirement: FetchRequirement = FetchRequirement.METADATA) \ -> AsyncIterable[Tuple[int, MessageT]]: for seq, cached_msg in selected.messages.get_all(seq_set): msg = await self.get(cached_msg.uid, cached_msg, requirement) if msg is not None: yield (seq, msg)
Find the active message UID and message pairs in the mailbox that are contained in the given sequences set. Message sequence numbers are resolved by the selected mailbox session. Args: seq_set: The sequence set of the desired messages. selected: The selected mailbox session. requirement: The data required from each message.
juraj-google-style
def download( self, file: Union[IO[bytes], asyncio.StreamWriter, None]=None, raw: bool=False, rewind: bool=True, duration_timeout: Optional[float]=None): if self._session_state != SessionState.request_sent: raise RuntimeError('Request not sent') if rewind and file and hasattr(file, 'seek'): original_offset = file.tell() else: original_offset = None if not hasattr(file, 'drain'): self._response.body = file if not isinstance(file, Body): self._response.body = Body(file) read_future = self._stream.read_body(self._request, self._response, file=file, raw=raw) try: yield from asyncio.wait_for(read_future, timeout=duration_timeout) except asyncio.TimeoutError as error: raise DurationTimeout( 'Did not finish reading after {} seconds.' .format(duration_timeout) ) from error self._session_state = SessionState.response_received if original_offset is not None: file.seek(original_offset) self.event_dispatcher.notify(self.Event.end_response, self._response) self.recycle()
Read the response content into file. Args: file: A file object or asyncio stream. raw: Whether chunked transfer encoding should be included. rewind: Seek the given file back to its original offset after reading is finished. duration_timeout: Maximum time in seconds of which the entire file must be read. Be sure to call :meth:`start` first. Coroutine.
juraj-google-style
def detect_content_type(self, path=None, payload=None, objectInput=None): if objectInput: message = "Detection content type with file object is not stable." log.exception(message) raise TikaAppError(message) f = file_path(path, payload, objectInput) switches = ["-d", f] result = self._command_template(switches).lower() return result, path, f
Return the content type of passed file or payload. Args: path (string): Path of file to analyze payload (string): Payload base64 to analyze objectInput (object): file object/standard input to analyze Returns: content type of file (string)
juraj-google-style
def Parse(self, parser_mediator): file_entry = parser_mediator.GetFileEntry() if (not file_entry): raise errors.UnableToParseFile('Invalid file entry') parser_mediator.AppendToParserChain(self) try: self.ParseFileEntry(parser_mediator, file_entry) finally: parser_mediator.PopFromParserChain()
Parses the file entry and extracts event objects. Args: parser_mediator (ParserMediator): a parser mediator. Raises: UnableToParseFile: when the file cannot be parsed.
codesearchnet
def _deserialization_dependencies(self, children): del children return {}
Returns a dictionary containing `Trackables` that this object depends on. Dependencies define the order to serialize and deserialize objects in the SavedModel. For example: class A(Trackable): b = B() def _deserialization_dependencies(self, children): return {'b': self.b} class B(Trackable): pass We say that object `a=A()` depends on `a.b`. Dependencies are guaranteed to be serialized and deserialized before the object depending on them. The following methods use dependencies: - `_deserialize_from_proto` [loading] SavedModel loads with the bottom-up approach, by first creating all objects in the order defined by the dependencies, then connecting the children. Unlike `_trackable_children`, this function does not define the `SavedObjectGraph`. It only changes the order in which things are saved/loaded. Therefore, if there are dependencies that are not in the `SavedObjectGraph`, saving will fail. Args: children: Dict returned from `_trackable_children`. Returns: A dictionary mapping names to `Trackable`.
github-repos
def _makedirs(name, user=None, group=None, dir_mode=None, win_owner=None, win_perms=None, win_deny_perms=None, win_inheritance=None): if salt.utils.platform.is_windows(): (drive, path) = os.path.splitdrive(name) if (not os.path.isdir(drive)): raise CommandExecutionError(drive) win_owner = (win_owner if win_owner else user) return __salt__['file.makedirs'](path=name, owner=win_owner, grant_perms=win_perms, deny_perms=win_deny_perms, inheritance=win_inheritance) else: return __salt__['file.makedirs'](path=name, user=user, group=group, mode=dir_mode)
Helper function for creating directories when the ``makedirs`` option is set to ``True``. Handles Unix and Windows based systems .. versionadded:: 2017.7.8 Args: name (str): The directory path to create user (str): The linux user to own the directory group (str): The linux group to own the directory dir_mode (str): The linux mode to apply to the directory win_owner (str): The Windows user to own the directory win_perms (dict): A dictionary of grant permissions for Windows win_deny_perms (dict): A dictionary of deny permissions for Windows win_inheritance (bool): True to inherit permissions on Windows Returns: bool: True if successful, otherwise False on Windows str: Error messages on failure on Linux None: On successful creation on Linux Raises: CommandExecutionError: If the drive is not mounted on Windows
codesearchnet
def _VerifyValuesWithDilation(self, tensor_in_sizes, filter_in_sizes, stride, dilation, padding, data_type, data_format='NHWC'): total_size_1 = 1 total_size_2 = 1 for s in tensor_in_sizes: total_size_1 *= s for s in filter_in_sizes: total_size_2 *= s x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)], dtype=data_type).reshape(tensor_in_sizes) x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)], dtype=data_type).reshape(filter_in_sizes) with self.session() as sess: if data_type == np.float32: tolerance = 0.01 else: self.assertEqual(data_type, np.float64) tolerance = 1e-08 t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=data_type) t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=data_type) native_t1 = t1 strides = [1, stride, stride, 1] dilations = [dilation, dilation] if data_format == 'NCHW': native_t1 = array_ops.transpose(t1, [0, 3, 1, 2]) strides = [1, 1, stride, stride] with self.test_scope(): conv_native = nn_impl.depthwise_conv2d(native_t1, t2, strides=strides, rate=dilations, data_format=data_format, padding=padding) if data_format == 'NCHW': conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1]) with ops.device('CPU'): strides = [1, stride, stride, 1] conv_interface = nn_impl.depthwise_conv2d(t1, t2, strides=strides, rate=dilations, padding=padding) native_result = sess.run(conv_native, {t1: x1, t2: x2}) interface_result = sess.run(conv_interface, {t1: x1, t2: x2}) print('data_type:', data_type, 'max diff = ', np.amax(np.absolute(native_result - interface_result))) self.assertAllClose(np.ravel(native_result), np.ravel(interface_result), rtol=tolerance)
Verifies the output values of the convolution function. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols, input_depth, depth_multiplier]. stride: Stride. dilation: Dilation. padding: Padding type. data_type: The data type to use. data_format: The data_format of the input. "NHWC" or "NCHW".
github-repos
def get_graph(self): st = self.status if (st in (SolverStatus.solved, SolverStatus.unsolved)): phase = self._latest_nonfailed_phase() return phase.get_graph() else: return self.get_fail_graph()
Returns the most recent solve graph. This gives a graph showing the latest state of the solve. The specific graph returned depends on the solve status. When status is: unsolved: latest unsolved graph is returned; solved: final solved graph is returned; failed: most appropriate failure graph is returned (see `failure_reason`); cyclic: last failure is returned (contains cycle). Returns: A pygraph.digraph object.
codesearchnet
def df_first_row_to_dict(df): if df is not None: return [dict(r) for i, r in df.head(1).iterrows()][0]
First DataFrame row as a dict. Args: df (pandas.DataFrame): A DataFrame with at least one row Returns: A dict that looks like: {'C1': 'x', 'C2': 'y', 'C3': 'z'} from a DataFrame that looks like: C1 C2 C3 1 x y z Else if `df` is `None`, returns `None`
juraj-google-style
def _verify(self): if (self._num_sides < 2): raise ValueError('At least two sides required.') for (prev, curr) in six.moves.zip(self._edges, self._edges[1:]): self._verify_pair(prev, curr) prev = self._edges[(- 1)] curr = self._edges[0] self._verify_pair(prev, curr)
Verify that the edges define a curved polygon. This may not be entirely comprehensive, e.g. won't check self-intersection of the defined polygon. .. note:: This currently checks that edge endpoints match **exactly** but allowing some roundoff may be desired. Raises: ValueError: If there are fewer than two sides. ValueError: If one of the sides is not in 2D. ValueError: If consecutive sides don't share an endpoint.
codesearchnet
def register_intent_parser(self, intent_parser, domain=0): if domain not in self.domains: self.register_domain(domain=domain) self.domains[domain].register_intent_parser( intent_parser=intent_parser)
Register an intent parser with a domain. Args: intent_parser(intent): The intent parser you wish to register. domain(str): a string representing the domain you wish to register the intent parser to.
juraj-google-style
def _truncate_filename(filename, max_length): if len(filename) <= max_length: return filename if '.' in filename: filename, extension = filename.rsplit('.', 1) if len(extension) > max_length - 1: return filename[:max_length] return '.'.join([filename[:max_length - len(extension) - 1], extension]) else: return filename[:max_length]
Truncates a filename while trying to preserve the extension. Args: filename: string, the filename to potentially truncate. max_length: int, the maximum allowed length of the filename. Returns: The truncated filename that is less than or equal to the given maximum length.
github-repos
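A short illustrative sketch of the truncation behaviour. The helper name comes from the row above, and the expected outputs follow directly from the logic shown there.

```python
# With an extension that still fits, the base name is shortened so that
# "name.ext" stays within the limit.
print(_truncate_filename("document.txt", 8))   # "docu.txt"

# Without a dot, the name is simply cut at max_length.
print(_truncate_filename("archivefile", 7))    # "archive"

# Already short enough: returned unchanged.
print(_truncate_filename("a.py", 10))          # "a.py"
```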
def pause(self, device): resp = self.post('pause', params={'device': device}, return_response=True) error = resp.text if not error: error = None return {'success': resp.status_code == requests.codes.ok, 'error': error}
Pause the given device. Args: device (str): Device ID. Returns: dict: with keys ``success`` and ``error``.
juraj-google-style
def _parse_address(self, config): match = re.search('ip address ([^\\s]+)', config) value = (match.group(1) if match else None) return dict(address=value)
Parses the config block and returns the ip address value. The provided configuration block is scanned and the configured value for the IP address is returned as a dict object. If the IP address value is not configured, then None is returned for the value Args: config (str): The interface configuration block to parse Return: dict: A dict object intended to be merged into the resource dict
codesearchnet
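A standalone sketch of the regular expression used above, rewritten outside the class to show the shape of the returned dict. The sample config lines are made up for illustration.

```python
import re

def parse_address(config):
    # Same pattern as in the method above: capture the token after "ip address".
    match = re.search(r'ip address ([^\s]+)', config)
    value = match.group(1) if match else None
    return dict(address=value)

print(parse_address("   ip address 192.168.10.1/24"))  # {'address': '192.168.10.1/24'}
print(parse_address("   no ip address"))               # {'address': None}
```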
def _get_int_removals_helper(self, spec_amts_oxi, oxid_el, oxid_els, numa): oxid_old = min([spec.oxi_state for spec in spec_amts_oxi if (spec.symbol == oxid_el.symbol)]) oxid_new = math.floor((oxid_old + 1)) if (oxid_new > oxid_el.max_oxidation_state): return numa spec_old = Specie(oxid_el.symbol, oxid_old) spec_new = Specie(oxid_el.symbol, oxid_new) specamt = spec_amts_oxi[spec_old] spec_amts_oxi = {sp: amt for (sp, amt) in spec_amts_oxi.items() if (sp != spec_old)} spec_amts_oxi[spec_new] = specamt spec_amts_oxi = Composition(spec_amts_oxi) oxi_noA = sum([(spec.oxi_state * spec_amts_oxi[spec]) for spec in spec_amts_oxi if (spec.symbol not in self.cation.symbol)]) a = max(0, ((- oxi_noA) / self.cation_charge)) numa = numa.union({a}) if (a == 0): return numa else: for oxid_el in oxid_els: numa = numa.union(self._get_int_removals_helper(spec_amts_oxi.copy(), oxid_el, oxid_els, numa)) return numa
This is a helper method for get_removals_int_oxid! Args: spec_amts_oxi - a dict of species to their amounts in the structure oxid_el - the element to oxidize oxid_els - the full list of elements that might be oxidized numa - a running set of numbers of A cation at integer oxidation steps Returns: a set of numbers A; steps for oxidizing oxid_el first, then the other oxid_els in this list
codesearchnet
def _compute_best_partitions(num_part, sizes, nfps): if (num_part < 2): raise ValueError('num_part cannot be less than 2') if (num_part > len(sizes)): raise ValueError('num_part cannot be greater than the domain size of all set sizes') if (num_part == 2): (total_nfps, u) = min((((nfps[(0, u1)] + nfps[((u1 + 1), (len(sizes) - 1))]), u1) for u1 in range(0, (len(sizes) - 1)))) return ([(sizes[0], sizes[u]), (sizes[(u + 1)], sizes[(- 1)])], total_nfps, None) cost = np.zeros((len(sizes), (num_part - 2))) p2i = (lambda p: (p - 2)) for p in range(2, num_part): for u in range((p - 1), len(sizes)): if (p == 2): cost[(u, p2i(p))] = min(((nfps[(0, u1)] + nfps[((u1 + 1), u)]) for u1 in range(u))) else: cost[(u, p2i(p))] = min(((cost[(u1, p2i((p - 1)))] + nfps[((u1 + 1), u)]) for u1 in range(((p - 1) - 1), u))) p = num_part (total_nfps, u) = min((((cost[(u1, p2i((p - 1)))] + nfps[((u1 + 1), (len(sizes) - 1))]), u1) for u1 in range(((p - 1) - 1), (len(sizes) - 1)))) partitions = [(sizes[(u + 1)], sizes[(- 1)])] p -= 1 while (p > 1): (_, u1_best) = min((((cost[(u1, p2i(p))] + nfps[((u1 + 1), u)]), u1) for u1 in range(((p - 1) - 1), u))) partitions.insert(0, (sizes[(u1_best + 1)], sizes[u])) u = u1_best p -= 1 partitions.insert(0, (sizes[0], sizes[u])) return [partitions, total_nfps, cost]
Computes the optimal partitions given the size distributions and computed number of expected false positives for all sub-intervals. Args: num_part (int): The number of partitions to create. sizes (numpy.array): The complete domain of set sizes in sorted order. nfps (numpy.array): The computed number of expected false positives for all sub-intervals; axis-0 is for the indexes of lower bounds and axis-1 is for the indexes of upper bounds. Returns: partitions (list): list of lower and upper bounds of set sizes for all partitions. total_nfps (float): total number of expected false positives from all partitions. cost (numpy.array): a N x p-1 matrix of the computed optimal NFPs for all sub-problems given upper bound set size and number of partitions.
codesearchnet
def __bool__(self): self._disallow_bool_casting()
Dummy method to prevent a tensor from being used as a Python `bool`. This overload raises a `TypeError` when the user inadvertently treats a `Tensor` as a boolean (most commonly in an `if` or `while` statement), in code that was not converted by AutoGraph. For example: ```python if tf.constant(True): # Will raise. # ... if tf.constant(5) < tf.constant(7): # Will raise. # ... ``` Raises: `TypeError`.
github-repos
class TFBaseModelOutputWithNoAttention(ModelOutput): last_hidden_state: Optional[tf.Tensor] = None hidden_states: Optional[Tuple[tf.Tensor, ...]] = None
Base class for model's outputs, with potential hidden states. Args: last_hidden_state (`tf.Tensor` shape `(batch_size, num_channels, height, width)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
github-repos
def _ParseRecord(self, parser_mediator, page_data, record_offset): record_header_map = self._GetDataTypeMap('binarycookies_record_header') try: record_header = self._ReadStructureFromByteStream( page_data[record_offset:], record_offset, record_header_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to map record header data at offset: 0x{0:08x} with error: ' '{1!s}').format(record_offset, exception)) event_data = SafariBinaryCookieEventData() event_data.flags = record_header.flags if record_header.url_offset: data_offset = record_offset + record_header.url_offset event_data.url = self._ParseCString(page_data, data_offset) if record_header.name_offset: data_offset = record_offset + record_header.name_offset event_data.cookie_name = self._ParseCString(page_data, data_offset) if record_header.path_offset: data_offset = record_offset + record_header.path_offset event_data.path = self._ParseCString(page_data, data_offset) if record_header.value_offset: data_offset = record_offset + record_header.value_offset event_data.cookie_value = self._ParseCString(page_data, data_offset) if record_header.creation_time: date_time = dfdatetime_cocoa_time.CocoaTime( timestamp=record_header.creation_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) if record_header.expiration_time: date_time = dfdatetime_cocoa_time.CocoaTime( timestamp=record_header.expiration_time) else: date_time = dfdatetime_semantic_time.SemanticTime('Not set') event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_EXPIRATION) parser_mediator.ProduceEventWithEventData(event, event_data) for plugin in self._cookie_plugins: if parser_mediator.abort: break if event_data.cookie_name != plugin.COOKIE_NAME: continue try: plugin.UpdateChainAndProcess( parser_mediator, cookie_name=event_data.cookie_name, cookie_data=event_data.cookie_value, url=event_data.url) except Exception as exception: parser_mediator.ProduceExtractionWarning( 'plugin: {0:s} unable to parse cookie with error: {1!s}'.format( plugin.NAME, exception))
Parses a record from the page data. Args: parser_mediator (ParserMediator): parser mediator. page_data (bytes): page data. record_offset (int): offset of the record relative to the start of the page. Raises: ParseError: when the record cannot be parsed.
juraj-google-style
def simple_lmm(snps, pheno, K=None, covs=None, test='lrt', NumIntervalsDelta0=100, NumIntervalsDeltaAlt=0, searchDelta=False): t0 = time.time() if (K is None): K = SP.eye(snps.shape[0]) lm = limix.CLMM() lm.setK(K) lm.setSNPs(snps) lm.setPheno(pheno) if (covs is None): covs = SP.ones((snps.shape[0], 1)) lm.setCovs(covs) if (test == 'lrt'): lm.setTestStatistics(0) elif (test == 'f'): lm.setTestStatistics(1) else: print(test) raise NotImplementedError('only f or lrt are implemented') lm.setNumIntervals0(NumIntervalsDelta0) if searchDelta: lm.setNumIntervalsAlt(NumIntervalsDeltaAlt) else: lm.setNumIntervalsAlt(0) lm.process() t1 = time.time() print(('finished GWAS testing in %.2f seconds' % (t1 - t0))) return lm
Univariate fixed effects linear mixed model test for all SNPs Args: snps: [N x S] SP.array of S SNPs for N individuals pheno: [N x 1] SP.array of 1 phenotype for N individuals K: [N x N] SP.array of LMM-covariance/kinship coefficients (optional) If not provided, then linear regression analysis is performed covs: [N x D] SP.array of D covariates for N individuals test: 'lrt' for likelihood ratio test (default) or 'f' for F-test NumIntervalsDelta0: number of steps for delta optimization on the null model (100) NumIntervalsDeltaAlt: number of steps for delta optimization on the alt. model (0 - no optimization) searchDelta: Carry out delta optimization on the alternative model? If yes, we use NumIntervalsDeltaAlt steps Returns: limix LMM object
codesearchnet
def errors(self, batch_id, halt_on_error=True): errors = [] try: r = self.tcex.session.get('/v2/batch/{}/errors'.format(batch_id)) self.tcex.log.debug('Retrieve Errors for ID {}: status code {}, errors {}'.format(batch_id, r.status_code, r.text)) if r.ok: errors = json.loads(r.text) for error in errors: error_reason = error.get('errorReason') for error_msg in self._critical_failures: if re.findall(error_msg, error_reason): self.tcex.handle_error(10500, [error_reason], halt_on_error) return errors except Exception as e: self.tcex.handle_error(560, [e], halt_on_error)
Retrieve Batch errors to ThreatConnect API. .. code-block:: javascript [{ "errorReason": "Incident incident-001 has an invalid status.", "errorSource": "incident-001 is not valid." }, { "errorReason": "Incident incident-002 has an invalid status.", "errorSource":"incident-002 is not valid." }] Args: batch_id (str): The ID returned from the ThreatConnect API for the current batch job. halt_on_error (bool, default:True): If True any exception will raise an error.
codesearchnet
def get(self): logger.info('Loading refresh_token from %s', repr(self._filename)) try: with open(self._filename) as f: return f.read() except IOError as e: logger.info('Failed to load refresh_token: %s', e)
Get cached refresh token. Returns: Cached refresh token, or ``None`` on failure.
codesearchnet
def next_id(self, channel): if channel not in self.topics: self.topics[channel] = 0 return 0 self.topics[channel] += 1 return self.topics[channel]
Get the next sequence number for a named channel or topic. If channel has not been sent to next_id before, 0 is returned; otherwise next_id returns the last id returned + 1. Args: channel (string): The name of the channel to get a sequential id for. Returns: int: The next id for this channel
juraj-google-style
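A tiny sketch of the intended counter behaviour, assuming a holder object with a `topics` dict like the one referenced above. The `SequenceTracker` class and channel names are hypothetical.

```python
class SequenceTracker:
    """Hypothetical holder for per-channel sequence numbers."""

    def __init__(self):
        self.topics = {}

    def next_id(self, channel):
        # First use of a channel yields 0; later uses increment by one.
        if channel not in self.topics:
            self.topics[channel] = 0
            return 0
        self.topics[channel] += 1
        return self.topics[channel]

tracker = SequenceTracker()
print(tracker.next_id("telemetry"))  # 0
print(tracker.next_id("telemetry"))  # 1
print(tracker.next_id("events"))     # 0
```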
def unique_fetches(self): raise NotImplementedError('unique_fetches must be implemented by subclasses')
Return the list of unique tensors or ops needed by this fetch mapper. Returns: A list of tensors or ops.
github-repos
def zeros_like(x, dtype=None, name=None): return array_ops.zeros_like(x, dtype=dtype, name=name)
Instantiates an all-zeros variable of the same shape as another tensor. Args: x: Keras variable or Keras tensor. dtype: dtype of returned Keras variable. `None` uses the dtype of `x`. name: name for the variable to create. Returns: A Keras variable with the shape of `x` filled with zeros. Example: ```python from tensorflow.keras import backend as K kvar = K.variable(np.random.random((2,3))) kvar_zeros = K.zeros_like(kvar) K.eval(kvar_zeros) # array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32) ```
github-repos
def stops_when(iterable, condition): if (not callable(condition)): cond_value = condition def condition(x): return (x == cond_value) return itertools.takewhile((lambda x: (not condition(x))), iterable)
Stop yielding items when a condition arises. Args: iterable: the iterable to filter. condition: if the callable returns True once, stop yielding items. If it's not a callable, it will be converted to one as `lambda item: item == condition`. Example: >>> list(stops_when(range(10), lambda x: x > 5)) [0, 1, 2, 3, 4, 5] >>> list(stops_when(range(10), 7)) [0, 1, 2, 3, 4, 5, 6]
codesearchnet
def _comparison_functions(cls, partial=False): def prerelease_cmp(a, b): 'Compare prerelease components.\n\n Special rule: a version without prerelease component has higher\n precedence than one with a prerelease component.\n ' if (a and b): return identifier_list_cmp(a, b) elif a: return (- 1) elif b: return 1 else: return 0 def build_cmp(a, b): 'Compare build metadata.\n\n Special rule: there is no ordering on build metadata.\n ' if (a == b): return 0 else: return NotImplemented def make_optional(orig_cmp_fun): "Convert a cmp-like function to consider 'None == *'." @functools.wraps(orig_cmp_fun) def alt_cmp_fun(a, b): if ((a is None) or (b is None)): return 0 return orig_cmp_fun(a, b) return alt_cmp_fun if partial: return [base_cmp, make_optional(base_cmp), make_optional(base_cmp), make_optional(prerelease_cmp), make_optional(build_cmp)] else: return [base_cmp, base_cmp, base_cmp, prerelease_cmp, build_cmp]
Retrieve comparison methods to apply on version components. This is a private API. Args: partial (bool): whether to provide 'partial' or 'strict' matching. Returns: 5-tuple of cmp-like functions.
codesearchnet
def classifier_factory(clf): required_methods = ['fit', 'score', 'predict'] for method in required_methods: if (not hasattr(clf, method)): raise TypeError('"{}" is not in clf. Did you pass a classifier instance?'.format(method)) optional_methods = ['predict_proba'] for method in optional_methods: if (not hasattr(clf, method)): warnings.warn('{} not in clf. Some plots may not be possible to generate.'.format(method)) additional_methods = {'plot_learning_curve': plot_learning_curve, 'plot_confusion_matrix': plot_confusion_matrix_with_cv, 'plot_roc_curve': plot_roc_curve_with_cv, 'plot_ks_statistic': plot_ks_statistic_with_cv, 'plot_precision_recall_curve': plot_precision_recall_curve_with_cv, 'plot_feature_importances': plot_feature_importances} for (key, fn) in six.iteritems(additional_methods): if hasattr(clf, key): warnings.warn('"{}" method already in clf. Overriding anyway. This may result in unintended behavior.'.format(key)) setattr(clf, key, types.MethodType(fn, clf)) return clf
Embeds scikit-plot instance methods in an sklearn classifier. Args: clf: Scikit-learn classifier instance Returns: The same scikit-learn classifier instance passed in **clf** with embedded scikit-plot instance methods. Raises: TypeError: If **clf** does not contain the instance methods necessary for scikit-plot instance methods.
codesearchnet
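A short usage sketch under the assumption that this is scikit-plot's (now deprecated) factory API. The plotting method names come from the dictionary in the code above; the iris dataset is used only as a convenient example.

```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)

# The factory monkey-patches cross-validated plotting helpers onto the instance,
# so the plots in `additional_methods` become one-liners:
clf = classifier_factory(LogisticRegression(max_iter=1000))
clf.plot_roc_curve(X, y)
clf.plot_confusion_matrix(X, y)
```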
def get_cached_response(self, key): cached_value = self.data.get(key, _CACHE_MISS) is_found = cached_value is not _CACHE_MISS return CachedResponse(is_found, key, cached_value)
Retrieves a CachedResponse for the provided key. Args: key (string) Returns: A CachedResponse with is_found status and value.
juraj-google-style
def cancelTickByTickData(self, contract: Contract, tickType: str): ticker = self.ticker(contract) reqId = self.wrapper.endTicker(ticker, tickType) if reqId: self.client.cancelTickByTickData(reqId) else: self._logger.error(f'cancelMktData: No reqId found for contract {contract}')
Unsubscribe from tick-by-tick data Args: contract: The exact contract object that was used to subscribe with.
codesearchnet
def set_status(self, name: str=None): game = None if name: game = {'name': name} payload = {'op': WebSocketEvent.STATUS_UPDATE.value, 'd': {'game': game, 'status': 'online', 'afk': False, 'since': 0.0}} data = json.dumps(payload, indent=2) self.logger.debug(f'Sending status update payload: {data}') self._ws.send(data)
Updates the bot's status This is used to get the game that the bot is "playing" or to clear it. If you want to set a game, pass a name; if you want to clear it, either call this method without the optional ``name`` parameter or explicitly pass ``None``. Args: name: the game's name, or None
codesearchnet
def search_client_by_id(self, clientID) -> Client: for c in self.clients: if c.id == clientID: return c return None
Searches for a client by the given ID. Args: clientID(str): the ID of the client to search for Returns: the client object, or None if no client with that ID could be found
juraj-google-style
def detect_incorrect_erc20_interface(contract): functions = [f for f in contract.functions if ((f.contract == contract) and IncorrectERC20InterfaceDetection.incorrect_erc20_interface(f.signature))] return functions
Detect incorrect ERC20 interface Returns: list(str) : list of incorrect function signatures
codesearchnet
def info(self, message, domain=None): if (domain is None): domain = self.extension_name info(message, domain)
Shortcut function for `utils.loggable.info` Args: message: see `utils.loggable.info` domain: see `utils.loggable.info`
codesearchnet
def emit_obj_create(self, category: str, name: str, timestamp: int, pid: int, tid: int, object_id: int) -> None: event = self._create_event('N', category, name, pid, tid, timestamp) event['id'] = object_id self._events.append(event)
Adds an object creation event to the trace. Args: category: The event category as a string. name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. object_id: Identifier of the object as an integer.
github-repos
def send_notification(*, subsystem, recipients, subject, body_html, body_text): from cloud_inquisitor import CINQ_PLUGINS if not body_html and not body_text: raise ValueError('body_html or body_text must be provided') recipients = list(set(recipients)) notifiers = map(lambda plugin: plugin.load(), CINQ_PLUGINS['cloud_inquisitor.plugins.notifiers']['plugins']) for cls in filter(lambda x: x.enabled(), notifiers): for recipient in recipients: if isinstance(recipient, NotificationContact): if recipient.type == cls.notifier_type: try: notifier = cls() notifier.notify(subsystem, recipient.value, subject, body_html, body_text) except Exception: log.exception('Failed sending notification for {}/{}'.format( recipient.type, recipient.value )) else: log.warning('Unexpected recipient {}'.format(recipient))
Method to send a notification. A plugin may use only part of the information, but all fields are required. Args: subsystem (`str`): Name of the subsystem originating the notification recipients (`list` of :obj:`NotificationContact`): List of recipients subject (`str`): Subject / title of the notification body_html (`str)`: HTML formatted version of the message body_text (`str`): Text formatted version of the message Returns: `None`
juraj-google-style
def __init__(self, filenames, compression_type=None, buffer_size=None, name=None):
    self._filenames = filenames
    self._compression_type = convert.optional_param_to_tensor(
        'compression_type', compression_type,
        argument_default='', argument_dtype=dtypes.string)
    self._buffer_size = convert.optional_param_to_tensor(
        'buffer_size', buffer_size,
        argument_default=_DEFAULT_TF_RECORD_BUFFER_SIZE_BYTES)
    self._name = name
    variant_tensor = gen_dataset_ops.tf_record_dataset(
        self._filenames, self._compression_type, self._buffer_size,
        metadata=self._metadata.SerializeToString())
    super(_TFRecordDataset, self).__init__(variant_tensor)
Creates a `TFRecordDataset`. Args: filenames: A `tf.string` tensor containing one or more filenames. compression_type: (Optional.) A `tf.string` scalar evaluating to one of `""` (no compression), `"ZLIB"`, or `"GZIP"`. buffer_size: (Optional.) A `tf.int64` scalar representing the number of bytes in the read buffer. 0 means no buffering. name: (Optional.) A name for the tf.data operation.
github-repos
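The class above is the internal variant behind the public `tf.data.TFRecordDataset`; a usage sketch with the public API (the file path is hypothetical):

import tensorflow as tf

dataset = tf.data.TFRecordDataset(['/tmp/example.tfrecord.gz'],
                                  compression_type='GZIP',
                                  buffer_size=8 * 1024 * 1024)
for raw_record in dataset.take(2):
    example = tf.train.Example.FromString(raw_record.numpy())  # decode each record
    print(example)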
def fit_size_models(self, model_names, model_objs, input_columns, output_column="Hail_Size",
                    output_start=5, output_step=5, output_stop=100):
    print("Fitting size models")
    groups = self.data["train"]["member"][self.group_col].unique()
    output_start = int(output_start)
    output_step = int(output_step)
    output_stop = int(output_stop)
    for group in groups:
        group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group]
        group_data.dropna(inplace=True)
        group_data = group_data[group_data[output_column] >= output_start]
        output_data = group_data[output_column].values.astype(int)
        output_data[output_data > output_stop] = output_stop
        # Discretize hail sizes into bin indices of width output_step
        discrete_data = (output_data - output_start) // output_step
        self.size_models[group] = {}
        self.size_models[group]["outputvalues"] = np.arange(output_start, output_stop + output_step,
                                                            output_step, dtype=int)
        for m, model_name in enumerate(model_names):
            print("{0} {1}".format(group, model_name))
            self.size_models[group][model_name] = deepcopy(model_objs[m])
            self.size_models[group][model_name].fit(group_data[input_columns], discrete_data)
Fit size models to produce discrete pdfs of forecast hail sizes. Args: model_names: List of model names model_objs: List of model objects input_columns: List of input variables output_column: Output variable name output_start: Hail size bin start output_step: hail size bin step output_stop: hail size bin stop
juraj-google-style
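A standalone numpy sketch of the size-bin discretization assumed in the reconstructed `discrete_data` line above (the integer division by `output_step` was truncated in the source; sample sizes are illustrative):

import numpy as np

output_start, output_step, output_stop = 5, 5, 100
sizes = np.array([5, 12, 37, 160])
sizes = np.clip(sizes, None, output_stop)            # cap at the largest bin
discrete = (sizes - output_start) // output_step     # -> array([ 0,  1,  6, 19])
bins = np.arange(output_start, output_stop + output_step, output_step)
assert bins[discrete[-1]] == 100                     # last bin maps back to 100 mm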
def load_scheduler_plugins(self):
    if not self.scheduler_plugins:
        for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.schedulers']['plugins']:
            cls = entry_point.load()
            self.scheduler_plugins[cls.__name__] = cls
            if cls.__name__ == self.active_scheduler:
                self.log.debug('Scheduler loaded: {} in module {}'.format(cls.__name__, cls.__module__))
            else:
                self.log.debug('Scheduler disabled: {} in module {}'.format(cls.__name__, cls.__module__))
Refresh the list of available schedulers, populating `self.scheduler_plugins`.

Returns:
    `None`
codesearchnet
def _read_mode_rsralt(self, size, kind):
    if size != 4:
        raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
    _code = self._read_unpack(2)
    data = dict(
        kind=kind,
        type=self._read_opt_type(kind),
        length=size,
        alert=_ROUTER_ALERT.get(_code, 'Reserved'),
        code=_code,
    )
    return data
Read Router Alert option.

Positional arguments:
    size - int, length of option
    kind - int, 148 (RTRALT)

Returns:
    * dict -- extracted Router Alert (RTRALT) option

Structure of Router Alert (RTRALT) option [RFC 2113]:
    +--------+--------+--------+--------+
    |10010100|00000100|  2 octet value  |
    +--------+--------+--------+--------+

    Octets  Bits  Name                   Description
      0      0    ip.rsralt.kind         Kind (148)
      0      0    ip.rsralt.type.copy    Copied Flag (1)
      0      1    ip.rsralt.type.class   Option Class (0)
      0      3    ip.rsralt.type.number  Option Number (20)
      1      8    ip.rsralt.length       Length (4)
      2     16    ip.rsralt.alert        Alert
      2     16    ip.rsralt.code         Alert Code
codesearchnet
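A standalone sketch of the same 4-byte RFC 2113 layout using `struct`, independent of the reader class above:

import struct

raw = bytes([0x94, 0x04, 0x00, 0x00])        # kind=148, length=4, alert code=0
kind, length, code = struct.unpack('!BBH', raw)
assert (kind, length, code) == (148, 4, 0)   # code 0: "Router shall examine packet"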
def dependency_of_targets(targets, op):
    if isinstance(op, tf.Tensor):
        op = op.op
    assert isinstance(op, tf.Operation), op
    from tensorflow.contrib.graph_editor import get_backward_walk_ops
    dependent_ops = get_backward_walk_ops(targets, control_inputs=True)
    return op in dependent_ops
Check that op is in the subgraph induced by the dependencies of targets. The result is memoized. This is useful if some SessionRunHooks should be run only together with certain ops. Args: targets: a tuple of ops or tensors. The targets to find dependencies of. op (tf.Operation or tf.Tensor): Returns: bool: True if any one of `targets` depend on `op`.
juraj-google-style
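A TF1 graph-mode usage sketch (tensor names are illustrative; `tensorflow.contrib.graph_editor` only exists in TF1):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 3], name='x')
cost = tf.reduce_sum(x * 2.0)
unrelated = tf.constant(1.0)
assert dependency_of_targets([cost], x)                 # cost depends on x
assert not dependency_of_targets([cost], unrelated.op)  # but not on the constant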
def format_time( self, hour_expression, minute_expression, second_expression='' ): hour = int(hour_expression) period = '' if self._options.use_24hour_time_format is False: period = " PM" if (hour >= 12) else " AM" if hour > 12: hour -= 12 minute = str(int(minute_expression)) second = '' if second_expression is not None and second_expression: second = "{}{}".format(":", str(int(second_expression)).zfill(2)) return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
Given time parts, will construct a formatted time description.

Args:
    hour_expression: Hours part
    minute_expression: Minutes part
    second_expression: Seconds part

Returns:
    Formatted time description
juraj-google-style
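A hypothetical usage sketch, assuming `descriptor._options.use_24hour_time_format` is False (12-hour clock); the instance name is illustrative:

print(descriptor.format_time('13', '5', '9'))   # -> "01:05:09 PM"
print(descriptor.format_time('0', '30'))        # -> "00:30 AM"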
def _google_section_permitted(line_info, state):
    if state.section.indentation is None:
        return True
    return (line_info.indentation <= state.section.indentation
            or line_info.indentation < state.section.line1_indentation)
Returns whether a new google section is permitted to start here.

Q: Why might a new Google section not be allowed?
A: If we're in the middle of a Google "Args" section, then lines that start
"param:" will usually be a new arg, rather than a new section.
We use whitespace to determine when the Args section has actually ended.

A Google section ends when either:
- A new google section begins at either
  - indentation less than indentation of line 1 of the previous section
  - or <= indentation of the previous section
- Or the docstring terminates.

Args:
    line_info: Information about the current line.
    state: The state of the parser.

Returns:
    True or False, indicating whether a new Google section is permitted at the
    current line.
github-repos
def auto_batch_size(sequence_length, mesh_shape, layout_rules, tokens_per_split=2048):
    num_splits = mtf.tensor_dim_to_mesh_dim_size(
        layout_rules, mesh_shape, mtf.Dimension('batch', 0))
    # number of sequences that fit in one split, times the number of splits
    ret = max(1, tokens_per_split // sequence_length) * num_splits
    tf.logging.info(
        'AUTO_BATCH_SIZE tokens_per_split=%s num_splits=%s sequence_length=%s batch_size=%s'
        % (tokens_per_split, num_splits, sequence_length, ret))
    return ret
Automatically compute batch size. Args: sequence_length: an integer mesh_shape: an input to mtf.convert_to_shape() layout_rules: an input to mtf.convert_to_layout_rules() tokens_per_split: an integer Returns: an integer
codesearchnet
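A pure-Python sketch of the batch-size arithmetic assumed in the reconstruction above (the integer division by `sequence_length` was truncated in the source; no Mesh TensorFlow needed):

def auto_batch_size_sketch(sequence_length, num_splits, tokens_per_split=2048):
    return max(1, tokens_per_split // sequence_length) * num_splits

assert auto_batch_size_sketch(256, 8) == 64    # 2048 // 256 = 8 sequences per split
assert auto_batch_size_sketch(4096, 8) == 8    # long sequences clamp to 1 per split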
def breakpoint_set(self, addr, thumb=False, arm=False):
    flags = enums.JLinkBreakpoint.ANY
    if thumb:
        flags = flags | enums.JLinkBreakpoint.THUMB
    elif arm:
        flags = flags | enums.JLinkBreakpoint.ARM
    handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)
    if handle <= 0:
        raise errors.JLinkException('Breakpoint could not be set.')
    return handle
Sets a breakpoint at the specified address. If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if ``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a normal breakpoint is set. Args: self (JLink): the ``JLink`` instance addr (int): the address where the breakpoint will be set thumb (bool): boolean indicating to set the breakpoint in THUMB mode arm (bool): boolean indicating to set the breakpoint in ARM mode Returns: An integer specifying the breakpoint handle. This handle should be retained for future breakpoint operations. Raises: TypeError: if the given address is not an integer. JLinkException: if the breakpoint could not be set.
codesearchnet
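A hypothetical usage sketch with the pylink package; the device name and address are examples only:

import pylink

jlink = pylink.JLink()
jlink.open()
jlink.connect('STM32F407VE')
handle = jlink.breakpoint_set(0x08000400, thumb=True)
# ... run / debug ...
jlink.breakpoint_clear(handle)   # clear it later using the returned handle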
def get_config(self, name, default=MISSING):
    res = self.config.get(name, default)
    if res is MISSING:
        raise ArgumentError("Could not find config value by name and no default supplied", name=name)
    return res
Get a config value from this adapter by name Args: name (string): The name of the config variable default (object): The default value to return if config is not found Returns: object: the value associated with the name Raises: ArgumentError: if the name is not found and no default is supplied
juraj-google-style
def _get_encoding(dom, default='utf-8'):
    encoding = dom.find('meta', {'http-equiv': 'Content-Type'})
    if not encoding:
        return default
    encoding = encoding[0].params.get('content', None)
    if not encoding:
        return default
    return encoding.lower().split('=')[-1]
Try to look for meta tag in given `dom`.

Args:
    dom (obj): pyDHTMLParser dom of HTML elements.
    default (default "utf-8"): What to use if encoding is not found in `dom`.

Returns:
    str/default: Given encoding or `default` parameter if not found.
codesearchnet
def from_variant(variant, structure): return _VariantDataset(variant, structure)
Constructs a dataset from the given variant and (nested) structure. Args: variant: A scalar `tf.variant` tensor representing a dataset. structure: A (nested) structure of `tf.TypeSpec` objects representing the structure of each element in the dataset. Returns: A `tf.data.Dataset` instance.
github-repos
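A round-trip sketch using the public `tf.data.experimental` wrappers around this helper:

import tensorflow as tf

ds = tf.data.Dataset.range(3)
variant = tf.data.experimental.to_variant(ds)
restored = tf.data.experimental.from_variant(variant, ds.element_spec)
assert [int(x) for x in restored] == [0, 1, 2]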
def get_schema_descendant(self, route: SchemaRoute) -> Optional[SchemaNode]:
    node = self
    for p in route:
        node = node.get_child(*p)
        if node is None:
            return None
    return node
Return descendant schema node or ``None`` if not found. Args: route: Schema route to the descendant node (relative to the receiver).
codesearchnet
def me(self):
    json_data = self._session.get(API_ENDPOINT + '/me')
    return self._object_factory(OBJECT_TYPE, json_data)
Get the details of the person accessing the API. Raises: ApiError: If the Webex Teams cloud returns an error.
codesearchnet
def override_parent_subgraph(self, parent_subgraph, invisible_edges=None): with transaction.atomic(): if invisible_edges is None: invisible_edges = set() children = list(parent_subgraph.keys()) all_old_relations = dict(proso.list.group_by( list(ItemRelation.objects.filter(child_id__in=children)), by=lambda relation: relation.child_id )) to_delete = set() for child_id, parents in parent_subgraph.items(): old_relations = { relation.parent_id: relation for relation in all_old_relations.get(child_id, []) } for parent_id in parents: if parent_id not in old_relations: ItemRelation.objects.create( parent_id=parent_id, child_id=child_id, visible=(child_id, parent_id) not in invisible_edges ) elif old_relations[parent_id].visible != ((child_id, parent_id) not in invisible_edges): old_relations[parent_id].visible = (child_id, parent_id) not in invisible_edges old_relations[parent_id].save() to_delete |= {old_relations[parent_id].pk for parent_id in set(old_relations.keys()) - set(parents)} ItemRelation.objects.filter(pk__in=to_delete).delete()
Get all items with outgoing edges from the given subgraph, drop all their
parent relations, and then add parents according to the given subgraph.

Args:
    parent_subgraph (dict): item id -> list of parents (item ids)
    invisible_edges (list|set): set of (from, to) tuples specifying invisible edges
juraj-google-style
def get_http_header(self) -> Response:
    with wpull.util.reset_file_offset(self.block_file):
        data = self.block_file.read(4096)

    match = re.match(b'(.*?\\r?\\n\\r?\\n)', data)
    if not match:
        return

    status_line, dummy, field_str = match.group(1).partition(b'\n')

    try:
        version, code, reason = Response.parse_status_line(status_line)
    except ValueError:
        return

    response = Response(status_code=code, reason=reason, version=version)

    try:
        response.fields.parse(field_str, strict=False)
    except ValueError:
        return

    return response
Return the HTTP header. It only attempts to read the first 4 KiB of the payload. Returns: Response, None: Returns an instance of :class:`.http.request.Response` or None.
codesearchnet
def _CalculateStorageCounters(self, storage_reader): analysis_reports_counter = collections.Counter() analysis_reports_counter_error = False event_labels_counter = collections.Counter() event_labels_counter_error = False parsers_counter = collections.Counter() parsers_counter_error = False for session in storage_reader.GetSessions(): if isinstance(session.analysis_reports_counter, dict): analysis_reports_counter += collections.Counter(session.analysis_reports_counter) elif isinstance(session.analysis_reports_counter, collections.Counter): analysis_reports_counter += session.analysis_reports_counter else: analysis_reports_counter_error = True if isinstance(session.event_labels_counter, dict): event_labels_counter += collections.Counter(session.event_labels_counter) elif isinstance(session.event_labels_counter, collections.Counter): event_labels_counter += session.event_labels_counter else: event_labels_counter_error = True if isinstance(session.parsers_counter, dict): parsers_counter += collections.Counter(session.parsers_counter) elif isinstance(session.parsers_counter, collections.Counter): parsers_counter += session.parsers_counter else: parsers_counter_error = True storage_counters = {} warnings_by_path_spec = collections.Counter() warnings_by_parser_chain = collections.Counter() for warning in list(storage_reader.GetWarnings()): warnings_by_path_spec[warning.path_spec.comparable] += 1 warnings_by_parser_chain[warning.parser_chain] += 1 storage_counters['warnings_by_path_spec'] = warnings_by_path_spec storage_counters['warnings_by_parser_chain'] = warnings_by_parser_chain if (not analysis_reports_counter_error): storage_counters['analysis_reports'] = analysis_reports_counter if (not event_labels_counter_error): storage_counters['event_labels'] = event_labels_counter if (not parsers_counter_error): storage_counters['parsers'] = parsers_counter return storage_counters
Calculates the counters of the entire storage. Args: storage_reader (StorageReader): storage reader. Returns: dict[str,collections.Counter]: storage counters.
codesearchnet
def get_lats():
    lats = {}
    fname = pkg_resources.resource_filename(__name__, 'resources/Latitudes-Longitudes.csv')
    with open(fname, 'rb') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            word = row[0].lower()
            word = re.sub(' ', '', word)
            lats[word] = float(row[1])
    return lats
Get a dictionary that maps Backpage city names to their respective latitudes. Returns: dictionary that maps city names (Strings) to latitudes (Floats)
codesearchnet
def pad_to_multiple_2d(x, block_shape):
    old_shape = x.get_shape().dims
    last = old_shape[-1]
    if len(old_shape) == 4:
        height_padding = -common_layers.shape_list(x)[1] % block_shape[0]
        width_padding = -common_layers.shape_list(x)[2] % block_shape[1]
        paddings = [[0, 0], [0, height_padding], [0, width_padding], [0, 0]]
    elif len(old_shape) == 5:
        height_padding = -common_layers.shape_list(x)[2] % block_shape[0]
        width_padding = -common_layers.shape_list(x)[3] % block_shape[1]
        paddings = [[0, 0], [0, 0], [0, height_padding], [0, width_padding], [0, 0]]
    padded_x = tf.pad(x, paddings)
    padded_shape = padded_x.get_shape().as_list()
    padded_shape = padded_shape[:-1] + [last]
    padded_x.set_shape(padded_shape)
    return padded_x
Making sure x is a multiple of shape. Args: x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor block_shape: a 2-d list of integer shapes Returns: padded_x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor
codesearchnet
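The padding amounts come from modular arithmetic: `(-dim) % block` is the smallest non-negative pad that rounds `dim` up to a multiple of `block`. A tiny sketch:

for dim, block in [(10, 4), (12, 4), (7, 3)]:
    pad = -dim % block
    assert (dim + pad) % block == 0
# dim=10, block=4 -> pad=2; dim=12 -> pad=0; dim=7, block=3 -> pad=2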
def is_str(string): if sys.version_info[:2] >= (3, 0): return isinstance(string, str) return isinstance(string, basestring)
Python 2 and 3 compatible string checker. Args: string (str | basestring): the string to check Returns: bool: True or False
juraj-google-style
def download_and_uncompress(self, fileobj, dst_path): try: with tarfile.open(mode='r|*', fileobj=fileobj) as tgz: for tarinfo in tgz: abs_target_path = _merge_relative_path(dst_path, tarinfo.name) if tarinfo.isfile(): self._extract_file(tgz, tarinfo, abs_target_path) elif tarinfo.isdir(): tf_v1.gfile.MakeDirs(abs_target_path) else: raise ValueError(('Unexpected object type in tar archive: %s' % tarinfo.type)) total_size_str = tf_utils.bytes_to_readable_str(self._total_bytes_downloaded, True) self._print_download_progress_msg(('Downloaded %s, Total size: %s' % (self._url, total_size_str)), flush=True) except tarfile.ReadError: raise IOError(('%s does not appear to be a valid module.' % self._url))
Streams the content for the 'fileobj' and stores the result in dst_path. Args: fileobj: File handle pointing to .tar/.tar.gz content. dst_path: Absolute path where to store uncompressed data from 'fileobj'. Raises: ValueError: Unknown object encountered inside the TAR file.
codesearchnet
def verify_abort(func, *args, **kwargs):
    expected_exception = kwargs.pop('expected_exception', runez.system.AbortException)
    with CaptureOutput() as logged:
        try:
            value = func(*args, **kwargs)
            assert False, '%s did not raise, but returned %s' % (func, value)
        except expected_exception:
            return str(logged)
Convenient wrapper around functions that should exit or raise an exception Example: assert "Can't create folder" in verify_abort(ensure_folder, "/dev/null/not-there") Args: func (callable): Function to execute *args: Args to pass to 'func' **kwargs: Named args to pass to 'func' Returns: (str): Chatter from call to 'func', if it did indeed raise
codesearchnet
def conv(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]: q_input = array_ops.fake_quant_with_min_max_args(input_tensor, min=-0.1, max=0.2, num_bits=8, narrow_range=False) filter_tensor = ops.convert_to_tensor(self.filter_value) filter_min = array_ops.identity(array_ops.constant([-0.5, -0.5], dtype=dtypes.float32)) filter_max = array_ops.identity(array_ops.constant([0.5, 0.5], dtype=dtypes.float32)) q_filter = array_ops.fake_quant_with_min_max_vars_per_channel(filter_tensor, filter_min, filter_max, num_bits=8, narrow_range=True) bias = array_ops.constant([0.1, 0.2], dtype=dtypes.float32) scale, offset = ([1.0] * 2, [0.5] * 2) mean, variance = (scale, offset) out = nn_ops.conv2d(q_input, q_filter, strides=[1, 1, 2, 1], dilations=[1, 1, 1, 1], padding='SAME', data_format='NHWC', name='sample/conv2d') if has_bias: out = nn_ops.bias_add(out, bias, data_format='NHWC') if activation_fn is not None: if has_batch_norm: out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(out, scale, offset, mean, variance, is_training=False) out = activation_fn(out) out_min = array_ops.constant([-0.18, -0.32], dtype=dtypes.float32) out_max = array_ops.constant([0.5, 0.5], dtype=dtypes.float32) q_out = array_ops.fake_quant_with_min_max_vars_per_channel(out, min=out_min, max=out_max, num_bits=8, narrow_range=True) return {'output': q_out}
Performs a 2D convolution operation. Args: input_tensor: Input tensor to perform convolution on. Returns: A map of: output key -> output result.
github-repos
def resize(self, image: torch.Tensor, size: SizeDict, patch_size: SizeDict,
           interpolation: 'F.InterpolationMode' = None, **kwargs) -> torch.Tensor:
    interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
    if size.longest_edge:
        size = (size.longest_edge, size.longest_edge)
    elif size.height and size.width:
        size = (size.height, size.width)
    else:
        raise ValueError("size must contain either 'longest_edge' or 'height' and 'width'.")
    if patch_size.height and patch_size.width:
        patch_size = (patch_size.height, patch_size.width)
    else:
        raise ValueError("patch_size must contain either 'shortest_edge' or 'height' and 'width'.")
    output_size = get_resize_output_image_size(image, size=size, patch_size=patch_size)
    return F.resize(image, size=output_size, interpolation=interpolation, **kwargs)
Resize an image. The image is resized so that its longest edge matches
`size["longest_edge"]` (or `(height, width)` if both are given), keeping the
input aspect ratio, with the final size adjusted according to `patch_size`.

Args:
    image (`torch.Tensor`):
        Image to resize.
    size (`SizeDict`):
        Dict containing the longest possible edge of the image.
    patch_size (`SizeDict`):
        Patch size used to calculate the size of the output image.
    interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
        Resampling filter to use when resizing the image.
github-repos
def Verify(self, completely=False):
    res = super(Block, self).Verify()
    if not res:
        return False

    from neo.Blockchain import GetBlockchain, GetConsensusAddress

    if self.Transactions[0].Type != TransactionType.MinerTransaction:
        return False
    for tx in self.Transactions[1:]:
        if tx.Type == TransactionType.MinerTransaction:
            return False

    if completely:
        bc = GetBlockchain()
        if self.NextConsensus != GetConsensusAddress(bc.GetValidators(self.Transactions).ToArray()):
            return False
        for tx in self.Transactions:
            if not tx.Verify():
                pass
        logger.error('Blocks cannot be fully validated at this moment. please pass completely=False')
        raise NotImplementedError()

    return True
Verify the integrity of the block. Args: completely: (Not functional at this time). Returns: bool: True if valid. False otherwise.
codesearchnet
def approximate_split(x, num_splits, axis=0):
    size = shape_list(x)[axis]
    size_splits = [tf.div(size + i, num_splits) for i in range(num_splits)]
    return tf.split(x, size_splits, axis=axis)
Split approximately equally into num_splits parts. Args: x: a Tensor num_splits: an integer axis: an integer. Returns: a list of num_splits Tensors.
juraj-google-style
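The split sizes `(size + i) // num_splits` for `i = 0..num_splits-1` always sum back to `size`, which is what makes the split "approximately equal". A quick check in plain Python:

size, num_splits = 10, 3
size_splits = [(size + i) // num_splits for i in range(num_splits)]
assert size_splits == [3, 3, 4] and sum(size_splits) == size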
def read_from_hdx(identifier, configuration=None):
    resourceview = ResourceView(configuration=configuration)
    result = resourceview._load_from_hdx('resource view', identifier)
    if result:
        return resourceview
    return None
Reads the resource view given by identifier from HDX and returns ResourceView object Args: identifier (str): Identifier of resource view configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[ResourceView]: ResourceView object if successful read, None if not
juraj-google-style