Columns: code (string, lengths 20 to 4.93k) | docstring (string, lengths 33 to 1.27k) | source (string, 3 classes)
def match_row_splits_dtypes(*tensors, **kwargs): return_dtype = kwargs.pop('return_dtype', False) if kwargs: raise ValueError(f'Unexpected keyword args {kwargs}.') has_int32 = False has_int64 = False for tensor in tensors: if isinstance(tensor, RaggedTensor): if tensor.row_splits.dtype == dtypes.int32: has_int32 = True else: has_int64 = True if has_int32 and has_int64: if not ragged_config.auto_cast_partition_dtype(): raise ValueError('Input RaggedTensors have mismatched row_splits dtypes; use RaggedTensor.with_row_splits_dtype() to convert them to compatible dtypes.') dtype = dtypes.int64 tensors = tuple((t.with_row_splits_dtype(dtypes.int64) if isinstance(t, RaggedTensor) else t for t in tensors)) elif has_int32: dtype = dtypes.int32 else: dtype = dtypes.int64 if return_dtype: return (dtype, tensors) else: return tensors
Return a copy of `tensors` with row_splits all having the same dtype. Args: *tensors: A list of Tensors or RaggedTensors. **kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors), where `dtype` is the data type used by row-splits, and `tensors` is the converted list of `Tensors` and `RaggedTensors`. Returns: The converted list of `Tensors` and `RaggedTensors`.
github-repos
async def delCronJob(self, iden): cron = self.cell.agenda.appts.get(iden) if cron is None: raise s_exc.NoSuchIden() self._trig_auth_check(cron.useriden) await self.cell.agenda.delete(iden)
Delete a cron job Args: iden (bytes): The iden of the cron job to be deleted
juraj-google-style
def _StopExtractionProcesses(self, abort=False): logger.debug('Stopping extraction processes.') self._StopMonitoringProcesses() if abort: self._AbortTerminate() logger.debug('Emptying task queue.') self._task_queue.Empty() for _ in self._processes_per_pid: try: self._task_queue.PushItem(plaso_queue.QueueAbort(), block=False) except errors.QueueFull: logger.warning('Task queue full, unable to push abort message.') self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT) self._task_queue.Close(abort=abort) if (not abort): self._AbortTerminate() self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT) self._task_queue.Close(abort=True) self._AbortKill()
Stops the extraction processes. Args: abort (bool): True to indicate the stop is issued on abort.
codesearchnet
def __init__(self, asset_id, amount):
    self.AssetId = asset_id
    self.Amount = amount
Create an instance. Args: asset_id (UInt256): amount (Fixed8):
juraj-google-style
def _print_test_names_for_suite(suite_class): config = config_parser.TestRunConfig() runner = test_runner.TestRunner(log_dir=config.log_path, testbed_name=config.testbed_name) cls = suite_class(runner, config) try: cls.setup_suite(config) finally: cls.teardown_suite() last = '' for name in runner.get_full_test_names(): tag = name.split('.')[0] if tag != last: last = tag print('==========> %s <==========' % tag) print(name)
Prints the names of all the tests in a suite class. Args: suite_class: a test suite class to be run.
github-repos
def sync_main(async_main, config_path=None, default_config=None, should_validate_task=True, loop_function=asyncio.get_event_loop): context = _init_context(config_path, default_config) _init_logging(context) if should_validate_task: validate_task_schema(context) loop = loop_function() loop.run_until_complete(_handle_asyncio_loop(async_main, context))
Entry point for scripts using scriptworker. This function sets up the basic needs for a script to run. More specifically: * it creates the scriptworker context and initializes it with the provided config * the path to the config file is either taken from `config_path` or from `sys.argv[1]`. * it verifies `sys.argv` doesn't have more arguments than the config path. * it creates the asyncio event loop so that `async_main` can run Args: async_main (function): The function to call once everything is set up config_path (str, optional): The path to the file to load the config from. Loads from ``sys.argv[1]`` if ``None``. Defaults to None. default_config (dict, optional): the default config to use for ``_init_context``. defaults to None. should_validate_task (bool, optional): whether we should validate the task schema. Defaults to True. loop_function (function, optional): the function to call to get the event loop; here for testing purposes. Defaults to ``asyncio.get_event_loop``.
codesearchnet
def _prepare_headers(self, additional_headers=None, **kwargs): user_agent = 'pyseaweed/{version}'.format(version=__version__) headers = {'User-Agent': user_agent} if (additional_headers is not None): headers.update(additional_headers) return headers
Prepare headers for HTTP communication. Returns a dict of headers to be used in requests. Args: .. versionadded:: 0.3.2 **additional_headers**: (optional) Additional headers to be used with the request Returns: Headers dict. Keys and values are strings.
codesearchnet
def fillup_layer(layer_length, arrow_char):
    breakwire_layer = []
    for _ in range(layer_length):
        breakwire_layer.append(BreakWire(arrow_char))
    return breakwire_layer
Creates a layer with BreakWire elements. Args: layer_length (int): The length of the layer to create arrow_char (char): The char used to create the BreakWire element. Returns: list: The new layer.
juraj-google-style
def sort_index(self, **kwargs): axis = kwargs.pop('axis', 0) index = (self.columns if axis else self.index) ascending = kwargs.pop('ascending', True) if (ascending is None): ascending = False kwargs['ascending'] = ascending def sort_index_builder(df, **kwargs): if axis: df.columns = index else: df.index = index return df.sort_index(axis=axis, **kwargs) func = self._prepare_method(sort_index_builder, **kwargs) new_data = self._map_across_full_axis(axis, func) if axis: new_columns = pandas.Series(self.columns).sort_values(**kwargs) new_index = self.index else: new_index = pandas.Series(self.index).sort_values(**kwargs) new_columns = self.columns return self.__constructor__(new_data, new_index, new_columns, self.dtypes.copy())
Sorts the data with respect to either the columns or the indices. Returns: DataManager containing the data sorted by columns or indices.
codesearchnet
def copy_r(src, dst): abssrc = os.path.abspath(src) absdst = os.path.abspath(dst) try: os.makedirs(absdst) except OSError: pass for f in os.listdir(abssrc): fpath = os.path.join(abssrc, f) if os.path.isfile(fpath): shutil.copy(fpath, absdst) elif not absdst.startswith(fpath): copy_r(fpath, os.path.join(absdst, f)) else: warnings.warn("Cannot copy %s to itself" % fpath)
Implements a recursive copy function similar to Unix's "cp -r" command. Surprisingly, python does not have a real equivalent. shutil.copytree only works if the destination directory is not present. Args: src (str): Source folder to copy. dst (str): Destination folder.
juraj-google-style
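Illustrative usage sketch for the copy_r row above (not part of the dataset row): a Python-3 restatement of the same recursive-copy logic, exercised on a temporary directory tree so the snippet runs on its own.

import os, shutil, tempfile, pathlib

def copy_r(src, dst):
    # local restatement of copy_r above, trimmed to Python 3 idioms
    abssrc, absdst = os.path.abspath(src), os.path.abspath(dst)
    os.makedirs(absdst, exist_ok=True)
    for f in os.listdir(abssrc):
        fpath = os.path.join(abssrc, f)
        if os.path.isfile(fpath):
            shutil.copy(fpath, absdst)
        elif not absdst.startswith(fpath):
            copy_r(fpath, os.path.join(absdst, f))

src = tempfile.mkdtemp()
pathlib.Path(src, "sub").mkdir()
pathlib.Path(src, "a.txt").write_text("hello")
pathlib.Path(src, "sub", "b.txt").write_text("world")
dst = os.path.join(tempfile.mkdtemp(), "copy")
copy_r(src, dst)
print(sorted(os.listdir(dst)))   # ['a.txt', 'sub']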
def apply(self, window_length, samples=True, func1d=None): window_length /= (1 if samples else self.step) if (func1d is None): func1d = np.mean params = self.__dict__.copy() out = self._rolling_window(int(window_length), func1d) return Curve(out, params=params)
Runs any kind of function over a window. Args: window_length (int): the window length. Required. samples (bool): window length is in samples. Use False for a window length given in metres. func1d (function): a function that takes a 1D array and returns a scalar. Default: ``np.mean()``. Returns: Curve.
codesearchnet
def intrusion_sets(self, name, owner=None, **kwargs): return IntrusionSet(self.tcex, name, owner=owner, **kwargs)
Create the Intrusion Set TI object. Args: owner: name: **kwargs: Return:
juraj-google-style
def stop_tuning_job(self, name): try: LOGGER.info('Stopping tuning job: {}'.format(name)) self.sagemaker_client.stop_hyper_parameter_tuning_job(HyperParameterTuningJobName=name) except ClientError as e: error_code = e.response['Error']['Code'] if (error_code == 'ValidationException'): LOGGER.info('Tuning job: {} is already stopped or not running.'.format(name)) else: LOGGER.error('Error occurred while attempting to stop tuning job: {}. Please try again.'.format(name)) raise
Stop the Amazon SageMaker hyperparameter tuning job with the specified name. Args: name (str): Name of the Amazon SageMaker hyperparameter tuning job. Raises: ClientError: If an error occurs while trying to stop the hyperparameter tuning job.
codesearchnet
def get_compound_bodies(node):
    if isinstance(node, (ast.Module, ast.FunctionDef, ast.ClassDef, ast.With)):
        return [node.body]
    elif isinstance(node, (ast.If, ast.While, ast.For)):
        return [node.body, node.orelse]
    elif PY2 and isinstance(node, ast.TryFinally):
        return [node.body, node.finalbody]
    elif PY2 and isinstance(node, ast.TryExcept):
        return [node.body, node.orelse] + [h.body for h in node.handlers]
    elif PY3 and isinstance(node, ast.Try):
        return ([node.body, node.orelse, node.finalbody]
                + [h.body for h in node.handlers])
    return []
Returns a list of bodies of a compound statement node. Args: node: AST node. Returns: A list of bodies of the node. If the given node does not represent a compound statement, an empty list is returned.
codesearchnet
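Illustrative sketch for the get_compound_bodies row above: the same body-collection idea restated for Python 3 only (the PY2 branches are dropped), applied to a parsed module so it runs standalone.

import ast

def compound_bodies(node):
    # Python-3-only simplification of get_compound_bodies above
    if isinstance(node, (ast.Module, ast.FunctionDef, ast.ClassDef, ast.With)):
        return [node.body]
    if isinstance(node, (ast.If, ast.While, ast.For)):
        return [node.body, node.orelse]
    if isinstance(node, ast.Try):
        return [node.body, node.orelse, node.finalbody] + [h.body for h in node.handlers]
    return []

tree = ast.parse("if x:\n    y = 1\nelse:\n    y = 2\n")
if_node = tree.body[0]
print([len(b) for b in compound_bodies(if_node)])   # [1, 1]: one statement in body, one in orelse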
def replace_batch_norm(model): for name, module in model.named_children(): if isinstance(module, nn.BatchNorm2d): new_module = ConditionalDetrFrozenBatchNorm2d(module.num_features) if not module.weight.device == torch.device('meta'): new_module.weight.data.copy_(module.weight) new_module.bias.data.copy_(module.bias) new_module.running_mean.data.copy_(module.running_mean) new_module.running_var.data.copy_(module.running_var) model._modules[name] = new_module if len(list(module.children())) > 0: replace_batch_norm(module)
Recursively replace all `torch.nn.BatchNorm2d` with `ConditionalDetrFrozenBatchNorm2d`. Args: model (torch.nn.Module): input model
github-repos
def delete(workflow_id: str = None, workflow_version: str = None): if workflow_id is None and workflow_version is None: keys = DB.get_keys("workflow_definitions:*") DB.delete(*keys) elif workflow_id is not None and workflow_version is None: keys = DB.get_keys("workflow_definitions:{}:*".format(workflow_id)) DB.delete(*keys) elif workflow_id is None and workflow_version is not None: keys = DB.get_keys("workflow_definitions:*:{}" .format(workflow_version)) DB.delete(*keys) else: name = "workflow_definitions:{}:{}".format(workflow_id, workflow_version) DB.delete(name)
Delete workflow definitions. Args: workflow_id (str, optional): Optional workflow identifier workflow_version (str, optional): Optional workflow identifier version If workflow_id and workflow_version are None, delete all workflow definitions.
juraj-google-style
def broadcast_dynamic_shape(shape_x: dynamic_ragged_shape.DenseOrRaggedShape, shape_y: dynamic_ragged_shape.DenseOrRaggedShape) -> dynamic_ragged_shape.DynamicRaggedShape: if not isinstance(shape_x, dynamic_ragged_shape.DynamicRaggedShape): shape_x = dynamic_ragged_shape.DynamicRaggedShape([], shape_x) if not isinstance(shape_y, dynamic_ragged_shape.DynamicRaggedShape): shape_y = dynamic_ragged_shape.DynamicRaggedShape([], shape_y) return dynamic_ragged_shape.broadcast_dynamic_shape(shape_x, shape_y)
Returns the shape formed by broadcasting two shapes to be compatible. 1. If shape_x and shape_y both have row_partitions, then fail if their dtypes don't match. 2. If neither has row_partitions and they have different dtypes, go with int64. 3. If one has row_partitions, go with that dtype. Args: shape_x: A `DynamicRaggedShape` shape_y: A `DynamicRaggedShape` Returns: A `DynamicRaggedShape`. Raises: ValueError: If `shape_x` and `shape_y` are not broadcast-compatible.
github-repos
def append_with_data(url, data): if data is None: return url url_parts = list(urlparse(url)) query = OrderedDict(parse_qsl(url_parts[4], keep_blank_values=True)) query.update(data) url_parts[4] = URLHelper.query_dict_to_string(query) return urlunparse(url_parts)
Append the given URL with the given data OrderedDict. Args: url (str): The URL to append. data (obj): The key value OrderedDict to append to the URL. Returns: str: The new URL.
juraj-google-style
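Illustrative sketch for the append_with_data row above, using only the standard library (urlencode stands in for the URLHelper.query_dict_to_string helper used in the original, which is not shown here).

from collections import OrderedDict
from urllib.parse import urlparse, urlunparse, parse_qsl, urlencode

def append_with_data(url, data):
    if data is None:
        return url
    parts = list(urlparse(url))
    query = OrderedDict(parse_qsl(parts[4], keep_blank_values=True))
    query.update(data)
    parts[4] = urlencode(query)
    return urlunparse(parts)

print(append_with_data("http://example.com/page?a=1", OrderedDict([("b", "2")])))
# http://example.com/page?a=1&b=2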
def info(self): result = list() result.append('Agents:\n') for agent in self._all_agents: result.append('\tName: ') result.append(agent.name) result.append('\n\tType: ') result.append(type(agent).__name__) result.append('\n\t') result.append('Sensors:\n') for sensor in self._sensor_map[agent.name].keys(): result.append('\t\t') result.append(Sensors.name(sensor)) result.append('\n') return ''.join(result)
Returns a string with specific information about the environment. This information includes which agents are in the environment and which sensors they have. Returns: str: The information in a string format.
codesearchnet
def save_results(self, output_dir='.', prefix='', prefix_sep='_', image_list=None): if prefix == '': prefix_sep = '' if not exists(output_dir): makedirs(output_dir) logger.debug("Saving results...") if image_list is None: image_list = self.images.keys() for suffix, img in self.images.items(): if suffix in image_list: filename = prefix + prefix_sep + suffix + '.nii.gz' outpath = join(output_dir, filename) imageutils.save_img(img, outpath, self.dataset.masker)
Write out any images generated by the meta-analysis. Args: output_dir (str): folder to write images to prefix (str): all image files will be prepended with this string prefix_sep (str): glue between the prefix and rest of filename image_list (list): optional list of images to save--e.g., ['pFgA_z', 'pAgF']. If image_list is None (default), will save all images.
juraj-google-style
def get_meta_graph_def_from_tags(self, tags): found_match = False meta_graph_def_to_load = None available_tags = [] for meta_graph_def in self._saved_model.meta_graphs: available_tags.append(set(meta_graph_def.meta_info_def.tags)) if set(meta_graph_def.meta_info_def.tags) == set(tags): meta_graph_def_to_load = meta_graph_def found_match = True break if not found_match: raise RuntimeError(f"MetaGraphDef associated with tags {str(tags).strip('[]')} could not be found in SavedModel, with available tags '{available_tags}'. To inspect available tag-sets in the SavedModel, please use the SavedModel CLI: `saved_model_cli`.") return meta_graph_def_to_load
Return MetaGraphDef with the exact specified tags. Args: tags: A list or set of string tags that identify the MetaGraphDef. Returns: MetaGraphDef with the same tags. Raises: RuntimeError: if no metagraphs were found with the associated tags.
github-repos
def stream( self, accountID, **kwargs ): request = Request( 'GET', '/v3/accounts/{accountID}/transactions/stream' ) request.set_path_param( 'accountID', accountID ) request.set_stream(True) class Parser(): def __init__(self, ctx): self.ctx = ctx def __call__(self, line): j = json.loads(line.decode('utf-8')) type = j.get("type") if type is None: return ("unknown", j) elif type == "HEARTBEAT": return ( "transaction.TransactionHeartbeat", self.ctx.transaction.TransactionHeartbeat.from_dict( j, self.ctx ) ) transaction = self.ctx.transaction.Transaction.from_dict( j, self.ctx ) return ( "transaction.Transaction", transaction ) request.set_line_parser( Parser(self.ctx) ) response = self.ctx.request(request) return response
Get a stream of Transactions for an Account starting from when the request is made. Args: accountID: Account Identifier Returns: v20.response.Response containing the results from submitting the request
juraj-google-style
def FromDictionary(cls, dictionary): if 'user_id' in dictionary: raise errors.GitkitClientError('use localId instead') if 'localId' not in dictionary: raise errors.GitkitClientError('must specify localId') if 'email' not in dictionary: raise errors.GitkitClientError('must specify email') return cls(decode=False, **dictionary)
Initializes from user specified dictionary. Args: dictionary: dict of user specified attributes Returns: GitkitUser object
juraj-google-style
def _convert_keras_to_saved_model(self, output_dir): try: def _is_keras_3(): try: import keras return keras.__version__.startswith('3') and isinstance(self._keras_model, keras.layers.Layer) except ImportError: return False if _is_keras_3(): import keras export_archive = keras.export.ExportArchive() export_archive.track(self._keras_model) if isinstance(self._keras_model, (keras.src.models.Functional, keras.src.models.Sequential)): input_signature = nest.map_structure(lambda x: tensor_spec.TensorSpec(x.shape, dtype=x.dtype, name=x.name), self._keras_model.inputs) if isinstance(input_signature, list) and len(input_signature) > 1: input_signature = [input_signature] else: save_spec = _get_save_spec(self._keras_model) if not save_spec: raise ValueError('The model provided has never been called. It must be called at least once before export.') input_signature = [save_spec] inference_fn = functools.partial(self._keras_model.__call__, training=False) export_archive.add_endpoint('serve', inference_fn, input_signature) export_archive.write_out(output_dir) else: _save.save(self._keras_model, output_dir, options=_save_options.SaveOptions(save_debug_info=True)) except Exception: return (None, None, None) self.saved_model_dir = output_dir self._saved_model_tags = set([_tag_constants.SERVING]) self._saved_model_exported_names = [_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] self._parse_saved_model_args(always_enable_saved_model_import=self.experimental_lower_to_saved_model) if self.saved_model_dir: graph_def, input_tensors, output_tensors = self._load_saved_model(self.saved_model_dir, self._saved_model_tags) self._trackable_obj = _load(self.saved_model_dir, self._saved_model_tags) return (graph_def, input_tensors, output_tensors) return (None, None, None)
Save Keras model to the SavedModel format. Args: output_dir: The output directory to save the SavedModel. Returns: graph_def: The frozen GraphDef. input_tensors: List of input tensors. output_tensors: List of output tensors.
github-repos
def _to_tf_type(dtype): return dtypes.as_dtype(dtype)
Converts a native python or numpy type to TF DType. Args: dtype: Could be a python type, a numpy type or a TF DType. Returns: A tensorflow `DType`.
github-repos
def transform(self, args): if self.parse_error(): AliasManager.write_alias_config_hash(empty_hash=True) return args if self.detect_alias_config_change(): self.load_full_command_table() self.collided_alias = AliasManager.build_collision_table(self.alias_table.sections()) build_tab_completion_table(self.alias_table) else: self.load_collided_alias() transformed_commands = [] alias_iter = enumerate(args, 1) for (alias_index, alias) in alias_iter: is_collided_alias = ((alias in self.collided_alias) and (alias_index in self.collided_alias[alias])) is_named_arg = ((alias_index > 1) and args[(alias_index - 2)].startswith('-')) is_named_arg_flag = alias.startswith('-') excluded_commands = is_alias_command(['remove', 'export'], transformed_commands) if ((not alias) or is_collided_alias or is_named_arg or is_named_arg_flag or excluded_commands): transformed_commands.append(alias) continue full_alias = self.get_full_alias(alias) if self.alias_table.has_option(full_alias, 'command'): cmd_derived_from_alias = self.alias_table.get(full_alias, 'command') telemetry.set_alias_hit(full_alias) else: transformed_commands.append(alias) continue pos_args_table = build_pos_args_table(full_alias, args, alias_index) if pos_args_table: logger.debug(POS_ARG_DEBUG_MSG, full_alias, cmd_derived_from_alias, pos_args_table) transformed_commands += render_template(cmd_derived_from_alias, pos_args_table) for pos_arg in pos_args_table: next(alias_iter) else: logger.debug(DEBUG_MSG, full_alias, cmd_derived_from_alias) transformed_commands += shlex.split(cmd_derived_from_alias) return self.post_transform(transformed_commands)
Transform any aliases in args to their respective commands. Args: args: A list of space-delimited command input extracted directly from the console. Returns: A list of transformed commands according to the alias configuration file.
codesearchnet
def validate(datapackage, schema='base'): errors = [] schema_obj = None datapackage_obj = None if isinstance(datapackage, six.string_types): try: datapackage_obj = json.loads(datapackage) except ValueError as e: errors.append(DataPackageValidateException(e)) elif (not isinstance(datapackage, dict)): msg = "Data Package must be a dict or JSON string, but was a '{0}'" dp_type = type(datapackage).__name__ error = DataPackageValidateException(msg.format(dp_type)) errors.append(error) else: datapackage_obj = datapackage try: if isinstance(schema, six.string_types): try: schema = json.loads(schema) except ValueError: pass schema_obj = Schema(schema) except (SchemaError, RegistryError) as e: errors.append(e) if ((datapackage_obj is not None) and (schema_obj is not None)): try: schema_obj.validate(datapackage_obj) except ValidationError as e: errors.append(e) if errors: exception = DataPackageValidateException() exception.errors = errors raise exception
Validate Data Package datapackage.json files against a jsonschema. Args: datapackage (str or dict): The Data Package descriptor file (i.e. datapackage.json) as a dict or its contents in a string. schema (str or dict): If a string, it can be the schema ID in the registry, a local path, a URL or the schema's JSON as a string. If a dict, it must be the JSON Schema itself. Returns: None Raises: DataPackageValidateException: This exception has the list of the validation errors in its `.errors` attribute.
codesearchnet
def register_multi_flags_validator(flag_names, multi_flags_checker, message='Flags validation failed', flag_values=FLAGS): v = gflags_validators.MultiFlagsValidator(flag_names, multi_flags_checker, message) _add_validator(flag_values, v)
Adds a constraint to multiple flags. The constraint is validated when flags are initially parsed, and after each change of the corresponding flag's value. Args: flag_names: [str], a list of the flag names to be checked. multi_flags_checker: callable, a function to validate the flag. input - dictionary, with keys() being flag_names, and value for each key being the value of the corresponding flag (string, boolean, etc). output - Boolean. Must return True if validator constraint is satisfied. If constraint is not satisfied, it should either return False or raise gflags.ValidationError. message: Error text to be shown to the user if checker returns False. If checker raises gflags.ValidationError, message from the raised error will be shown. flag_values: An optional FlagValues instance to validate against. Raises: AttributeError: If a flag is not registered as a valid flag name.
codesearchnet
async def check_status(self, pipeline_uuid: str) -> api_pb2.Status: self._verify_pipeline_uuid(pipeline_uuid) request = api_pb2.CheckStatusRequest(pipeline_uuid=pipeline_uuid) response = await self._stub.CheckStatus(request, **self._kwargs) return response.status
Get the status of the pipeline by its uuid. Args: pipeline_uuid: uuid of the pipeline Returns: status: status of the pipeline
github-repos
def task_done(self, message): topic_partition = (message.topic, message.partition) if topic_partition not in self._topics: logger.warning('Unrecognized topic/partition in task_done message: ' '{0}:{1}'.format(*topic_partition)) return False offset = message.offset prev_done = self._offsets.task_done[topic_partition] if prev_done is not None and offset != (prev_done + 1): logger.warning('Marking task_done on a non-continuous offset: %d != %d + 1', offset, prev_done) prev_commit = self._offsets.commit[topic_partition] if prev_commit is not None and ((offset + 1) <= prev_commit): logger.warning('Marking task_done on a previously committed offset?: %d (+1) <= %d', offset, prev_commit) self._offsets.task_done[topic_partition] = offset if self._does_auto_commit_messages(): self._incr_auto_commit_message_count() if self._should_auto_commit(): self.commit() return True
Mark a fetched message as consumed. Offsets for messages marked as "task_done" will be stored back to the kafka cluster for this consumer group on commit() Arguments: message (KafkaMessage): the message to mark as complete Returns: True, unless the topic-partition for this message has not been configured for the consumer. In normal operation, this should not happen. But see github issue 364.
juraj-google-style
def _download(self):
    repo = self._config.get('napps', 'repo')
    napp_id = '{}/{}-{}.napp'.format(self.user, self.napp, self.version)
    uri = os.path.join(repo, napp_id)
    return urllib.request.urlretrieve(uri)[0]
Download NApp package from server. Return: str: Downloaded temp filename. Raises: urllib.error.HTTPError: If download is not successful.
codesearchnet
def _CreateStyleForRoute(self, doc, route): style_id = 'route_%s' % route.route_id style = ET.SubElement(doc, 'Style', {'id': style_id}) linestyle = ET.SubElement(style, 'LineStyle') width = ET.SubElement(linestyle, 'width') type_to_width = {0: '3', 1: '3', 2: '5', 3: '1'} width.text = type_to_width.get(route.route_type, '1') if route.route_color: color = ET.SubElement(linestyle, 'color') red = route.route_color[0:2].lower() green = route.route_color[2:4].lower() blue = route.route_color[4:6].lower() color.text = 'ff%s%s%s' % (blue, green, red) return style_id
Create a KML Style element for the route. The style sets the line colour if the route colour is specified. The line thickness is set depending on the vehicle type. Args: doc: The KML Document ElementTree.Element instance. route: The transitfeed.Route to create the style for. Returns: The id of the style as a string.
juraj-google-style
def parent_callback(self, executor_fu): with self._update_lock: if (not executor_fu.done()): raise ValueError('done callback called, despite future not reporting itself as done') if (executor_fu != self.parent): if ((executor_fu.exception() is None) and (not isinstance(executor_fu.result(), RemoteExceptionWrapper))): raise ValueError('internal consistency error: AppFuture done callback called without an exception, but parent has been changed since then') try: res = executor_fu.result() if isinstance(res, RemoteExceptionWrapper): res.reraise() super().set_result(executor_fu.result()) except Exception as e: if (executor_fu.retries_left > 0): pass else: super().set_exception(e)
Callback from a parent future to update the AppFuture. Used internally by AppFuture, and should not be called by code using AppFuture. Args: - executor_fu (Future): Future returned by the executor along with callback. This may not be the current parent future, as the parent future may have already been updated to point to a retrying execution, and in that case, this is logged. In the case that a new parent has been attached, we must immediately discard this result no matter what it contains (although it might be interesting to log if it was successful...) Returns: - None Updates the super() with the result() or exception()
codesearchnet
def get_content_of_file(self, name, full_path=False): if self.handle: for member in self.handle.getmembers(): if (full_path and member.name == name) or ( not full_path and os.path.basename( member.name) == name): extracted = self.handle.extractfile(member) return extracted.read().decode( locale.getpreferredencoding()) return None
Returns content of file from archive. If full_path is set to False and two files with given name exist, content of one is returned (it is not specified which one that is). If set to True, returns content of exactly that file. Args: name: name of the file to get content of Returns: Content of the file with given name, or None if no such file exists.
juraj-google-style
def _NormalizedVolumeIdentifiers(self, volume_system, volume_identifiers, prefix='v'): normalized_volume_identifiers = [] for volume_identifier in volume_identifiers: if isinstance(volume_identifier, int): volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier) elif (not volume_identifier.startswith(prefix)): try: volume_identifier = int(volume_identifier, 10) volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier) except (TypeError, ValueError): pass try: volume = volume_system.GetVolumeByIdentifier(volume_identifier) except KeyError: volume = None if (not volume): raise errors.ScannerError('Volume missing for identifier: {0:s}.'.format(volume_identifier)) normalized_volume_identifiers.append(volume_identifier) return normalized_volume_identifiers
Normalizes volume identifiers. Args: volume_system (VolumeSystem): volume system. volume_identifiers (list[int|str]): allowed volume identifiers, formatted as an integer or string with prefix. prefix (Optional[str]): volume identifier prefix. Returns: list[str]: volume identifiers with prefix. Raises: ScannerError: if the volume identifier is not supported or no volume could be found that corresponds with the identifier.
codesearchnet
def __enter__(self) -> str: ctx = context.context() if ctx.executing_eagerly(): old_name = ctx.scope_name name = self._name if not name: scope_name = '' elif name[-1] == '/': scope_name = name elif old_name: scope_name = old_name + name + '/' else: scope_name = name + '/' ctx.scope_name = scope_name def _restore_name_scope(*_): ctx.scope_name = old_name self._exit_fns.append(_restore_name_scope) else: scope = get_default_graph().name_scope(self._name) scope_name = scope.__enter__() self._exit_fns.append(scope.__exit__) return scope_name
Start the scope block. Returns: The scope name.
github-repos
def prepend_to_list(self, key, *value, pipeline=False):
    if pipeline:
        self._pipeline.lpush(key, *value)
    else:
        self._db.lpush(key, *value)
Add new element to the start of the list stored at key. Args: key (str): Key where the list is stored value: Value to add to the list pipeline (bool): True, start a transaction block. Default false.
codesearchnet
def get_cond_latents(all_latents=None, hparams=None): cond_latents = None if hparams.gen_mode == "conditional": if hparams.latent_dist_encoder in ["conv_net", "conv3d_net"]: num_cond_latents = (hparams.num_cond_latents + int(hparams.cond_first_frame)) if len(all_latents) >= num_cond_latents: cond_latents = all_latents[-hparams.num_cond_latents:] if hparams.cond_first_frame: cond_latents = [all_latents[0]] + cond_latents elif hparams.latent_dist_encoder in ["pointwise", "conv_lstm"]: if all_latents: cond_latents = all_latents[-1] if hparams.gen_mode == "conditional": global_step = tf.train.get_or_create_global_step() condition = tf.greater(global_step, hparams.pretrain_steps) else: condition = tf.constant(False, dtype=tf.bool) return condition, cond_latents
Get z^{cond}_{t} given z^{1..t-1}. Args: all_latents: list of list of tensors, outer-size equals no.of time_steps-1 inner-size equals hparams.n_levels. hparams: See next_frame_glow_hparams. Returns: cond_latents: conditional latents at time-step t.
juraj-google-style
def draw_rect(self, rect): check_int_err(lib.SDL_RenderDrawRect(self._ptr, rect._ptr))
Draw a rectangle on the current rendering target. Args: rect (Rect): The destination rectangle, or None to outline the entire rendering target. Raises: SDLError: If an error is encountered.
juraj-google-style
def _weight_generator(self, reviewers): scores = [r.anomalous_score for r in reviewers] mu = np.average(scores) sigma = np.std(scores) if sigma: def w(v): try: exp = math.exp(self.alpha * (v - mu) / sigma) return 1. / (1. + exp) except OverflowError: return 0. return w else: return lambda v: 1.
Compute a weight function for the given reviewers. Args: reviewers: a set of reviewers to compute weight function. Returns: a function computing a weight for a reviewer.
juraj-google-style
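Illustrative sketch for the _weight_generator row above: the same logistic down-weighting, restated as a free function and run on a toy set of anomaly scores so it executes standalone.

import math
import numpy as np

def weight_fn(scores, alpha=1.0):
    # restatement of the weighting logic above; alpha is an assumed parameter name
    mu, sigma = np.average(scores), np.std(scores)
    if not sigma:
        return lambda v: 1.0
    def w(v):
        try:
            return 1.0 / (1.0 + math.exp(alpha * (v - mu) / sigma))
        except OverflowError:
            return 0.0
    return w

scores = [0.1, 0.2, 0.9]                     # the 0.9 reviewer looks anomalous
w = weight_fn(scores)
print([round(w(s), 3) for s in scores])      # higher anomaly score -> lower weight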
def as_dataframe(self, pattern='*', max_rows=None): data = [] for i, group in enumerate(self.list(pattern)): if max_rows is not None and i >= max_rows: break parent = self._group_dict.get(group.parent_id) parent_display_name = '' if parent is None else parent.display_name data.append([ group.id, group.display_name, group.parent_id, parent_display_name, group.is_cluster, group.filter]) return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)
Creates a pandas dataframe from the groups that match the filters. Args: pattern: An optional pattern to further filter the groups. This can include Unix shell-style wildcards. E.g. ``"Production *"``, ``"*-backend"``. max_rows: The maximum number of groups to return. If None, return all. Returns: A pandas dataframe containing matching groups.
juraj-google-style
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
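Illustrative sketch for the build_inputs_with_special_tokens row above: the [CLS]/[SEP] layout it produces, with made-up token ids (101 and 102 mirror a common BERT vocabulary but are just placeholders here).

CLS, SEP = 101, 102

def build_inputs(ids_0, ids_1=None):
    if ids_1 is None:
        return [CLS] + ids_0 + [SEP]
    return [CLS] + ids_0 + [SEP] + ids_1 + [SEP]

print(build_inputs([7, 8]))           # [101, 7, 8, 102]
print(build_inputs([7, 8], [9]))      # [101, 7, 8, 102, 9, 102]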
def get_keypoint_predictions(heatmaps: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    if not isinstance(heatmaps, np.ndarray):
        raise ValueError('Heatmaps should be np.ndarray')
    if heatmaps.ndim != 4:
        raise ValueError('Heatmaps should be 4-dimensional')
    batch_size, num_keypoints, _, width = heatmaps.shape
    heatmaps_reshaped = heatmaps.reshape((batch_size, num_keypoints, -1))
    idx = np.argmax(heatmaps_reshaped, 2).reshape((batch_size, num_keypoints, 1))
    scores = np.amax(heatmaps_reshaped, 2).reshape((batch_size, num_keypoints, 1))
    preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
    preds[:, :, 0] = preds[:, :, 0] % width
    preds[:, :, 1] = preds[:, :, 1] // width
    preds = np.where(np.tile(scores, (1, 1, 2)) > 0.0, preds, -1)
    return (preds, scores)
Get keypoint predictions from score maps. Args: heatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width)`): Model predicted heatmaps. Returns: tuple: A tuple containing aggregated results. - coords (`np.ndarray` of shape `(batch_size, num_keypoints, 2)`): Predicted keypoint location. - scores (`np.ndarray` of shape `(batch_size, num_keypoints, 1)`): Scores (confidence) of the keypoints.
github-repos
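Illustrative sketch for the get_keypoint_predictions row above: the same flat-argmax-to-(x, y) conversion, run inline on a 1x1x3x3 toy heatmap so it executes standalone.

import numpy as np

heatmaps = np.zeros((1, 1, 3, 3), dtype=np.float32)
heatmaps[0, 0, 2, 1] = 0.9                      # peak at row 2, column 1

b, k, _, w = heatmaps.shape
flat = heatmaps.reshape((b, k, -1))
idx = np.argmax(flat, 2).reshape((b, k, 1))
scores = np.amax(flat, 2).reshape((b, k, 1))
coords = np.tile(idx, (1, 1, 2)).astype(np.float32)
coords[:, :, 0] = coords[:, :, 0] % w           # x = column index
coords[:, :, 1] = coords[:, :, 1] // w          # y = row index
print(coords[0, 0], scores[0, 0])               # [1. 2.] [0.9]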
def parse_compounds(compound_info, case_id, variant_type): compounds = [] if compound_info: for family_info in compound_info.split(','): splitted_entry = family_info.split(':') if (splitted_entry[0] == case_id): for compound in splitted_entry[1].split('|'): splitted_compound = compound.split('>') compound_obj = {} compound_name = splitted_compound[0] compound_obj['variant'] = generate_md5_key((compound_name.split('_') + [variant_type, case_id])) try: compound_score = float(splitted_compound[1]) except (TypeError, IndexError): compound_score = 0.0 compound_obj['score'] = compound_score compound_obj['display_name'] = compound_name compounds.append(compound_obj) return compounds
Get a list with compounds objects for this variant. Arguments: compound_info(str): A Variant dictionary case_id (str): unique family id variant_type(str): 'research' or 'clinical' Returns: compounds(list(dict)): A list of compounds
codesearchnet
def extractHolidayDate(self, setting_holiday): ret = namedtuple("result", ["Holiday", "Month", "Day"]) setting_holiday += 1 ret.Holiday = str(setting_holiday) if (setting_holiday < 1) or (setting_holiday > Extents.Holidays): ekm_log("Out of bounds: holiday " + str(setting_holiday)) ret.Holiday = ret.Month = ret.Day = str(0) return ret idxday = "Holiday_" + str(setting_holiday) + "_Day" idxmon = "Holiday_" + str(setting_holiday) + "_Mon" if idxmon not in self.m_hldy: ret.Holiday = ret.Month = ret.Day = str(0) return ret if idxday not in self.m_hldy: ret.Holiday = ret.Month = ret.Day = str(0) return ret ret.Day = self.m_hldy[idxday][MeterData.StringValue] ret.Month = self.m_hldy[idxmon][MeterData.StringValue] return ret
Read a single holiday date from meter buffer. Args: setting_holiday (int): Holiday from 0-19 or in range(Extents.Holidays) Returns: tuple: Holiday tuple, elements are strings. =============== ====================== Holiday Holiday 0-19 as string Day Day 1-31 as string Month Monty 1-12 as string =============== ======================
juraj-google-style
def _ParseKeyWithPlugin(self, parser_mediator, registry_key, plugin): try: plugin.UpdateChainAndProcess(parser_mediator, registry_key) except (IOError, dfwinreg_errors.WinRegistryValueError) as exception: parser_mediator.ProduceExtractionWarning( 'in key: {0:s} error: {1!s}'.format(registry_key.path, exception))
Parses the Registry key with a specific plugin. Args: parser_mediator (ParserMediator): parser mediator. registry_key (dfwinreg.WinRegistryKey): Windwos Registry key. plugin (WindowsRegistryPlugin): Windows Registry plugin.
juraj-google-style
def execute(self, command, *args, encoding=_NOTSET): if ((self._reader is None) or self._reader.at_eof()): msg = (self._close_msg or 'Connection closed or corrupted') raise ConnectionClosedError(msg) if (command is None): raise TypeError('command must not be None') if (None in args): raise TypeError('args must not contain None') command = command.upper().strip() is_pubsub = (command in _PUBSUB_COMMANDS) is_ping = (command in ('PING', b'PING')) if (self._in_pubsub and (not (is_pubsub or is_ping))): raise RedisError('Connection in SUBSCRIBE mode') elif is_pubsub: logger.warning('Deprecated. Use `execute_pubsub` method directly') return self.execute_pubsub(command, *args) if (command in ('SELECT', b'SELECT')): cb = partial(self._set_db, args=args) elif (command in ('MULTI', b'MULTI')): cb = self._start_transaction elif (command in ('EXEC', b'EXEC')): cb = partial(self._end_transaction, discard=False) elif (command in ('DISCARD', b'DISCARD')): cb = partial(self._end_transaction, discard=True) else: cb = None if (encoding is _NOTSET): encoding = self._encoding fut = self._loop.create_future() if (self._pipeline_buffer is None): self._writer.write(encode_command(command, *args)) else: encode_command(command, *args, buf=self._pipeline_buffer) self._waiters.append((fut, encoding, cb)) return fut
Executes redis command and returns Future waiting for the answer. Raises: * TypeError if any of args can not be encoded as bytes. * ReplyError on redis '-ERR' responses. * ProtocolError when response can not be decoded meaning connection is broken. * ConnectionClosedError when either client or server has closed the connection.
codesearchnet
def batch_normalize_with_arguments(x, arguments): x = prettytensor.wrap(x) if isinstance(arguments, bool): if arguments: return x.batch_normalize() else: return x kwargs = arguments._asdict() defaults = prettytensor._defaults for arg in ('learned_moments_update_rate', 'variance_epsilon', 'scale_after_normalization'): if (kwargs.get(arg, None) is None): if (arg in defaults): kwargs[arg] = defaults[arg] else: del kwargs[arg] return x.batch_normalize(**kwargs)
Applies batch normalization to x as specified in arguments. Args: x: A Pretty Tensor. arguments: Either a boolean to batch_normalize or a BatchNormalizationArguments Returns: x with batch normalization applied.
codesearchnet
def get_counter_metric(result: PipelineResult, namespace: str, name: str) -> int: metrics = result.metrics().query(MetricsFilter().with_namespace(namespace).with_name(name)) counters = metrics['counters'] if len(counters) > 1: raise RuntimeError('%d instead of one metric result matches name: %s in namespace %s' % (len(counters), name, namespace)) return counters[0].result if len(counters) > 0 else -1
get specific counter metric from pipeline result Args: result: the PipelineResult which metrics are read from namespace: a string representing the namespace of wanted metric name: a string representing the name of the wanted metric Returns: the result of the wanted metric if it exist, else -1
github-repos
def range(*args, prefix: str): return [NamedQubit(prefix + str(i)) for i in range(*args)]
Returns a range of NamedQubits. The range returned starts with the prefix and is followed by a qubit for each number in the range, e.g.: NamedQubit.range(3, prefix="a") -> ["a0", "a1", "a2"] NamedQubit.range(2, 4, prefix="a") -> ["a2", "a3"] Args: *args: Args to be passed to Python's standard range function. prefix: A prefix for constructed NamedQubits. Returns: A list of NamedQubits.
juraj-google-style
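Illustrative sketch for the NamedQubit.range row above: the prefix-plus-range naming scheme, with plain strings standing in for NamedQubit so the snippet runs without cirq.

def named_range(*args, prefix):
    return [prefix + str(i) for i in range(*args)]

print(named_range(3, prefix="a"))      # ['a0', 'a1', 'a2']
print(named_range(2, 4, prefix="a"))   # ['a2', 'a3']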
def generate_sitemap(self, path='sitemap.xml', https=False):
    sitemap = russell.sitemap.generate_sitemap(self, https=https)
    self.write_file(path, sitemap)
Generate an XML sitemap. Args: path (str): The name of the file to write to. https (bool): If True, links inside the sitemap with relative scheme (e.g. example.com/something) will be set to HTTPS. If False (the default), they will be set to plain HTTP.
juraj-google-style
def set_timestamp(cls, filename: str, response: HTTPResponse): last_modified = response.fields.get('Last-Modified') if not last_modified: return try: last_modified = email.utils.parsedate(last_modified) except ValueError: _logger.exception('Failed to parse date.') return last_modified = time.mktime(last_modified) os.utime(filename, (time.time(), last_modified))
Set the Last-Modified timestamp onto the given file. Args: filename: The path of the file response: Response
juraj-google-style
def wait(self, timeout=None): if self._future: try: self._future.exception(timeout) except concurrent.futures.TimeoutError: self._timeout() self._refresh_state() else: while not self.is_complete: if timeout is not None: if timeout <= 0: self._timeout() timeout -= Job._POLL_INTERVAL_SECONDS time.sleep(Job._POLL_INTERVAL_SECONDS) return self
Wait for the job to complete, or a timeout to happen. Args: timeout: how long to wait before giving up (in seconds); default None which means no timeout. Returns: The Job
juraj-google-style
def profile(self, num):
    baseuri = self._BASE_URI + 'company/{}'.format(num)
    res = self.session.get(baseuri)
    self.handle_http_error(res)
    return res
Search for company profile by company number. Args: num (str): Company number to search on.
codesearchnet
def heightmap_get_normal( hm: np.ndarray, x: float, y: float, waterLevel: float ) -> Tuple[float, float, float]: cn = ffi.new("float[3]") lib.TCOD_heightmap_get_normal(_heightmap_cdata(hm), x, y, cn, waterLevel) return tuple(cn)
Return the map normal at given coordinates. Args: hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions. x (float): The x coordinate. y (float): The y coordinate. waterLevel (float): The heightmap is considered flat below this value. Returns: Tuple[float, float, float]: An (x, y, z) vector normal.
juraj-google-style
def CreateSmartShoppingAdGroup(client, campaign_id):
    ad_group_service = client.GetService('AdGroupService', version='v201809')
    # The ad group name was truncated in this row; a plain name is used here.
    ad_group = {'campaignId': campaign_id, 'name': 'Smart Shopping ad group'}
    adgroup_operations = {'operator': 'ADD', 'operand': ad_group}
    ad_group = ad_group_service.mutate(adgroup_operations)['value'][0]
    ad_group_id = ad_group['id']
    print('AdGroup with name "%s" and ID "%s" was added.'
          % (ad_group['name'], ad_group_id))
    return ad_group_id
Adds a new Smart Shopping ad group. Args: client: an AdWordsClient instance. campaign_id: the str ID of a Smart Shopping campaign. Returns: An ad group ID.
codesearchnet
def push_doc(self, document): msg = self._protocol.create('PUSH-DOC', document) reply = self._send_message_wait_for_reply(msg) if reply is None: raise RuntimeError("Connection to server was lost") elif reply.header['msgtype'] == 'ERROR': raise RuntimeError("Failed to push document: " + reply.content['text']) else: return reply
Push a document to the server, overwriting any existing server-side doc. Args: document : (Document) A Document to push to the server Returns: The server reply
juraj-google-style
def set_cache_policy(self, func):
    if func is None:
        func = self.default_cache_policy
    elif isinstance(func, bool):
        func = lambda unused_key, flag=func: flag
    self._cache_policy = func
Set the context cache policy function. Args: func: A function that accepts a Key instance as argument and returns a bool indicating if it should be cached. May be None.
codesearchnet
def cumulative_probabilities(self):
    partition_function = np.sum(self.p)
    return np.cumsum(self.p) / partition_function
Cumulative sum of the relative probabilities for all possible jumps. Args: None Returns: (np.array): Cumulative sum of relative jump probabilities.
juraj-google-style
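Illustrative sketch for the cumulative_probabilities row above: normalising relative jump probabilities into a cumulative distribution, with a note on how such an array is typically used.

import numpy as np

p = np.array([2.0, 1.0, 1.0])              # relative jump probabilities
cumulative = np.cumsum(p) / np.sum(p)
print(cumulative)                          # [0.5  0.75 1.  ]
# A uniform random number in [0, 1) can then be placed into `cumulative`
# (e.g. with np.searchsorted) to select a jump in proportion to its weight.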
def check_prerequisites( prerequisites, checker, msg_tmpl='Prerequisites "{}" are required in method "{}" but not ' 'found, please install them first.'): def wrap(func): @functools.wraps(func) def wrapped_func(*args, **kwargs): requirements = [prerequisites] if isinstance( prerequisites, str) else prerequisites missing = [] for item in requirements: if not checker(item): missing.append(item) if missing: print(msg_tmpl.format(', '.join(missing), func.__name__)) raise RuntimeError('Prerequisites not meet.') else: return func(*args, **kwargs) return wrapped_func return wrap
A decorator factory to check if prerequisites are satisfied. Args: prerequisites (str or list[str]): Prerequisites to be checked. checker (callable): The checker method that returns True if a prerequisite is met, False otherwise. msg_tmpl (str): The message template with two variables. Returns: decorator: A specific decorator.
juraj-google-style
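Illustrative usage sketch for the check_prerequisites row above: the decorator paired with an import-based checker. The decorator is restated compactly here so the snippet runs on its own; the message-template argument is dropped for brevity.

import functools
import importlib.util

def check_prerequisites(prerequisites, checker):
    # compact restatement of the decorator factory above
    def wrap(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            required = [prerequisites] if isinstance(prerequisites, str) else prerequisites
            missing = [item for item in required if not checker(item)]
            if missing:
                raise RuntimeError('Prerequisites not met: %s' % ', '.join(missing))
            return func(*args, **kwargs)
        return wrapped
    return wrap

def module_exists(name):
    return importlib.util.find_spec(name) is not None

@check_prerequisites(['json'], checker=module_exists)
def dumps(obj):
    import json
    return json.dumps(obj)

print(dumps({'ok': True}))   # {"ok": true}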
def combine(args, part=None): args = [cleanup(arg) for arg in args] if (part is not None): (parts, orders) = part if (numpy.array(orders).size == 1): orders = ([int(numpy.array(orders).item())] * len(args)) parts = numpy.array(parts).flatten() for (i, arg) in enumerate(args): (m, n) = (float(parts[i]), float(orders[i])) l = len(arg) args[i] = arg[int(((m / n) * l)):int((((m + 1) / n) * l))] shapes = [arg.shape for arg in args] size = (numpy.prod(shapes, 0)[0] * numpy.sum(shapes, 0)[1]) if (size > (10 ** 9)): raise MemoryError('Too large sets') if (len(args) == 1): out = args[0] elif (len(args) == 2): out = combine_two(*args) else: arg1 = combine_two(*args[:2]) out = combine(([arg1] + args[2:])) return out
All linear combination of a list of list. Args: args (numpy.ndarray) : List of input arrays. Components to take linear combination of with `args[i].shape=(N[i], M[i])` where N is to be taken linear combination of and M is static. M[i] is set to 1 if missing. Returns: (numpy.array) : matrix of combinations with shape (numpy.prod(N), numpy.sum(M)). Examples: >>> A, B = [1,2], [[4,4],[5,6]] >>> print(chaospy.quad.combine([A, B])) [[1. 4. 4.] [1. 5. 6.] [2. 4. 4.] [2. 5. 6.]]
codesearchnet
def _ParseCString(self, page_data, string_offset): cstring_map = self._GetDataTypeMap('cstring') try: value_string = self._ReadStructureFromByteStream(page_data[string_offset:], string_offset, cstring_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError('Unable to map string data at offset: 0x{0:08x} with error: {1!s}'.format(string_offset, exception)) return value_string.rstrip('\x00')
Parses a C string from the page data. Args: page_data (bytes): page data. string_offset (int): offset of the string relative to the start of the page. Returns: str: string. Raises: ParseError: when the string cannot be parsed.
codesearchnet
def label_search(self, label: str) -> List[dict]:
    ilx_rows = self.label2rows(self.local_degrade(label))
    if not ilx_rows:
        return None
    else:
        return ilx_rows
Returns the rows in InterLex associated with that label Note: Presumed to have duplicated labels in InterLex Args: label: label of the entity you want to find Returns: None or List[dict]
juraj-google-style
def _overload_operator(cls, tensor_class, operator):
    tensor_oper = getattr(tensor_class, operator)
    tensor_oper = getattr(tensor_oper, '__func__', tensor_oper)
    setattr(cls, operator, tensor_oper)
Overload an operator with the same implementation as a base Tensor class. We pull the operator out of the class dynamically to avoid ordering issues. Args: tensor_class: The (Composite)Tensor to get the method from. operator: string. The operator name.
github-repos
def read(self, size=None): if not self._is_open: raise IOError('Not opened.') if self._current_offset < 0: raise IOError('Invalid current offset value less than zero.') if self._current_offset >= self._size: return b'' if size is None or self._current_offset + size > self._size: size = self._size - self._current_offset if self._tsk_attribute: data = self._tsk_file.read_random( self._current_offset, size, self._tsk_attribute.info.type, self._tsk_attribute.info.id) else: data = self._tsk_file.read_random(self._current_offset, size) self._current_offset += len(data) return data
Reads a byte string from the file-like object at the current offset. The function will read a byte string of the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed.
juraj-google-style
def insert(self, optional_root_locations_path): encountered_simple_optional = False parent_location = self._root_location for optional_root_location in optional_root_locations_path: if encountered_simple_optional: raise AssertionError(u'Encountered simple optional root location {} in path, but' u'further locations are present. This should not happen: {}' .format(optional_root_location, optional_root_locations_path)) if optional_root_location not in self._location_to_children: encountered_simple_optional = True else: self._location_to_children[parent_location].add(optional_root_location) parent_location = optional_root_location
Insert a path of optional Locations into the tree. Each OptionalTraversalTree object contains child Location objects as keys mapping to other OptionalTraversalTree objects. Args: optional_root_locations_path: list of optional root Locations all except the last of which must be present in complex_optional_roots
juraj-google-style
def promote_artifacts(self, promote_stage='latest'): if (promote_stage.lower() == 'alpha'): self._sync_to_uri(self.s3_canary_uri) elif (promote_stage.lower() == 'canary'): self._sync_to_uri(self.s3_latest_uri) else: self._sync_to_uri(self.s3_latest_uri)
Promote artifact version to dest. Args: promote_stage (string): Stage that is being promoted
codesearchnet
def read_int8(self, little_endian=True):
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.unpack('%sb' % endian)
Read 1 byte as a signed integer value from the stream. Args: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int:
juraj-google-style
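Illustrative sketch for the read_int8 row above: the struct format strings behind the endianness prefix and the signed 'b' code, shown directly on a raw byte.

import struct

raw = b'\xff'
print(struct.unpack('<b', raw)[0])   # -1: signed byte, as read_int8 reads it
print(struct.unpack('<B', raw)[0])   # 255: the unsigned reading, for contrast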
def _merge_tensor_signatures(self, signatures): sorted_update = [] if self._num_signature_dimensions() > 1: signature_indices = self._signature_types() for _, val in sorted(signatures.items(), key=lambda item: signature_indices[item[0]]): sorted_update.append(val) updates = array_ops_stack.stack(sorted_update, axis=0, name='merge_single_op_signatures') elif self._num_signature_dimensions() == 1: (_, val), = signatures.items() updates = val else: raise ValueError('Cannot merge 0 signatures. Check the value passed for flag --signatures.') return updates
Returns a tensor that merges the given signatures. Args: signatures: A dictionary of the signature updates from signature name to a tensor of dimension [1]. Returns: A tensor that concats the signature values in a predefined order. Raises: ValueError: Unable to merge signatures.
github-repos
def _ReadEncryptedData(self, read_size): encrypted_data = self._file_object.read(read_size) read_count = len(encrypted_data) self._encrypted_data = b''.join([self._encrypted_data, encrypted_data]) self._decrypted_data, self._encrypted_data = ( self._decrypter.Decrypt(self._encrypted_data)) self._decrypted_data_size = len(self._decrypted_data) return read_count
Reads encrypted data from the file-like object. Args: read_size (int): number of bytes of encrypted data to read. Returns: int: number of bytes of encrypted data read.
juraj-google-style
def on_predict_batch_end(self, batch, logs=None):
Called at the end of a batch in `predict` methods. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch.
github-repos
def merge_two_dictionaries(a, b, merge_lists=False): key = None try: if ((a is None) or isinstance(a, (six.string_types, six.text_type, six.integer_types, float))): a = b elif isinstance(a, list): if isinstance(b, list): if merge_lists: a.extend(b) else: a = b else: a.append(b) elif isinstance(a, (dict, UserDict)): if isinstance(b, (dict, UserDict)): for key in b: if (key in a): a[key] = merge_two_dictionaries(a[key], b[key], merge_lists=merge_lists) else: a[key] = b[key] else: raise ValueError(('Cannot merge non-dict "%s" into dict "%s"' % (b, a))) else: raise ValueError(('NOT IMPLEMENTED "%s" into "%s"' % (b, a))) except TypeError as e: raise ValueError(('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, b, a))) return a
Merges b into a and returns merged result NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen Args: a (DictUpperBound): dictionary to merge into b (DictUpperBound): dictionary to merge from merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False. Returns: DictUpperBound: Merged dictionary
codesearchnet
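Illustrative sketch for the merge_two_dictionaries row above: the same merge semantics restated for plain Python 3 dicts (the six and UserDict handling is dropped), run on a small example.

def merge(a, b, merge_lists=False):
    # Python-3-only restatement of merge_two_dictionaries above
    if a is None or isinstance(a, (str, int, float)):
        return b
    if isinstance(a, list):
        if isinstance(b, list):
            return a + b if merge_lists else b
        return a + [b]
    if isinstance(a, dict) and isinstance(b, dict):
        for key in b:
            a[key] = merge(a[key], b[key], merge_lists) if key in a else b[key]
        return a
    raise ValueError('Cannot merge %r into %r' % (b, a))

a = {'x': 1, 'nested': {'keep': True}, 'tags': ['a']}
b = {'x': 2, 'nested': {'new': 1}, 'tags': ['b']}
print(merge(a, b))
# {'x': 2, 'nested': {'keep': True, 'new': 1}, 'tags': ['b']}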
def nb_ll_row(params, data_row): p = params[0] r = params[1] n = len(data_row) ll = np.sum(gammaln(data_row + r)) - np.sum(gammaln(data_row + 1)) ll -= n*gammaln(r) ll += np.sum(data_row)*np.log(p) ll += n*r*np.log(1-p) return -ll
returns the negative LL of a single row. Args: params (array) - [p, r] data_row (array) - 1d array of data Returns: LL of row
juraj-google-style
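Illustrative sketch for the nb_ll_row row above: evaluating the negative-binomial row likelihood on toy counts (requires numpy and scipy; the function is restated so the snippet runs on its own).

import numpy as np
from scipy.special import gammaln

def nb_ll_row(params, data_row):
    # restatement of nb_ll_row above
    p, r = params
    n = len(data_row)
    ll = np.sum(gammaln(data_row + r)) - np.sum(gammaln(data_row + 1))
    ll -= n * gammaln(r)
    ll += np.sum(data_row) * np.log(p)
    ll += n * r * np.log(1 - p)
    return -ll

row = np.array([0., 1., 3., 2.])
print(nb_ll_row([0.5, 2.0], row))   # scalar negative log-likelihood, suitable for a scipy.optimize minimiser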
def __init__(self, submission_id, submissions, storage_bucket): super(DefenseSubmission, self).__init__(submission_id, submissions, storage_bucket) if self.type != TYPE_DEFENSE: raise WorkerError('Incorrect defense type for submission "{0}"'.format( submission_id))
Initializes DefenseSubmission. Args: submission_id: ID of the submission submissions: instance of CompetitionSubmissions with all submissions storage_bucket: storage bucket where all submissions are stored Raises: WorkerError: if submission has incorrect type
juraj-google-style
def __init__(self, feed_fn): self.feed_fn = feed_fn
Initializes a `FeedFnHook`. Args: feed_fn: function that takes no arguments and returns `dict` of `Tensor` to feed.
github-repos
def check_denotation(target_values, predicted_values): if (len(target_values) != len(predicted_values)): return False for target in target_values: if (not any((target.match(pred) for pred in predicted_values))): return False return True
Return True if the predicted denotation is correct. Args: target_values (list[Value]) predicted_values (list[Value]) Returns: bool
codesearchnet
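Illustrative sketch for the check_denotation row above, with a minimal stand-in Value type (the real Value class is not shown in this row, so equality-based matching is assumed here).

class Value:
    def __init__(self, v):
        self.v = v
    def match(self, other):
        return self.v == other.v

def check_denotation(target_values, predicted_values):
    if len(target_values) != len(predicted_values):
        return False
    return all(any(t.match(p) for p in predicted_values) for t in target_values)

targets = [Value('2014'), Value('Paris')]
print(check_denotation(targets, [Value('Paris'), Value('2014')]))   # True
print(check_denotation(targets, [Value('2014')]))                   # False (length mismatch)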
def ParseContactRow(self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) event_data = TwitterIOSContactEventData() event_data.description = self._GetRowValue(query_hash, row, 'description') event_data.followers_count = self._GetRowValue( query_hash, row, 'followersCount') event_data.following = self._GetRowValue(query_hash, row, 'following') event_data.following_count = self._GetRowValue( query_hash, row, 'followingCount') event_data.location = self._GetRowValue(query_hash, row, 'location') event_data.name = self._GetRowValue(query_hash, row, 'name') event_data.profile_url = self._GetRowValue( query_hash, row, 'profileImageUrl') event_data.query = query event_data.screen_name = self._GetRowValue(query_hash, row, 'screenName') event_data.url = self._GetRowValue(query_hash, row, 'url') timestamp = self._GetRowValue(query_hash, row, 'createdDate') if timestamp: timestamp = int(timestamp) date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'updatedAt') if timestamp: timestamp = int(timestamp) date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_UPDATE) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a contact row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.
juraj-google-style
def _send_json(self, method, path, data): headers = {'Content-type': 'application/json'} return self._make_request(method, path, data=data, headers=headers)
Make an application/json request. Args: `method`: The method of the request (POST or PUT). `path`: The path to the resource. `data`: The JSON-encoded data. Returns: The content of the response. Raises: An exception depending on the HTTP status code of the response.
juraj-google-style
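A standalone sketch of the same pattern with the requests library; the base-URL handling and error behaviour of the class's own _make_request are assumptions here.

import json
import requests

def send_json(method, url, payload):
    # Serialize the payload and declare the content type, as _send_json does.
    headers = {"Content-type": "application/json"}
    response = requests.request(method, url, data=json.dumps(payload), headers=headers)
    response.raise_for_status()  # mirrors "raises depending on the HTTP status code"
    return response.content

# send_json("POST", "https://example.com/api/items", {"name": "widget"})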
def __init__(self, name, client=None): self._name = name self.client = client self._state = None
Create a Thing. Args: name (str): name of the Thing. This corresponds to the AWS IoT Thing name. client (str): MQTT client connection to use. This can be set anytime before publishing Thing messages to the server.
juraj-google-style
def _apply(self, ctx: ExtensionContext) -> AugmentedDict: node_key, node_value = ctx.node def process(pattern: Pattern[str], _str: str) -> str: _match = pattern.match(_str) if _match is None: return _str placeholder, envvar = _match.group(1), _match.group(2) envvalue = os.environ.get(envvar, None) if envvalue is None and self.fail_on_unset: raise ExtensionError("Environment variable '{}' is unset.".format(envvar)) return _str.replace(placeholder, envvalue or self.default) _pattern = re.compile(self.__pattern__) node_key = process(_pattern, node_key) node_value = process(_pattern, node_value) return {node_key: node_value}
Replaces any {{env::*}} directives with its actual environment variable value or a default. Args: ctx: The processing context. Returns: The altered node key and value.
juraj-google-style
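A standalone sketch of the {{env::VAR}} substitution described above; the exact placeholder regex and the extension's error type are assumptions.

import os
import re

PATTERN = re.compile(r"(\{\{env::(\w+)\}\})")  # assumed placeholder syntax

def substitute(value, default="", fail_on_unset=False):
    match = PATTERN.match(value)
    if match is None:
        return value
    placeholder, envvar = match.group(1), match.group(2)
    envvalue = os.environ.get(envvar)
    if envvalue is None and fail_on_unset:
        raise KeyError(f"Environment variable '{envvar}' is unset.")
    return value.replace(placeholder, envvalue or default)

os.environ["GREETING"] = "hello"
print(substitute("{{env::GREETING}} world"))          # hello world
print(substitute("{{env::MISSING}}", default="n/a"))  # n/a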
def FetchAllGraphSeries(label, report_type, period=None, token=None): if _ShouldUseLegacyDatastore(): return _FetchAllGraphSeriesFromTheLegacyDB(label, report_type, period=period, token=token) if (period is None): time_range = None else: range_end = rdfvalue.RDFDatetime.Now() time_range = time_utils.TimeRange((range_end - period), range_end) return data_store.REL_DB.ReadAllClientGraphSeries(label, report_type, time_range=time_range)
Fetches graph series for the given label and report-type from the DB. Args: label: Client label to fetch data for. report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for. period: rdfvalue.Duration specifying how far back in time to fetch data. If not provided, all data for the given label and report-type will be returned. token: ACL token to use for reading from the legacy (non-relational) datastore. Raises: AFF4AttributeTypeError: If, when reading to the legacy DB, an unexpected report-data type is encountered. Returns: A dict mapping timestamps to graph-series. The timestamps represent when the graph-series were written to the datastore.
codesearchnet
def get(self, name, *default): curr = self.values for part in name.split('.'): if (part in curr): curr = curr[part] elif default: return default[0] else: fmt = "Context value '{}' does not exist:\n{}" raise AttributeError(fmt.format(name, util.yaml_dump(self.values))) return curr
Get context value with the given name and optional default. Args: name (str): The name of the context value. *default (Any): If given and the key does not exist, this will be returned instead. If it's not given and the context value does not exist, `AttributeError` will be raised. Returns: The requested context value. If the value does not exist, it will return `default` if given or raise `AttributeError`. Raises: AttributeError: If the value does not exist and `default` was not given.
codesearchnet
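A usage sketch of the dotted-path lookup; a plain dict stands in for the context object here.

values = {"db": {"host": "localhost", "port": 5432}}

def get(values, name, *default):
    curr = values
    for part in name.split("."):
        if part in curr:
            curr = curr[part]
        elif default:
            return default[0]
        else:
            raise AttributeError(f"Context value '{name}' does not exist")
    return curr

print(get(values, "db.host"))           # localhost
print(get(values, "db.user", "admin"))  # admin -- falls back to the default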
def is_text(self): return (self.type in [self._TYPE_PASTE, self._TYPE_TEXT, self._TYPE_TWEET])
Tells if this message is a text message. Returns: bool: True if the message is a text message.
codesearchnet
def parse_step(step_name): prefix = 'step' step_name = step_name.lower().replace(' ', '_') step_name = step_name[len(prefix):] if prefix and step_name.startswith(prefix) else step_name return step_name.strip(':_')
Replaces white spaces and removes 'Step:' label Args: step_name(str): step name passed in metric ParDo Returns: lower case step name without namespace and step label
github-repos
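Example inputs and outputs for the normalization above (the step names are made up for illustration):

# Assuming the parse_step definition above is in scope:
print(parse_step("Step: Read From Text"))  # -> "read_from_text"
print(parse_step("ParDo(MyDoFn)"))         # -> "pardo(mydofn)"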
def publish(cls, message, client_filter=None): with cls._lock: for client in cls.subscribers: if ((not client_filter) or client_filter(client)): client.send(message)
Publish messages to subscribers. Args: message: The message to publish. client_filter: A filter function to call passing in each client. Only clients for whom the function returns True will have the message sent to them.
codesearchnet
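A minimal stand-in showing the filter semantics: only clients for which the predicate returns True receive the message. The Client class and subscriber list are illustrative stubs.

class Client:
    def __init__(self, name, wants_alerts):
        self.name = name
        self.wants_alerts = wants_alerts
    def send(self, message):
        print(f"{self.name} <- {message}")

subscribers = [Client("a", True), Client("b", False)]

def publish(message, client_filter=None):
    for client in subscribers:
        if not client_filter or client_filter(client):
            client.send(message)

publish("system alert", client_filter=lambda c: c.wants_alerts)  # only "a" receives it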
def teardown(self, *args, **kwargs): pass
Called to clean up an instance before it is discarded. If you are using Dataflow, you need to enable Dataflow Runner V2 before using this feature. Args: *args: Additional arguments and side inputs. **kwargs: Additional arguments and side inputs.
github-repos
def index_add(x, idx, y): return _index_update_helper(tf_np.ndarray._with_index_add, x, idx, y)
Pure equivalent of `x[idx] += y`. Returns the value of x that would result from the NumPy-style indexed assignment `x[idx] += y`. Because it's a pure function, `x` itself won't be changed. Args: x: an array with the values to be updated. idx: a Numpy-style index, consisting of `None`, integers, slice objects, ellipses, ndarrays with integer dtypes, or a tuple of the above. y: the array of updates. `y` must be broadcastable to the shape of the array that would be returned by `x[idx]`. Returns: The updated version of `x`.
github-repos
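A NumPy illustration of the pure semantics (this mirrors the behaviour, not the tf_np implementation): the input array is left untouched and a new array with x[idx] += y applied is returned.

import numpy as np

def pure_index_add(x, idx, y):
    out = np.array(x, copy=True)
    np.add.at(out, idx, y)  # accumulates like `+=` even with repeated indices
    return out

x = np.zeros(4)
result = pure_index_add(x, np.array([0, 1, 1]), np.array([1.0, 2.0, 3.0]))
print(result)  # [1. 5. 0. 0.]
print(x)       # [0. 0. 0. 0.] -- original unchanged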
def guess_depth(packages): if len(packages) == 1: return packages[0].count('.') + 2 return min(p.count('.') for p in packages) + 1
Guess the optimal depth to use for the given list of arguments. Args: packages (list of str): list of packages. Returns: int: guessed depth to use.
juraj-google-style
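Worked examples of the heuristic (package names are made up):

# Assuming the guess_depth definition above is in scope:
print(guess_depth(["a.b.c"]))         # 4 -- a single package: 2 dots + 2
print(guess_depth(["a.b.c", "a.b"]))  # 2 -- shallowest package has 1 dot, + 1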
def _separate_string(string: str, stride: int, separator: str) -> str: result = '' for i, c in enumerate(string): if i > 0 and i % stride == 0: result += separator result += c return result
Returns a separated string by separator at multiples of stride. For example, the input: * string: 'thequickbrownfoxjumpedoverthelazydog' * stride: 3 * separator: '-' Would produce a return value of: 'the-qui-ckb-row-nfo-xju-mpe-dov-ert-hel-azy-dog' Args: string: The string to split. stride: The interval to insert the separator at. separator: The string to insert at every stride interval. Returns: The original string with the separator present at every stride interval.
github-repos
def generate_string(self, initial_logits, initial_state, sequence_length): current_logits = initial_logits current_state = initial_state generated_letters = [] for _ in range(sequence_length): char_index = tf.squeeze(tf.multinomial(current_logits, 1)) char_one_hot = tf.one_hot(char_index, self._output_size, 1.0, 0.0) generated_letters.append(char_one_hot) (gen_out_seq, current_state) = self._core(tf.nn.relu(self._embed_module(char_one_hot)), current_state) current_logits = self._output_module(gen_out_seq) generated_string = tf.stack(generated_letters) return generated_string
Builds sub-graph to generate a string, sampled from the model. Args: initial_logits: Starting logits to sample from. initial_state: Starting state for the RNN core. sequence_length: Number of characters to sample. Returns: A Tensor of characters, with dimensions `[sequence_length, batch_size, output_size]`.
codesearchnet
def from_file(cls, path): with open(path, 'r', errors='replace') as f: return cls(f.read())
Create a text from a file. Args: path (str): The file path.
juraj-google-style
def _AddPropertiesForNonRepeatedScalarField(field, cls): proto_field_name = field.name property_name = _PropertyName(proto_field_name) type_checker = type_checkers.GetTypeChecker(field) default_value = field.default_value valid_values = set() is_proto3 = (field.containing_type.syntax == 'proto3') def getter(self): return self._fields.get(field, default_value) getter.__module__ = None getter.__doc__ = ('Getter for %s.' % proto_field_name) clear_when_set_to_default = (is_proto3 and (not field.containing_oneof)) def field_setter(self, new_value): new_value = type_checker.CheckValue(new_value) if (clear_when_set_to_default and (not new_value)): self._fields.pop(field, None) else: self._fields[field] = new_value if (not self._cached_byte_size_dirty): self._Modified() if field.containing_oneof: def setter(self, new_value): field_setter(self, new_value) self._UpdateOneofState(field) else: setter = field_setter setter.__module__ = None setter.__doc__ = ('Setter for %s.' % proto_field_name) doc = ('Magic attribute generated for "%s" proto field.' % proto_field_name) setattr(cls, property_name, property(getter, setter, doc=doc))
Adds a public property for a nonrepeated, scalar protocol message field. Clients can use this property to get and directly set the value of the field. Note that when the client sets the value of a field by using this property, all necessary "has" bits are set as a side-effect, and we also perform type-checking. Args: field: A FieldDescriptor for this field. cls: The class we're constructing.
codesearchnet
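A simplified, hypothetical illustration of the underlying technique: attaching a property with a generated getter and setter to a class at runtime, as the protobuf helper above does for each scalar field.

def add_scalar_property(cls, name, default, type_check):
    storage = "_" + name

    def getter(self):
        return getattr(self, storage, default)

    def setter(self, value):
        setattr(self, storage, type_check(value))  # type-check on assignment

    setattr(cls, name, property(getter, setter, doc=f"Generated property for {name}."))

class Message:
    pass

add_scalar_property(Message, "count", default=0, type_check=int)
m = Message()
print(m.count)  # 0 (default)
m.count = "7"   # coerced by the type check
print(m.count)  # 7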
def __init__(self, function_name, level=1, children_inputs_mappings=None, **kwargs): self._function_name = function_name self._level = level if self._level == 1: assert children_inputs_mappings is None else: assert isinstance(children_inputs_mappings, dict) self._children_inputs_mappings = children_inputs_mappings if self._children_inputs_mappings is not None: self._validate_children_inputs_mappings(self._children_inputs_mappings) self._unique_function_id = _uuid.uuid1().hex self._attrs_to_store_later = kwargs self._stored_attrs = False self._inputs = OpHint.OpHintArgumentTracker(self._function_name, self._unique_function_id, 'InputHint', OpHint.FUNCTION_INPUT_INDEX_ATTR, level, self._children_inputs_mappings) self._outputs = OpHint.OpHintArgumentTracker(self._function_name, self._unique_function_id, 'OutputHint', OpHint.FUNCTION_OUTPUT_INDEX_ATTR, level, self._children_inputs_mappings)
Create a OpHint. Args: function_name: Name of the function (the custom op name in tflite) level: OpHint level. children_inputs_mappings: Children OpHint inputs/outputs mapping. children_inputs_mappings should like below: "parent_first_child_input": [{"parent_input_index": num, "child_input_index": num}, ...] "parent_last_child_output": [{"parent_output_index": num, "child_output_index": num}, ...] "internal_children_input_output": [{"child_input_index": num, "child_output_index": num}, ...] **kwargs: Keyword arguments of any constant attributes for the function.
github-repos
def exit_hook(callable, once=True): if (once and (callable in ExitHooks)): return ExitHooks.append(callable)
r"""A decorator that makes the decorated function to run while ec exits. Args: callable (callable): The target callable. once (bool): Avoids adding a func to the hooks, if it has been added already. Defaults to True. Note: Hooks are processedd in a LIFO order.
codesearchnet
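A sketch of LIFO hook processing; the ExitHooks list and the atexit-based runner are assumptions standing in for ec's internals.

import atexit

ExitHooks = []

def exit_hook(callable, once=True):
    if once and callable in ExitHooks:
        return
    ExitHooks.append(callable)

@atexit.register
def _run_hooks():
    for hook in reversed(ExitHooks):  # LIFO: last registered runs first
        hook()

exit_hook(lambda: print("registered first, runs last"))
exit_hook(lambda: print("registered last, runs first"))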
def play_alert(zones, alert_uri, alert_volume=20, alert_duration=0, fade_back=False): for zone in zones: zone.snap = Snapshot(zone) zone.snap.snapshot() print('snapshot of zone: {}'.format(zone.player_name)) for zone in zones: if zone.is_coordinator: if not zone.is_playing_tv: trans_state = zone.get_current_transport_info() if trans_state['current_transport_state'] == 'PLAYING': zone.pause() zone.volume = alert_volume zone.mute = False print('will play: {} on all coordinators'.format(alert_uri)) for zone in zones: if zone.is_coordinator: zone.play_uri(uri=alert_uri, title='Sonos Alert') time.sleep(alert_duration) for zone in zones: print('restoring {}'.format(zone.player_name)) zone.snap.restore(fade=fade_back)
Demo function using soco.snapshot across multiple Sonos players. Args: zones (set): a set of SoCo objects alert_uri (str): uri that Sonos can play as an alert alert_volume (int): volume level for playing alert (0 to 100) alert_duration (int): length of alert (if zero then length of track) fade_back (bool): whether to fade the sound back up when restoring the zones
juraj-google-style
def add_ast_fn(d, spec, parent_function=None): if (d['type'] == 'Function'): ast_fn = Function(d['function']['name'], spec, parent_function=parent_function) for arg in d['args']: if (arg['type'] == 'Function'): ast_fn.add_argument(add_ast_fn(arg, spec, parent_function=ast_fn)) elif (arg['type'] == 'NSArg'): ast_fn.add_argument(NSArg(arg['nsarg']['ns'], arg['nsarg']['ns_val'], ast_fn)) elif (arg['type'] == 'StrArg'): ast_fn.add_argument(StrArg(arg['arg'], ast_fn)) return ast_fn
Convert a dict AST to an object AST Function. Args: d: AST as dictionary spec: BEL Specification parent_function: parent Function object, if any Returns: ast_fn: the converted AST Function object
codesearchnet
def post_process(self, outputs, target_sizes): logging.warning_once('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.') out_logits, out_bbox = (outputs.logits, outputs.pred_boxes) if len(out_logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') if target_sizes.shape[1] != 2: raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch') prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1) scores = topk_values topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor') labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] return results
Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the Pascal VOC format (xmin, ymin, xmax, ymax). Only supports PyTorch. Args: outputs ([`ConditionalDetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model.
github-repos
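The post-processing above hinges on converting (cx, cy, w, h) boxes to (x0, y0, x1, y1) corners and scaling them to image size; a standalone sketch of that step (not the library's center_to_corners_format itself):

import torch

def center_to_corners(boxes):
    cx, cy, w, h = boxes.unbind(-1)
    return torch.stack([cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h], dim=-1)

boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])  # normalized (cx, cy, w, h)
corners = center_to_corners(boxes)            # tensor([[0.4, 0.3, 0.6, 0.7]])
img_h, img_w = 480, 640
scale = torch.tensor([img_w, img_h, img_w, img_h], dtype=corners.dtype)
print(corners * scale)                        # pixel-space (x0, y0, x1, y1)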
def validate(self, scope: ValidationScope = ValidationScope.all, ctype: ContentType = ContentType.config) -> None: self.schema_node._validate(self, scope, ctype)
Validate the receiver's value. Args: scope: Scope of the validation (syntax, semantics or all). ctype: Receiver's content type. Raises: SchemaError: If the value doesn't conform to the schema. SemanticError: If the value violates a semantic constraint. YangTypeError: If the value is a scalar of incorrect type.
juraj-google-style
def get_block(self, block_id): block = backend.query.get_block(self.connection, block_id) latest_block = self.get_latest_block() latest_block_height = latest_block['height'] if latest_block else 0 if not block and block_id > latest_block_height: return result = {'height': block_id, 'transactions': []} if block: transactions = backend.query.get_transactions(self.connection, block['transactions']) result['transactions'] = [t.to_dict() for t in Transaction.from_db(self, transactions)] return result
Get the block with the specified `block_id`. Returns the block corresponding to `block_id` or None if no match is found. Args: block_id (int): block id of the block to get.
juraj-google-style
def predict_proba(self, x, batch_size=32, verbose=0): warnings.warn('`model.predict_proba()` is deprecated and will be removed after 2021-01-01. Please use `model.predict()` instead.') preds = self.predict(x, batch_size, verbose) if preds.min() < 0.0 or preds.max() > 1.0: logging.warning('Network returning invalid probability values. The last layer might not normalize predictions into probabilities (like softmax or sigmoid would).') return preds
Generates class probability predictions for the input samples. The input samples are processed batch by batch. Args: x: input data, as a Numpy array or list of Numpy arrays (if the model has multiple inputs). batch_size: integer. verbose: verbosity mode, 0 or 1. Returns: A Numpy array of probability predictions.
github-repos