Dataset columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def source(self, value=None): if (value is not None): try: value = str(value) except ValueError: raise ValueError('value {} need to be of type str for field `source`'.format(value)) if (',' in value): raise ValueError('value should not contain a comma for field `source`') self._source = value
Corresponds to IDD Field `source` Args: value (str): value for IDD Field `source` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def set_user_info(self, nick, user='*', real='*'): if self.connected: raise Exception("Can't set user info now, we're already connected!") if (not self.connected): self.nick = nick self.connect_info['user'] = {'nick': nick, 'user': user, 'real': real}
Sets user info for this server, to be used before connection. Args: nick (str): Nickname to use. user (str): Username to use. real (str): Realname to use.
codesearchnet
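A minimal usage sketch for the record above; the IRCServer constructor is a hypothetical stand-in for however this class is actually instantiated:

server = IRCServer()  # hypothetical constructor; the object must not be connected yet
server.set_user_info('mybot', user='botuser', real='Friendly Bot')
# connect_info['user'] now holds {'nick': 'mybot', 'user': 'botuser', 'real': 'Friendly Bot'}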
def validate_id_pool(self, id_or_uri, ids_pools): uri = ((self._client.build_uri(id_or_uri) + '/validate?idList=') + '&idList='.join(ids_pools)) return self._client.get(uri)
Validates an ID pool. Args: id_or_uri: ID or URI of range. ids_pools (list): List of Id Pools. Returns: dict: A dict containing a list with IDs.
codesearchnet
def update_from_json(self, path=join('config', 'hdx_dataset_static.json')): super(Dataset, self).update_from_json(path) self.separate_resources()
Update dataset metadata with static metadata from JSON file Args: path (str): Path to JSON dataset metadata. Defaults to config/hdx_dataset_static.json. Returns: None
juraj-google-style
def __init__(self, *args, **kwargs): super(MinerTransaction, self).__init__(*args, **kwargs) self.Type = TransactionType.MinerTransaction
Create an instance. Args: *args: **kwargs:
juraj-google-style
def IsActiveOn(self, date, date_object=None): if (date in self.date_exceptions): (exception_type, _) = self.date_exceptions[date] if (exception_type == self._EXCEPTION_TYPE_ADD): return True else: return False if (self.start_date and self.end_date and (self.start_date <= date) and (date <= self.end_date)): if (date_object is None): date_object = util.DateStringToDateObject(date) return self.day_of_week[date_object.weekday()] return False
Test if this service period is active on a date. Args: date: a string of form "YYYYMMDD" date_object: a date object representing the same date as date. This parameter is optional, and present only for performance reasons. If the caller constructs the date string from a date object that date object can be passed directly, thus avoiding the costly conversion from string to date object. Returns: True iff this service is active on date.
codesearchnet
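A hedged usage sketch for the service-period record above; service_period is assumed to be an already-constructed object exposing IsActiveOn:

import datetime

date = "20240115"
if service_period.IsActiveOn(date):          # string form is always accepted
    print("service runs on", date)
# Passing the matching date object skips the string-to-date conversion inside:
service_period.IsActiveOn(date, date_object=datetime.date(2024, 1, 15))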
def get_task_states(self, job_configs): if self._context_handle: job_names, task_nums = zip(*job_configs) return pywrap_tfe.TFE_GetTaskStates(self._context_handle, job_names, task_nums) else: raise ValueError('Context is not initialized.')
Get task states from the Coordination Service. Args: job_configs: A list of tuples of job name and task number. Returns: A list of TF_Status.
github-repos
def getById(self, Id): csvsource = CSVSource(self.source, self.factory, self.key()) try: for item in csvsource.items(): if Id == item.getId(): return item except StopIteration: return None
Returns ICachableItem that matches id Args: id: String that identifies the item to return whose key matches
juraj-google-style
def shuffle(self, func, lengths, **kwargs): num_splits = len(lengths) kwargs['manual_partition'] = True kwargs['_lengths'] = lengths args = [self.axis, func, num_splits, kwargs, False] args.extend(self.list_of_blocks) return self._wrap_partitions(self.deploy_axis_func(*args))
Shuffle the order of the data in this axis based on the `lengths`. Extends `BaseFrameAxisPartition.shuffle`. Args: func: The function to apply before splitting. lengths: The list of partition lengths to split the result into. Returns: A list of RemotePartition objects split by `lengths`.
codesearchnet
def create_graph_from_data(self, data): warnings.warn("An exhaustive search of the causal structure of CGNN without" " skeleton is super-exponential in the number of variables.") nb_vars = len(list(data.columns)) data = scale(data.values).astype('float32') candidates = [np.reshape(np.array(i), (nb_vars, nb_vars)) for i in itertools.product([0, 1], repeat=nb_vars*nb_vars) if (np.trace(np.reshape(np.array(i), (nb_vars, nb_vars))) == 0 and nx.is_directed_acyclic_graph(nx.DiGraph(np.reshape(np.array(i), (nb_vars, nb_vars)))))] warnings.warn("A total of {} graphs will be evaluated.".format(len(candidates))) scores = [parallel_graph_evaluation(data, i, nh=self.nh, nb_runs=self.nb_runs, gpu=self.gpu, nb_jobs=self.nb_jobs, lr=self.lr, train_epochs=self.train_epochs, test_epochs=self.test_epochs, verbose=self.verbose) for i in candidates] final_candidate = candidates[scores.index(min(scores))] output = np.zeros(final_candidate.shape) for (i, j), x in np.ndenumerate(final_candidate): if x > 0: cand = final_candidate cand[i, j] = 0 output[i, j] = min(scores) - scores[candidates.index(cand)] return nx.DiGraph(candidates[output], {idx: i for idx, i in enumerate(data.columns)})
Use CGNN to create a graph from scratch. All the possible structures are tested, which leads to a super exponential complexity. It would be preferable to start from a graph skeleton for large graphs. Args: data (pandas.DataFrame): Observational data on which causal discovery has to be performed. Returns: networkx.DiGraph: Solution given by CGNN.
juraj-google-style
def get_snapshots(self): data = self.get_data(('volumes/%s/snapshots/' % self.id)) snapshots = list() for jsond in data[u'snapshots']: snapshot = Snapshot(**jsond) snapshot.token = self.token snapshots.append(snapshot) return snapshots
Retrieve the list of snapshots that have been created from a volume. Returns: A list of Snapshot objects built from the API response.
codesearchnet
def log(self, logger=None, label=None, eager=False): if self.closed(): raise ValueError('Attempt to call log() on a closed Queryable.') if (logger is None): return self if (label is None): label = repr(self) if eager: return self._create(self._eager_log_result(logger, label)) return self._create(self._generate_lazy_log_result(logger, label))
Log query result consumption details to a logger. Args: logger: Any object which supports a debug() method which accepts a str, such as a Python standard library logger object from the logging module. If logger is not provided or is None, this method has no logging side effects. label: An optional label which will be inserted into each line of logging output produced by this particular use of log. eager: An optional boolean which controls how the query result will be consumed. If True, the sequence will be consumed and logged in its entirety. If False (the default) the sequence will be evaluated and logged lazily as it is consumed. Warning: Use of eager=True requires sufficient memory to hold the entire sequence, which is obviously not possible with infinite sequences. Use with care! Returns: A queryable over the unaltered source sequence. Raises: AttributeError: If logger does not support a debug() method. ValueError: If the Queryable has been closed.
codesearchnet
def square(duration: int, amp: complex, period: float=None, phase: float=0, name: str=None) -> SamplePulse: if (period is None): period = duration return _sampled_square_pulse(duration, amp, period, phase=phase, name=name)
Generates square wave `SamplePulse`. Applies `left` sampling strategy to generate discrete pulse from continuous function. Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude. Wave range is [-amp, amp]. period: Pulse period, units of dt. If `None` defaults to single cycle. phase: Pulse phase. name: Name of pulse.
codesearchnet
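A sketch of calling the square sampler above; the numeric values are purely illustrative:

pulse = square(duration=100, amp=0.5 + 0.0j, period=25, phase=0.0, name='sq25')
single_cycle = square(duration=100, amp=0.5 + 0.0j)  # period=None -> one cycle over the full duration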
def download(path='.', url=None, unpack=False): if url is None: url = 'https: if os.path.exists(path) and os.path.isdir(path): basename = os.path.basename(url).split('?')[0] filename = os.path.join(path, basename) else: filename = path f = open(filename, 'wb') u = urlopen(url) file_size = int(u.headers["Content-Length"][0]) print("Downloading the latest Neurosynth files: {0} bytes: {1}".format( url, file_size)) bytes_dl = 0 block_size = 8192 while True: buffer = u.read(block_size) if not buffer: break bytes_dl += len(buffer) f.write(buffer) p = float(bytes_dl) / file_size status = r"{0} [{1:.2%}]".format(bytes_dl, p) status = status + chr(8) * (len(status) + 1) sys.stdout.write(status) f.close() if unpack: import tarfile tarfile.open(filename, 'r:gz').extractall(os.path.dirname(filename))
Download the latest data files. Args: path (str): Location to save the retrieved data files. Defaults to current directory. unpack (bool): If True, unzips the data file post-download.
juraj-google-style
def qrandom(n): import quantumrandom return np.concatenate([ quantumrandom.get_data(data_type='uint16', array_length=1024) for i in range(int(np.ceil(n/1024.0))) ])[:n]
Creates an array of n true random numbers obtained from the quantum random number generator at qrng.anu.edu.au This function requires the package quantumrandom and an internet connection. Args: n (int): length of the random array Return: array of ints: array of truly random unsigned 16 bit int values
juraj-google-style
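A usage sketch for qrandom; it assumes the quantumrandom package is installed and an internet connection is available:

samples = qrandom(5000)                 # 5000 true-random unsigned 16-bit values
print(samples.min(), samples.max())     # values fall within [0, 65535]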
def save_feature_names(self, feature_names, feature_list_id): save_lines(feature_names, self.features_dir + 'X_train_{}.names'.format(feature_list_id))
Save the names of the features for the given feature list to a metadata file. Example: `save_feature_names(['num_employees', 'stock_price'], 'company')`. Args: feature_names: A list containing the names of the features, matching the column order. feature_list_id: The name for this feature list.
juraj-google-style
def make_gym_env(name, rl_env_max_episode_steps=(- 1), maxskip_env=False, rendered_env=False, rendered_env_resize_to=None, sticky_actions=False): env = gym.make(name) return gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env, rendered_env, rendered_env_resize_to, sticky_actions)
Create a gym env optionally with a time limit and maxskip wrapper. NOTE: The returned env may already be wrapped with TimeLimit! Args: name: `str` - base name of the gym env to make. rl_env_max_episode_steps: `int` or None - Using any value < 0 returns the env as-is, otherwise we impose the requested timelimit. Setting this to None returns a wrapped env that doesn't have a step limit. maxskip_env: whether to also use MaxAndSkip wrapper before time limit. rendered_env: whether to force render for observations. Use this for environments that are not natively rendering the scene for observations. rendered_env_resize_to: a list of [height, width] to change the original resolution of the native environment render. sticky_actions: whether to use sticky_actions before MaxAndSkip wrapper. Returns: An instance of `gym.Env` or `gym.Wrapper`.
codesearchnet
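An illustrative call of make_gym_env; the environment id is only an example, not something mandated by the record:

env = make_gym_env("PongNoFrameskip-v4",          # example gym id
                   rl_env_max_episode_steps=10000,
                   maxskip_env=True,
                   sticky_actions=True)
observation = env.reset()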
def from_dict(cls, config_dict, **kwargs): config = cls(**config_dict) to_remove = [] for key, value in kwargs.items(): if hasattr(config, key): setattr(config, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) return config
Constructs a CacheConfig instance from a dictionary of parameters. Args: config_dict (Dict[str, Any]): Dictionary containing configuration parameters. **kwargs: Additional keyword arguments to override dictionary values. Returns: CacheConfig: Instance of CacheConfig constructed from the dictionary.
github-repos
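A hedged sketch of the from_dict override behaviour; the max_cache_len key is an assumed attribute used only for illustration:

config = CacheConfig.from_dict({"max_cache_len": 1024}, max_cache_len=2048)
# kwargs naming existing attributes override the dictionary values,
# so the resulting config carries max_cache_len == 2048.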
def __init__(self, access_token=None, rate_limit=True): super(Search, self).__init__() self.session = self.get_session(access_token=access_token, rate_limit=rate_limit) self._ignore_codes = [] if rate_limit: self._ignore_codes.append(429)
Construct a Search object. Args: access_token (str): A valid Companies House API access token. If an access token isn't specified then it looks for the *CompaniesHouseKey* or COMPANIES_HOUSE_KEY environment variables. Defaults to None.
juraj-google-style
def __init__(self, quant_debug_model_path: Optional[str]=None, quant_debug_model_content: Optional[bytes]=None, float_model_path: Optional[str]=None, float_model_content: Optional[bytes]=None, debug_dataset: Optional[Callable[[], Iterable[Sequence[np.ndarray]]]]=None, debug_options: Optional[QuantizationDebugOptions]=None, converter: Optional[TFLiteConverter]=None) -> None: self._data_gen = debug_dataset self._debug_options = debug_options or QuantizationDebugOptions() self.converter = None self.calibrated_model = None self.float_model = None self._float_interpreter = None if converter is not None: if self._debug_options.model_debug_metrics: old_optimizations = converter.optimizations self.converter = self._set_converter_options_for_float(converter) self.float_model = self.converter.convert() converter.optimizations = old_optimizations self.converter = self._set_converter_options_for_calibration(converter) self.calibrated_model = self.converter.convert() self._init_from_converter(self._debug_options, self.converter, self.calibrated_model, float_model=self.float_model) else: self._quant_interpreter = _interpreter.Interpreter(quant_debug_model_path, quant_debug_model_content, experimental_preserve_all_tensors=self._debug_options.layer_direct_compare_metrics is not None) if self._debug_options.model_debug_metrics: self._float_interpreter = _interpreter.Interpreter(float_model_path, float_model_content) self._initialize_stats()
Runs the TFLite debugging model with given debug options. Args: quant_debug_model_path: Path to the quantized debug TFLite model file. quant_debug_model_content: Content of the quantized debug TFLite model. float_model_path: Path to float TFLite model file. float_model_content: Content of the float TFLite model. debug_dataset: a factory function that returns dataset generator which is used to generate input samples (list of np.ndarray) for the model. The generated elements must have same types and shape as inputs to the model. debug_options: Debug options to debug the given model. converter: Optional, use converter instead of quantized model. Raises: ValueError: If the debugger was unable to be created. Attributes: layer_statistics: results of error metrics for each NumericVerify op results. in {layer_name: {metric_name: metric}} format. model_statistics: results of error metrics for difference between float and quantized models. in {metric_name: metric} format.
github-repos
def _has_no_variables(sess: session.Session) -> bool: for op in sess.graph.get_operations(): if op.type.startswith('Variable') or op.type.endswith('VariableOp'): return False return True
Determines if the graph has any variables. Args: sess: TensorFlow Session. Returns: Bool.
github-repos
def resolve(node, source_info, graphs, definition_factory=Definition): visitor = TreeAnnotator(source_info, graphs, definition_factory) node = visitor.visit(node) return node
Resolves reaching definitions for each symbol. Args: node: ast.AST source_info: transformer.SourceInfo graphs: Dict[ast.FunctionDef, cfg.Graph] definition_factory: Callable[[], Definition] Returns: ast.AST
github-repos
def from_file(filename, file_format='xyz'): mols = list(pb.readfile(str(file_format), str(filename))) return BabelMolAdaptor(mols[0].OBMol)
Uses OpenBabel to read a molecule from a file in all supported formats. Args: filename: Filename of input file file_format: String specifying any OpenBabel supported formats. Returns: BabelMolAdaptor object
codesearchnet
def send_eager_tracebacks(destinations, origin_stack, send_source=True): _send_call_tracebacks(destinations, origin_stack, is_eager_execution=True, send_source=send_source)
Send the tracebacks of an eager execution call to debug server(s). Args: destinations: gRPC destination addresses, a `str` or a `list` of `str`s, e.g., "localhost:4242". If a `list`, gRPC requests containing the same payload will be sent to all the destinations. origin_stack: The traceback of the eager operation invocation. send_source: Whether the source files involved in the op tracebacks but outside the TensorFlow library are to be sent.
github-repos
def _finalize_outputs(cls, mapreduce_spec, mapreduce_state): if (mapreduce_spec.mapper.output_writer_class() and (mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS)): mapreduce_spec.mapper.output_writer_class().finalize_job(mapreduce_state)
Finalize outputs. Args: mapreduce_spec: an instance of MapreduceSpec. mapreduce_state: an instance of MapreduceState.
codesearchnet
def init_class_and_forward_node(self, node, cls, container=None, extra_key=None): cls_key = cls.expr if cls.is_late_annotation() and (not cls.resolved) else cls cache = self._instance_cache[cls_key] key = (self.current_opcode, extra_key) status = instance = cache.get(key) if not instance or isinstance(instance, _InitClassState): clsvar = cls.to_variable(node) instantiate_directly = any((v is _InitClassState.INSTANTIATING for v in cache.values())) cache[key] = _InitClassState.INSTANTIATING node, instance = self._instantiate_var(node, clsvar, container, instantiate_directly) if instantiate_directly or status is _InitClassState.INITIALIZING: self._mark_maybe_missing_members(instance.data) else: cache[key] = _InitClassState.INITIALIZING node = self.call_init(node, instance) cache[key] = instance return (node, instance)
Instantiate a class, and also call __init__. Calling __init__ can be expensive, so this method caches its created instances. If you don't need __init__ called, use cls.instantiate instead. Args: node: The current node. cls: The class to instantiate. container: Optionally, a container to pass to the class's instantiate() method, so that type parameters in the container's template are instantiated to TypeParameterInstance. extra_key: Optionally, extra information about the location at which the instantiation occurs. By default, this method keys on the current opcode and the class, which sometimes isn't enough to disambiguate callers that shouldn't get back the same cached instance. Returns: A tuple of node and instance variable.
github-repos
def optimizer_arguments(self, states, internals, actions, terminal, reward, next_states, next_internals): arguments = dict(time=self.global_timestep, variables=self.get_variables(), arguments=dict(states=states, internals=internals, actions=actions, terminal=terminal, reward=reward, next_states=next_states, next_internals=next_internals, update=tf.constant(value=True)), fn_reference=self.fn_reference, fn_loss=self.fn_loss) if (self.global_model is not None): arguments['global_variables'] = self.global_model.get_variables() return arguments
Returns the optimizer arguments including the time, the list of variables to optimize, and various functions which the optimizer might require to perform an update step. Args: states (dict): Dict of state tensors. internals (dict): Dict of prior internal state tensors. actions (dict): Dict of action tensors. terminal: 1D boolean is-terminal tensor. reward: 1D (float) rewards tensor. next_states (dict): Dict of successor state tensors. next_internals (dict): Dict of posterior internal state tensors. Returns: Optimizer arguments as dict to be used as **kwargs to the optimizer.
codesearchnet
def create_sample(question: Union[str, List[str]], context: Union[str, List[str]]) -> Union[SquadExample, List[SquadExample]]: if isinstance(question, list): return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)] else: return SquadExample(None, question, context, None, None, None)
QuestionAnsweringPipeline leverages the [`SquadExample`] internally. This helper method encapsulates all the logic for converting question(s) and context(s) to [`SquadExample`]. We currently support extractive question answering. Arguments: question (`str` or `List[str]`): The question(s) asked. context (`str` or `List[str]`): The context(s) in which we will look for the answer. Returns: One or a list of [`SquadExample`]: The corresponding [`SquadExample`] grouping question and context.
github-repos
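A usage sketch mirroring the docstring above:

single = create_sample(question="Who wrote Hamlet?",
                       context="Hamlet is a tragedy written by William Shakespeare.")
batch = create_sample(question=["Who wrote Hamlet?", "When was it written?"],
                      context=["Shakespeare wrote Hamlet.", "It was written around 1600."])
# single is one SquadExample; batch is a list of SquadExample objects.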
def info(name): try: groupObj = _get_group_object(name) gr_name = groupObj.Name gr_mem = [_get_username(x) for x in groupObj.members()] except pywintypes.com_error as exc: msg = 'Failed to access group {0}. {1}'.format( name, win32api.FormatMessage(exc.excepinfo[5])) log.debug(msg) return False if not gr_name: return False return {'name': gr_name, 'passwd': None, 'gid': None, 'members': gr_mem}
Return information about a group Args: name (str): The name of the group for which to get information Returns: dict: A dictionary of information about the group CLI Example: .. code-block:: bash salt '*' group.info foo
juraj-google-style
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] * len(self.suffix_tokens) if token_ids_1 is None: return prefix_ones + [0] * len(token_ids_0) + suffix_ones return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
github-repos
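A hedged illustration; tokenizer stands for an instance of the class above, and the example assumes one prefix token and one suffix token:

mask = tokenizer.get_special_tokens_mask([15, 27, 33])
# -> [1, 0, 0, 0, 1]: 1 marks an added special token, 0 a sequence token.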
def _pre_commit(self, transaction, *args, **kwargs): transaction._clean_up() transaction._begin(retry_id=self.retry_id) self.current_id = transaction._id if (self.retry_id is None): self.retry_id = self.current_id try: return self.to_wrap(transaction, *args, **kwargs) except: transaction._rollback() raise
Begin transaction and call the wrapped callable. If the callable raises an exception, the transaction will be rolled back. If not, the transaction will be "ready" for ``Commit`` (i.e. it will have staged writes). Args: transaction (~.firestore_v1beta1.transaction.Transaction): A transaction to execute the callable within. args (Tuple[Any, ...]): The extra positional arguments to pass along to the wrapped callable. kwargs (Dict[str, Any]): The extra keyword arguments to pass along to the wrapped callable. Returns: Any: result of the wrapped callable. Raises: Exception: Any failure caused by ``to_wrap``.
codesearchnet
def from_pickle(cls, path): with open(os.path.expanduser(path), 'rb') as pickle: return cPickle.Unpickler(pickle).load()
Load all objects from pickle file and return as dict. The dict returned will have keys named the same as the JSSObject classes contained, and the values will be JSSObjectLists of all full objects of that class (for example, the equivalent of my_jss.Computer().retrieve_all()). This method can potentially take a very long time! Pickling is Python's method for serializing/deserializing Python objects. This allows you to save a fully functional JSSObject to disk, and then load it later, without having to retrieve it from the JSS. Args: path: String file path to the file you wish to load from. Path will have ~ expanded prior to opening.
codesearchnet
def _create_variables_and_slots(self) -> Dict[str, Dict[str, tf_variables.Variable]]: variables = {} for stacked_table_name, tables in self._stacked_table_to_tables.items(): variables[stacked_table_name] = self._create_variables(tables, stacked_table_name=stacked_table_name) return variables
Create variables for TPU embeddings. Returns: A dict of dicts. The outer dict is keyed by the table names and the inner dicts are keyed by 'parameters' and the slot variable names.
github-repos
def bulk_write(self, metrics): actions = [] index = self.get_index() for metric in metrics: actions.append({'index': {'_index': index, '_type': self.doc_type}}) actions.append(metric) try: self.client.bulk(actions) except TransportError as exc: logger.warning('bulk_write metrics %r failure %r', metrics, exc)
Write multiple metrics to elasticsearch in one request Args: metrics (list): data with mappings to send to elasticsearch
codesearchnet
def register_add_grad(left_type, right_type, add_grad_function): key = (left_type, right_type) if (key in grad_adders): raise ValueError(('Types %s already mapped to %s' % (key, grad_adders[key]))) grad_adders[key] = add_grad_function
Register a new gradient adder supporting the given types. Gradient adders are used to add (in the sense of arithmetic addition) intermediate adjoint and tangent variables. TODO: Link to the document explaining the overall terminology and mechanics. Args: left_type: A Python type object. The data type of the left operand supported by the adder. right_type: A Python type object. The data type of the right operand supported by the adder. add_grad_function: A binary function that takes two arguments, left and right, of the types left_type and right_type respectively, and returns their sum. For example, the gradient adder for Numpy objects is np.add. Raises: ValueError: If the given type pair was already registered.
codesearchnet
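Following the docstring's own NumPy example, a sketch of registering a gradient adder:

import numpy as np

register_add_grad(np.ndarray, np.ndarray, np.add)
# Registering the same (left_type, right_type) pair again raises ValueError.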
def _full_axis_reduce(self, axis, func, alternate_index=None): result = self.data.map_across_full_axis(axis, func) if axis == 0: columns = alternate_index if alternate_index is not None else self.columns return self.__constructor__(result, index=["__reduced__"], columns=columns) else: index = alternate_index if alternate_index is not None else self.index return self.__constructor__(result, index=index, columns=["__reduced__"])
Applies a map function that reduces the Manager to a Series but requires knowledge of the full axis. Args: func: Function to reduce the Manager by. This function takes in a Manager. axis: axis to apply the function to. alternate_index: If the resulting series should have an index different from the current query_compiler's index or columns. Return: Pandas series containing the reduced data.
juraj-google-style
def __init__(self, data_store, subject, lease_time=None): self.subject = utils.SmartStr(subject) self.store = data_store self.expires = None self.locked = False if lease_time is None: raise ValueError("Trying to lock without a lease time.") self._Acquire(lease_time) self.lease_time = lease_time
Obtain the subject lock for lease_time seconds. This is never called directly but produced from the DataStore.LockedSubject() factory. Args: data_store: A data_store handler. subject: The name of a subject to lock. lease_time: The minimum length of time the lock will remain valid in seconds. Note this will be converted to usec for storage. Raises: ValueError: No lease time was provided.
juraj-google-style
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): super(SignatureVerifyResponsePayload, self).read(input_stream, kmip_version=kmip_version) local_stream = utils.BytearrayStream(input_stream.read(self.length)) if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream): self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER) self._unique_identifier.read(local_stream, kmip_version=kmip_version) else: raise ValueError('Parsed payload encoding is missing the unique identifier field.') if self.is_tag_next(enums.Tags.VALIDITY_INDICATOR, local_stream): self._validity_indicator = primitives.Enumeration(enums.ValidityIndicator, tag=enums.Tags.VALIDITY_INDICATOR) self._validity_indicator.read(local_stream, kmip_version=kmip_version) else: raise ValueError('Parsed payload encoding is missing the validity indicator field.') if self.is_tag_next(enums.Tags.DATA, local_stream): self._data = primitives.ByteString(tag=enums.Tags.DATA) self._data.read(local_stream, kmip_version=kmip_version) if self.is_tag_next(enums.Tags.CORRELATION_VALUE, local_stream): self._correlation_value = primitives.ByteString(tag=enums.Tags.CORRELATION_VALUE) self._correlation_value.read(local_stream, kmip_version=kmip_version) self.is_oversized(local_stream)
Read the data encoding the SignatureVerify response payload and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is missing from the encoded payload.
codesearchnet
def __init__(self, *, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, strict=True, object_pairs_hook=None): try: super().__init__( object_hook=self.object_hook, parse_float=parse_float, parse_int=parse_int, parse_constant=parse_constant, strict=strict, object_pairs_hook=object_pairs_hook ) except Exception: log.exception('Failed loading JSON data')
Initialize the class, overriding the object hook Args: object_hook: parse_float: parse_int: parse_constant: strict: object_pairs_hook:
juraj-google-style
def _output_dir(self, ext, is_instance=False, interpolatable=False, autohinted=False, is_variable=False): assert (not (is_variable and any([is_instance, interpolatable]))) if is_variable: dir_prefix = 'variable_' elif is_instance: dir_prefix = 'instance_' else: dir_prefix = 'master_' dir_suffix = ('_interpolatable' if interpolatable else '') output_dir = ((dir_prefix + ext) + dir_suffix) if autohinted: output_dir = os.path.join('autohinted', output_dir) return output_dir
Generate an output directory. Args: ext: extension string. is_instance: The output is instance font or not. interpolatable: The output is interpolatable or not. autohinted: The output is autohinted or not. is_variable: The output is variable font or not. Return: output directory string.
codesearchnet
def add_send_last_message(self, connection, send_last_message): self._send_last_message[connection] = send_last_message LOGGER.debug("Added send_last_message function " "for connection %s", connection)
Adds a send_last_message function to the Dispatcher's dictionary of functions indexed by connection. Args: connection (str): A locally unique identifier provided by the receiver of messages. send_last_message (fn): The method that should be called by the dispatcher to respond to messages which arrive via connection, when the connection should be closed after the message has been sent.
juraj-google-style
def print(self, format=TEXT, output=sys.stdout, **kwargs): if format is None: format = TEXT if format == TEXT: print(self._to_text(**kwargs), file=output) elif format == CSV: print(self._to_csv(**kwargs), file=output) elif format == JSON: print(self._to_json(**kwargs), file=output)
Print the object in a file or on standard output by default. Args: format (str): output format (csv, json or text). output (file): descriptor to an opened file (default to standard output). **kwargs (): additional arguments.
juraj-google-style
def stop_rot_gradient(self) -> Rigid: return self.apply_rot_fn(lambda r: r.detach())
Detaches the underlying rotation object Returns: A transformation object with detached rotations
github-repos
def _Initialize(self, http, url): self.EnsureUninitialized() if (self.http is None): self.__http = (http or http_wrapper.GetHttp()) self.__url = url
Initialize this download by setting self.http and self.url. We want the user to be able to override self.http by having set the value in the constructor; in that case, we ignore the provided http. Args: http: An httplib2.Http instance or None. url: The url for this transfer. Returns: None. Initializes self.
codesearchnet
def Compile(self, filter_implementation): self.attribute = self.swap_source.get(self.attribute, self.attribute) arguments = [self.attribute] op_str = self.operator.lower() operator = filter_implementation.OPS.get(op_str, None) if not operator: raise errors.ParseError('Unknown operator {0:s} provided.'.format( self.operator)) if self.attribute == 'timestamp': args = [] for argument in self.args: args.append(DateCompareObject(argument)) self.args = args for argument in self.args: if isinstance(argument, DateCompareObject): if 'Less' in str(operator): TimeRangeCache.SetUpperTimestamp(argument.data) else: TimeRangeCache.SetLowerTimestamp(argument.data) arguments.extend(self.args) expander = filter_implementation.FILTERS['ValueExpander'] ops = operator(arguments=arguments, value_expander=expander) if not self.bool_value: if hasattr(ops, 'FlipBool'): ops.FlipBool() return ops
Compiles the filter implementation. Args: filter_implementation: a filter object (instance of objectfilter.TODO). Returns: A filter operator (instance of TODO). Raises: ParserError: if an unknown operator is provided.
juraj-google-style
def _client_receive(self): try: response = self._client.readline() self.log.debug('Snippet received: %s', response) return response except socket.error as e: raise Error(self._ad, ('Encountered socket error reading RPC response "%s"' % e))
Receives the server's response of an Rpc message. Returns: Raw byte string of the response. Raises: Error: a socket error occurred during the read.
codesearchnet
def getServerSSLContext(self, hostname=None): sslctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) if hostname is None: hostname = socket.gethostname() certfile = self.getHostCertPath(hostname) if certfile is None: raise s_exc.NoCertKey('Missing .crt for %s' % hostname) keyfile = self.getHostKeyPath(hostname) if keyfile is None: raise s_exc.NoCertKey('Missing .key for %s' % hostname) sslctx.load_cert_chain(certfile, keyfile) return sslctx
Returns an ssl.SSLContext appropriate to listen on a socket Args: hostname: if None, the value from socket.gethostname is used to find the key in the servers directory. This name should match the not-suffixed part of two files ending in .key and .crt in the hosts subdirectory
juraj-google-style
def start(self, host, nornir): self.host = host self.nornir = nornir try: logger.debug('Host %r: running task %r', self.host.name, self.name) r = self.task(self, **self.params) if (not isinstance(r, Result)): r = Result(host=host, result=r) except NornirSubTaskError as e: tb = traceback.format_exc() logger.error('Host %r: task %r failed with traceback:\n%s', self.host.name, self.name, tb) r = Result(host, exception=e, result=str(e), failed=True) except Exception as e: tb = traceback.format_exc() logger.error('Host %r: task %r failed with traceback:\n%s', self.host.name, self.name, tb) r = Result(host, exception=e, result=tb, failed=True) r.name = self.name r.severity_level = (logging.ERROR if r.failed else self.severity_level) self.results.insert(0, r) return self.results
Run the task for the given host. Arguments: host (:obj:`nornir.core.inventory.Host`): Host we are operating with. Populated right before calling the ``task`` nornir(:obj:`nornir.core.Nornir`): Populated right before calling the ``task`` Returns: host (:obj:`nornir.core.task.MultiResult`): Results of the task and its subtasks
codesearchnet
def fork(self, command: Command) \ -> Tuple['SelectedMailbox', Iterable[Response]]: frozen = _Frozen(self) cls = type(self) copy = cls(self._guid, self._readonly, self._permanent_flags, self._session_flags, self._selected_set, self._lookup, _mod_sequence=self._mod_sequence, _prev=frozen, _messages=self._messages) if self._prev is not None: with_uid: bool = getattr(command, 'uid', False) untagged = self._compare(self._prev, frozen, with_uid) else: untagged = [] return copy, untagged
Compares the state of the current object to that of the last fork, returning the untagged responses that reflect any changes. A new copy of the object is also returned, ready for the next command. Args: command: The command that was finished.
juraj-google-style
def pluralize(singular): if (singular in UNCOUNTABLES): return singular for i in IRREGULAR: if (i[0] == singular): return i[1] for i in PLURALIZE_PATTERNS: if re.search(i[0], singular): return re.sub(i[0], i[1], singular)
Convert singular word to its plural form. Args: singular: A word in its singular form. Returns: The word in its plural form.
codesearchnet
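Illustrative calls; the exact outputs depend on the module-level UNCOUNTABLES, IRREGULAR and PLURALIZE_PATTERNS tables the function consults:

pluralize("book")    # -> "books" via a conventional suffix pattern
pluralize("person")  # -> "people" if the pair is listed in IRREGULAR
pluralize("sheep")   # -> "sheep" if listed in UNCOUNTABLES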
def add_prefix(self, prefix, flags, prf): self._req(('prefix add %s %s %s' % (prefix, flags, prf))) time.sleep(1) self._req('netdataregister')
Add network prefix. Args: prefix (str): network prefix. flags (str): network prefix flags, please refer to the Thread documentation for details. prf (str): network prf, please refer to the Thread documentation for details.
codesearchnet
def resolve_symbols(self, database, link_resolver, page=None): page = page or self.root if page.ast is None and not page.generated: with io.open(page.source_file, 'r', encoding='utf-8') as _: page.ast = cmark.hotdoc_to_ast(_.read(), self) page.resolve_symbols(self, database, link_resolver) self.__update_dep_map(page, page.symbols) for pagename in page.subpages: cpage = self.__all_pages[pagename] self.resolve_symbols(database, link_resolver, page=cpage)
Will call resolve_symbols on all the stale subpages of the tree. Args: page: hotdoc.core.tree.Page, the page to resolve symbols in, will recurse on potential subpages.
juraj-google-style
def remove_codeblock_syntax_sentinals(code_text): r flags = re.MULTILINE | re.DOTALL code_text_ = code_text code_text_ = re.sub(r'^ * code_text_ = re.sub(r'^ * code_text_ = re.sub(r'^ * code_text_ = code_text_.rstrip() return code_text_
Removes template comments and vim sentinels Args: code_text (str): Returns: str: code_text_
juraj-google-style
async def init( self, *, advertise_addr: str = None, listen_addr: str = "0.0.0.0:2377", force_new_cluster: bool = False, swarm_spec: Mapping = None ) -> str: data = { "AdvertiseAddr": advertise_addr, "ListenAddr": listen_addr, "ForceNewCluster": force_new_cluster, "Spec": swarm_spec, } response = await self.docker._query_json("swarm/init", method="POST", data=data) return response
Initialize a new swarm. Args: ListenAddr: listen address used for inter-manager communication AdvertiseAddr: address advertised to other nodes. ForceNewCluster: Force creation of a new swarm. SwarmSpec: User modifiable swarm configuration. Returns: id of the swarm node
juraj-google-style
def path_set_md5(url): (scheme, netloc, path, query_string, fragment) = urlsplit(url) path += '.md5' return urlunsplit((scheme, netloc, path, query_string, fragment))
Given a file URL, return the URL of the corresponding md5 file. Args: url: a given URL Returns: URL of the md5 file
codesearchnet
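A concrete example of the rewrite performed above (the URL is illustrative):

path_set_md5("https://example.com/data/file.csv?version=2")
# -> "https://example.com/data/file.csv.md5?version=2"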
def near_reduce(self, coords_set, threshold=1e-4): unique_coords = [] coords_set = [self.slab.lattice.get_fractional_coords(coords) for coords in coords_set] for coord in coords_set: if not in_coord_list_pbc(unique_coords, coord, threshold): unique_coords += [coord] return [self.slab.lattice.get_cartesian_coords(coords) for coords in unique_coords]
Prunes coordinate set for coordinates that are within threshold Args: coords_set (Nx3 array-like): list or array of coordinates threshold (float): threshold value for distance
juraj-google-style
async def addNodeTag(self, iden, tag, valu=(None, None)): buid = s_common.uhex(iden) parts = tag.split('.') self._reqUserAllowed('tag:add', *parts) async with await self.cell.snap(user=self.user) as snap: with s_provenance.claim('coreapi', meth='tag:add', user=snap.user.iden): node = await snap.getNodeByBuid(buid) if node is None: raise s_exc.NoSuchIden(iden=iden) await node.addTag(tag, valu=valu) return node.pack()
Add a tag to a node specified by iden. Args: iden (str): A hex encoded node BUID. tag (str): A tag string. valu (tuple): A time interval tuple or (None, None).
juraj-google-style
def validate_element(self, value): if isinstance(value, bytes): try: six.text_type(value, 'UTF-8') except UnicodeDecodeError as err: try: _ = self.name except AttributeError: validation_error = ValidationError(('Field encountered non-UTF-8 string %r: %s' % (value, err))) else: validation_error = ValidationError(('Field %s encountered non-UTF-8 string %r: %s' % (self.name, value, err))) validation_error.field_name = self.name raise validation_error else: return super(StringField, self).validate_element(value) return value
Validate StringField allowing for str and unicode. Raises: ValidationError if a str value is not UTF-8.
codesearchnet
def set_control_scheme(self, agent_name, control_scheme): if (agent_name not in self.agents): print(('No such agent %s' % agent_name)) else: self.agents[agent_name].set_control_scheme(control_scheme)
Set the control scheme for a specific agent. Args: agent_name (str): The name of the agent to set the control scheme for. control_scheme (int): A control scheme value (see :obj:`holodeck.agents.ControlSchemes`)
codesearchnet
def dvds_top_rentals(self, **kwargs): path = self._get_path('dvds_top_rentals') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Gets the current top DVD rentals from the API. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def learn(self, grad_arr): if grad_arr.ndim > 3: grad_arr = grad_arr.reshape(( grad_arr.shape[0], grad_arr.shape[1], -1 )) grad_arr = grad_arr[:, -1] elif grad_arr.ndim == 3: grad_arr = grad_arr[:, -1] delta_arr, _, grads_list = self.__lstm_model.hidden_back_propagate(grad_arr) grads_list.insert(0, None) grads_list.insert(0, None) self.__lstm_model.optimize( grads_list, self.__learning_rate, 1 ) return delta_arr
Update this Discriminator by ascending its stochastic gradient. Args: grad_arr: `np.ndarray` of gradients. Returns: `np.ndarray` of delta or gradients.
juraj-google-style
async def send_command(self, command): command = "{}\r\n".format(command).encode("ascii", errors="backslashreplace") self.write(command) await self.drain()
Sends the given command to the server. Args: command (str): Command to send to the server. Raises: ConnectionResetError: If the connection with the server is lost. (Shouldn't it raise BrokenPipeError too ?)
juraj-google-style
def random(cls, num_qubits, seed=None): if seed is not None: np.random.seed(seed) z = np.random.randint(2, size=num_qubits).astype(np.bool) x = np.random.randint(2, size=num_qubits).astype(np.bool) return cls(z, x)
Return a random Pauli on number of qubits. Args: num_qubits (int): the number of qubits seed (int): Optional. To set a random seed. Returns: Pauli: the random pauli
juraj-google-style
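A sketch of the classmethod above; seeding makes the drawn z/x bits reproducible:

pauli = Pauli.random(3, seed=42)   # random Pauli on three qubits
print(pauli)                       # prints the sampled Pauli label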
def ReadByte(self, do_ord=True): try: if do_ord: return ord(self.stream.read(1)) return self.stream.read(1) except Exception as e: logger.error('ord expected character but got none') return 0
Read a single byte. Args: do_ord (bool): (default True) convert the byte to an ordinal first. Returns: bytes: a single byte if successful. 0 (int) if an exception occurred.
codesearchnet
def quaternion_from_euler(angles, order='yzy'): angles = np.asarray(angles, dtype=float) quat = quaternion_from_axis_rotation(angles[0], order[0])\ * (quaternion_from_axis_rotation(angles[1], order[1]) * quaternion_from_axis_rotation(angles[2], order[2])) quat.normalize(inplace=True) return quat
Generate a quaternion from a set of Euler angles. Args: angles (array_like): Array of Euler angles. order (str): Order of Euler rotations. 'yzy' is default. Returns: Quaternion: Quaternion representation of Euler rotation.
juraj-google-style
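An illustrative call with assumed angle values in radians, using the default 'yzy' order:

import numpy as np

quat = quaternion_from_euler([np.pi / 2, 0.0, np.pi / 4])
# quat is returned normalized; another convention can be requested via order, e.g. order='zyz'.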
def merge_layouts(layouts): layout = layouts[0].clone() for l in layouts[1:]: layout.files.update(l.files) layout.domains.update(l.domains) for (k, v) in l.entities.items(): if (k not in layout.entities): layout.entities[k] = v else: layout.entities[k].files.update(v.files) return layout
Utility function for merging multiple layouts. Args: layouts (list): A list of BIDSLayout instances to merge. Returns: A BIDSLayout containing merged files and entities. Notes: Layouts will be merged in the order of the elements in the list. I.e., the first Layout will be updated with all values in the 2nd Layout, then the result will be updated with values from the 3rd Layout, etc. This means that order matters: in the event of entity or filename conflicts, later layouts will take precedence.
codesearchnet
def aoi(self, **kwargs): g = self._parse_geoms(**kwargs) if (g is None): return self else: return self[g]
Subsets the Image by the given bounds Args: bbox (list): optional. A bounding box array [minx, miny, maxx, maxy] wkt (str): optional. A WKT geometry string geojson (str): optional. A GeoJSON geometry dictionary Returns: image: an image instance of the same type
codesearchnet
def SetCodepage(self, codepage): try: codecs.getencoder(codepage) self._codepage = codepage except LookupError: raise ValueError('Unsupported codepage: {0:s}'.format(codepage))
Sets the codepage. Args: codepage (str): codepage. Raises: ValueError: if the codepage is not supported.
codesearchnet
def _partitioner(shape, dtype): if axis >= len(shape): raise ValueError(f'Cannot partition variable along axis {axis} when shape is only {shape}') dtype = dtypes.as_dtype(dtype) if dtype.base_dtype == dtypes.string: bytes_per_element = bytes_per_string_element else: bytes_per_element = dtype.size total_size_bytes = shape.num_elements() * bytes_per_element partitions = total_size_bytes / min_slice_size partitions_list = [1] * len(shape) partitions_list[axis] = max(1, min(shape.dims[axis].value, max_partitions, int(math.ceil(partitions)))) return partitions_list
Partitioner that partitions list for a variable of given shape and type. Ex: Consider partitioning a variable of type float32 with shape=[1024, 1024]. If `max_partitions` >= 16, this function would return [(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1]. If `max_partitions` < 16, this function would return [`max_partitions`, 1]. Args: shape: Shape of the variable. dtype: Type of the variable. Returns: List of partitions for each axis (currently only one axis can be partitioned). Raises: ValueError: If axis to partition along does not exist for the variable.
github-repos
def handle_import_error(caught_exc, name): for template in TEMPLATES: expected_msg = template.format(name) if (caught_exc.args == (expected_msg,)): return raise caught_exc
Allow or re-raise an import error. This is to distinguish between expected and unexpected import errors. If the module is not found, it simply means the Cython / Fortran speedups were not built with the package. If the error message is different, e.g. ``... undefined symbol: __curve_intersection_MOD_all_intersections``, then the import error **should** be raised. Args: caught_exc (ImportError): An exception caught when trying to import a Cython module. name (str): The name of the module. For example, for the module ``bezier._curve_speedup``, the name is ``"_curve_speedup"``. Raises: ImportError: If the error message is different than the basic "missing module" error message.
codesearchnet
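A sketch of the guarded-import pattern this helper supports, using the module named in the docstring:

try:
    from bezier import _curve_speedup        # compiled speedups may be absent
except ImportError as caught_exc:
    handle_import_error(caught_exc, "_curve_speedup")
    _curve_speedup = None                     # fall back to the pure-Python implementation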
def code_memory_read(self, addr, num_bytes): buf_size = num_bytes buf = (ctypes.c_uint8 * buf_size)() res = self._dll.JLINKARM_ReadCodeMem(addr, buf_size, buf) if (res < 0): raise errors.JLinkException(res) return list(buf)[:res]
Reads bytes from code memory. Note: This is similar to calling ``memory_read`` or ``memory_read8``, except that this uses a cache and reads ahead. This should be used in instances where you want to read a small amount of bytes at a time, and expect to always read ahead. Args: self (JLink): the ``JLink`` instance addr (int): starting address from which to read num_bytes (int): number of bytes to read Returns: A list of bytes read from the target. Raises: JLinkException: if memory could not be read.
codesearchnet
def _update_replica(self, update_fn, value, **kwargs): if self._policy: return self._policy._update_replica(self, update_fn, value, **kwargs) raise NotImplementedError(f'DistributedVariable._update_replica requires a valid VariablePolicy. Please set the policy via the `var_policy` argument in the constructor, or override this method in sub-classes which support cross-replica accesses. Type name is {type(self)}')
Applies updates in one replica. Args: update_fn: A callable to update the variable. It should have the same signature as `Variable.assign()`. value: value to be passed to `update_fn`. **kwargs: remaining arguments to `update_fn`. Returns: Updated variable or `tf.Operation`.
github-repos
def overlapping(self, variant_obj): category = ('snv' if (variant_obj['category'] == 'sv') else 'sv') query = {'$and': [{'case_id': variant_obj['case_id']}, {'category': category}, {'hgnc_ids': {'$in': variant_obj['hgnc_ids']}}]} sort_key = [('rank_score', pymongo.DESCENDING)] variants = self.variant_collection.find(query).sort(sort_key).limit(30) return variants
Return overlapping variants. Look at the genes that a variant overlaps. Then return all variants that overlap these genes. If variant_obj is an SV it will return the overlapping SNVs, and vice versa. There is a problem when SVs are huge since there are too many overlapping variants. Args: variant_obj(dict) Returns: variants(iterable(dict))
codesearchnet
def get_job(self, job_resource_name: str) -> Dict: return self.service.projects().programs().jobs().get(name=job_resource_name).execute()
Returns metadata about a previously created job. See get_job_result if you want the results of the job and not just metadata about the job. Params: job_resource_name: A string of the form `projects/project_id/programs/program_id/jobs/job_id`. Returns: A dictionary containing the metadata.
codesearchnet
def filter_out_spontaneous_genes(genes, custom_spont_id=None): new_genes = DictList() for gene in genes: if not is_spontaneous(gene, custom_id=custom_spont_id): new_genes.append(gene) return new_genes
Return the DictList of genes that are not spontaneous in a model. Args: genes (DictList): Genes DictList custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001`` Returns: DictList: genes excluding ones that are spontaneous
juraj-google-style
def atan(cls, x: 'TensorFluent') -> 'TensorFluent': return cls._unary_op(x, tf.atan2, tf.float32)
Returns a TensorFluent for the arctan function. Args: x: The input fluent. Returns: A TensorFluent wrapping the arctan function.
juraj-google-style
def _GetArchiveTypes(self, mediator, path_spec): try: type_indicators = analyzer.Analyzer.GetArchiveTypeIndicators( path_spec, resolver_context=mediator.resolver_context) except IOError as exception: type_indicators = [] warning_message = ( 'analyzer failed to determine archive type indicators ' 'with error: {0!s}').format(exception) mediator.ProduceExtractionWarning(warning_message, path_spec=path_spec) return type_indicators
Determines if a data stream contains an archive such as: TAR or ZIP. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. path_spec (dfvfs.PathSpec): path specification of the data stream. Returns: list[str]: dfVFS archive type indicators found in the data stream.
juraj-google-style
def generate_defect_structure(self, supercell=(1, 1, 1)): defect_structure = self.bulk_structure.copy() defect_structure.make_supercell(supercell) defect_properties = self.site.properties.copy() if ('velocities' in self.bulk_structure.site_properties) and \ 'velocities' not in defect_properties: if all( vel == self.bulk_structure.site_properties['velocities'][0] for vel in self.bulk_structure.site_properties['velocities']): defect_properties['velocities'] = self.bulk_structure.site_properties['velocities'][0] else: raise ValueError("No velocity property specified for defect site and " "bulk_structure velocities are not homogeneous. Please specify this " "property within the initialized defect_site object.") site_properties_for_fake_struct = {prop: [val] for prop,val in defect_properties.items()} struct_for_defect_site = Structure( self.bulk_structure.copy().lattice, [self.site.specie], [self.site.frac_coords], to_unit_cell=True, site_properties = site_properties_for_fake_struct) struct_for_defect_site.make_supercell(supercell) defect_site = struct_for_defect_site[0] defect_structure.append(self.site.specie.symbol, defect_site.coords, coords_are_cartesian=True, properties = defect_site.properties) defect_structure.set_charge(self.charge) return defect_structure
Returns Defective Interstitial structure, decorated with charge Args: supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
juraj-google-style
def matmul_and_same_scale(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]: out = math_ops.matmul(input_tensor, self.filters, name='sample/matmul') if self.same_scale_op == 'concatenate': ones = array_ops.ones_like(out) out = array_ops.concat([out, ones], 0) elif self.same_scale_op == 'gather': out = array_ops.gather(out, indices=[0], axis=0) elif self.same_scale_op == 'max_pool': out = nn_ops.max_pool(out, ksize=3, strides=1, padding='SAME') elif self.same_scale_op == 'pad': paddings = array_ops.ones((array_ops.rank(out), 2), dtype=dtypes.int32) out = array_ops.pad(out, paddings, 'CONSTANT') elif self.same_scale_op == 'reshape': out = array_ops.reshape(out, [-1]) elif self.same_scale_op == 'select': rng = np.random.default_rng(seed=1234) condition = ops.convert_to_tensor(rng.uniform(low=0.0, high=1.0, size=out.shape) < 0.5) ones = array_ops.ones_like(out) out = math_ops.select(condition, out, ones) elif self.same_scale_op == 'slice': begin = array_ops.zeros(array_ops.rank(out), dtype=dtypes.int32) size = array_ops.ones(array_ops.rank(out), dtype=dtypes.int32) out = array_ops.slice(out, begin, size) elif self.same_scale_op == 'transpose': out = array_ops.transpose(out) else: raise NotImplementedError('{} is not implemented for integration test.'.format(self.same_scale_op)) return {'output': out}
Performs a matrix multiplication. Args: input_tensor: Input tensor to matmul with the filter. Returns: A map of: output key -> output result.
github-repos
def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None, **kwargs): vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy if vision_feature_select_strategy not in ['default', 'full']: raise ValueError(f'Unexpected select feature strategy: {self.config.vision_feature_select_strategy}') kwargs = {k: v for k, v in kwargs.items() if v is not None} image_outputs = self.vision_tower(pixel_values, output_hidden_states=True, **kwargs) if isinstance(vision_feature_layer, int): selected_image_feature = image_outputs.hidden_states[vision_feature_layer] if vision_feature_select_strategy == 'default': selected_image_feature = selected_image_feature[:, 1:] else: hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer] if vision_feature_select_strategy == 'default': hs_pool = [hs[:, 1:] for hs in hs_pool] selected_image_feature = torch.cat(hs_pool, dim=-1) image_features = self.multi_modal_projector(selected_image_feature) return image_features
Obtains image last hidden states from the vision tower and apply multimodal projection. Args: pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`): The tensors corresponding to the input images. vision_feature_layer (`Union[int, List[int]]`, *optional*): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. vision_feature_select_strategy (`str`, *optional*): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"` Returns: image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
github-repos
def get_slices(lines, clean_lines): indices = np.zeros(len(lines)) for i in range(len(lines) - 1): j = i + 1 while not clean_lines[j] and j < len(lines) - 1: j += 1 if len(clean_lines[i]) < 200 and len(clean_lines[i]) > 3 and (len(clean_lines[j]) < 200) and (len(clean_lines[j]) > 3) and (not clean_lines[i].startswith('[MISSING_PAGE')) and (clean_lines[i] == clean_lines[j] or ratio(clean_lines[i], clean_lines[j]) > 0.9): indices[i:j] = 1 ids = np.where(indices)[0] slices = [] if len(ids) == 0: return slices j0 = 0 for j, x in enumerate(np.diff(ids) > 3): if x: slices.append((ids[j0], ids[j] + 2)) j0 = j + 1 slices.append((ids[j0], ids[-1] + 2)) return [sli for sli in slices if sli[1] - sli[0] > 15]
Get slices of text based on specific criteria within the lines. This function identifies and returns slices of text from the input lines based on certain conditions. These conditions were chosen by the Nougat authors: - The slice is less than 200 characters long. - The slice is more than 3 characters long. - The slice does not start with "[MISSING_PAGE". - The slice is either the same as the next slice or the ratio of the two in terms of Levenshtein distance is greater than 0.9. Args: lines (`List[str]`): The list of lines containing the text. clean_lines (`List[str]`): A cleaned version of the text (without numbers). Returns: `List[tuple]`: A list of tuples representing the start and end indices of text slices.
github-repos
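A minimal usage sketch for get_slices; it assumes the function above and numpy are importable, and the digit-stripping used to build clean_lines is an illustrative assumption, not necessarily the original pipeline's preprocessing.

import re

# Synthetic page with a repeated footer; get_slices should flag the repeated block.
page_text = "\n".join(["Some repeated footer text 12"] * 40 + ["Unique body line."])
lines = page_text.splitlines()
clean_lines = [re.sub(r"\d+", "", line).strip() for line in lines]  # assumed clean-up

for start, end in get_slices(lines, clean_lines):
    print("candidate repeated block: lines %d-%d" % (start, end))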
def extract_attribute_array(self, data_array, var_name): if var_name not in self.attributes.keys(): self.attributes[var_name] = [] for t in range(self.times.size): self.attributes[var_name].append(data_array[self.i[t], self.j[t]])
Extracts data from a 2D array that has the same dimensions as the grid used to identify the object. Args: data_array: 2D numpy array with the same dimensions as the identification grid. var_name: name of the attribute under which the extracted values are stored.
juraj-google-style
def update_phase(self, environment, data, prediction, user, item, correct, time, answer_id, **kwargs): pass
After the prediction, update the environment and persist some information for the predictive model. Args: environment (proso.models.environment.Environment): environment where all the important data are persisted data (object): data from the prepare phase prediction: the prediction made for the answered question user (int): identifier of the user answering the question item (int): identifier of the question item correct (bool): correctness of the answer time: time of the answer answer_id: identifier of the answer
codesearchnet
def __init__( self, resolver_context, file_system, path_spec, is_root=False, is_virtual=False, vslvm_logical_volume=None): if not is_virtual and vslvm_logical_volume is None: vslvm_logical_volume = file_system.GetLVMLogicalVolumeByPathSpec( path_spec) if not is_virtual and vslvm_logical_volume is None: raise errors.BackEndError( 'Missing vslvm logical volume in non-virtual file entry.') super(LVMFileEntry, self).__init__( resolver_context, file_system, path_spec, is_root=is_root, is_virtual=is_virtual) self._name = None self._vslvm_logical_volume = vslvm_logical_volume if self._is_virtual: self.entry_type = definitions.FILE_ENTRY_TYPE_DIRECTORY else: self.entry_type = definitions.FILE_ENTRY_TYPE_FILE
Initializes a file entry. Args: resolver_context (Context): resolver context. file_system (FileSystem): file system. path_spec (PathSpec): path specification. is_root (Optional[bool]): True if the file entry is the root file entry of the corresponding file system. is_virtual (Optional[bool]): True if the file entry is a virtual file entry. vslvm_logical_volume (Optional[pyvslvm.logical_volume]): an LVM logical volume.
juraj-google-style
def _extract_storage_api_response_error(message): try: if len(message) == 3: data = json.loads(message[2]) return data['error']['errors'][0]['message'] except Exception: pass return message
A helper function to extract user-friendly error messages from service exceptions. Args: message: An error message from an exception. If this is from our HTTP client code, it will actually be a tuple. Returns: A modified version of the message that is less cryptic.
juraj-google-style
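A hedged usage sketch for _extract_storage_api_response_error above; the 3-element message tuple and its JSON payload are invented to mirror the structure the helper expects.

import json

payload = json.dumps({"error": {"errors": [{"message": "Not found: table my_dataset.my_table"}]}})
message = ("404", "NOT FOUND", payload)  # hypothetical (status, reason, body) tuple
print(_extract_storage_api_response_error(message))  # -> "Not found: table my_dataset.my_table"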
def get_logfile_name(tags): if not os.path.exists(sd.LOG_DIR): os.mkdir(sd.LOG_DIR) filename = "log" for tag in tags: filename += "_{}".format(tag) filename += ".txt" filename = os.path.join(sd.LOG_DIR,filename) return filename
Formulates a log file name that incorporates the provided tags. The log file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``. Args: tags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag will be added in the same order as provided.
juraj-google-style
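A standalone sketch of the same naming scheme; LOG_DIR here is a stand-in for scgpm_seqresults_dnanexus.LOG_DIR.

import os

LOG_DIR = "Logs_dir"  # stand-in for sd.LOG_DIR

def build_logfile_name(tags, log_dir=LOG_DIR):
    os.makedirs(log_dir, exist_ok=True)
    return os.path.join(log_dir, "log_" + "_".join(tags) + ".txt")

print(build_logfile_name(["analysis", "2024"]))  # Logs_dir/log_analysis_2024.txt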
def _list_to_complex_array(complex_list): arr = np.asarray(complex_list, dtype=np.complex_) if not arr.shape[-1] == 2: raise QiskitError('Inner most nested list is not of length 2.') return arr[..., 0] + 1j*arr[..., 1]
Convert nested list of shape (..., 2) to complex numpy array with shape (...) Args: complex_list (list): List to convert. Returns: np.ndarray: Complex numpy array Raises: QiskitError: If innermost array of input nested list is not of length 2.
juraj-google-style
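An equivalent standalone check of the (..., 2) to complex conversion using plain numpy, for readers without Qiskit installed.

import numpy as np

pairs = [[[1.0, 2.0], [3.0, -1.0]],
         [[0.0, 0.5], [2.5, 0.0]]]          # shape (2, 2, 2): last axis is (real, imag)
arr = np.asarray(pairs)
result = arr[..., 0] + 1j * arr[..., 1]     # shape (2, 2), complex dtype
print(result)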
def _TabbedContinuationAlignPadding(spaces, align_style, tab_width): if align_style in ('FIXED', 'VALIGN-RIGHT'): if spaces > 0: return '\t' * int((spaces + tab_width - 1) / tab_width) return '' return ' ' * spaces
Build padding string for continuation alignment in tabbed indentation. Arguments: spaces: (int) The number of spaces to place before the token for alignment. align_style: (str) The alignment style for continuation lines. tab_width: (int) Number of columns of each tab character. Returns: A padding string for alignment with style specified by align_style option.
github-repos
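A standalone re-implementation sketch of the padding rule above, handy for experimenting with tab widths in isolation.

def tabbed_padding(spaces, align_style, tab_width=8):
    if align_style in ('FIXED', 'VALIGN-RIGHT'):
        # Round the requested spaces up to a whole number of tab stops.
        return '\t' * ((spaces + tab_width - 1) // tab_width) if spaces > 0 else ''
    return ' ' * spaces

assert tabbed_padding(9, 'FIXED', 8) == '\t\t'
assert tabbed_padding(4, 'SPACE', 8) == '    '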
def ReadVFS(pathspec, offset, length, progress_callback=None): fd = VFSOpen(pathspec, progress_callback=progress_callback) fd.Seek(offset) return fd.Read(length)
Read from the VFS and return the contents. Args: pathspec: path to read from offset: number of bytes to skip length: number of bytes to read progress_callback: A callback to indicate that the open call is still working but needs more time. Returns: VFS file contents
juraj-google-style
def clear_operations_touching(self, qubits: Iterable[ops.Qid], moment_indices: Iterable[int]): qubits = frozenset(qubits) for k in moment_indices: if 0 <= k < len(self._moments): self._moments[k] = self._moments[k].without_operations_touching( qubits)
Clears operations that are touching given qubits at given moments. Args: qubits: The qubits to check for operations on. moment_indices: The indices of moments to check for operations within.
juraj-google-style
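A usage sketch for clear_operations_touching, assuming Cirq is installed; the circuit itself is invented for illustration.

import cirq

a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit([cirq.Moment([cirq.X(a), cirq.X(b)]),
                        cirq.Moment([cirq.CZ(a, b)])])
# Drop anything touching qubit a in moment 0; X(b) and the later CZ survive.
circuit.clear_operations_touching([a], moment_indices=[0])
print(circuit)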
def read_stream(self, start_offset=0, byte_count=None): try: return self._api.object_download(self._bucket, self._key, start_offset=start_offset, byte_count=byte_count) except Exception as e: raise e
Reads the content of this object as text. Args: start_offset: the start offset of bytes to read. byte_count: the number of bytes to read. If None, it reads to the end. Returns: The text content within the object. Raises: Exception if there was an error requesting the object's content.
codesearchnet
def get_local_aws_session(): if (not all((app_config.aws_api.access_key, app_config.aws_api.secret_key))): return boto3.session.Session() else: session_args = [app_config.aws_api.access_key, app_config.aws_api.secret_key] if app_config.aws_api.session_token: session_args.append(app_config.aws_api.session_token) return boto3.session.Session(*session_args)
Returns a session for the local instance, not for a remote account Returns: :obj:`boto3:boto3.session.Session`
codesearchnet
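A standalone sketch of the same fallback logic, assuming boto3 is installed; the SimpleNamespace config is a stand-in for app_config.aws_api.

from types import SimpleNamespace
import boto3

aws_api = SimpleNamespace(access_key=None, secret_key=None, session_token=None)

def local_session(cfg=aws_api):
    if not all((cfg.access_key, cfg.secret_key)):
        return boto3.session.Session()  # fall back to the instance/default credentials
    args = [cfg.access_key, cfg.secret_key]
    if cfg.session_token:
        args.append(cfg.session_token)
    return boto3.session.Session(*args)

session = local_session()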
def maybe_download_image_dataset(image_ids, target_dir):
    tf.gfile.MakeDirs(target_dir)
    num_images = len(image_ids)
    for i, image_id in enumerate(image_ids):
        destination = os.path.join(target_dir, "%s.jpg" % i)
        tmp_destination = "%s.temp" % destination
        # Allen Brain Atlas SectionImage download endpoint on api.brain-map.org.
        source_url = ("http://api.brain-map.org/api/v2/"
                      "section_image_download/%s" % image_id)
        if tf.gfile.Exists(destination):
            tf.logging.info("Image with ID already present, "
                            "skipping download (%s of %s)." % (i + 1, num_images))
            continue
        tf.logging.info("Downloading image with id %s (%s of %s)" % (
            image_id, i + 1, num_images))
        response = requests.get(source_url, stream=True)
        response.raise_for_status()
        with tf.gfile.Open(tmp_destination, "w") as f:
            for block in response.iter_content(1024):
                f.write(block)
        tf.gfile.Rename(tmp_destination, destination)
Download a set of images from api.brain-map.org to `target_dir`. Args: image_ids: list, a list of image ids. target_dir: str, a directory to which to download the images.
juraj-google-style
def ExpectedEnginesToBuild(self, run_params): if run_params.dynamic_shape: return ['TRTEngineOp_000'] else: return ['TRTEngineOp_000', 'TRTEngineOp_001']
Check that the expected engine is built. Args: run_params: the run parameters. Returns: the expected engines to build. The squeeze op is not converted by TensorRT in implicit batch mode. Because of this we have two TRTEngineOp in the graphs: one for the subgraph before 'squeeze(q,0)', and another one for the rest of the ops after the 'squeeze(q,0)'. In explicit batch mode the whole graph is converted using a single engine.
github-repos
def as_data_frame(self, **kwargs): try: import pandas as pd except ImportError: raise ImportError("What are you doing trying to export a Layout as a pandas DataFrame when you don't have pandas installed? Eh? Eh?") if kwargs: files = self.get(return_type='obj', **kwargs) else: files = self.files.values() data = pd.DataFrame.from_records([f.entities for f in files]) data.insert(0, 'path', [f.path for f in files]) return data
Return information for all Files tracked in the Layout as a pandas DataFrame. Args: kwargs: Optional keyword arguments passed on to get(). This allows one to easily select only a subset of files for export. Returns: A pandas DataFrame, where each row is a file, and each column is a tracked entity. NaNs are injected whenever a file has no value for a given attribute.
codesearchnet
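A minimal standalone sketch of the DataFrame construction at the core of as_data_frame, using mock file objects in place of a real Layout; the paths and entities are invented.

from types import SimpleNamespace
import pandas as pd

files = [
    SimpleNamespace(path='sub-01/func/sub-01_task-rest_bold.nii.gz',
                    entities={'subject': '01', 'task': 'rest'}),
    SimpleNamespace(path='sub-02/func/sub-02_task-rest_bold.nii.gz',
                    entities={'subject': '02', 'task': 'rest'}),
]
data = pd.DataFrame.from_records([f.entities for f in files])
data.insert(0, 'path', [f.path for f in files])
print(data)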
def _container_handler(ion_type, ctx): transition = None first = True at_top = ctx.depth == 0 while True: data_event, self = (yield transition) if data_event is not None and data_event.type is ReadEventType.SKIP: yield ctx.read_data_transition(ctx.remaining, self, skip=True) if ctx.queue.position == ctx.limit: yield Transition( IonEvent(IonEventType.CONTAINER_END, ion_type, depth=ctx.depth-1), ctx.whence ) if ion_type is IonType.STRUCT: self_handler = _create_delegate_handler(self) (field_sid, _), _ = yield ctx.immediate_transition( _var_uint_field_handler(self_handler, ctx) ) field_name = SymbolToken(None, field_sid) else: field_name = None expects_ivm = first and at_top transition = ctx.immediate_transition( _start_type_handler(field_name, self, ctx, expects_ivm, at_top=at_top) ) first = False
Handler for the body of a container (or the top-level stream). Args: ion_type (Optional[IonType]): The type of the container or ``None`` for the top-level. ctx (_HandlerContext): The context for the container.
juraj-google-style
def _ProcessFileEntryDataStream(self, mediator, file_entry, data_stream): display_name = mediator.GetDisplayName() data_stream_name = getattr(data_stream, 'name', '') or '' logger.debug(( '[ProcessFileEntryDataStream] processing data stream: "{0:s}" of ' 'file entry: {1:s}').format(data_stream_name, display_name)) mediator.ClearEventAttributes() if data_stream and self._analyzers: self._AnalyzeDataStream(mediator, file_entry, data_stream.name) self._ExtractMetadataFromFileEntry(mediator, file_entry, data_stream) if not data_stream: return skip_content_extraction = self._CanSkipContentExtraction(file_entry) if skip_content_extraction: display_name = mediator.GetDisplayName() logger.debug( 'Skipping content extraction of: {0:s}'.format(display_name)) self.processing_status = definitions.STATUS_INDICATOR_IDLE return path_spec = copy.deepcopy(file_entry.path_spec) if data_stream and not data_stream.IsDefault(): path_spec.data_stream = data_stream.name archive_types = [] compressed_stream_types = [] if self._process_compressed_streams: compressed_stream_types = self._GetCompressedStreamTypes( mediator, path_spec) if not compressed_stream_types: archive_types = self._GetArchiveTypes(mediator, path_spec) if archive_types: if self._process_archives: self._ProcessArchiveTypes(mediator, path_spec, archive_types) if dfvfs_definitions.TYPE_INDICATOR_ZIP in archive_types: self._ExtractContentFromDataStream( mediator, file_entry, data_stream.name) elif compressed_stream_types: self._ProcessCompressedStreamTypes( mediator, path_spec, compressed_stream_types) else: self._ExtractContentFromDataStream( mediator, file_entry, data_stream.name)
Processes a specific data stream of a file entry. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry containing the data stream. data_stream (dfvfs.DataStream): data stream or None if the file entry has no data stream.
juraj-google-style
def get_lower_bound(self): lower_bounds = [] for distribution in self.distribs.values(): lower_bound = distribution.percent_point((distribution.mean / 10000)) if (not pd.isnull(lower_bound)): lower_bounds.append(lower_bound) return min(lower_bounds)
Compute the lower bound to integrate cumulative density. Returns: float: lower bound for cumulative density integral.
codesearchnet
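A rough standalone analogue of get_lower_bound using scipy.stats frozen distributions: percent_point corresponds to ppf and distribution.mean to the frozen mean; the example normals are invented and the mean/10000 quantile simply mirrors the method above.

from scipy import stats
import pandas as pd

distribs = {'a': stats.norm(loc=5.0, scale=1.0), 'b': stats.norm(loc=20.0, scale=4.0)}

lower_bounds = []
for dist in distribs.values():
    lb = dist.ppf(dist.mean() / 10000)  # percent_point(mean / 10000) analogue
    if not pd.isnull(lb):
        lower_bounds.append(lb)
print(min(lower_bounds))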
def iter_archive(self, resource): if isinstance(resource, six.string_types): resource = resource_lib.Resource(path=resource) return extractor.iter_archive(resource.path, resource.extract_method)
Returns iterator over files within archive. **Important Note**: caller should read files as they are yielded. Reading out of order is slow. Args: resource: path to archive or `tfds.download.Resource`. Returns: Generator yielding tuple (path_within_archive, file_obj).
codesearchnet
def snake_case_to_headless_camel_case(snake_string): return ''.join([snake_string.split('_')[0]] + list(sub_string.capitalize() for sub_string in snake_string.split('_')[1:]))
Convert snake_case to headlessCamelCase. Args: snake_string: The string to be converted. Returns: The input string converted to headlessCamelCase.
juraj-google-style
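A quick usage check, assuming snake_case_to_headless_camel_case above is in scope.

assert snake_case_to_headless_camel_case('snake_case_to_camel') == 'snakeCaseToCamel'
assert snake_case_to_headless_camel_case('already') == 'already'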