code: string (lengths 20–4.93k)
docstring: string (lengths 33–1.27k)
source: string (3 classes)
def get_policies_from_aws(client, scope='Local'): done = False marker = None policies = [] while not done: if marker: response = client.list_policies(Marker=marker, Scope=scope) else: response = client.list_policies(Scope=scope) policies += response['Policies'] if response['IsTruncated']: marker = response['Marker'] else: done = True return policies
Returns a list of all the policies currently applied to an AWS Account for the specified scope. Args: client (:obj:`boto3.session.Session`): A boto3 Session object scope (`str`): The policy scope to use. Default: Local Returns: :obj:`list` of `dict`
juraj-google-style
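Illustrative usage sketch for the entry above (not part of the dataset row): it assumes boto3 is installed, AWS credentials are configured, and that the `client` argument is an IAM client, since `list_policies` is an IAM API call.

```python
import boto3

# Hypothetical call: an IAM client provides the list_policies API used above.
iam = boto3.client("iam")
policies = get_policies_from_aws(iam, scope="Local")
print(f"{len(policies)} customer-managed policies found")
```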
def as_str(bytes_or_text, encoding='utf-8'): return as_text(bytes_or_text, encoding)
Acts as an alias for the `as_text` function. Args: bytes_or_text: The input value to be converted. A bytes or unicode object. encoding: Optional string. The encoding to use if bytes_or_text is a bytes object. Defaults to 'utf-8'. Returns: A unicode string. Raises: TypeError: If bytes_or_text is not a bytes or unicode object. UnicodeDecodeError: If bytes_or_text is a bytes object and cannot be decoded using the specified encoding.
github-repos
def make_export_strategy( args, keep_target, assets_extra, features, schema, stats): target_name = feature_transforms.get_target_name(features) csv_header = [col['name'] for col in schema] if not keep_target: csv_header.remove(target_name) def export_fn(estimator, export_dir_base, checkpoint_path=None, eval_result=None): with ops.Graph().as_default() as g: contrib_variables.create_global_step(g) input_ops = feature_transforms.build_csv_serving_tensors_for_training_step( args.analysis, features, schema, stats, keep_target) model_fn_ops = estimator._call_model_fn(input_ops.features, None, model_fn_lib.ModeKeys.INFER) output_fetch_tensors = make_prediction_output_tensors( args=args, features=features, input_ops=input_ops, model_fn_ops=model_fn_ops, keep_target=keep_target) signature_inputs = {key: tf.saved_model.utils.build_tensor_info(tensor) for key, tensor in six.iteritems(input_ops.default_inputs)} signature_outputs = {key: tf.saved_model.utils.build_tensor_info(tensor) for key, tensor in six.iteritems(output_fetch_tensors)} signature_def_map = { 'serving_default': signature_def_utils.build_signature_def( signature_inputs, signature_outputs, tf.saved_model.signature_constants.PREDICT_METHOD_NAME)} if not checkpoint_path: checkpoint_path = saver.latest_checkpoint(estimator._model_dir) if not checkpoint_path: raise ValueError("Couldn't find trained model at %s." % estimator._model_dir) export_dir = saved_model_export_utils.get_timestamped_export_dir( export_dir_base) if (model_fn_ops.scaffold is not None and model_fn_ops.scaffold.saver is not None): saver_for_restore = model_fn_ops.scaffold.saver else: saver_for_restore = saver.Saver(sharded=True) with tf_session.Session('') as session: saver_for_restore.restore(session, checkpoint_path) init_op = control_flow_ops.group( variables.local_variables_initializer(), resources.initialize_resources(resources.shared_resources()), tf.tables_initializer()) builder = saved_model_builder.SavedModelBuilder(export_dir) builder.add_meta_graph_and_variables( session, [tag_constants.SERVING], signature_def_map=signature_def_map, assets_collection=ops.get_collection( ops.GraphKeys.ASSET_FILEPATHS), legacy_init_op=init_op) builder.save(False) if assets_extra: assets_extra_path = os.path.join(compat.as_bytes(export_dir), compat.as_bytes('assets.extra')) for dest_relative, source in assets_extra.items(): dest_absolute = os.path.join(compat.as_bytes(assets_extra_path), compat.as_bytes(dest_relative)) dest_path = os.path.dirname(dest_absolute) file_io.recursive_create_dir(dest_path) file_io.copy(source, dest_absolute) saved_model_export_utils.garbage_collect_exports( export_dir_base, exports_to_keep=3) if keep_target: final_dir = os.path.join(args.job_dir, 'evaluation_model') else: final_dir = os.path.join(args.job_dir, 'model') if file_io.is_directory(final_dir): file_io.delete_recursively(final_dir) file_io.recursive_create_dir(final_dir) recursive_copy(export_dir, final_dir) return export_dir if keep_target: intermediate_dir = 'intermediate_evaluation_models' else: intermediate_dir = 'intermediate_prediction_models' return export_strategy.ExportStrategy(intermediate_dir, export_fn)
Makes prediction graph that takes json input. Args: args: command line args keep_target: If true, target column is returned in prediction graph. Target column must also exist in input data assets_extra: other files to copy to the output folder job_dir: root job folder features: features dict schema: schema list stats: stats dict
juraj-google-style
def __generate_reference__(self, triple_map, **kwargs): element = kwargs.get("element") found_elements = element.xpath( triple_map.reference, namespaces=self.xml_ns) for elem in found_elements: raw_text = elem.text.strip() if not raw_text.startswith("http"): continue return rdflib.URIRef(raw_text)
Internal method takes a triple_map and returns the result of applying the XPath to the current DOM context Args: ----- triple_map: SimpleNamespace element: etree.Element
juraj-google-style
def add_dict_to_hash(a_hash, a_dict): if (a_dict is None): return for (k, v) in a_dict.items(): a_hash.update((((b'\x00' + k.encode('utf-8')) + b'\x00') + v.encode('utf-8')))
Adds `a_dict` to `a_hash` Args: a_hash (`Hash`): the secure hash, e.g. created by hashlib.md5 a_dict (dict[string, [string]]): the dictionary to add to the hash
codesearchnet
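A minimal usage sketch for `add_dict_to_hash` above, assuming the function is in scope; note that the resulting digest depends on the dictionary's iteration order.

```python
import hashlib

# Feed a small settings dict into an MD5 hash to get a fingerprint.
digest = hashlib.md5()
add_dict_to_hash(digest, {"region": "us-east-1", "stage": "prod"})
print(digest.hexdigest())
```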
def or_filter(self, **filters): clone = copy.deepcopy(self) clone.adapter.add_query([("OR_QRY", filters)]) return clone
Works like "filter" but joins given filters with OR operator. Args: **filters: Query filters as keyword arguments. Returns: Self. Queryset object. Example: >>> Person.objects.or_filter(age__gte=16, name__startswith='jo')
juraj-google-style
def calc_radius(latitude, ellipsoid='WGS84'): ellipsoids = {'Airy (1830)': (6377.563, 6356.257), 'Bessel': (6377.397, 6356.079), 'Clarke (1880)': (6378.249145, 6356.51486955), 'FAI sphere': (6371, 6371), 'GRS-67': (6378.16, 6356.775), 'International': (6378.388, 6356.912), 'Krasovsky': (6378.245, 6356.863), 'NAD27': (6378.206, 6356.584), 'WGS66': (6378.145, 6356.758), 'WGS72': (6378.135, 6356.751), 'WGS84': (6378.137, 6356.752)} (major, minor) = ellipsoids[ellipsoid] eccentricity = (1 - ((minor ** 2) / (major ** 2))) sl = math.sin(math.radians(latitude)) return ((major * (1 - eccentricity)) / ((1 - (eccentricity * (sl ** 2))) ** 1.5))
Calculate earth radius for a given latitude. This function is most useful when dealing with datasets that are very localised and require the accuracy of an ellipsoid model without the complexity of code necessary to actually use one. The results are meant to be used as a :data:`BODY_RADIUS` replacement when the simple geocentric value is not good enough. The original use for ``calc_radius`` is to set a more accurate radius value for use with trigpointing databases that are keyed on the OSGB36 datum, but it has been expanded to cover other ellipsoids. Args: latitude (float): Latitude to calculate earth radius for ellipsoid (str): Name of the ellipsoid model to use for calculation Returns: float: Approximated Earth radius at the given latitude
codesearchnet
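A short usage sketch for `calc_radius` above, assuming the function and its `math` import are in scope; the returned value is the approximate Earth radius in kilometres.

```python
# Radius at roughly the latitude of London, using the default WGS84 ellipsoid.
radius_km = calc_radius(51.5)
print(f"~{radius_km:.1f} km")

# An explicit ellipsoid can be selected by name.
airy_km = calc_radius(51.5, ellipsoid="Airy (1830)")
```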
def _get_operator_param_name_and_values(operator_class_name, task_details): operator_task_details = task_details.copy() if ('type' in operator_task_details.keys()): del operator_task_details['type'] if ('up_stream' in operator_task_details.keys()): del operator_task_details['up_stream'] if (operator_class_name == 'BigQueryOperator'): return PipelineGenerator._get_bq_execute_params(operator_task_details) if (operator_class_name == 'BigQueryToCloudStorageOperator'): return PipelineGenerator._get_bq_extract_params(operator_task_details) if (operator_class_name == 'GoogleCloudStorageToBigQueryOperator'): return PipelineGenerator._get_bq_load_params(operator_task_details) return operator_task_details
Internal helper gets the name of the python parameter for the Airflow operator class. In some cases, we do not expose the airflow parameter name in its native form, but choose to expose a name that's more standard for Datalab, or one that's more friendly. For example, Airflow's BigQueryOperator uses 'bql' for the query string, but we want %%bq users in Datalab to use 'query'. Hence, a few substitutions that are specific to the Airflow operator need to be made. Similarly, the parameter value could come from the notebook's context. All that happens here. Returns: Dict containing _only_ the keys and values that are required in Airflow operator definition. This requires substituting existing keys in the dictionary with their Airflow equivalents (i.e. by adding new keys, and removing the existing ones).
codesearchnet
def __init__(self, structure, include_bv_charge=False): self.structure = structure self.include_bv_charge = include_bv_charge sga = SpacegroupAnalyzer(self.structure) self.symm_structure = sga.get_symmetrized_structure() self.equiv_site_seq = list(self.symm_structure.equivalent_sites) self.struct_valences = None if self.include_bv_charge: bv = BVAnalyzer() self.struct_valences = bv.get_valences(self.structure)
Initializes a Vacancy Generator Args: structure(Structure): pymatgen structure object
juraj-google-style
def from_config(cls, config: dict): timestamp = config.get('timestamp', None) return cls(config.get('id'), config.get('type'), config.get('data', dict()), config.get('origin', None), timestamp, config.get('object_type', None), config.get('object_id', None), config.get('object_key', None))
Create an event object from an event dictionary object. Args: config (dict): Event Configuration dictionary.
juraj-google-style
def _checkResponseNumberOfRegisters(payload, numberOfRegisters): _checkString(payload, minlength=4, description='payload') _checkInt(numberOfRegisters, minvalue=1, maxvalue=65535, description='numberOfRegisters') BYTERANGE_FOR_NUMBER_OF_REGISTERS = slice(2, 4) bytesForNumberOfRegisters = payload[BYTERANGE_FOR_NUMBER_OF_REGISTERS] receivedNumberOfWrittenReisters = _twoByteStringToNum(bytesForNumberOfRegisters) if (receivedNumberOfWrittenReisters != numberOfRegisters): raise ValueError('Wrong number of registers to write in the response: {0}, but commanded is {1}. The data payload is: {2!r}'.format(receivedNumberOfWrittenReisters, numberOfRegisters, payload))
Check that the number of written registers as given in the response is correct. The bytes 2 and 3 (zero based counting) in the payload holds the value. Args: * payload (string): The payload * numberOfRegisters (int): Number of registers that have been written Raises: TypeError, ValueError
codesearchnet
class IncMeanTracker(WindowedTracker, MeanTracker): def __init__(self, window_mode, **kwargs): super().__init__(window_mode=window_mode, **kwargs) self._mean = 0 def push(self, x): if not math.isnan(x): self._n += 1 delta = x - self._mean else: delta = 0 if self._window_mode == WindowMode.SLIDING: if len(self._queue) >= self._window_size and (not math.isnan((old_x := self.pop()))): self._n -= 1 delta += self._mean - old_x super().push(x) if self._n > 0: self._mean += delta / self._n else: self._mean = 0 def get(self): if self._n < 1: return float('nan') return self._mean
Base class for incremental mean trackers. This class implements incremental calculation of the mean, which is more efficient for streaming data as it updates the mean with each new data point instead of recalculating from scratch. Args: window_mode: A `WindowMode` enum specifying whether the window is `LANDMARK` or `SLIDING`. **kwargs: Keyword arguments passed to the parent class constructor.
github-repos
def add_variable(var, restore=True): collections = [MODEL_VARIABLES] if restore: collections.append(VARIABLES_TO_RESTORE) for collection in collections: if (var not in tf.get_collection(collection)): tf.add_to_collection(collection, var)
Adds a variable to the MODEL_VARIABLES collection. Optionally it will add the variable to the VARIABLES_TO_RESTORE collection. Args: var: a variable. restore: whether the variable should be added to the VARIABLES_TO_RESTORE collection.
codesearchnet
def _PrintStorageInformationAsText(self, storage_reader): table_view = views.ViewsFactory.GetTableView( self._views_format_type, title='Plaso Storage Information') table_view.AddRow(['Filename', os.path.basename(self._storage_file_path)]) table_view.AddRow(['Format version', storage_reader.format_version]) table_view.AddRow( ['Serialization format', storage_reader.serialization_format]) table_view.Write(self._output_writer) if storage_reader.storage_type == definitions.STORAGE_TYPE_SESSION: self._PrintSessionsOverview(storage_reader) self._PrintSessionsDetails(storage_reader) storage_counters = self._CalculateStorageCounters(storage_reader) if 'parsers' not in storage_counters: self._output_writer.Write( 'Unable to determine number of events generated per parser.\n') else: self._PrintParsersCounter(storage_counters['parsers']) if 'analysis_reports' not in storage_counters: self._output_writer.Write( 'Unable to determine number of reports generated per plugin.\n') else: self._PrintAnalysisReportCounter(storage_counters['analysis_reports']) if 'event_labels' not in storage_counters: self._output_writer.Write( 'Unable to determine number of event tags generated per label.\n') else: self._PrintEventLabelsCounter(storage_counters['event_labels']) self._PrintWarningCounters(storage_counters) if self._verbose: self._PrintWarningsDetails(storage_reader) self._PrintAnalysisReportsDetails(storage_reader) elif storage_reader.storage_type == definitions.STORAGE_TYPE_TASK: self._PrintTasksInformation(storage_reader)
Prints information about the store as human-readable text. Args: storage_reader (StorageReader): storage reader.
juraj-google-style
def append(self, item): if self.should_flush(): self.flush() self.items.append(item)
Add new item to the list. If needed, append will first flush and clear the existing items. Args: item: an item to add to the list.
juraj-google-style
def update_state(self, y_true, y_pred, sample_weight=None): y_true = ops.convert_to_tensor(y_true, dtype=self.dtype) y_pred = ops.convert_to_tensor(y_pred, dtype='float32') y_pred = ops.cast(y_pred >= self.threshold, self.dtype) return super().update_state(y_true, y_pred, sample_weight)
Accumulates the confusion matrix statistics. Before the confusion matrix is updated, the predicted values are thresholded to be: 0 for values that are smaller than the `threshold` 1 for values that are larger or equal to the `threshold` Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Can be a `Tensor` whose rank is either 0, or the same as `y_true`, and must be broadcastable to `y_true`. Defaults to `1`. Returns: Update op.
github-repos
def read(self, size=None): if not self._is_open: raise IOError('Not opened.') return self._file_object.read(size)
Reads a byte string from the file-like object at the current offset. The function will read a byte string of the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed.
juraj-google-style
def append_dictionary_to_file(localization_key_to_comment, file_path, section_name): output_file = open_strings_file(file_path, "a") write_section_header_to_file(output_file, section_name) for entry_key, entry_comment in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)): output_file.write(u'\n') write_entry_to_file(output_file, entry_comment, entry_key) output_file.close()
Appends dictionary of localization keys and comments to a file Args: localization_key_to_comment (dict): A mapping between localization keys and comments. file_path (str): The path of the file to append to. section_name (str): The name of the section.
juraj-google-style
def get_container_details(self, container_id_or_name: str) -> dict: container = self._client.containers.get(container_id_or_name) return container.attrs
Get details of a container. Args: container_id_or_name (string): docker container id or name Returns: dict, details of the container
codesearchnet
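A standalone sketch of the same lookup done by `get_container_details` above, assuming the Docker SDK for Python is installed, a Docker daemon is reachable, and a container named `my-container` exists (the name is a placeholder).

```python
import docker

client = docker.from_env()
container = client.containers.get("my-container")  # accepts an id or a name
details = container.attrs                           # raw `docker inspect` data as a dict
print(details["State"]["Status"])
```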
def numpy(self): return _var_to_tensor(self).numpy()
Copies the values in this ShardedVariable to a NumPy array. First converts to a single Tensor using the registered conversion function, which concatenates the shards, then uses Tensor.numpy() to convert to a NumPy array. Returns: A NumPy array of the same shape and dtype.
github-repos
def create(self, teamId, personId=None, personEmail=None, isModerator=False, **request_parameters): check_type(teamId, basestring, may_be_none=False) check_type(personId, basestring) check_type(personEmail, basestring) check_type(isModerator, bool) post_data = dict_from_items_with_values(request_parameters, teamId=teamId, personId=personId, personEmail=personEmail, isModerator=isModerator) json_data = self._session.post(API_ENDPOINT, json=post_data) return self._object_factory(OBJECT_TYPE, json_data)
Add someone to a team by Person ID or email address. Add someone to a team by Person ID or email address; optionally making them a moderator. Args: teamId(basestring): The team ID. personId(basestring): The person ID. personEmail(basestring): The email address of the person. isModerator(bool): Set to True to make the person a team moderator. **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: TeamMembership: A TeamMembership object with the details of the created team membership. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error.
codesearchnet
def print_alignment(mapping, instance1, instance2): result = [] for instance1_item, m in zip(instance1, mapping): r = instance1_item[1] + "(" + instance1_item[2] + ")" if m == -1: r += "-Null" else: instance2_item = instance2[m] r += "-" + instance2_item[1] + "(" + instance2_item[2] + ")" result.append(r) return " ".join(result)
print the alignment based on a node mapping Args: mapping: current node mapping list instance1: nodes of AMR 1 instance2: nodes of AMR 2
juraj-google-style
def result(self, timeout=None): self._blocking_poll(timeout=timeout) if (self._exception is not None): raise self._exception return self._result
Get the result of the operation, blocking if necessary. Args: timeout (int): How long (in seconds) to wait for the operation to complete. If None, wait indefinitely. Returns: google.protobuf.Message: The Operation's result. Raises: google.api_core.GoogleAPICallError: If the operation errors or if the timeout is reached before the operation completes.
codesearchnet
def _decode_response_string_and_validate_format(self, rpc_id, response): if not response: raise errors.ProtocolError(self._device, errors.ProtocolError.NO_RESPONSE_FROM_SERVER) result = json.loads(response) for field_name in RPC_RESPONSE_REQUIRED_FIELDS: if field_name not in result: raise errors.ProtocolError(self._device, errors.ProtocolError.RESPONSE_MISSING_FIELD % field_name) if result['id'] != rpc_id: raise errors.ProtocolError(self._device, errors.ProtocolError.MISMATCHED_API_ID) return result
Decodes response JSON string to python dict and validates its format. Args: rpc_id: int, the actual id of this RPC. It should be the same as the id in the response, otherwise an error is raised. response: str, the JSON string of the RPC response. Returns: A dict decoded from the response JSON string. Raises: errors.ProtocolError: if the response format is invalid.
github-repos
def get_imports(filename: Union[str, os.PathLike]) -> list[str]: with open(filename, encoding='utf-8') as f: content = f.read() imported_modules = set() import transformers.utils def recursive_look_for_imports(node): if isinstance(node, ast.Try): return elif isinstance(node, ast.If): test = node.test for condition_node in ast.walk(test): if isinstance(condition_node, ast.Call): check_function = getattr(condition_node.func, 'id', '') if check_function.endswith('available') and check_function.startswith('is_flash_attn') or hasattr(transformers.utils.import_utils, check_function): return elif isinstance(node, ast.Import): for alias in node.names: top_module = alias.name.split('.')[0] if top_module: imported_modules.add(top_module) elif isinstance(node, ast.ImportFrom): if node.level == 0 and node.module: top_module = node.module.split('.')[0] if top_module: imported_modules.add(top_module) for child in ast.iter_child_nodes(node): recursive_look_for_imports(child) tree = ast.parse(content) recursive_look_for_imports(tree) return sorted(imported_modules)
Extracts all the libraries (not relative imports this time) that are imported in a file. Args: filename (`str` or `os.PathLike`): The module file to inspect. Returns: `list[str]`: The list of all packages required to use the input module.
github-repos
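A hedged usage sketch for `get_imports` above; it assumes the function is importable, that `transformers` is installed (the function consults `transformers.utils` internally), and that the file path is a placeholder.

```python
# Hypothetical path: any Python module file works.
imports = get_imports("my_custom_model/modeling_custom.py")
print(imports)  # e.g. ['torch', 'transformers'], depending on the file's top-level imports
```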
def create_issue(title: str, description: str, labels: Optional[List[str]]=None) -> Tuple[int, str]: url = 'https: data = {'owner': _GITHUB_REPO_OWNER, 'repo': _GITHUB_REPO_NAME, 'title': title, 'body': description, 'labels': [_AWAITING_TRIAGE_LABEL, _PERF_ALERT_LABEL]} if labels: data['labels'].extend(labels) response = requests.post(url=url, data=json.dumps(data), headers=_HEADERS, timeout=_REQUEST_TIMEOUT_SECS).json() return (response['number'], response['html_url'])
Create an issue with title, description with a label. Args: title: GitHub issue title. description: GitHub issue description. labels: Labels used to tag the GitHub issue. Returns: Tuple containing GitHub issue number and issue URL.
github-repos
def posterior_chromatogram_hypotheses_fast(experiment, prior_chrom_null): tg_ids = experiment.df.tg_num_id.values pp_values = (1 - experiment.df['pep'].values) current_tg_id = tg_ids[0] scores = [] final_result = [] final_result_h0 = [] for i in range(tg_ids.shape[0]): id_ = tg_ids[i] if (id_ != current_tg_id): prior_pg_true = ((1.0 - prior_chrom_null) / len(scores)) rr = single_chromatogram_hypothesis_fast(np.array(scores), prior_chrom_null, prior_pg_true) final_result.extend(rr[1:]) final_result_h0.extend((rr[0] for i in range(len(scores)))) scores = [] current_tg_id = id_ scores.append((1.0 - pp_values[i])) prior_pg_true = ((1.0 - prior_chrom_null) / len(scores)) rr = single_chromatogram_hypothesis_fast(np.array(scores), prior_chrom_null, prior_pg_true) final_result.extend(rr[1:]) final_result_h0.extend(([rr[0]] * len(scores))) return (final_result, final_result_h0)
Compute posterior probabilities for each chromatogram For each chromatogram (each group_id / peptide precursor), all hypotheses of all peaks being correct (and all others false) as well as the h0 (all peaks are false) are computed. The prior probabilities are given as arguments to the function. This assumes that the input data is sorted by tg_num_id Args: experiment(:class:`data_handling.Multipeptide`): the data of one experiment prior_chrom_null(float): the prior probability that any precursor is absent (all peaks are false) Returns: tuple(hypothesis, h0): two vectors that contain for each entry in the input dataframe the probabilities for the hypothesis that the peak is correct and the probability for the h0
codesearchnet
def get_ref(profile, ref): resource = ('/refs/' + ref) data = api.get_request(profile, resource) return prepare(data)
Fetch a ref. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to fetch, e.g., ``heads/my-feature-branch``. Returns A dict with data about the ref.
codesearchnet
def set_sflow(self, name, value=None, default=False, disable=False): if (value not in [True, False, None]): raise ValueError commands = [('interface %s' % name)] commands.append(self.command_builder('sflow enable', value=value, default=default, disable=disable)) return self.configure(commands)
Configures the sFlow state on the interface Args: name (string): The interface identifier. It must be a full interface name (ie Ethernet, not Et) value (boolean): True if sFlow should be enabled otherwise False default (boolean): Specifies the default value for sFlow disable (boolean): Specifies to disable sFlow Returns: True if the operation succeeds otherwise False is returned
codesearchnet
def __init__(self, filename, error_handler, **kwargs): self._filename = filename self._error_handler = error_handler self.lexer = ply.lex.lex(module=self, **kwargs)
Create a Lex lexer. To pass this into a Ply Yacc parser, pass it in using the .lexer property of an StlLexer instance: my_lexer = StlLexer() my_parser = ply.yacc.parser(lexer=my_lexer.lexer) Args: filename: The filename string to use in any error messaging. error_handler: An object to handle any lexing errors. kwargs: Forwarded to ply.lex.lex.
github-repos
def save_plot(self, filename, img_format="eps", ylim=None, zero_to_efermi=True, smooth=False): plt = self.get_plot(ylim=ylim, zero_to_efermi=zero_to_efermi, smooth=smooth) plt.savefig(filename, format=img_format) plt.close()
Save matplotlib plot to a file. Args: filename: Filename to write to. img_format: Image format to use. Defaults to EPS. ylim: Specifies the y-axis limits.
juraj-google-style
def open(self): if self._is_open: raise exceptions.ClientConnectionFailure('client connection already open') else: try: self.proxy.open() self._is_open = True except Exception as e: self.logger.error('could not open client connection: %s', e) raise
Open the client connection. Raises: ClientConnectionFailure: if the client connection is already open Exception: if an error occurs while trying to open the connection
codesearchnet
def filter_invalid_unicode_from_table(table): if not hasattr(table, 'table_id'): table.table_id = 0 for row_index, row in table.iterrows(): for col_index, cell in enumerate(row): cell, is_invalid = filter_invalid_unicode(cell) if is_invalid: logging.warning(f'Scrub an invalid table body @ table_id: {table.table_id}, row_index: {row_index}, col_index: {col_index}') for col_index, column in enumerate(table.columns): column, is_invalid = filter_invalid_unicode(column) if is_invalid: logging.warning(f'Scrub an invalid table header @ table_id: {table.table_id}, col_index: {col_index}')
Removes invalid unicode from table. Checks whether a table cell text contains an invalid unicode encoding. If yes, reset the table cell text to an empty str and log a warning for each invalid cell Args: table: table to clean.
github-repos
def create_complete_files(climan, path, cmd, *cmds, zsh_sourceable=False): path = pathlib.Path(path) zsh_dir = (path / 'zsh') if (not zsh_dir.exists()): zsh_dir.mkdir(parents=True) zsh_file = (zsh_dir / '_{}.sh'.format(cmd)) bash_dir = (path / 'bash') if (not bash_dir.exists()): bash_dir.mkdir(parents=True) bash_file = (bash_dir / '{}.sh'.format(cmd)) climan.zsh_complete(zsh_file, cmd, *cmds, sourceable=zsh_sourceable) climan.bash_complete(bash_file, cmd, *cmds)
Create completion files for bash and zsh. Args: climan (:class:`~loam.cli.CLIManager`): CLI manager. path (path-like): directory in which the config files should be created. It is created if it doesn't exist. cmd (str): command name that should be completed. cmds (str): extra command names that should be completed. zsh_sourceable (bool): if True, the generated file will contain an explicit call to ``compdef``, which means it can be sourced to activate CLI completion.
codesearchnet
def ParseNetworkConnectivityUsage( self, parser_mediator, cache=None, database=None, table=None, **unused_kwargs): self._ParseGUIDTable( parser_mediator, cache, database, table, self._NETWORK_CONNECTIVITY_USAGE_VALUES_MAP, SRUMNetworkConnectivityUsageEventData)
Parses the network connectivity usage monitor table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache (Optional[ESEDBCache]): cache, which contains information about the identifiers stored in the SruDbIdMapTable table. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table.
juraj-google-style
def read_zmat(cls, inputfile, implicit_index=True): cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral'] if implicit_index: zmat_frame = pd.read_table(inputfile, comment=' zmat_frame.index = range(1, (len(zmat_frame) + 1)) else: zmat_frame = pd.read_table(inputfile, comment=' zmat_frame.set_index('temp_index', drop=True, inplace=True) zmat_frame.index.name = None if pd.isnull(zmat_frame.iloc[(0, 1)]): zmat_values = [1.27, 127.0, 127.0] zmat_refs = [constants.int_label[x] for x in ['origin', 'e_z', 'e_x']] for (row, i) in enumerate(zmat_frame.index[:3]): cols = ['b', 'a', 'd'] zmat_frame.loc[(:, cols)] = zmat_frame.loc[(:, cols)].astype('O') if (row < 2): zmat_frame.loc[(i, cols[row:])] = zmat_refs[row:] zmat_frame.loc[(i, ['bond', 'angle', 'dihedral'][row:])] = zmat_values[row:] else: zmat_frame.loc[(i, 'd')] = zmat_refs[2] zmat_frame.loc[(i, 'dihedral')] = zmat_values[2] elif (zmat_frame.iloc[(0, 1)] in constants.int_label.keys()): zmat_frame = zmat_frame.replace({col: constants.int_label for col in ['b', 'a', 'd']}) zmat_frame = cls._cast_correct_types(zmat_frame) try: Zmat = cls(zmat_frame) except InvalidReference: raise UndefinedCoordinateSystem('Your zmatrix cannot be transformed to cartesian coordinates') return Zmat
Reads a zmat file. Lines beginning with ``#`` are ignored. Args: inputfile (str): Path of the zmat file to read. implicit_index (bool): If this option is true the first column has to be the element symbols for the atoms. The row number is used to determine the index. Returns: Zmat: The parsed Zmat instance.
codesearchnet
def convert_to_generator_like(data, batch_size=None, steps_per_epoch=None, epochs=1, shuffle=False): if isinstance(data, tuple): data = tuple((ele for ele in data if not all((e is None for e in nest.flatten(ele))))) if data_utils.is_generator_or_sequence(data) or isinstance(data, iterator_ops.IteratorBase): if isinstance(data, data_utils.Sequence): if steps_per_epoch is None: steps_per_epoch = len(data) return (data, steps_per_epoch) if isinstance(data, data_types.DatasetV2): return (dataset_ops.make_one_shot_iterator(data), steps_per_epoch) num_samples = int(nest.flatten(data)[0].shape[0]) if batch_size is None: raise ValueError('When passing input data as arrays, do not specify `steps_per_epoch`/`steps` argument. Please use `batch_size` instead.') steps_per_epoch = int(math.ceil(num_samples / batch_size)) def _gen(data): index_array = np.arange(num_samples) for _ in range(epochs): if shuffle: np.random.shuffle(index_array) batches = generic_utils.make_batches(num_samples, batch_size) for batch_start, batch_end in batches: batch_ids = index_array[batch_start:batch_end] flat_batch_data = training_utils.slice_arrays(nest.flatten(data), batch_ids, contiguous=not shuffle) yield nest.pack_sequence_as(data, flat_batch_data) return (_gen(data), steps_per_epoch)
Make a generator out of NumPy or EagerTensor inputs. Args: data: Either a generator or `keras.utils.data_utils.Sequence` object or `Dataset`, `Iterator`, or a {1,2,3}-tuple of NumPy arrays or EagerTensors. If a tuple, the elements represent `(x, y, sample_weights)` and may be `None` or `[None]`. batch_size: Used when creating a generator out of tuples of NumPy arrays or EagerTensors. steps_per_epoch: Steps of the generator to run each epoch. If `None` the number of steps will be read from the data (for `keras.utils.data_utils.Sequence` types). epochs: Total number of epochs to run. shuffle: Whether the data should be shuffled. Returns: - Generator, `keras.utils.data_utils.Sequence`, or `Iterator`. Raises: - ValueError: If `batch_size` is not provided for NumPy or EagerTensor inputs.
github-repos
def must_exist(*components): _path = path(*components) if not exists(_path): raise File404(_path) return _path
Ensure path exists. Arguments: *components (str[]): Path components. Returns: str: File path. Raises: File404: If path does not exist.
juraj-google-style
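An illustrative sketch for `must_exist` above; it assumes the helper `path`/`exists` functions and the `File404` exception from the same module are in scope, and uses a placeholder path.

```python
try:
    config_path = must_exist("/etc", "hosts")  # components are joined and checked in one call
except File404 as err:
    print("missing path:", err)
```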
class EncodecDecoderOutput(ModelOutput): audio_values: Optional[torch.FloatTensor] = None
Args: audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*): Decoded audio values, obtained using the decoder part of Encodec.
github-repos
def list_instances(i_info, param_str, numbered=False): print(param_str) for i in i_info: if numbered: print("Instance {} print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True)
Display a list of all instances and their details. Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates whether the list should be displayed with numbers before each instance. This is used when called from user_picklist.
juraj-google-style
def current_spi_to_number(self): if self.slots['subpage'] == None: return self.sub_pi_to_number(0, 0) else: return self.sub_pi_to_number(self.slots['subpage'], self.slots['subitem'])
Convert subpage & subitem to an integer * if page == 1, then return 0, since the item count is the true # of items * if page == 2, then return (page-1) * items_per_page, since we are returning the # of items on a full page. Args: * None Returns: * Integer - Which represents the number of items up to the page.
juraj-google-style
def CheckCondition(condition, check_object): try: of = objectfilter.Parser(condition).Parse() compiled_filter = of.Compile(objectfilter.BaseFilterImplementation) return compiled_filter.Matches(check_object) except objectfilter.Error as e: raise ConditionError(e)
Check if a condition matches an object. Args: condition: A string condition e.g. "os == 'Windows'" check_object: Object to validate, e.g. an rdf_client.KnowledgeBase() Returns: True or False depending on whether the condition matches. Raises: ConditionError: If condition is bad.
juraj-google-style
def reshape(vari, shape): if isinstance(vari, Poly): core = vari.A.copy() for key in vari.keys: core[key] = reshape(core[key], shape) out = Poly(core, vari.dim, shape, vari.dtype) return out return numpy.asarray(vari).reshape(shape)
Reshape the shape of a shapeable quantity. Args: vari (chaospy.poly.base.Poly, numpy.ndarray): Shapeable input quantity. shape (tuple): The polynomials new shape. Must be compatible with the number of elements in ``vari``. Returns: (chaospy.poly.base.Poly, numpy.ndarray): Same type as ``vari``. Examples: >>> poly = chaospy.prange(6) >>> print(poly) [1, q0, q0^2, q0^3, q0^4, q0^5] >>> print(chaospy.reshape(poly, (2,3))) [[1, q0, q0^2], [q0^3, q0^4, q0^5]]
codesearchnet
def sync_proxy(self, mri, block): done_queue = Queue() self._queues[mri] = done_queue update_fields = set() def callback(value=None): if isinstance(value, Exception): if isinstance(value, Disconnected): update_fields.clear() block.health.set_value( value="pvAccess disconnected", alarm=Alarm.disconnected("pvAccess disconnected") ) else: with block.notifier.changes_squashed: if not update_fields: self.log.debug("Regenerating from %s", list(value)) self._regenerate_block(block, value, update_fields) done_queue.put(None) else: self._update_block(block, value, update_fields) m = self._ctxt.monitor(mri, callback, notify_disconnect=True) self._monitors.add(m) done_queue.get(timeout=DEFAULT_TIMEOUT)
Abstract method telling the ClientComms to sync this proxy Block with its remote counterpart. Should wait until it is connected Args: mri (str): The mri for the remote block block (BlockModel): The local proxy Block to keep in sync
juraj-google-style
def recv(self, request_id): log.debug("Reading response %d from Kafka" % request_id) if not self._sock: self.reinit() resp = self._read_bytes(4) (size,) = struct.unpack('>i', resp) resp = self._read_bytes(size) return resp
Get a response packet from Kafka Arguments: request_id: can be any int (only used for debug logging...) Returns: str: Encoded kafka packet response from server
juraj-google-style
def os_version_info_ex(): if (not HAS_WIN32): return class OSVersionInfo(ctypes.Structure): _fields_ = (('dwOSVersionInfoSize', DWORD), ('dwMajorVersion', DWORD), ('dwMinorVersion', DWORD), ('dwBuildNumber', DWORD), ('dwPlatformId', DWORD), ('szCSDVersion', (WCHAR * 128))) def __init__(self, *args, **kwds): super(OSVersionInfo, self).__init__(*args, **kwds) self.dwOSVersionInfoSize = ctypes.sizeof(self) kernel32.GetVersionExW(ctypes.byref(self)) class OSVersionInfoEx(OSVersionInfo): _fields_ = (('wServicePackMajor', WORD), ('wServicePackMinor', WORD), ('wSuiteMask', WORD), ('wProductType', BYTE), ('wReserved', BYTE)) return OSVersionInfoEx()
Helper function to return the results of the GetVersionExW Windows API call. It is a ctypes Structure that contains Windows OS Version information. Returns: class: An instance of a class containing version info
codesearchnet
def decode(self, encoded): encoded = super().decode(encoded) tokens = [self.itos[index] for index in encoded] return self.detokenize(tokens)
Decodes a tensor into a sequence. Args: encoded (torch.Tensor): Encoded sequence. Returns: str: Sequence decoded from ``encoded``.
juraj-google-style
def log(self: EventSetOrNode) -> EventSetOrNode: from temporian.core.operators.unary import log return log(self)
Calculates the natural logarithm of an [`EventSet`][temporian.EventSet]'s features. Can only be used on floating point features. Example: ```python >>> a = tp.event_set( ... timestamps=[1, 2, 3, 4, 5], ... features={"M": [np.e, 1., 2., 10., -1.]}, ... ) >>> a.log() indexes: ... timestamps: [1. 2. 3. 4. 5.] 'M': [1. 0. 0.6931 2.3026 nan] ... ``` Returns: EventSetOrNode with the logarithm of the input features.
github-repos
def delete(self, membershipId): check_type(membershipId, basestring) self._session.delete(((API_ENDPOINT + '/') + membershipId))
Delete a membership, by ID. Args: membershipId(basestring): The membership ID. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error.
codesearchnet
def load_text(self, text, tokenizer=None): if tokenizer: words = [x.lower() for x in tokenizer(text)] else: words = self.tokenize(text) self._dictionary.update(words) self._update_dictionary()
Load text from which to generate a word frequency list Args: text (str): The text to be loaded tokenizer (function): The function to use to tokenize a string
juraj-google-style
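A usage sketch for `load_text` above, assuming `wf` is an instance of the word-frequency class that defines the method; the custom tokenizer is a hypothetical example.

```python
import re

def simple_tokenizer(text):
    # Keep lowercase word characters and apostrophes only.
    return re.findall(r"[a-z']+", text.lower())

wf.load_text("The quick brown fox jumps over the lazy dog", tokenizer=simple_tokenizer)
```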
def from_parent(parent_key, i): if (not isinstance(parent_key, HDPrivateKey)): raise TypeError('parent_key must be an HDPrivateKey object.') hmac_key = parent_key.chain_code if (i & 2147483648): hmac_data = ((b'\x00' + bytes(parent_key._key)) + i.to_bytes(length=4, byteorder='big')) else: hmac_data = (parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big')) I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest() (Il, Ir) = (I[:32], I[32:]) parse_Il = int.from_bytes(Il, 'big') if (parse_Il >= bitcoin_curve.n): return None child_key = ((parse_Il + parent_key._key.key) % bitcoin_curve.n) if (child_key == 0): return None child_depth = (parent_key.depth + 1) return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint)
Derives a child private key from a parent private key. It is not possible to derive a child private key from a public parent key. Args: parent_key (HDPrivateKey): The parent private key to derive from. i (int): The index of the child key to derive.
codesearchnet
def release_docs_side_effect(content): result = content.replace("{", "{{").replace("}", "}}") result = result.replace("{{version}}", "{version}") result = result.replace("{{circleci_build}}", "{circleci_build}") result = result.replace("{{travis_build}}", "{travis_build}") result = result.replace("{{appveyor_build}}", "{appveyor_build}") result = result.replace("{{coveralls_build}}", "{coveralls_build}") return result
Updates the template so that curly braces are escaped correctly. Args: content (str): The template for ``docs/index.rst.release.template``. Returns: str: The updated template with properly escaped curly braces.
juraj-google-style
def __init__(self, model, task, cmdOptions): validateOpfJsonValue(task, "opfTaskSchema.json") self.__logger = logging.getLogger(".".join( ['com.numenta', self.__class__.__module__, self.__class__.__name__])) self.__logger.debug(("Instantiated %s(" + \ "model=%r, " + \ "task=%r, " + \ "cmdOptions=%r)") % \ (self.__class__.__name__, model, task, cmdOptions)) streamDef = task['dataset'] datasetReader = opf_basic_environment.BasicDatasetReader(streamDef) self.__model = model self.__datasetReader = datasetReader self.__task = task self.__cmdOptions = cmdOptions self.__predictionLogger = opf_basic_environment.BasicPredictionLogger( fields=model.getFieldInfo(), experimentDir=cmdOptions.experimentDir, label=task['taskLabel'], inferenceType=self.__model.getInferenceType()) taskControl = task['taskControl'] self.__taskDriver = OPFTaskDriver( taskControl=taskControl, model=model) loggedMetricPatterns = taskControl.get('loggedMetrics', None) loggedMetricLabels = matchPatterns(loggedMetricPatterns, self.__taskDriver.getMetricLabels()) self.__predictionLogger.setLoggedMetrics(loggedMetricLabels) self.__metricsLogger = opf_basic_environment.BasicPredictionMetricsLogger( experimentDir=cmdOptions.experimentDir, label=task['taskLabel'])
Constructor Args: model: The OPF Model instance against which to run the task task: A dictionary conforming to opfTaskSchema.json cmdOptions: ParseCommandLineOptionsResult namedtuple
juraj-google-style
def mark_complex(self, name, serializer, deserializer): self._complex_properties[name] = (serializer, deserializer)
Mark a property as complex with serializer and deserializer functions. Args: name (str): The name of the complex property. serializer (callable): The function to call to serialize the property's value to something that can be saved in a json. deserializer (callable): The function to call to unserialize the property from a dict loaded by a json back to the original value.
juraj-google-style
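A sketch of registering a complex property with `mark_complex` above, assuming `settings` is an instance of the defining class; datetimes are stored as ISO strings so they survive a round trip through JSON.

```python
from datetime import datetime

settings.mark_complex(
    "created_at",
    serializer=lambda value: value.isoformat(),
    deserializer=datetime.fromisoformat,
)
```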
def get_operation_device(self, operation_name): operation = self._name_to_operation(operation_name) if isinstance(operation, tf.Operation): return operation.device else: return None
The device of an operation. Note that only tf operations have device assignments. Args: operation_name: a string, name of an operation in the graph. Returns: a string or None, representing the device name.
codesearchnet
def putenv(key, value): key = path2fsn(key) value = path2fsn(value) if is_win and PY2: try: set_windows_env_var(key, value) except WindowsError: raise ValueError else: try: os.putenv(key, value) except OSError: raise ValueError
Like `os.putenv` but takes unicode under Windows + Python 2 Args: key (pathlike): The env var to get value (pathlike): The value to set Raises: ValueError
juraj-google-style
def _FormatPropertyName(self, property_name): fix_key = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', property_name) return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', fix_key).lower()
Formats a camel case property name as snake case. Args: property_name (str): property name in camel case. Returns: str: property name in snake case.
juraj-google-style
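A standalone sketch of the two-step camel-to-snake conversion used by `_FormatPropertyName` above, so the regexes can be tried outside the class.

```python
import re

def camel_to_snake(property_name):
    # First split an uppercase-then-lowercase run off the preceding character,
    # then split any remaining lower/digit-to-upper boundary, and lower-case.
    fix_key = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', property_name)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', fix_key).lower()

assert camel_to_snake("bytesSent") == "bytes_sent"
assert camel_to_snake("SRUMNetworkUsage") == "srum_network_usage"
```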
def _AddAttributeContainer(self, container_type, attribute_container): container_list = self._GetSerializedAttributeContainerList(container_type) identifier = identifiers.SQLTableIdentifier( container_type, container_list.next_sequence_number + 1) attribute_container.SetIdentifier(identifier) serialized_data = self._SerializeAttributeContainer(attribute_container) container_list.PushAttributeContainer(serialized_data) if container_list.data_size > self._maximum_buffer_size: self._WriteSerializedAttributeContainerList(container_type)
Adds an attribute container. Args: container_type (str): attribute container type. attribute_container (AttributeContainer): attribute container. Raises: IOError: if the attribute container cannot be serialized. OSError: if the attribute container cannot be serialized.
juraj-google-style
def get_pattern_step_time(self, patternnumber, stepnumber): _checkPatternNumber(patternnumber) _checkStepNumber(stepnumber) address = _calculateRegisterAddress('time', patternnumber, stepnumber) return self.read_register(address, 0)
Get the step time. Args: * patternnumber (integer): 0-7 * stepnumber (integer): 0-7 Returns: The step time (int).
codesearchnet
def abort_expired_batches(self, request_timeout_ms, cluster): expired_batches = [] to_remove = [] count = 0 for tp in list(self._batches.keys()): assert tp in self._tp_locks, 'TopicPartition not in locks dict' if tp in self.muted: continue with self._tp_locks[tp]: dq = self._batches[tp] for batch in dq: is_full = bool(bool(batch != dq[-1]) or batch.records.is_full()) if batch.maybe_expire(request_timeout_ms, self.config['retry_backoff_ms'], self.config['linger_ms'], is_full): expired_batches.append(batch) to_remove.append(batch) count += 1 self.deallocate(batch) else: break if to_remove: for batch in to_remove: dq.remove(batch) to_remove = [] if expired_batches: log.warning("Expired %d batches in accumulator", count) return expired_batches
Abort the batches that have been sitting in RecordAccumulator for more than the configured request_timeout due to metadata being unavailable. Arguments: request_timeout_ms (int): milliseconds to timeout cluster (ClusterMetadata): current metadata for kafka cluster Returns: list of ProducerBatch that were expired
juraj-google-style
def get_sine_pos_embed(pos_tensor: torch.Tensor, num_pos_feats: int=128, temperature: int=10000, exchange_xy: bool=True) -> Tensor: scale = 2 * math.pi dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device) dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats) def sine_func(x: torch.Tensor): sin_x = x * scale / dim_t sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2) return sin_x pos_tensor = pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1) position_embeddings = [sine_func(x) for x in pos_tensor] if exchange_xy: position_embeddings[0], position_embeddings[1] = (position_embeddings[1], position_embeddings[0]) position_embeddings = torch.cat(position_embeddings, dim=-1) return position_embeddings
Generate sine position embeddings from a position tensor. Args: pos_tensor (torch.Tensor): Tensor containing positions. Shape: [..., n]. num_pos_feats (`int`, *optional*, defaults to 128): Projected shape for each float in the tensor. temperature (`int`, *optional*, defaults to 10000): Temperature in the sine/cosine function. exchange_xy (`bool`, *optional*, defaults to `True`): Exchange pos x and pos y. For example, input tensor is [x,y], the results will be [pos(y), pos(x)]. Returns: position_embeddings (torch.Tensor): shape: [..., n * hidden_size].
github-repos
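A hedged usage sketch for `get_sine_pos_embed` above, assuming PyTorch is installed and the function is in scope; the shapes are illustrative.

```python
import torch

# A batch of 100 (x, y) box centers per image; each coordinate becomes 128 features.
positions = torch.rand(2, 100, 2)
embeddings = get_sine_pos_embed(positions, num_pos_feats=128)
print(embeddings.shape)  # torch.Size([2, 100, 256]) -> last dim is n * num_pos_feats
```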
def print_type_of_instance(self, t: types.BaseValue, instance=None) -> str:
Returns a string of the type of an instance of t. For example, if t is `int`, then this method returns "int". Args: t: An abstract value. instance: A specific instance of t to print.
github-repos
def export(self, template_file_name, output_file_name, sort="public", data=None, limit=0): exportedData = {} exportedUsers = self.getSortedUsers() template = self.__getTemplate(template_file_name) position = 1 if not limit: exportedData["users"] = exportedUsers else: exportedData["users"] = exportedUsers[:limit] for u in exportedData["users"]: u["position"] = position u["comma"] = position < len(exportedData["users"]) position += 1 exportedData["extraData"] = data renderer = Renderer() output = renderer.render(template, exportedData) with open(output_file_name, "w") as text_file: text_file.write(output)
Export ranking to a file. Args: template_file_name (str): where is the template (moustache template) output_file_name (str): where create the file with the ranking sort (str): field to sort the users
juraj-google-style
def insert_arguments_into_query(compilation_result, arguments): _ensure_arguments_are_provided(compilation_result.input_metadata, arguments) if (compilation_result.language == MATCH_LANGUAGE): return insert_arguments_into_match_query(compilation_result, arguments) elif (compilation_result.language == GREMLIN_LANGUAGE): return insert_arguments_into_gremlin_query(compilation_result, arguments) elif (compilation_result.language == SQL_LANGUAGE): return insert_arguments_into_sql_query(compilation_result, arguments) else: raise AssertionError(u'Unrecognized language in compilation result: {}'.format(compilation_result))
Insert the arguments into the compiled GraphQL query to form a complete query. Args: compilation_result: a CompilationResult object derived from the GraphQL compiler arguments: dict, mapping argument name to its value, for every parameter the query expects. Returns: string, a query in the appropriate output language, with inserted argument data
codesearchnet
def _restore_training_state(self, restore_state): self.load_state_dict(restore_state["model"]) self.optimizer.load_state_dict(restore_state["optimizer"]) self.lr_scheduler.load_state_dict(restore_state["lr_scheduler"]) start_iteration = restore_state["iteration"] + 1 if self.config["verbose"]: print(f"Restored checkpoint to iteration {start_iteration}.") if restore_state["best_model_found"]: self.checkpointer.best_model_found = True self.checkpointer.best_iteration = restore_state["best_iteration"] self.checkpointer.best_score = restore_state["best_score"] if self.config["verbose"]: print( f"Updated checkpointer: " f"best_score={self.checkpointer.best_score:.3f}, " f"best_iteration={self.checkpointer.best_iteration}" ) return start_iteration
Restores the model and optimizer states This helper function restores the model's state to a given iteration so that a user can resume training at any epoch. Args: restore_state: a state_dict dictionary
juraj-google-style
def choices_validator(choices): def validator(value): if (value not in choices): raise ValidationError('{} is not in {}'.format(value, list(choices))) return validator
Return validator function that will check if ``value in choices``. Args: choices (list, set, tuple): allowed choices for new validator
codesearchnet
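A usage sketch for `choices_validator` above, assuming the function and its `ValidationError` exception are importable from the same module.

```python
status_validator = choices_validator({"open", "closed", "merged"})

status_validator("open")         # passes silently
try:
    status_validator("deleted")  # not in the allowed choices
except ValidationError as err:
    print(err)
```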
def parse_dtype_info(flags): if (flags.dtype in (i[0] for i in DTYPE_MAP.values())): return try: (flags.dtype, default_loss_scale) = DTYPE_MAP[flags.dtype] except KeyError: raise ValueError('Invalid dtype: {}'.format(flags.dtype)) flags.loss_scale = (flags.loss_scale or default_loss_scale)
Convert dtype string to tf dtype, and set loss_scale default as needed. Args: flags: namespace object returned by arg parser. Raises: ValueError: If an invalid dtype is provided.
codesearchnet
def load_vocabulary(lang='en', type='wiki'): src_dir = '{}_vocab'.format(type) p = locate_resource(src_dir, lang) return CountedVocabulary.from_vocabfile(p)
Return a CountedVocabulary object. Args: lang (string): language code. type (string): wiki,...
codesearchnet
def __init__(self, resolver_context): super(BDEFileSystem, self).__init__(resolver_context) self._bde_volume = None self._file_object = None
Initializes a file system. Args: resolver_context (Context): resolver context.
juraj-google-style
def runTemplate(id, data={}): conn = Qubole.agent() path = str(id) + "/run" res = conn.post(Template.element_path(path), data) cmdType = res['command_type'] cmdId = res['id'] cmdClass = eval(cmdType) cmd = cmdClass.find(cmdId) while not Command.is_done(cmd.status): time.sleep(Qubole.poll_interval) cmd = cmdClass.find(cmd.id) return Template.getResult(cmdClass, cmd)
Run an existing Template and wait for the result. Prints the result to stdout. Args: `id`: ID of the template to run `data`: json data containing the input_vars Returns: An integer as status (0: success, 1: failure)
juraj-google-style
def load_yaml(task: Task, file: str) -> Result: with open(file, 'r') as f: yml = ruamel.yaml.YAML(typ='safe') data = yml.load(f) return Result(host=task.host, result=data)
Loads a yaml file. Arguments: file: path to the file containing the yaml file to load Examples: Simple example with ``ordered_dict``:: > nr.run(task=load_yaml, file="mydata.yaml") Returns: Result object with the following attributes set: * result (``dict``): dictionary with the contents of the file
codesearchnet
def check_connection(host='localhost', port=27017, username=None, password=None, authdb=None, max_delay=1): if username and password: uri = ("mongodb: .format(quote_plus(username), quote_plus(password), host, port, authdb)) log_uri = ("mongodb: .format(quote_plus(username), host, port, authdb)) else: log_uri = uri = "mongodb: LOG.info("Test connection with uri: %s", log_uri) client = MongoClient(uri, serverSelectionTimeoutMS=max_delay) try: client.server_info() except (ServerSelectionTimeoutError,OperationFailure) as err: LOG.warning(err) return False return True
Check if a connection could be made to the mongo process specified Args: host(str) port(int) username(str) password(str) authdb (str): database to use for authentication max_delay(int): Number of milliseconds to wait for connection Returns: bool: If connection could be established
juraj-google-style
def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max'): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format: ' + str(data_format)) x, tf_data_format = _preprocess_conv3d_input(x, data_format) padding = _preprocess_padding(padding) if tf_data_format == 'NDHWC': strides = (1,) + strides + (1,) pool_size = (1,) + pool_size + (1,) else: strides = (1, 1) + strides pool_size = (1, 1) + pool_size if pool_mode == 'max': x = nn.max_pool3d(x, pool_size, strides, padding=padding, data_format=tf_data_format) elif pool_mode == 'avg': x = nn.avg_pool3d(x, pool_size, strides, padding=padding, data_format=tf_data_format) else: raise ValueError('Invalid pooling mode: ' + str(pool_mode)) if data_format == 'channels_first' and tf_data_format == 'NDHWC': x = array_ops.transpose(x, (0, 4, 1, 2, 3)) return x
3D Pooling. Args: x: Tensor or variable. pool_size: tuple of 3 integers. strides: tuple of 3 integers. padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. pool_mode: string, `"max"` or `"avg"`. Returns: A tensor, result of 3D pooling. Raises: ValueError: if `data_format` is neither `"channels_last"` or `"channels_first"`. ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
github-repos
def check_valid(spec): DeviceSpec.from_string(spec)
Check that a device spec is valid. Args: spec: a string. Raises: An exception if the spec is invalid.
github-repos
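A short sketch for `check_valid` above, assuming TensorFlow is available; valid specs return silently, while a malformed spec raises from `DeviceSpec.from_string`.

```python
# Both of these parse cleanly.
check_valid("/job:worker/replica:0/task:1/device:GPU:0")
check_valid("/cpu:0")
```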
def _delete(self, url, data, scope): self._create_session(scope) response = self.session.delete(url, data=data) return response.status_code, response.text
Make a DELETE request using the session object to a Degreed endpoint. Args: url (str): The url to send a DELETE request to. data (str): The json encoded payload to DELETE. scope (str): Must be one of the scopes Degreed expects: - `CONTENT_PROVIDER_SCOPE` - `COMPLETION_PROVIDER_SCOPE`
juraj-google-style
def transmit(self, payload, **kwargs): kwargs['app_label'] = 'degreed' kwargs['model_name'] = 'DegreedLearnerDataTransmissionAudit' kwargs['remote_user_id'] = 'degreed_user_email' super(DegreedLearnerTransmitter, self).transmit(payload, **kwargs)
Send a completion status call to Degreed using the client. Args: payload: The learner completion data payload to send to Degreed
juraj-google-style
def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None): t2t_env = rl_utils.setup_env( hparams, batch_size=hparams.real_batch_size, max_num_noops=hparams.max_num_noops ) if which_epoch_data is not None: if which_epoch_data == "last": which_epoch_data = infer_last_epoch_num(data_dir) assert isinstance(which_epoch_data, int), \ "{}".format(type(which_epoch_data)) t2t_env.start_new_epoch(which_epoch_data, data_dir) else: t2t_env.start_new_epoch(-999) return t2t_env
Load T2TGymEnv with data from one epoch. Args: hparams: hparams. data_dir: data directory. which_epoch_data: data from which epoch to load. Returns: env.
juraj-google-style
def get_name_or_instance_id(self, with_id=False): name = self.get_tag('Name', case_sensitive=False) if (name and (len(name.value.strip()) > 0)): return ('{0} ({1})'.format(name.value, self.id) if with_id else name.value) return self.id
Returns the name of an instance if existent, else returns the instance id Args: with_id (bool): Include the instance ID even if the name is found (default: False) Returns: Name and/or instance ID of the instance object
codesearchnet
def extract_numerics_alert(event): value = event.summary.value[0] debugger_plugin_metadata_content = None if value.HasField("metadata"): plugin_data = value.metadata.plugin_data if plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME: debugger_plugin_metadata_content = plugin_data.content if not debugger_plugin_metadata_content: raise ValueError("Event proto input lacks debugger plugin SummaryMetadata.") debugger_plugin_metadata_content = tf.compat.as_text( debugger_plugin_metadata_content) try: content_object = json.loads(debugger_plugin_metadata_content) device_name = content_object["device"] except (KeyError, ValueError) as e: raise ValueError("Could not determine device from JSON string %r, %r" % (debugger_plugin_metadata_content, e)) debug_op_suffix = ":DebugNumericSummary" if not value.node_name.endswith(debug_op_suffix): raise ValueError( "Event proto input does not have the expected debug op suffix %s" % debug_op_suffix) tensor_name = value.node_name[:-len(debug_op_suffix)] elements = tf_debug.load_tensor_from_event(event) nan_count = elements[constants.NAN_NUMERIC_SUMMARY_OP_INDEX] neg_inf_count = elements[constants.NEG_INF_NUMERIC_SUMMARY_OP_INDEX] pos_inf_count = elements[constants.POS_INF_NUMERIC_SUMMARY_OP_INDEX] if nan_count > 0 or neg_inf_count > 0 or pos_inf_count > 0: return NumericsAlert( device_name, tensor_name, event.wall_time, nan_count, neg_inf_count, pos_inf_count) return None
Determines whether a health pill event contains bad values. A bad value is one of NaN, -Inf, or +Inf. Args: event: (`Event`) A `tensorflow.Event` proto from `DebugNumericSummary` ops. Returns: An instance of `NumericsAlert`, if bad values are found. `None`, if no bad values are found. Raises: ValueError: if the event does not have the expected tag prefix or the debug op name is not the expected debug op name suffix.
juraj-google-style
def label_contains( node, triggers ): for trigger in triggers: if trigger.trigger_word in node.label: yield TriggerNode(trigger, node)
Determine if node contains any of the trigger words provided. Args: node(Node): CFG node to check. triggers(list[Union[Sink, Source]]): list of trigger words to look for. Returns: Iterable of TriggerNodes found. Can be multiple because multiple trigger words can be in one node.
juraj-google-style
def register_entity(self, entity_value, entity_type, alias_of=None): if alias_of: self.trie.insert(entity_value.lower(), data=(alias_of, entity_type)) else: self.trie.insert(entity_value.lower(), data=(entity_value, entity_type)) self.trie.insert(entity_type.lower(), data=(entity_type, 'Concept'))
Register an entity to be tagged in potential parse results Args: entity_value(str): the value/proper name of an entity instance (Ex: "The Big Bang Theory") entity_type(str): the type/tag of an entity instance (Ex: "Television Show") alias_of(str, optional): canonical entity value that this value is an alias of; when given, matches are tagged with the canonical value instead
juraj-google-style
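A minimal usage sketch, assuming this method is exposed on adapt's IntentDeterminationEngine (the engine class is an assumption inferred from the docstring examples):
from adapt.engine import IntentDeterminationEngine
engine = IntentDeterminationEngine()
# tag "The Big Bang Theory" as a "Television Show" in parse results
engine.register_entity("The Big Bang Theory", "Television Show")
# register an alias that resolves to the canonical entity value
engine.register_entity("TBBT", "Television Show", alias_of="The Big Bang Theory")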
def _convert_token_to_id(self, token, token_type='TOKEN_TIME') -> int: return self.encoder.get(f'{token}_{token_type}', int(self.unk_token))
Encodes MIDI tokens into transformer-generated token ids. Args: token (`int`): This denotes the token value. token_type (`str`): This denotes the type of the token. There are four types of MIDI tokens: "TOKEN_TIME", "TOKEN_VELOCITY", "TOKEN_NOTE" and "TOKEN_SPECIAL". Returns: `int`: returns the id of the token.
github-repos
def convert(self): return super(TFLiteConverterV2, self).convert()
Converts a TensorFlow GraphDef based on instance variables. Returns: The converted data in serialized format. Raises: ValueError: No concrete function is specified. Multiple concrete functions are specified. Input shape is not specified. Invalid quantization parameters.
github-repos
def _get_data_by_field(self, field_number): if (not self.is_data_loaded): self._import_data() if (not (0 <= field_number < self._num_of_fields)): raise ValueError(('Field number should be between 0-%d' % self._num_of_fields)) return self._data[field_number]
Return a data field by field number. This is a useful method to get the values for fields that Ladybug currently doesn't import by default. You can find the list of fields by typing EPWFields.fields Args: field_number: a value between 0 and 34 for different available EPW fields. Returns: An annual Ladybug list
codesearchnet
def add_channel(channel: EFBChannel): global master, slaves if isinstance(channel, EFBChannel): if (channel.channel_type == ChannelType.Slave): slaves[channel.channel_id] = channel else: master = channel else: raise TypeError('Channel instance is expected')
Register the channel with the coordinator. Args: channel (EFBChannel): Channel to register
codesearchnet
def forward(self, num_patches_height: int, num_patches_width: int) -> torch.Tensor: hpos_ids = torch.arange(num_patches_height, device=self.inv_freq.device).unsqueeze(1).expand(-1, num_patches_width) wpos_ids = torch.arange(num_patches_width, device=self.inv_freq.device).unsqueeze(0).expand(num_patches_height, -1) pos_ids = torch.stack([hpos_ids.flatten(), wpos_ids.flatten()], dim=-1) max_grid_size = max(num_patches_height, num_patches_width) seq = torch.arange(max_grid_size, device=self.inv_freq.device, dtype=self.inv_freq.dtype) rotary_pos_emb_full = torch.outer(seq, self.inv_freq) rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) return rotary_pos_emb
Calculate the Rotary Position Embedding (RoPE) for MLCDVisionModel based on the grid size. Args: num_patches_height (int): Number of patches in the height dimension. num_patches_width (int): Number of patches in the width dimension. Returns: torch.Tensor: Rotary positional embeddings for the given grid size.
github-repos
def decode_function_result(self, function_name, data): description = self.function_data[function_name] arguments = decode_abi(description['decode_types'], data) return arguments
Return the decoded result of a function call. Args: function_name (str): One of the existing functions described in the contract interface. data (bin): The encoded result from calling `function_name`. Returns: List[object]: The values returned by the call to `function_name`.
codesearchnet
def terminate_session(self, token): url = (self.rest_url + ('/session/%s' % token)) response = self._delete(url) if (not response.ok): return None return True
Terminates the session token, effectively logging out the user from all crowd-enabled services. Args: token: The session token. Returns: True: If session terminated None: If session termination failed
codesearchnet
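A minimal usage sketch, assuming this method lives on the python-crowd CrowdServer client (URL, credentials and token are placeholders):
from crowd import CrowdServer
cs = CrowdServer("https://crowd.example.com/crowd", "app-name", "app-password")
token = "abc123sessiontoken"  # obtained earlier from an authentication call
if cs.terminate_session(token) is None:
    print("session termination failed")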
def inspect_container(self, container): return self._result(self._get(self._url('/containers/{0}/json', container)), True)
Identical to the `docker inspect` command, but only for containers. Args: container (str): The container to inspect Returns: (dict): Similar to the output of `docker inspect`, but as a single dict Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
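A minimal usage sketch with docker-py's low-level APIClient (the container name is a placeholder):
import docker
client = docker.APIClient(base_url="unix://var/run/docker.sock")
info = client.inspect_container("my-container")
# the returned dict mirrors `docker inspect` output, e.g.:
print(info["State"]["Status"], info["Config"]["Image"])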
def create_transaction(self, to_account): from_account = self.statement_import.bank_account transaction = Transaction.objects.create() Leg.objects.create(transaction=transaction, account=from_account, amount=(+ (self.amount * (- 1)))) Leg.objects.create(transaction=transaction, account=to_account, amount=(- (self.amount * (- 1)))) transaction.date = self.date transaction.save() self.transaction = transaction self.save() return transaction
Create a transaction for this statement amount and account, into to_account This will also set this StatementLine's ``transaction`` attribute to the newly created transaction. Args: to_account (Account): The account the transaction is into / out of. Returns: Transaction: The newly created (and committed) transaction.
codesearchnet
def deps_from_import_graph(import_graph): def make_module(filename): return resolved_file_to_module(import_graph.provenance[filename]) def split_files(filenames): stubs = [] sources = [] for f in filenames: if _is_type_stub(f): stubs.append(f) else: sources.append(make_module(f)) return (stubs, sources) stubs_to_source_deps = collections.defaultdict(list) modules = [] for node, deps in reversed(import_graph.deps_list()): stubs, sources = split_files(_get_filenames(node)) flat_deps = utils.unique_list(itertools.chain.from_iterable((_get_filenames(d) for d in deps))) stub_deps, source_deps = split_files(flat_deps) for stub in stubs: stubs_to_source_deps[stub].extend(source_deps) for stub_dep in stub_deps: stubs_to_source_deps[stub].extend(stubs_to_source_deps[stub_dep]) if sources: for stub in stub_deps: source_deps.extend(stubs_to_source_deps[stub]) modules.append((tuple(sources), tuple(source_deps))) return modules
Construct PytypeRunner args from an importlab.ImportGraph instance. Kept as a separate function so PytypeRunner can be tested independently of importlab. Args: import_graph: An importlab.ImportGraph instance. Returns: List of (tuple of source modules, tuple of direct deps) in dependency order.
github-repos
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): super(UsernamePasswordCredential, self).read(input_stream, kmip_version=kmip_version) local_stream = BytearrayStream(input_stream.read(self.length)) if self.is_tag_next(enums.Tags.USERNAME, local_stream): self._username = primitives.TextString(tag=enums.Tags.USERNAME) self._username.read(local_stream, kmip_version=kmip_version) else: raise ValueError('Username/password credential encoding missing the username.') if self.is_tag_next(enums.Tags.PASSWORD, local_stream): self._password = primitives.TextString(tag=enums.Tags.PASSWORD) self._password.read(local_stream, kmip_version=kmip_version) self.is_oversized(local_stream)
Read the data encoding the UsernamePasswordCredential struct and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the username is missing from the encoding.
codesearchnet
def _to_backend_mesh(device_mesh): mesh_dims = list(zip(device_mesh.axis_names, device_mesh.shape)) return dtensor.create_distributed_mesh(mesh_dims=mesh_dims, local_devices=device_mesh.devices.flatten())
Convert the DeviceMesh to a TensorFlow backend-specific Mesh. Args: device_mesh: DeviceMesh instance to convert. Returns: A `tf.dtensor.Mesh` instance.
github-repos
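A minimal sketch of the DeviceMesh this helper consumes, assuming the Keras 3 distribution API:
from keras.distribution import DeviceMesh, list_devices
devices = list_devices()  # all locally visible CPUs/GPUs/TPUs
mesh = DeviceMesh(shape=(len(devices),), axis_names=("batch",), devices=devices)
# on the TensorFlow backend, helpers like the one above translate this into a
# tf.experimental.dtensor.Mesh with matching axis names and local devices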
def preprocess(source): doc = html5lib.parseFragment(source) source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8') source = source.replace(u'\n', u'').strip() source = re.sub(r'\s\s+', u' ', source) return source
Removes unnecessary line breaks and whitespace. Args: source (str): Input sentence. Returns: str: Preprocessed sentence.
juraj-google-style
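A minimal usage sketch of the function above (it depends on html5lib, xml.etree.ElementTree as ET and re; the expected output is shown as a comment):
import re
import html5lib
import xml.etree.ElementTree as ET
# with the preprocess() definition above in scope:
print(preprocess("<p>  Hello,\n   world!  </p>"))
# -> "Hello, world!" (line break removed, runs of spaces collapsed)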
def build(self): def _create_per_worker_dataset(): dataset = self._dataset_fn() return dataset per_worker_dataset = self._coordinator._create_per_worker_resources(_create_per_worker_dataset) dataset_fn_output_type_spec = self._dataset_fn.structured_outputs._type_spec for dataset_remote_value in per_worker_dataset._values: dataset_remote_value._type_spec = dataset_fn_output_type_spec return per_worker_dataset
Trigger dataset creation on workers without creating an iterator. Returns: A PerWorkerValues object containing a tuple of RemoteValues, themselves containing the built Dataset for each worker.
github-repos
def to_pytd_type(self, val: abstract.BaseValue) -> pytd.Type: if val is self._ctx.consts.Any: return pytd.AnythingType() elif isinstance(val, abstract.Union): return pytd_utils.JoinTypes((self.to_pytd_type(v) for v in val.options)) elif isinstance(val, abstract.PythonConstant): return pytd.NamedType(f'builtins.{val.constant.__class__.__name__}') elif isinstance(val, abstract.FunctionArgDict): return pytd.NamedType('builtins.dict') elif isinstance(val, abstract.SimpleClass): return pytd.GenericType(base_type=pytd.NamedType('builtins.type'), parameters=(pytd.NamedType(val.name),)) elif isinstance(val, abstract.BaseInstance): return pytd.NamedType(val.cls.name) elif isinstance(val, (abstract.BaseFunction, abstract.BoundFunction)): if len(val.signatures) > 1: fixed_length_posargs_only = False else: sig = val.signatures[0] fixed_length_posargs_only = not sig.defaults and (not sig.varargs_name) and (not sig.kwonly_params) and (not sig.kwargs_name) if fixed_length_posargs_only: pytd_sig, = self.to_pytd_def(val).signatures params = tuple((param.type for param in pytd_sig.params)) return pytd.CallableType(base_type=pytd.NamedType('typing.Callable'), parameters=params + (pytd_sig.return_type,)) else: ret = abstract.join_values(self._ctx, [frame.get_return_value() for frame in val.analyze()]) return pytd.GenericType(base_type=pytd.NamedType('typing.Callable'), parameters=(pytd.AnythingType(), self.to_pytd_type(ret))) else: raise NotImplementedError(f'to_pytd_type() not implemented for {val.__class__.__name__}: {val}')
Returns the type of the abstract value, as a pytd node. For example, if the abstract value is: PythonConstant(0) then to_pytd_type() produces: pytd.NamedType(int) Args: val: The abstract value.
github-repos
def ReadFromDirectory(self, artifacts_reader, path, extension='yaml'): for artifact_definition in artifacts_reader.ReadDirectory(path, extension=extension): self.RegisterDefinition(artifact_definition)
Reads artifact definitions into the registry from files in a directory. This function does not recurse into subdirectories. Args: artifacts_reader (ArtifactsReader): an artifacts reader. path (str): path of the directory to read from. extension (Optional[str]): extension of the filenames to read. Raises: KeyError: if a duplicate artifact definition is encountered.
codesearchnet
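A minimal usage sketch, assuming the Forensic Artifacts reader and registry classes (the directory path is a placeholder):
from artifacts import reader as artifacts_reader
from artifacts import registry as artifacts_registry
artifact_registry = artifacts_registry.ArtifactDefinitionsRegistry()
yaml_reader = artifacts_reader.YamlArtifactsReader()
artifact_registry.ReadFromDirectory(yaml_reader, "/usr/share/artifacts")
print(len(artifact_registry.GetDefinitions()), "definitions loaded")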
def git_merge(base, head, no_ff=False): pretend = context.get('pretend', False) branch = git.current_branch(refresh=True) if branch.name != base and not pretend: git_checkout(base) args = [] if no_ff: args.append('--no-ff') log.info("Merging <33>{}<32> into <33>{}<32>", head, base) shell.run('git merge {args} {branch}'.format( args=' '.join(args), branch=head, )) if branch.name != base and not pretend: git_checkout(branch.name)
Merge *head* into *base*. Args: base (str): The base branch. *head* will be merged into this branch. head (str): The branch that will be merged into *base*. no_ff (bool): If set to **True** it will force git to create a merge commit. If set to **False** (default) it will do a fast-forward merge if possible.
juraj-google-style
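A minimal usage sketch of the helper above (branch names are placeholders):
# merge the feature branch into develop, forcing a merge commit
git_merge('develop', 'feature/login', no_ff=True)
# roughly equivalent to: git checkout develop && git merge --no-ff feature/login,
# followed by checking the original branch back out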
def __init__(self, key=None, **kwargs): if not key: raise ValueError('Missing key.') super(RC4Decrypter, self).__init__() self._rc4_cipher = ARC4.new(key)
Initializes a decrypter. Args: key (Optional[bytes]): key. kwargs (dict): keyword arguments depending on the decrypter. Raises: ValueError: when key is not set.
juraj-google-style
def hessian(self, coordinates): N3 = coordinates.size hessian = numpy.zeros((N3, N3), float) for term in self.terms: term.add_to_hessian(coordinates, hessian) return hessian
Compute the force-field Hessian for the given coordinates. Argument: | ``coordinates`` -- A numpy array with the Cartesian atom coordinates, with shape (N,3). Returns: | ``hessian`` -- A numpy array with the Hessian, with shape (3*N, 3*N).
codesearchnet
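The quantity assembled above is the second-derivative matrix of the total force-field energy; restated as an equation (a restatement of the docstring, with the per-term sum taken from the code):
H_{ij} = \frac{\partial^2 E(\mathbf{x})}{\partial x_i \, \partial x_j}, \qquad i, j = 1, \dots, 3N,
where E(\mathbf{x}) = \sum_t E_t(\mathbf{x}) sums the per-term energies, so each term contributes additively to H via its add_to_hessian call.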