Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes: juraj-google-style, github-repos, codesearchnet).
def swo_speed_info(self): info = structs.JLinkSWOSpeedInfo() res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.GET_SPEED_INFO, ctypes.byref(info)) if res < 0: raise errors.JLinkException(res) return info
Retrieves information about the supported SWO speeds. Args: self (JLink): the ``JLink`` instance Returns: A ``JLinkSWOSpeedInfo`` instance describing the target's supported SWO speeds. Raises: JLinkException: on error
juraj-google-style
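A minimal usage sketch for the SWO speed query above, assuming the pylink-square package and a connected J-Link probe (the surrounding open/close calls are not part of the original record):

import pylink

jlink = pylink.JLink()
jlink.open()                     # attach to the first available J-Link emulator
info = jlink.swo_speed_info()    # JLinkSWOSpeedInfo structure described in the docstring
print(info)                      # inspect the supported SWO speed parameters
jlink.close()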
def from_spec(cls, spec): dtype = dtypes.as_dtype(spec.dtype) minimum = getattr(spec, 'minimum', dtype.min) maximum = getattr(spec, 'maximum', dtype.max) return BoundedTensorSpec(spec.shape, dtype, minimum, maximum, spec.name)
Returns a `TensorSpec` with the same shape and dtype as `spec`. If `spec` is a `BoundedTensorSpec`, then the new spec's bounds are set to `spec.minimum` and `spec.maximum`; otherwise, the bounds are set to `spec.dtype.min` and `spec.dtype.max`. >>> spec = tf.TensorSpec(shape=[8, 3], dtype=tf.int32, name="x") >>> BoundedTensorSpec.from_spec(spec) BoundedTensorSpec(shape=(8, 3), dtype=tf.int32, name='x', minimum=array(-2147483648, dtype=int32), maximum=array(2147483647, dtype=int32)) Args: spec: The `TypeSpec` used to create the new `BoundedTensorSpec`.
github-repos
def _GatherReturnElements(requested_return_elements, graph, results): return_outputs = c_api.TF_ImportGraphDefResultsReturnOutputs(results) return_opers = c_api.TF_ImportGraphDefResultsReturnOperations(results) combined_return_elements = [] outputs_idx = 0 opers_idx = 0 for name in requested_return_elements: if ':' in name: combined_return_elements.append(graph._get_tensor_by_tf_output(return_outputs[outputs_idx])) outputs_idx += 1 else: combined_return_elements.append(graph._get_operation_by_tf_operation(return_opers[opers_idx])) opers_idx += 1 return combined_return_elements
Returns the requested return elements from results. Args: requested_return_elements: list of strings of operation and tensor names graph: Graph results: wrapped TF_ImportGraphDefResults Returns: list of `Operation` and/or `Tensor` objects
github-repos
def merge_ids(self, token, channel, ids, delete=False): url = self.url() + "/merge/{}/".format(','.join([str(i) for i in ids])) req = self.remote_utils.get_url(url) if req.status_code != 200: raise RemoteDataUploadError('Could not merge ids {}'.format( ','.join([str(i) for i in ids]))) if delete: self.delete_ramon(token, channel, ids[1:]) return True
Call the restful endpoint to merge two RAMON objects into one. Arguments: token (str): The token to inspect channel (str): The channel to inspect ids (int[]): the list of the IDs to merge delete (bool : False): Whether to delete after merging. Returns: json: The ID as returned by ndstore
juraj-google-style
def notify_changes(self, changes): ret = [] child_changes = {} for change in changes: self._add_child_change(change, child_changes) if self.update_requests: serialized = serialize_object(self.data) for request in self.update_requests: ret.append(request.update_response(serialized)) if self.delta_requests: for change in changes: change[-1] = serialize_object(change[-1]) for request in self.delta_requests: ret.append(request.delta_response(changes)) for name, child_changes in child_changes.items(): ret += self.children[name].notify_changes(child_changes) return ret
Set our data and notify anyone listening Args: changes (list): [[path, optional data]] where path is the path to what has changed, and data is the unserialized object that has changed Returns: list: [(callback, Response)] that need to be called
juraj-google-style
def check_version_2(dataset): if ((float(dataset.get('version')) >= 2.0) if dataset.get('version') else False): return True else: return False
Checks if the json-stat version attribute exists and is equal to or greater than 2.0 for a given dataset. Args: dataset (OrderedDict): data in JSON-stat format, previously deserialized to a python object by json.load() or json.loads(). Returns: bool: True if version exists and is equal to or greater than 2.0, False otherwise. For datasets without the version attribute, always returns False.
codesearchnet
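A quick illustration of the check above; the dict literals stand in for a deserialized JSON-stat object:

from collections import OrderedDict

check_version_2(OrderedDict(version='2.0'))       # True  -- version present and >= 2.0
check_version_2(OrderedDict(version='1.3'))       # False -- version below 2.0
check_version_2(OrderedDict(label='no version'))  # False -- version attribute missing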
def writeline(self, line=b'', sep=b'\n', echo=None): self.writelines([line], sep, echo)
Write a byte sequence to the channel and terminate it with the separator (a line feed by default). Args: line(bytes): The line to send. sep(bytes): The separator to use after each line. echo(bool): Whether to echo the written data to stdout. Raises: EOFError: If the channel was closed before all data was sent.
juraj-google-style
def get_tabular_stream(self, url, **kwargs): self.close_response() file_type = kwargs.get('file_type') if (file_type is not None): kwargs['format'] = file_type del kwargs['file_type'] try: self.response = tabulator.Stream(url, **kwargs) self.response.open() return self.response except TabulatorException as e: raisefrom(DownloadError, ('Getting tabular stream for %s failed!' % url), e)
Get Tabulator stream. Args: url (str): URL to download **kwargs: headers (Union[int, List[int], List[str]]): Number of row(s) containing headers or list of headers file_type (Optional[str]): Type of file. Defaults to inferring. delimiter (Optional[str]): Delimiter used for values in each row. Defaults to inferring. Returns: tabulator.Stream: Tabulator Stream object
codesearchnet
def _GetProcessedStorageFilePath(self, task): filename = '{0:s}.plaso'.format(task.identifier) return os.path.join(self._processed_task_storage_path, filename)
Retrieves the path of a task storage file in the processed directory. Args: task (Task): task. Returns: str: path of a task storage file in the processed directory.
juraj-google-style
def get_vocabulary(preprocess_output_dir, name): vocab_file = os.path.join(preprocess_output_dir, CATEGORICAL_ANALYSIS % name) if not file_io.file_exists(vocab_file): raise ValueError('File %s not found in %s' % (CATEGORICAL_ANALYSIS % name, preprocess_output_dir)) labels = python_portable_string( file_io.read_file_to_string(vocab_file)).split('\n') label_values = [x for x in labels if x] return label_values
Loads the vocabulary file as a list of strings. Args: preprocess_output_dir: Should contain the file CATEGORICAL_ANALYSIS % name. name: name of the csv column. Returns: List of strings. Raises: ValueError: if file is missing.
juraj-google-style
def load_tensor_from_event_file(event_file_path): event = event_pb2.Event() with gfile.Open(event_file_path, 'rb') as f: event.ParseFromString(f.read()) return load_tensor_from_event(event)
Load a tensor from an event file. Assumes that the event file contains a `Event` protobuf and the `Event` protobuf contains a `Tensor` value. Args: event_file_path: (`str`) path to the event file. Returns: The tensor value loaded from the event file, as a `numpy.ndarray`. For uninitialized Tensors, returns `None`. For Tensors of data types that cannot be converted to `numpy.ndarray` (e.g., `tf.resource`), return `None`.
github-repos
def unescape(inp, quote='"'): if (len(inp) < 2): return inp output = '' unesc = False for act in inp: if ((act == quote) and unesc): output = output[:(- 1)] output += act if (act == '\\'): unesc = (not unesc) else: unesc = False return output
Unescape `quote` in string `inp`. Example usage:: >> unescape('hello \\"') 'hello "' Args: inp (str): String in which `quote` will be unescaped. quote (char, default "): Specify which character will be unescaped. Returns: str: Unescaped string.
codesearchnet
def __init__(self, string_table): self._string_table = string_table self._node_name_to_sample = {}
Constructor. Args: string_table: A `StringTable` object.
github-repos
def get_dense_tensor(self, transformation_cache, state_manager): if isinstance(self.categorical_column, SequenceCategoricalColumn): raise ValueError('In indicator_column: {}. categorical_column must not be of type SequenceCategoricalColumn. Suggested fix A: If you wish to use DenseFeatures, use a non-sequence categorical_column_with_*. Suggested fix B: If you wish to create sequence input, use SequenceFeatures instead of DenseFeatures. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column)) return transformation_cache.get(self, state_manager)
Returns dense `Tensor` representing feature. Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables. Returns: Dense `Tensor` created within `transform_feature`. Raises: ValueError: If `categorical_column` is a `SequenceCategoricalColumn`.
github-repos
def __init__(self, columns: list[str]) -> None: super().__init__(columns) if not columns: raise RuntimeError('Columns are not specified. Please specify the column for the op %s' % self.__class__.__name__)
Base Operation class for TFT data processing transformations. Processing logic for the transformation is defined in the apply_transform() method. If you have a custom transformation that is not supported by the existing transforms, you can extend this class and implement the apply_transform() method. Args: columns: List of column names to apply the transformation.
github-repos
def on_fail(self, record):
A function that is executed upon a test failure. User implementation is optional. Args: record: records.TestResultRecord, a copy of the test record for this test, containing all information of the test execution including exception objects.
github-repos
def near(point, dist, points): for cmpt in points: if haversine(point, cmpt) <= dist: return True return False
Determine if the given point is within dist of any of points. Args: point ((float,float)): A latitude, longitude float tuple. dist (int): A distance in mm ( base units ) points (list): A list of latitude, longitude float tuples to compare against.
juraj-google-style
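An illustrative call of the proximity check above; the coordinates are made up, and the distance threshold is interpreted in whatever base units the docstring's haversine() helper returns:

stops = [(40.7128, -74.0060), (34.0522, -118.2437)]   # NYC, LA
near((40.7130, -74.0055), 100000, stops)              # True if within 100000 base units of any stop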
def ProcessBlocks(self, block_limit=1000): self._lock.acquire() try: blockcount = 0 while ((self._current_height <= Blockchain.Default().Height) and ((block_limit == 0) or (blockcount < block_limit))): block = Blockchain.Default().GetBlockByHeight(self._current_height) if (block is not None): self.ProcessNewBlock(block) else: self._current_height += 1 blockcount += 1 self.SaveStoredData('Height', self._current_height) except Exception as e: logger.warn(('Could not process ::: %s ' % e)) finally: self._lock.release()
Method called on a loop to check the current height of the blockchain. If the height of the blockchain is more than the current stored height in the wallet, we get the next block in line and process it. In the case that the wallet height is far behind the height of the blockchain, we do this 1000 blocks at a time. Args: block_limit (int): the number of blocks to process synchronously. Defaults to 1000; set to 0 to block until the wallet is fully rebuilt.
codesearchnet
def predict_proba(self, X): return collections.deque(self.iter_predict_proba(X), maxlen=1).pop()
Returns the predicted probabilities for ``X``. Arguments: X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples. Sparse matrices are accepted only if they are supported by the weak model. Returns: array of shape (n_samples, n_classes) containing the predicted probabilities.
codesearchnet
def setup(self, hosts, files, use_tsk, reason, grr_server_url, grr_username, grr_password, approvers=None, verify=True): super(GRRFileCollector, self).setup( reason, grr_server_url, grr_username, grr_password, approvers=approvers, verify=verify) if files is not None: self.files = [item.strip() for item in files.strip().split(',')] self.hostnames = [item.strip() for item in hosts.strip().split(',')] self.use_tsk = use_tsk
Initializes a GRR file collector. Args: hosts: Comma-separated list of hostnames to launch the flow on. files: list of file paths. use_tsk: toggle for use_tsk flag on GRR flow. reason: justification for GRR access. grr_server_url: GRR server URL. grr_username: GRR username. grr_password: GRR password. approvers: list of GRR approval recipients. verify: boolean, whether to verify the GRR server's x509 certificate.
juraj-google-style
def __init__( self, max_r, number_of_bins ): self.max_r = max_r self.number_of_bins = number_of_bins self.data = np.zeros( number_of_bins ) self.dr = max_r / number_of_bins
Initialise a Rdf object for manipulating radial distribution functions. Args: max_r (Float): the maximum r value stored for g(r). number_of_bins (Int): number of bins for storing data about g(r). Returns: None
juraj-google-style
def __init__(self, pb_class_from: Type[FROM], pb_class_to: Type[TO], field_names_to_ignore: Optional[List[str]]=None): if field_names_to_ignore is None: field_names_to_ignore = [] self._pb_class_from = pb_class_from self._pb_class_to = pb_class_to self._field_names_to_ignore = field_names_to_ignore self._function_convert_field_names = [] self._convert_functions = [] self._assert_all_fields_are_handled()
Constructor for the ProtoConverter. Args: pb_class_from: the init method for the proto to convert from. pb_class_to: the init method for the proto to convert to. field_names_to_ignore: the fields from the source proto that will be ignored by the converter. Returns: ProtoConverter Raise: NotImplementedError: When creating the proto converter if there are fields not handled or ignored.
github-repos
def ParsePageVisitRow(self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) was_http_non_get = self._GetRowValue(query_hash, row, 'http_non_get') event_data = SafariHistoryPageVisitedEventData() event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.title = self._GetRowValue(query_hash, row, 'title') event_data.url = self._GetRowValue(query_hash, row, 'url') event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count') event_data.was_http_non_get = bool(was_http_non_get) timestamp = self._GetRowValue(query_hash, row, 'visit_time') date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a visited row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
codesearchnet
def ScanSource(self, source_path): if os.path.islink(source_path): source_path = os.path.realpath(source_path) if (not source_path.startswith('\\\\.\\') and not os.path.exists(source_path)): raise errors.SourceScannerError( 'No such device, file or directory: {0:s}.'.format(source_path)) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(source_path) try: self._source_scanner.Scan(scan_context) except (ValueError, dfvfs_errors.BackEndError) as exception: raise errors.SourceScannerError( 'Unable to scan source with error: {0!s}.'.format(exception)) if scan_context.source_type not in ( scan_context.SOURCE_TYPE_STORAGE_MEDIA_DEVICE, scan_context.SOURCE_TYPE_STORAGE_MEDIA_IMAGE): scan_node = scan_context.GetRootScanNode() self._source_path_specs.append(scan_node.path_spec) return scan_context scan_node = scan_context.GetRootScanNode() while len(scan_node.sub_nodes) == 1: scan_node = scan_node.sub_nodes[0] base_path_specs = [] if scan_node.type_indicator != ( dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION): self._ScanVolume(scan_context, scan_node, base_path_specs) else: partition_identifiers = self._GetTSKPartitionIdentifiers(scan_node) if not partition_identifiers: raise errors.SourceScannerError('No partitions found.') for partition_identifier in partition_identifiers: location = '/{0:s}'.format(partition_identifier) sub_scan_node = scan_node.GetSubNodeByLocation(location) self._ScanVolume(scan_context, sub_scan_node, base_path_specs) if not base_path_specs: raise errors.SourceScannerError( 'No supported file system found in source.') self._source_path_specs = base_path_specs return scan_context
Scans the source path for volume and file systems. This function sets the internal source path specification and source type values. Args: source_path (str): path to the source. Returns: dfvfs.SourceScannerContext: source scanner context. Raises: SourceScannerError: if the format of or within the source is not supported.
juraj-google-style
def events_from_file(filepath): records = list(tf_record.tf_record_iterator(filepath)) result = [] for r in records: event = event_pb2.Event() event.ParseFromString(r) result.append(event) return result
Returns all events in a single event file. Args: filepath: Path to the event file. Returns: A list of all tf.Event protos in the event file.
github-repos
def remove_results(vcs, signature): results_directory = _get_results_directory(vcs, signature) if not os.path.exists(results_directory): raise ResultsNotFoundError shutil.rmtree(results_directory)
Remove saved results for this signature. Args: vcs (easyci.vcs.base.Vcs) signature (str) Raises: ResultsNotFoundError
juraj-google-style
def right_shift(x, y): if any_symbolic_tensors((x, y)): return RightShift().symbolic_call(x, y) return backend.numpy.right_shift(x, y)
Shift the bits of an integer to the right. Bits are shifted to the right by `y`. Because the internal representation of numbers is in binary format, this operation is equivalent to dividing `x` by `2**y`. Args: x: Input integer tensor. y: Input integer tensor. Returns: Result tensor.
github-repos
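A hedged sketch of the divide-by-2**y equivalence noted in the docstring, using the public keras.ops namespace:

import numpy as np
from keras import ops

ops.right_shift(np.array([8, 9, 10]), 1)   # [4, 4, 5] -- same as floor-dividing by 2**1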
def from_bigquery(sql): if isinstance(sql, bq.Query): sql = sql._expanded_sql() parts = sql.split('.') if ((len(parts) == 1) or (len(parts) > 3) or any(((' ' in x) for x in parts))): sql = (('(' + sql) + ')') else: sql = (('`' + sql) + '`') query = bq.Query(('SELECT target, predicted, count(*) as count FROM %s group by target, predicted' % sql)) df = query.execute().result().to_dataframe() labels = sorted((set(df['target']) | set(df['predicted']))) labels_count = len(labels) df['target'] = [labels.index(x) for x in df['target']] df['predicted'] = [labels.index(x) for x in df['predicted']] cm = [([0] * labels_count) for i in range(labels_count)] for (index, row) in df.iterrows(): cm[row['target']][row['predicted']] = row['count'] return ConfusionMatrix(cm, labels)
Create a ConfusionMatrix from a BigQuery table or query. Args: sql: Can be one of: A SQL query string. A Bigquery table string. A Query object defined with '%%bq query --name [query_name]'. The query results or table must include "target", "predicted" columns. Returns: A ConfusionMatrix that can be plotted. Raises: ValueError if query results or table does not include 'target' or 'predicted' columns.
codesearchnet
def handle_error(self): if (not self.tasks): return self.mark_parent_tasks_as_failed(self.cur_task, flush_logs=True) for (index, task) in enumerate(self.tasks.values()): if self.should_show_by_depth((index + 1)): continue start_task_header = logging.LogRecord('', logging.INFO, '', 0, '', [], None) start_task_header.msg = ColorFormatter.colored('default', START_TASK_MSG) start_task_header.task = task.name self.pretty_emit(start_task_header, is_header=True, task_level=(index + 1)) for old_record in self.tasks[self.cur_task]: self.pretty_emit(old_record) self.tasks[self.cur_task].clear()
Handles an error log record that should be shown Returns: None
codesearchnet
def of(seconds: TimestampTypes) -> 'Timestamp': if isinstance(seconds, Timestamp): return seconds elif isinstance(seconds, (int, float)): return Timestamp(seconds) elif isinstance(seconds, datetime.datetime): return Timestamp.from_utc_datetime(seconds) else: raise TypeError('Cannot interpret %s %s as Timestamp.' % (seconds, type(seconds)))
Return the Timestamp for the given number of seconds. If the input is already a Timestamp, the input itself will be returned. Args: seconds: Number of seconds as int, float, long, or Timestamp. Returns: Corresponding Timestamp object.
github-repos
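A short sketch of the three accepted input types described above; the import path is Apache Beam's public Timestamp utility:

import datetime
import pytz
from apache_beam.utils.timestamp import Timestamp

Timestamp.of(1700000000)                                      # int/float seconds -> Timestamp
Timestamp.of(Timestamp(5))                                    # an existing Timestamp passes through
Timestamp.of(datetime.datetime(2024, 1, 1, tzinfo=pytz.utc))  # UTC datetime -> Timestamp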
def add(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]: out = math_ops.add(input_tensor, input_tensor) return {'output': out}
Performs an add operation. Args: input_tensor: Input tensor to perform add on. Returns: A map of: output key -> output result.
github-repos
def validate(self, definition, version=None, strict=False): if (not HAS_KUBERNETES_VALIDATE): raise KubernetesValidateMissing() errors = list() warnings = list() try: if (version is None): try: version = self.version['kubernetes']['gitVersion'] except KeyError: version = kubernetes_validate.latest_version() kubernetes_validate.validate(definition, version, strict) except kubernetes_validate.utils.ValidationError as e: errors.append(('resource definition validation error at %s: %s' % ('.'.join([str(item) for item in e.path]), e.message))) except VersionNotSupportedError as e: errors.append(('Kubernetes version %s is not supported by kubernetes-validate' % version)) except kubernetes_validate.utils.SchemaNotFoundError as e: warnings.append(('Could not find schema for object kind %s with API version %s in Kubernetes version %s (possibly Custom Resource?)' % (e.kind, e.api_version, e.version))) return (warnings, errors)
validate checks a kubernetes resource definition Args: definition (dict): resource definition version (str): version of kubernetes to validate against strict (bool): whether unexpected additional properties should be considered errors Returns: warnings (list), errors (list): warnings are missing validations, errors are validation failures
codesearchnet
def ranseed(seed=None): if (seed is None): seed = numpy.random.randint(1, int(2000000000.0), size=3) try: seed = tuple(seed) except TypeError: pass numpy.random.seed(seed) ranseed.seed = seed return seed
Seed random number generators with tuple ``seed``. Argument ``seed`` is an integer or a :class:`tuple` of integers that is used to seed the random number generators used by :mod:`numpy` and :mod:`random` (and therefore by :mod:`gvar`). Reusing the same ``seed`` results in the same set of random numbers. ``ranseed`` generates its own seed when called without an argument or with ``seed=None``. This seed is stored in ``ranseed.seed`` and also returned by the function. The seed can be used to regenerate the same set of random numbers at a later time. Args: seed (int, tuple, or None): Seed for generator. Generates a random tuple if ``None``. Returns: The seed used to reseed the generator.
codesearchnet
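A small reproducibility sketch of the seeding behaviour described above (assumes numpy is available and ranseed is in scope):

import numpy

seed = ranseed()            # seeds numpy/random and returns the tuple it used
a = numpy.random.rand(3)
ranseed(seed)               # reseeding with the stored tuple ...
b = numpy.random.rand(3)    # ... reproduces the same draws: (a == b).all() is True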
def contains(self, name): try: self._api.buckets_get(name) except google.datalab.utils.RequestException as e: if (e.status == 404): return False raise e except Exception as e: raise e return True
Checks if the specified bucket exists. Args: name: the name of the bucket to lookup. Returns: True if the bucket exists; False otherwise. Raises: Exception if there was an error requesting information about the bucket.
codesearchnet
def prepare_locust_tests(path): tests_mapping = loader.load_tests(path) testcases = parser.parse_tests(tests_mapping) locust_tests = [] for testcase in testcases: testcase_weight = testcase.get("config", {}).pop("weight", 1) for _ in range(testcase_weight): locust_tests.append(testcase) return locust_tests
prepare locust testcases Args: path (str): testcase file path. Returns: list: locust tests data [ testcase1_dict, testcase2_dict ]
juraj-google-style
class _ConfusionMatrixConditionCount(Metric): def __init__(self, confusion_matrix_cond, thresholds=None, name=None, dtype=None): super(_ConfusionMatrixConditionCount, self).__init__(name=name, dtype=dtype) self._confusion_matrix_cond = confusion_matrix_cond self.init_thresholds = thresholds self.thresholds = metrics_utils.parse_init_thresholds(thresholds, default_threshold=0.5) self._thresholds_distributed_evenly = metrics_utils.is_evenly_distributed_thresholds(self.thresholds) self.accumulator = self.add_weight('accumulator', shape=(len(self.thresholds),), initializer=init_ops.zeros_initializer) def update_state(self, y_true, y_pred, sample_weight=None): return metrics_utils.update_confusion_matrix_variables({self._confusion_matrix_cond: self.accumulator}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight) def result(self): if len(self.thresholds) == 1: result = self.accumulator[0] else: result = self.accumulator return tensor_conversion.convert_to_tensor_v2_with_dispatch(result) def reset_state(self): num_thresholds = len(to_list(self.thresholds)) backend.batch_set_value([(v, np.zeros((num_thresholds,))) for v in self.variables]) def get_config(self): config = {'thresholds': self.init_thresholds} base_config = super(_ConfusionMatrixConditionCount, self).get_config() return dict(list(base_config.items()) + list(config.items()))
Calculates the number of the given confusion matrix condition. Args: confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix` conditions. thresholds: (Optional) Defaults to 0.5. A float value or a python list/tuple of float threshold values in [0, 1]. A threshold is compared with prediction values to determine the truth value of predictions (i.e., above the threshold is `true`, below is `false`). One metric value is generated for each threshold value. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result.
github-repos
def set_column(self, X, column, value): if isinstance(X, pd.DataFrame): X.loc[:, column] = value else: X[:, column] = value return X
Sets a column on the matrix X with the given value. Args: X: `numpy.ndarray` or `pandas.DataFrame`. column: `int` or `str`. value: `np.ndarray` with shape (1,) Returns: `np.ndarray` or `pandas.DataFrame` with the inserted column.
codesearchnet
def __init__(self, context): self.logdir = context.logdir self.multiplexer = context.multiplexer self.plugin_logdir = plugin_asset_util.PluginDirectory( self.logdir, PLUGIN_NAME) self.stub = None self.master_tpu_unsecure_channel = context.flags.master_tpu_unsecure_channel self._is_active = False self._is_active_lock = threading.Lock()
Constructs a profiler plugin for TensorBoard. This plugin adds handlers for performance-related frontends. Args: context: A base_plugin.TBContext instance.
juraj-google-style
def get_protocol_version(protocol=None, target=None): target = get_py_internals(target) if (protocol is None): protocol = target['pickle_default_protocol'] if (protocol > cPickle.HIGHEST_PROTOCOL): warnings.warn(('Downgrading pickle protocol, running python supports up to %d.' % cPickle.HIGHEST_PROTOCOL)) protocol = cPickle.HIGHEST_PROTOCOL target_highest_protocol = target['pickle_highest_protocol'] if (protocol > target_highest_protocol): warnings.warn(('Downgrading pickle protocol, target python supports up to %d.' % target_highest_protocol)) protocol = target_highest_protocol return protocol
Return a suitable pickle protocol version for a given target. Arguments: target: The internals description of the targeted python version. If this is ``None`` the specification of the currently running python version will be used. protocol(None or int): The requested protocol version (or None for the default of the target python version). Returns: int: A suitable pickle protocol version.
codesearchnet
def set_hook_data(self, key, data): if not isinstance(data, collections.Mapping): raise ValueError("Hook (key: %s) data must be an instance of " "collections.Mapping (a dictionary for " "example)." % key) if key in self.hook_data: raise KeyError("Hook data for key %s already exists, each hook " "must have a unique data_key.", key) self.hook_data[key] = data
Set hook data for the given key. Args: key(str): The key to store the hook data in. data(:class:`collections.Mapping`): A dictionary of data to store, as returned from a hook.
juraj-google-style
def set_energy_upperbound(self, spins, offset=0): spin_energy = self.energy_upperbound(spins) self.assertions.add(GE(spin_energy, self.gap + offset))
Upper bound the energy of Theta with spins fixed to be greater than (gap + offset). Args: spins (dict): Spin values for a subset of the variables in Theta. offset (float): A value that is added to the upper bound. Default value is 0. Notes: Add equality constraint to assertions.
juraj-google-style
def on_test_batch_end(self, batch, logs=None):
Called at the end of a batch in `evaluate` methods. Also called at the end of a validation batch in the `fit` methods, if validation data is provided. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch.
github-repos
def _ParseLine(self, parser_mediator, structure): (month, day_of_month, year, hours, minutes, seconds, milliseconds) = structure.date_time year += 2000 time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds, milliseconds) try: date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(time_elements_tuple=time_elements_tuple) except ValueError: parser_mediator.ProduceExtractionWarning('invalid date time value: {0!s}'.format(structure.date_time)) return event_data = SkyDriveLogEventData() event_data.detail = structure.detail.replace('\n', ' ') event_data.log_level = structure.log_level event_data.module = structure.module event_data.source_code = structure.source_code event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a logline and store appropriate attributes. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file.
codesearchnet
def list(self, pattern='*'): if (self._descriptors is None): self._descriptors = self._client.list_resource_descriptors(filter_string=self._filter_string) return [resource for resource in self._descriptors if fnmatch.fnmatch(resource.type, pattern)]
Returns a list of resource descriptors that match the filters. Args: pattern: An optional pattern to further filter the descriptors. This can include Unix shell-style wildcards. E.g. ``"aws*"``, ``"*cluster*"``. Returns: A list of ResourceDescriptor objects that match the filters.
codesearchnet
def _Dhcpcd(self, interfaces, logger): for interface in interfaces: dhcpcd = ['/sbin/dhcpcd'] try: subprocess.check_call(dhcpcd + ['-x', interface]) except subprocess.CalledProcessError: logger.info('Dhcpcd not yet running for interface %s.', interface) try: subprocess.check_call(dhcpcd + [interface]) except subprocess.CalledProcessError: logger.warning('Could not activate interface %s.', interface)
Use dhcpcd to activate the interfaces. Args: interfaces: list of string, the output device names to enable. logger: logger object, used to write to SysLog and serial port.
juraj-google-style
def unreduce_like(array, original_array, axis, keepdims): atype = type(array) unreducer = unreducers[atype] shape = shape_functions[atype] return unreducer(array, shape(original_array), axis, keepdims)
Reverse summing over a dimension. Args: array: The array that was reduced. original_array: An array whose shape to unreduce to. axis: The axis or axes that were summed. keepdims: Whether these axes were kept as singleton axes. Returns: An array with axes broadcast to match the shape of the original array.
juraj-google-style
def ProcessConfigOverrides(filename): abs_filename = os.path.abspath(filename) cfg_filters = [] keep_looking = True while keep_looking: (abs_path, base_name) = os.path.split(abs_filename) if (not base_name): break cfg_file = os.path.join(abs_path, 'CPPLINT.cfg') abs_filename = abs_path if (not os.path.isfile(cfg_file)): continue try: with open(cfg_file) as file_handle: for line in file_handle: (line, _, _) = line.partition('#') if (not line.strip()): continue (name, _, val) = line.partition('=') name = name.strip() val = val.strip() if (name == 'set noparent'): keep_looking = False elif (name == 'filter'): cfg_filters.append(val) elif (name == 'exclude_files'): if base_name: pattern = re.compile(val) if pattern.match(base_name): sys.stderr.write(('Ignoring "%s": file excluded by "%s". File path component "%s" matches pattern "%s"\n' % (filename, cfg_file, base_name, val))) return False elif (name == 'linelength'): global _line_length try: _line_length = int(val) except ValueError: sys.stderr.write('Line length must be numeric.') else: sys.stderr.write(('Invalid configuration option (%s) in file %s\n' % (name, cfg_file))) except IOError: sys.stderr.write(("Skipping config file '%s': Can't open for reading\n" % cfg_file)) keep_looking = False for filter in reversed(cfg_filters): _AddFilters(filter) return True
Loads the configuration files and processes the config overrides. Args: filename: The name of the file being processed by the linter. Returns: False if the current |filename| should not be processed further.
codesearchnet
def confirm(message: Text, default: bool=True, qmark: Text=DEFAULT_QUESTION_PREFIX, style: Optional[Style]=None, **kwargs: Any) -> Question: merged_style = merge_styles([DEFAULT_STYLE, style]) status = {'answer': None} def get_prompt_tokens(): tokens = [] tokens.append(('class:qmark', qmark)) tokens.append(('class:question', ' {} '.format(message))) if (status['answer'] is not None): answer = ' {}'.format((YES if status['answer'] else NO)) tokens.append(('class:answer', answer)) else: instruction = ' {}'.format((YES_OR_NO if default else NO_OR_YES)) tokens.append(('class:instruction', instruction)) return to_formatted_text(tokens) bindings = KeyBindings() @bindings.add(Keys.ControlQ, eager=True) @bindings.add(Keys.ControlC, eager=True) def _(event): event.app.exit(exception=KeyboardInterrupt, style='class:aborting') @bindings.add('n') @bindings.add('N') def key_n(event): status['answer'] = False event.app.exit(result=False) @bindings.add('y') @bindings.add('Y') def key_y(event): status['answer'] = True event.app.exit(result=True) @bindings.add(Keys.ControlM, eager=True) def set_answer(event): status['answer'] = default event.app.exit(result=default) @bindings.add(Keys.Any) def other(event): 'Disallow inserting other text.' pass return Question(PromptSession(get_prompt_tokens, key_bindings=bindings, style=merged_style, **kwargs).app)
Prompt the user to confirm or reject. This question type can be used to prompt the user for a confirmation of a yes-or-no question. If the user just hits enter, the default value will be returned. Args: message: Question text default: Default value will be returned if the user just hits enter. qmark: Question prefix displayed in front of the question. By default this is a `?` style: A custom color and style for the question parts. You can configure colors as well as font types for different elements. Returns: Question: Question instance, ready to be prompted (using `.ask()`).
codesearchnet
def _validate_alias_file_content(alias_file_path, url=''): alias_table = get_config_parser() try: alias_table.read(alias_file_path) for (alias_name, alias_command) in reduce_alias_table(alias_table): _validate_alias_name(alias_name) _validate_alias_command(alias_command) _validate_alias_command_level(alias_name, alias_command) _validate_pos_args_syntax(alias_name, alias_command) except Exception as exception: error_msg = (CONFIG_PARSING_ERROR % AliasManager.process_exception_message(exception)) error_msg = error_msg.replace(alias_file_path, (url or alias_file_path)) raise CLIError(error_msg)
Make sure the alias name and alias command in the alias file is in valid format. Args: The alias file path to import aliases from.
codesearchnet
class DetrEncoder(DetrPreTrainedModel): def __init__(self, config: DetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.layers = nn.ModuleList([DetrEncoderLayer(config) for _ in range(config.encoder_layers)]) self.post_init() def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: to_drop = True if to_drop: layer_outputs = (None, None) else: layer_outputs = encoder_layer(hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`DetrEncoderLayer`]. The encoder updates the flattened feature map through multiple self-attention layers. Small tweak for DETR: - object_queries are added to the forward pass. Args: config: DetrConfig
github-repos
def addColumn(self, header, values=[]): if len(values) == 0: self._impl.addColumn(header) else: assert len(values) == self.getNumRows() if any(isinstance(value, basestring) for value in values): values = list(map(str, values)) self._impl.addColumnStr(header, values) elif all(isinstance(value, Real) for value in values): values = list(map(float, values)) self._impl.addColumnDbl(header, values) else: raise NotImplementedError
Add a new column with the corresponding header and values to the dataframe. Args: header: The name of the new column. values: A list of size :func:`~amplpy.DataFrame.getNumRows` with all the values of the new column.
juraj-google-style
def WriteSignedBinary(binary_urn, binary_content, private_key, public_key, chunk_size=1024, token=None): if _ShouldUseLegacyDatastore(): collects.GRRSignedBlob.NewFromContent(binary_content, binary_urn, chunk_size=chunk_size, token=token, private_key=private_key, public_key=public_key) if data_store.RelationalDBEnabled(): blob_references = rdf_objects.BlobReferences() for chunk_offset in range(0, len(binary_content), chunk_size): chunk = binary_content[chunk_offset:(chunk_offset + chunk_size)] blob_rdf = rdf_crypto.SignedBlob() blob_rdf.Sign(chunk, private_key, verify_key=public_key) blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob_rdf.SerializeToString()) blob_references.items.Append(rdf_objects.BlobReference(offset=chunk_offset, size=len(chunk), blob_id=blob_id)) data_store.REL_DB.WriteSignedBinaryReferences(_SignedBinaryIDFromURN(binary_urn), blob_references)
Signs a binary and saves it to the datastore. If a signed binary with the given URN already exists, its contents will get overwritten. Args: binary_urn: URN that should serve as a unique identifier for the binary. binary_content: Contents of the binary, as raw bytes. private_key: Key that should be used for signing the binary contents. public_key: Key that should be used to verify the signature generated using the private key. chunk_size: Size, in bytes, of the individual blobs that the binary contents will be split to before saving to the datastore. token: ACL token to use with the legacy (non-relational) datastore.
codesearchnet
def _filter_pb(field_or_unary): if isinstance(field_or_unary, query_pb2.StructuredQuery.FieldFilter): return query_pb2.StructuredQuery.Filter(field_filter=field_or_unary) elif isinstance(field_or_unary, query_pb2.StructuredQuery.UnaryFilter): return query_pb2.StructuredQuery.Filter(unary_filter=field_or_unary) else: raise ValueError('Unexpected filter type', type(field_or_unary), field_or_unary)
Convert a specific protobuf filter to the generic filter type. Args: field_or_unary (Union[google.cloud.proto.firestore.v1beta1.\ query_pb2.StructuredQuery.FieldFilter, google.cloud.proto.\ firestore.v1beta1.query_pb2.StructuredQuery.FieldFilter]): A field or unary filter to convert to a generic filter. Returns: google.cloud.firestore_v1beta1.types.\ StructuredQuery.Filter: A "generic" filter. Raises: ValueError: If ``field_or_unary`` is not a field or unary filter.
codesearchnet
def pivot_samples(self, values, index='ID_REF'): data = [] for gsm in self.gsms.values(): tmp_data = gsm.table.copy() tmp_data['name'] = gsm.name data.append(tmp_data) ndf = concat(data).pivot(index=index, values=values, columns='name') return ndf
Pivot samples by specified column. Construct a table in which columns (names) are the samples, index is a specified column eg. ID_REF and values in the columns are of one specified type. Args: values (:obj:`str`): Column name present in all GSMs. index (:obj:`str`, optional): Column name that will become an index in pivoted table. Defaults to "ID_REF". Returns: :obj:`pandas.DataFrame`: Pivoted data
codesearchnet
def listtransactions(self, user_id="", count=10, start_at=0): txlist = self.rpc.call("listtransactions", user_id, count, start_at) self.logger.debug("Got transaction list for " + str(user_id)) return txlist
List all transactions associated with this account. Args: user_id (str): this user's unique identifier count (int): number of transactions to return (default=10) start_at (int): start the list at this transaction (default=0) Returns: list [dict]: transactions associated with this user's account
juraj-google-style
def split_metrics_by_namespace_and_name(metrics, namespace, name): matching_metrics = [] not_matching_metrics = [] for dist in metrics: if dist.key.metric.namespace == namespace and dist.key.metric.name == name: matching_metrics.append(dist) else: not_matching_metrics.append(dist) return (matching_metrics, not_matching_metrics)
Splits metrics list namespace and name. Args: metrics: list of metrics from pipeline result namespace(str): filter metrics by namespace name(str): filter metrics by name Returns: two lists - one of metrics which are matching filters and second of not matching
github-repos
def get_id(date=None, project: str = 'sip', instance_id: int = None) -> str: if date is None: date = datetime.datetime.utcnow() if isinstance(date, datetime.datetime): date = date.strftime('%Y%m%d') if instance_id is None: instance_id = randint(0, 9999) return 'SBI-{}-{}-{:04d}'.format(date, project, instance_id)
Get a SBI Identifier. Args: date (str or datetime.datetime, optional): UTC date of the SBI project (str, optional ): Project Name instance_id (int, optional): SBI instance identifier Returns: str, Scheduling Block Instance (SBI) ID.
juraj-google-style
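Two illustrative calls for the SBI identifier helper above (the random instance id will of course vary):

get_id()                                                 # e.g. 'SBI-20240101-sip-0042'
get_id(date='20190403', project='test', instance_id=7)   # 'SBI-20190403-test-0007'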
def field_content_length(msg: message.Message, field: Union[descriptor.FieldDescriptor, str]) -> int: if isinstance(field, str): field = _field_descriptor_for_name(msg, field) if field_is_repeated(field): return len(getattr(msg, field.name)) return 1 if msg.HasField(field.name) else 0
Returns the size of the field. Args: msg: The Message whose fields to examine. field: The FieldDescriptor or name of the field to examine. Returns: The number of elements at the provided field. If field describes a singular protobuf field, this will return 1. If the field is not set, returns 0.
github-repos
def __write_error(self, status_code, error_message=None): if (error_message is None): error_message = httplib.responses[status_code] status = ('%d %s' % (status_code, httplib.responses[status_code])) message = EndpointsErrorMessage(state=EndpointsErrorMessage.State.APPLICATION_ERROR, error_message=error_message) return (status, self.__PROTOJSON.encode_message(message))
Return the HTTP status line and body for a given error code and message. Args: status_code: HTTP status code to be returned. error_message: Error message to be returned. Returns: Tuple (http_status, body): http_status: HTTP status line, e.g. 200 OK. body: Body of the HTTP request.
codesearchnet
def convert_convtranspose(params, w_name, scope_name, inputs, layers, weights, names): print('Converting transposed convolution ...') if names == 'short': tf_name = 'C' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) bias_name = '{0}.bias'.format(w_name) weights_name = '{0}.weight'.format(w_name) if len(weights[weights_name].numpy().shape) == 4: W = weights[weights_name].numpy().transpose(2, 3, 1, 0) height, width, n_filters, channels = W.shape n_groups = params['group'] if n_groups > 1: raise AssertionError('Cannot convert conv1d with groups != 1') if params['dilations'][0] > 1: raise AssertionError('Cannot convert conv1d with dilation_rate != 1') if bias_name in weights: biases = weights[bias_name].numpy() has_bias = True else: biases = None has_bias = False input_name = inputs[0] if has_bias: weights = [W, biases] else: weights = [W] conv = keras.layers.Conv2DTranspose( filters=n_filters, kernel_size=(height, width), strides=(params['strides'][0], params['strides'][1]), padding='valid', output_padding=0, weights=weights, use_bias=has_bias, activation=None, dilation_rate=params['dilations'][0], bias_initializer='zeros', kernel_initializer='zeros', name=tf_name ) layers[scope_name] = conv(layers[input_name]) layers[scope_name].set_shape(layers[scope_name]._keras_shape) pads = params['pads'] if pads[0] > 0: assert(len(pads) == 2 or (pads[2] == pads[0] and pads[3] == pads[1])) crop = keras.layers.Cropping2D( pads[:2], name=tf_name + '_crop' ) layers[scope_name] = crop(layers[scope_name]) else: raise AssertionError('Layer is not supported for now')
Convert transposed convolution layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def format_float(digit=0, is_pct=False): if is_pct: space = (' ' if (digit < 0) else '') fmt = f'{{:{space}.{abs(int(digit))}%}}' return (lambda vv: ('NaN' if np.isnan(vv) else fmt.format(vv))) else: return (lambda vv: ('NaN' if np.isnan(vv) else (f'{{:,.{digit}f}}'.format(vv) if vv else ('-' + (' ' * abs(digit))))))
Number display format for pandas Args: digit: number of digits to keep if negative, add one space in front of positive pct is_pct: % display Returns: lambda function to format floats Examples: >>> format_float(0)(1e5) '100,000' >>> format_float(1)(1e5) '100,000.0' >>> format_float(-1, True)(.2) ' 20.0%' >>> format_float(-1, True)(-.2) '-20.0%' >>> pd.options.display.float_format = format_float(2)
codesearchnet
def shutdown(self, vm_names=None, reboot=False): self.virt_env.shutdown(vm_names, reboot)
Shutdown this prefix Args: vm_names(list of str): List of the vms to shutdown reboot(bool): If true, reboot the requested vms Returns: None
juraj-google-style
def _match_value_against_type(self, value: cfg.Binding, other_type: abstract.BaseValue, subst: _SubstType, view: _ViewType) -> _SubstType | None: left = value.data left = abstract_utils.unwrap_final(left) other_type = abstract_utils.unwrap_final(other_type) is_recursive = abstract_utils.is_recursive_annotation(other_type) if is_recursive: key = (left, other_type) if key in self._recursive_annots_cache: return subst if self._recursive_annots_cache[key] else None self._recursive_annots_cache[key] = True subst = self._match_nonfinal_value_against_type(left, value, other_type, subst, view) if is_recursive: self._recursive_annots_cache[key] = subst is not None return subst
One-way unify value into pytd type given a substitution. Args: value: A cfg.Binding. other_type: A BaseValue instance. subst: The current substitution. This dictionary is not modified. view: A mapping of Variable to Value. Returns: A new (or unmodified original) substitution dict if the matching succeeded, None otherwise.
github-repos
def In(self, *values): self._awql = self._CreateMultipleValuesCondition(values, 'IN') return self._query_builder
Sets the type of the WHERE clause as "in". Args: *values: The values to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
juraj-google-style
def get_field(self, key: str) -> Optional[Field]: if key in self._fields: return self._fields[key] if self._allow_nonconst_keys: for key_spec, field in self._fields.items(): if key_spec.match(key): return field return None
Get field definition (Field) for a key. Args: key: string as input key. Returns: Matched field. A field is considered a match when: * Its key spec is a ConstStrKey that equals to the input key. * Or it's the first field whose key spec is a NonConstKey which matches the input key.
github-repos
def forward_loss(self, pixel_values, pred, mask, interpolate_pos_encoding: bool=False): target = self.patchify(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) if self.config.norm_pix_loss: mean = target.mean(dim=-1, keepdim=True) var = target.var(dim=-1, keepdim=True) target = (target - mean) / (var + 1e-06) ** 0.5 loss = (pred - target) ** 2 loss = loss.mean(dim=-1) loss = (loss * mask).sum() / mask.sum() return loss
Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. pred (`torch.FloatTensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`: Predicted pixel values. mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Tensor indicating which patches are masked (1) and which are not (0). interpolate_pos_encoding (`bool`, *optional*, default `False`): interpolation flag passed during the forward pass. Returns: `torch.FloatTensor`: Pixel reconstruction loss.
github-repos
def monitor(self, job, event_monitor, result_monitor): logging.info('starting to monitor the job') last_active_ms = -1 perf = None cancel_job = False waiting_for_shutdown = False while True: now = int(time.time() * 1000) logging.debug('now is %d', now) curr_perf = NexmarkLauncher.get_performance(job, event_monitor, result_monitor) if perf is None or curr_perf.has_progress(perf): last_active_ms = now if self.streaming and (not waiting_for_shutdown): quiet_duration = (now - last_active_ms) if curr_perf.event_count >= self.args.num_events and curr_perf.result_count >= 0 and (quiet_duration > self.DONE_DELAY): logging.info('streaming query appears to have finished executing') waiting_for_shutdown = True cancel_job = True elif quiet_duration > self.TERMINATE_DELAY: logging.error('streaming query have been stuck for %d seconds', quiet_duration) logging.error('canceling streaming job') waiting_for_shutdown = True cancel_job = True elif quiet_duration > self.WARNING_DELAY: logging.warning('streaming query have been stuck for %d seconds', quiet_duration) if cancel_job: job.cancel() perf = curr_perf stopped = PipelineState.is_terminal(job.state) if stopped: break if not waiting_for_shutdown: if last_active_ms == now: logging.info('activity seen, new performance data extracted') else: logging.info('no activity seen') else: logging.info('waiting for shutdown') time.sleep(self.PERF_DELAY) return perf
keep monitoring the performance and progress of running job and cancel the job if the job is stuck or seems to have finished running Returns: the final performance if it is measured
github-repos
def _set_shape(self, shape): shape = tensor_shape.as_shape(shape) if shape.rank is None: return shape = shape.as_list() if shape[0] is not None: self._row_partition._row_splits.set_shape(shape[0] + 1) dtype = self._row_partition.dtype for i, partition in enumerate(self._nested_row_partitions): size = shape[i + 1] if size is not None: if partition._uniform_row_length is not None: old_row_length = tensor_util.constant_value(partition._uniform_row_length) if old_row_length is not None: if size == old_row_length: continue else: raise ValueError(f'Inconsistent size for axis {i + 1}: {old_row_length} vs. {size}.') partition._uniform_row_length = ops.convert_to_tensor(size, dtype) if partition._nrows is None: partition._nrows = array_ops.size(partition._row_splits, out_type=dtype) - 1 if hasattr(self.flat_values, 'set_shape'): flat_shape = tensor_shape.as_shape([None] + shape[self.ragged_rank + 1:]) self.flat_values.set_shape(flat_shape)
Updates the static shape of `self` to be `shape`. * If a dimension of `shape` has known rank, and is encoded via partitioning, then this will update the corresponding partition to define `_uniform_row_length` and `nrows`. * If a dimension of `shape` has a known rank, and is encoded as one of the `flat_values` dimensions, then `flat_values.set_shape()` will be used to update its shape. Warning: Using this method to assert an incorrect shape for a RaggedTensor (i.e., one that's not consistent with its actual shape) can cause segmentation faults and very difficult-to-diagnose behavior. Only use this method if you are certain that the shape is correct. Args: shape: `tf.TensorShape` specifying the shape for this `RaggedTensor`.
github-repos
def key_periods(ciphertext, max_key_period): if max_key_period <= 0: raise ValueError("max_key_period must be a positive integer") key_scores = [] for period in range(1, min(max_key_period, len(ciphertext)) + 1): score = abs(ENGLISH_IC - index_of_coincidence(*split_columns(ciphertext, period))) key_scores.append((period, score)) return [p[0] for p in sorted(key_scores, key=lambda x: x[1])]
Rank all key periods for ``ciphertext`` up to and including ``max_key_period`` Example: >>> key_periods(ciphertext, 30) [2, 4, 8, 3, ...] Args: ciphertext (str): The text to analyze max_key_period (int): The maximum period the key could be Returns: Sorted list of keys Raises: ValueError: If max_key_period is less than or equal to 0
juraj-google-style
def to_matrix( xx, yy, zz, xy, yz, xz ): matrix = np.array( [[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]] ) return matrix
Convert a list of matrix components to a symmetric 3x3 matrix. Inputs should be in the order xx, yy, zz, xy, yz, xz. Args: xx (float): xx component of the matrix. yy (float): yy component of the matrix. zz (float): zz component of the matrix. xy (float): xy component of the matrix. yz (float): yz component of the matrix. xz (float): xz component of the matrix. Returns: (np.array): The matrix, as a 3x3 numpy array.
juraj-google-style
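A quick check of the symmetric layout described above (values chosen arbitrarily):

m = to_matrix(1.0, 2.0, 3.0, 0.1, 0.2, 0.3)
# m == [[1.0, 0.1, 0.3],
#       [0.1, 2.0, 0.2],
#       [0.3, 0.2, 3.0]]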
def _find_hstreaming(): global WARNED_HADOOP_HOME, HADOOP_STREAMING_PATH_CACHE if HADOOP_STREAMING_PATH_CACHE: return HADOOP_STREAMING_PATH_CACHE try: search_root = os.environ['HADOOP_HOME'] except KeyError: search_root = '/' cmd = ('find %s -name hadoop*streaming*.jar' % search_root) p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) HADOOP_STREAMING_PATH_CACHE = p.communicate()[0].split('\n')[0] if ((search_root == '/') and (not WARNED_HADOOP_HOME)): WARNED_HADOOP_HOME = True hadoop_home = HADOOP_STREAMING_PATH_CACHE[:HADOOP_STREAMING_PATH_CACHE.rfind('/contrib/')] logging.warn(('Set the HADOOP_HOME environmental variable to your hadoop path to improve performance. Put the following [export HADOOP_HOME="%s"] in ~/.bashrc' % hadoop_home)) return HADOOP_STREAMING_PATH_CACHE
Finds the whole path to the hadoop streaming jar. If the environmental var HADOOP_HOME is specified, then start the search from there. Returns: Full path to the hadoop streaming jar if found, else return an empty string.
codesearchnet
def import_subview(self, idx, subview): subview.corpus = self self._subviews[idx] = subview
Add the given subview to the corpus. Args: idx (str): An idx that is unique in the corpus for identifying the subview. If already a subview exists with the given id it will be overridden. subview (Subview): The subview to add.
codesearchnet
def batch_shape_tensor(self, name='batch_shape_tensor'): with self._name_scope(name): if self.batch_shape.is_fully_defined(): return ops.convert_to_tensor(self.batch_shape.as_list(), dtype=dtypes.int32, name='batch_shape') return self._batch_shape_tensor()
Shape of a single sample from a single event index as a 1-D `Tensor`. The batch dimensions are indexes into independent, non-identical parameterizations of this distribution. Args: name: name to give to the op Returns: batch_shape: `Tensor`.
github-repos
def add_derivatives(self, path, **kwargs):
    paths = listify(path)
    deriv_dirs = []

    def check_for_description(dir):
        dd = os.path.join(dir, 'dataset_description.json')
        return os.path.exists(dd)

    for p in paths:
        p = os.path.abspath(p)
        if os.path.exists(p):
            if check_for_description(p):
                deriv_dirs.append(p)
            else:
                subdirs = [d for d in os.listdir(p)
                           if os.path.isdir(os.path.join(p, d))]
                for sd in subdirs:
                    sd = os.path.join(p, sd)
                    if check_for_description(sd):
                        deriv_dirs.append(sd)
    if not deriv_dirs:
        warnings.warn("Derivative indexing was enabled, but no valid derivatives datasets were found in any of the provided or default locations. Please make sure all derivatives datasets you intend to index contain a 'dataset_description.json' file, as described in the BIDS-derivatives specification.")
    for deriv in deriv_dirs:
        dd = os.path.join(deriv, 'dataset_description.json')
        with open(dd, 'r', encoding='utf-8') as ddfd:
            description = json.load(ddfd)
        pipeline_name = description.get('PipelineDescription', {}).get('Name')
        if pipeline_name is None:
            raise ValueError('Every valid BIDS-derivatives dataset must have a PipelineDescription.Name field set inside dataset_description.json.')
        if pipeline_name in self.derivatives:
            raise ValueError("Pipeline name '%s' has already been added to this BIDSLayout. Every added pipeline must have a unique name!" % pipeline_name)
        kwargs['config'] = kwargs.get('config') or ['bids', 'derivatives']
        kwargs['sources'] = kwargs.get('sources') or self
        self.derivatives[pipeline_name] = BIDSLayout(deriv, **kwargs)
    for deriv in self.derivatives.values():
        self.entities.update(deriv.entities)
Add BIDS-Derivatives datasets to tracking. Args: path (str, list): One or more paths to BIDS-Derivatives datasets. Each path can point to either a derivatives/ directory containing one or more pipeline directories, or to a single pipeline directory (e.g., derivatives/fmriprep). kwargs (dict): Optional keyword arguments to pass on to BIDSLayout() when initializing each of the derivative datasets. Note: Every derivatives directory intended for indexing MUST contain a valid dataset_description.json file. See the BIDS-Derivatives specification for details.
codesearchnet
def connect(self, fedora_url, data=None, method='Get'): if data is None: data = {} if not fedora_url.startswith("http"): fedora_url = urllib.parse.urljoin(self.base_url, fedora_url) request = urllib.request.Request(fedora_url, method=method) request.add_header('Accept', 'text/turtle') request.add_header('Content-Type', 'text/turtle') if len(data) > 0: request.data = data try: response = urllib.request.urlopen(request) except urllib.error.URLError as err: if hasattr(err, 'reason'): print("failed to reach server at {} with {} method".format( fedora_url, request.method)) print("Reason: ", err.reason) print("Data: ", data) elif hasattr(err, 'code'): print("Server error {}".format(err.code)) raise err return response
Attempts to connect to the REST endpoints of the Fedora Commons repository, optionally sending data. Args: fedora_url(string): Fedora URL data(dict): Data to pass through to the REST endpoint method(str): REST method, defaults to GET Returns: result(string): Response string from Fedora
juraj-google-style
def _inchi_labels(mol): obconv = ob.OBConversion() obconv.SetOutFormat(str("inchi")) obconv.AddOption(str("a"), ob.OBConversion.OUTOPTIONS) obconv.AddOption(str("X"), ob.OBConversion.OUTOPTIONS, str("DoNotAddH")) inchi_text = obconv.WriteString(mol) match = re.search(r"InChI=(?P<inchi>.+)\nAuxInfo=.+" r"/N:(?P<labels>[0-9,;]+)/(E:(?P<eq_atoms>[0-9," r";\(\)]*)/)?", inchi_text) inchi = match.group("inchi") label_text = match.group("labels") eq_atom_text = match.group("eq_atoms") heavy_atom_labels = tuple([int(i) for i in label_text.replace( ';', ',').split(',')]) eq_atoms = [] if eq_atom_text is not None: eq_tokens = re.findall(r'\(((?:[0-9]+,)+[0-9]+)\)', eq_atom_text .replace(';', ',')) eq_atoms = tuple([tuple([int(i) for i in t.split(',')]) for t in eq_tokens]) return heavy_atom_labels, eq_atoms, inchi
Get the InChI canonical labels of the heavy atoms in the molecule. Args: mol: The molecule. OpenBabel OBMol object Returns: The label mappings: a tuple of the original labels of the heavy atoms in canonical order, a tuple of groups of equivalent atoms, and the InChI string.
juraj-google-style
def preprocessing_fn(inputs): result = {'clicked': inputs['clicked']} for name in _INTEGER_COLUMN_NAMES: feature = inputs[name] feature = tft.sparse_tensor_to_dense_with_shape(feature, [None, 1], default_value=-1) feature = tf.squeeze(feature, axis=1) result[name] = feature result[name + '_bucketized'] = tft.bucketize(feature, _NUM_BUCKETS) for name in _CATEGORICAL_COLUMN_NAMES: feature = inputs[name] feature = tft.sparse_tensor_to_dense_with_shape(feature, [None, 1], default_value='') feature = tf.squeeze(feature, axis=1) result[get_transformed_categorical_column_name(name)] = tft.compute_and_apply_vocabulary(feature, frequency_threshold=frequency_threshold) return result
User defined preprocessing function for criteo columns. Args: inputs: dictionary of input `tensorflow_transform.Column`. Returns: A dictionary of `tensorflow_transform.Column` representing the transformed columns.
github-repos
def _update_graph_variables(self, learning_rate: float = None, momentum: float = None): if learning_rate is not None: K.set_value(self.get_learning_rate_variable(), learning_rate) if momentum is not None: K.set_value(self.get_momentum_variable(), momentum)
Update graph variables, setting the given `learning_rate` and `momentum`. Args: learning_rate: learning rate value to be set in graph (set if not None) momentum: momentum value to be set in graph (set if not None) Returns: None
juraj-google-style
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): super(DerivationParameters, self).read( input_stream, kmip_version=kmip_version ) local_stream = BytearrayStream(input_stream.read(self.length)) if self.is_tag_next( enums.Tags.CRYPTOGRAPHIC_PARAMETERS, local_stream ): self._cryptographic_parameters = CryptographicParameters() self._cryptographic_parameters.read( local_stream, kmip_version=kmip_version ) if self.is_tag_next(enums.Tags.INITIALIZATION_VECTOR, local_stream): self._initialization_vector = ByteString( tag=enums.Tags.INITIALIZATION_VECTOR ) self._initialization_vector.read( local_stream, kmip_version=kmip_version ) if self.is_tag_next(enums.Tags.DERIVATION_DATA, local_stream): self._derivation_data = ByteString(tag=enums.Tags.DERIVATION_DATA) self._derivation_data.read(local_stream, kmip_version=kmip_version) if self.is_tag_next(enums.Tags.SALT, local_stream): self._salt = ByteString(tag=enums.Tags.SALT) self._salt.read(local_stream, kmip_version=kmip_version) if self.is_tag_next(Tags.ITERATION_COUNT, local_stream): self._iteration_count = Integer(tag=Tags.ITERATION_COUNT) self._iteration_count.read(local_stream, kmip_version=kmip_version) self.is_oversized(local_stream)
Read the data encoding the DerivationParameters struct and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def save_data(X, y, path): catalog = {'.csv': save_csv, '.sps': save_libsvm, '.h5': save_hdf5} ext = os.path.splitext(path)[1] func = catalog[ext] if (y is None): y = np.zeros((X.shape[0],)) func(X, y, path)
Save data as a CSV, LibSVM or HDF5 file based on the file extension. Args: X (numpy or scipy sparse matrix): Data matrix y (numpy array): Target vector. If None, all zero vector will be saved. path (str): Path to the CSV, LibSVM or HDF5 file to save data.
codesearchnet
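The extension-based dispatch can be illustrated on its own; the CSV writer below is a placeholder standing in for save_csv/save_libsvm/save_hdf5 rather than the project's real writers.

import os
import numpy as np

def save_csv(X, y, path):
    # Target as the first column, features after it.
    np.savetxt(path, np.column_stack([y, X]), delimiter=',')

WRITERS = {'.csv': save_csv}  # extend with '.sps' and '.h5' writers as needed

def save_data_sketch(X, y, path):
    ext = os.path.splitext(path)[1]
    writer = WRITERS[ext]          # KeyError for unsupported extensions
    if y is None:
        y = np.zeros((X.shape[0],))
    writer(X, y, path)

save_data_sketch(np.eye(3), None, 'demo.csv')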
def _CreateImage(media_service, opener, url): image_data = opener.open(url).read().decode('utf-8') image = {'type': 'IMAGE', 'data': image_data, 'xsi_type': 'Image'} return media_service.upload(image)[0]
Creates an image and uploads it to the server. Args: media_service: a SudsServiceProxy instance for AdWords's MediaService. opener: an OpenerDirector instance. url: a str URL used to load image data. Returns: The image that was successfully uploaded.
codesearchnet
def convert(self, vroot, entry_variables): for converter in self.converters: vroot = converter.convert(vroot, entry_variables) return vroot
Convert a given graph. Convert a given graph using the `converters` in the order of their registration, i.e., sequentially. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. Returns: The converted root variable.
codesearchnet
def BatchConvert(self, metadata_value_pairs, token=None): msg_dict = {} for metadata, msg in metadata_value_pairs: msg_dict.setdefault(msg.source, []).append((metadata, msg)) metadata_objects = [] metadata_to_fetch = [] for client_urn in msg_dict: try: metadata_objects.append(self.cached_metadata[client_urn]) except KeyError: metadata_to_fetch.append(client_urn) if metadata_to_fetch: if data_store.RelationalDBEnabled(): client_ids = set(urn.Basename() for urn in metadata_to_fetch) infos = data_store.REL_DB.MultiReadClientFullInfo(client_ids) fetched_metadata = [ GetMetadata(client_id, info) for client_id, info in infos.items() ] else: client_fds = aff4.FACTORY.MultiOpen( metadata_to_fetch, mode="r", token=token) fetched_metadata = [ GetMetadataLegacy(client_fd, token=token) for client_fd in client_fds ] for metadata in fetched_metadata: self.cached_metadata[metadata.client_urn] = metadata metadata_objects.extend(fetched_metadata) data_by_type = {} for metadata in metadata_objects: try: for original_metadata, message in msg_dict[metadata.client_urn]: new_metadata = ExportedMetadata(metadata) new_metadata.source_urn = original_metadata.source_urn new_metadata.annotations = original_metadata.annotations new_metadata.original_timestamp = message.payload.age cls_name = message.payload.__class__.__name__ if cls_name not in data_by_type: converters_classes = ExportConverter.GetConvertersByValue( message.payload) data_by_type[cls_name] = { "converters": [cls(self.options) for cls in converters_classes], "batch_data": [(new_metadata, message.payload)] } else: data_by_type[cls_name]["batch_data"].append( (new_metadata, message.payload)) except KeyError: pass converted_batch = [] for dataset in itervalues(data_by_type): for converter in dataset["converters"]: converted_batch.extend( converter.BatchConvert(dataset["batch_data"], token=token)) return converted_batch
Converts a batch of GrrMessages into a set of RDFValues at once. Args: metadata_value_pairs: a list or a generator of tuples (metadata, value), where metadata is ExportedMetadata to be used for conversion and value is a GrrMessage to be converted. token: Security token. Returns: Resulting RDFValues. Empty list is a valid result and means that conversion wasn't possible.
juraj-google-style
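The grouping step at the top of BatchConvert is the plain dict.setdefault pattern; a standalone illustration with made-up client ids and payloads:

messages = [('C.1000', 'msg-a'), ('C.2000', 'msg-b'), ('C.1000', 'msg-c')]
by_source = {}
for source, payload in messages:
    by_source.setdefault(source, []).append(payload)
print(by_source)  # {'C.1000': ['msg-a', 'msg-c'], 'C.2000': ['msg-b']}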
def business_days_in_period(self, date_tensor, period_tensor): return self.business_days_between(date_tensor, date_tensor + period_tensor)
Calculates number of business days in a period. Includes the dates in `date_tensor`, but excludes final dates resulting from addition of `period_tensor`. Args: date_tensor: DateTensor of starting dates. period_tensor: PeriodTensor, should be broadcastable to `date_tensor`. Returns: An int32 Tensor with the number of business days in given periods that start at given dates.
github-repos
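This is not the DateTensor API itself, but numpy's busday_count uses the same inclusive-start/exclusive-end convention and makes the semantics concrete:

import numpy as np

# Business days in the 7-day period starting 2021-01-01 (a Friday):
# Jan 1, 4, 5, 6, 7 are counted; Jan 8, the end date, is excluded.
print(np.busday_count('2021-01-01', '2021-01-08'))  # 5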
def check_num_tasks(chain, task_count): errors = [] min_decision_tasks = 1 if task_count['decision'] < min_decision_tasks: errors.append("{} decision tasks; we must have at least {}!".format( task_count['decision'], min_decision_tasks )) raise_on_errors(errors)
Make sure there are a specific number of specific task types. Currently we only check decision tasks. Args: chain (ChainOfTrust): the chain we're operating on task_count (dict): mapping task type to the number of links. Raises: CoTError: on failure.
juraj-google-style
def _rank(x): rank = ops.convert_to_tensor(x).get_shape().ndims if rank: return (rank, True) else: return (array_ops.rank(x), False)
Helper function to retrieve the rank of a tensor. Args: x: Something convertible to `Tensor`. Returns: Either a pair `(rank, True)` where `rank` is an integer or a pair `(rank, False)` where `rank` is an integer `Tensor`. In either case, `rank` is the rank of `x`.
github-repos
def get_nodes_lines(self, **kwargs): params = {'Nodes': util.ints_to_string(kwargs.get('nodes', []))} result = self.make_request('bus', 'get_nodes_lines', **params) if not util.check_result(result): return False, result.get('resultDescription', 'UNKNOWN ERROR') values = util.response_list(result, 'resultValues') return True, [emtype.NodeLinesItem(**a) for a in values]
Obtain stop IDs, coordinates and line information. Args: nodes (list[int] | int): nodes to query, may be empty to get all nodes. Returns: Status boolean and parsed response (list[NodeLinesItem]), or message string in case of error.
juraj-google-style
def _CanProcessKeyWithPlugin(self, registry_key, plugin): for registry_key_filter in plugin.FILTERS: if getattr(registry_key_filter, 'key_paths', []): continue if registry_key_filter.Match(registry_key): return True return False
Determines if a plugin can process a Windows Registry key or its values. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key. plugin (WindowsRegistryPlugin): Windows Registry plugin. Returns: bool: True if the Registry key can be processed with the plugin.
juraj-google-style
def fetch_credential(self, credential=None, profile=None): q = self.db.get(self.query.profile == profile) if q is not None: return q.get(credential)
Fetch credential from credentials file. Args: credential (str): Credential to fetch. profile (str): Credentials profile. Defaults to ``'default'``. Returns: str, None: Fetched credential or ``None``.
juraj-google-style
def fulfill_transaction(transaction, *, private_keys): if (not isinstance(private_keys, (list, tuple))): private_keys = [private_keys] if isinstance(private_keys, tuple): private_keys = list(private_keys) transaction_obj = Transaction.from_dict(transaction) try: signed_transaction = transaction_obj.sign(private_keys) except KeypairMismatchException as exc: raise MissingPrivateKeyError('A private key is missing!') from exc return signed_transaction.to_dict()
Fulfills the given transaction. Args: transaction (dict): The transaction to be fulfilled. private_keys (:obj:`str` | :obj:`list` | :obj:`tuple`): One or more private keys to be used for fulfilling the transaction. Returns: dict: The fulfilled transaction payload, ready to be sent to a BigchainDB federation. Raises: :exc:`~.exceptions.MissingPrivateKeyError`: If a private key is missing.
codesearchnet
def __init__(self, app): super(SendgridEmailAdapter, self).__init__(app) sendgrid_api_key = app.config.get('SENDGRID_API_KEY') if not sendgrid_api_key: raise ConfigError( "The SENDGRID_API_KEY setting is missing. Set SENDGRID_API_KEY in your app config.") try: from sendgrid import SendGridAPIClient self.sg = SendGridAPIClient(apikey=sendgrid_api_key) except ImportError: raise ConfigError(SENDGRID_IMPORT_ERROR_MESSAGE)
Check config settings and setup SendGrid Web API v3. Args: app(Flask): The Flask application instance.
juraj-google-style
def doc2id(self, doc): doc = map(self.process_token, doc) return [self.token_to_id(token) for token in doc]
Get the list of token ids for the given doc. Args: doc (list): document. Returns: list: int ids of the tokens in doc.
codesearchnet
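A minimal stand-in vocabulary showing what process_token and token_to_id are assumed to do here (lower-casing and a dict lookup with an unknown id); the real class likely differs:

class TinyVocab:
    def __init__(self, tokens):
        # Reserve 0 for padding and 1 for unknown tokens (assumption).
        self._ids = {t: i + 2 for i, t in enumerate(sorted(set(tokens)))}

    def process_token(self, token):
        return token.lower()

    def token_to_id(self, token):
        return self._ids.get(token, 1)

    def doc2id(self, doc):
        doc = map(self.process_token, doc)
        return [self.token_to_id(token) for token in doc]

vocab = TinyVocab(['the', 'cat', 'sat'])
print(vocab.doc2id(['The', 'dog', 'sat']))  # [4, 1, 3]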
def call(self, func, key, timeout=None): result = self.get(key) if (result == NONE_RESULT): return None if (result is None): result = func() self.set(key, (result if (result is not None) else NONE_RESULT), timeout) return result
Wraps a function call with cache. Args: func (function): the function to call. key (str): the cache key for this call. timeout (int): the cache timeout for the key (the unit of this parameter depends on the cache class you use, for example, if you use the classes from werkzeug, then timeout is in seconds.) Returns: The return value of calling func
codesearchnet
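A runnable sketch with an in-memory dict standing in for the real cache backend, showing how the NONE_RESULT sentinel distinguishes a cached None from a cache miss:

NONE_RESULT = object()  # sentinel marking "the function returned None"

class DictCache:
    def __init__(self):
        self._data = {}

    def get(self, key):
        return self._data.get(key)

    def set(self, key, value, timeout=None):
        self._data[key] = value  # timeout ignored in this toy backend

    def call(self, func, key, timeout=None):
        result = self.get(key)
        if result is NONE_RESULT:
            return None
        if result is None:
            result = func()
            self.set(key, result if result is not None else NONE_RESULT, timeout)
        return result

cache = DictCache()
print(cache.call(lambda: 42, 'answer'))   # computes and stores 42
print(cache.call(lambda: 99, 'answer'))   # returns the cached 42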
def compute_f(match_num, test_num, gold_num): if test_num == 0 or gold_num == 0: return 0.00, 0.00, 0.00 precision = float(match_num) / float(test_num) recall = float(match_num) / float(gold_num) if (precision + recall) != 0: f_score = 2 * precision * recall / (precision + recall) if veryVerbose: print("F-score:", f_score, file=DEBUG_LOG) return precision, recall, f_score else: if veryVerbose: print("F-score:", "0.0", file=DEBUG_LOG) return precision, recall, 0.00
Compute the f-score based on the matching triple number, triple number of AMR set 1, triple number of AMR set 2 Args: match_num: matching triple number test_num: triple number of AMR 1 (test file) gold_num: triple number of AMR 2 (gold file) Returns: precision: match_num/test_num recall: match_num/gold_num f_score: 2*precision*recall/(precision+recall)
juraj-google-style
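A worked example of the formulas: with 7 matching triples, 10 test triples and 14 gold triples, precision is 0.7, recall is 0.5, and F = 2*0.7*0.5/1.2 ≈ 0.583.

match_num, test_num, gold_num = 7, 10, 14
precision = match_num / test_num            # 0.7
recall = match_num / gold_num               # 0.5
f_score = 2 * precision * recall / (precision + recall)
print(round(precision, 3), round(recall, 3), round(f_score, 3))  # 0.7 0.5 0.583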
def assert_split_at_fraction_fails(source, num_items_to_read_before_split, split_fraction): assert_split_at_fraction_behavior(source, num_items_to_read_before_split, split_fraction, ExpectedSplitOutcome.MUST_FAIL)
Asserts that dynamic work rebalancing at a given fraction fails. Asserts that trying to perform dynamic splitting after reading 'num_items_to_read_before_split' items from the source fails. Args: source: source to perform dynamic splitting on. num_items_to_read_before_split: number of items to read before splitting. split_fraction: fraction to split at.
github-repos
def NewFromJSON(data): return Comment(body=data.get('body', None), posted_at=data.get('posted_at', None), user=User.NewFromJSON(data.get('user', None)))
Create a new Comment instance from a JSON dict. Args: data (dict): JSON dictionary representing a Comment. Returns: A Comment instance.
codesearchnet
def make_grid_texture(num_h_lines=10, num_v_lines=10, resolution=50): (x_h, y_h) = make_lines_texture(num_h_lines, resolution) (y_v, x_v) = make_lines_texture(num_v_lines, resolution) return (np.concatenate([x_h, x_v]), np.concatenate([y_h, y_v]))
Makes a texture consisting of a grid of vertical and horizontal lines. Args: num_h_lines (int): the number of horizontal lines to draw num_v_lines (int): the number of vertical lines to draw resolution (int): the number of midpoints to draw on each line Returns: A texture.
codesearchnet
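A self-contained sketch of the idea with a simplified make_lines_texture stand-in (NaN-separated polylines on the unit square); the real helper is assumed to differ in detail:

import numpy as np

def make_lines_texture(num_lines=10, resolution=50):
    # Horizontal lines: x sweeps 0..1, y is constant per line, NaN separates lines.
    x, y = [], []
    for offset in np.linspace(0.0, 1.0, num_lines):
        x.extend(np.linspace(0.0, 1.0, resolution).tolist() + [np.nan])
        y.extend([offset] * resolution + [np.nan])
    return np.array(x), np.array(y)

x_h, y_h = make_lines_texture(4, 10)
y_v, x_v = make_lines_texture(3, 10)  # swap x/y to get vertical lines
grid = (np.concatenate([x_h, x_v]), np.concatenate([y_h, y_v]))
print(grid[0].shape, grid[1].shape)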
def format_cert_name(env='', account='', region='', certificate=None): cert_name = None if certificate: if certificate.startswith('arn'): LOG.info('Full ARN provided...skipping lookup.') cert_name = certificate else: generated_cert_name = generate_custom_cert_name(env, region, account, certificate) if generated_cert_name: LOG.info('Found generated certificate %s from template', generated_cert_name) cert_name = generated_cert_name else: LOG.info('Using default certificate name logic') cert_name = 'arn:aws:iam::{account}:server-certificate/{name}'.format(account=account, name=certificate) LOG.debug('Certificate name: %s', cert_name) return cert_name
Format the SSL certificate name into ARN for ELB. Args: env (str): Account environment name account (str): Account number for ARN region (str): AWS Region. certificate (str): Name of SSL certificate Returns: str: Fully qualified ARN for SSL certificate None: Certificate is not desired
codesearchnet
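The default branch reduces to plain string formatting; the account number and certificate name below are made up:

def iam_cert_arn(account, name):
    if name.startswith('arn'):
        return name  # already a full ARN, use as-is
    return 'arn:aws:iam::{account}:server-certificate/{name}'.format(
        account=account, name=name)

print(iam_cert_arn('123456789012', 'my-ssl-cert'))
# arn:aws:iam::123456789012:server-certificate/my-ssl-cert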
def run_display_app_errors(self, err): if err is not None and err: for e_ in err.decode('utf-8').split('\n'): print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, e_)) self.log.error('[tcrun] App error: {}'.format(e_))
Display and log any App errors from the current run. Args: err (bytes): One or more lines of error messages (stderr output), or None.
juraj-google-style
def convertTime(self, time):
    m_format = ''
    if time.minute:
        m_format = ':%M'
    timeString = time.strftime('%I' + m_format + ' %p')
    if not int(timeString[0]):
        timeString = timeString[1:]
    return timeString
Convert a datetime.time or datetime.datetime object representing a time into a human-ready string that can be read, spoken aloud, etc. Args: time (datetime.time or datetime.datetime): A time object to be converted into text. Returns: A string representation of the input time, ignoring any day-related information.
codesearchnet
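The same formatting logic with the standard library directly; the helper name is invented for illustration:

from datetime import time

def speakable_time(t):
    fmt = '%I:%M %p' if t.minute else '%I %p'
    text = t.strftime(fmt)
    # Strip the leading zero from the hour, e.g. '09:30 AM' -> '9:30 AM'.
    return text[1:] if text.startswith('0') else text

print(speakable_time(time(9, 30)))   # 9:30 AM
print(speakable_time(time(14, 0)))   # 2 PM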