Dataset columns: code (string, length 20 to 4.93k characters), docstring (string, length 33 to 1.27k characters), source (string, 3 classes: juraj-google-style, codesearchnet, github-repos).
def _print_drift_report(self):
    try:
        response = self._cloud_formation.describe_stack_resources(StackName=self._stack_name)
        rows = []
        for resource in response.get('StackResources', []):
            row = []
            row.append(resource.get('LogicalResourceId', 'unknown'))
            row.append(resource.get('PhysicalResourceId', 'unknown'))
            row.append(resource.get('ResourceStatus', 'unknown'))
            row.append(resource.get('DriftInformation', {}).get('StackResourceDriftStatus', 'unknown'))
            rows.append(row)

        print('Drift Report:')
        print(tabulate(rows, headers=[
            'Logical ID', 'Physical ID', 'Resource Status', 'Drift Info'
        ]))
    except Exception as wtf:
        logging.error(wtf, exc_info=True)
        return False

    return True
Report the drift of the stack. Args: None Returns: Good or Bad; True or False Note: not yet implemented
juraj-google-style
def __init__(self, session, object_factory):
    check_type(session, RestSession)
    super(PeopleAPI, self).__init__()
    self._session = session
    self._object_factory = object_factory
Initialize a new PeopleAPI object with the provided RestSession. Args: session(RestSession): The RESTful session object to be used for API calls to the Webex Teams service. Raises: TypeError: If the parameter types are incorrect.
juraj-google-style
def liquid_precipitation_depth(self, value=999.0):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float '
                'for field `liquid_precipitation_depth`'.format(value))
    self._liquid_precipitation_depth = value
Corresponds to IDD Field `liquid_precipitation_depth` Args: value (float): value for IDD Field `liquid_precipitation_depth` Unit: mm Missing value: 999.0 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def RestrictFeedItemToAdGroup(client, feed_item, adgroup_id):
    feed_item_target_service = client.GetService(
        'FeedItemTargetService', 'v201809')

    ad_group_target = {
        'xsi_type': 'FeedItemAdGroupTarget',
        'feedId': feed_item['feedId'],
        'feedItemId': feed_item['feedItemId'],
        'adGroupId': adgroup_id
    }

    operation = {'operator': 'ADD', 'operand': ad_group_target}

    response = feed_item_target_service.mutate([operation])
    new_ad_group_target = response['value'][0]

    print('Feed item target for feed ID %s and feed item ID %s was created to '
          'restrict serving to ad group ID %s'
          % (new_ad_group_target['feedId'], new_ad_group_target['feedItemId'],
             new_ad_group_target['adGroupId']))
Restricts the feed item to an ad group. Args: client: an AdWordsClient instance. feed_item: The feed item. adgroup_id: The ad group ID.
juraj-google-style
def log_metric(self, name, value, unit=None, global_step=None, extras=None): if (not isinstance(value, numbers.Number)): tf.logging.warning('Metric value to log should be a number. Got %s', type(value)) return if extras: extras = [{'name': k, 'value': v} for (k, v) in sorted(extras.items())] else: extras = [] with tf.gfile.GFile(os.path.join(self._logging_dir, METRIC_LOG_FILE_NAME), 'a') as f: metric = {'name': name, 'value': float(value), 'unit': unit, 'global_step': global_step, 'timestamp': datetime.datetime.now().strftime(_DATE_TIME_FORMAT_PATTERN), 'extras': extras} try: json.dump(metric, f) f.write('\n') except (TypeError, ValueError) as e: tf.logging.warning('Failed to dump metric to log file: name %s, value %s, error %s', name, value, e)
Log the benchmark metric information to local file. Currently the logging is done in a synchronized way. This should be updated to log asynchronously. Args: name: string, the name of the metric to log. value: number, the value of the metric. The value will not be logged if it is not a number type. unit: string, the unit of the metric, E.g "image per second". global_step: int, the global_step when the metric is logged. extras: map of string:string, the extra information about the metric.
codesearchnet
def get(cls, resource_type):
    if isinstance(resource_type, str):
        obj = getattr(db, cls.__name__).find_one(cls.resource_type == resource_type)
    elif isinstance(resource_type, int):
        obj = getattr(db, cls.__name__).find_one(cls.resource_type_id == resource_type)
    elif isinstance(resource_type, cls):
        return resource_type
    else:
        obj = None

    if not obj:
        obj = cls()
        obj.resource_type = resource_type

        db.session.add(obj)
        db.session.commit()
        db.session.refresh(obj)

    return obj
Returns the ResourceType object for `resource_type`. If no existing object was found, a new type will be created in the database and returned Args: resource_type (str): Resource type name Returns: :obj:`ResourceType`
juraj-google-style
def run_step(context): logger.debug('started') context.assert_child_key_has_value('fileWriteYaml', 'path', __name__) out_path = context.get_formatted_string(context['fileWriteYaml']['path']) is_payload_specified = ('payload' in context['fileWriteYaml']) yaml_writer = pypyr.yaml.get_yaml_parser_roundtrip_for_context() logger.debug(f'opening destination file for writing: {out_path}') os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True) with open(out_path, 'w') as outfile: if is_payload_specified: payload = context['fileWriteYaml']['payload'] formatted_iterable = context.get_formatted_iterable(payload) else: formatted_iterable = context.get_formatted_iterable(context) yaml_writer.dump(formatted_iterable, outfile) logger.info(f'formatted context content and wrote to {out_path}') logger.debug('done')
Write payload out to yaml file. Args: context: pypyr.context.Context. Mandatory. The following context keys expected: - fileWriteYaml - path. mandatory. path-like. Write output file to here. Will create directories in path for you. - payload. optional. Write this to output file. If not specified, output entire context. Returns: None. Raises: pypyr.errors.KeyNotInContextError: fileWriteYaml or fileWriteYaml['path'] missing in context. pypyr.errors.KeyInContextHasNoValueError: fileWriteYaml or fileWriteYaml['path'] exists but is None.
codesearchnet
def _analyze_input_data(self, entry, k, depth=1, max_depth=3, max_list=3): class _elementInfo(object): def __init__(self, el, pos, depth=0, max_list=3): self.shape = '' self.type = type(el).__name__ self.dtype = '' self.range = '' self.sub_elements = [] self.ident = (' ' * (depth * 2)) self.pos = pos numpy_scalar_types = list(itertools.chain(*np.sctypes.values())) if isinstance(el, (int, float, bool)): self.range = ' with value {}'.format(el) elif (type(el) is np.ndarray): self.shape = ' of shape {}'.format(el.shape) self.dtype = ':{}'.format(str(el.dtype)) self.range = ' in range [{}, {}]'.format(el.min(), el.max()) elif (type(el) in numpy_scalar_types): self.range = ' with value {}'.format(el) elif isinstance(el, list): self.shape = ' of len {}'.format(len(el)) if (depth < max_depth): for (k, subel) in enumerate(el): if (k < max_list): self.sub_elements.append(_elementInfo(subel, k, (depth + 1), max_list)) else: self.sub_elements.append(((' ' * ((depth + 1) * 2)) + '...')) break elif (len(el) > 0): self.sub_elements.append(((' ' * ((depth + 1) * 2)) + ' ...')) def __str__(self): strings = [] vals = (self.ident, self.pos, self.type, self.dtype, self.shape, self.range) strings.append('{}{}: {}{}{}{}'.format(*vals)) for (k, el) in enumerate(self.sub_elements): strings.append(str(el)) return '\n'.join(strings) return str(_elementInfo(entry, k, depth, max_list))
Gather useful debug information from a datapoint. Args: entry: the datapoint component k (int): index of this component in current datapoint depth (int, optional): recursion depth max_depth, max_list: same as in :meth:`__init__`. Returns: string: debug message
codesearchnet
def _remove_string_from_commastring(self, field, string):
    commastring = self.data.get(field, '')
    if string in commastring:
        self.data[field] = commastring.replace(string, '')
        return True
    return False
Remove a string from a comma separated list of strings Args: field (str): Field containing comma separated list string (str): String to remove Returns: bool: True if string removed or False if not
juraj-google-style
def size(self): if (len(self.grouping_column_types) > 1): index_type = WeldStruct([self.grouping_column_types]) else: index_type = self.grouping_column_types[0] index_name = self.grouping_column_names[0] return SeriesWeld(grizzly_impl.groupby_size(self.columns, self.column_types, self.grouping_columns, self.grouping_column_types), WeldLong(), index_type=index_type, index_name=index_name)
Returns the sizes of the groups as series. Returns: TYPE: Description
codesearchnet
def _get_required_params_for_conversion(self, event_key, event_tags): snapshot = {} event_dict = {self.EventParams.EVENT_ID: self.config.get_event(event_key).id, self.EventParams.TIME: self._get_time(), self.EventParams.KEY: event_key, self.EventParams.UUID: str(uuid.uuid4())} if event_tags: revenue_value = event_tag_utils.get_revenue_value(event_tags) if (revenue_value is not None): event_dict[event_tag_utils.REVENUE_METRIC_TYPE] = revenue_value numeric_value = event_tag_utils.get_numeric_value(event_tags, self.config.logger) if (numeric_value is not None): event_dict[event_tag_utils.NUMERIC_METRIC_TYPE] = numeric_value if (len(event_tags) > 0): event_dict[self.EventParams.TAGS] = event_tags snapshot[self.EventParams.EVENTS] = [event_dict] return snapshot
Get parameters that are required for the conversion event to register. Args: event_key: Key representing the event which needs to be recorded. event_tags: Dict representing metadata associated with the event. Returns: Dict consisting of the decisions and events info for conversion event.
codesearchnet
def read_saved_model(saved_model_dir): path_to_pbtxt = os.path.join(compat.as_bytes(saved_model_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT)) path_to_pb = os.path.join(compat.as_bytes(saved_model_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB)) if not file_io.file_exists(path_to_pbtxt) and (not file_io.file_exists(path_to_pb)): raise IOError('SavedModel file does not exist at: %s' % saved_model_dir) saved_model = saved_model_pb2.SavedModel() if file_io.file_exists(path_to_pb): with file_io.FileIO(path_to_pb, 'rb') as f: file_content = f.read() try: saved_model.ParseFromString(file_content) return saved_model except message.DecodeError as e: raise IOError('Cannot parse proto file %s: %s.' % (path_to_pb, str(e))) elif file_io.file_exists(path_to_pbtxt): with file_io.FileIO(path_to_pbtxt, 'rb') as f: file_content = f.read() try: text_format.Merge(file_content.decode('utf-8'), saved_model) return saved_model except text_format.ParseError as e: raise IOError('Cannot parse pbtxt file %s: %s.' % (path_to_pbtxt, str(e))) else: raise IOError('SavedModel file does not exist at: %s/{%s|%s}' % (saved_model_dir, constants.SAVED_MODEL_FILENAME_PBTXT, constants.SAVED_MODEL_FILENAME_PB))
Reads the saved_model.pb or saved_model.pbtxt file containing `SavedModel`. Args: saved_model_dir: Directory containing the SavedModel file. Returns: A `SavedModel` protocol buffer. Raises: IOError: If the file does not exist, or cannot be successfully parsed.
github-repos
def keypath(self, key):
    return fs.path(self.path, self.escape_key(key))
Get the filesystem path for a key. Arguments: key: Key. Returns: str: Absolute path.
codesearchnet
def CreateSignatureContract(publicKey):
    script = Contract.CreateSignatureRedeemScript(publicKey)
    params = b'\x00'
    encoded = publicKey.encode_point(True)
    pubkey_hash = Crypto.ToScriptHash(encoded, unhex=True)
    return Contract(script, params, pubkey_hash)
Create a signature contract. Args: publicKey (ecdsa.Curve.point): e.g. KeyPair.PublicKey. Returns: neo.SmartContract.Contract: a Contract instance.
codesearchnet
def status(self):
    return BackendStatus(backend_name=self.name(),
                         backend_version=__version__,
                         operational=True,
                         pending_jobs=0,
                         status_msg='')
Return backend status. Returns: BackendStatus: the status of the backend.
codesearchnet
def load_template(filename):
    template_file = os.path.join(PKG_DIR, 'templates', filename)
    with open(template_file) as fp:
        return fp.read()
Load template from file. The templates are part of the package and must be included as ``package_data`` in project ``setup.py``. Args: filename (str): The template path. Relative to `peltak` package directory. Returns: str: The content of the chosen template.
juraj-google-style
def get_central_coors(self, row, col):
    if ((row < 0) or (row >= self.nRows) or (col < 0) or (col >= self.nCols)):
        raise ValueError(('The row (%d) or col (%d) must be >=0 and less than '
                          'nRows (%d) or nCols (%d)!' % (row, col, self.nRows, self.nCols)))
    else:
        tmpx = (self.xMin + ((col + 0.5) * self.dx))
        tmpy = (self.yMax - ((row + 0.5) * self.dx))
        return (tmpx, tmpy)
Get the coordinates of central grid. Args: row: row number, range from 0 to (nRows - 1). col: col number, range from 0 to (nCols - 1). Returns: XY coordinates. If the row or col are invalid, raise ValueError.
codesearchnet
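A worked check of the cell-center formula in the row above, with made-up grid values: taking xMin = 100.0, yMax = 500.0 and dx = 30.0, the cell at row 2, col 3 has center x = 100.0 + (3 + 0.5) * 30.0 = 205.0 and center y = 500.0 - (2 + 0.5) * 30.0 = 425.0, matching the code's use of dx for both axes.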
def _constant_value(ragged_factory, inner_factory, pylist, dtype, ragged_rank, inner_shape): if ragged_tensor.is_ragged(pylist): raise TypeError('pylist may not be a RaggedTensor or RaggedTensorValue.') if not isinstance(pylist, (list, tuple)) and np.ndim(pylist) == 0: if ragged_rank is not None and ragged_rank != 0: raise ValueError('Invalid pylist=%r: incompatible with ragged_rank=%d' % (pylist, ragged_rank)) if inner_shape is not None and inner_shape: raise ValueError('Invalid pylist=%r: incompatible with dim(inner_shape)=%d' % (pylist, len(inner_shape))) return inner_factory(pylist, dtype, ()) if ragged_rank is not None and ragged_rank < 0: raise ValueError('Invalid ragged_rank=%r: must be nonnegative' % ragged_rank) scalar_depth, max_depth = _find_scalar_and_max_depth(pylist) if scalar_depth is not None: if max_depth > scalar_depth: raise ValueError('Invalid pylist=%r: empty list nesting is greater than scalar value nesting' % pylist) if ragged_rank is not None and max_depth < ragged_rank: raise ValueError(f'Invalid pylist={pylist}, max depth smaller than ragged_rank={ragged_rank}') if inner_shape is not None and ragged_rank is not None: expected_depth = ragged_rank + len(inner_shape) + 1 if scalar_depth is not None and expected_depth != scalar_depth or (scalar_depth is None and expected_depth < max_depth): raise ValueError('Invalid pylist=%r: incompatible with ragged_rank=%d and dim(inner_shape)=%d' % (pylist, ragged_rank, len(inner_shape))) if ragged_rank == 0 or (ragged_rank is None and (max_depth < 2 or (inner_shape is not None and max_depth - len(inner_shape) < 2))): return inner_factory(pylist, dtype, inner_shape) if inner_shape is None: if ragged_rank is None: inner_shape = () else: inner_shape = _default_inner_shape_for_pylist(pylist, ragged_rank) if ragged_rank is None: if scalar_depth is None: ragged_rank = max(1, max_depth - 1) else: ragged_rank = max(1, scalar_depth - 1 - len(inner_shape)) nested_splits = [] values = pylist for dim in range(ragged_rank): nested_splits.append([0]) concatenated_values = [] for row in values: nested_splits[dim].append(nested_splits[dim][-1] + len(row)) concatenated_values.extend(row) values = concatenated_values values = inner_factory(values, dtype=dtype, shape=(len(values),) + inner_shape, name='values') for row_splits in reversed(nested_splits): values = ragged_factory(values, row_splits) return values
Constructs a constant RaggedTensor or RaggedTensorValue. Args: ragged_factory: A factory function with the signature: `ragged_factory(values, row_splits)` inner_factory: A factory function with the signature: `inner_factory(pylist, dtype, shape, name)` pylist: A nested `list`, `tuple` or `np.ndarray`. dtype: Data type for returned value. ragged_rank: Ragged rank for returned value. inner_shape: Inner value shape for returned value. Returns: A value returned by `ragged_factory` or `inner_factory`. Raises: ValueError: If the scalar values in `pylist` have inconsistent nesting depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
github-repos
def _Open(self, path_spec, mode='rb'):
    if not path_spec.HasParent():
        raise errors.PathSpecError(
            'Unsupported path specification without parent.')

    file_object = resolver.Resolver.OpenFileObject(
        path_spec.parent, resolver_context=self._resolver_context)

    try:
        fsapfs_container = pyfsapfs.container()
        fsapfs_container.open_file_object(file_object)
    except:
        file_object.close()
        raise

    self._file_object = file_object
    self._fsapfs_container = fsapfs_container
Opens the file system defined by path specification. Args: path_spec (PathSpec): a path specification. mode (Optional[str])): file access mode. The default is 'rb' read-only binary. Raises: AccessError: if the access to open the file was denied. IOError: if the file system object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
juraj-google-style
def common_vector_root(vec1, vec2):
    root = []

    for v1, v2 in zip(vec1, vec2):
        if v1 == v2:
            root.append(v1)
        else:
            return root

    return root
Return common root of the two vectors. Args: vec1 (list/tuple): First vector. vec2 (list/tuple): Second vector. Usage example:: >>> common_vector_root([1, 2, 3, 4, 5], [1, 2, 8, 9, 0]) [1, 2] Returns: list: Common part of two vectors or blank list.
juraj-google-style
def __init__( self, resolver_context, encryption_method=None, file_object=None): if file_object is not None and encryption_method is None: raise ValueError( 'File-like object provided without corresponding encryption method.') super(EncryptedStream, self).__init__(resolver_context) self._current_offset = 0 self._decrypted_data = b'' self._decrypted_data_offset = 0 self._decrypted_data_size = 0 self._decrypted_stream_size = None self._decrypter = None self._encrypted_data = b'' self._encryption_method = encryption_method self._file_object = file_object self._file_object_set_in_init = bool(file_object) self._path_spec = None self._realign_offset = True
Initializes a file-like object. If the file-like object is chained do not separately use the parent file-like object. Args: resolver_context (Context): resolver context. encryption_method (Optional[str]): method used to the encrypt the data. file_object (Optional[FileIO]): parent file-like object. Raises: ValueError: if file_object provided but encryption_method is not.
juraj-google-style
def optimized_for_xmon(circuit: circuits.Circuit, new_device: Optional[xmon_device.XmonDevice]=None, qubit_map: Callable[([ops.Qid], devices.GridQubit)]=(lambda e: cast(devices.GridQubit, e)), allow_partial_czs: bool=False) -> circuits.Circuit: copy = circuit.copy() opts = (_OPTIMIZERS_PART_CZ if allow_partial_czs else _OPTIMIZERS) for optimizer in opts: optimizer(copy) return circuits.Circuit.from_ops((op.transform_qubits(qubit_map) for op in copy.all_operations()), strategy=circuits.InsertStrategy.EARLIEST, device=(new_device or copy.device))
Optimizes a circuit with XmonDevice in mind. Starts by converting the circuit's operations to the xmon gate set, then begins merging interactions and rotations, ejecting pi-rotations and phasing operations, dropping unnecessary operations, and pushing operations earlier. Args: circuit: The circuit to optimize. new_device: The device the optimized circuit should be targeted at. If set to None, the circuit's current device is used. qubit_map: Transforms the qubits (e.g. so that they are GridQubits). allow_partial_czs: If true, the optimized circuit may contain partial CZ gates. Otherwise all partial CZ gates will be converted to full CZ gates. At worst, two CZ gates will be put in place of each partial CZ from the input. Returns: The optimized circuit.
codesearchnet
def list_adb_devices_by_usb_id():
    out = adb.AdbProxy().devices(['-l'])
    clean_lines = new_str(out, 'utf-8').strip().split('\n')
    results = []
    for line in clean_lines:
        tokens = line.strip().split()
        if ((len(tokens) > 2) and (tokens[1] == 'device')):
            results.append(tokens[2])
    return results
List the usb id of all android devices connected to the computer that are detected by adb. Returns: A list of strings that are android device usb ids. Empty if there's none.
codesearchnet
def isanytargetmethod(object):
    decorators, target = tf_decorator.unwrap(object)
    for decorator in decorators:
        if _inspect.ismethod(decorator.decorated_target):
            return True

    while isinstance(target, functools.partial):
        target = target.func
    return callable(target) and (not _inspect.isfunction(target))
Checks if `object` or a TF Decorator wrapped target contains self or cls. This function could be used along with `tf_inspect.getfullargspec` to determine if the first argument of the `object` argspec is self or cls. If the first argument is self or cls, it needs to be excluded from the argspec when we compare the argspec to the input arguments and, if provided, the tf.function input_signature. Like `tf_inspect.getfullargspec` and python `inspect.getfullargspec`, it does not unwrap python decorators. Args: object: A method, function, or functools.partial, possibly decorated by TFDecorator. Returns: A bool indicating whether `object` or any target along the chain of TF decorators is a method.
github-repos
def ParseCookieRow(self, parser_mediator, query, row, **unused_kwargs):
    query_hash = hash(query)

    cookie_name = self._GetRowValue(query_hash, row, 'name')
    cookie_data = self._GetRowValue(query_hash, row, 'value')

    hostname = self._GetRowValue(query_hash, row, 'host_key')
    if hostname.startswith('.'):
        hostname = hostname[1:]

    httponly = self._GetRowValue(query_hash, row, 'httponly')
    path = self._GetRowValue(query_hash, row, 'path')
    persistent = self._GetRowValue(query_hash, row, 'persistent')
    secure = self._GetRowValue(query_hash, row, 'secure')

    if secure:
        scheme = 'https'
    else:
        scheme = 'http'

    # Build the cookie URL from the scheme, hostname and path extracted above.
    url = '{0:s}://{1:s}{2:s}'.format(scheme, hostname, path)

    event_data = ChromeCookieEventData()
    event_data.cookie_name = cookie_name
    event_data.data = cookie_data
    event_data.host = hostname
    event_data.httponly = bool(httponly)
    event_data.path = path
    event_data.persistent = bool(persistent)
    event_data.query = query
    event_data.secure = bool(secure)
    event_data.url = url

    timestamp = self._GetRowValue(query_hash, row, 'creation_utc')
    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)

    timestamp = self._GetRowValue(query_hash, row, 'last_access_utc')
    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
    parser_mediator.ProduceEventWithEventData(event, event_data)

    timestamp = self._GetRowValue(query_hash, row, 'expires_utc')
    if timestamp:
        date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_EXPIRATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)

    for plugin in self._cookie_plugins:
        if cookie_name != plugin.COOKIE_NAME:
            continue

        try:
            plugin.UpdateChainAndProcess(
                parser_mediator, cookie_data=cookie_data, cookie_name=cookie_name,
                url=url)
        except Exception as exception:
            parser_mediator.ProduceExtractionWarning(
                'plugin: {0:s} unable to parse cookie with error: {1!s}'.format(
                    plugin.NAME, exception))
Parses a cookie row. Args: parser_mediator (ParserMediator): parser mediator. query (str): query that created the row. row (sqlite3.Row): row resulting from the query.
juraj-google-style
def change_subscription(self, topics): if self._user_assignment: raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE) if isinstance(topics, six.string_types): topics = [topics] if self.subscription == set(topics): log.warning("subscription unchanged by change_subscription(%s)", topics) return for t in topics: self._ensure_valid_topic_name(t) log.info('Updating subscribed topics to: %s', topics) self.subscription = set(topics) self._group_subscription.update(topics) for tp in set(self.assignment.keys()): if tp.topic not in self.subscription: del self.assignment[tp]
Change the topic subscription. Arguments: topics (list of str): topics for subscription Raises: IllegalStateError: if assign_from_user has been used already TypeError: if a topic is None or a non-str ValueError: if a topic is an empty string or - a topic name is '.' or '..' or - a topic name does not consist of ASCII-characters/'-'/'_'/'.'
juraj-google-style
def __init__(self, channel): self.ListGroups = channel.unary_unary( "/google.monitoring.v3.GroupService/ListGroups", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupsResponse.FromString, ) self.GetGroup = channel.unary_unary( "/google.monitoring.v3.GroupService/GetGroup", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.GetGroupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2.Group.FromString, ) self.CreateGroup = channel.unary_unary( "/google.monitoring.v3.GroupService/CreateGroup", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.CreateGroupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2.Group.FromString, ) self.UpdateGroup = channel.unary_unary( "/google.monitoring.v3.GroupService/UpdateGroup", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.UpdateGroupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2.Group.FromString, ) self.DeleteGroup = channel.unary_unary( "/google.monitoring.v3.GroupService/DeleteGroup", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.DeleteGroupRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListGroupMembers = channel.unary_unary( "/google.monitoring.v3.GroupService/ListGroupMembers", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupMembersRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupMembersResponse.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def value_container(val):
    container = None
    if not isinstance(val, values_lib.DistributedVariable):
        if hasattr(val, '_distributed_container'):
            container = val._distributed_container()
        elif isinstance(val, composite_tensor.CompositeTensor) and hasattr(val, 'handle') and hasattr(val.handle, '_distributed_container'):
            container = val.handle._distributed_container()
    return container if container is not None else val
Returns the container that this per-replica `value` belongs to. Args: val: A value returned by `call_for_each_replica()` or a variable created in `scope()`. Returns: A container that `value` belongs to. If value does not belong to any container (including the case of container having been destroyed), returns the value itself.
github-repos
def __and__(self, other):
    other = self._cast_to_frameset(other)
    if other is NotImplemented:
        return NotImplemented
    return self.from_iterable(self.items & other.items, sort=True)
Overloads the ``&`` operator. Returns a new :class:`FrameSet` that holds only the frames `self` and `other` have in common. Note: The order of operations is irrelevant: ``(self & other) == (other & self)`` Args: other (:class:`FrameSet`): Returns: :class:`FrameSet`: :class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
juraj-google-style
def prepare_prop_defs(prop_defs, prop_name, cls_names): def get_def(prop_defs, def_fields, default_val=None): rtn_list = [] for fld in def_fields: if prop_defs.get(fld): rtn_list += prop_defs.get(fld) if not rtn_list and default_val: rtn_list.append(default_val) elif rtn_list: try: rtn_list = list(set(rtn_list)) except TypeError as e: new_rtn = [] for item in rtn_list: if isinstance(item, MODULE.rdfclass.RdfClassBase): new_rtn.append(\ "|".join(merge_rdf_list(item['owl_unionOf']))) elif isinstance(item, list): new_rtn.append("|".join(item)) else: new_rtn.append(item) rtn_list = list(set(new_rtn)) new_rtn = [] for item in rtn_list: if "|" in item: new_rtn.append([Uri(domain) \ for domain in item.split("|")]) else: new_rtn.append(Uri(item)) rtn_list = new_rtn return rtn_list required_def_defaults = { Uri('kds_rangeDef'): [{}], Uri('rdfs_range'): [Uri("xsd_string")], Uri('rdfs_domain'): cls_names, Uri('rdfs_label'): [NSM.nouri(prop_name)], Uri('kds_formDefault'): [{ Uri('kds:appliesToClass'): Uri('kdr:AllClasses'), Uri('kds:formFieldName'): "emailaddr", Uri('kds:formLabelName'): [NSM.nouri(prop_name)], Uri('kds:formFieldHelp'): find_values(DESCRIPTION_FIELDS, prop_defs, None), Uri('kds:fieldType'): { Uri('rdf:type'): Uri('kdr:TextField') } }], Uri('kds_propertyValidation'): [], Uri('kds_propertySecurity'): [], Uri('kds_propertyProcessing'): [] } for prop in required_def_defaults: if prop not in prop_defs.keys(): prop_defs[prop] = required_def_defaults[prop] prop_defs['rdfs_domain'] = get_def(prop_defs, DOMAIN_FIELDS, cls_names) prop_defs['rdfs_range'] = get_def(prop_defs, RANGE_FIELDS, Uri('xsd_string')) return prop_defs
Examines and adds any missing defs to the prop_defs dictionary for use with the RdfPropertyMeta.__prepare__ method Args: ----- prop_defs: the defintions from the rdf vocabulary defintion prop_name: the property name cls_names: the name of the associated classes Returns: -------- prop_defs
juraj-google-style
def _items(self, cart_status, category=None): if not isinstance(cart_status, Iterable): cart_status = [cart_status] status_query = ( Q(productitem__cart__status=status) for status in cart_status ) in_cart = Q(productitem__cart__user=self.user) in_cart = in_cart & reduce(operator.__or__, status_query) quantities_in_cart = When( in_cart, then="productitem__quantity", ) quantities_or_zero = Case( quantities_in_cart, default=Value(0), ) products = inventory.Product.objects if category: products = products.filter(category=category) products = products.select_related("category") products = products.annotate(quantity=Sum(quantities_or_zero)) products = products.filter(quantity__gt=0) out = [] for prod in products: out.append(ProductAndQuantity(prod, prod.quantity)) return out
Aggregates the items that this user has purchased. Arguments: cart_status (int or Iterable(int)): etc category (Optional[models.inventory.Category]): the category of items to restrict to. Returns: [ProductAndQuantity, ...]: A list of product-quantity pairs, aggregating like products from across multiple invoices.
juraj-google-style
def add(self, name: str, path_or_url: str) -> Source:
    logger.info('adding source: %s -> %s', name, path_or_url)
    if (name in self.__sources):
        logger.info('name already used by existing source: %s', name)
        raise NameInUseError(name)

    is_url = False
    try:
        scheme = urllib.parse.urlparse(path_or_url).scheme
        is_url = (scheme in ['http', 'https'])
        logger.debug('source determined to be remote: %s', path_or_url)
    except ValueError:
        logger.debug('source determined to be local: %s', path_or_url)

    if is_url:
        url = path_or_url
        # Derive a local directory name from the URL.
        path = url.replace('https://', '')
        path = path.replace('/', '_')
        path = path.replace('.', '_')
        path = os.path.join(self.__path, path)
        shutil.rmtree(path, ignore_errors=True)
        try:
            logger.debug('cloning repository %s to %s', url, path)
            repo = git.Repo.clone_from(url, path)
            logger.debug('cloned repository %s to %s', url, path)
            sha = repo.head.object.hexsha
            version = repo.git.rev_parse(sha, short=8)
        except:
            shutil.rmtree(path, ignore_errors=True)
            logger.error('failed to download remote source to local: %s -> %s', url, path)
            raise IOError("failed to download remote source to local installation: '{}' -> '{}'".format(url, path))
        source = RemoteSource(name, path, url, version)
    else:
        path = os.path.abspath(path_or_url)
        if (not os.path.isdir(path)):
            raise IOError('no directory found at path: {}'.format(path))
        source = LocalSource(name, path)

    self.load(source)
    self.save()
    logger.info('added source: %s', name)
Attempts to register a source provided by a given URL or local path under a given name. Returns: a description of the registered source. Raises: NameInUseError: if an existing source is already registered under the given name. IOError: if no directory exists at the given path. IOError: if downloading the remote source failed. (FIXME)
codesearchnet
def set_name(self, vid, name=None, default=False, disable=False):
    cmds = self.command_builder('name', value=name, default=default,
                                disable=disable)
    return self.configure_vlan(vid, cmds)
Configures the VLAN name EosVersion: 4.13.7M Args: vid (str): The VLAN ID to Configures name (str): The value to configure the vlan name default (bool): Defaults the VLAN ID name disable (bool): Negates the VLAN ID name Returns: True if the operation was successful otherwise False
juraj-google-style
def as_list(self, value):
    if isinstance(value, tensor_lib.Tensor):
        return [value]
    elif isinstance(value, IndexedSlices):
        return [value]
    elif isinstance(value, value_lib.Mirrored):
        return value.values
    else:
        raise ValueError('unwrap: unsupported input type: %s' % type(value))
An utility to convert a `Mirrored`, `Tensor` or `IndexedSlices` to a list. The reason it exists is to provide a uniformed view of returned value of "reduce" calls, especially across tf.function boundaries. Returning `Mirrored` from a tf.function will only evaluate the primary value, which makes collective ops of non-primary device being pruned, and will eventually cause hanging. Args: value: the value to convert, can be one of `Mirrored`, `Tensor` and `IndexedSlices`. Returns: A list of `Tensor` or `IndexedSlices`.
github-repos
def DisplayAccountTree(account, accounts, links, depth=0):
    prefix = '-' * depth * 2
    print '%s%s, %s' % (prefix, account['customerId'], account['name'])
    if account['customerId'] in links:
        for child_link in links[account['customerId']]:
            child_account = accounts[child_link['clientCustomerId']]
            DisplayAccountTree(child_account, accounts, links, depth + 1)
Displays an account tree. Args: account: dict The account to display. accounts: dict Map from customerId to account. links: dict Map from customerId to child links. depth: int Depth of the current account in the tree.
juraj-google-style
def get_keys_from_shelve(file_name, file_location):
    temp_list = list()
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    for key in shelve_store:
        temp_list.append(key)
    shelve_store.close()
    return temp_list
Function to retrieve all keys in a shelve Args: file_name: Shelve storage file name file_location: The location of the file, derived from the os module Returns: a list of the keys
juraj-google-style
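For context on the row above, a small self-contained sketch of the same idea using only the standard library; the store path below is a placeholder, not part of the original code:

    import os
    import shelve

    store_path = os.path.join('/tmp', 'example_store')  # placeholder location
    with shelve.open(store_path) as store:               # creates the shelve if missing
        store['alpha'] = 1
        store['beta'] = 2
    with shelve.open(store_path) as store:
        keys = list(store)                               # same idea as get_keys_from_shelve
    print(keys)                                          # e.g. ['alpha', 'beta']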
def from_proto(cls, struct_def_proto: message.Message, backbone_element_path: Optional[str]=None) -> 'QuantityStructureDataType': struct_type = StructureDataType.from_proto(struct_def_proto=struct_def_proto, backbone_element_path=backbone_element_path, parent_definitions=None) return cls(structure_definition=struct_type.structure_definition, backbone_element_path=struct_type.backbone_element_path, base_type=struct_type.base_type, element_type=struct_type.element_type, _child_defs=struct_type._child_defs, _slices=struct_type._slices, _raw_url=struct_type._raw_url, root_element_definition=struct_type.root_element_definition, cardinality=struct_type.cardinality)
Creates a QuantityStructureDataType from a proto. Args: struct_def_proto: Proto containing information about the structure definition. backbone_element_path: Optional path to the structure def. Returns: A QuantityStructureDataType.
github-repos
def left_margin(self, margin):
    if margin <= 255 and margin >= 0:
        self.send(chr(27)+'I'+chr(margin))
    else:
        raise RuntimeError('Invalid margin parameter.')
Specify the left margin. Args: margin: The left margin, in character width. Must be less than the media's width. Returns: None Raises: RuntimeError: Invalid margin parameter.
juraj-google-style
def set_Tc(self, Tc, T=None): if isinstance(Tc, Iterable): if len(Tc)==len(T): x = np.concatenate(([-ttconf.BIG_NUMBER], T, [ttconf.BIG_NUMBER])) y = np.concatenate(([Tc[0]], Tc, [Tc[-1]])) self.Tc = interp1d(x,y) else: self.logger("need Tc values and Timepoints of equal length",2,warn=True) self.Tc = interp1d([-ttconf.BIG_NUMBER, ttconf.BIG_NUMBER], [1e-5, 1e-5]) else: self.Tc = interp1d([-ttconf.BIG_NUMBER, ttconf.BIG_NUMBER], [Tc+ttconf.TINY_NUMBER, Tc+ttconf.TINY_NUMBER]) self.calc_integral_merger_rate()
initialize the merger model with a coalescent time Args: - Tc: a float or an iterable, if iterable another argument T of same shape is required - T: an array like of same shape as Tc that specifies the time pivots corresponding to Tc Returns: - None
juraj-google-style
def _decode_exp(self, access_token=None):
    c = self.get_credentials()
    jwt = (access_token or c.access_token)
    x = self.decode_jwt_payload(jwt)

    if ('exp' in x):
        try:
            exp = int(x['exp'])
        except ValueError:
            raise PanCloudError('Expiration time (exp) must be an integer')
        else:
            self.jwt_exp = exp
            return exp
    else:
        raise PanCloudError('No exp field found in payload')
Extract exp field from access token. Args: access_token (str): Access token to decode. Defaults to ``None``. Returns: int: JWT expiration in epoch seconds.
codesearchnet
def plot_wigner_seitz(lattice, ax=None, **kwargs): ax, fig, plt = get_ax3d_fig_plt(ax) if "color" not in kwargs: kwargs["color"] = "k" if "linewidth" not in kwargs: kwargs["linewidth"] = 1 bz = lattice.get_wigner_seitz_cell() ax, fig, plt = get_ax3d_fig_plt(ax) for iface in range(len(bz)): for line in itertools.combinations(bz[iface], 2): for jface in range(len(bz)): if iface < jface and any( np.all(line[0] == x) for x in bz[jface]) \ and any(np.all(line[1] == x) for x in bz[jface]): ax.plot(*zip(line[0], line[1]), **kwargs) return fig, ax
Adds the skeleton of the Wigner-Seitz cell of the lattice to a matplotlib Axes Args: lattice: Lattice object ax: matplotlib :class:`Axes` or None if a new figure should be created. kwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to black and linewidth to 1. Returns: matplotlib figure and matplotlib ax
juraj-google-style
async def getNodeByBuid(self, buid):
    node = self.livenodes.get(buid)
    if (node is not None):
        return node

    props = {}
    proplayr = {}
    for layr in self.layers:
        layerprops = (await layr.getBuidProps(buid))
        props.update(layerprops)
        proplayr.update({k: layr for k in layerprops})

    node = s_node.Node(self, buid, props.items(), proplayr=proplayr)

    (await asyncio.sleep(0))

    if (node.ndef is None):
        return None

    self.buidcache.append(node)
    self.livenodes[buid] = node
    return node
Retrieve a node tuple by binary id. Args: buid (bytes): The binary ID for the node. Returns: Optional[s_node.Node]: The node object or None.
codesearchnet
def _ParseCacheEntry( self, parser_mediator, file_object, display_name, block_size): cache_entry, event_data = self._ReadCacheEntry( file_object, display_name, block_size) date_time = dfdatetime_posix_time.PosixTime( timestamp=cache_entry.last_fetched_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data) if cache_entry.last_modified_time: date_time = dfdatetime_posix_time.PosixTime( timestamp=cache_entry.last_modified_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) if cache_entry.expiration_time: date_time = dfdatetime_posix_time.PosixTime( timestamp=cache_entry.expiration_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_EXPIRATION) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a cache entry. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. display_name (str): display name. block_size (int): block size.
juraj-google-style
def dframe(self, dimensions=None, multi_index=False): if dimensions is None: dimensions = [d.name for d in self.dimensions()] else: dimensions = [self.get_dimension(d, strict=True).name for d in dimensions] df = self.interface.dframe(self, dimensions) if multi_index: df = df.set_index([d for d in dimensions if d in self.kdims]) return df
Convert dimension values to DataFrame. Returns a pandas dataframe of columns along each dimension, either completely flat or indexed by key dimensions. Args: dimensions: Dimensions to return as columns multi_index: Convert key dimensions to (multi-)index Returns: DataFrame of columns corresponding to each dimension
juraj-google-style
def remove_child(self, child):
    if (not isinstance(child, Node)):
        raise TypeError('child must be a Node')
    try:
        self.children.remove(child)
        child.parent = None
    except:
        raise RuntimeError('Attempting to remove non-existent child')
Remove child from ``Node`` object Args: ``child`` (``Node``): The child to remove
codesearchnet
def encode(signer, payload, header=None, key_id=None): if header is None: header = {} if key_id is None: key_id = signer.key_id header.update({'typ': 'JWT', 'alg': 'RS256'}) if key_id is not None: header['kid'] = key_id segments = [ _helpers.unpadded_urlsafe_b64encode( json.dumps(header).encode('utf-8') ), _helpers.unpadded_urlsafe_b64encode( json.dumps(payload).encode('utf-8') ), ] signing_input = b'.'.join(segments) signature = signer.sign(signing_input) segments.append( _helpers.unpadded_urlsafe_b64encode(signature) ) return b'.'.join(segments)
Make a signed JWT. Args: signer (google.auth.crypt.Signer): The signer used to sign the JWT. payload (Mapping[str, str]): The JWT payload. header (Mapping[str, str]): Additional JWT header payload. key_id (str): The key id to add to the JWT header. If the signer has a key id it will be used as the default. If this is specified it will override the signer's key id. Returns: bytes: The encoded JWT.
juraj-google-style
def measure(self) -> np.ndarray:
    probs = np.real(bk.evaluate(self.probabilities()))
    indices = np.asarray(list(np.ndindex(*([2] * self.qubit_nb))))
    res = np.random.choice(probs.size, p=probs.ravel())
    res = indices[res]
    return res
Measure the state in the computational basis. Returns: A [2]*bits array of qubit states, either 0 or 1
codesearchnet
def deps_from_pydit_json(requires, runtime=True): parsed = [] for req in requires: name, specs = None, None reqs = req.split(' ') name = reqs[0] if len(reqs) == 2: specs = reqs[1] specs = specs.split(",") specs = [re.sub('[()]', '', spec) for spec in specs] specs = [re.split('([0-9])', spec, 1) for spec in specs] for spec in specs: spec[1:3] = [''.join(spec[1:3])] if specs: for spec in specs: if '!' in spec[0]: parsed.append(['Conflicts', name, '=', spec[1]]) elif specs[0] == '==': parsed.append(['Requires', name, '=', spec[1]]) else: parsed.append(['Requires', name, spec[0], spec[1]]) else: parsed.append(['Requires', name]) if not runtime: for pars in parsed: pars[0] = 'Build' + pars[0] return parsed
Parses dependencies returned by pydist.json; since versions use brackets we can't use pkg_resources to parse, so we need a separate method Args: requires: list of dependencies as written in pydist.json of the package runtime: are the dependencies runtime (True) or build time (False) Returns: List of semi-SPECFILE dependencies (see dependency_to_rpm for format)
juraj-google-style
def _find_test_class():
    try:
        return utils.find_subclass_in_module(base_test.BaseTestClass, sys.modules['__main__'])
    except ValueError:
        logging.exception('Exactly one subclass of `base_test.BaseTestClass` should be in the main file.')
        sys.exit(1)
Finds the test class in a test script. Walk through module members and find the subclass of BaseTestClass. Only one subclass is allowed in a test script. Returns: The test class in the test module. Raises: SystemExit: Raised if the number of test classes is not exactly one.
github-repos
def center_text(text, width=80):
    centered = []
    for line in text.splitlines():
        centered.append(line.center(width))
    return "\n".join(centered)
Center all lines of the text. It is assumed that all line widths are smaller than B{width}, because the line width will not be checked. Args: text (str): Text to wrap. width (int): Maximum number of characters per line. Returns: str: Centered text.
juraj-google-style
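A quick illustration of the helper in the row above; the sample text and width are made up, and the call assumes `center_text` is in scope as defined:

    sample = "short\nslightly longer line"
    print(center_text(sample, width=30))
    # every line is padded on both sides with spaces to a total width of 30 via str.center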
def isregex_expr(expr):
    if not isinstance(expr, str):
        return False
    return all([
        len(expr) > 3,
        expr.startswith('re/'),
        expr.endswith('/')
    ])
Returns ``True`` if the given expression value is a regular expression like string with prefix ``re/`` and suffix ``/``, otherwise ``False``. Arguments: expr (mixed): expression value to test. Returns: bool
juraj-google-style
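A minimal check of the predicate in the row above; the inputs are invented examples and the calls assume `isregex_expr` is in scope as defined:

    assert isregex_expr('re/[a-z]+/') is True   # wrapped in re/.../ and longer than 3 chars
    assert isregex_expr('re/') is False         # too short: length must exceed 3
    assert isregex_expr('[a-z]+') is False      # plain pattern without the re/.../ wrapper
    assert isregex_expr(123) is False           # non-string input is rejected up front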
def init_logger(level, printout=True):
    root_logger = logging.getLogger("boussole")
    root_logger.setLevel(level)

    if not printout:
        from io import StringIO
        dummystream = StringIO()
        handler = logging.StreamHandler(dummystream)
    else:
        handler = logging.StreamHandler()

    handler.setFormatter(
        colorlog.ColoredFormatter(
            '%(asctime)s - %(log_color)s%(message)s',
            datefmt="%H:%M:%S"
        )
    )
    root_logger.addHandler(handler)

    return root_logger
Initialize app logger to configure its level/handler/formatter/etc. Todo: * A means to raise click.Abort or sys.exit when CRITICAL is used; Args: level (str): Level name (``debug``, ``info``, etc.). Keyword Arguments: printout (bool): If False, logs will never be output. Returns: logging.Logger: Application logger.
juraj-google-style
def readpar(par_file, root): par_nml = deepcopy(PAR_DEFAULT) if PAR_DFLT_FILE.is_file(): _enrich_with_par(par_nml, PAR_DFLT_FILE) else: PAR_DFLT_FILE.parent.mkdir(exist_ok=True) f90nml.write(par_nml, str(PAR_DFLT_FILE)) if (not par_file.is_file()): raise NoParFileError(par_file) par_main = f90nml.read(str(par_file)) if ('default_parameters_parfile' in par_main): par_dflt = par_main['default_parameters_parfile'].get('par_name_defaultparameters', 'par_defaults') par_dflt = (root / par_dflt) if (not par_dflt.is_file()): raise NoParFileError(par_dflt) _enrich_with_par(par_nml, par_dflt) _enrich_with_par(par_nml, par_file) par_out = ((root / par_nml['ioin']['output_file_stem']) / '_parameters.dat') if par_out.is_file(): _enrich_with_par(par_nml, par_out) par_out = ((root / par_nml['ioin']['hdf5_output_folder']) / 'parameters.dat') if par_out.is_file(): _enrich_with_par(par_nml, par_out) return par_nml
Read StagYY par file. The namelist is populated in chronological order with: - :data:`PAR_DEFAULT`, an internal dictionary defining defaults; - :data:`PAR_DFLT_FILE`, the global configuration par file; - ``par_name_defaultparameters`` if it is defined in ``par_file``; - ``par_file`` itself; - ``parameters.dat`` if it can be found in the StagYY output directories. Args: par_file (:class:`pathlib.Path`): path of par file. root (:class:`pathlib.Path`): path on which other paths are rooted. This is usually par.parent. Returns: :class:`f90nml.namelist.Namelist`: case insensitive dict of dict of values with first key being the namelist and second key the variables' name.
codesearchnet
def filter_by_months_per_hour(self, months_per_hour): _filt_values = [] _filt_datetimes = [] for i, d in enumerate(self.datetimes): if d in months_per_hour: _filt_datetimes.append(d) _filt_values.append(self._values[i]) return MonthlyPerHourCollection( self.header.duplicate(), _filt_values, _filt_datetimes)
Filter the Data Collection based on a list of months per hour (as strings). Args: months_per_hour: A list of tuples representing months per hour. Each tuple should possess two values: the first is the month and the second is the hour. (eg. (12, 23) = December at 11 PM) Return: A new Data Collection with filtered data
juraj-google-style
def __init__(self, git, rev):
    self.git = git
    self.rev = rev
Create GitTree instance Args: git (dvc.scm.Git): branch:
juraj-google-style
def run_inference(self, batch: Sequence[dict[str, Union[tf.Tensor, torch.Tensor]]], model: Union[AutoModel, TFAutoModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]: inference_args = {} if not inference_args else inference_args if self._inference_fn: return self._inference_fn(batch, model, self._device, inference_args, self._model_uri) if self._framework == 'tf': return _run_inference_tensorflow_keyed_tensor(batch, model, self._device, inference_args, self._model_uri) else: return _run_inference_torch_keyed_tensor(batch, model, self._device, inference_args, self._model_uri)
Runs inferences on a batch of Keyed Tensors and returns an Iterable of Tensors Predictions. This method stacks the list of Tensors in a vectorized format to optimize the inference call. Args: batch: A sequence of Keyed Tensors. These Tensors should be batchable, as this method will call `tf.stack()`/`torch.stack()` and pass in batched Tensors with dimensions (batch_size, n_features, etc.) into the model's predict() function. model: A Tensorflow/PyTorch model. inference_args: Non-batchable arguments required as inputs to the model's inference function. Unlike Tensors in `batch`, these parameters will not be dynamically batched. Returns: An Iterable of type PredictionResult.
github-repos
def _get_connection(self):
    if getattr(self, '_connection', None):
        logger.debug('Connection to sqlite db already exists. Using existing one.')
    else:
        dsn = self._dsn
        if (dsn == 'sqlite://'):
            dsn = ':memory:'
        else:
            # Strip the SQLAlchemy-style 'sqlite:///' prefix to get a filesystem path.
            dsn = dsn.replace('sqlite:///', '')
        logger.debug(
            'Creating new apsw connection.\n dsn: {}, config_dsn: {}'.format(dsn, self._dsn))
        self._connection = apsw.Connection(dsn)
    return self._connection
Returns connection to sqlite db. Returns: connection to the sqlite db that stores mpr data.
codesearchnet
def delete_row_range(self, format_str, start_game, end_game): row_keys = make_single_array( self.tf_table.keys_by_range_dataset( format_str.format(start_game), format_str.format(end_game))) row_keys = list(row_keys) if not row_keys: utils.dbg('No rows left for games %d..%d' % ( start_game, end_game)) return utils.dbg('Deleting %d rows: %s..%s' % ( len(row_keys), row_keys[0], row_keys[-1])) row_keys.reverse() total_keys = len(row_keys) utils.dbg('Deleting total of %d keys' % total_keys) concurrency = min(MAX_BT_CONCURRENCY, multiprocessing.cpu_count() * 2) with multiprocessing.Pool(processes=concurrency) as pool: batches = [] with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar: for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS, row_keys): pbar.update(len(b)) batches.append((self.btspec, b)) if len(batches) >= concurrency: pool.map(_delete_rows, batches) batches = [] pool.map(_delete_rows, batches) batches = []
Delete rows related to the given game range. Args: format_str: a string to `.format()` by the game numbers in order to create the row prefixes. start_game: the starting game number of the deletion. end_game: the ending game number of the deletion.
juraj-google-style
def DecompressMessageList(cls, packed_message_list): compression = packed_message_list.compression if compression == rdf_flows.PackedMessageList.CompressionType.UNCOMPRESSED: data = packed_message_list.message_list elif (compression == rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION): try: data = zlib.decompress(packed_message_list.message_list) except zlib.error as e: raise DecodingError("Failed to decompress: %s" % e) else: raise DecodingError("Compression scheme not supported") try: result = rdf_flows.MessageList.FromSerializedString(data) except rdfvalue.DecodeError: raise DecodingError("RDFValue parsing failed.") return result
Decompress the message data from packed_message_list. Args: packed_message_list: A PackedMessageList rdfvalue with some data in it. Returns: a MessageList rdfvalue. Raises: DecodingError: If decompression fails.
juraj-google-style
def api_representation(self, content_type): payload = dict(Subject=self.subject, Body=dict(ContentType=content_type, Content=self.body)) if (self.sender is not None): payload.update(From=self.sender.api_representation()) if any((isinstance(item, str) for item in self.to)): self.to = [Contact(email=email) for email in self.to] recipients = [contact.api_representation() for contact in self.to] payload.update(ToRecipients=recipients) if self.cc: if any((isinstance(email, str) for email in self.cc)): self.cc = [Contact(email) for email in self.cc] cc_recipients = [contact.api_representation() for contact in self.cc] payload.update(CcRecipients=cc_recipients) if self.bcc: if any((isinstance(email, str) for email in self.bcc)): self.bcc = [Contact(email) for email in self.bcc] bcc_recipients = [contact.api_representation() for contact in self.bcc] payload.update(BccRecipients=bcc_recipients) if self._attachments: payload.update(Attachments=[attachment.api_representation() for attachment in self._attachments]) payload.update(Importance=str(self.importance)) return dict(Message=payload)
Returns the JSON representation of this message required for making requests to the API. Args: content_type (str): Either 'HTML' or 'Text'
codesearchnet
def get_value_at_percentile(self, percentile):
    count_at_percentile = self.get_target_count_at_percentile(percentile)
    total = 0
    for index in range(self.counts_len):
        total += self.get_count_at_index(index)
        if (total >= count_at_percentile):
            value_at_index = self.get_value_from_index(index)
            if percentile:
                return self.get_highest_equivalent_value(value_at_index)
            return self.get_lowest_equivalent_value(value_at_index)
    return 0
Get the value for a given percentile Args: percentile: a float in [0.0..100.0] Returns: the value for the given percentile
codesearchnet
def __init__(self, enum, value=None, tag=enums.Tags.DEFAULT):
    super(Enumeration, self).__init__(tag, enums.Types.ENUMERATION)

    self.value = value
    self.enum = enum
    self.length = Enumeration.LENGTH

    self.validate()
Create an Enumeration. Args: enum (class): The enumeration class of which value is a member (e.g., Tags). Required. value (int): The value of the Enumeration, must be an integer (e.g., Tags.DEFAULT). Optional, defaults to None. tag (Tags): An enumeration defining the tag of the Enumeration. Optional, defaults to Tags.DEFAULT.
juraj-google-style
def has_node_with_value(self, value):
    for node in self.node_list:
        if (node.value == value):
            return True
    else:
        return False
Whether any node in ``self.node_list`` has the value ``value``. Args: value (Any): The value to find in ``self.node_list`` Returns: bool Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> graph = Graph([node_1]) >>> graph.has_node_with_value('One') True >>> graph.has_node_with_value('Foo') False
codesearchnet
def _add_dispatch(x, y, name=None):
    if ops.is_auto_dtype_conversion_enabled():
        return add(x, y, name=name)
    if not isinstance(y, tensor_lib.Tensor) and (not isinstance(y, sparse_tensor.SparseTensor)):
        y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name='y')
    if x.dtype == dtypes.string:
        return gen_math_ops.add(x, y, name=name)
    else:
        return gen_math_ops.add_v2(x, y, name=name)
The operation invoked by the `Tensor.__add__` operator. Purpose in the API: This method is exposed in TensorFlow's API so that library developers can register dispatching for `Tensor.__add__` to allow it to handle custom composite tensors & other custom objects. The API symbol is not intended to be called by users directly and does appear in TensorFlow's generated documentation. Args: x: The left-hand side of the `+` operator. y: The right-hand side of the `+` operator. name: an optional name for the operation. Returns: The result of the elementwise `+` operation.
github-repos
def __init__(self, table, num_oov_buckets, hasher_spec=FastHashSpec, name=None, key_dtype=None): if name: name = name.rstrip('/') if table: if key_dtype is None: key_dtype = table.key_dtype supported_table_key_dtypes = (dtypes.int64, dtypes.string) if table.key_dtype not in supported_table_key_dtypes: raise TypeError(f'Invalid `key_dtype`, expected one of {supported_table_key_dtypes}, received {key_dtype}.') if table.key_dtype.is_integer != key_dtype.is_integer: raise TypeError('Invalid `key dtype`, expected %s but got %s.' % ('integer' if key_dtype.is_integer else 'non-integer', table.key_dtype)) if table.value_dtype != dtypes.int64: raise TypeError('Invalid `value_dtype`: expected int64 but got %s.' % table.value_dtype) self._table = table name = name or self._table.name else: if num_oov_buckets <= 0: raise ValueError('`oov_buckets` must be > 0 if no `table` is supplied.') key_dtype = dtypes.string if key_dtype is None else key_dtype self._table = None name = name or 'hash_bucket' if not key_dtype.is_integer and dtypes.string != key_dtype: raise TypeError(f'Invalid `key_dtype`, expected integer or string, got {key_dtype}.') self._num_oov_buckets = num_oov_buckets if not isinstance(hasher_spec, HasherSpec): raise TypeError(f'`hasher_spec` must be of type HasherSpec, got {type(hasher_spec)}.') self._hasher_spec = hasher_spec if name: self._table_name = name.split('/')[-1] else: self._table_name = None super(IdTableWithHashBuckets, self).__init__(key_dtype, dtypes.int64)
Construct a `IdTableWithHashBuckets` object. Args: table: Table that maps `tf.string` or `tf.int64` keys to `tf.int64` ids. num_oov_buckets: Number of buckets to use for out-of-vocabulary keys. hasher_spec: A `HasherSpec` to specify the hash function to use for assignation of out-of-vocabulary buckets (optional). name: A name for the operation (optional). key_dtype: Data type of keys passed to `lookup`. Defaults to `table.key_dtype` if `table` is specified, otherwise `tf.string`. Must be string or integer, and must be castable to `table.key_dtype`. Raises: ValueError: when `table` in None and `num_oov_buckets` is not positive. TypeError: when `hasher_spec` is invalid.
github-repos
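IdTableWithHashBuckets is an internal class; the public TensorFlow wrapper with the same in-vocabulary/out-of-vocabulary behaviour appears to be tf.lookup.StaticVocabularyTable. A small sketch of the id/bucket split:

import tensorflow as tf

# In-vocabulary words map to their ids; OOV words hash into one of 3 extra buckets.
init = tf.lookup.KeyValueTensorInitializer(
    keys=tf.constant(["emerson", "lake", "palmer"]),
    values=tf.constant([0, 1, 2], dtype=tf.int64))
table = tf.lookup.StaticVocabularyTable(init, num_oov_buckets=3)
print(table.lookup(tf.constant(["lake", "tarkus"])).numpy())  # e.g. [1, <bucket id in 3..5>]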
def users_setPhoto(self, *, image: Union[str, IOBase], **kwargs) -> SlackResponse: self._validate_xoxp_token() return self.api_call("users.setPhoto", files={"image": image}, data=kwargs)
Set the user profile photo Args: image (str): Supply the path of the image you'd like to upload. e.g. 'myimage.png'
juraj-google-style
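A hedged usage sketch for the Slack method above, assuming the v2-style slack.WebClient import path and a user (xoxp) token; the token and image path are placeholders.

from slack import WebClient  # assumption: python-slackclient v2 import path

client = WebClient(token="xoxp-your-user-token")
response = client.users_setPhoto(image="myimage.png")  # local file path, per the docstring above
assert response["ok"]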
def create_box_comments(self, box_key, message, **kwargs): uri = '/'.join([ self.api_uri, self.boxes_suffix, box_key, self.comments_suffix ]) if not (box_key and message): return requests.codes.bad_request, None kwargs.update({'message':message}) new_cmt = StreakComment(**kwargs) code, r_data = self._req('put', uri, new_cmt.to_dict()) return code, r_data
Creates a comment in a box with the provided attributes. Args: box_key: key for the box message: comment message string kwargs: {} see StreakComment object for more information Returns: (status code, comment dict)
juraj-google-style

def Reload(self): with self._generator_mutex: for event in self._generator.Load(): self._ProcessEvent(event) return self
Loads all events added since the last call to `Reload`. If `Reload` was never called, loads all events in the file. Returns: The `EventAccumulator`.
codesearchnet
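A short sketch of the Reload workflow, assuming the TensorBoard EventAccumulator import path below and a hypothetical log directory:

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("/tmp/my_run")  # hypothetical event-file directory
acc.Reload()                           # pull in everything written since the last call
print(acc.Tags())                      # tags only reflect data loaded by Reload()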
def _cast_dict(self, data_dict): for key, value in data_dict.iteritems(): data_dict[key] = self._cast_value(value) if 'resp_body_data' in data_dict: del data_dict['resp_body_data'] return data_dict
Internal method that makes sure any dictionary elements are properly cast into the correct types, instead of just treating everything like a string from the csv file. Args: data_dict: dictionary containing bro log data. Returns: Cleaned Data dict.
juraj-google-style
def create_sns_event(app_name, env, region, rules): session = boto3.Session(profile_name=env, region_name=region) sns_client = session.client('sns') topic_name = rules.get('topic') lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region) topic_arn = get_sns_topic_arn(topic_name=topic_name, account=env, region=region) protocol = 'lambda' statement_id = '{}_sns_{}'.format(app_name, topic_name) principal = 'sns.amazonaws.com' add_lambda_permissions(function=lambda_alias_arn, statement_id=statement_id, action='lambda:InvokeFunction', principal=principal, source_arn=topic_arn, env=env, region=region) sns_client.subscribe(TopicArn=topic_arn, Protocol=protocol, Endpoint=lambda_alias_arn) LOG.debug('SNS Lambda event created') LOG.info('Created SNS event subscription on topic %s', topic_name)
Create SNS lambda event from rules. Args: app_name (str): name of the lambda function env (str): Environment/Account for lambda function region (str): AWS region of the lambda function rules (str): Trigger rules from the settings
codesearchnet
def checkout(request, user_id=None): if (user_id is not None): if request.user.is_staff: user = User.objects.get(id=int(user_id)) else: raise Http404() else: user = request.user current_cart = CartController.for_user(user) if (('fix_errors' in request.GET) and (request.GET['fix_errors'] == 'true')): current_cart.fix_simple_errors() try: current_invoice = InvoiceController.for_cart(current_cart.cart) except ValidationError as ve: return _checkout_errors(request, ve) return redirect('invoice', current_invoice.invoice.id)
Runs the checkout process for the current cart. If the query string contains ``fix_errors=true``, Registrasion will attempt to fix errors preventing the system from checking out, including by cancelling expired discounts and vouchers, and removing any unavailable products. Arguments: user_id (castable to int): If the requesting user is staff, then the user ID can be used to run checkout for another user. Returns: render or redirect: If the invoice is generated successfully, or there's already a valid invoice for the current cart, redirect to ``invoice``. If there are errors when generating the invoice, render ``registrasion/checkout_errors.html`` with the following data:: { "error_list", [str, ...] # The errors to display. }
codesearchnet
def diff_lineMode(self, text1, text2, deadline): (text1, text2, linearray) = self.diff_linesToChars(text1, text2) diffs = self.diff_main(text1, text2, False, deadline) self.diff_charsToLines(diffs, linearray) self.diff_cleanupSemantic(diffs) diffs.append((self.DIFF_EQUAL, '')) pointer = 0 count_delete = 0 count_insert = 0 text_delete = '' text_insert = '' while pointer < len(diffs): if diffs[pointer][0] == self.DIFF_INSERT: count_insert += 1 text_insert += diffs[pointer][1] elif diffs[pointer][0] == self.DIFF_DELETE: count_delete += 1 text_delete += diffs[pointer][1] elif diffs[pointer][0] == self.DIFF_EQUAL: if count_delete >= 1 and count_insert >= 1: subDiff = self.diff_main(text_delete, text_insert, False, deadline) diffs[pointer - count_delete - count_insert : pointer] = subDiff pointer = pointer - count_delete - count_insert + len(subDiff) count_insert = 0 count_delete = 0 text_delete = '' text_insert = '' pointer += 1 diffs.pop() return diffs
Do a quick line-level diff on both strings, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. Args: text1: Old string to be diffed. text2: New string to be diffed. deadline: Time when the diff should be complete by. Returns: Array of changes.
juraj-google-style
def __init__(self, instrumentation_key, wsgi_application, *args, **kwargs): if not instrumentation_key: raise Exception('Instrumentation key was required but not provided') if not wsgi_application: raise Exception('WSGI application was required but not provided') telemetry_channel = kwargs.pop('telemetry_channel', None) if not telemetry_channel: sender = applicationinsights.channel.AsynchronousSender() queue = applicationinsights.channel.AsynchronousQueue(sender) telemetry_channel = applicationinsights.channel.TelemetryChannel(None, queue) self.client = applicationinsights.TelemetryClient(instrumentation_key, telemetry_channel) self.client.context.device.type = "PC" self._wsgi_application = wsgi_application self._common_properties = kwargs.pop('common_properties', {})
Initialize a new instance of the class. Args: instrumentation_key (str). the instrumentation key to use while sending telemetry to the service.\n wsgi_application (func). the WSGI application that we're wrapping.
juraj-google-style
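A hedged sketch of wrapping a plain WSGI callable with the class above; the applicationinsights.requests import path is an assumption based on the Python SDK's layout, and the instrumentation key is a placeholder.

from applicationinsights.requests import WSGIApplication  # assumed public home of the wrapper above

def hello_app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello"]

app = WSGIApplication("00000000-0000-0000-0000-000000000000",  # placeholder instrumentation key
                      hello_app,
                      common_properties={"service": "demo"})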
def __update(self, score, values, error): if self._minimize: if self._best_score is None or score > self._best_score: self._best_score = score self._best_values = values.copy() self._best_error = error self._logger.log( 'debug', 'New best food source memorized: {}'.format( self._best_error ) ) return True elif not self._minimize: if self._best_score is None or score < self._best_score: self._best_score = score self._best_values = values.copy() self._best_error = error self._logger.log( 'debug', 'New best food source memorized: {}'.format( self._best_error ) ) return True return False
Update the best score and values if the given score is better than the current best score Args: score (float): new score to evaluate values (list): new value ranges to evaluate error (float): new fitness function return value to evaluate Returns: bool: True if new score is better, False otherwise
juraj-google-style
def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'): (h, w) = dst_img.shape[:2] return imresize(img, (w, h), return_scale, interpolation)
Resize image to the same size of a given image. Args: img (ndarray): The input image. dst_img (ndarray): The target image. return_scale (bool): Whether to return `w_scale` and `h_scale`. interpolation (str): Same as :func:`resize`. Returns: tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or `resized_img`.
codesearchnet
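A quick sketch with synthetic arrays, assuming the record's helper is exposed as mmcv.imresize_like:

import numpy as np
import mmcv  # assumption: the function above ships as mmcv.imresize_like

src = np.zeros((100, 200, 3), dtype=np.uint8)
dst = np.zeros((50, 75, 3), dtype=np.uint8)
out = mmcv.imresize_like(src, dst)
print(out.shape)  # (50, 75, 3): src resized to dst's height and width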
def ComponentsToPath(components): precondition.AssertIterableType(components, Text) for component in components: if (not component): raise ValueError('Empty path component in: {}'.format(components)) if ('/' in component): raise ValueError("Path component with '/' in: {}".format(components)) if components: return ('/' + '/'.join(components)) else: return ''
Converts a list of path components to a canonical path representation. Args: components: A sequence of path components. Returns: A canonical MySQL path representation.
codesearchnet
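Assuming ComponentsToPath from the record above is in scope, two worked calls:

print(ComponentsToPath(["foo", "bar", "baz"]))  # -> "/foo/bar/baz"
print(ComponentsToPath([]))                     # -> ""
# ComponentsToPath(["foo/bar"]) raises ValueError: components may not contain "/"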
def get_subscribers(object_type: str) -> List[str]: return DB.get_list(_keys.subscribers(object_type))
Get the list of subscribers to events of the object type. Args: object_type (str): Type of object. Returns: List[str], list of subscriber names.
juraj-google-style
def __call__(self, request: beam.Row, *args, **kwargs): try: entity_id = request._asdict()[self.row_key] except KeyError: raise KeyError('Enrichment requests to Vertex AI Feature Store should contain a field: %s in the input `beam.Row` to join the input with fetched response. This is used as the `FeatureViewDataKey` to fetch feature values corresponding to this key.' % self.row_key) try: selector = aiplatform.gapic.FeatureSelector(id_matcher=aiplatform.gapic.IdMatcher(ids=self.feature_ids)) response = self.client.read_feature_values(request=aiplatform.gapic.ReadFeatureValuesRequest(entity_type=self.entity_type_path, entity_id=entity_id, feature_selector=selector)) except NotFound: raise ValueError(_not_found_err_message(self.feature_store_id, self.entity_type_id, entity_id)) response_dict = {} proto_to_dict = proto.Message.to_dict(response.entity_view) for key, msg in zip(response.header.feature_descriptors, proto_to_dict['data']): if msg and 'value' in msg: response_dict[key.id] = list(msg['value'].values())[0] elif self.exception_level == ExceptionLevel.RAISE: raise ValueError(_not_found_err_message(self.feature_store_id, self.entity_type_id, entity_id)) elif self.exception_level == ExceptionLevel.WARN: _LOGGER.warning(_not_found_err_message(self.feature_store_id, self.entity_type_id, entity_id)) return (request, beam.Row(**response_dict))
Fetches feature value for an entity-id from Vertex AI Feature Store (Legacy). Args: request: the input `beam.Row` to enrich.
github-repos
def issorted(list_, op=operator.le): return all((op(list_[ix], list_[(ix + 1)]) for ix in range((len(list_) - 1))))
Determines if a list is sorted Args: list_ (list): op (func): sorted operation (default=operator.le) Returns: bool : True if the list is sorted
codesearchnet
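Assuming issorted from the record above is in scope, a few worked calls:

import operator

print(issorted([1, 2, 2, 3]))               # True (non-decreasing under operator.le)
print(issorted([3, 2, 1]))                  # False
print(issorted([3, 2, 1], op=operator.ge))  # True (non-increasing)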
def _is_current_explicit_device(device_type): device_type = device_type.upper() if device_type not in ['CPU', 'GPU']: raise ValueError('`device_type` should be either "CPU" or "GPU".') device = _get_current_tf_device() return device is not None and device.device_type == device_type.upper()
Check if the current device is explicitly set on the device type specified. Args: device_type: A string containing `GPU` or `CPU` (case-insensitive). Returns: A boolean indicating if the current device scope is explicitly set on the device type. Raises: ValueError: If the `device_type` string indicates an unsupported device.
github-repos
def glyph_has_ink(font: TTFont, name: Text) -> bool: if 'glyf' in font: return ttf_glyph_has_ink(font, name) elif ('CFF ' in font) or ('CFF2' in font): return cff_glyph_has_ink(font, name) else: raise Exception("Could not find 'glyf', 'CFF ', or 'CFF2' table.")
Checks if the specified glyph has any ink. That is, that it has at least one defined contour associated. Composites are considered to have ink if any of their components have ink. Args: font: the font name: The name of the glyph to check for ink. Returns: True if the glyph has at least one contour associated with it.
juraj-google-style
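A hedged sketch using fontTools; the font path is a placeholder and the helper is assumed importable from the record's module:

from fontTools.ttLib import TTFont

font = TTFont("MyFont-Regular.ttf")   # placeholder path to any TrueType/CFF font
print(glyph_has_ink(font, "space"))   # typically False: space carries no contours
print(glyph_has_ink(font, "A"))       # typically True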
def sample_observed_state(self, s: pd.Series) -> Dict: return {n[0]: {i.name: np.random.normal((s[n[0]] * i.mean), i.stdev) for i in n[1]['indicators'].values()} for n in self.nodes(data=True)}
Sample observed state vector. This is the implementation of the emission function. Args: s: Latent state vector. Returns: Observed state vector.
codesearchnet
def _ip_int_from_prefix(self, prefixlen=None): if prefixlen is None: prefixlen = self._prefixlen return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)
Turn the prefix length netmask into a int for comparison. Args: prefixlen: An integer, the prefix length. Returns: An integer.
juraj-google-style
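The bit arithmetic above is easy to check by hand; a standalone worked example for an IPv4 /24, where _ALL_ONES is 2**32 - 1:

ALL_ONES = (1 << 32) - 1          # the class's _ALL_ONES for IPv4
prefixlen = 24
netmask = ALL_ONES ^ (ALL_ONES >> prefixlen)
print(hex(netmask))               # 0xffffff00, i.e. 255.255.255.0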
def RegisterDecoder(cls, decoder): encoding_method = decoder.ENCODING_METHOD.lower() if encoding_method in cls._decoders: raise KeyError( 'Decoder for encoding method: {0:s} already set.'.format( decoder.ENCODING_METHOD)) cls._decoders[encoding_method] = decoder
Registers a decoder for a specific encoding method. Args: decoder (type): decoder class. Raises: KeyError: if the corresponding decoder is already set.
juraj-google-style
def take_bug_reports(ads, test_name=None, begin_time=None, destination=None): if begin_time is None: begin_time = mobly_logger.get_log_file_timestamp() else: begin_time = mobly_logger.sanitize_filename(str(begin_time)) def take_br(test_name, begin_time, ad, destination): ad.take_bug_report(test_name=test_name, begin_time=begin_time, destination=destination) args = [(test_name, begin_time, ad, destination) for ad in ads] utils.concurrent_exec(take_br, args)
Takes bug reports on a list of android devices. If you want to take a bug report, call this function with a list of android_device objects in on_fail. Bug reports will be taken on all the devices in the list concurrently. Bug reports take a relatively long time to take, so use this cautiously. Args: ads: A list of AndroidDevice instances. test_name: Name of the test method that triggered this bug report. If None, the default name "bugreport" will be used. begin_time: timestamp taken when the test started, can be either string or int. If None, the current time will be used. destination: string, path to the directory where the bugreport should be saved.
github-repos
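A hedged sketch of the intended call site, inside a Mobly test class's on_fail hook; self.ads is assumed to be the list of registered AndroidDevice controllers and record is Mobly's test result record.

# Inside a Mobly base_test.BaseTestClass subclass (sketch):
def on_fail(self, record):
    # Concurrently pull bug reports from every attached device for the failed test.
    take_bug_reports(self.ads,
                     test_name=record.test_name,
                     begin_time=record.begin_time)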
def get_nn_images(self, structure, n): return [e['image'] for e in self.get_nn_info(structure, n)]
Get image location of all near neighbors of site with index n in structure. Args: structure (Structure): input structure. n (integer): index of site for which to determine the image location of near neighbors. Returns: images (list of 3D integer array): image locations of near neighbors.
codesearchnet
def grid_reload_from_name(job_name): gk = get_api_client() sites = get_all_sites_obj() jobs = [] for site in [s for s in sites if (s.uid not in gk.excluded_site)]: logger.info(('Reloading %s from %s' % (job_name, site.uid))) _jobs = site.jobs.list(name=job_name, state='waiting,launching,running') if (len(_jobs) == 1): logger.info(('Reloading %s from %s' % (_jobs[0].uid, site.uid))) jobs.append(_jobs[0]) elif (len(_jobs) > 1): raise EnosG5kDuplicateJobsError(site, job_name) return jobs
Reload all running or pending jobs of Grid'5000 with a given name. By default all the sites will be searched for jobs with the name ``job_name``. Using EnOSlib there can be only one job per site with name ``job_name``. Note that it honors the ``excluded_sites`` attribute of the client so the scan can be reduced. Args: job_name (str): the job name Returns: The list of the python-grid5000 jobs retrieved. Raises: EnosG5kDuplicateJobsError: if there are several jobs with the same name on a site.
codesearchnet
def get_relavent_units(self): relavent_units = {} for (location, unit) in self.units.items(): if self.unit_is_related(location, self.worksheet): relavent_units[location] = unit return relavent_units
Retrieves the relevant units for this data block. Returns: All units related to this block.
codesearchnet
def _to_json_like(self, include_defaults): all_attrs = self.properties_with_values(include_defaults=include_defaults) subtype = getattr(self.__class__, '__subtype__', None) if ((subtype is not None) and (subtype != self.__class__.__view_model__)): attrs = {} for (attr, value) in all_attrs.items(): if (attr in self.__class__.__dict__): continue else: attrs[attr] = value else: attrs = all_attrs for (k, v) in attrs.items(): if (isinstance(v, float) and (v == float('inf'))): attrs[k] = None return attrs
Returns a dictionary of the attributes of this object, in a layout corresponding to what BokehJS expects at unmarshalling time. This method does not convert "Bokeh types" into "plain JSON types," for example each child Model will still be a Model, rather than turning into a reference, numpy isn't handled, etc. That's what "json like" means. This method should be considered "private" or "protected", for use internal to Bokeh; use ``to_json()`` instead because it gives you only plain JSON-compatible types. Args: include_defaults (bool) : whether to include attributes that haven't been changed from the default.
codesearchnet
class MgpstrTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES def __init__(self, vocab_file, unk_token='[GO]', bos_token='[GO]', eos_token='[s]', pad_token='[GO]', **kwargs): with open(vocab_file, encoding='utf-8') as vocab_handle: self.vocab = json.load(vocab_handle) self.decoder = {v: k for k, v in self.vocab.items()} super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs) @property def vocab_size(self): return len(self.vocab) def get_vocab(self): vocab = dict(self.vocab).copy() vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text): char_tokens = [] for s in text: char_tokens.extend(s) return char_tokens def _convert_token_to_id(self, token): return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): return self.decoder.get(index) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error('Vocabulary path ({}) should be a directory'.format(save_directory)) return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n') return (vocab_file,)
Construct a MGP-STR char tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. unk_token (`str`, *optional*, defaults to `"[GO]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"[GO]"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"[s]"`): The end of sequence token. pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"[GO]"`): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation.
github-repos
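A short usage sketch for the tokenizer above; the checkpoint name is the published MGP-STR base model and is an assumption here.

from transformers import MgpstrTokenizer

tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")  # assumed checkpoint id
enc = tok("ticket")
print(enc["input_ids"])                             # one id per character
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # back to characters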
def ParseFileObject(self, parser_mediator, file_object): display_name = parser_mediator.GetDisplayName() if (not zipfile.is_zipfile(file_object)): raise errors.UnableToParseFile('[{0:s}] unable to parse file: {1:s} with error: {2:s}'.format(self.NAME, display_name, 'Not a Zip file.')) try: zip_file = zipfile.ZipFile(file_object, 'r', allowZip64=True) self._ProcessZipFileWithPlugins(parser_mediator, zip_file) zip_file.close() except (zipfile.BadZipfile, struct.error) as exception: raise errors.UnableToParseFile('[{0:s}] unable to parse file: {1:s} with error: {2!s}'.format(self.NAME, display_name, exception))
Parses a compound ZIP file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
codesearchnet
def split(self, bitindex): if (bitindex < 0): raise ValueError('bitindex must be larger or equal to 0.') if (bitindex > len(self)): raise ValueError(("bitindex larger than the array's size. Len: %s; bitindex: %s" % (len(self), bitindex))) if (bitindex == 0): return (None, self) if (bitindex == len(self)): return (self, None) left = TDOPromise(self._chain, self._bitstart, bitindex, _parent=self) right = TDOPromise(self._chain, 0, (len(self) - bitindex), _parent=self) self._components = [] self._addsub(left, 0) self._addsub(right, bitindex) return (left, right)
Split a promise into two promises at the provided index. A common operation in JTAG is reading/writing to a register. During the operation, the TMS pin must be low, but during the writing of the last bit, the TMS pin must be high. Requiring all reads or writes to have full arbitrary control over the TMS pin is unrealistic. Splitting a promise into two sub promises is a way to mitigate this issue. The final read bit is its own subpromise that can be associated with a different primitive than the 'rest' of the subpromise. Returns: Two TDOPromise instances: the 'Rest' and the 'Tail'. The 'Rest' is the first chunk of the original promise. The 'Tail' is a single bit sub promise for the final bit in the operation If the 'Rest' would have a length of 0, None is returned
codesearchnet
def HasDateExceptionOn(self, date, exception_type=_EXCEPTION_TYPE_ADD): if date in self.date_exceptions: return exception_type == self.date_exceptions[date][0] return False
Test if this service period has a date exception of the given type. Args: date: a string of form "YYYYMMDD" exception_type: the exception type the date should have. Defaults to _EXCEPTION_TYPE_ADD Returns: True iff this service has service exception of specified type at date.
juraj-google-style
def random_brightness(x, brightness_range, scale=True): if len(brightness_range) != 2: raise ValueError(f'`brightness_range should be tuple or list of two floats. Received: {brightness_range}') u = np.random.uniform(brightness_range[0], brightness_range[1]) return apply_brightness_shift(x, u, scale)
Performs a random brightness shift. DEPRECATED. Args: x: Input tensor. Must be 3D. brightness_range: Tuple of floats; brightness range. scale: Whether to rescale the image such that minimum and maximum values are 0 and 255 respectively. Default: True. Returns: Numpy image tensor. Raises: ValueError if `brightness_range` isn't a tuple.
github-repos
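A minimal sketch; the keras_preprocessing import path is an assumption (the function also exists under tf.keras.preprocessing.image), and the API is marked deprecated in the docstring above.

import numpy as np
from keras_preprocessing.image import random_brightness  # assumed import path

img = np.random.uniform(0, 255, size=(64, 64, 3))
out = random_brightness(img, brightness_range=(0.5, 1.5))  # scale intensities by a random factor
print(out.shape)  # (64, 64, 3)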
def restore(self, x): with tf.name_scope('pad_reduce/restore'): x = tf.scatter_nd(indices=self.nonpad_ids, updates=x, shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0)) return x
Add padding back to the given tensor. Args: x (tf.Tensor): of shape [dim_compressed,...] Returns: a tensor of shape [dim_origin,...] with dim_compressed >= dim_origin. The dim is restored from the original reference tensor
codesearchnet
def _LinearFoldByteStream(self, mapped_value, **unused_kwargs): try: return self._operation.WriteTo(mapped_value) except Exception as exception: error_string = ( 'Unable to write: {0:s} to byte stream with error: {1!s}').format( self._data_type_definition.name, exception) raise errors.FoldingError(error_string)
Folds the data type into a byte stream. Args: mapped_value (object): mapped value. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream.
juraj-google-style
def add_get_parameters(url, parameters, percent_encode=True): url_parts = list(parse.urlparse(url)) query = dict(parse.parse_qs(url_parts[4])) query.update(parameters) if percent_encode: url_parts[4] = parse.urlencode(query) else: url_parts[4] = '&'.join([((key + '=') + value) for (key, value) in query.items()]) return parse.urlunparse(url_parts)
Utility function to add GET parameters to an existing URL. Args: parameters A dictionary of the parameters that should be added. percent_encode Whether the query parameters should be percent encoded. Returns: The updated URL.
codesearchnet
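Assuming the helper above is in scope, a worked call:

url = add_get_parameters("https://example.com/search", {"q": "python 3", "page": "2"})
print(url)  # https://example.com/search?q=python+3&page=2 (parameter order may vary)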
def set_router_id(self, value=None, default=False, disable=False): cmd = self.command_builder('router-id', value=value, default=default, disable=disable) return self.configure_ospf(cmd)
Controls the router-id property for the OSPF process. Args: value (str): The router-id value default (bool): Controls the use of the default keyword disable (bool): Controls the use of the no keyword Returns: bool: True if the commands are completed successfully
codesearchnet
class DacDecoderOutput(ModelOutput): audio_values: Optional[torch.FloatTensor] = None
Args: audio_values (`torch.FloatTensor` of shape `(batch_size, input_length)`, *optional*): Decoded audio values, obtained using the decoder part of Dac.
github-repos
def unique(self, name=None) -> 'DatasetV2': from tensorflow.python.data.ops import unique_op return unique_op._unique(self, name)
A transformation that discards duplicate elements of a `Dataset`. Use this transformation to produce a dataset that contains one instance of each unique element in the input. For example: >>> dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1]) >>> dataset = dataset.unique() >>> sorted([a.item() for a in dataset.as_numpy_iterator()]) [1, 2, 37] Note: This transformation only supports datasets which fit into memory and have elements of either `tf.int32`, `tf.int64` or `tf.string` type. Args: name: (Optional.) A name for the tf.data operation. Returns: A new `Dataset` with the transformation applied as described above.
github-repos