Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def represent_as_tuple(string): keep = (".", "[", "]") return tuple(str_digit_to_int(c) if c not in keep else c for c in string)
Represent a number-string in the form of a tuple of digits. "868.0F" -> (8, 6, 8, '.', 0, 15) Args: string - Number represented as a string of digits. Returns: Number represented as an iterable container of digits >>> represent_as_tuple('868.0F') (8, 6, 8, '.', 0, 15)
juraj-google-style
def _call_for_each_replica(distribution, fn, args, kwargs): run_concurrently = False if not context.executing_eagerly(): ops.get_default_graph().switch_to_thread_local() coord = coordinator.Coordinator(clean_stop_exception_types=(_RequestedStop,)) shared_variable_store = {} devices = distribution.extended.worker_devices thread_local_callables = _get_thread_local_configuration_callable() threads = [] for index in range(len(devices)): variable_creator_fn = shared_variable_creator.make_fn(shared_variable_store, index) t = _MirroredReplicaThread(distribution, coord, index, devices, variable_creator_fn, fn, distribute_utils.caching_scope_local, distribute_utils.select_replica(index, args), distribute_utils.select_replica(index, kwargs), thread_local_callables) threads.append(t) for t in threads: t.start() try: with coord.stop_on_exception(): all_done = False while not all_done and (not coord.should_stop()): done = [] if run_concurrently: for t in threads: t.should_run.set() for t in threads: t.has_paused.wait() t.has_paused.clear() if coord.should_stop(): return None done.append(t.done) else: for t in threads: t.should_run.set() t.has_paused.wait() t.has_paused.clear() if coord.should_stop(): return None done.append(t.done) if coord.should_stop(): return None all_done = all(done) if not all_done: if any(done): raise RuntimeError('Some replicas made a different number of replica_context().merge_call() calls.') merge_args = distribute_utils.regroup(tuple((t.merge_args for t in threads))) merge_kwargs = distribute_utils.regroup(tuple((t.merge_kwargs for t in threads))) mtt_captured_name_scope = threads[0].captured_name_scope mtt_captured_var_scope = threads[0].captured_var_scope mtt_captured_control_deps = set() for t in threads: mtt_captured_control_deps.update(t.captured_control_deps) with ops.name_scope(mtt_captured_name_scope), ops.control_dependencies(mtt_captured_control_deps), variable_scope.variable_scope(mtt_captured_var_scope), _maybe_enter_eager_mode(threads[0].merge_call_entered_in_eager): merge_result = threads[0].merge_fn(distribution, *merge_args, **merge_kwargs) for r, t in enumerate(threads): t.merge_result = distribute_utils.select_replica(r, merge_result) finally: for t in threads: t.should_run.set() coord.join(threads) return distribute_utils.regroup(tuple((t.main_result for t in threads)))
Run `fn` in separate threads, once per replica/worker device. Args: distribution: the DistributionStrategy object. fn: function to run (will be run once per replica, each in its own thread). args: positional arguments for `fn` kwargs: keyword arguments for `fn`. Returns: Merged return value of `fn` across all replicas. Raises: RuntimeError: If fn() calls get_replica_context().merge_call() a different number of times from the available devices.
github-repos
def parse_vlq(self, segment): values = [] (cur, shift) = (0, 0) for c in segment: val = B64[ord(c)] (val, cont) = ((val & 31), (val >> 5)) cur += (val << shift) shift += 5 if (not cont): (cur, sign) = ((cur >> 1), (cur & 1)) if sign: cur = (- cur) values.append(cur) (cur, shift) = (0, 0) if (cur or shift): raise SourceMapDecodeError('leftover cur/shift in vlq decode') return values
Parse a string of VLQ-encoded data. Args: segment: a string containing one base64 VLQ-encoded segment. Returns: a list of integers.
codesearchnet
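As a point of reference, here is a minimal, self-contained sketch of the same base64-VLQ decoding scheme used above. The B64_CHARS table, the decode_vlq name, and the absence of error handling are illustrative assumptions; the real module supplies its own B64 lookup and raises SourceMapDecodeError on leftover state.

import string

# Conventional source-map base64 alphabet: A-Z, a-z, 0-9, +, /
B64_CHARS = string.ascii_uppercase + string.ascii_lowercase + string.digits + "+/"
B64 = {c: i for i, c in enumerate(B64_CHARS)}

def decode_vlq(segment):
    """Decode one base64-VLQ segment into a list of signed integers."""
    values, cur, shift = [], 0, 0
    for c in segment:
        val = B64[c]
        cont, val = val >> 5, val & 31      # bit 5 is the continuation flag
        cur += val << shift
        shift += 5
        if not cont:                        # last 5-bit chunk of this number
            sign, cur = cur & 1, cur >> 1   # least significant bit carries the sign
            values.append(-cur if sign else cur)
            cur, shift = 0, 0
    return values

print(decode_vlq("AAgBC"))  # -> [0, 0, 16, 1]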
def dump_package_data(data, buf, format_=FileFormat.py, skip_attributes=None): if format_ == FileFormat.txt: raise ValueError("'txt' format not supported for packages.") data_ = dict((k, v) for k, v in data.iteritems() if v is not None) data_ = package_serialise_schema.validate(data_) skip = set(skip_attributes or []) items = [] for key in package_key_order: if key not in skip: value = data_.pop(key, None) if value is not None: items.append((key, value)) for key, value in data_.iteritems(): if key not in skip: items.append((key, value)) dump_func = dump_functions[format_] dump_func(items, buf)
Write package data to `buf`. Args: data (dict): Data source - must conform to `package_serialise_schema`. buf (file-like object): Destination stream. format_ (`FileFormat`): Format to dump data in. skip_attributes (list of str): List of attributes to not print.
juraj-google-style
def _GetLoadConfigTimestamp(self, pefile_object): if (not hasattr(pefile_object, 'DIRECTORY_ENTRY_LOAD_CONFIG')): return None timestamp = getattr(pefile_object.DIRECTORY_ENTRY_LOAD_CONFIG.struct, 'TimeDateStamp', 0) return timestamp
Retrieves the timestamp from the Load Configuration directory. Args: pefile_object (pefile.PE): pefile object. Returns: int: load configuration timestamp, or None if not present.
codesearchnet
def on_connected(self, connection): log.info('PikaClient: connected to RabbitMQ') self.connected = True self.in_channel = self.connection.channel(self.on_channel_open)
AMQP connection callback. Creates input channel. Args: connection: AMQP connection
codesearchnet
def count(cls, cur, table: str, where_keys: list=None): if where_keys: (where_clause, values) = cls._get_where_clause_with_values(where_keys) query = cls._count_query_where.format(table, where_clause) (q, t) = (query, values) else: query = cls._count_query.format(table) (q, t) = (query, ()) (yield from cur.execute(q, t)) result = (yield from cur.fetchone()) return int(result[0])
Gives the number of records in the table. Args: cur: a database cursor table: a string indicating the name of the table where_keys (list): optional WHERE conditions used to filter the count Returns: an integer indicating the number of records in the table
codesearchnet
def encipher_vigenere(plaintext, plain_vocab, key): ciphertext = [] layers = [ ShiftEncryptionLayer(plain_vocab, i) for i in range(len(plain_vocab)) ] for i, sentence in enumerate(plaintext): cipher_sentence = [] for j, character in enumerate(sentence): key_idx = key[j % len(key)] encrypted_char = layers[key_idx].encrypt_character(character) cipher_sentence.append(encrypted_char) ciphertext.append(cipher_sentence) return ciphertext
Encrypt plain text with given key. Args: plaintext (list of list of Strings): a list of plain text to encrypt. plain_vocab (list of Integer): unique vocabularies being used. key (list of Integer): key to encrypt cipher using Vigenere table. Returns: ciphertext (list of Strings): encrypted plain text.
juraj-google-style
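For illustration, a stripped-down sketch of the Vigenere step above, assuming each ShiftEncryptionLayer(plain_vocab, i) simply shifts a character by i modulo the vocabulary size (the usual Vigenere behaviour); the function name and vocabulary here are hypothetical.

def encipher_vigenere_ints(plaintext, vocab_size, key):
    """Shift the j-th character of each sentence by key[j % len(key)], modulo vocab_size."""
    ciphertext = []
    for sentence in plaintext:
        cipher_sentence = [(c + key[j % len(key)]) % vocab_size
                           for j, c in enumerate(sentence)]
        ciphertext.append(cipher_sentence)
    return ciphertext

# 26-symbol vocabulary, key [3, 1]:
print(encipher_vigenere_ints([[0, 1, 2, 3]], 26, [3, 1]))  # -> [[3, 2, 5, 4]]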
def ValidateFeedStartAndExpirationDates(self, problems, first_date, last_date, first_date_origin, last_date_origin, today): warning_cutoff = (today + datetime.timedelta(days=60)) if (last_date < warning_cutoff): problems.ExpirationDate(time.mktime(last_date.timetuple()), last_date_origin) if (first_date > today): problems.FutureService(time.mktime(first_date.timetuple()), first_date_origin)
Validate the start and expiration dates of the feed. Issue a warning if it only starts in the future, or if it expires within 60 days. Args: problems: The problem reporter object first_date: A date object representing the first day the feed is active last_date: A date object representing the last day the feed is active first_date_origin: Where the first date comes from, passed to the problem reporter last_date_origin: Where the last date comes from, passed to the problem reporter today: A date object representing the date the validation is being run on Returns: None
codesearchnet
def tags(self, value): if value == self._defaults['tags'] and 'tags' in self._values: del self._values['tags'] else: self._values['tags'] = value
The tags property. Args: value (hash): the property value.
juraj-google-style
def console_set_char_background(con: tcod.console.Console, x: int, y: int, col: Tuple[(int, int, int)], flag: int=BKGND_SET) -> None: lib.TCOD_console_set_char_background(_console(con), x, y, col, flag)
Change the background color of x,y to col using a blend mode. Args: con (Console): Any Console instance. x (int): Character x position from the left. y (int): Character y position from the top. col (Union[Tuple[int, int, int], Sequence[int]]): An (r, g, b) sequence or Color instance. flag (int): Blending mode to use, defaults to BKGND_SET.
codesearchnet
def lattice_2_lmpbox(lattice, origin=(0, 0, 0)): a, b, c = lattice.abc xlo, ylo, zlo = origin xhi = a + xlo m = lattice.matrix xy = np.dot(m[1], m[0] / a) yhi = np.sqrt(b ** 2 - xy ** 2) + ylo xz = np.dot(m[2], m[0] / a) yz = (np.dot(m[1], m[2]) - xy * xz) / (yhi - ylo) zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2) + zlo tilt = None if lattice.is_orthogonal else [xy, xz, yz] rot_matrix = np.linalg.solve([[xhi - xlo, 0, 0], [xy, yhi - ylo, 0], [xz, yz, zhi - zlo]], m) bounds = [[xlo, xhi], [ylo, yhi], [zlo, zhi]] symmop = SymmOp.from_rotation_and_translation(rot_matrix, origin) return LammpsBox(bounds, tilt), symmop
Converts a lattice object to LammpsBox, and calculates the symmetry operation used. Args: lattice (Lattice): Input lattice. origin: A (3,) array/list of floats setting lower bounds of simulation box. Default to (0, 0, 0). Returns: LammpsBox, SymmOp
juraj-google-style
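To make the box-construction arithmetic above concrete, here is a NumPy-only sketch that computes the LAMMPS bounds and tilt factors directly from a 3x3 lattice matrix, without pymatgen's Lattice/LammpsBox/SymmOp objects; the rotation matrix and symmetry operation are omitted, and the function name is hypothetical.

import numpy as np

def lattice_to_lammps_bounds(matrix, origin=(0.0, 0.0, 0.0)):
    """Return [[xlo, xhi], [ylo, yhi], [zlo, zhi]] bounds and [xy, xz, yz] tilt factors."""
    m = np.asarray(matrix, dtype=float)          # rows are the a, b, c lattice vectors
    a, b, c = np.linalg.norm(m, axis=1)
    xlo, ylo, zlo = origin
    xy = np.dot(m[1], m[0]) / a
    xz = np.dot(m[2], m[0]) / a
    yhi = np.sqrt(b ** 2 - xy ** 2) + ylo
    yz = (np.dot(m[1], m[2]) - xy * xz) / (yhi - ylo)
    zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2) + zlo
    bounds = [[xlo, a + xlo], [ylo, yhi], [zlo, zhi]]
    return bounds, [xy, xz, yz]

# Orthogonal 3 x 4 x 5 cell: the tilt factors come out as zeros.
print(lattice_to_lammps_bounds(np.diag([3.0, 4.0, 5.0])))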
def dilated_conv_stack(name, x, mid_channels, output_channels, dilation_rates, activation='relu', dropout=0.0): with tf.variable_scope(name, reuse=tf.AUTO_REUSE): output = 0.0 for (dil_ind, dil_rate) in enumerate(dilation_rates): curr_out = conv_stack(('dil_%d' % dil_ind), x, mid_channels=mid_channels, output_channels=output_channels, dilations=dil_rate, activation=activation, dropout=dropout) output += curr_out return output
Dilated convolutional stack. Features at different rates are computed independently using a 3 layer convolutional stack and added. Args: name: variable scope. x: 5-D Tensor. mid_channels: Number of output channels of the first layer in the conv stack. output_channels: Number of output channels of the last layer. dilation_rates: A list of dilation rates. activation: Can be either "relu" or "gatu" dropout: dropout. Returns: output: 5-D Tensor.
codesearchnet
def id_token_jwt_grant(request, token_uri, assertion): body = {'assertion': assertion, 'grant_type': _JWT_GRANT_TYPE} response_data = _token_endpoint_request(request, token_uri, body) try: id_token = response_data['id_token'] except KeyError as caught_exc: new_exc = exceptions.RefreshError('No ID token in response.', response_data) six.raise_from(new_exc, caught_exc) payload = jwt.decode(id_token, verify=False) expiry = datetime.datetime.utcfromtimestamp(payload['exp']) return (id_token, expiry, response_data)
Implements the JWT Profile for OAuth 2.0 Authorization Grants, but requests an OpenID Connect ID Token instead of an access token. This is a variant on the standard JWT Profile that is currently unique to Google. This was added for the benefit of authenticating to services that require ID Tokens instead of access tokens or JWT bearer tokens. Args: request (google.auth.transport.Request): A callable used to make HTTP requests. token_uri (str): The OAuth 2.0 authorization server's token endpoint URI. assertion (str): JWT token signed by a service account. The token's payload must include a ``target_audience`` claim. Returns: Tuple[str, Optional[datetime], Mapping[str, str]]: The (encoded) Open ID Connect ID Token, expiration, and additional data returned by the endpoint. Raises: google.auth.exceptions.RefreshError: If the token endpoint returned an error.
codesearchnet
def id_transcripts_by_gene(self, build='37'): hgnc_id_transcripts = {} LOG.info("Fetching all id transcripts") for gene_obj in self.hgnc_collection.find({'build': build}): hgnc_id = gene_obj['hgnc_id'] id_transcripts = self.get_id_transcripts(hgnc_id=hgnc_id, build=build) hgnc_id_transcripts[hgnc_id] = id_transcripts return hgnc_id_transcripts
Return a dictionary with hgnc_id as keys and a set of id transcripts as values. Args: build(str) Returns: hgnc_id_transcripts(dict)
juraj-google-style
def _bfs_sort(self, start): pathstates = {} queue = [] queue.append([0, start]) pathstates[start.stateid] = 0 while queue: leaf = queue.pop(0) node = leaf[1] pathlen = leaf[0] for arc in node.arcs: next_state = self.mma[arc.nextstate] if next_state.stateid not in pathstates: queue.append([pathlen + 1, next_state]) pathstates[next_state.stateid] = pathlen + 1 orderedstatesdict = OrderedDict( sorted( pathstates.items(), key=lambda x: x[1], reverse=False)) for state in self.mma.states: orderedstatesdict[state.stateid] = state orderedstates = [x[1] for x in list(orderedstatesdict.items())] return orderedstates
Maintain a map of state distances using BFS. Args: start (fst state): The initial DFA state Returns: list: An ordered list of DFA states by path distance
juraj-google-style
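The core of the method above is an ordinary BFS distance map; a generic, self-contained sketch follows (using collections.deque instead of list.pop(0)). The graph, the neighbors callable, and the function name are illustrative, not part of the automaton API.

from collections import OrderedDict, deque

def bfs_distances(start, neighbors):
    """Map each reachable node to its BFS path length from ``start``."""
    dist = OrderedDict({start: 0})
    queue = deque([start])
    while queue:
        node = queue.popleft()
        for nxt in neighbors(node):
            if nxt not in dist:
                dist[nxt] = dist[node] + 1
                queue.append(nxt)
    return dist

graph = {0: [1, 2], 1: [3], 2: [3], 3: []}
print(bfs_distances(0, lambda n: graph[n]))
# OrderedDict([(0, 0), (1, 1), (2, 1), (3, 2)])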
def validate_id(tx_body): tx_body = rapidjson.loads(rapidjson.dumps(tx_body)) try: proposed_tx_id = tx_body['id'] except KeyError: raise InvalidHash('No transaction id found!') tx_body['id'] = None tx_body_serialized = Transaction._to_str(tx_body) valid_tx_id = Transaction._to_hash(tx_body_serialized) if proposed_tx_id != valid_tx_id: err_msg = ("The transaction's id '{}' isn't equal to " "the hash of its body, i.e. it's not valid.") raise InvalidHash(err_msg.format(proposed_tx_id))
Validate the transaction ID of a transaction. Args: tx_body (dict): The transaction to be validated. Raises: InvalidHash: If the id is missing or does not match the hash of the transaction body.
juraj-google-style
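A hedged sketch of the id-check idea above: blank the id field, serialize the body deterministically, hash it, and compare. The serialization and hash shown (sorted-key JSON, SHA3-256) are assumptions for illustration; the real Transaction._to_str/_to_hash helpers define the canonical form.

import hashlib
import json

def compute_tx_id(tx_body):
    """Hash of the transaction body with its 'id' field blanked out (illustrative canonical form)."""
    body = dict(tx_body, id=None)
    serialized = json.dumps(body, sort_keys=True, separators=(",", ":"))
    return hashlib.sha3_256(serialized.encode()).hexdigest()

tx = {"inputs": [], "outputs": [], "id": None}
tx["id"] = compute_tx_id(tx)
print(tx["id"] == compute_tx_id(tx))  # True: the stored id matches the body hash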
def markdown_to_safe_html(markdown_string): warning = '' if isinstance(markdown_string, six.binary_type): markdown_string_decoded = markdown_string.decode('utf-8') markdown_string = markdown_string_decoded.replace(u'\x00', u'') num_null_bytes = len(markdown_string_decoded) - len(markdown_string) if num_null_bytes: warning = ('<!-- WARNING: discarded %d null bytes in markdown string ' 'after UTF-8 decoding -->\n') % num_null_bytes string_html = markdown.markdown( markdown_string, extensions=['markdown.extensions.tables']) string_sanitized = bleach.clean( string_html, tags=_ALLOWED_TAGS, attributes=_ALLOWED_ATTRIBUTES) return warning + string_sanitized
Convert Markdown to HTML that's safe to splice into the DOM. Arguments: markdown_string: A Unicode string or UTF-8--encoded bytestring containing Markdown source. Markdown tables are supported. Returns: A string containing safe HTML.
juraj-google-style
def _get_example_from_properties(self, spec): local_spec = deepcopy(spec) additional_property = False if 'additionalProperties' in local_spec: additional_property = True if 'properties' not in local_spec: local_spec['properties'] = {} local_spec['properties'].update({ 'any_prop1': local_spec['additionalProperties'], 'any_prop2': local_spec['additionalProperties'], }) del(local_spec['additionalProperties']) required = local_spec.get('required', []) required += ['any_prop1', 'any_prop2'] local_spec['required'] = required example = {} properties = local_spec.get('properties') if properties is not None: required = local_spec.get('required', properties.keys()) for inner_name, inner_spec in properties.items(): if inner_name not in required: continue partial = self.get_example_from_prop_spec(inner_spec) if isinstance(partial, list): partial = partial[0] example[inner_name] = partial return example, additional_property
Get example from the properties of an object defined inline. Args: spec: property specification you want an example of. Returns: An example for the given spec A boolean, whether we had additionalProperties in the spec, or not
juraj-google-style
class WhisperProcessor(ProcessorMixin): feature_extractor_class = 'WhisperFeatureExtractor' tokenizer_class = 'WhisperTokenizer' def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) self.current_processor = self.feature_extractor self._in_target_context_manager = False def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True): return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps) def __call__(self, *args, **kwargs): if self._in_target_context_manager: return self.current_processor(*args, **kwargs) audio = kwargs.pop('audio', None) sampling_rate = kwargs.pop('sampling_rate', None) text = kwargs.pop('text', None) if len(args) > 0: audio = args[0] args = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.') if audio is not None: inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif audio is None: return encodings else: inputs['labels'] = encodings['input_ids'] return inputs def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) def get_prompt_ids(self, text: str, return_tensors='np'): return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single processor. [`WhisperProcessor`] offers all the functionalities of [`WhisperFeatureExtractor`] and [`WhisperTokenizer`]. See the [`~WhisperProcessor.__call__`] and [`~WhisperProcessor.decode`] for more information. Args: feature_extractor (`WhisperFeatureExtractor`): An instance of [`WhisperFeatureExtractor`]. The feature extractor is a required input. tokenizer (`WhisperTokenizer`): An instance of [`WhisperTokenizer`]. The tokenizer is a required input.
github-repos
def add_read( self, read_tuple_id, bases, qualities, segments, ): assert type(bases) is str, "Wrong type of bases: '{}'".format(bases) assert type(qualities) is str, "Wrong type of qualities: '{}'".format(qualities) assert type(segments) is tuple or type(segments) is list if self.current_read_tuple_id != read_tuple_id: self.flush_read_tuple() self.current_read_tuple_id = read_tuple_id self.seqs_bases.append(bases) self.seqs_qualities.append(qualities) self.segments.extend(segments)
Add a new read to the current buffer. If it is a new read tuple (detected from ID), the buffer will be flushed. Args: read_tuple_id (int): ID of the read tuple. bases (str): Sequence of bases. qualities (str): Sequence of FASTQ qualities. segments (list of rnftools.rnfformat.segment): List of segments constituting the read.
juraj-google-style
def bullet_base_pose_to_world_pose(self, pose_in_base): pose_in_base = T.pose2mat(pose_in_base) base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[0]) base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[1]) base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world)) pose_in_world = T.pose_in_A_to_pose_in_B( pose_A=pose_in_base, pose_A_in_B=base_pose_in_world ) return T.mat2pose(pose_in_world)
Convert a pose in the base frame to a pose in the world frame. Args: pose_in_base: a (pos, orn) tuple. Returns: pose_in world: a (pos, orn) tuple.
juraj-google-style
def GetUpdates(self, source, search_base, search_filter, search_scope, since): if self.conf.get('ad'): self.attrs.append('whenChanged') else: self.attrs.append('modifyTimestamp') if since is not None: ts = self.FromTimestampToLdap(since) if self.conf.get('ad'): ts = int(ts.rstrip('.0Z')) + 1 ts = '%s.0Z' % ts search_filter = '(&%s(whenChanged>=%s))' % (search_filter, ts) else: ts = int(ts.rstrip('Z')) + 1 ts = '%sZ' % ts search_filter = '(&%s(modifyTimestamp>=%s))' % (search_filter, ts) if search_scope == 'base': search_scope = ldap.SCOPE_BASE elif search_scope in ['one', 'onelevel']: search_scope = ldap.SCOPE_ONELEVEL elif search_scope in ['sub', 'subtree']: search_scope = ldap.SCOPE_SUBTREE else: raise error.ConfigurationError('Invalid scope: %s' % search_scope) source.Search(search_base=search_base, search_filter=search_filter, search_scope=search_scope, attrs=self.attrs) max_ts = None data_map = self.CreateMap() for obj in source: for field in self.essential_fields: if field not in obj: logging.warn('invalid object passed: %r not in %r', field, obj) raise ValueError('Invalid object passed: %r', obj) if self.conf.get('ad'): obj_ts = self.FromLdapToTimestamp(obj['whenChanged'][0]) else: try: obj_ts = self.FromLdapToTimestamp(obj['modifyTimestamp'][0]) except KeyError: obj_ts = 0 if max_ts is None or obj_ts > max_ts: max_ts = obj_ts try: if not data_map.Add(self.Transform(obj)): logging.info('could not add obj: %r', obj) except AttributeError as e: logging.warning('error %r, discarding malformed obj: %r', str(e), obj) self.PostProcess(data_map, source, search_filter, search_scope) data_map.SetModifyTimestamp(max_ts) return data_map
Get updates from a source. Args: source: a data source search_base: the LDAP base of the tree search_filter: the LDAP object filter search_scope: the LDAP scope filter, one of 'base', 'one', or 'sub'. since: a timestamp to get updates since (None for 'get everything') Returns: a tuple containing the map of updates and a maximum timestamp Raises: error.ConfigurationError: scope is invalid ValueError: an object in the source map is malformed
github-repos
def hgnc_id(self, hgnc_symbol, build='37'): query = {'hgnc_symbol': hgnc_symbol, 'build': build} projection = {'hgnc_id': 1, '_id': 0} res = self.hgnc_collection.find(query, projection) if (res.count() > 0): return res[0]['hgnc_id'] else: return None
Query the genes with a hgnc symbol and return the hgnc id Args: hgnc_symbol(str) build(str) Returns: hgnc_id(int)
codesearchnet
def intrusion_set(self, name, **kwargs): group_obj = IntrusionSet(name, **kwargs) return self._group(group_obj)
Add Intrusion Set data to Batch object. Args: name (str): The name for this Group. date_added (str, kwargs): The date timestamp the Group was created. xid (str, kwargs): The external id for this Group. Returns: obj: An instance of IntrusionSet.
juraj-google-style
def copy_code(source: message.Message, target: message.Message) -> None: if not fhir_types.is_type_or_profile_of_code(source.DESCRIPTOR): raise fhir_errors.InvalidFhirError(f'Source: {source.DESCRIPTOR.full_name} is not type or profile of Code.') if not fhir_types.is_type_or_profile_of_code(target.DESCRIPTOR): raise fhir_errors.InvalidFhirError(f'Target: {target.DESCRIPTOR.full_name} is not type or profile of Code.') if proto_utils.are_same_message_type(source.DESCRIPTOR, target.DESCRIPTOR): target.CopyFrom(source) return source_value_field = source.DESCRIPTOR.fields_by_name.get('value') target_value_field = target.DESCRIPTOR.fields_by_name.get('value') if source_value_field is None or target_value_field is None: raise fhir_errors.InvalidFhirError(f'Unable to copy code from {source.DESCRIPTOR.full_name} to {target.DESCRIPTOR.full_name}.') proto_utils.copy_common_field(source, target, 'id') proto_utils.copy_common_field(source, target, 'extension') if source_value_field.type not in _CODE_TYPES or target_value_field.type not in _CODE_TYPES: raise ValueError(f'Unable to copy from {source.DESCRIPTOR.full_name} to {target.DESCRIPTOR.full_name}. Must have a field of TYPE_ENUM or TYPE_STRING.') source_value = proto_utils.get_value_at_field(source, source_value_field) if source_value_field.type == target_value_field.type: proto_utils.set_value_at_field(target, target_value_field, source_value) elif source_value_field.type == descriptor.FieldDescriptor.TYPE_STRING: source_enum_value = code_string_to_enum_value_descriptor(source_value, target_value_field.enum_type) proto_utils.set_value_at_field(target, target_value_field, source_enum_value.number) elif source_value_field.type == descriptor.FieldDescriptor.TYPE_ENUM: source_string_value = enum_value_descriptor_to_code_string(source_value_field.enum_type.values_by_number[source_value]) proto_utils.set_value_at_field(target, target_value_field, source_string_value) else: raise ValueError(f'Unexpected generic value field type: {source_value_field.type}. Must be a field of TYPE_ENUM or TYPE_STRING in order to copy.')
Adds all fields from source to target. Args: source: The FHIR Code instance to copy from. target: The target FHIR Code instance to copy to.
github-repos
def invoke_process_element(self, sdf_invoker, output_processor, element, restriction, watermark_estimator_state, *args, **kwargs): assert isinstance(sdf_invoker, DoFnInvoker) class CheckpointState(object): def __init__(self): self.checkpointed = None self.residual_restriction = None checkpoint_state = CheckpointState() def initiate_checkpoint(): with self._checkpoint_lock: if checkpoint_state.checkpointed: return checkpoint_state.checkpointed = object() split = sdf_invoker.try_split(0) if split: _, checkpoint_state.residual_restriction = split else: checkpoint_state.checkpointed = None output_processor.reset() Timer(self._max_duration, initiate_checkpoint).start() sdf_invoker.invoke_process(element, additional_args=args, restriction=restriction, watermark_estimator_state=watermark_estimator_state) assert output_processor.output_iter is not None output_count = 0 process_continuation = None for output in output_processor.output_iter: assert not process_continuation if isinstance(output, ProcessContinuation): initiate_checkpoint() process_continuation = output continue yield output output_count += 1 if self._max_num_outputs and output_count >= self._max_num_outputs: initiate_checkpoint() result = SDFProcessElementInvoker.Result(residual_restriction=checkpoint_state.residual_restriction) if checkpoint_state.residual_restriction else SDFProcessElementInvoker.Result() yield result
Invokes `process()` method of a Splittable `DoFn` for a given element. Args: sdf_invoker: a `DoFnInvoker` for the Splittable `DoFn`. element: the element to process Returns: a `SDFProcessElementInvoker.Result` object.
github-repos
def join(table1, table2, on=None, how='inner', name=None): if (how not in ('inner', 'left')): raise ItsdbError("Only 'inner' and 'left' join methods are allowed.") on = _join_pivot(on, table1, table2) fields = _RelationJoin(table1.fields, table2.fields, on=on) get_key = (lambda rec: tuple((rec.get(k) for k in on))) key_indices = set((table2.fields.index(k) for k in on)) right = defaultdict(list) for rec in table2: right[get_key(rec)].append([c for (i, c) in enumerate(rec) if (i not in key_indices)]) rfill = [f.default_value() for f in table2.fields if (f.name not in on)] joined = [] for lrec in table1: k = get_key(lrec) if ((how == 'left') or (k in right)): joined.extend(((lrec + rrec) for rrec in right.get(k, [rfill]))) return Table(fields, joined)
Join two tables and return the resulting Table object. Fields in the resulting table have their names prefixed with their corresponding table name. For example, when joining `item` and `parse` tables, the `i-input` field of the `item` table will be named `item:i-input` in the resulting Table. Pivot fields (those in *on*) are only stored once without the prefix. Both inner and left joins are possible by setting the *how* parameter to `inner` and `left`, respectively. .. warning:: Both *table2* and the resulting joined table will exist in memory for this operation, so it is not recommended for very large tables on low-memory systems. Args: table1 (:class:`Table`): the left table to join table2 (:class:`Table`): the right table to join on (str): the shared key to use for joining; if `None`, find shared keys using the schemata of the tables how (str): the method used for joining (`"inner"` or `"left"`) name (str): the name assigned to the resulting table
codesearchnet
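The inner/left semantics described above can be shown with plain dicts; this sketch is not the itsdb Table/Fields API, just the same indexing idea: build an index of the right table on the pivot key, then emit one combined row per match (and, for a left join, keep unmatched left rows).

from collections import defaultdict

def simple_join(left, right, on, how="inner"):
    """Join two lists of dicts on a single shared key (illustration only)."""
    index = defaultdict(list)
    for row in right:
        index[row[on]].append({k: v for k, v in row.items() if k != on})
    joined = []
    for lrow in left:
        matches = index.get(lrow[on])
        if matches is None:
            if how == "left":
                joined.append(dict(lrow))   # keep unmatched left rows
            continue
        for rrow in matches:
            joined.append({**lrow, **rrow})
    return joined

items  = [{"i-id": 1, "i-input": "dogs bark"}, {"i-id": 2, "i-input": "cats sleep"}]
parses = [{"i-id": 1, "p-id": 10}]
print(simple_join(items, parses, on="i-id", how="inner"))  # only i-id 1 survives
print(simple_join(items, parses, on="i-id", how="left"))   # i-id 2 kept without p-id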
def sample(input_placeholder, logits, seed=None, max_length=1024, temperature=1.0): assert (temperature > 0), 'Temperature must be greater than 0.' if (not seed): seed = chr((ord('A') + random.randint(0, 25))) result = '' recurrent_runner = pt.train.RecurrentRunner() recurrent_runner.reset() for c in seed[:(- 1)]: recurrent_runner.run([logits], {input_placeholder: data_utils.convert_to_int(c)}) result += c ci = ord(seed[(- 1)]) while ((len(result) < max_length) and (ci != data_utils.EOS)): result += chr(ci) logit_result = recurrent_runner.run([logits], {input_placeholder: ci})[0][0] logit_result /= temperature logit_result -= logit_result.max() distribution = numpy.exp(logit_result) distribution /= distribution.sum() distribution -= 1e-08 ci = numpy.argmax(numpy.random.multinomial(1, distribution)) result += chr(ci) return result
Samples from the LSTM model. Sampling is done by first running either the seed or an arbitrary character through the model and then drawing the next character from the probability distribution definted by `softmax`. Args: input_placeholder: A placeholder that expects a scalar feed. logits: The logits. This works with the logits so that it can apply the temperature. seed: Either a string of characters to prime the network or None. max_length: The maximum length to draw in case EOS is not reached. temperature: A value that is used to renormalize the inputs. A higher value selects less likely choices. Returns: A string that was sampled from the model.
codesearchnet
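The temperature trick in the sampler above (divide the logits by the temperature, renormalize with a softmax, then draw) can be shown with NumPy alone; the function name and seed are illustrative and no recurrent model is involved.

import numpy as np

def sample_from_logits(logits, temperature=1.0, rng=None):
    """Draw one index from softmax(logits / temperature)."""
    rng = rng or np.random.default_rng(0)
    scaled = np.asarray(logits, dtype=np.float64) / temperature
    scaled -= scaled.max()              # subtract max for numerical stability
    probs = np.exp(scaled)
    probs /= probs.sum()
    return rng.choice(len(probs), p=probs)

logits = [2.0, 1.0, 0.1]
print(sample_from_logits(logits, temperature=0.5))  # low temperature: sharper, favours index 0
print(sample_from_logits(logits, temperature=2.0))  # high temperature: flatter, more random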
def aside_view_declaration(self, view_name): if (view_name in self._combined_asides): return getattr(self, self._combined_asides[view_name]) else: return None
Find and return a function object if one is an aside_view for the given view_name Aside methods declare their view provision via @XBlockAside.aside_for(view_name) This function finds those declarations for a block. Arguments: view_name (string): the name of the view requested. Returns: either the function or None
codesearchnet
def get_suffixes(arr): arr = tuple(arr) return (arr[i:] for i in range(len(arr)))
Returns all possible suffixes of an array (lazily evaluated) Args: arr: input array Returns: Array of all possible suffixes (as tuples)
juraj-google-style
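A quick usage check of the generator version above, redefined here so the snippet runs on its own:

def get_suffixes(arr):
    arr = tuple(arr)
    return (arr[i:] for i in range(len(arr)))   # lazy: nothing is built until iterated

print(list(get_suffixes("abc")))  # [('a', 'b', 'c'), ('b', 'c'), ('c',)]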
def sql_column_like_drug(self, column_name: str) -> str: clauses = ['{col} LIKE {fragment}'.format(col=column_name, fragment=sql_string_literal(f)) for f in self.sql_like_fragments] return '({})'.format(' OR '.join(clauses))
Returns SQL like .. code-block:: sql (column_name LIKE '%drugname1%' OR column_name LIKE '%drugname2%') for the drug names that this Drug object knows about. Args: column_name: column name, pre-escaped if necessary Returns: SQL fragment as above
codesearchnet
def main(): parser = argparse.ArgumentParser(description='Cherry picking automation.') parser.add_argument('--version', help='<new_major_ver>.<new_minor_ver>.<new_patch_ver>', default='') parser.add_argument('--nightly', help='disable the service provisioning step', action='store_true') args = parser.parse_args() check_all_files() old_version = get_current_semver_version() if args.nightly: if args.version: new_version = Version.parse_from_string(args.version, NIGHTLY_VERSION) new_version.set_identifier_string('-dev' + time.strftime('%Y%m%d')) else: new_version = Version(old_version.major, str(old_version.minor), old_version.patch, '-dev' + time.strftime('%Y%m%d'), NIGHTLY_VERSION) else: new_version = Version.parse_from_string(args.version, SNAPSHOT_VERSION) update_tf_version_bzl(old_version, new_version) update_bazelrc(old_version, new_version) update_readme(old_version, new_version) print('Major: %s -> %s' % (old_version.major, new_version.major)) print('Minor: %s -> %s' % (old_version.minor, new_version.minor)) print('Patch: %s -> %s\n' % (old_version.patch, new_version.patch)) check_for_old_version(old_version, new_version)
This script updates all instances of version in the tensorflow directory. Requirements: version: The version tag OR nightly: Create a nightly tag with current date Raises: RuntimeError: If the script is not being run from tf source dir
github-repos
def validate(self, institute, case, user, link, variant, validate_type): if not validate_type in SANGER_OPTIONS: LOG.warning("Invalid validation string: %s", validate_type) LOG.info("Validation options: %s", ', '.join(SANGER_OPTIONS)) return updated_variant = self.variant_collection.find_one_and_update( {'_id': variant['_id']}, {'$set': {'validation': validate_type}}, return_document=pymongo.ReturnDocument.AFTER ) self.create_event( institute=institute, case=case, user=user, link=link, category='variant', verb='validate', variant=variant, subject=variant['display_name'], ) return updated_variant
Mark validation status for a variant. Arguments: institute (dict): A Institute object case (dict): Case object user (dict): A User object link (str): The url to be used in the event variant (dict): A variant object validate_type(str): The outcome of validation. choices=('True positive', 'False positive') Returns: updated_variant(dict)
juraj-google-style
def parse(cls, args): parsed = {} try: (options, args) = cls.optparser.parse_args(args) except OptionParsingError as e: raise ParseError(e.msg, cls.optparser.format_help()) except OptionParsingExit as e: return None parsed['label'] = options.label parsed['can_notify'] = options.can_notify parsed['name'] = options.name parsed['tags'] = options.tags parsed["command_type"] = "HadoopCommand" parsed['print_logs'] = options.print_logs parsed['print_logs_live'] = options.print_logs_live parsed['pool'] = options.pool if len(args) < 2: raise ParseError("Need at least two arguments", cls.usage) subcmd = args.pop(0) if subcmd not in cls.subcmdlist: raise ParseError("First argument must be one of <%s>" % "|".join(cls.subcmdlist)) parsed["sub_command"] = subcmd parsed["sub_command_args"] = " ".join("'" + str(a) + "'" for a in args) return parsed
Parse command line arguments to construct a dictionary of command parameters that can be used to create a command Args: `args`: sequence of arguments Returns: Dictionary that can be used in create method Raises: ParseError: when the arguments are not correct
juraj-google-style
def filter_by_analysis_period(self, analysis_period): self._check_analysis_period(analysis_period) _filtered_data = self.filter_by_doys(analysis_period.doys_int) _filtered_data.header._analysis_period = analysis_period return _filtered_data
Filter the Data Collection based on an analysis period. Args: analysis period: A Ladybug analysis period Return: A new Data Collection with filtered data
juraj-google-style
def from_data(cls, data): obj = cls() with contextlib.closing(BytesIO(data)) as file_handle: obj.load_file(file_handle) return obj
Load an FCS file from a bytes-like object. Args: data: buffer containing contents of an FCS file. Returns: FCSParser instance with data loaded
codesearchnet
def restrict_with(self, expr: str, error_tag: str = None, error_message: str = None) -> None: def parse(x: str) -> Number: res = self.parser(x) if res is None: raise InvalidArgument(expr) return res def simpl(rng: List[Number]) -> List[Number]: return ([rng[0]] if rng[0] == rng[1] else rng) def to_num(xs): return [parse(x) for x in xs] lo = self.intervals[0][0] hi = self.intervals[-1][-1] ran = [] for p in [p.strip() for p in expr.split("|")]: r = [i.strip() for i in p.split("..")] if len(r) > 2: raise InvalidArgument(expr) ran.append(r) if ran[0][0] != "min": lo = parse(ran[0][0]) if ran[-1][-1] != "max": hi = parse(ran[-1][-1]) self.intervals = ( [simpl([lo, hi])] if len(ran) == 1 else ( [simpl([lo, parse(ran[0][-1])])] + [to_num(r) for r in ran[1:-1]] + [simpl([parse(ran[-1][0]), hi])])) if error_tag: self.error_tag = error_tag if error_message: self.error_message = error_message
Combine the receiver with new intervals. Args: expr: "range" or "length" expression. error_tag: error tag of the new expression. error_message: error message for the new expression. Raises: InvalidArgument: If parsing of `expr` fails.
juraj-google-style
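The range-expression syntax handled above ("min..10 | 15 | 20..max", split on "|" and ".."), shown as a simplified standalone parser; it skips the receiver combination, error tags, and exception handling of restrict_with, and the names are hypothetical.

def parse_ranges(expr, lo, hi, parse=int):
    """Parse a YANG-style range expression into a list of intervals."""
    intervals = []
    for part in (p.strip() for p in expr.split("|")):
        ends = [e.strip() for e in part.split("..")]
        if len(ends) == 1:
            ends = ends * 2                      # a single value is its own interval
        left = lo if ends[0] == "min" else parse(ends[0])
        right = hi if ends[1] == "max" else parse(ends[1])
        intervals.append([left] if left == right else [left, right])
    return intervals

print(parse_ranges("min..10 | 15 | 20..max", 0, 255))  # [[0, 10], [15], [20, 255]]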
def from_pandas(cls, df, block_partitions_cls): new_index = df.index new_columns = df.columns new_dtypes = df.dtypes new_data = block_partitions_cls.from_pandas(df) return cls(new_data, new_index, new_columns, dtypes=new_dtypes)
Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame. Args: cls: DataManager object to convert the DataFrame to. df: Pandas DataFrame object. block_partitions_cls: BlockPartitions object to store partitions Returns: Returns DataManager containing data from the Pandas DataFrame.
juraj-google-style
def sharded_filename(filename_tensor: tensor_lib.Tensor, shard: int, num_shards: tensor_lib.Tensor) -> tensor_lib.Tensor: return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)
Append sharding information to a filename. Args: filename_tensor: A string tensor. shard: Integer. The shard for the filename. num_shards: An int Tensor for the number of shards. Returns: A string tensor.
github-repos
def _contains_nd(nodes, point): min_vals = np.min(nodes, axis=1) if (not np.all((min_vals <= point))): return False max_vals = np.max(nodes, axis=1) if (not np.all((point <= max_vals))): return False return True
Predicate indicating if a point is within a bounding box. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: nodes (numpy.ndarray): A set of points. point (numpy.ndarray): A 1D NumPy array representing a point in the same dimension as ``nodes``. Returns: bool: Indicating containment.
codesearchnet
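The containment predicate above in a few self-contained lines; as in the original, nodes stores one point per column, so the per-axis min/max are taken along axis 1. The Fortran fast path mentioned in the docstring is not reproduced, and the function name is illustrative.

import numpy as np

def contains_nd(nodes, point):
    """True if ``point`` lies inside the axis-aligned bounding box of ``nodes``."""
    return bool(np.all(nodes.min(axis=1) <= point) and np.all(point <= nodes.max(axis=1)))

nodes = np.asfortranarray([[0.0, 1.0, 2.0],
                           [0.0, 3.0, 1.0]])     # columns are 2-D points
print(contains_nd(nodes, np.array([1.5, 0.5])))  # True
print(contains_nd(nodes, np.array([3.0, 0.5])))  # False (x = 3.0 is outside [0, 2])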
def dropout(inputs, keep_prob=0.5, is_training=True, scope=None): if is_training and keep_prob > 0: with tf.name_scope(scope, 'Dropout', [inputs]): return tf.nn.dropout(inputs, keep_prob) else: return inputs
Returns a dropout layer applied to the input. Args: inputs: the tensor to pass to the Dropout layer. keep_prob: the probability of keeping each input unit. is_training: whether or not the model is in training mode. If so, dropout is applied and values scaled. Otherwise, inputs is returned. scope: Optional scope for name_scope. Returns: a tensor representing the output of the operation.
juraj-google-style
def AddCustomJsonFieldMapping(message_type, python_name, json_name, package=None): if (not issubclass(message_type, messages.Message)): raise exceptions.TypecheckError(('Cannot set JSON field mapping for non-message "%s"' % message_type)) try: _ = message_type.field_by_name(python_name) except KeyError: raise exceptions.InvalidDataError(('Field %s not recognized for type %s' % (python_name, message_type))) field_mappings = _JSON_FIELD_MAPPINGS.setdefault(message_type, {}) _CheckForExistingMappings('field', message_type, python_name, json_name) field_mappings[python_name] = json_name
Add a custom wire encoding for a given message field. This is primarily used in generated code, to handle enum values which happen to be Python keywords. Args: message_type: (messages.Message) A message type python_name: (basestring) Python name for this value. json_name: (basestring) JSON name to be used on the wire. package: (NoneType, optional) No effect, exists for legacy compatibility.
codesearchnet
def times_update(self, factor): if (factor < 0): raise ValueError('The factor must not be negative.') elif (factor == 0): self.clear() else: _elements = self._elements for element in _elements: _elements[element] *= factor self._total *= factor
Update this multiset by multiplying each element's multiplicity by the given scalar factor. >>> ms = Multiset('aab') >>> ms.times_update(2) >>> sorted(ms) ['a', 'a', 'a', 'a', 'b', 'b'] You can also use the ``*=`` operator for the same effect: >>> ms = Multiset('ac') >>> ms *= 3 >>> sorted(ms) ['a', 'a', 'a', 'c', 'c', 'c'] For a variant of the operation which does not modify the multiset, but returns a new multiset instead, see :meth:`times`. Args: factor: The factor to multiply each multiplicity with.
codesearchnet
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return token_ids_0 return token_ids_0 + token_ids_1
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. This implementation does not add special tokens and this method should be overridden in a subclass. Args: token_ids_0 (`List[int]`): The first tokenized sequence. token_ids_1 (`List[int]`, *optional*): The second tokenized sequence. Returns: `List[int]`: The model input with special tokens.
github-repos
def channel_ready_future(channel): fut = channel._loop.create_future() def _set_result(state): if ((not fut.done()) and (state is _grpc.ChannelConnectivity.READY)): fut.set_result(None) fut.add_done_callback((lambda f: channel.unsubscribe(_set_result))) channel.subscribe(_set_result, try_to_connect=True) return fut
Creates a Future that tracks when a Channel is ready. Cancelling the Future does not affect the channel's state machine. It merely decouples the Future from channel state machine. Args: channel: A Channel object. Returns: A Future object that matures when the channel connectivity is ChannelConnectivity.READY.
codesearchnet
def transitive_inputs(self, node_name, include_control=True, include_reversed_ref=False, device_name=None): if not self._debug_graphs: raise LookupError('Node inputs are not loaded from partition graphs yet.') device_name = self._infer_device_name(device_name, node_name) input_lists = [self._debug_graphs[device_name].node_inputs] if include_control: input_lists.append(self._debug_graphs[device_name].node_ctrl_inputs) if include_reversed_ref: input_lists.append(self._debug_graphs[device_name].node_reversed_ref_inputs) tracer = debug_graphs.DFSGraphTracer(input_lists, skip_node_names=self._get_merge_node_names(device_name)) tracer.trace(node_name) return tracer.inputs()
Get the transitive inputs of given node according to partition graphs. Args: node_name: Name of the node. include_control: Include control inputs (True by default). include_reversed_ref: Whether a ref input, say from A to B, is to be also considered as an input from B to A. The rationale is that ref inputs generally let the recipient (e.g., B in this case) mutate the value of the source (e.g., A in this case). So the reverse direction of the ref edge reflects the direction of information flow. device_name: (`str`) name of the device. If there is only one device or if node_name exists on only one device, this argument is optional. Returns: (`list` of `str`) all transitive inputs to the node, as a list of node names. Raises: LookupError: If node inputs and control inputs have not been loaded from partition graphs yet.
github-repos
def handle_range(schema, field, validator, parent_schema): if (not isinstance(field, fields.Number)): return schema if validator.min: schema['minimum'] = validator.min schema['exclusiveMinimum'] = True else: schema['minimum'] = 0 schema['exclusiveMinimum'] = False if validator.max: schema['maximum'] = validator.max schema['exclusiveMaximum'] = True return schema
Adds validation logic for ``marshmallow.validate.Range``, setting the values appropriately for ``fields.Number`` and its subclasses. Args: schema (dict): The original JSON schema we generated. This is what we want to post-process. field (fields.Field): The field that generated the original schema and who this post-processor belongs to. validator (marshmallow.validate.Range): The validator attached to the passed-in field. parent_schema (marshmallow.Schema): The Schema instance that the field belongs to. Returns: dict: A possibly new JSON Schema that has been post-processed and altered.
codesearchnet
def OpenFile(self, windows_path): path_spec = self._path_resolver.ResolvePath(windows_path) if (path_spec is None): return None return self._file_system.GetFileObjectByPathSpec(path_spec)
Opens the file specified by the Windows path. Args: windows_path (str): Windows path to the file. Returns: FileIO: file-like object or None if the file does not exist.
codesearchnet
def random_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32, seed=None): dtype = dtypes.as_dtype(dtype) with ops.name_scope('random_uniform'): samples = random_ops.random_uniform(shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed) if dtype.is_complex: if seed is not None: seed += 12345 more_samples = random_ops.random_uniform(shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed) samples = math_ops.complex(samples, more_samples) return samples
Tensor with (possibly complex) Uniform entries. Samples are distributed like ``` Uniform[minval, maxval], if dtype is real, X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. minval: `0-D` `Tensor` giving the minimum values. maxval: `0-D` `Tensor` giving the maximum values. dtype: `TensorFlow` `dtype` or Python dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype.
github-repos
def interpolate_beat_times(self, beat_times: numpy.ndarray, steps_per_beat: numpy.ndarray, n_extend: numpy.ndarray): requires_backends(self, ['scipy']) beat_times_function = scipy.interpolate.interp1d(np.arange(beat_times.size), beat_times, bounds_error=False, fill_value='extrapolate') ext_beats = beat_times_function(np.linspace(0, beat_times.size + n_extend - 1, beat_times.size * steps_per_beat + n_extend)) return ext_beats
This method takes beat_times and interpolates them using `scipy.interpolate.interp1d`; the output is then used to convert raw audio to a log-mel-spectrogram. Args: beat_times (`numpy.ndarray`): beat_times is passed into `scipy.interpolate.interp1d` for processing. steps_per_beat (`int`): used as a parameter to control the interpolation. n_extend (`int`): used as a parameter to control the interpolation.
github-repos
def model_to_dot(model, show_shapes=False, show_dtype=False, show_layer_names=True, rankdir='TB', expand_nested=False, dpi=200, subgraph=False, show_layer_activations=False, show_trainable=False, **kwargs): from keras.src.ops.function import make_node_key if not model.built: raise ValueError('This model has not yet been built. Build the model first by calling `build()` or by calling the model on a batch of data.') from keras.src.models import functional from keras.src.models import sequential if not check_pydot(): raise ImportError('You must install pydot (`pip install pydot`) for model_to_dot to work.') if subgraph: dot = pydot.Cluster(style='dashed', graph_name=model.name) dot.set('label', model.name) dot.set('labeljust', 'l') else: dot = pydot.Dot() dot.set('rankdir', rankdir) dot.set('concentrate', True) dot.set('dpi', dpi) dot.set('splines', 'ortho') dot.set_node_defaults(shape='record') if kwargs.pop('layer_range', None) is not None: raise ValueError('Argument `layer_range` is no longer supported.') if kwargs: raise ValueError(f'Unrecognized keyword arguments: {kwargs}') kwargs = {'show_layer_names': show_layer_names, 'show_layer_activations': show_layer_activations, 'show_dtype': show_dtype, 'show_shapes': show_shapes, 'show_trainable': show_trainable} if isinstance(model, sequential.Sequential): layers = model.layers elif not isinstance(model, functional.Functional): node = make_node(model, **kwargs) dot.add_node(node) return dot else: layers = model._operations for i, layer in enumerate(layers): if expand_nested and isinstance(layer, (functional.Functional, sequential.Sequential)): submodel = model_to_dot(layer, show_shapes, show_dtype, show_layer_names, rankdir, expand_nested, subgraph=True, show_layer_activations=show_layer_activations, show_trainable=show_trainable) dot.add_subgraph(submodel) else: node = make_node(layer, **kwargs) dot.add_node(node) if isinstance(model, sequential.Sequential): if not expand_nested: for i in range(len(layers) - 1): add_edge(dot, layers[i], layers[i + 1]) return dot else: layers = model.layers[1:] for layer in layers: for inbound_index, inbound_node in enumerate(layer._inbound_nodes): if isinstance(model, functional.Functional) and make_node_key(layer, inbound_index) not in model._nodes: continue for input_index, input_tensor in enumerate(inbound_node.input_tensors): input_history = input_tensor._keras_history if input_history.operation is None: continue input_node = input_history.operation._inbound_nodes[input_history.node_index] output_index = input_history.tensor_index source = input_node.operation destination = layer if not expand_nested: add_edge(dot, source, layer) continue while isinstance(source, (functional.Functional, sequential.Sequential)): source, _, output_index = source.outputs[output_index]._keras_history while isinstance(destination, (functional.Functional, sequential.Sequential)): if isinstance(destination, functional.Functional): destination = destination.inputs[input_index]._keras_history.operation else: destination = destination.layers[0] add_edge(dot, source, destination) return dot
Convert a Keras model to dot format. Args: model: A Keras model instance. show_shapes: whether to display shape information. show_dtype: whether to display layer dtypes. show_layer_names: whether to display layer names. rankdir: `rankdir` argument passed to PyDot, a string specifying the format of the plot: `"TB"` creates a vertical plot; `"LR"` creates a horizontal plot. expand_nested: whether to expand nested Functional models into clusters. dpi: Image resolution in dots per inch. subgraph: whether to return a `pydot.Cluster` instance. show_layer_activations: Display layer activations (only for layers that have an `activation` property). show_trainable: whether to display if a layer is trainable. Returns: A `pydot.Dot` instance representing the Keras model or a `pydot.Cluster` instance representing nested model if `subgraph=True`.
github-repos
def plot_generated_images(images, fname): fig = plt.figure(figsize=(4, 4)) canvas = backend_agg.FigureCanvasAgg(fig) for i, image in enumerate(images): ax = fig.add_subplot(4, 4, i + 1) plt.axis('off') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.imshow(image.reshape(IMAGE_SHAPE[:-1]), cmap='Greys_r') fig.tight_layout() plt.subplots_adjust(wspace=0.05, hspace=0.05) canvas.print_figure(fname, format='png')
Save a grid of synthetic images as a PNG file. Args: images: samples of synthetic images generated by the generative network. fname: Python `str`, filename to save the plot to.
juraj-google-style
def compute_output_signature(self, input_signature): def check_type_return_shape(s): if not isinstance(s, tensor.TensorSpec): raise TypeError('Only TensorSpec signature types are supported, but saw signature entry: {}.'.format(s)) return s.shape input_shape = nest.map_structure(check_type_return_shape, input_signature) output_shape = self.compute_output_shape(input_shape) dtype = self._compute_dtype if dtype is None: input_dtypes = [s.dtype for s in nest.flatten(input_signature)] dtype = input_dtypes[0] return nest.map_structure(lambda s: tensor.TensorSpec(dtype=dtype, shape=s), output_shape)
Compute the output tensor signature of the layer based on the inputs. Unlike a TensorShape object, a TensorSpec object contains both shape and dtype information for a tensor. This method allows layers to provide output dtype information if it is different from the input dtype. For any layer that doesn't implement this function, the framework will fall back to use `compute_output_shape`, and will assume that the output dtype matches the input dtype. Args: input_signature: Single TensorSpec or nested structure of TensorSpec objects, describing a candidate input for the layer. Returns: Single TensorSpec or nested structure of TensorSpec objects, describing how the layer would transform the provided input. Raises: TypeError: If input_signature contains a non-TensorSpec object.
github-repos
def filter(self, cls, recursive=False): source = self.walk_preorder if recursive else self._children return [ codeobj for codeobj in source() if isinstance(codeobj, cls) ]
Retrieves all descendants (including self) that are instances of a given class. Args: cls (class): The class to use as a filter. Kwargs: recursive (bool): Whether to descend recursively down the tree.
juraj-google-style
def opcode_to_name(model, op_code): op = model.operatorCodes[op_code] code = max(op.builtinCode, op.deprecatedBuiltinCode) for name, value in vars(schema_fb.BuiltinOperator).items(): if value == code: return name return None
Converts a TFLite op_code to the human readable name. Args: model: The input tflite model. op_code: The op_code to resolve to a readable name. Returns: A string containing the human readable op name, or None if not resolvable.
github-repos
def pyc_load(fp): magic_1 = U16(fp.read(2), target=MARSHAL_TARGET) magic_2 = U16(fp.read(2), target=MARSHAL_TARGET) internals = MAGIC_MAP.get(magic_1) if (internals is None): raise ValueError(('Invalid or unknown magic (%d).' % magic_1)) if (magic_2 != 2573): raise ValueError(('Invalid secondary magic (%d).' % magic_2)) timestamp = datetime.datetime.fromtimestamp(U32(fp.read(4), target=MARSHAL_TARGET)) if (internals['version'] >= 33): file_size = U32(fp.read(4)) else: file_size = None code_object = marshal_load(fp, internals) return PycFile(magic_1, internals, timestamp, file_size, code_object)
Load a .pyc file from a file-like object. Arguments: fp(file): The file-like object to read. Returns: PycFile: The parsed representation of the .pyc file.
codesearchnet
def duration(self): duration = 0.0 if (len(self.events) > 0): first = datetime.fromtimestamp(self.events[0]['timestamp']) last = datetime.fromtimestamp(self.events[(- 1)]['timestamp']) duration = (last - first).total_seconds() return duration
Calculate how long the stage took. Returns: float: (current) duration of the stage
codesearchnet
def _prepare_socket_file(self, socket_path, default_prefix): if (socket_path is not None): if os.path.exists(socket_path): raise Exception('Socket file {} exists!'.format(socket_path)) socket_dir = os.path.dirname(socket_path) try_to_create_directory(socket_dir) return socket_path return self._make_inc_temp(prefix=default_prefix, directory_name=self._sockets_dir)
Prepare the socket file for raylet and plasma. This method helps to prepare a socket file. 1. Make the directory if the directory does not exist. 2. If the socket file exists, raise exception. Args: socket_path (string): the socket file to prepare. default_prefix (string): prefix used to create a temporary socket file when socket_path is None. Returns: The path of the socket file to use.
codesearchnet
def _VerifyValues(self, input_sizes=None, filter_sizes=None, out_backprop_sizes=None, strides=None, dilations=None, padding=None, data_format_src='NHWC', data_format_dst='NHWC', expected=None): total_size_1 = np.prod(input_sizes) total_size_2 = np.prod(out_backprop_sizes) x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes) x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes) strides = [1] + strides + [1] if dilations is not None: dilations = [1] + dilations + [1] expected = np.reshape(expected, filter_sizes) x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src, data_format_dst) x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src, data_format_dst) input_sizes = test_utils.PermuteDimsBetweenDataFormats(input_sizes, data_format_src, data_format_dst) out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(out_backprop_sizes, data_format_src, data_format_dst) strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src, data_format_dst) if dilations is not None: dilations = test_utils.PermuteDimsBetweenDataFormats(dilations, data_format_src, data_format_dst) with self.session() as sess: t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes) t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes) with self.test_scope(): tensor = gen_nn_ops.conv2d_backprop_filter(input=t1, filter_sizes=filter_sizes, out_backprop=t2, strides=strides, dilations=dilations, padding=padding, data_format=data_format_dst) value = sess.run(tensor, {t1: x1, t2: x2}) self.assertAllEqual(filter_sizes, value.shape) self.assertAllClose(expected, value, 0.001)
Tests that gen_nn_ops.conv2d_backprop_filter produces the right output. Args: input_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. out_backprop_sizes: Output gradients tensor dimensions. strides: Stride. dilations: Dilations. padding: Padding type. data_format_src: Data format input is in. data_format_dst: Data format verification will run and input is converted to. expected: Expected output.
github-repos
def check(cls, status): assert (cls.trigger is not None), 'Invalid ErrorTrap, trigger not set' assert (cls.error is not None), 'Invalid ErrorTrap, error not set' if (status == cls.trigger): raise cls.error()
Checks if a status enum matches the trigger originally set, and if so, raises the appropriate error. Args: status (int, enum): A protobuf enum response status to check. Raises: AssertionError: If trigger or error were not set. _ApiError: If the statuses don't match. Do not catch. Will be caught automatically and sent back to the client.
codesearchnet
def _process_list_value(name, parse_fn, var_type, m_dict, values, results_dictionary): if (m_dict['index'] is not None): raise ValueError('Assignment of a list to a list index.') elements = filter(None, re.split('[ ,]', m_dict['vals'])) if (name in results_dictionary): raise _reuse_fail(name, values) try: results_dictionary[name] = [parse_fn(e) for e in elements] except ValueError: _parse_fail(name, var_type, m_dict['vals'], values)
Update results_dictionary from a list of values.

Used to update results_dictionary to be returned by parse_values when
encountering a clause with a list RHS (e.g. "arr=[1,2,3]".)

Mutates results_dictionary.

Args:
    name: Name of variable in assignment ("arr").
    parse_fn: Function for parsing individual values.
    var_type: Type of named variable.
    m_dict: Dictionary constructed from regex parsing.
        m_dict['vals']: RHS values as a comma/space-separated string (e.g. "1,2,3").
    values: Full expression being parsed
    results_dictionary: The dictionary being updated for return by the parsing
        function.

Raises:
    ValueError: If the name has an index or the values cannot be parsed.
codesearchnet
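An illustrative call, assuming the helper above is in scope; the m_dict contents mirror what the surrounding regex parser would produce for the clause 'arr=[1,2,3]'.

results = {}
_process_list_value(
    name='arr',
    parse_fn=int,
    var_type=int,
    m_dict={'index': None, 'vals': '1,2,3'},
    values='arr=[1,2,3]',
    results_dictionary=results)
print(results)  # {'arr': [1, 2, 3]}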
def chat_delete(self, *, channel: str, ts: str, **kwargs) -> SlackResponse: kwargs.update({'channel': channel, 'ts': ts}) return self.api_call('chat.delete', json=kwargs)
Deletes a message. Args: channel (str): Channel containing the message to be deleted. e.g. 'C1234567890' ts (str): Timestamp of the message to be deleted. e.g. '1234567890.123456'
codesearchnet
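A hedged usage sketch: it assumes an already-authenticated client object exposing the method above; the channel id and timestamp are placeholders.

# client = WebClient(token='xoxb-...')  # placeholder token
response = client.chat_delete(channel='C1234567890', ts='1234567890.123456')
assert response['ok']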
def __init__(self, dllpath=None): self._lib = None self._winlib = None self._path = None self._windows = sys.platform.startswith('win') self._cygwin = sys.platform.startswith('cygwin') self._temp = None if self._windows or self._cygwin: self._sdk = self.get_appropriate_windows_sdk_name() else: self._sdk = self.JLINK_SDK_NAME if dllpath is not None: self.load(dllpath) else: self.load_default()
Initializes an instance of a ``Library``. Loads the default J-Link DLL if ``dllpath`` is ``None``, otherwise loads the DLL specified by the given ``dllpath``. Args: self (Library): the ``Library`` instance dllpath (str): the DLL to load into the library Returns: ``None``
juraj-google-style
def _init_from_converter(self, options: QuantizationDebugOptions, converter: TFLiteConverter, calibrated_model: Optional[bytes]=None, float_model: Optional[bytes]=None) -> None: self.quant_model = convert.mlir_quantize(calibrated_model, disable_per_channel=converter._experimental_disable_per_channel, fully_quantize=options.fully_quantize, enable_numeric_verify=True, denylisted_ops=options.denylisted_ops, denylisted_nodes=options.denylisted_nodes) self._quant_interpreter = _interpreter.Interpreter(model_content=self.quant_model) self._float_interpreter = None if float_model is not None: self._float_interpreter = _interpreter.Interpreter(model_content=float_model)
Convert the model and apply options.

Converts the quantized model and initializes a quantized model interpreter
with the quantized model. Also initializes a float model interpreter if
float model bytes are provided.

Args:
    options: a QuantizationDebugOptions object.
    converter: an initialized tf.lite.TFLiteConverter.
    calibrated_model: Calibrated model bytes.
    float_model: Float model bytes.
github-repos
def writeline(self, line, line_number): tmp_file = tempfile.TemporaryFile('w+') if not line.endswith(os.linesep): line += os.linesep try: with open(self.path, 'r') as file_handle: for count, new_line in enumerate(file_handle): if count == line_number: new_line = line tmp_file.write(new_line) tmp_file.seek(0) with open(self.path, 'w') as file_handle: for new_line in tmp_file: file_handle.write(new_line) finally: tmp_file.close()
Rewrite a single line in the file. Args: line (str): The new text to write to the file. line_number (int): The line of the file to rewrite. Numbering starts at 0.
juraj-google-style
def unregister_peer(self, connection_id): public_key = self.peer_to_public_key(connection_id) if public_key: self._consensus_notifier.notify_peer_disconnected(public_key) with self._lock: if connection_id in self._peers: del self._peers[connection_id] LOGGER.debug("Removed connection_id %s, " "connected identities are now %s", connection_id, self._peers) self._topology.set_connection_status(connection_id, PeerStatus.TEMP) else: LOGGER.warning("Connection unregister failed as connection " "was not registered: %s", connection_id)
Removes a connection_id from the registry.

Args:
    connection_id (str): A unique identifier which identifies a
        connection on the network server socket.
juraj-google-style
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor): def __init__(self, bos_token_id: int): self.bos_token_id = bos_token_id def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: new_scores = jnp.full(scores.shape, -float('inf')) apply_penalty = 1 - jnp.bool_(cur_len - 1) scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores) return scores
[`FlaxLogitsProcessor`] that enforces the specified token as the first generated token. Args: bos_token_id (`int`): The id of the token to force as the first generated token.
github-repos
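A small sketch of the masking behaviour, assuming jax is installed; at cur_len == 1 every logit except the forced BOS id is pushed to -inf.

import jax.numpy as jnp

processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=0)
input_ids = jnp.zeros((2, 1), dtype=jnp.int32)  # dummy prompt of length 1
scores = jnp.zeros((2, 5))                      # batch of 2, vocab size 5

forced = processor(input_ids, scores, cur_len=1)
# forced[:, 0] == 0 and every other entry is -inf, so BOS is generated first.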
def forward(self, inference_args, input_tangents): if self._forward is None: self._forward, self._forward_graph, self._backward, self._forwardprop_output_indices, self._num_forwardprop_outputs = self._forward_and_backward_functions(inference_args, input_tangents) return self._forward
Construct or fetch a forward function with side-outputs. When graph building without a tape active, symbolic gradients rely on regenerating the backward function for higher-order gradients (to account for new side outputs of the rewritten forward function call). Thus there is no fixed backward function for this case. However, when a tape is active (eager or graph building), we generate fixed backward and forward functions at forward function call time. This difference between the tape and non-tape cases is to avoid building unneeded backward functions while graph building (where we may or may not eventually need gradients). Args: inference_args: A flat list of Tensors, arguments to the inference function. input_tangents: A flat list of Tensors, jvps associated with `inference_args`. Returns: A forward atomic_function.AtomicFunction.
github-repos
def _build(self, inputs, memory, treat_input_as_matrix=False): if treat_input_as_matrix: inputs = basic.BatchFlatten(preserve_dims=2)(inputs) inputs_reshape = basic.BatchApply( basic.Linear(self._mem_size), n_dims=2)(inputs) else: inputs = basic.BatchFlatten()(inputs) inputs = basic.Linear(self._mem_size)(inputs) inputs_reshape = tf.expand_dims(inputs, 1) memory_plus_input = tf.concat([memory, inputs_reshape], axis=1) next_memory = self._attend_over_memory(memory_plus_input) n = inputs_reshape.get_shape().as_list()[1] next_memory = next_memory[:, :-n, :] if self._gate_style == 'unit' or self._gate_style == 'memory': self._input_gate, self._forget_gate = self._create_gates( inputs_reshape, memory) next_memory = self._input_gate * tf.tanh(next_memory) next_memory += self._forget_gate * memory output = basic.BatchFlatten()(next_memory) return output, next_memory
Adds relational memory to the TensorFlow graph.

Args:
    inputs: Tensor input.
    memory: Memory output from the previous time step.
    treat_input_as_matrix: Optional, whether to treat `inputs` as a sequence
        of matrices. Defaults to False, in which case the input is flattened
        into a vector.

Returns:
    output: This time step's output.
    next_memory: The next version of memory to use.
juraj-google-style
def minimize_peak_memory(graph, scheduler_alg): if (scheduler_alg == 'NAIVE'): return _minimize_peak_memory_naive(graph) elif (scheduler_alg == 'LIST'): return _minimize_peak_memory_list(graph) else: raise NotImplementedError('{} is not a scheduler algorithm. It should be one of NAIVE or LIST.'.format(scheduler_alg))
Computes a schedule to minimize peak memory. Args: graph: an mtf.auto_mtf.graph_interface.GraphInterface. scheduler_alg: a string, one of 'NAIVE' or 'LIST' Returns: an iterable of integers representing the schedule.
codesearchnet
def scaled_wulff(self, wulffshape, r): r_ratio = (r / wulffshape.effective_radius) miller_list = wulffshape.miller_energy_dict.keys() se_list = np.array(list(wulffshape.miller_energy_dict.values())) scaled_se = (se_list * r_ratio) return WulffShape(wulffshape.lattice, miller_list, scaled_se, symprec=self.symprec)
Scales the Wulff shape with an effective radius r. Note that the resulting
Wulff does not necessarily have the same effective radius as the one
provided. The Wulff shape is scaled by its surface energies where first the
surface energies are scaled by the minimum surface energy and then
multiplied by the given effective radius.

Args:
    wulffshape (WulffShape): Initial, unscaled WulffShape
    r (float): Arbitrary effective radius of the WulffShape

Returns:
    WulffShape (scaled by r)
codesearchnet
def load_yaml_config(conf_file): global g_config with open(conf_file) as fp: g_config = util.yaml_load(fp) src_dir = get_path('src_dir', None) if (src_dir is not None): sys.path.insert(0, src_dir) for cmd in get('commands', []): _import(cmd)
Load a YAML configuration. This will not update the configuration but replace it entirely. Args: conf_file (str): Path to the YAML config. This function will not check the file name or extension and will just crash if the given file does not exist or is not a valid YAML file.
codesearchnet
def _parse_description(html_chunk): description_tag = html_chunk.match( ["div", {"class": "kniha_detail_text"}], "p" ) if not description_tag: return None description = get_first_content(description_tag) description = description.replace("<br />", "\n") description = description.replace("<br/>", "\n") return dhtmlparser.removeTags(description).strip()
Parse description of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str/None: Description as string or None if not found.
juraj-google-style
def _subtoken_ids_to_tokens(self, subtokens): concatenated = "".join( [self._subtoken_id_to_subtoken_string(s) for s in subtokens]) split = concatenated.split("_") ret = [] for t in split: if t: unescaped = _unescape_token(t + "_") if unescaped: ret.append(unescaped) return ret
Converts a list of subtoken ids to a list of tokens. Args: subtokens: a list of integers in the range [0, vocab_size) Returns: a list of strings.
juraj-google-style
def default_filename(ext): if ext == "py": raise RuntimeError("asked for a default filename with 'py' extension") filename = detect_current_filename() if filename is None: return temp_filename(ext) basedir = dirname(filename) or getcwd() if _no_access(basedir) or _shares_exec_prefix(basedir): return temp_filename(ext) name, _ = splitext(basename(filename)) return join(basedir, name + "." + ext)
Generate a default filename with a given extension, attempting to use
the filename of the currently running process, if possible.

If the filename of the current process is not available (or would not be
writable), then a temporary file with the given extension is returned.

Args:
    ext (str) : the desired extension for the filename

Returns:
    str

Raises:
    RuntimeError
        If the extension requested is ".py"
juraj-google-style
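A brief usage note: with the function above in scope, a call like the following yields '<scriptname>.html' next to the running script, or a temporary file when that location is unknown or not writable (the extension here is just an example).

out_path = default_filename("html")
print(out_path)  # e.g. '/path/to/myscript.html' or a tempfile fallback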
class RealmScorerOutput(ModelOutput): relevance_score: Optional[torch.FloatTensor] = None query_score: Optional[torch.FloatTensor] = None candidate_score: Optional[torch.FloatTensor] = None
Outputs of [`RealmScorer`] models. Args: relevance_score (`torch.FloatTensor` of shape `(batch_size, config.num_candidates)`): The relevance score of document candidates (before softmax). query_score (`torch.FloatTensor` of shape `(batch_size, config.retriever_proj_size)`): Query score derived from the query embedder. candidate_score (`torch.FloatTensor` of shape `(batch_size, config.num_candidates, config.retriever_proj_size)`): Candidate score derived from the embedder.
github-repos
def get_package_from_string(txt, paths=None): o = VersionedObject(txt) return get_package(o.name, o.version, paths=paths)
Get a package given a string. Args: txt (str): String such as 'foo', 'bah-1.3'. paths (list of str, optional): paths to search for package, defaults to `config.packages_path`. Returns: `Package` instance, or None if no package was found.
codesearchnet
def _replace_sparse_with_values(value, sparse_list): flat_vals = nest.flatten(value, expand_composites=False) new_vals = [] for v in flat_vals: if isinstance(v, sparse_tensor.SparseTensor): sparse_list.append(v) new_vals.append(v.values) else: new_vals.append(v) return nest.pack_sequence_as(value, new_vals, expand_composites=False)
Replace `SparseTensor`s with their values in `value`.

Each `SparseTensor` in `value` is replaced by its `values` tensor, and all
`SparseTensor`s are collected in `sparse_list`.

Args:
    value: A structure of `Tensor`s and `SparseTensor`s
    sparse_list: A list. Output parameter that collects all `SparseTensor`s in
        `value`.

Returns:
    `value` with each SparseTensor replaced by its `.values` attribute.
github-repos
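A hedged sketch of what the helper does to a nested structure, assuming it is called with TensorFlow in eager mode; the structure below is purely illustrative.

import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0]], values=[3.0], dense_shape=[2, 2])
collected = []
flat = _replace_sparse_with_values({'a': sp, 'b': tf.constant(1.0)}, collected)
# flat['a'] is sp.values, flat['b'] is unchanged, and collected == [sp]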
def _project_TH2(self, hist: Hist) -> Any: if len(self.projection_axes) != 1: raise ValueError(len(self.projection_axes), "Invalid number of axes") projection_func_map = { TH1AxisType.x_axis.value: hist.ProjectionX, TH1AxisType.y_axis.value: hist.ProjectionY } try: axis_type = self.projection_axes[0].axis_type.value except ValueError: axis_type = self.axis_type projection_func = projection_func_map[axis_type] logger.info(f"Projecting onto axis range {self.projection_axes[0].name} from hist {hist.GetName()}") projected_hist = projection_func() return projected_hist
Perform the actual TH2 -> TH1 projection. This projection can only be to 1D. Args: hist (ROOT.TH2): Histogram from which the projections should be performed. Returns: ROOT.TH1: The projected histogram.
juraj-google-style
async def _connect_and_read(self): while (not self._stopped): try: self._connection_attempts += 1 async with aiohttp.ClientSession(loop=self._event_loop, timeout=aiohttp.ClientTimeout(total=self.timeout)) as session: self._session = session (url, data) = (await self._retreive_websocket_info()) async with session.ws_connect(url, heartbeat=self.ping_interval, ssl=self.ssl, proxy=self.proxy) as websocket: self._logger.debug('The Websocket connection has been opened.') self._websocket = websocket self._dispatch_event(event='open', data=data) (await self._read_messages()) except (client_err.SlackClientNotConnectedError, client_err.SlackApiError) as exception: self._logger.debug(str(exception)) self._dispatch_event(event='error', data=exception) if (self.auto_reconnect and (not self._stopped)): (await self._wait_exponentially(exception)) continue self._logger.exception('The Websocket encountered an error. Closing the connection...') self._close_websocket() raise
Retrieves and connects to Slack's RTM API.

Makes an authenticated call to Slack's RTM API to retrieve a websocket URL.
Then connects to the message server and reads event messages as they come in.

If 'auto_reconnect' is specified we retrieve a new url and reconnect any time
the connection is lost unintentionally or an exception is thrown.

Raises:
    SlackApiError: Unable to retrieve RTM URL from Slack.
    websockets.exceptions: Errors thrown by the 'websockets' library.
codesearchnet
def gather(self, indices, name=None): return self._implementation.gather(indices, name=name)
Return selected values in the TensorArray as a packed `Tensor`. All of selected values must have been written and their shapes must all match. Args: indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If the `TensorArray` is not dynamic, `max_value=size()`. name: A name for the operation (optional). Returns: The tensors in the `TensorArray` selected by `indices`, packed into one tensor.
github-repos
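A short eager-mode example of gather, using the public tf.TensorArray API that wraps the implementation above.

import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=3)
ta = ta.write(0, [1.0, 2.0])
ta = ta.write(1, [3.0, 4.0])
ta = ta.write(2, [5.0, 6.0])

packed = ta.gather([0, 2])  # shape (2, 2): rows 0 and 2 packed into one tensor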
def get_time_of_day_description(self): seconds_expression = self._expression_parts[0] minute_expression = self._expression_parts[1] hour_expression = self._expression_parts[2] description = StringBuilder() if ((any(((exp in minute_expression) for exp in self._special_characters)) is False) and (any(((exp in hour_expression) for exp in self._special_characters)) is False) and (any(((exp in seconds_expression) for exp in self._special_characters)) is False)): description.append(_('At ')) description.append(self.format_time(hour_expression, minute_expression, seconds_expression)) elif (('-' in minute_expression) and (',' not in minute_expression) and (any(((exp in hour_expression) for exp in self._special_characters)) is False)): minute_parts = minute_expression.split('-') description.append(_('Every minute between {0} and {1}').format(self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1]))) elif ((',' in hour_expression) and ('-' not in hour_expression) and (any(((exp in minute_expression) for exp in self._special_characters)) is False)): hour_parts = hour_expression.split(',') description.append(_('At')) for (i, hour_part) in enumerate(hour_parts): description.append(' ') description.append(self.format_time(hour_part, minute_expression)) if (i < (len(hour_parts) - 2)): description.append(',') if (i == (len(hour_parts) - 2)): description.append(_(' and')) else: seconds_description = self.get_seconds_description() minutes_description = self.get_minutes_description() hours_description = self.get_hours_description() description.append(seconds_description) if description: description.append(', ') description.append(minutes_description) if description: description.append(', ') description.append(hours_description) return str(description)
Generates a description for only the TIMEOFDAY portion of the expression Returns: The TIMEOFDAY description
codesearchnet
def drive_enclosures(self): if (not self.__drive_enclures): self.__drive_enclures = DriveEnclosures(self.__connection) return self.__drive_enclures
Gets the Drive Enclosures API client. Returns: DriveEnclosures:
codesearchnet
def _hexdecode(hexstring): _checkString(hexstring, description='hexstring') if len(hexstring) % 2 != 0: raise ValueError('The input hexstring must be of even length. Given: {!r}'.format(hexstring)) if sys.version_info[0] > 2: by = bytes(hexstring, 'latin1') try: return str(binascii.unhexlify(by), encoding='latin1') except binascii.Error as err: new_error_message = 'Hexdecode reported an error: {!s}. Input hexstring: {}'.format(err.args[0], hexstring) raise TypeError(new_error_message) else: try: return hexstring.decode('hex') except TypeError as err: raise TypeError('Hexdecode reported an error: {}. Input hexstring: {}'.format(err.message, hexstring))
Convert a hex encoded string to a byte string. For example '4A' will return 'J', and '04' will return ``'\\x04'`` (which has length 1). Args: hexstring (str): Can be for example 'A3' or 'A3B4'. Must be of even length. Allowed characters are '0' to '9', 'a' to 'f' and 'A' to 'F' (not space). Returns: A string of half the length, with characters corresponding to all 0-255 values for each byte. Raises: TypeError, ValueError
juraj-google-style
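Two quick calls matching the docstring, assuming the helper above is imported:

assert _hexdecode('4A') == 'J'
assert _hexdecode('04') == '\x04'
# _hexdecode('4A3')  -> ValueError: the input hexstring must be of even length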
def str_to_mac(mac_string): sp = mac_string.split(':') mac_string = ''.join(sp) return binascii.unhexlify(mac_string)
Convert a readable string to a MAC address Args: mac_string (str): a readable string (e.g. '01:02:03:04:05:06') Returns: str: a MAC address in hex form
juraj-google-style
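A minimal check of the conversion, assuming the function above is in scope (on Python 3 the result is a bytes object):

mac = str_to_mac('01:02:03:04:05:06')
assert mac == b'\x01\x02\x03\x04\x05\x06'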
def indicator_arrays(tc_entity_array): type_dict = {} for ea in tc_entity_array: type_dict.setdefault(ea['type'], []).append(ea['value']) return type_dict
Convert TCEntityArray to Indicator Type dictionary. Args: tc_entity_array (dictionary): The TCEntityArray to convert. Returns: (dictionary): Dictionary containing arrays of indicators for each indicator type.
juraj-google-style
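An illustrative input/output pair for the conversion above:

tc_entities = [
    {'type': 'Address', 'value': '1.1.1.1'},
    {'type': 'Address', 'value': '2.2.2.2'},
    {'type': 'Host', 'value': 'example.com'},
]
print(indicator_arrays(tc_entities))
# {'Address': ['1.1.1.1', '2.2.2.2'], 'Host': ['example.com']}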
def ray_get_and_free(object_ids): global _last_free_time global _to_free result = ray.get(object_ids) if type(object_ids) is not list: object_ids = [object_ids] _to_free.extend(object_ids) now = time.time() if (len(_to_free) > MAX_FREE_QUEUE_SIZE or now - _last_free_time > FREE_DELAY_S): ray.internal.free(_to_free) _to_free = [] _last_free_time = now return result
Call ray.get and then queue the object ids for deletion. This function should be used whenever possible in RLlib, to optimize memory usage. The only exception is when an object_id is shared among multiple readers. Args: object_ids (ObjectID|List[ObjectID]): Object ids to fetch and free. Returns: The result of ray.get(object_ids).
juraj-google-style
def _BuildKeyHierarchy(self, subkeys, values): if subkeys: for registry_key in subkeys: name = registry_key.name.upper() if name in self._subkeys: continue self._subkeys[name] = registry_key registry_key._key_path = key_paths.JoinKeyPath([ self._key_path, registry_key.name]) if values: for registry_value in values: name = registry_value.name.upper() if name in self._values: continue self._values[name] = registry_value
Builds the Windows Registry key hierarchy. Args: subkeys (list[FakeWinRegistryKey]): list of subkeys. values (list[FakeWinRegistryValue]): list of values.
juraj-google-style
def expect_no_raises(message=None, extras=None): try: yield except Exception as e: e_record = records.ExceptionRecord(e) if extras: e_record.extras = extras msg = message or 'Got an unexpected exception' details = '%s: %s' % (msg, e_record.details) logging.exception(details) e_record.details = details recorder.add_error(e_record)
Expects no exception is raised in a context. If the expectation is not met, the test is marked as fail after its execution finishes. A default message is added to the exception `details`. Args: message: string, custom message to add to exception's `details`. extras: An optional field for extra information to be included in test result.
juraj-google-style
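A hedged usage sketch inside a Mobly test method; it assumes expect_no_raises is exposed as a context manager (the decorator is not shown in the snippet above), and the device call is hypothetical.

def test_reboot_does_not_crash(self):
    with expect_no_raises('reboot should not raise', extras={'step': 'reboot'}):
        self.dut.reboot()  # hypothetical device call
    # Execution continues; any exception is recorded as an expectation failure.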
def _compute_attention(self, query, key, value, attention_mask=None, training=None, return_attention_scores=False): if self._flash_attention and return_attention_scores: raise ValueError('Returning attention scores is not supported when flash attention is enabled. Please disable flash attention to access attention scores.') use_dot_product_attention = not (self._dropout > 0.0 or return_attention_scores or len(query.shape) != 4) if use_dot_product_attention: if attention_mask is not None: mask_expansion_axis = -len(self._attention_axes) * 2 - 1 len_attention_scores_shape = 4 for _ in range(len_attention_scores_shape - len(attention_mask.shape)): attention_mask = ops.expand_dims(attention_mask, axis=mask_expansion_axis) attention_mask = ops.cast(attention_mask, dtype='bool') attention_output = ops.dot_product_attention(query=query, key=key, value=value, bias=None, mask=attention_mask, scale=self._inverse_sqrt_key_dim, is_causal=False, flash_attention=self._flash_attention) return (attention_output, None) query = ops.multiply(query, ops.cast(self._inverse_sqrt_key_dim, query.dtype)) attention_scores = ops.einsum(self._dot_product_equation, key, query) attention_scores = self._masked_softmax(attention_scores, attention_mask) if self._dropout > 0.0: final_attn_scores = self._dropout_layer(attention_scores, training=training) else: final_attn_scores = attention_scores attention_output = ops.einsum(self._combine_equation, final_attn_scores, value) return (attention_output, attention_scores)
Applies Dot-product attention with query, key, value tensors. This function defines the computation inside `call` with projected multi-head Q, K, V inputs. Users can override this function for customized attention implementation. Args: query: Projected query tensor of shape `(B, T, N, key_dim)`. key: Projected key tensor of shape `(B, S, N, key_dim)`. value: Projected value tensor of shape `(B, S, N, value_dim)`. attention_mask: a boolean mask of shape `(B, T, S)`, that prevents attention to certain positions. It is generally not needed if the `query` and `value` (and/or `key`) are masked. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Returns: attention_output: Multi-headed outputs of attention computation. attention_scores: Multi-headed attention weights.
github-repos
def install_dependencies(package: str) -> None: subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
Install Python dependencies Args: package (string): The package to install
github-repos
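Usage is a one-liner; this shells out to pip in the current interpreter environment (the requirement specifier below is just an example):

install_dependencies('requests==2.31.0')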
def message_upperbound(self, tree, spins, subtheta): energy_sources = set() for v, subtree in tree.items(): assert all(u in spins for u in self._ancestors[v]) def energy_contributions(): yield subtheta.linear[v] for u, bias in subtheta.adj[v].items(): if u in spins: yield Times(limitReal(spins[u]), bias) energy = Plus(energy_contributions()) if subtree: spins[v] = 1. plus = self.message_upperbound(subtree, spins, subtheta) spins[v] = -1. minus = self.message_upperbound(subtree, spins, subtheta) del spins[v] else: plus = minus = limitReal(0.0) m = FreshSymbol(REAL) self.assertions.update({LE(m, Plus(energy, plus)), LE(m, Plus(Times(energy, limitReal(-1.)), minus))}) energy_sources.add(m) return Plus(energy_sources)
Determine an upper bound on the energy of the elimination tree. Args: tree (dict): The current elimination tree spins (dict): The current fixed spins subtheta (dict): Theta with spins fixed. Returns: The formula for the energy of the tree.
juraj-google-style
def intersection(self, other): if (not hasattr(other, '__iter__')): other = [other] bounds = self.bounds for range in other: bounds = self._intersection(bounds, range.bounds) if (not bounds): return None range = VersionRange(None) range.bounds = bounds return range
AND together version ranges. Calculates the intersection of this range with one or more other ranges. Args: other: VersionRange object (or list of) to AND with. Returns: New VersionRange object representing the intersection, or None if no ranges intersect.
codesearchnet
def results(self, use_cache=True, dialect=None, billing_tier=None): return self._materialization.results(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier)
Materialize the view synchronously. If you require more control over the execution, use execute() or execute_async(). Args: use_cache: whether to use cached results or not. dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. 'standard' : Use BigQuery's standard SQL (beta), which is compliant with the SQL 2011 standard. billing_tier: Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default. This can also be used to override your project-wide default billing tier on a per-query basis. Returns: A QueryResultsTable containing the result set. Raises: Exception if the query could not be executed or query response was malformed.
codesearchnet
def write(self, file_name): try: assert file_name[-6:] == '.xhtml' except (AssertionError, IndexError): raise ValueError('filename must end with .xhtml') with open(file_name, 'wb') as f: f.write(self.content.encode('utf-8'))
Writes the chapter object to an xhtml file. Args: file_name (str): The full name of the xhtml file to save to.
juraj-google-style
def register_write(self, reg_index, value): res = self._dll.JLINKARM_WriteReg(reg_index, value) if (res != 0): raise errors.JLinkException(('Error writing to register %d' % reg_index)) return value
Writes into an ARM register. Note: The data is not immediately written, but is cached before being transferred to the CPU on CPU start. Args: self (JLink): the ``JLink`` instance reg_index (int): the ARM register to write to value (int): the value to write to the register Returns: The value written to the ARM register. Raises: JLinkException: on write error.
codesearchnet
def scatter_div(self, sparse_delta, use_locking=False, name=None): if not isinstance(sparse_delta, indexed_slices.IndexedSlices): raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}') return self._lazy_read(gen_resource_variable_ops.resource_scatter_div(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))
Divide this variable by `tf.IndexedSlices`. Args: sparse_delta: `tf.IndexedSlices` to divide this variable by. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`.
github-repos
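A small eager-mode example of the sparse division, using the public tf.Variable API that exposes this method:

import tensorflow as tf

v = tf.Variable([1.0, 2.0, 8.0])
delta = tf.IndexedSlices(values=tf.constant([4.0]), indices=tf.constant([2]))
v.scatter_div(delta)
# v is now [1.0, 2.0, 2.0]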
def load_database(adapter, variant_file=None, sv_file=None, family_file=None, family_type='ped', skip_case_id=False, gq_treshold=None, case_id=None, max_window=3000, profile_file=None, hard_threshold=0.95, soft_threshold=0.9): vcf_files = [] nr_variants = None vcf_individuals = None if variant_file: vcf_info = check_vcf(variant_file) nr_variants = vcf_info['nr_variants'] variant_type = vcf_info['variant_type'] vcf_files.append(variant_file) vcf_individuals = vcf_info['individuals'] nr_sv_variants = None sv_individuals = None if sv_file: vcf_info = check_vcf(sv_file, 'sv') nr_sv_variants = vcf_info['nr_variants'] vcf_files.append(sv_file) sv_individuals = vcf_info['individuals'] profiles = None matches = None if profile_file: profiles = get_profiles(adapter, profile_file) matches = profile_match(adapter, profiles, hard_threshold=hard_threshold, soft_threshold=soft_threshold) for _vcf_file in vcf_files: vcf = get_vcf(_vcf_file) if gq_treshold: if (not vcf.contains('GQ')): LOG.warning('Set gq-treshold to 0 or add info to vcf {0}'.format(_vcf_file)) raise SyntaxError('GQ is not defined in vcf header') family = None family_id = None if family_file: LOG.info('Loading family from %s', family_file) with open(family_file, 'r') as family_lines: family = get_case(family_lines=family_lines, family_type=family_type) family_id = family.family_id case_id = (case_id or family_id) case_obj = build_case(case=family, case_id=case_id, vcf_path=variant_file, vcf_individuals=vcf_individuals, nr_variants=nr_variants, vcf_sv_path=sv_file, sv_individuals=sv_individuals, nr_sv_variants=nr_sv_variants, profiles=profiles, matches=matches, profile_path=profile_file) load_case(adapter=adapter, case_obj=case_obj) nr_inserted = 0 for file_type in ['vcf_path', 'vcf_sv_path']: variant_type = 'snv' if (file_type == 'vcf_sv_path'): variant_type = 'sv' if (case_obj.get(file_type) is None): continue vcf_obj = get_vcf(case_obj[file_type]) try: nr_inserted += load_variants(adapter=adapter, vcf_obj=vcf_obj, case_obj=case_obj, skip_case_id=skip_case_id, gq_treshold=gq_treshold, max_window=max_window, variant_type=variant_type) except Exception as err: LOG.warning(err) delete(adapter=adapter, case_obj=case_obj) raise err return nr_inserted
Load the database with a case and its variants

Args:
    adapter: Connection to database
    variant_file(str): Path to variant file
    sv_file(str): Path to sv variant file
    family_file(str): Path to family file
    family_type(str): Format of family file
    skip_case_id(bool): If no case information should be added to variants
    gq_treshold(int): If only quality variants should be considered
    case_id(str): If a different case id than the one in the family file should be used
    max_window(int): Specify the max size for sv windows
    profile_file(str): Path to a profile file; enables profile checks when given
    hard_threshold(float): Rejects load if hamming distance above this is found
    soft_threshold(float): Stores similar samples if hamming distance above this is found

Returns:
    nr_inserted(int)
codesearchnet
def multi_interpolation_basis(n_objectives=6, n_interp_steps=5, width=128, channels=3):
    (N, M, W, Ch) = (n_objectives, n_interp_steps, width, channels)
    # NOTE: the three `lowres_tensor` sums below were truncated in extraction.
    # They are reconstructed here as multi-scale sums; the scale factors
    # `k in [1, 2, 4, 8]` are an assumption, not taken from the original text.
    const_term = sum([lowres_tensor([W, W, Ch], [(W // k), (W // k), Ch]) for k in [1, 2, 4, 8]])
    const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch])
    example_interps = [sum([lowres_tensor([M, W, W, Ch], [2, (W // k), (W // k), Ch]) for k in [1, 2, 4, 8]]) for _ in range(N)]
    example_basis = []
    for n in range(N):
        col = []
        for m in range(N):
            interp = (example_interps[n] + example_interps[m][::(- 1)])
            col.append(interp)
        example_basis.append(col)
    interp_basis = []
    for n in range(N):
        col = [interp_basis[m][(N - n)][::(- 1)] for m in range(n)]
        col.append(tf.zeros([M, W, W, 3]))
        for m in range((n + 1), N):
            interp = sum([lowres_tensor([M, W, W, Ch], [M, (W // k), (W // k), Ch]) for k in [1, 2, 4, 8]])
            col.append(interp)
        interp_basis.append(col)
    basis = []
    for n in range(N):
        col_ex = tf.stack(example_basis[n])
        col_in = tf.stack(interp_basis[n])
        basis.append((col_ex + col_in))
    basis = tf.stack(basis)
    return (basis + const_term)
A parameterization for interpolating between each pair of N objectives.

Sometimes you want to interpolate between optimizing a bunch of objectives,
in a parameterization that encourages images to align.

Args:
    n_objectives: number of objectives you want to interpolate between
    n_interp_steps: number of interpolation steps
    width: width of interpolated images
    channels: number of image channels

Returns:
    A [n_objectives, n_objectives, n_interp_steps, width, width, channel]
    shaped tensor, t, where the final [width, width, channel] should be seen
    as images, such that the following properties hold:

    t[a, b] = t[b, a, ::-1]
    t[a, i, 0] = t[a, j, 0] for all i, j
    t[a, a, i] = t[a, a, j] for all i, j
    t[a, b, i] = t[b, a, -i] for all i
codesearchnet