Dataset columns:
- code (string, lengths 20 – 4.93k)
- docstring (string, lengths 33 – 1.27k)
- source (string, 3 classes: github-repos, codesearchnet, juraj-google-style)
def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: residual = hidden_states hidden_states = self.layer_norm1(inputs=hidden_states) attention_outputs = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, training=training) hidden_states = attention_outputs[0] hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(inputs=hidden_states) hidden_states = self.mlp(hidden_states=hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) + attention_outputs[1:] return outputs
Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. causal_attention_mask (`tf.Tensor`): causal attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`): Whether or not to return the attentions tensors of all attention layers. See `outputs` under returned tensors for more detail.
github-repos
def __init__(self, input_energy: energy.BitstringEnergy, initial_seed: Union[None, tf.Tensor]=None, name: Union[None, str]=None): super().__init__(name=name) self._energy = input_energy self._energy.build([None, self._energy.num_bits]) self._tracked_variables = input_energy.variables if len(self._tracked_variables) == 0: self._checkpoint = False else: self._tracked_variables_checkpoint = [tf.Variable(v.read_value(), trainable=False) for v in self._tracked_variables] self._checkpoint = True if initial_seed is None: self._update_seed = tf.Variable(True, trainable=False) else: self._update_seed = tf.Variable(False, trainable=False) self._seed = tf.Variable(tfp.random.sanitize_seed(initial_seed), trainable=False) self._first_inference = tf.Variable(True, trainable=False)
Initializes an EnergyInferenceBase. Args: input_energy: The parameterized energy function which defines this distribution via the equations of an energy based model. This class assumes that all parameters of `energy` are `tf.Variable`s and that they are all returned by `energy.variables`. initial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This seed will be used in the `sample` method. If None, the seed is updated after every inference call. Otherwise, the seed is fixed. name: Optional name for the model.
github-repos
async def set_typing(self, typing=hangouts_pb2.TYPING_TYPE_STARTED): try: (await self._client.set_typing(hangouts_pb2.SetTypingRequest(request_header=self._client.get_request_header(), conversation_id=hangouts_pb2.ConversationId(id=self.id_), type=typing))) except exceptions.NetworkError as e: logger.warning('Failed to set typing status: {}'.format(e)) raise
Set your typing status in this conversation. Args: typing: (optional) ``TYPING_TYPE_STARTED``, ``TYPING_TYPE_PAUSED``, or ``TYPING_TYPE_STOPPED`` to start, pause, or stop typing, respectively. Defaults to ``TYPING_TYPE_STARTED``. Raises: .NetworkError: If typing status cannot be set.
codesearchnet
def show_qouts(self, nids=None, stream=sys.stdout): lines = [] for task in self.iflat_tasks(status=self.S_QCRITICAL, nids=nids): header = "=== " + task.qout_file.path + "===" lines.append(header) if task.qout_file.exists: with open(task.qout_file.path, "rt") as fh: lines += fh.readlines() else: lines.append("File does not exist!") lines.append("=" * len(header) + 2*"\n") return stream.writelines(lines)
Write to the given stream the content of the queue output file for all tasks whose status is S_QCRITICAL. Args: nids: optional list of node identifiers used to filter the tasks. stream: File-like object. Default: sys.stdout
juraj-google-style
def add_backdoor(self, backdoor_name, source, reference, method='', aliases=[], version='', campaign='', confidence='', description='', bucket_list=[]): data = {'api_key': self.api_key, 'username': self.username, 'source': source, 'reference': reference, 'method': method, 'name': backdoor_name, 'aliases': ','.join(aliases), 'version': version, 'campaign': campaign, 'confidence': confidence, 'bucket_list': bucket_list, 'description': description} r = requests.post('{0}/backdoors/'.format(self.url), data=data, verify=self.verify, proxies=self.proxies) if (r.status_code == 200): result_data = json.loads(r.text) return result_data else: log.error('Error with status code {0} and message {1}'.format(r.status_code, r.text)) return None
Add a backdoor object to CRITs. Args: backdoor_name: The primary name of the backdoor source: Source of the information reference: A reference where more information can be found method: The method for obtaining the backdoor information. aliases: List of aliases for the backdoor. version: Version campaign: An associated campaign confidence: The campaign confidence description: A description of the backdoor bucket_list: A list of bucket list items to add
codesearchnet
class Wav2Vec2CTCTokenizerOutput(ModelOutput): text: Union[List[str], str] char_offsets: Union[List[ListOfDict], ListOfDict] = None word_offsets: Union[List[ListOfDict], ListOfDict] = None
Output type of [`Wav2Vec2CTCTokenizer`], with transcription. Args: text (list of `str` or `str`): Decoded logits in text form. Usually the speech transcription. char_offsets (list of `List[Dict[str, Union[int, str]]]` or `List[Dict[str, Union[int, str]]]`): Offsets of the decoded characters. In combination with sampling rate and model downsampling rate char offsets can be used to compute time stamps for each character. word_offsets (list of `List[Dict[str, Union[int, str]]]` or `List[Dict[str, Union[int, str]]]`): Offsets of the decoded words. In combination with sampling rate and model downsampling rate word offsets can be used to compute time stamps for each word.
github-repos
def _is_trivial_angle(rad: float, atol: float) -> bool: return abs(rad) < atol or abs(abs(rad) - np.pi / 4) < atol
Tests if a circuit for an operator exp(i*rad*XX) (or YY, or ZZ) can be performed with a whole CZ. Args: rad: The angle in radians, assumed to be in the range [-pi/4, pi/4] atol: The absolute tolerance used for the comparison.
juraj-google-style
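A minimal usage sketch for the helper above; the tolerance value is illustrative and only numpy is assumed:

import numpy as np

# Angles of 0 or ±pi/4 are "trivial": the two-qubit interaction can be
# realized with a whole CZ rather than a partial one.
atol = 1e-8
print(_is_trivial_angle(0.0, atol))          # True (zero interaction)
print(_is_trivial_angle(np.pi / 4, atol))    # True (full CZ)
print(_is_trivial_angle(-np.pi / 4, atol))   # True (sign does not matter)
print(_is_trivial_angle(np.pi / 8, atol))    # False (needs a partial CZ)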
def airborne_position_with_ref(msg, lat_ref, lon_ref): mb = common.hex2bin(msg)[32:] cprlat = common.bin2int(mb[22:39]) / 131072.0 cprlon = common.bin2int(mb[39:56]) / 131072.0 i = int(mb[21]) d_lat = 360.0/59 if i else 360.0/60 j = common.floor(lat_ref / d_lat) \ + common.floor(0.5 + ((lat_ref % d_lat) / d_lat) - cprlat) lat = d_lat * (j + cprlat) ni = common.cprNL(lat) - i if ni > 0: d_lon = 360.0 / ni else: d_lon = 360.0 m = common.floor(lon_ref / d_lon) \ + common.floor(0.5 + ((lon_ref % d_lon) / d_lon) - cprlon) lon = d_lon * (m + cprlon) return round(lat, 5), round(lon, 5)
Decode airborne position with only one message, knowing reference nearby location, such as previously calculated location, ground station, or airport location, etc. The reference position shall be within 180 NM of the true position. Args: msg (string): even message (28 bytes hexadecimal string) lat_ref: previous known latitude lon_ref: previous known longitude Returns: (float, float): (latitude, longitude) of the aircraft
juraj-google-style
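A hedged usage sketch for the decoder above. The message string is a placeholder for a real 28-character even-frame ADS-B airborne-position payload, and the reference coordinates are illustrative; any valid message within 180 NM of the reference will do:

# msg stands in for a real DF17 airborne-position message (even frame).
msg = "8D40058B58C901375147EFD09357"   # treat as an illustrative hex string
lat_ref, lon_ref = 49.0, 6.0           # last known position, e.g. a ground station

lat, lon = airborne_position_with_ref(msg, lat_ref, lon_ref)
print(lat, lon)                        # decoded latitude/longitude, rounded to 5 decimals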
def schedule(time: Union[(datetime.time, datetime.datetime)], callback: Callable, *args): dt = _fillDate(time) now = datetime.datetime.now(dt.tzinfo) delay = (dt - now).total_seconds() loop = asyncio.get_event_loop() loop.call_later(delay, callback, *args)
Schedule the callback to be run at the given time with the given arguments. Args: time: Time to run callback. If given as :py:class:`datetime.time` then use today as date. callback: Callable scheduled to run. args: Arguments to call the callback with.
codesearchnet
def absolute_distance(cls, q0, q1): q0_minus_q1 = (q0 - q1) q0_plus_q1 = (q0 + q1) d_minus = q0_minus_q1.norm d_plus = q0_plus_q1.norm if (d_minus < d_plus): return d_minus else: return d_plus
Quaternion absolute distance. Find the distance between two quaternions accounting for the sign ambiguity. Params: q0: the first quaternion q1: the second quaternion Returns: A positive scalar corresponding to the chord of the shortest path/arc that connects q0 to q1. Note: This function does not measure the distance on the hypersphere, but it takes into account the fact that q and -q encode the same rotation. It is thus a good indicator for rotation similarities.
codesearchnet
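A short numeric sketch of the sign-ambiguity property described above, using plain numpy vectors in place of a quaternion class (an assumption; the real method operates on Quaternion objects exposing a `.norm` attribute):

import numpy as np

def absolute_distance_np(q0, q1):
    # Same logic as above: min of ||q0 - q1|| and ||q0 + q1||.
    return min(np.linalg.norm(q0 - q1), np.linalg.norm(q0 + q1))

q = np.array([1.0, 0.0, 0.0, 0.0])       # identity rotation
print(absolute_distance_np(q, q))         # 0.0
print(absolute_distance_np(q, -q))        # 0.0 -- q and -q encode the same rotation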
def create_string(self, key, value): data = None if ((key is not None) and (value is not None)): if isinstance(value, (bool, list, int, dict)): value = u'{}'.format(value) data = self.db.create(key.strip(), u'{}'.format(json.dumps(value))) else: self.tcex.log.warning(u'The key or value field was None.') return data
Create method of CRUD operation for string data. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. Returns: (string): Result of DB write.
codesearchnet
def get(self, report_id): return Report(self._app, self._swimlane.request('get', 'reports/{0}'.format(report_id)).json())
Retrieve report by ID Args: report_id (str): Full report ID Returns: Report: Corresponding Report instance
codesearchnet
def reset_logger(name, level=None, handler=None): if (level is None): level = logging.INFO logger = logging.getLogger(name) logger.setLevel(level) handler = (handler or logging.StreamHandler()) handler.setFormatter(logging.Formatter(_DEFAULT_LOG_FORMAT)) logger.handlers = [handler] return logger
Make a standard python logger object with default formatter, handler, etc. Defaults are: - level == logging.INFO - handler == logging.StreamHandler() Args: name: a logger name. level: an optional initial log level for this logger. handler: an optional initial handler for this logger. Returns: a standard python logger with a single handler.
codesearchnet
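A minimal usage sketch (the `_DEFAULT_LOG_FORMAT` constant is assumed to be defined by the surrounding module):

import logging

log = reset_logger("my_app", level=logging.DEBUG)
log.debug("handler and formatter are already attached")

# Passing an explicit handler, e.g. to log to a file instead of stderr:
file_log = reset_logger("my_app.file", handler=logging.FileHandler("app.log"))
file_log.info("written to app.log")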
def add_lambda_permissions(function='', statement_id='', action='lambda:InvokeFunction', principal='', source_arn='', env='', region='us-east-1'): session = boto3.Session(profile_name=env, region_name=region) lambda_client = session.client('lambda') response_action = None prefixed_sid = (FOREMAST_PREFIX + statement_id) add_permissions_kwargs = {'FunctionName': function, 'StatementId': prefixed_sid, 'Action': action, 'Principal': principal} if source_arn: add_permissions_kwargs['SourceArn'] = source_arn try: lambda_client.add_permission(**add_permissions_kwargs) response_action = 'Add permission with Sid: {}'.format(prefixed_sid) except boto3.exceptions.botocore.exceptions.ClientError as error: LOG.debug('Add permission error: %s', error) response_action = 'Did not add permissions' LOG.debug('Related StatementId (SID): %s', prefixed_sid) LOG.info(response_action)
Add permission to Lambda for the event trigger. Args: function (str): Lambda function name statement_id (str): IAM policy statement (principal) id action (str): Lambda action to allow principal (str): AWS principal to add permissions source_arn (str): ARN of the source of the event. Only needed for S3 env (str): Environment/account of function region (str): AWS region of function
codesearchnet
def Serialize(self, writer): try: writer.WriteByte(self.Type) writer.WriteHashes(self.Hashes) except Exception as e: logger.error(f"COULD NOT WRITE INVENTORY HASHES ({self.Type} {self.Hashes}) {e}")
Serialize object. Raises: Exception: if hash writing fails. Args: writer (neo.IO.BinaryWriter):
juraj-google-style
def ParseAccountInformation(self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) display_name = self._GetRowValue(query_hash, row, 'given_displayname') fullname = self._GetRowValue(query_hash, row, 'fullname') username = '{0!s} <{1!s}>'.format(fullname, display_name) event_data = SkypeAccountEventData() event_data.country = self._GetRowValue(query_hash, row, 'country') event_data.display_name = display_name event_data.email = self._GetRowValue(query_hash, row, 'emails') event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.username = username timestamp = self._GetRowValue(query_hash, row, 'profile_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Profile Changed') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'authreq_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Authenticate Request') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'lastonline_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Last Online') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'mood_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Mood Event') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'sent_authrequest_time') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Auth Request Sent') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'lastused_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Last Used') parser_mediator.ProduceEventWithEventData(event, event_data)
Parses account information. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row with account information.
codesearchnet
def index_impute2(fn): logger.info('Indexing {} (IMPUTE2)'.format(fn)) impute2_index(fn, cols=[0, 1, 2], names=['chrom', 'name', 'pos'], sep=' ') logger.info('Index generated')
Indexes an IMPUTE2 file. Args: fn (str): The name of the IMPUTE2 file.
codesearchnet
def display(self, *amplExpressions): exprs = list(map(str, amplExpressions)) lock_and_call( lambda: self._impl.displayLst(exprs, len(exprs)), self._lock )
Writes on the current OutputHandler the outcome of the AMPL statement. .. code-block:: ampl display e1, e2, .., en; where e1, ..., en are the strings passed to the procedure. Args: amplExpressions: Expressions to be evaluated.
juraj-google-style
def delete_box_comment(self, box_key, comment_key): self._raise_unimplemented_error() uri = '/'.join([self.api_uri, self.boxes_suffix, box_key, self.comments_suffix, comment_key ]) return self._req('delete', uri)
Deletes the comment with comment_key from the box specified by box_key. Args: box_key: key for the box containing the comment. comment_key: key for the comment to delete. Returns: (status code, list of comment dicts)
juraj-google-style
def _set_xml_from_keys(self, root, item, **kwargs): (key, val) = item target_key = root.find(key) if (target_key is None): target_key = ElementTree.SubElement(root, key) if isinstance(val, dict): for dict_item in val.items(): self._set_xml_from_keys(target_key, dict_item, **kwargs) return if (key in kwargs): kwarg = kwargs[key] if isinstance(kwarg, bool): kwargs[key] = str(kwargs[key]).lower() elif (kwarg is None): kwargs[key] = '' elif isinstance(kwarg, int): kwargs[key] = str(kwargs[key]) elif isinstance(kwarg, JSSObject): kwargs[key] = kwargs[key].name target_key.text = kwargs.get(key, val)
Create SubElements of root with kwargs. Args: root: Element to add SubElements to. item: Tuple key/value pair from self.data_keys to add. kwargs: For each item in self.data_keys, if it has a corresponding kwarg, create a SubElement at root with the kwarg's value. Int and bool values will be cast to string. (Int 10, bool False become string values "10" and "false"). Dicts will be recursively added to their key's Element.
codesearchnet
def from_vision_text_configs(cls, vision_config: PretrainedConfig, text_config: PretrainedConfig, **kwargs): return cls(vision_config=vision_config.to_dict(), text_config=text_config.to_dict(), **kwargs)
Instantiate a [`VisionTextDualEncoderConfig`] (or a derived class) from text model configuration and vision model configuration. Returns: [`VisionTextDualEncoderConfig`]: An instance of a configuration object
github-repos
def RetrieveAsset(logdir, plugin_name, asset_name): asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name) try: with tf.io.gfile.GFile(asset_path, 'r') as f: return f.read() except tf.errors.NotFoundError: raise KeyError(('Asset path %s not found' % asset_path)) except tf.errors.OpError as e: raise KeyError(("Couldn't read asset path: %s, OpError %s" % (asset_path, e)))
Retrieve a particular plugin asset from a logdir. Args: logdir: A directory that was created by a TensorFlow summary.FileWriter. plugin_name: The plugin we want an asset from. asset_name: The name of the requested asset. Returns: string contents of the plugin asset. Raises: KeyError: if the asset does not exist.
codesearchnet
def __init__(self, data=''): super(Lexer, self).__init__() self.buffer = data self.error = 0 self.flags = 0 self.processed = 0 self.processed_buffer = '' self.state = self._INITIAL_STATE self.state_stack = [] self.verbose = 0
Initializes the lexer object. Args: data: optional initial data to be processed by the lexer.
juraj-google-style
def look_up(self, **keys: Dict[(InstanceName, ScalarValue)]) -> 'ArrayEntry': if (not isinstance(self.schema_node, ListNode)): raise InstanceValueError(self.json_pointer(), 'lookup on non-list') try: for i in range(len(self.value)): en = self.value[i] flag = True for k in keys: if (en[k] != keys[k]): flag = False break if flag: return self._entry(i) raise NonexistentInstance(self.json_pointer(), 'entry lookup failed') except KeyError: raise NonexistentInstance(self.json_pointer(), 'entry lookup failed') from None except TypeError: raise InstanceValueError(self.json_pointer(), 'lookup on non-list') from None
Return the entry with matching keys. Args: keys: Keys and values specified as keyword arguments. Raises: InstanceValueError: If the receiver's value is not a YANG list. NonexistentInstance: If no entry with matching keys exists.
codesearchnet
def update_current_state(self, value: str, force: bool=False) -> datetime: value = value.lower() if (not force): current_state = self.current_state if (current_state == 'unknown'): allowed_transitions = self._allowed_states else: allowed_transitions = self._allowed_transitions[current_state] allowed_transitions.append(current_state) LOG.debug('Updating current state of %s to %s', self._id, value) if (value not in allowed_transitions): raise ValueError("Invalid current state update: '{}'. '{}' can be transitioned to states: {}".format(value, current_state, allowed_transitions)) return self._update_state('current', value)
Update the current state. Args: value (str): New value for sdp state force (bool): If true, ignore allowed transitions Returns: datetime, update timestamp Raises: ValueError: If the specified current state is not allowed.
codesearchnet
def is_compatible_with(self, spec_or_tensor): return (self._dtype.is_compatible_with(spec_or_tensor.dtype) and self._shape.is_compatible_with(spec_or_tensor.shape))
Returns True if spec_or_tensor is compatible with this TensorSpec. Two tensors are considered compatible if they have the same dtype and their shapes are compatible (see `tf.TensorShape.is_compatible_with`). Args: spec_or_tensor: A tf.TensorSpec or a tf.Tensor Returns: True if spec_or_tensor is compatible with self.
codesearchnet
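The same compatibility check is available on the public `tf.TensorSpec`; a small sketch:

import tensorflow as tf

spec = tf.TensorSpec(shape=[None, 3], dtype=tf.float32)

print(spec.is_compatible_with(tf.zeros([2, 3])))                 # True: dtype matches, [2, 3] fits [None, 3]
print(spec.is_compatible_with(tf.zeros([2, 4])))                 # False: last dimension differs
print(spec.is_compatible_with(tf.TensorSpec([5, 3], tf.int32)))  # False: dtype differs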
def datasets_update(self, dataset_name, dataset_info): url = (Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)) return datalab.utils.Http.request(url, method='PUT', data=dataset_info, credentials=self._credentials)
Updates the Dataset info. Args: dataset_name: the name of the dataset to update as a tuple of components. dataset_info: the Dataset resource with updated fields.
codesearchnet
def from_ase_atoms(cls, atoms): return cls(atoms=atoms.get_chemical_symbols(), coords=atoms.positions)
Create an instance of this class from an ASE molecule. Args: atoms (:class:`ase.atoms.Atoms`): Returns: Cartesian:
juraj-google-style
def _flush(self, buffer, start, end): buffer_size = len(buffer) if not buffer_size: return with self._size_lock: if end > self._size: with _handle_azure_exception(): self._resize(content_length=end, **self._client_kwargs) self._reset_head() if buffer_size > self.MAX_FLUSH_SIZE: futures = [] for part_start in range(0, buffer_size, self.MAX_FLUSH_SIZE): buffer_part = buffer[ part_start:part_start + self.MAX_FLUSH_SIZE] if not len(buffer_part): break start_range = start + part_start futures.append(self._workers.submit( self._update_range, data=buffer_part.tobytes(), start_range=start_range, end_range=start_range + len(buffer_part) - 1, **self._client_kwargs)) with _handle_azure_exception(): for future in _as_completed(futures): future.result() else: with _handle_azure_exception(): self._update_range( data=buffer.tobytes(), start_range=start, end_range=end - 1, **self._client_kwargs)
Flush the write buffer of the stream if applicable. Args: buffer (memoryview): Buffer content. start (int): Start of buffer position to flush. Supported only with page blobs. end (int): End of buffer position to flush. Supported only with page blobs.
juraj-google-style
def _step(time, output_ta_t, prev_output, *states): current_input = tuple((ta.read(time) for ta in input_ta)) current_input = nest.pack_sequence_as(inputs, current_input) mask_t = masking_fn(time) output, new_states = step_function(current_input, tuple(states) + tuple(constants)) flat_output = nest.flatten(output) flat_mask_output = flat_zero_output if zero_output_for_mask else nest.flatten(prev_output) flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output) flat_state = nest.flatten(states) flat_new_state = nest.flatten(new_states) for state, new_state in zip(flat_state, flat_new_state): if isinstance(new_state, tensor_lib.Tensor): new_state.set_shape(state.shape) flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state) new_states = nest.pack_sequence_as(new_states, flat_final_state) output_ta_t = tuple((ta.write(time, out) for ta, out in zip(output_ta_t, flat_new_output))) return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states)
RNN step function. Args: time: Current timestep value. output_ta_t: TensorArray. prev_output: tuple of outputs from time - 1. *states: List of states. Returns: Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
github-repos
def __field_to_parameter_type_and_format(self, field): variant = field.variant if variant == messages.Variant.MESSAGE: raise TypeError('A message variant cannot be used in a parameter.') return CUSTOM_VARIANT_MAP.get(variant) or (variant.name.lower(), None)
Converts the field variant type into a tuple describing the parameter. Args: field: An instance of a subclass of messages.Field. Returns: A tuple with the type and format of the field, respectively. Raises: TypeError: if the field variant is a message variant.
juraj-google-style
def try_claim(self, position): raise NotImplementedError
Attempts to claim the block of work in the current restriction identified by the given position. Each claimed position MUST be a valid split point. If this succeeds, the DoFn MUST execute the entire block of work. If it fails, the ``DoFn.process()`` MUST return ``None`` without performing any additional work or emitting output (note that emitting output or performing work from ``DoFn.process()`` is also not allowed before the first call of this method). The API is required to be implemented. Args: position: current position that wants to be claimed. Returns: ``True`` if the position can be claimed as current_position. Otherwise, returns ``False``.
github-repos
def unnest_collection(collection, df_list): for item in collection['link']['item']: if (item['class'] == 'dataset'): df_list.append(Dataset.read(item['href']).write('dataframe')) elif (item['class'] == 'collection'): nested_collection = request(item['href']) unnest_collection(nested_collection, df_list)
Unnest collection structure extracting all its datasets and converting them to Pandas Dataframes. Args: collection (OrderedDict): data in JSON-stat format, previously deserialized to a python object by json.load() or json.loads(). df_list (list): list variable which will contain the converted datasets. Returns: Nothing.
codesearchnet
def _get_model_reference(self, model_id): return ModelReference.from_api_repr({'projectId': self.project, 'datasetId': self.dataset_id, 'modelId': model_id})
Constructs a ModelReference. Args: model_id (str): the ID of the model. Returns: google.cloud.bigquery.model.ModelReference: A ModelReference for a model in this dataset.
codesearchnet
def from_string(contents): lines = contents.split('\n') num_sites = int(lines[0]) coords = [] sp = [] prop = [] coord_patt = re.compile(('(\\w+)\\s+([0-9\\-\\.]+)\\s+([0-9\\-\\.]+)\\s+([0-9\\-\\.]+)\\s+' + '([0-9\\-\\.]+)')) for i in range(2, (2 + num_sites)): m = coord_patt.search(lines[i]) if m: sp.append(m.group(1)) coords.append([float(j) for j in [m.group(i) for i in [3, 4, 2]]]) prop.append(float(m.group(5))) return ZeoVoronoiXYZ(Molecule(sp, coords, site_properties={'voronoi_radius': prop}))
Creates Zeo++ Voronoi XYZ object from a string. from_string method of XYZ class is being redefined. Args: contents: String representing Zeo++ Voronoi XYZ file. Returns: ZeoVoronoiXYZ object
codesearchnet
def _step(time, output_ta_t, *states): current_input = tuple((ta.read(time) for ta in input_ta)) current_input = nest.pack_sequence_as(inputs, current_input) output, new_states = step_function(current_input, tuple(states) + tuple(constants)) flat_state = nest.flatten(states) flat_new_state = nest.flatten(new_states) for state, new_state in zip(flat_state, flat_new_state): if isinstance(new_state, tensor_lib.Tensor): new_state.set_shape(state.shape) flat_output = nest.flatten(output) output_ta_t = tuple((ta.write(time, out) for ta, out in zip(output_ta_t, flat_output))) new_states = nest.pack_sequence_as(initial_states, flat_new_state) return (time + 1, output_ta_t) + tuple(new_states)
RNN step function. Args: time: Current timestep value. output_ta_t: TensorArray. *states: List of states. Returns: Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
github-repos
def annotate_source(source, ast_module, pytype_options): source_code = infer_types(source, pytype_options) module = ast_module.parse(source, pytype_options.input) visitor = AnnotateAstVisitor(source_code, ast_module) visitor.visit(module) return module
Infer types for `source`, and return an AST of it with types added. Args: source: Text, the source code to type-infer and parse to an AST. ast_module: An ast-module like object used to parse the source to an AST and traverse the created ast.Module object. pytype_options: pytype.config.Options, the options to pass onto Pytype. Returns: The ast.Module object created by `ast_module`, annotated with the inferred types.
github-repos
def broadcast_weights(weights, values): with ops.name_scope(None, 'broadcast_weights', (weights, values)) as scope: values = ops.convert_to_tensor(values, name='values') weights = ops.convert_to_tensor(weights, dtype=values.dtype.base_dtype, name='weights') weights_shape = weights.get_shape() values_shape = values.get_shape() if weights_shape.is_fully_defined() and values_shape.is_fully_defined() and weights_shape.is_compatible_with(values_shape): return weights if control_flow_ops.get_enclosing_xla_context() is not None: return math_ops.multiply(weights, array_ops.ones_like(values), name=scope) with ops.control_dependencies((assert_broadcastable(weights, values),)): return math_ops.multiply(weights, array_ops.ones_like(values), name=scope)
Broadcast `weights` to the same shape as `values`. This returns a version of `weights` following the same broadcast rules as `mul(weights, values)`, but limited to the weights shapes allowed by `assert_broadcastable`. When computing a weighted average, use this function to broadcast `weights` before summing them; e.g., `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`. Args: weights: `Tensor` whose shape is broadcastable to `values` according to the rules of `assert_broadcastable`. values: `Tensor` of any shape. Returns: `weights` broadcast to `values` shape according to the rules of `assert_broadcastable`.
github-repos
def to_dense_one_hot(labels, class_count): if not isinstance(class_count, tf.compat.integral_types): raise TypeError('class_count must be an integer type.') if labels.dtype.base_dtype not in (tf.int32, tf.int64): raise TypeError('Labels must be an integer: %s' % labels.dtype) if labels.get_shape().ndims != 1: raise ValueError('Labels must be a rank 1 tensor: %s' % labels.get_shape()) dtype = labels.dtype.base_dtype class_tensor = tf.convert_to_tensor( class_count, dtype=dtype, name='class_count') batch = tf.gather(tf.shape(labels), 0) count = tf.expand_dims(tf.range(0, limit=batch), 1) labels = tf.expand_dims(labels, 1) batch = tf.gather(tf.shape(labels), 0) if dtype != tf.int32: count = tf.cast(count, dtype) batch = tf.cast(batch, dtype) result = tf.sparse_to_dense( tf.concat([count, labels], 1), tf.concat([tf.expand_dims(batch, 0), tf.expand_dims(class_tensor, 0)], 0), 1.0, 0.0) result.set_shape([labels.get_shape().dims[0], class_count]) return result
Converts a vector that specified one-hot per batch into a dense version. Args: labels: The labels input. class_count: The number of classes as an int. Returns: One dense vector for each item in the batch. Raises: ValueError: If labels is not rank 1. TypeError: If class_count is not an integer or labels is not an integer Tensor.
juraj-google-style
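For reference, the same dense one-hot result can be produced with the modern `tf.one_hot` op; this sketch is not part of the original library and only illustrates the expected output:

import tensorflow as tf

labels = tf.constant([0, 2, 1])          # rank-1 integer labels
dense = tf.one_hot(labels, depth=3)      # shape [3, 3], one 1.0 per row
# dense == [[1., 0., 0.],
#           [0., 0., 1.],
#           [0., 1., 0.]]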
def pretty_printer_for_analytics(cls, primitive_handler_: primitive_handler.PrimitiveHandler, indent_size: int) -> 'JsonPrinter': return cls(primitive_handler_, _PrettyJsonTextGenerator(indent_size), _FhirJsonFormat.ANALYTIC)
Returns a printer for Analytic FHIR JSON with spaces and newlines. Args: primitive_handler_: Responsible for returning PrimitiveWrappers. indent_size: The size of space indentation for lexical scoping.
github-repos
def seek_to_beginning(self, *partitions): if (not all([isinstance(p, TopicPartition) for p in partitions])): raise TypeError('partitions must be TopicPartition namedtuples') if (not partitions): partitions = self._subscription.assigned_partitions() assert partitions, 'No partitions are currently assigned' else: for p in partitions: assert (p in self._subscription.assigned_partitions()), 'Unassigned partition' for tp in partitions: log.debug('Seeking to beginning of partition %s', tp) self._subscription.need_offset_reset(tp, OffsetResetStrategy.EARLIEST)
Seek to the oldest available offset for partitions. Arguments: *partitions: Optionally provide specific TopicPartitions, otherwise default to all assigned partitions. Raises: AssertionError: If any partition is not currently assigned, or if no partitions are assigned.
codesearchnet
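A hedged usage sketch with kafka-python's `KafkaConsumer`; the broker address and topic name are placeholders:

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers="localhost:9092",
                         group_id="my-group",
                         enable_auto_commit=False)
tp = TopicPartition("my-topic", 0)
consumer.assign([tp])

# Rewind the assigned partition to its oldest available offset.
consumer.seek_to_beginning(tp)
for record in consumer:
    print(record.offset, record.value)
    break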
def find_contour_yaml(config_file=__file__, names=None): checked = set() contour_yaml = _find_countour_yaml(os.path.dirname(config_file), checked, names=names) if not contour_yaml: contour_yaml = _find_countour_yaml(os.getcwd(), checked, names=names) return contour_yaml
Traverse directory trees to find a contour.yaml file. Begins with the location of this file, then checks the working directory if not found. Args: config_file: location of this file, override for testing. names: optional list of alternate file names to search for. Returns: the path of contour.yaml or None if not found
juraj-google-style
def sg_prod(tensor, opt): return tf.reduce_prod(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
Computes the product of elements across axis of a tensor. See `tf.reduce_prod()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`.
codesearchnet
def __init__(self, filenames, compression_type=None, buffer_size=None, name=None): self._filenames = filenames self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string) self._buffer_size = convert.optional_param_to_tensor('buffer_size', buffer_size, argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES) self._name = name variant_tensor = gen_dataset_ops.text_line_dataset(self._filenames, self._compression_type, self._buffer_size, metadata=self._metadata.SerializeToString()) super(_TextLineDataset, self).__init__(variant_tensor)
Creates a `TextLineDataset`. Args: filenames: A `tf.string` tensor containing one or more filenames. compression_type: (Optional.) A `tf.string` scalar evaluating to one of `""` (no compression), `"ZLIB"`, or `"GZIP"`. buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes to buffer. A value of 0 results in the default buffering values chosen based on the compression type. name: (Optional.) A name for the tf.data operation.
github-repos
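The public `tf.data.TextLineDataset` wraps this class; a short usage sketch (the file names are placeholders):

import tensorflow as tf

# One element per line, across both files, decompressing gzip on the fly.
ds = tf.data.TextLineDataset(["logs_a.txt.gz", "logs_b.txt.gz"],
                             compression_type="GZIP",
                             buffer_size=1024 * 1024)
for line in ds.take(3):
    print(line.numpy())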
def moves_from_last_n_games(self, n, moves, shuffle, column_family, column): self.wait_for_fresh_games() latest_game = self.latest_game_number utils.dbg(('Latest game in %s: %s' % (self.btspec.table, latest_game))) if (latest_game == 0): raise ValueError('Cannot find a latest game in the table') start = int(max(0, (latest_game - n))) ds = self.moves_from_games(start, latest_game, moves, shuffle, column_family, column) return ds
Randomly choose a given number of moves from the last n games. Args: n: number of games at the end of this GameQueue to source. moves: number of moves to be sampled from `n` games. shuffle: if True, shuffle the selected moves. column_family: name of the column family containing move examples. column: name of the column containing move examples. Returns: a dataset containing the selected moves.
codesearchnet
def get_value_set(self, value_set_url: str) -> Optional[_ValueSetT]:
Returns the ValueSet identified by the given URL. Args: value_set_url: The URL for the FHIR ValueSet to be returned. Returns: The corresponding value set, or None if no such value set exists.
github-repos
def mirror_pull(self, **kwargs): path = ('/projects/%s/mirror/pull' % self.get_id()) self.manager.gitlab.http_post(path, **kwargs)
Start the pull mirroring process for the project. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server failed to perform the request
codesearchnet
def get_package_from_string(txt, paths=None): o = VersionedObject(txt) return get_package(o.name, o.version, paths=paths)
Get a package given a string. Args: txt (str): String such as 'foo', 'bah-1.3'. paths (list of str, optional): paths to search for package, defaults to `config.packages_path`. Returns: `Package` instance, or None if no package was found.
juraj-google-style
def fail_api(channel): gui = ui_embed.UI(channel, "Couldn't get stats off RLTrackerNetwork.", 'Maybe the API changed, please tell Infraxion.', modulename=modulename, colour=35071) return gui
Creates an embed UI for when the API call didn't work Args: channel (discord.Channel): The Discord channel to bind the embed to Returns: ui (ui_embed.UI): The embed UI object
codesearchnet
def require_meta_and_content(self, content_handler, params, **kwargs): meta = {'params': params} content = content_handler(params, meta, **kwargs) meta['params'] = params return (meta, content)
Require 'meta' and 'content' dictionaries using the proper handler. Args: content_handler (callable): function that accepts ``params, meta, **kwargs`` argument and returns dictionary for ``content`` response section params (dict): dictionary of parsed resource parameters kwargs (dict): dictionary of values created from resource url template Returns: tuple (meta, content): two-tuple with dictionaries of ``meta`` and ``content`` response sections
codesearchnet
def CheckVersion(problems, latest_version=None): if (not latest_version): timeout = 20 socket.setdefaulttimeout(timeout) request = urllib2.Request(LATEST_RELEASE_VERSION_URL) try: response = urllib2.urlopen(request) content = response.read() m = re.search('version=(\\d+\\.\\d+\\.\\d+)', content) if m: latest_version = m.group(1) except urllib2.HTTPError as e: description = ('During the new-version check, we failed to reach transitfeed server: Reason: %s [%s].' % (e.reason, e.code)) problems.OtherProblem(description=description, type=errors.TYPE_NOTICE) return except urllib2.URLError as e: description = ('During the new-version check, we failed to reach transitfeed server. Reason: %s.' % e.reason) problems.OtherProblem(description=description, type=errors.TYPE_NOTICE) return if (not latest_version): description = ('During the new-version check, we had trouble parsing the contents of %s.' % LATEST_RELEASE_VERSION_URL) problems.OtherProblem(description=description, type=errors.TYPE_NOTICE) return newest_version = _MaxVersion([latest_version, __version__]) if (__version__ != newest_version): problems.NewVersionAvailable(newest_version)
Check if there is a newer version of transitfeed available. Args: problems: if a new version is available, a NewVersionAvailable problem will be added latest_version: if specified, override the latest version read from the project page
codesearchnet
def run_defense_work(self, work_id): class_batch_id = self.defense_work.work[work_id]['output_classification_batch_id'] class_batch = self.class_batches.read_batch_from_datastore(class_batch_id) adversarial_batch_id = class_batch['adversarial_batch_id'] submission_id = class_batch['submission_id'] cloud_result_path = class_batch['result_path'] logging.info('Defense work piece: adversarial_batch_id="%s" submission_id="%s"', adversarial_batch_id, submission_id) if (submission_id in self.blacklisted_submissions): raise WorkerError('Blacklisted submission') defense = DefenseSubmission(submission_id, self.submissions, self.storage_bucket) defense.download() input_dir = os.path.join(LOCAL_INPUT_DIR, adversarial_batch_id) if os.path.exists(input_dir): sudo_remove_dirtree(input_dir) os.makedirs(input_dir) try: shell_call(['gsutil', '-m', 'cp', os.path.join('gs: adv_images_files = os.listdir(input_dir) if ((len(adv_images_files) == 1) and adv_images_files[0].endswith('.zip')): logging.info('Adversarial batch is in zip archive %s', adv_images_files[0]) shell_call(['unzip', os.path.join(input_dir, adv_images_files[0]), '-d', input_dir]) os.remove(os.path.join(input_dir, adv_images_files[0])) adv_images_files = os.listdir(input_dir) logging.info('%d adversarial images copied', len(adv_images_files)) except (subprocess.CalledProcessError, IOError) as e: raise WorkerError('Cant copy adversarial batch locally', e) if os.path.exists(LOCAL_OUTPUT_DIR): sudo_remove_dirtree(LOCAL_OUTPUT_DIR) os.mkdir(LOCAL_OUTPUT_DIR) output_filname = os.path.join(LOCAL_OUTPUT_DIR, 'result.csv') elapsed_time_sec = defense.run(input_dir, output_filname) batch_result = eval_lib.analyze_one_classification_result(storage_client=None, file_path=output_filname, adv_batch=self.adv_batches.data[adversarial_batch_id], dataset_batches=self.dataset_batches, dataset_meta=self.dataset_meta) try: shell_call(['gsutil', 'cp', output_filname, os.path.join('gs: except subprocess.CalledProcessError as e: raise WorkerError('Cant result to Cloud Storage', e) return (elapsed_time_sec, submission_id, batch_result)
Runs one defense work. Args: work_id: ID of the piece of work to run Returns: elapsed_time_sec, submission_id - elapsed time and id of the submission Raises: WorkerError: if error occurred during execution.
codesearchnet
def verify_key_in_shelve(file_name, save_key, file_location): file = __os.path.join(file_location, file_name) shelve_store = __shelve.open(file) exists = shelve_store.get(save_key) shelve_store.close() if exists: return True elif not exists: return False
Function to check for a key in a shelve Args: file_name: Shelve storage file name save_key: The name of the key the item is stored in file_location: The location of the file, derive from the os module Returns: returns true or false
juraj-google-style
def __init__(self, config: Dict) -> None: self.fields_dict = dict() try: for field in config["fields"]: if config["fields"][field]["type"] == "kg_id": self.fields_dict[field] = FieldType.KG_ID elif config["fields"][field]["type"] == "number": self.fields_dict[field] = FieldType.NUMBER elif config["fields"][field]["type"] == "date": self.fields_dict[field] = FieldType.DATE elif config["fields"][field]["type"] == "location": self.fields_dict[field] = FieldType.LOCATION else: self.fields_dict[field] = FieldType.STRING except KeyError as key: print(str(key) + " not in config")
Record a mapping of each field and its type from the config file. Args: config: Dict
juraj-google-style
def dependency_of_targets(targets, op): if isinstance(op, tf.Tensor): op = op.op assert isinstance(op, tf.Operation), op from tensorflow.contrib.graph_editor import get_backward_walk_ops dependent_ops = get_backward_walk_ops(targets, control_inputs=True) return (op in dependent_ops)
Check that op is in the subgraph induced by the dependencies of targets. The result is memoized. This is useful if some SessionRunHooks should be run only together with certain ops. Args: targets: a tuple of ops or tensors. The targets to find dependencies of. op (tf.Operation or tf.Tensor): Returns: bool: True if any one of `targets` depend on `op`.
codesearchnet
def get_model_indexes(model): indexes = [] for index in get_index_names(): for app_model in get_index_models(index): if app_model == model: indexes.append(index) return indexes
Return list of all indexes in which a model is configured. A model may be configured to appear in multiple indexes. This function will return the names of the indexes as a list of strings. This is useful if you want to know which indexes need updating when a model is saved. Args: model: a Django model class.
juraj-google-style
def get(self, container_id): resp = self.client.api.inspect_container(container_id) return self.prepare_model(resp)
Get a container by name or ID. Args: container_id (str): Container name or ID. Returns: A :py:class:`Container` object. Raises: :py:class:`docker.errors.NotFound` If the container does not exist. :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def _kl_half_normal_half_normal(a, b, name=None): with tf.name_scope(name or "kl_half_normal_half_normal"): return (tf.math.log(b.scale) - tf.math.log(a.scale) + (a.scale**2 - b.scale**2) / (2 * b.scale**2))
Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`. Args: a: Instance of a `HalfNormal` distribution object. b: Instance of a `HalfNormal` distribution object. name: (optional) Name to use for created operations. default is "kl_half_normal_half_normal". Returns: Batchwise KL(a || b)
juraj-google-style
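In formula form, the expression computed above is the closed-form KL divergence between two half-normal distributions with scales sigma_a and sigma_b:

$$\mathrm{KL}\big(\mathcal{HN}(\sigma_a)\,\|\,\mathcal{HN}(\sigma_b)\big) = \log\frac{\sigma_b}{\sigma_a} + \frac{\sigma_a^2 - \sigma_b^2}{2\sigma_b^2}$$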
async def do_upload(context, files): status = 0 try: await upload_artifacts(context, files) except ScriptWorkerException as e: status = worst_level(status, e.exit_code) log.error("Hit ScriptWorkerException: {}".format(e)) except aiohttp.ClientError as e: status = worst_level(status, STATUSES['intermittent-task']) log.error("Hit aiohttp error: {}".format(e)) except Exception as e: log.exception("SCRIPTWORKER_UNEXPECTED_EXCEPTION upload {}".format(e)) raise return status
Upload artifacts and return status. Returns the integer status of the upload. args: context (scriptworker.context.Context): the scriptworker context. files (list of str): list of files to be uploaded as artifacts Raises: Exception: on unexpected exception. Returns: int: exit status
juraj-google-style
def export_ply(filename, cutout, level=0): if ".ply" not in filename: filename = filename + ".ply" vs, fs = mcubes.marching_cubes(cutout, level) with open(filename, 'w') as fh: lines = [ "ply", "format ascii 1.0", "comment generated by ndio", "element vertex " + str(len(vs)), "property float32 x", "property float32 y", "property float32 z", "element face " + str(len(fs)), "property list uint8 int32 vertex_index", "end_header" ] fh.write("\n".join(lines) + "\n") for v in vs: fh.write("{} {} {}\n".format(v[0], v[1], v[2])) for f in fs: fh.write("3 {} {} {}\n".format(f[0], f[1], f[2]))
Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success
juraj-google-style
def send(self, content_type='HTML'): payload = self.api_representation(content_type) endpoint = 'https: self._make_api_call('post', endpoint=endpoint, data=json.dumps(payload))
Takes the recipients, body, and attachments of the Message and sends. Args: content_type: Can either be 'HTML' or 'Text', defaults to HTML.
codesearchnet
def assert_array_lines_close(test, expected_array, array_lines): elements = [] for line in array_lines: line = re.sub(_ARRAY_VALUE_SEPARATOR_REGEX, ' ', line) elements.extend((float(s) for s in line.split())) test.assertAllClose(np.array(expected_array).flatten(), elements)
Assert that the array value represented by lines is close to expected. Note that the shape of the array represented by the `array_lines` is ignored. Args: test: An instance of TensorFlowTestCase. expected_array: Expected value of the array. array_lines: A list of strings representing the array. E.g., "array([[ 1.0, 2.0 ], [ 3.0, 4.0 ]])" Assumes that values are separated by commas, parentheses, brackets, "|" characters and whitespace.
github-repos
def extract(self, destdir, decompress='auto'): for e in self.mardata.index.entries: name = e.name entry_path = safejoin(destdir, name) entry_dir = os.path.dirname(entry_path) mkdir(entry_dir) with open(entry_path, 'wb') as f: write_to_file(self.extract_entry(e, decompress), f) os.chmod(entry_path, e.flags)
Extract the entire MAR file into a directory. Args: destdir (str): A local directory on disk into which the contents of this MAR file will be extracted. Required parent directories will be created as necessary. decompress (obj, optional): Controls whether files are decompressed when extracted. Must be one of 'auto' or None. Defaults to 'auto'.
juraj-google-style
def fabrics(self): if (not self.__fabrics): self.__fabrics = Fabrics(self.__connection) return self.__fabrics
Gets the Fabrics API client. Returns: Fabrics:
codesearchnet
def is_line_in_file(filename: str, line: str) -> bool: assert ('\n' not in line) with open(filename, 'r') as file: for fileline in file: if (fileline == line): return True return False
Detects whether a line is present within a file. Args: filename: file to check line: line to search for (as an exact match)
codesearchnet
def _seconds_have_elapsed(token, num_seconds): now = timeit.default_timer() then = _log_timer_per_token.get(token, None) if then is None or (now - then) >= num_seconds: _log_timer_per_token[token] = now return True else: return False
Tests if 'num_seconds' have passed since 'token' was requested. Not strictly thread-safe - may log with the wrong frequency if called concurrently from multiple threads. Accuracy depends on resolution of 'timeit.default_timer()'. Always returns True on the first call for a given 'token'. Args: token: The token for which to look up the count. num_seconds: The number of seconds to test for. Returns: Whether it has been >= 'num_seconds' since 'token' was last requested.
juraj-google-style
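A small sketch of the rate-limiting pattern this helper enables; it assumes the surrounding module keeps `_log_timer_per_token` as a module-level dict, and here the token is simply the call site's message:

import logging, time

for i in range(100):
    # Log at most once every 5 seconds, no matter how hot the loop is.
    if _seconds_have_elapsed("progress-message", 5):
        logging.info("still working, iteration %d", i)
    time.sleep(0.1)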
def master_key_from_entropy(passphrase='', strength=128): if strength % 32 != 0: raise ValueError("strength must be a multiple of 32") if strength < 128 or strength > 256: raise ValueError("strength should be >= 128 and <= 256") entropy = rand_bytes(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(n, passphrase)), n
Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered.
juraj-google-style
def __init__(self, sharding_specs: List[str], mesh: Mesh): if not isinstance(mesh, Mesh): raise ValueError('mesh is not a valid Mesh object.') for _, dim_sharding in enumerate(sharding_specs): if dim_sharding == UNSHARDED or dim_sharding == MATCH: continue if sharding_specs.count(dim_sharding) > 1: raise ValueError(('Mesh dimension {mesh_dim} was repeated in sharding ' + 'specification {sharding_specs}. Mesh dimensions must be unique ' + 'in a layout.').format(mesh_dim=dim_sharding, sharding_specs=sharding_specs)) if dim_sharding not in mesh: raise ValueError(('{dim_sharding}: A dimension sharding must either be a ' + 'valid mesh dimension or UNSHARDED.').format(dim_sharding=dim_sharding)) super().__init__(type=LayoutType.STATIC, sharding_specs=sharding_specs, mesh=mesh)
Builds a Layout from a list of dimension names and a Mesh. Args: sharding_specs: List of sharding specifications, each corresponding to a tensor axis. Each specification (dim_sharding) can either be a mesh dimension or the special value UNSHARDED. mesh: A mesh configuration for the Tensor. Returns: A valid Layout built with given layout & mesh.
github-repos
def load_examples(tmp_dir, prop_train=0.09, prop_val=0.01): infile = generator_utils.maybe_download(tmp_dir, _TAR, _URL) tf.logging.info('Loading examples') all_examples = [] for (i, d) in enumerate(csv.DictReader(gzip.open(infile), delimiter='\t')): if ((i % 100000) == 0): tf.logging.info(('%d examples have been loaded....' % i)) ex = {x: (int(y) if y.isdigit() else y) for (x, y) in d.items()} all_examples.append(ex) random.seed(1) random.shuffle(all_examples) n_train = int((len(all_examples) * prop_train)) n_val = (n_train + int((len(all_examples) * prop_val))) train = all_examples[:n_train] val = all_examples[n_train:n_val] test = [] for e in all_examples[n_val:]: if (e['n_intervening'] == e['n_diff_intervening']): test.append(e) return (all_examples, train, val, test)
Loads examples from the tsv file. Args: tmp_dir: temp directory. prop_train: proportion of the train data prop_val: proportion of the validation data Returns: All examples in the dataset plus train, test, and development splits.
codesearchnet
def start(self, device): super(NativeBLEVirtualInterface, self).start(device) self.set_advertising(True)
Start serving access to this VirtualIOTileDevice Args: device (VirtualIOTileDevice): The device we will be providing access to
codesearchnet
def add_densities(density1, density2): return {spin: np.array(density1[spin]) + np.array(density2[spin]) for spin in density1.keys()}
Method to sum two densities. Args: density1: First density. density2: Second density. Returns: Dict of {spin: density}.
juraj-google-style
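A minimal sketch of summing two spin-resolved densities. The spin keys and values are illustrative numpy arrays; in pymatgen they would typically be keyed by `Spin.up` / `Spin.down`:

import numpy as np

dos_a = {"up": np.array([0.1, 0.2, 0.3]), "down": np.array([0.1, 0.1, 0.1])}
dos_b = {"up": np.array([0.0, 0.1, 0.2]), "down": np.array([0.2, 0.2, 0.2])}

total = add_densities(dos_a, dos_b)
print(total["up"])    # [0.1 0.3 0.5]
print(total["down"])  # [0.3 0.3 0.3]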
def min(self): if (len(self._data) == 0): return 10 return next(iter(sorted(self._data.keys())))
Return the minimum value in this histogram. If there are no values in the histogram at all, return 10. Returns: int: The minimum value in the histogram.
codesearchnet
def begin_abort(self, root_pipeline_key, abort_message): def txn(): pipeline_record = db.get(root_pipeline_key) if (pipeline_record is None): logging.warning('Tried to abort root pipeline ID "%s" but it does not exist.', root_pipeline_key.name()) raise db.Rollback() if (pipeline_record.status == _PipelineRecord.ABORTED): logging.warning('Tried to abort root pipeline ID "%s"; already in state: %s', root_pipeline_key.name(), pipeline_record.status) raise db.Rollback() if pipeline_record.abort_requested: logging.warning('Tried to abort root pipeline ID "%s"; abort signal already sent.', root_pipeline_key.name()) raise db.Rollback() pipeline_record.abort_requested = True pipeline_record.abort_message = abort_message pipeline_record.put() task = taskqueue.Task(url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key)) task.add(queue_name=self.queue_name, transactional=True) return True return db.run_in_transaction(txn)
Kicks off the abort process for a root pipeline and all its children. Args: root_pipeline_key: db.Key of the root pipeline to abort. abort_message: Message explaining why the abort happened, only saved into the root pipeline. Returns: True if the abort signal was sent successfully; False otherwise.
codesearchnet
def search(self, searchAreaWkt=None, filters=None, startDate=None, endDate=None, types=None): if (not types): types = ['Acquisition'] if startDate: startDateTime = datetime.datetime.strptime(startDate, '%Y-%m-%dT%H:%M:%S.%fZ') if endDate: endDateTime = datetime.datetime.strptime(endDate, '%Y-%m-%dT%H:%M:%S.%fZ') if (startDate and endDate): diff = (endDateTime - startDateTime) if (diff.days < 0): raise Exception('startDate must come before endDate.') postdata = {'searchAreaWkt': searchAreaWkt, 'types': types, 'startDate': startDate, 'endDate': endDate} if filters: postdata['filters'] = filters if searchAreaWkt: postdata['searchAreaWkt'] = searchAreaWkt url = ('%(base_url)s/search' % {'base_url': self.base_url}) headers = {'Content-Type': 'application/json'} r = self.gbdx_connection.post(url, headers=headers, data=json.dumps(postdata)) r.raise_for_status() results = r.json()['results'] return results
Perform a catalog search Args: searchAreaWkt: WKT Polygon of area to search. Optional. filters: Array of filters. Optional. Example: [ "(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')", "cloudCover < 10", "offNadirAngle < 10" ] startDate: string. Optional. Example: "2004-01-01T00:00:00.000Z" endDate: string. Optional. Example: "2004-01-01T00:00:00.000Z" types: Array of types to search for. Optional. Example (and default): ["Acquisition"] Returns: catalog search resultset
codesearchnet
def lookup_symbol(self, name, namespace_stack): symbol = Symbol(name, name.split('::'), namespace_stack) assert symbol.parts if symbol.parts[0] == '': symbol.parts = symbol.parts[1:] elif namespace_stack is not None: result = self._lookup_in_all_namespaces(symbol) if result: return result return self._lookup_global(symbol)
Returns AST node and module for symbol if found. Args: name: 'name of the symbol to lookup' namespace_stack: None or ['namespaces', 'in', 'current', 'scope'] Returns: (ast.Node, module (ie, any object stored with symbol)) if found Raises: Error if the symbol cannot be found.
juraj-google-style
def _get_ngrams(ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int): generated_ngrams = [{} for _ in range(num_hypos)] for idx in range(num_hypos): gen_tokens = prev_input_ids[idx].tolist() generated_ngram = generated_ngrams[idx] for ngram in zip(*[gen_tokens[i:] for i in range(ngram_size)]): prev_ngram_tuple = tuple(ngram[:-1]) generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] return generated_ngrams
Assume ngram_size=2 and prev_input_ids=tensor([[40, 2883, 2712, 4346]]). The output of generated ngrams look like this {(40,): [2883], (2883,): [2712], (2712,): [4346]}. Args: ngram_size (`int`): The number sequential tokens taken as a group which may only occur once before being banned. prev_input_ids (`torch.Tensor`): Generated token ids for the current hypothesis. num_hypos (`int`): The number of hypotheses for which n-grams need to be generated. Returns: generated_ngrams (`dict`): Dictionary of generated ngrams.
github-repos
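A runnable sketch reproducing the example given in the docstring above (assumes PyTorch is available):

import torch

prev_input_ids = torch.tensor([[40, 2883, 2712, 4346]])
banned = _get_ngrams(ngram_size=2, prev_input_ids=prev_input_ids, num_hypos=1)
print(banned[0])
# {(40,): [2883], (2883,): [2712], (2712,): [4346]}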
def _rmsprop(self, grads, cache=None, decay_rate=0.95): if (cache is None): cache = np.zeros_like(grads) cache = ((decay_rate * cache) + ((1 - decay_rate) * (grads ** 2))) step = ((- grads) / np.sqrt((cache + K.epsilon()))) return (step, cache)
Uses RMSProp to compute step from gradients. Args: grads: numpy array of gradients. cache: numpy array of same shape as `grads` as RMSProp cache decay_rate: How fast to decay cache Returns: A tuple of step: numpy array of the same shape as `grads` giving the step. Note that this does not yet take the learning rate into account. cache: Updated RMSProp cache.
codesearchnet
def _create_and_save_file_init_hash_table_qat_model_tf1(self, output_path: str, tags: Collection[str], signature_def_key: str) -> Tuple[Mapping[str, core.Tensor], Mapping[str, core.Tensor]]: with session.Session(graph=ops.Graph()) as sess: input_vocabs_placeholder, lookup_tensor, output_tensor = self._create_table_init_from_file_qat_model_tf1(sess) inputs = {'input_vocabs': input_vocabs_placeholder} outputs = {'lookup': lookup_tensor, 'output': output_tensor} self._save_tf1_model(sess, output_path, signature_def_key, tags, inputs=inputs, outputs=outputs, init_op=lookup_ops.tables_initializer(), assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)) return (inputs, outputs)
Creates and saves a QAT model that uses a file-initialized table. The asset file "vocab_file.txt" is used to initialize a hash table. Args: output_path: Path to the directory to save the created model. tags: Set of strings that identifies the saved meta graph. signature_def_key: Name of the SignatureDef. Used to identify the SignatureDef within the meta graph. Returns: inputs: A mapping of input_key -> input_tensor (placeholder). The input key is "input_vocabs". outputs: A mapping of output_key -> output_tensor. The output keys are "lookup" and "output".
github-repos
def get_string(self):
    return_string = None
    if not self.mmc:
        return ""
    method = 'PDASTRING'
    if method == 'PDASTRING':
        stringgen = PdaString()
        print('* Reduce PDA using DFA BFS (remove unreachable states):')
        newpda = self.mmc.s
        handle = IntersectionHandling()
        newpda = handle.get(newpda, self.mmc.accepted)
        reduce_b = ReducePDA()
        newpda = reduce_b.get(newpda)
        print("- Total PDA states after reduction are " + repr(len(newpda)))
        return_string = stringgen.init(newpda, self.mmc.accepted)
        if return_string is not None:
            return_string = return_string[0]
    elif method == 'PDACFGSTRING':
        optimized = 1
        dt1 = datetime.datetime.fromtimestamp(time.time())
        print('* Initiating PDA simplification')
        print(' - Total PDA states are ' + repr(len(self.mmc.s)))
        handle = IntersectionHandling()
        newpda = handle.get(self.mmc.s, self.mmc.accepted)
        newpda = self.mmc.s
        simply = SimplifyStateIDs()
        newpda, biggestid, newaccepted = simply.get(
            newpda, self.mmc.accepted)
        print(' - Total PDA states after id clearance are ' + repr(len(newpda)))
        replace = ReadReplace(newpda, biggestid)
        newpda = replace.replace_read()
        print(' - Total PDA states after read elimination are ' + repr(len(newpda)))
        maxstate = replace.nextstate() - 1
        print('* Reduce PDA using DFA BFS (remove unreachable states):')
        reduce_b = ReducePDA()
        newpda = reduce_b.get(newpda)
        print("- Total PDA states after reduction are " + repr(len(newpda)))
        dt2 = datetime.datetime.fromtimestamp(time.time())
        rdelta = dateutil.relativedelta.relativedelta(dt2, dt1)
        print("* PDA was simplified in %d days, %d hours, %d minutes and %d seconds" % (
            rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds))
        dt1 = datetime.datetime.fromtimestamp(time.time())
        print('* Initiating CNF from PDA generation')
        cnfgenerator = PdaCnf(newpda, newaccepted)
        dt2 = datetime.datetime.fromtimestamp(time.time())
        rdelta = dateutil.relativedelta.relativedelta(dt2, dt1)
        print("* CNF was generated in %d days, %d hours, %d minutes and %d seconds" % (
            rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds))
        dt1 = datetime.datetime.fromtimestamp(time.time())
        print('* Initiating string from CFG generation')
        grammar = cnfgenerator.get_rules(optimized)
        print(' - Total grammar rules are ' + repr(len(grammar)))
        gen = CFGGenerator(CNFGenerator(grammar),
                           optimized=optimized,
                           splitstring=0,
                           maxstate=maxstate)
        return_string = gen.generate()
        dt2 = datetime.datetime.fromtimestamp(time.time())
        rdelta = dateutil.relativedelta.relativedelta(dt2, dt1)
        print("* A string was generated in %d days, %d hours, %d minutes and %d seconds" % (
            rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds))
        print(return_string)
    else:
        return_string = None
    return return_string
Returns a string from the Diff result.

Depending on the method, either the string will be generated directly from the
PDA using the state removal method, or the PDA will first be translated to a
CFG and then a string will be generated from the CFG.

Args:
    None
Returns:
    A string from the Diff
juraj-google-style
def load(cls, fh):
    dat = fh.read()
    try:
        ret = cls.from_json(dat)
    except Exception:
        ret = cls.from_yaml(dat)
    return ret
Load json or yaml data from file handle.

Args:
    fh (file): File handle to load from.

Example:
    >>> with open('data.json', 'r') as json:
    >>>     jsdata = composite.load(json)
    >>>
    >>> with open('data.yml', 'r') as yml:
    >>>     ymldata = composite.load(yml)
codesearchnet
def check(self, dsm, **kwargs): layered_architecture = True messages = [] categories = dsm.categories dsm_size = dsm.size[0] if (not categories): categories = (['appmodule'] * dsm_size) for i in range(0, (dsm_size - 1)): for j in range((i + 1), dsm_size): if ((categories[i] != 'broker') and (categories[j] != 'broker') and (dsm.entities[i].split('.')[0] != dsm.entities[j].split('.')[0])): if (dsm.data[i][j] > 0): layered_architecture = False messages.append(('Dependency from %s to %s breaks the layered architecture.' % (dsm.entities[i], dsm.entities[j]))) return (layered_architecture, '\n'.join(messages))
Check layered architecture. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. Returns: bool, str: True if layered architecture else False, messages
codesearchnet
def _process_output_source_directive(schema, current_schema_type, ast, location, context, local_unique_directives): output_source_directive = local_unique_directives.get('output_source', None) if output_source_directive: if has_encountered_output_source(context): raise GraphQLCompilationError(u'Cannot have more than one output source!') if is_in_optional_scope(context): raise GraphQLCompilationError(u'Cannot have the output source in an optional block!') set_output_source_data(context, location) return blocks.OutputSource() else: return None
Process the output_source directive, modifying the context as appropriate. Args: schema: GraphQL schema object, obtained from the graphql library current_schema_type: GraphQLType, the schema type at the current location ast: GraphQL AST node, obtained from the graphql library location: Location object representing the current location in the query context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! local_unique_directives: dict, directive name string -> directive object, containing unique directives present on the current AST node *only* Returns: an OutputSource block, if one should be emitted, or None otherwise
codesearchnet
def facade(projectmainfn, **kwargs): site_url = Configuration._create(**kwargs) logger.info('--------------------------------------------------') logger.info('> Using HDX Python API Library %s' % Configuration.apiversion) logger.info('> HDX Site: %s' % site_url) UserAgent.user_agent = Configuration.read().user_agent projectmainfn()
Facade to simplify project setup that calls project main function Args: projectmainfn ((None) -> None): main function of project **kwargs: configuration parameters to pass to HDX Configuration class Returns: None
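A minimal sketch of wiring a project entry point through the facade above; the keyword arguments shown are placeholders for whatever the HDX Configuration class accepts, not a confirmed signature.

def main():
    # Project logic goes here; the HDX Configuration is already set up
    # by the facade before this function runs.
    pass

# Extra keyword arguments are forwarded to Configuration._create.
facade(main, user_agent='my_project', hdx_site='prod')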
juraj-google-style
def hub_retry(max_attempts: int=5, wait_before_retry: Optional[float]=2): def decorator(test_func_ref): @functools.wraps(test_func_ref) def wrapper(*args, **kwargs): retry_count = 1 while retry_count < max_attempts: try: return test_func_ref(*args, **kwargs) except (requests.exceptions.ConnectionError, requests.exceptions.Timeout, requests.exceptions.ReadTimeout, requests.exceptions.HTTPError, requests.exceptions.RequestException) as err: logger.error(f"Test failed with {err} at try {retry_count}/{max_attempts} as it couldn't connect to the specified Hub repository.") if wait_before_retry is not None: time.sleep(wait_before_retry) retry_count += 1 return test_func_ref(*args, **kwargs) return wrapper return decorator
To decorate tests that download from the Hub. They can fail due to a variety of network issues such as timeouts, connection resets, etc. Args: max_attempts (`int`, *optional*, defaults to 5): The maximum number of attempts to retry the flaky test. wait_before_retry (`float`, *optional*, defaults to 2): If provided, will wait that number of seconds before retrying the test.
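A hypothetical application of the decorator above to a Hub-dependent test; the repository id and test body are illustrative, not taken from the source.

@hub_retry(max_attempts=3, wait_before_retry=1.0)
def test_config_download():
    from transformers import AutoConfig
    # Retried up to 3 times on connection errors, timeouts, or HTTP failures.
    AutoConfig.from_pretrained("hf-internal-testing/tiny-random-bert")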
github-repos
def Feed(self, size=512): data = self.file_object.read(size) Lexer.Feed(self, data) return len(data)
Feed data into the buffer.

Args:
    size: optional data size to read from the file-like object.
juraj-google-style
def _create_security_group(self, ingress): template_kwargs = {'app': self.app_name, 'env': self.env, 'region': self.region, 'vpc': get_vpc_id(self.env, self.region), 'description': self.properties['security_group']['description'], 'ingress': ingress} secgroup_json = get_template(template_file='infrastructure/securitygroup_data.json.j2', formats=self.generated, **template_kwargs) wait_for_task(secgroup_json) return True
Send a POST to Spinnaker to create a new security group.

Args:
    ingress: Ingress rules rendered into the Security Group template.

Returns:
    boolean: True if created successfully.
codesearchnet
def get_measurements(region, core_info, data, extra_offset=0): measurements = [] clean_core_info = [x for x in core_info if x] cores = len(clean_core_info) for k in data: if k not in ["1", "Region Info", "Event", "Metric", "CPU clock"]: slot = data[k] for i in range(cores): core = core_info[i] idx = extra_offset + i if core and slot[idx]: measurements.append((region, k, core, slot[idx])) return measurements
Get the complete measurement info from likwid's region info.

Args:
    region: The region we took a measurement in.
    core_info: The core information.
    data: The raw data.
    extra_offset (int): default = 0

Returns (list((region, metric, core, value))):
    A list of measurement tuples; each tuple contains the information about
    the region, the metric, the core and the actual value.
juraj-google-style
def _GetValueAsObject(self, property_value): if (property_value.type == pyolecf.value_types.BOOLEAN): return property_value.data_as_boolean if (property_value.type in self._INTEGER_TYPES): return property_value.data_as_integer if (property_value.type in self._STRING_TYPES): return property_value.data_as_string try: data = property_value.data except IOError: data = None return data
Retrieves the property value as a Python object. Args: property_value (pyolecf.property_value): OLECF property value. Returns: object: property value as a Python object.
codesearchnet
def clear_config(clear_constants=False): _set_config_is_locked(False) _CONFIG.clear() _SINGLETONS.clear() if clear_constants: _CONSTANTS.clear() else: saved_constants = _CONSTANTS.copy() _CONSTANTS.clear() for (name, value) in six.iteritems(saved_constants): constant(name, value) _IMPORTED_MODULES.clear() _OPERATIVE_CONFIG.clear()
Clears the global configuration. This clears any parameter values set by `bind_parameter` or `parse_config`, as well as the set of dynamically imported modules. It does not remove any configurable functions or classes from the registry of configurables. Args: clear_constants: Whether to clear constants created by `constant`. Defaults to False.
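The internal names here (bind_parameter, parse_config, constant) suggest this is the gin-config clear_config; assuming that attribution is right, a small round trip looks like the sketch below.

import gin

@gin.configurable
def network(num_layers=2):
    return num_layers

gin.parse_config("network.num_layers = 4")
assert network() == 4        # binding applied
gin.clear_config()           # bindings dropped, configurable stays registered
assert network() == 2        # default value is back
gin.clear_config(clear_constants=True)   # also wipes values created via gin.constant()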
codesearchnet
def restrict_bond_dict(self, bond_dict): return {j: (bond_dict[j] & set(self.index)) for j in self.index}
Restrict a bond dictionary to self. Args: bond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`, to see examples for a bond_dict. Returns: bond dictionary
codesearchnet
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size): shape = input_tensor.get_shape().as_list() if ((shape[1] is None) or (shape[2] is None)): kernel_size_out = kernel_size else: kernel_size_out = [min(shape[1], kernel_size[0]), min(shape[2], kernel_size[1])] return kernel_size_out
Define kernel size which is automatically reduced for small input.

If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.

Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

Returns:
    a tensor with the kernel size.

TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
    shape = tf.shape(input_tensor)
    return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
                       tf.minimum(shape[2], kernel_size[1])])
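A quick shape check of the helper above; this assumes TensorFlow is importable and that eager tensors with fully known static shapes are passed in.

import tensorflow as tf

small = tf.zeros([8, 5, 5, 192])
print(_reduced_kernel_size_for_small_input(small, [7, 7]))   # [5, 5]

large = tf.zeros([8, 35, 35, 192])
print(_reduced_kernel_size_for_small_input(large, [7, 7]))   # [7, 7]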
codesearchnet
def default_if_empty(self, default): if self.closed(): raise ValueError('Attempt to call default_if_empty() on a closed Queryable.') return self._create(self._generate_default_if_empty_result(default))
If the source sequence is empty return a single element sequence containing
the supplied default value, otherwise return the source sequence unchanged.

Note: This method uses deferred execution.

Args:
    default: The element to be returned if the source sequence is empty.

Returns:
    The source sequence, or if the source sequence is empty a sequence
    containing a single element with the supplied default value.

Raises:
    ValueError: If the Queryable has been closed.
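A hedged usage sketch; the `query` initiator is borrowed from the asq library, which this Queryable appears to belong to, though that attribution is an inference.

from asq.initiators import query

print(query([]).default_if_empty(0).to_list())         # [0]
print(query([1, 2, 3]).default_if_empty(0).to_list())  # [1, 2, 3]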
codesearchnet
def build_from_file(self, path: str | None) -> imports_map.ImportsMap | None: if not path: return None items = self._read_from_file(path) return self.build_from_items(items)
Create an ImportsMap from a .imports_info file.

Builds a dict of short_path to full name
(e.g. "path/to/file.py" => "$GENDIR/rulename~~pytype-gen/path_to_file.py~~pytype").

Args:
    path: The file with the info (may be None, for do-nothing)

Returns:
    Dict of .py short_path to list of .pytd paths, or None if no path was given.
github-repos
def objects_ids_and_slot_variables_and_paths(graph_view, skip_slot_variables=False): trackable_objects, node_paths = graph_view.breadth_first_traversal() object_names = object_identity.ObjectIdentityDictionary() for obj, path in node_paths.items(): object_names[obj] = trackable_utils.object_path_to_string(path) node_ids = object_identity.ObjectIdentityDictionary() for node_id, node in enumerate(trackable_objects): node_ids[node] = node_id if skip_slot_variables: slot_variables = object_identity.ObjectIdentityDictionary() else: slot_variables = serialize_slot_variables(trackable_objects=trackable_objects, node_ids=node_ids, object_names=object_names) return (trackable_objects, node_paths, node_ids, slot_variables, object_names)
Traverse the object graph and list all accessible objects. Looks for `Trackable` objects which are dependencies of `root_trackable`. Includes slot variables only if the variable they are slotting for and the optimizer are dependencies of `root_trackable` (i.e. if they would be saved with a checkpoint). Args: graph_view: A GraphView object. skip_slot_variables: If True does not return trackables for slot variable. Default False. Returns: A tuple of (trackable objects, paths from root for each object, object -> node id, slot variables, object_names)
github-repos
def default_logger(name): logger = logging.getLogger(name) logger_handler = logging.StreamHandler() formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s') logger_handler.setFormatter(formatter) logger.addHandler(logger_handler) return logger
Return a toplevel logger.

This should be used only in the toplevel file. Files deeper in the hierarchy
should use ``logger = logging.getLogger(__name__)``, in order to be considered
children of the toplevel logger.

Beware that without a setLevel() somewhere, the default value (warning) will
be used, so no debug message will be shown.

Args:
    name (str): usually `__name__` in the package toplevel __init__.py, or
        `__file__` in a script file (because __name__ would be "__main__" in
        this case).
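A short illustration of the toplevel/child split described above; the module names are placeholders.

import logging

logger = default_logger("mypackage")          # would live in mypackage/__init__.py
logger.setLevel(logging.DEBUG)                # without this, WARNING is the effective level
logger.debug("visible because the level was lowered")

child = logging.getLogger("mypackage.submodule")  # what deeper files should use
child.info("propagates to the toplevel handler")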
codesearchnet
def body(self, body): self._request.body = body self.add_matcher(matcher('BodyMatcher', body))
Defines the body data to match. ``body`` argument can be a ``str``, ``binary`` or a regular expression. Arguments: body (str|binary|regex): body data to match. Returns: self: current Mock instance.
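A hedged sketch of matching on the request body. The surrounding API looks like the pook HTTP-mocking library, so the engine setup below assumes pook; the URL and payload are made up, and chaining is avoided since the method shown above does not return.

import pook
import requests

pook.on()
mock = pook.post('http://server.test/api')
mock.body('{"name": "octocat"}')   # only requests with exactly this body will match
mock.reply(201)

resp = requests.post('http://server.test/api', data='{"name": "octocat"}')
assert resp.status_code == 201
pook.off()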
codesearchnet
def get_minimizer_options(method): if method == 'Powell': return {'patience': 2, 'patience_line_search': None, 'reset_method': 'EXTRAPOLATED_POINT'} elif method == 'Nelder-Mead': return {'patience': 200, 'alpha': 1.0, 'beta': 0.5, 'gamma': 2.0, 'delta': 0.5, 'scale': 0.1, 'adaptive_scales': True} elif method == 'Levenberg-Marquardt': return {'patience': 250, 'step_bound': 100.0, 'scale_diag': 1, 'usertol_mult': 30} elif method == 'Subplex': return {'patience': 10, 'patience_nmsimplex': 100, 'alpha': 1.0, 'beta': 0.5, 'gamma': 2.0, 'delta': 0.5, 'scale': 1.0, 'psi': 0.0001, 'omega': 0.01, 'adaptive_scales': True, 'min_subspace_length': 'auto', 'max_subspace_length': 'auto'} raise ValueError('Could not find the specified method "{}".'.format(method))
Return a dictionary with the default options for the given minimization method.

Args:
    method (str): the name of the method we want the options of

Returns:
    dict: a dictionary with the default options
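Straightforward use of the lookup above, tweaking one default before handing the dictionary to an optimizer.

options = get_minimizer_options('Nelder-Mead')
options['patience'] = 500          # override a single default
print(sorted(options.keys()))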
juraj-google-style
def send_state_event(self, event_type, content, state_key=''): return self.client.api.send_state_event(self.room_id, event_type, content, state_key)
Send a state event to the room.

Args:
    event_type (str): The type of event that you are sending.
    content (dict): An object with the content of the state event.
    state_key (str, optional): A unique key to identify the state.
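A hedged example of setting a room topic through the state-event helper above; `room` stands for an already-joined room object, and the event type and content shape follow the Matrix spec for m.room.topic.

room.send_state_event(
    "m.room.topic",
    {"topic": "Release planning"},
)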
codesearchnet
def _get_by_name(self, feed_item): key = '' if self._search_field: key = feed_item[self._search_field].strip() search_string = feed_item[self._search_field].strip() args = self._get_base_search_args(search_string) if self._parent_filter_name: if feed_item.get(self._parent_filter_field_name, None): args[self._parent_filter_name] = feed_item.get(self._parent_filter_field_name, None) elif self._parent_dao: parent = self._parent_dao.get(feed_item, required=True) if parent: args[self._parent_filter_name] = parent.get('id', None) key = str(args.get(self._parent_filter_name, '')) + key print('hitting the api to search for %s, %s' % (self._entity, search_string)) search_result = self._api().list(**args).execute() items = search_result[self._list_name] if items and len(items) > 0: item = items[0] if search_string == item['name']: if len(items) > 1 and items[1]['name'] == search_string: raise Exception('ERROR: More than one item found with %s %s' % (self._search_field, feed_item[self._search_field])) else: return (item, key) return (None, key)
Searches CM for an item whose name is defined in the search field of the DAO class.

If more than one item is returned an error is raised, e.g. if there is more
than one item with the same name.

Args:
    feed_item: The Bulkdozer feed item with the name to search for.

Returns:
    If found, the CM entity object that matches the search string.
github-repos
def appliance_device_snmp_v3_users(self): if (not self.__appliance_device_snmp_v3_users): self.__appliance_device_snmp_v3_users = ApplianceDeviceSNMPv3Users(self.__connection) return self.__appliance_device_snmp_v3_users
Gets the ApplianceDeviceSNMPv3Users API client.

Returns:
    ApplianceDeviceSNMPv3Users: The ApplianceDeviceSNMPv3Users API client.
codesearchnet