Columns: code (string, length 20–4.93k), docstring (string, length 33–1.27k), source (string, 3 classes)
def parallactic_angles(times, antenna_positions, field_centre): import pyrap.quanta as pq try: zenith = pm.direction('AZEL', '0deg', '90deg') except AttributeError as e: if (pm is None): raise ImportError('python-casacore import failed') raise reference_positions = [pm.position('itrf', *(pq.quantity(x, 'm') for x in pos)) for pos in antenna_positions] fc_rad = pm.direction('J2000', *(pq.quantity(f, 'rad') for f in field_centre)) return np.asarray([(pm.do_frame(pm.epoch('UTC', pq.quantity(t, 's'))) and [(pm.do_frame(rp) and pm.posangle(fc_rad, zenith).get_value('rad')) for rp in reference_positions]) for t in times])
Computes parallactic angles per timestep for the given reference antenna positions and field centre. Arguments: times: ndarray Array of unique times with shape (ntime,), obtained from TIME column of MS table antenna_positions: ndarray of shape (na, 3) Antenna positions, obtained from POSITION column of MS ANTENNA sub-table field_centre : ndarray of shape (2,) Field centre, should be obtained from MS PHASE_DIR Returns: An array of parallactic angles per time-step
codesearchnet
def test_load_saved_model_with_no_variables(self, builder_cls): with ops.Graph().as_default(): path = _get_export_dir('no_variable_saved_model') with session.Session(graph=ops.Graph()) as sess: x = variable_v1.VariableV1(5, name='x', collections=['not_global_variable']) y = variable_v1.VariableV1(11, name='y', collections=['not_global_variable']) self.assertFalse(variables._all_saveable_objects()) z = x + y self.evaluate(variables.variables_initializer([x, y])) foo_sig_def = signature_def_utils.build_signature_def({'foo_input': utils.build_tensor_info(x)}, {'foo_output': utils.build_tensor_info(z)}) builder = saved_model_builder.SavedModelBuilder(path) builder.add_meta_graph_and_variables(sess, ['foo_graph'], {'foo': foo_sig_def}, saver=tf_saver.Saver([x, y])) builder.save() loader = loader_impl.SavedModelLoader(path) with self.session(graph=ops.Graph()) as sess: saver, _ = loader.load_graph(sess.graph, ['foo_graph']) self.assertFalse(variables._all_saveable_objects()) self.assertIsNotNone(saver) with self.session(graph=ops.Graph()) as sess: loader.load(sess, ['foo_graph']) self.assertEqual(5, sess.run(_tensor_name('x'))) self.assertEqual(11, sess.run(_tensor_name('y')))
Test that SavedModel runs saver when there appear to be no variables. When no variables are detected, this may mean that the variables were saved to different collections, or the collections weren't saved to the SavedModel. If the SavedModel MetaGraphDef contains a saver, it should still run in either of these cases. Args: builder_cls: SavedModelBuilder or _SavedModelBuilder class
github-repos
def _add_filestore_resources(self, filestore_resources, create_default_views, hxl_update): for resource in filestore_resources: for created_resource in self.data['resources']: if resource['name'] == created_resource['name']: merge_two_dictionaries(resource.data, created_resource) del resource['url'] resource.update_in_hdx() merge_two_dictionaries(created_resource, resource.data) break self.init_resources() self.separate_resources() if create_default_views: self.create_default_views() if hxl_update: self.hxl_update()
Helper method to create files in filestore by updating resources. Args: filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to) create_default_views (bool): Whether to call package_create_default_resource_views. hxl_update (bool): Whether to call package_hxl_update. Returns: None
juraj-google-style
def stop_ec2_instance(client, resource): instance = EC2Instance.get(resource.id) if (instance.state in ('stopped', 'terminated')): return (ActionStatus.IGNORED, {}) client.stop_instances(InstanceIds=[resource.id]) return (ActionStatus.SUCCEED, {'instance_type': resource.instance_type, 'public_ip': resource.public_ip})
Stop an EC2 Instance This function will attempt to stop a running instance. Args: client (:obj:`boto3.session.Session.client`): A boto3 client object resource (:obj:`Resource`): The resource object to stop Returns: `ActionStatus`
codesearchnet
def delete_panel(self, panel_obj): res = self.panel_collection.delete_one({'_id': panel_obj['_id']}) LOG.warning("Deleting panel %s, version %s" % (panel_obj['panel_name'], panel_obj['version'])) return res
Delete a panel by '_id'. Args: panel_obj(dict) Returns: res(pymongo.DeleteResult)
juraj-google-style
def _MakeEnumValueDescriptor(self, value_proto, index): return descriptor.EnumValueDescriptor(name=value_proto.name, index=index, number=value_proto.number, options=_OptionsOrNone(value_proto), type=None)
Creates an enum value descriptor object from an enum value proto. Args: value_proto: The proto describing the enum value. index: The index of the enum value. Returns: An initialized EnumValueDescriptor object.
codesearchnet
def split_last_dimension(x, n): x_shape = common_layers.shape_list(x) m = x_shape[-1] if isinstance(m, int) and isinstance(n, int): assert m % n == 0 return tf.reshape(x, x_shape[:-1] + [n, m // n])
Reshape x so that the last dimension becomes two dimensions. The first of these two dimensions is n. Args: x: a Tensor with shape [..., m] n: an integer. Returns: a Tensor with shape [..., n, m/n]
juraj-google-style
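A minimal NumPy sketch of the same reshape, for illustration (the original uses TensorFlow; the helper name here is hypothetical):

import numpy as np

def split_last_dimension_np(x, n):
    m = x.shape[-1]
    assert m % n == 0, "last dimension must be divisible by n"
    # [..., m] -> [..., n, m // n]
    return x.reshape(x.shape[:-1] + (n, m // n))

x = np.zeros((2, 3, 12))
print(split_last_dimension_np(x, 4).shape)  # (2, 3, 4, 3)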
def wrap_callable(cls, uri, methods, callable_obj): if isinstance(callable_obj, HandlerMeta): callable_obj.base_endpoint = uri callable_obj.is_valid = True return callable_obj if isinstance(callable_obj, types.FunctionType): return cls(uri=uri, methods=methods, callable_obj=callable_obj) raise RouteError("Invalid handler type.")
Wraps function-based callable_obj into a `Route` instance, else proxies a `bottle_neck.handlers.BaseHandler` subclass instance. Args: uri (str): The uri relative path. methods (tuple): A tuple of valid method strings. callable_obj (instance): The callable object. Returns: A route instance. Raises: RouteError for invalid callable object type.
juraj-google-style
def visualize_embeddings(summary_writer, config): logdir = summary_writer.get_logdir() if (logdir is None): raise ValueError('Summary writer must have a logdir') config_pbtxt = _text_format.MessageToString(config) path = os.path.join(logdir, _projector_plugin.PROJECTOR_FILENAME) with tf.io.gfile.GFile(path, 'w') as f: f.write(config_pbtxt)
Stores a config file used by the embedding projector. Args: summary_writer: The summary writer used for writing events. config: `tf.contrib.tensorboard.plugins.projector.ProjectorConfig` proto that holds the configuration for the projector such as paths to checkpoint files and metadata files for the embeddings. If `config.model_checkpoint_path` is none, it defaults to the `logdir` used by the summary_writer. Raises: ValueError: If the summary writer does not have a `logdir`.
codesearchnet
def validate(self): if (not isinstance(self.value, bytes)): raise TypeError('key value must be bytes') elif (not isinstance(self.cryptographic_algorithm, enums.CryptographicAlgorithm)): raise TypeError('key algorithm must be a CryptographicAlgorithm enumeration') elif (not isinstance(self.cryptographic_length, six.integer_types)): raise TypeError('key length must be an integer') elif (not isinstance(self.key_format_type, enums.KeyFormatType)): raise TypeError('key format type must be a KeyFormatType enumeration') elif (self.key_format_type not in self._valid_formats): raise ValueError('key format type must be one of {0}'.format(self._valid_formats)) mask_count = len(self.cryptographic_usage_masks) for i in range(mask_count): mask = self.cryptographic_usage_masks[i] if (not isinstance(mask, enums.CryptographicUsageMask)): position = '({0} in list)'.format(i) raise TypeError('key mask {0} must be a CryptographicUsageMask enumeration'.format(position)) name_count = len(self.names) for i in range(name_count): name = self.names[i] if (not isinstance(name, six.string_types)): position = '({0} in list)'.format(i) raise TypeError('key name {0} must be a string'.format(position))
Verify that the contents of the PublicKey object are valid. Raises: TypeError: if the types of any PublicKey attributes are invalid.
codesearchnet
def update_service(name, service_map): if (name in service_map): service = service_map[name] data = service.update() if (not data): logger.warning('no data received for service: %s', name) else: data['service_name'] = service.service_name CACHE[name] = dict(data=data, updated=datetime.now()) else: logger.warning('service not found: %s', name) if (name in CACHE): return add_time(CACHE[name]) return {}
Get an update from the specified service. Arguments: name (:py:class:`str`): The name of the service. service_map (:py:class:`dict`): A mapping of service names to :py:class:`flash.service.core.Service` instances. Returns: :py:class:`dict`: The updated data.
codesearchnet
def _FormatMessageShort(self, event): (_, message_short) = self._output_mediator.GetFormattedMessages(event) if (message_short is None): data_type = getattr(event, 'data_type', 'UNKNOWN') raise errors.NoFormatterFound('Unable to find event formatter for: {0:s}.'.format(data_type)) return message_short
Formats the short message. Args: event (EventObject): event. Returns: str: short message field. Raises: NoFormatterFound: if no event formatter can be found to match the data type in the event.
codesearchnet
def CheckForHeaderGuard(filename, clean_lines, error):
  raw_lines = clean_lines.lines_without_raw_strings
  # Don't check for header guards if there are error suppression comments.
  for i in raw_lines:
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
      return
  # Allow pragma once instead of header guards.
  for i in raw_lines:
    if Search(r'^\s*#pragma\s+once', i):
      return
  cppvar = GetHeaderGuardCPPVariable(filename)
  ifndef = ''
  ifndef_linenum = 0
  define = ''
  endif = ''
  endif_linenum = 0
  for linenum, line in enumerate(raw_lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # Find the first occurrence of #ifndef and #define, save the argument.
      if not ifndef and linesplit[0] == '#ifndef':
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # Find the last occurrence of #endif, save the entire line.
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum
  if not ifndef or not define or ifndef != define:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' % cppvar)
    return
  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5
    ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum, error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)
  # Check for "//" comments on the endif line.
  ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum, error)
  match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
  if match:
    if match.group(1) == '_':
      # Issue a low severity warning for deprecated double trailing underscore.
      error(filename, endif_linenum, 'build/header_guard', 0,
            '#endif line should be "#endif  // %s"' % cppvar)
    return
  # Didn't find the corresponding "//" comment.  If this file does not
  # contain any "//" comments at all, it could be that the compiler
  # only wants "/**/" comments; look for those instead.
  no_single_line_comments = True
  for i in xrange(1, len(raw_lines) - 1):
    line = raw_lines[i]
    if Match(r'^(?:(?:\'(?:\\.|[^\'])*\')|(?:"(?:\\.|[^"])*")|[^\'"])*//', line):
      no_single_line_comments = False
      break
  if no_single_line_comments:
    match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
    if match:
      if match.group(1) == '_':
        # Low severity warning for double trailing underscore.
        error(filename, endif_linenum, 'build/header_guard', 0,
              '#endif line should be "#endif  /* %s */"' % cppvar)
      return
  # Didn't find anything.
  error(filename, endif_linenum, 'build/header_guard', 5,
        '#endif line should be "#endif  // %s"' % cppvar)
Checks that the file contains a header guard. Logs an error if no #ifndef header guard is present. For other headers, checks that the full pathname is used. Args: filename: The name of the C++ header file. clean_lines: A CleansedLines instance containing the file. error: The function to call with any errors found.
juraj-google-style
def __init__(self, executor_type=None, config_proto=None): self.config_proto_serialized = config_proto self.executor_type = executor_type
Constructor. Args: executor_type: (optional) name of the executor to be used to execute the eager function. If None or an empty string, the default Tensorflow executor will be used. config_proto: (optional) a `config_pb2.ConfigProto` proto or a serialized string of that proto. The config used by Grappler when optimizing the function graph. Each concrete function is optimized the first time it is called. Changing config_proto after the first call has no effect. If config_proto is None, an empty RewriterConfig will be used.
github-repos
def merge_variables(variables, **kwargs): var_dict = OrderedDict() for v in variables: if (v.name not in var_dict): var_dict[v.name] = [] var_dict[v.name].append(v) return [merge_variables(vars_, **kwargs) for vars_ in list(var_dict.values())]
Concatenates Variables along row axis. Args: variables (list): List of Variables to merge. Variables can have different names (and all Variables that share a name will be concatenated together). Returns: A list of Variables.
codesearchnet
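The name-grouping step can be shown in isolation; this sketch substitutes a minimal stand-in for the library's Variable class:

from collections import OrderedDict

class FakeVariable:
    # stand-in for the library's Variable; only the name attribute matters here
    def __init__(self, name):
        self.name = name

def group_by_name(variables):
    groups = OrderedDict()
    for v in variables:
        groups.setdefault(v.name, []).append(v)
    return list(groups.values())

vars_ = [FakeVariable("a"), FakeVariable("b"), FakeVariable("a")]
print([[v.name for v in g] for g in group_by_name(vars_)])  # [['a', 'a'], ['b']]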
def is_supported(cls, desc): for m in cls: if m.matches(desc): return True return False
Determines if the given metric descriptor is supported. Args: desc (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricDescriptor`): the metric descriptor to test Returns: `True` if desc is supported, otherwise `False`
codesearchnet
def save(self, checkpoint_dir=None): checkpoint_dir = os.path.join((checkpoint_dir or self.logdir), 'checkpoint_{}'.format(self._iteration)) if (not os.path.exists(checkpoint_dir)): os.makedirs(checkpoint_dir) checkpoint = self._save(checkpoint_dir) saved_as_dict = False if isinstance(checkpoint, string_types): if ((not checkpoint.startswith(checkpoint_dir)) or (checkpoint == checkpoint_dir)): raise ValueError('The returned checkpoint path must be within the given checkpoint dir {}: {}'.format(checkpoint_dir, checkpoint)) if (not os.path.exists(checkpoint)): raise ValueError('The returned checkpoint path does not exist: {}'.format(checkpoint)) checkpoint_path = checkpoint elif isinstance(checkpoint, dict): saved_as_dict = True checkpoint_path = os.path.join(checkpoint_dir, 'checkpoint') with open(checkpoint_path, 'wb') as f: pickle.dump(checkpoint, f) else: raise ValueError('`_save` must return a dict or string type: {}'.format(str(type(checkpoint)))) with open((checkpoint_path + '.tune_metadata'), 'wb') as f: pickle.dump({'experiment_id': self._experiment_id, 'iteration': self._iteration, 'timesteps_total': self._timesteps_total, 'time_total': self._time_total, 'episodes_total': self._episodes_total, 'saved_as_dict': saved_as_dict}, f) return checkpoint_path
Saves the current model state to a checkpoint. Subclasses should override ``_save()`` instead to save state. This method dumps additional metadata alongside the saved path. Args: checkpoint_dir (str): Optional dir to place the checkpoint. Returns: Checkpoint path that may be passed to restore().
codesearchnet
def category(msg): if common.typecode(msg) < 1 or common.typecode(msg) > 4: raise RuntimeError("%s: Not an identification message" % msg) msgbin = common.hex2bin(msg) return common.bin2int(msgbin[5:8])
Aircraft category number Args: msg (string): 28-character hexadecimal message string Returns: int: category number
juraj-google-style
def update(self, friendly_name=None, description=None): self._get_info() if self._info: if friendly_name: self._info['friendlyName'] = friendly_name if description: self._info['description'] = description try: self._api.datasets_update(self._name_parts, self._info) except Exception as e: raise e finally: self._info = None
Selectively updates Dataset information. Args: friendly_name: if not None, the new friendly name. description: if not None, the new description. Returns: None.
juraj-google-style
def load(self, path): missing_files = self._check_for_missing_files(path) if len(missing_files) > 0: raise IOError('Invalid data set of type {}: files {} not found at {}'.format( self.type(), ' '.join(missing_files), path)) return self._load(path)
Load and return the corpus from the given path. Args: path (str): Path to the data set to load. Returns: Corpus: The loaded corpus Raises: IOError: When the data set is invalid, for example because required files (annotations, …) are missing.
juraj-google-style
def _open_interface(self, conn_id, iface, callback): try: context = self.conns.get_context(conn_id) except ArgumentError: callback(conn_id, self.id, False, "Could not find connection information") return self.conns.begin_operation(conn_id, 'open_interface', callback, self.get_config('default_timeout')) topics = context['topics'] open_iface_message = {'key': context['key'], 'type': 'command', 'operation': 'open_interface', 'client': self.name, 'interface': iface} self.client.publish(topics.action, open_iface_message)
Open an interface on this device Args: conn_id (int): the unique identifier for the connection iface (string): the interface name to open callback (callback): Callback to be called when this command finishes callback(conn_id, adapter_id, success, failure_reason)
juraj-google-style
def forward(self, hidden_states: torch.FloatTensor, rotary_pos_emb: torch.FloatTensor, attention_mask: torch.LongTensor, position_ids: torch.LongTensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.input_rmsnorm(hidden_states) attention_outputs = self.self_attn(hidden_states=hidden_states, rotary_pos_emb=rotary_pos_emb, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions) hidden_states = attention_outputs[0] hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_rmsnorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[-1],) return outputs
Args: hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`): input to the layer. rotary_pos_emb (`torch.FloatTensor`): rotary position embeddings generated by `ClvpRotaryPositionalEmbedding` module. attention_mask (`torch.FloatTensor` of shape `(batch, 1, tgt_len, src_len)`): attention mask where padding elements are indicated by very large negative values. position_ids (`torch.LongTensor`): Denotes position ids of the input tokens. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def compare_files(path1, path2): diff = difflib.ndiff(open(path1).readlines(), open(path2).readlines()) return [x for x in diff if (x[0] in ['-', '+', '?'])]
Returns the delta between two files using -, ?, + format excluding lines that are the same Args: path1 (str): Path to first file path2 (str): Path to second file Returns: List[str]: Delta between the two files
codesearchnet
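A self-contained demonstration of the same difflib.ndiff filtering, using in-memory lines instead of file paths:

import difflib

a = ["alpha\n", "beta\n", "gamma\n"]
b = ["alpha\n", "Beta\n", "gamma\n"]
# keep only removed (-), added (+) and hint (?) lines
delta = [x for x in difflib.ndiff(a, b) if x[0] in ("-", "+", "?")]
print("".join(delta))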
def fib_list(n): assert n >= 0, 'n must be a non-negative integer' list_results = [0, 1] for i in range(2, n+1): list_results.append(list_results[i-1] + list_results[i-2]) return list_results[n]
Computes the n-th Fibonacci number in approximately O(n) time using dynamic programming. Arguments: n (int): index of the Fibonacci number to compute. Returns: int: the n-th Fibonacci number.
juraj-google-style
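Usage sketch (the function body is repeated inline so the snippet runs standalone):

def fib_list(n):
    # bottom-up dynamic programming: build the table up to index n
    assert n >= 0, "n must be a non-negative integer"
    results = [0, 1]
    for i in range(2, n + 1):
        results.append(results[i - 1] + results[i - 2])
    return results[n]

print([fib_list(i) for i in range(10)])  # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]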
def ensemble_center(self, site_list, indices, cartesian=True): if cartesian: return np.average([site_list[i].coords for i in indices], axis=0) else: return np.average([site_list[i].frac_coords for i in indices], axis=0)
Finds the center of an ensemble of sites selected from a list of sites. Helper method for the find_adsorption_sites algorithm. Args: site_list (list of sites): list of sites indices (list of ints): indices of the sites in site_list to select cartesian (bool): whether to get average fractional or cartesian coordinate
juraj-google-style
def list_nics(access_token, subscription_id): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Network', '/networkInterfaces?api-version=', NETWORK_API]) return do_get(endpoint, access_token)
List the network interfaces in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of NICs list with properties.
juraj-google-style
def __init__(self, input_ebm: ebm.EnergyInference, input_qnn: qnn.QuantumInference, name: Union[None, str]=None): super().__init__(name=name) self._e_inference = input_ebm self._q_inference = input_qnn self._modular_hamiltonian = hamiltonian.Hamiltonian(self.e_inference.energy, self.q_inference.circuit)
Initializes a QHBM. Args: input_ebm: Attends to density operator eigenvalues. input_qnn: Attends to density operator eigenvectors. name: Optional name for the model.
github-repos
def send_audio_file(self, audio_file, device_state, authentication_headers, dialog_request_id, distance_profile, audio_format): payload = {'context': device_state, 'event': {'header': {'namespace': 'SpeechRecognizer', 'name': 'Recognize', 'messageId': self.generate_message_id(), 'dialogRequestId': dialog_request_id}, 'payload': {'profile': distance_profile, 'format': audio_format}}} multipart_data = MultipartEncoder(fields=[('request', ('request', json.dumps(payload), 'application/json;', {'Content-Disposition': "form-data; name='request'"})), ('audio', ('audio', audio_file, 'application/octet-stream', {'Content-Disposition': "form-data; name='audio'"}))], boundary='boundary') headers = {**authentication_headers, 'Content-Type': multipart_data.content_type} stream_id = self.connection.request('POST', '/v20160207/events', headers=headers, body=multipart_data) response = self.connection.get_response(stream_id) return self.parse_response(response)
Send audio to AVS The file-like object is streamed during upload for improved latency. Returns: bytes -- wav audio bytes returned from AVS
codesearchnet
def FromTrimmedData(byts): block = Block() block.__is_trimmed = True ms = StreamManager.GetStream(byts) reader = BinaryReader(ms) block.DeserializeUnsigned(reader) reader.ReadByte() witness = Witness() witness.Deserialize(reader) block.Script = witness bc = GetBlockchain() tx_list = [] for tx_hash in reader.ReadHashes(): tx = bc.GetTransaction(tx_hash)[0] if not tx: raise Exception("Could not find transaction!\n Are you running code against a valid Blockchain instance?\n Tests that accesses transactions or size of a block but inherit from NeoTestCase instead of BlockchainFixtureTestCase will not work.") tx_list.append(tx) if len(tx_list) < 1: raise Exception("Invalid block, no transactions found for block %s " % block.Index) block.Transactions = tx_list StreamManager.ReleaseStream(ms) return block
Deserialize a block from raw bytes. Args: byts (bytes): the raw, trimmed block data. Returns: Block: the deserialized block.
juraj-google-style
def encode(self): slot = 0 match_op = self.KNOWN_MATCH_NAMES['match_controller'] if (not self.controller): slot = self.slot match_op = self.KNOWN_MATCH_NAMES['match_slot'] return struct.pack('<B6xB', slot, match_op)
Encode this slot identifier into a binary descriptor. Returns: bytes: The 8-byte encoded slot identifier
codesearchnet
def _group_and_publish_tasks_statistics(self, result): for i in result: executor_id = i['executor_id'] i['executor_id'] = executor_id[:executor_id.rfind('.')] i['statistics']['instances_count'] = 1 r = {} for i in result: executor_id = i['executor_id'] r[executor_id] = r.get(executor_id, {}) r[executor_id]['framework_id'] = i['framework_id'] r[executor_id]['statistics'] = r[executor_id].get('statistics', {}) r[executor_id]['statistics'] = self._sum_statistics( i['statistics'], r[executor_id]['statistics']) self._add_cpu_usage(r) self._add_cpu_percent(r) self._add_mem_percent(r) self._publish(r)
This function groups statistics of the same tasks by adding them. It also adds an 'instances_count' statistic to get information about how many instances are running on the server Args: result: result of mesos query. List of dictionaries with 'executor_id', 'framework_id' as strings and 'statistics' as a dictionary of labeled numbers
juraj-google-style
def assignee(self, assignee_id, action='ADD'): if not self.can_update(): self._tcex.handle_error(910, [self.type]) return self.tc_requests.assignee( self.api_type, self.api_sub_type, self.unique_id, assignee_id, action=action )
Adds an assignee to the task Args: assignee_id: The id of the assignee to be added action: The action to perform ('ADD' by default)
juraj-google-style
def serialize_to_normalized_pretty_json(py_obj): return json.dumps(py_obj, sort_keys=True, indent=2, cls=ToJsonCompatibleTypes)
Serialize a native object to normalized, pretty printed JSON. The JSON string is normalized by sorting any dictionary keys. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. Returns: str: normalized, pretty printed JSON string.
juraj-google-style
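The normalization is just json.dumps with sorted keys; here is the core call without the custom ToJsonCompatibleTypes encoder:

import json

obj = {"b": 1, "a": {"d": [3, 2], "c": None}}
# sort_keys gives a canonical key order; indent=2 pretty-prints
print(json.dumps(obj, sort_keys=True, indent=2))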
def GetMessages(self, formatter_mediator, event): if self.DATA_TYPE != event.data_type: raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format( event.data_type)) event_values = event.CopyToDict() visit_type = event_values.get('visit_type', 0) transition = self._URL_TRANSITIONS.get(visit_type, None) if transition: transition_str = 'Transition: {0!s}'.format(transition) extra = event_values.get('extra', None) if extra: if transition: extra.append(transition_str) event_values['extra_string'] = ' '.join(extra) elif transition: event_values['extra_string'] = transition_str return self._ConditionalFormatMessages(event_values)
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
juraj-google-style
def psnr(x1, x2, max_val): if any_symbolic_tensors((x1, x2)): return PSNR(max_val).symbolic_call(x1, x2) return backend.nn.psnr(x1, x2, max_val)
Peak Signal-to-Noise Ratio (PSNR) function. This function computes the Peak Signal-to-Noise Ratio between two signals, `x1` and `x2`. PSNR is a measure of the quality of a reconstructed signal. The higher the PSNR, the closer the reconstructed signal is to the original signal. Note that it can become negative when the signal power is smaller that the noise power. Args: x1: The first input signal. x2: The second input signal. Must have the same shape as `x1`. max_val: The maximum possible value in the signals. Returns: float: The PSNR value between `x1` and `x2`. Examples: >>> x1 = keras.random.normal((2, 4, 4, 3)) >>> x2 = keras.random.normal((2, 4, 4, 3)) >>> max_val = 1.0 >>> keras.ops.nn.psnr(x1, x2, max_val) -3.1697404
github-repos
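PSNR reduces to a log-ratio of peak power to mean squared error; a NumPy sketch of the underlying formula, 10 * log10(max_val^2 / MSE) (the backend implementation may differ in details):

import numpy as np

def psnr_np(x1, x2, max_val):
    mse = np.mean((x1 - x2) ** 2)
    # negative when the noise power exceeds the signal power
    return 10.0 * np.log10(max_val**2 / mse)

x1 = np.random.normal(size=(2, 4, 4, 3))
x2 = np.random.normal(size=(2, 4, 4, 3))
print(psnr_np(x1, x2, 1.0))  # roughly -3, as in the docstring example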
def energy_upperbound(self, spins): subtheta = self.theta.copy() subtheta.fix_variables(spins) trees = self._trees if not trees: assert not subtheta.linear and not subtheta.quadratic return subtheta.offset energy = Plus(self.message_upperbound(trees, {}, subtheta), subtheta.offset) return energy
A formula for an upper bound on the energy of Theta with spins fixed. Args: spins (dict): Spin values for a subset of the variables in Theta. Returns: Formula that upper bounds the energy with spins fixed.
juraj-google-style
def __init__(self, tpu_cluster_resolver=None, steps_per_run=None, device_assignment=None): super().__init__(TPUExtended(self, tpu_cluster_resolver, steps_per_run, device_assignment)) distribute_lib.distribution_strategy_gauge.get_cell('V1').set('TPUStrategy') distribute_lib.distribution_strategy_replica_gauge.get_cell('num_workers').set(self.extended.num_hosts) distribute_lib.distribution_strategy_replica_gauge.get_cell('num_replicas_per_worker').set(self.extended.num_replicas_per_host) self._enable_packed_variable_in_eager_mode = True
Initializes the TPUStrategy object. Args: tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver, which provides information about the TPU cluster. steps_per_run: Number of steps to run on device before returning to the host. Note that this can have side-effects on performance, hooks, metrics, summaries etc. This parameter is only used when Distribution Strategy is used with Keras. device_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to specify the placement of replicas on the TPU cluster. Currently only supports the usecase of using a single core within a TPU cluster.
github-repos
def unregister(self, alias): if alias not in self._service_objects: raise Error(self._device, 'No service is registered with alias "%s".' % alias) service_obj = self._service_objects.pop(alias) if service_obj.is_alive: with expects.expect_no_raises( 'Failed to stop service instance "%s".' % alias): service_obj.stop()
Unregisters a service instance. Stops a service and removes it from the manager. Args: alias: string, the alias of the service instance to unregister.
juraj-google-style
def from_string(string): lines = list(clean_lines(string.splitlines())) def input_mode(line): if line[0] == "&": return ("sections", line[1:].lower()) elif "ATOMIC_SPECIES" in line: return ("pseudo", ) elif "K_POINTS" in line: return ("kpoints", line.split("{")[1][:-1]) elif "CELL_PARAMETERS" in line or "ATOMIC_POSITIONS" in line: return ("structure", line.split("{")[1][:-1]) elif line == "/": return None else: return mode sections = {"control": {}, "system": {}, "electrons": {}, "ions": {}, "cell":{}} pseudo = {} pseudo_index = 0 lattice = [] species = [] coords = [] structure = None site_properties = {"pseudo":[]} mode = None for line in lines: mode = input_mode(line) if mode == None: pass elif mode[0] == "sections": section = mode[1] m = re.match(r'(\w+)\(?(\d*?)\)?\s*=\s*(.*)', line) if m: key = m.group(1).strip() key_ = m.group(2).strip() val = m.group(3).strip() if key_ != "": if sections[section].get(key, None) == None: val_ = [0.0]*20 val_[int(key_)-1] = PWInput.proc_val(key, val) sections[section][key] = val_ site_properties[key] = [] else: sections[section][key][int(key_)-1] = PWInput.proc_val(key, val) else: sections[section][key] = PWInput.proc_val(key, val) elif mode[0] == "pseudo": m = re.match(r'(\w+)\s+(\d*.\d*)\s+(.*)', line) if m: pseudo[m.group(1).strip()] = {} pseudo[m.group(1).strip()]["index"] = pseudo_index pseudo[m.group(1).strip()]["pseudopot"] = m.group(3).strip() pseudo_index += 1 elif mode[0] == "kpoints": m = re.match(r'(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)', line) if m: kpoints_grid = (int(m.group(1)), int(m.group(2)), int(m.group(3))) kpoints_shift = (int(m.group(4)), int(m.group(5)), int(m.group(6))) else: kpoints_mode = mode[1] elif mode[0] == "structure": m_l = re.match(r'(-?\d+\.?\d*)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)', line) m_p = re.match(r'(\w+)\s+(-?\d+\.\d*)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)', line) if m_l: lattice += [ float(m_l.group(1)), float(m_l.group(2)), float(m_l.group(3)) ] elif m_p: site_properties["pseudo"].append(pseudo[m_p.group(1)]["pseudopot"]) species += [pseudo[m_p.group(1)]["pseudopot"].split(".")[0]] coords += [[float(m_p.group(2)), float(m_p.group(3)), float(m_p.group(4))]] for k, v in site_properties.items(): if k != "pseudo": site_properties[k].append(sections['system'][k][pseudo[m_p.group(1)]["index"]]) if mode[1] == "angstrom": coords_are_cartesian = True elif mode[1] == "crystal": coords_are_cartesian = False structure = Structure(Lattice(lattice), species, coords, coords_are_cartesian=coords_are_cartesian, site_properties=site_properties) return PWInput(structure=structure, control=sections["control"], system=sections["system"], electrons=sections["electrons"], ions=sections["ions"], cell=sections["cell"], kpoints_mode=kpoints_mode, kpoints_grid=kpoints_grid, kpoints_shift=kpoints_shift)
Reads an PWInput object from a string. Args: string (str): PWInput string Returns: PWInput object
juraj-google-style
def _apply_with_plugs(self, subplugs, error_on_unknown): plugs_by_name = {plug.name: plug for plug in self.plugs} new_plugs = dict(plugs_by_name) for (name, sub_class) in six.iteritems(subplugs): original_plug = plugs_by_name.get(name) accept_substitute = True if (original_plug is None): if (not error_on_unknown): continue accept_substitute = False elif isinstance(original_plug.cls, openhtf.plugs.PlugPlaceholder): accept_substitute = issubclass(sub_class, original_plug.cls.base_class) else: accept_substitute = (('auto_placeholder' in original_plug.cls.__dict__) and original_plug.cls.auto_placeholder and issubclass(sub_class, original_plug.cls)) if (not accept_substitute): raise openhtf.plugs.InvalidPlugError(('Could not find valid placeholder for substitute plug %s required for phase %s' % (name, self.name))) new_plugs[name] = mutablerecords.CopyRecord(original_plug, cls=sub_class) return mutablerecords.CopyRecord(self, plugs=list(new_plugs.values()), options=self.options.format_strings(**subplugs), measurements=[m.with_args(**subplugs) for m in self.measurements])
Substitute plugs for placeholders for this phase. Args: subplugs: dict of plug name to plug class, plug classes to replace. error_on_unknown: bool, if True, then error when an unknown plug name is provided. Raises: openhtf.plugs.InvalidPlugError if for one of the plug names one of the following is true: - error_on_unknown is True and the plug name is not registered. - The new plug subclass is not a subclass of the original. - The original plug class is not a placeholder or automatic placeholder. Returns: PhaseDescriptor with updated plugs.
codesearchnet
def calculate_checksum_on_iterator( itr, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM ): checksum_calc = get_checksum_calculator_by_dataone_designator(algorithm) for chunk in itr: checksum_calc.update(chunk) return checksum_calc.hexdigest()
Calculate the checksum of an iterator. Args: itr: iterable Object which supports the iterator protocol. algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: str : Checksum as a hexadecimal string, with length decided by the algorithm.
juraj-google-style
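A hashlib-based stand-in sketch; the real get_checksum_calculator_by_dataone_designator maps DataONE algorithm designators, whereas plain hashlib names are assumed here:

import hashlib

def checksum_on_iterator(itr, algorithm="MD5"):
    calc = hashlib.new(algorithm.replace("-", "").lower())  # "SHA-1" -> "sha1"
    for chunk in itr:
        calc.update(chunk)
    return calc.hexdigest()

print(checksum_on_iterator([b"hello ", b"world"], "SHA-1"))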
def apply_grads(self, grads, variables): ops = [] for grad, var in zip(grads, variables): ops.extend(self.apply_grad(grad, var)) if not ops: return ops return variables[0].graph.combine_assignments(ops)
Apply gradients to variables. Call this function externally instead of apply_grad(). This causes the operations to be combined, which is necessary for stacking variables see mtf.rewrite_stack_variables(). Args: grads: a list of Tensor variables: a list of Variables Returns: a list of Operations
juraj-google-style
def delete_merged_branches(self, **kwargs): path = ('/projects/%s/repository/merged_branches' % self.get_id()) self.manager.gitlab.http_delete(path, **kwargs)
Delete merged branches. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabDeleteError: If the server failed to perform the request
codesearchnet
def add_datasets(self, datasets, datasets_to_check=None): if datasets_to_check is None: datasets_to_check = self.get_datasets() alldatasetsadded = True for dataset in datasets: if not self.add_dataset(dataset, datasets_to_check=datasets_to_check): alldatasetsadded = False return alldatasetsadded
Add multiple datasets Args: datasets (List[Union[Dataset,Dict,str]]): A list of either dataset ids or dataset metadata from Dataset objects or dictionaries datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase. Returns: bool: True if all datasets added or False if any already present
juraj-google-style
def render_layout_form(form, layout=None, **kwargs): def make_component(type_, *args): if type_ == "Text": return "".join(args) elif type_ == "Field": result = "" for c in args: if isinstance(c, tuple): result += make_component(*c) elif isinstance(c, str): result += render_field(form.__getitem__(c), **kwargs) return result else: if len(args) < 2: return "" result = "".join([make_component(*c) for c in args]) if type_: return "<div class=\"%s\">%s</div>" % (type_.lower(), result) else: return result return mark_safe("".join([make_component(*component) for component in layout]))
Render an entire form with Semantic UI wrappers for each field with a layout provided in the template or in the form class Args: form (form): Django Form layout (tuple): layout design kwargs (dict): other attributes will be passed to fields Returns: string: HTML of Django Form fields with Semantic UI wrappers
juraj-google-style
def from_string(key_pem, is_x509_cert): if is_x509_cert: key_pem = _helpers._to_bytes(key_pem) pemLines = key_pem.replace(b' ', b'').split() certDer = _helpers._urlsafe_b64decode(b''.join(pemLines[1:-1])) certSeq = DerSequence() certSeq.decode(certDer) tbsSeq = DerSequence() tbsSeq.decode(certSeq[0]) pubkey = RSA.importKey(tbsSeq[6]) else: pubkey = RSA.importKey(key_pem) return PyCryptoVerifier(pubkey)
Construct a Verifier instance from a string. Args: key_pem: string, public key in PEM format. is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is expected to be an RSA key in PEM format. Returns: Verifier instance.
juraj-google-style
def construct_error_message(driver_id, error_type, message, timestamp): builder = flatbuffers.Builder(0) driver_offset = builder.CreateString(driver_id.binary()) error_type_offset = builder.CreateString(error_type) message_offset = builder.CreateString(message) ray.core.generated.ErrorTableData.ErrorTableDataStart(builder) ray.core.generated.ErrorTableData.ErrorTableDataAddDriverId(builder, driver_offset) ray.core.generated.ErrorTableData.ErrorTableDataAddType(builder, error_type_offset) ray.core.generated.ErrorTableData.ErrorTableDataAddErrorMessage(builder, message_offset) ray.core.generated.ErrorTableData.ErrorTableDataAddTimestamp(builder, timestamp) error_data_offset = ray.core.generated.ErrorTableData.ErrorTableDataEnd(builder) builder.Finish(error_data_offset) return bytes(builder.Output())
Construct a serialized ErrorTableData object. Args: driver_id: The ID of the driver that the error should go to. If this is nil, then the error will go to all drivers. error_type: The type of the error. message: The error message. timestamp: The time of the error. Returns: The serialized object.
codesearchnet
def __init__(self, max_edit_distance=0, match_threshold=0.0): self.root = TrieNode('root') self.max_edit_distance = max_edit_distance self.match_threshold = match_threshold
Init the Trie object and create root node. Creates a Trie object with a root node with the passed in max_edit_distance and match_threshold. Args: max_edit_distance(int): ? match_threshold(int): ? Notes: This never seems to get called with max_edit_distance or match_threshold
juraj-google-style
def InteractiveShell(self, cmd=None, strip_cmd=True, delim=None, strip_delim=True): conn = self._get_service_connection(b'shell:') return self.protocol_handler.InteractiveShellCommand(conn, cmd=cmd, strip_cmd=strip_cmd, delim=delim, strip_delim=strip_delim)
Get stdout from the currently open interactive shell and optionally run a command on the device, returning all output. Args: cmd: Optional. Command to run on the target. strip_cmd: Optional (default True). Strip command name from stdout. delim: Optional. Delimiter to look for in the output to know when to stop expecting more output (usually the shell prompt) strip_delim: Optional (default True): Strip the provided delimiter from the output Returns: The stdout from the shell command.
codesearchnet
def bind_to_uniform_block(self, binding=0, *, offset=0, size=-1) -> None: self.mglo.bind_to_uniform_block(binding, offset, size)
Bind the buffer to a uniform block. Args: binding (int): The uniform block binding. Keyword Args: offset (int): The offset. size (int): The size. Value ``-1`` means all.
juraj-google-style
def get_cols_to_keep(gctoo, cid=None, col_bool=None, cidx=None, exclude_cid=None): if cid is not None: assert type(cid) == list, "cid must be a list. cid: {}".format(cid) cols_to_keep = [gctoo_col for gctoo_col in gctoo.data_df.columns if gctoo_col in cid] num_missing_cids = len(cid) - len(cols_to_keep) if num_missing_cids != 0: logger.info("{} cids were not found in the GCT.".format(num_missing_cids)) elif col_bool is not None: assert len(col_bool) == gctoo.data_df.shape[1], ( "col_bool must have length equal to gctoo.data_df.shape[1]. " + "len(col_bool): {}, gctoo.data_df.shape[1]: {}".format( len(col_bool), gctoo.data_df.shape[1])) cols_to_keep = gctoo.data_df.columns[col_bool].values elif cidx is not None: assert type(cidx[0]) is int, ( "cidx must be a list of integers. cidx[0]: {}, " + "type(cidx[0]): {}").format(cidx[0], type(cidx[0])) assert max(cidx) <= gctoo.data_df.shape[1], ( "cidx contains an integer larger than the number of columns in " + "the GCToo. max(cidx): {}, gctoo.data_df.shape[1]: {}").format( max(cidx), gctoo.data_df.shape[1]) cols_to_keep = gctoo.data_df.columns[cidx].values else: cols_to_keep = gctoo.data_df.columns.values if exclude_cid is not None: cols_to_keep = [col_to_keep for col_to_keep in cols_to_keep if col_to_keep not in exclude_cid] return cols_to_keep
Figure out based on the possible columns inputs which columns to keep. Args: gctoo (GCToo object): cid (list of strings): col_bool (boolean array): cidx (list of integers): exclude_cid (list of strings): Returns: cols_to_keep (list of strings): col ids to be kept
juraj-google-style
def calcPF(pf): pf_y = pf[:1] pf_x = pf[1:] result = 100 if (pf_y == CosTheta.CapacitiveLead): result = (200 - int(pf_x)) elif (pf_y == CosTheta.InductiveLag): result = int(pf_x) return result
Simple wrapper to calculate the legacy power factor value Args: pf: meter power factor reading Returns: int: legacy push pf
codesearchnet
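Worked example with assumed flag values (the real CosTheta enum values are not shown in this record, so single-character flags are a hypothesis):

class CosTheta:
    # assumed single-character lead/lag flags
    CapacitiveLead = "C"
    InductiveLag = "L"

def calc_pf(pf):
    pf_y, pf_x = pf[:1], pf[1:]
    result = 100  # unity power factor by default
    if pf_y == CosTheta.CapacitiveLead:
        result = 200 - int(pf_x)
    elif pf_y == CosTheta.InductiveLag:
        result = int(pf_x)
    return result

print(calc_pf("C85"), calc_pf("L85"), calc_pf(""))  # 115 85 100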
def get_fail_graph(self, failure_index=None): (phase, _) = self._get_failed_phase(failure_index) return phase.get_graph()
Returns a graph showing a solve failure. Args: failure_index: See `failure_reason` Returns: A pygraph.digraph object.
codesearchnet
def preprocess(self, raw_inputs): image_arrays = [] for raw_im in raw_inputs: im = raw_im.convert('L') im = im.resize(MNIST_DIM, Image.ANTIALIAS) arr = np.array(im) image_arrays.append(arr) inputs = np.array(image_arrays) return inputs.reshape(len(inputs), MNIST_DIM[0], MNIST_DIM[1], 1).astype('float32') / 255
Convert images into the format required by our model. Our model requires that inputs be grayscale (mode 'L'), be resized to `MNIST_DIM`, and be represented as float32 numpy arrays in range [0, 1]. Args: raw_inputs (list of Images): a list of PIL Image objects Returns: array (float32): num images * height * width * num channels
juraj-google-style
def _to_boolean(operand: List[WorkSpaceMessage]) -> Optional[bool]: if not operand: return None if len(operand) > 1: raise ValueError('Expected a single boolean result but got multiple items.') if not fhir_types.is_boolean(operand[0].message): raise ValueError('Expected a boolean but got a non-boolean value.') return proto_utils.get_value_at_field(operand[0].message, 'value')
Converts an evaluation result to a boolean value or None. Args: operand: an expression operand result to convert to boolean. Returns: the boolean value, or None if the operand was empty. Raises: ValueError if it is not an empty result or a single, boolean value.
github-repos
def get_version(tool_name, tool_command): result = {} for line in Bash(ShellConfig(script=tool_command, internal=True)).process(): if (line.find('command not found') >= 0): VersionsCheck.LOGGER.error("Required tool '%s' not found (stopping pipeline)!", tool_name) sys.exit(1) else: version = list(re.findall('(\\d+(\\.\\d+)+)+', line))[0][0] result = {tool_name: Version(str(version))} break return result
Get name and version of a tool defined by given command. Args: tool_name (str): name of the tool. tool_command (str): Bash one line command to get the version of the tool. Returns: dict: tool name and version or empty when no line has been found
codesearchnet
def create_from_json(cls, json_data): block = Block() block_info = json_data['block_info'] block.block_id = block_info['block_id'] block.num_bins = (block_info['num_bins'] if ('num_bins' in block_info) else None) block.property_type = (block_info['property_type'] if ('property_type' in block_info) else None) block.meta = (json_data['meta'] if ('meta' in json_data) else None) block.component_results = _create_component_results(json_data, 'block_info') return block
Deserialize block json data into a Block object Args: json_data (dict): The json data for this block Returns: Block object
codesearchnet
def get_hostname_prefix(): parts = [] version = modules.get_current_version_name() default_version = modules.get_default_version() if (version != default_version): parts.append(version) module = modules.get_current_module_name() if (module != 'default'): parts.append(module) if parts: parts.append('') return '-dot-'.join(parts)
Returns the hostname prefix of a running Endpoints service. The prefix is the portion of the hostname that comes before the API name. For example, if a non-default version and a non-default service are in use, the returned result would be '{VERSION}-dot-{SERVICE}-'. Returns: str, the hostname prefix.
codesearchnet
def dtype(self) -> torch.dtype: if self._rot_mats is not None: return self._rot_mats.dtype elif self._quats is not None: return self._quats.dtype else: raise ValueError('Both rotations are None')
Returns the dtype of the underlying rotation. Returns: The dtype of the underlying rotation
github-repos
def marginalize_out(node_indices, tpm): return (tpm.sum(tuple(node_indices), keepdims=True) / np.array(tpm.shape)[list(node_indices)].prod())
Marginalize out nodes from a TPM. Args: node_indices (list[int]): The indices of nodes to be marginalized out. tpm (np.ndarray): The TPM to marginalize the node out of. Returns: np.ndarray: A TPM with the same number of dimensions, with the nodes marginalized out.
codesearchnet
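Runnable demonstration of the marginalization (the helper is repeated inline); note that keepdims=True preserves the number of dimensions while the division turns the sum into an average:

import numpy as np

def marginalize_out(node_indices, tpm):
    return tpm.sum(tuple(node_indices), keepdims=True) / \
        np.array(tpm.shape)[list(node_indices)].prod()

tpm = np.arange(8.0).reshape(2, 2, 2)
out = marginalize_out([0], tpm)
print(out.shape)     # (1, 2, 2): dimensionality preserved
print(out[0, 0, 0])  # 2.0 == (0 + 4) / 2, the average over node 0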
def symmetry_reduce(tensors, structure, tol=1e-08, **kwargs): sga = SpacegroupAnalyzer(structure, **kwargs) symmops = sga.get_symmetry_operations(cartesian=True) unique_mapping = TensorMapping([tensors[0]], [[]], tol=tol) for tensor in tensors[1:]: is_unique = True for (unique_tensor, symmop) in itertools.product(unique_mapping, symmops): if np.allclose(unique_tensor.transform(symmop), tensor, atol=tol): unique_mapping[unique_tensor].append(symmop) is_unique = False break if is_unique: unique_mapping[tensor] = [] return unique_mapping
Converts a list of tensors corresponding to a structure into a dictionary of unique tensor keys with symmop values corresponding to transformations that will reconstruct derivative tensors from the original list. Args: tensors (list of tensors): list of Tensor objects to test for symmetrically-equivalent duplicates structure (Structure): structure from which to get symmetry tol (float): tolerance for tensor equivalence kwargs: keyword arguments for the SpacegroupAnalyzer Returns: dictionary consisting of unique tensors with symmetry operations corresponding to those which will reconstruct the remaining tensors as values
codesearchnet
def set_bpduguard(self, name, value=False, default=False, disable=False): value = ('enable' if value else 'disable') string = 'spanning-tree bpduguard' cmds = self.command_builder(string, value=value, default=default, disable=disable) return self.configure_interface(name, cmds)
Configures the bpduguard value for the specified interface Args: name (string): The interface identifier to configure. The name must be the full interface name (eg Ethernet1, not Et1) value (bool): True if bpduguard is enabled otherwise False default (bool): Configures the bpduguard parameter to its default value using the EOS CLI default config command disable (bool): Negates the bpduguard parameter using the EOS CLI no config command Returns: True if the command succeeds, otherwise False Raises: ValueError: Raised if an invalid interface name is specified TypeError: Raised if the value keyword argument does not evaluate to a valid boolean
codesearchnet
def remove_triple(self, p, o, auto_refresh=True): self.rdf.graph.remove((self.uri, p, self._handle_object(o))) self._handle_triple_refresh(auto_refresh)
Remove a triple by supplying p, o. Args: p (rdflib.term.URIRef): predicate o (): object auto_refresh (bool): whether or not to update object-like self.rdf.triples Returns: None: removes triple from self.rdf.graph
codesearchnet
def __init__(self, xid=None, role=None, generation_id=None): super().__init__(xid) self.role = role self.generation_id = generation_id
Create a RoleBaseMessage with the optional parameters below. Args: xid (int): OpenFlow xid to the header. role (:class:`~.controller2switch.common.ControllerRole`): . generation_id (int): Master Election Generation Id.
juraj-google-style
def create_forwarding_information_base(self, timeout=(- 1)): uri = '{}{}'.format(self.data['uri'], self.FORWARDING_INFORMATION_PATH) return self._helper.do_post(uri, None, timeout, None)
Generates the forwarding information base dump file for a logical interconnect. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: Interconnect Forwarding Information Base DataInfo.
codesearchnet
def delete(self, teamId): check_type(teamId, basestring, may_be_none=False) self._session.delete(((API_ENDPOINT + '/') + teamId))
Delete a team. Args: teamId(basestring): The ID of the team to be deleted. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error.
codesearchnet
def __init__(self, name, default_name=None, values=None) -> None: self._name_scope = name_scope(name, default_name, values, skip_on_eager=False) self._name = default_name if name is None else name
Initialize the context manager. Args: name: The name argument that is passed to the op function. default_name: The default name to use if the `name` argument is `None`. values: The list of `Tensor` arguments that are passed to the op function. Raises: TypeError: if `default_name` is passed in but not a string.
github-repos
def get_messages(self): uri = '{}/messages'.format(self.data['uri']) return self._helper.do_get(uri)
Retrieves the error or status messages associated with the specified profile. Returns: dict: Server Profile Health.
codesearchnet
def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs): data_format = backend.standardize_data_format(data_format) img = array_to_img(x, data_format=data_format, scale=scale) if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'): warnings.warn('The JPG format does not support RGBA images, converting to RGB.') img = img.convert('RGB') img.save(path, format=file_format, **kwargs)
Saves an image stored as a NumPy array to a path or file object. Args: path: Path or file object. x: NumPy array. data_format: Image data format, either `"channels_first"` or `"channels_last"`. file_format: Optional file format override. If omitted, the format to use is determined from the filename extension. If a file object was used instead of a filename, this parameter should always be used. scale: Whether to rescale image values to be within `[0, 255]`. **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
github-repos
def properties(cls, with_bases=True): if with_bases: return accumulate_from_superclasses(cls, '__properties__') else: return set(cls.__properties__)
Collect the names of properties on this class. This method *optionally* traverses the class hierarchy and includes properties defined on any parent classes. Args: with_bases (bool, optional) : Whether to include properties defined on parent classes in the results. (default: True) Returns: set[str] : property names
codesearchnet
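A plausible sketch of the accumulate_from_superclasses helper, assuming it unions the named attribute across the MRO (the real Bokeh helper may differ, e.g. by caching):

def accumulate_from_superclasses(cls, attrname):
    # union the set-valued attribute over the class and all its bases
    result = set()
    for klass in cls.__mro__:
        result |= set(getattr(klass, attrname, []))
    return result

class Base:
    __properties__ = {"x"}

class Child(Base):
    __properties__ = {"y"}

print(accumulate_from_superclasses(Child, "__properties__"))  # {'x', 'y'}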
def dump(self): out = [] out.append(self.filetype) out.append('Format: {}'.format(self.version)) out.append('Type: ASCII') out.append('') for cmd in self.commands: out.append(self.encode(cmd)) return ('\n'.join(out) + '\n')
Dump all commands in this object to a string. Returns: str: An encoded list of commands separated by \n characters suitable for saving to a file.
codesearchnet
def data(self): if self._data_type == int: if self._pb.HasField("int64_data"): return self._pb.int64_data if self._pb.HasField("int32_data"): return self._pb.int32_data if self._pb.HasField("uint64_data"): return self._pb.uint64_data if self._pb.HasField("uint32_data"): return self._pb.uint32_data elif self._data_type == float: if self._pb.HasField("float32_data"): return self._pb.float32_data if self._pb.HasField("float64_data"): return self._pb.float64_data elif self._data_type == str: return self._pb.string_data elif self._data_type == bool: return self._pb.bool_data elif self._data_type == bytes: return self._pb.bytes_data return None
Metric data Args: value (:obj:`bool` or :obj:`int` or :obj:`long` or :obj:`float` or :obj:`basestring` or :obj:`bytes`) Returns: value Raises: :obj:`TypeError`
juraj-google-style
def extract_bundle(self, resource, timeout=(- 1)): return self._client.update(resource, timeout=timeout, custom_headers={'Content-Type': 'text/plain'})
Extracts the existing bundle on the appliance and creates all the artifacts. Args: resource (dict): Artifact Bundle to extract. timeout: Timeout in seconds. Waits for task completion by default. The timeout does not abort the operation in OneView, it just stops waiting for its completion. Returns: dict: The Artifact Bundle.
codesearchnet
def _validate(self, obj): report = ValidationReport() if not self.validator.is_valid(obj): for v in self.validator.iter_errors(obj): report.add_error("[%s] %s" % ('.'.join(str(vv) for vv in v.path), v.message)) return report
Do the actual validation Arguments: obj (dict): object to validate Returns: ValidationReport
juraj-google-style
def without_document_lock(func): @wraps(func) def wrapper(*args, **kw): return func(*args, **kw) wrapper.nolock = True return wrapper
Wrap a callback function to execute without first obtaining the document lock. Args: func (callable) : The function to wrap Returns: callable : a function wrapped to execute without a |Document| lock. While inside an unlocked callback, it is completely *unsafe* to modify ``curdoc()``. The value of ``curdoc()`` inside the callback will be a specially wrapped version of |Document| that only allows safe operations, which are: * :func:`~bokeh.document.Document.add_next_tick_callback` * :func:`~bokeh.document.Document.remove_next_tick_callback` Only these may be used safely without taking the document lock. To make other changes to the document, you must add a next tick callback and make your changes to ``curdoc()`` from that second callback. Attempts to otherwise access or change the Document will result in an exception being raised.
codesearchnet
def get_service_account_email(self, project=None): if (project is None): project = self.project path = ('/projects/%s/serviceAccount' % (project,)) api_response = self._connection.api_request(method='GET', path=path) return api_response['email']
Get the email address of the project's BigQuery service account Note: This is the service account that BigQuery uses to manage tables encrypted by a key in KMS. Args: project (str, optional): Project ID to use for retrieving service account email. Defaults to the client's project. Returns: str: service account email address Example: >>> from google.cloud import bigquery >>> client = bigquery.Client() >>> client.get_service_account_email() my_service_account@my-project.iam.gserviceaccount.com
codesearchnet
def is_number(s): try: float(s) return True except ValueError: pass try: import unicodedata unicodedata.numeric(s) return True except (TypeError, ValueError): pass return False
Determines if the input is numeric Args: s: The value to check. Returns: bool: ``True`` if the input is numeric, ``False`` otherwise.
juraj-google-style
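Usage sketch (repeated inline so it runs standalone); the unicodedata fallback covers single numeric code points that float() rejects:

import unicodedata

def is_number(s):
    try:
        float(s)
        return True
    except ValueError:
        pass
    try:
        unicodedata.numeric(s)  # e.g. '五' -> 5.0
        return True
    except (TypeError, ValueError):
        pass
    return False

print(is_number("3.14"), is_number("五"), is_number("abc"))  # True True False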
def SetDecodedStreamSize(self, decoded_stream_size): if self._is_open: raise IOError('Already open.') if decoded_stream_size < 0: raise ValueError(( 'Invalid decoded stream size: {0:d} value out of ' 'bounds.').format(decoded_stream_size)) self._decoded_stream_size = decoded_stream_size
Sets the decoded stream size. This function is used to set the decoded stream size if it can be determined separately. Args: decoded_stream_size (int): size of the decoded stream in bytes. Raises: IOError: if the file-like object is already open. OSError: if the file-like object is already open. ValueError: if the decoded stream size is invalid.
juraj-google-style
def str2tuple(str_in): tuple_out = safe_eval(str_in) if not isinstance(tuple_out, tuple): tuple_out = None return tuple_out
Extracts a tuple from a string. Args: str_in (string) that contains python tuple Returns: (tuple) or None if no valid tuple was found Raises: -
juraj-google-style
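A runnable sketch using ast.literal_eval as a stand-in for the library's safe_eval:

import ast

def str2tuple(str_in):
    # ast.literal_eval safely parses Python literals without executing code
    try:
        out = ast.literal_eval(str_in)
    except (ValueError, SyntaxError):
        return None
    return out if isinstance(out, tuple) else None

print(str2tuple("(1, 2, 3)"))  # (1, 2, 3)
print(str2tuple("[1, 2]"))     # None: a list is not a tuple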
def delete_clinvar_object(self, object_id, object_type, submission_id): LOG.info('Deleting clinvar object %s (%s)', object_id, object_type) result = '' if (object_type == 'variant_data'): self.clinvar_submission_collection.find_one_and_update({'_id': ObjectId(submission_id)}, {'$pull': {'variant_data': object_id}}) variant_object = self.clinvar_collection.find_one({'_id': object_id}) linking_id = variant_object.get('linking_id') result = self.clinvar_collection.delete_many({'linking_id': linking_id}) else: result = self.clinvar_collection.delete_one({'_id': object_id}) self.clinvar_submission_collection.find_one_and_update({'_id': ObjectId(submission_id)}, {'$pull': {'case_data': object_id}}) updated_submission = self.clinvar_submission_collection.find_one_and_update({'_id': submission_id}, {'$set': {'updated_at': datetime.now()}}, return_document=pymongo.ReturnDocument.AFTER) return updated_submission
Remove a variant object from the clinvar database and update the related submission object Args: object_id(str) : the id of an object to remove from the clinvar_collection database collection (a variant of a case) object_type(str) : either 'variant_data' or 'case_data'. It's a key in the clinvar_submission object. submission_id(str): the _id key of a clinvar submission Returns: updated_submission(obj): an updated clinvar submission
codesearchnet
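The `$pull` array update used above, shown in isolation; the connection string, database name, and ids are hypothetical. Note that the original's two round-trips (pull the id, then bump the timestamp) can be combined into a single update document:

from datetime import datetime

from bson import ObjectId
import pymongo

client = pymongo.MongoClient("mongodb://localhost:27017")
submissions = client["scout"]["clinvar_submission"]

updated = submissions.find_one_and_update(
    {"_id": ObjectId("5f0c1a2b3c4d5e6f70819202")},  # hypothetical _id
    {"$pull": {"variant_data": "variant-obj-1"},    # drop one id from the array
     "$set": {"updated_at": datetime.now()}},       # bump the timestamp too
    return_document=pymongo.ReturnDocument.AFTER,   # return the updated doc
)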
def decoded(self): logging.info('Decoding message: {0}'.format(self.message)) self.offset = (self.offset * (- 1)) return self.cipher()
Decodes the message using the Caesar shift cipher Inverse operation of encoding: negates ``self.offset`` in place and applies the cipher. Returns: String decoded with the cipher.
codesearchnet
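A self-contained sketch of the encode/decode pair the docstring describes; the class name, attributes, and default offset are assumptions, not the original implementation:

class Caesar:
    def __init__(self, message, offset=3):
        self.message = message
        self.offset = offset

    def cipher(self):
        out = []
        for ch in self.message:
            if ch.isalpha():
                base = ord("A") if ch.isupper() else ord("a")
                out.append(chr((ord(ch) - base + self.offset) % 26 + base))
            else:
                out.append(ch)  # leave punctuation and digits untouched
        return "".join(out)

    def decoded(self):
        self.offset = -self.offset  # negate the shift, as in the code above
        return self.cipher()

print(Caesar("Khoor").decoded())  # -> 'Hello' with the default offset of 3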
def send_example_telemetry(example_name, *example_args, framework='pytorch'):
    if is_offline_mode():
        return
    data = {'example': example_name, 'framework': framework}
    for args in example_args:
        args_as_dict = {k: v for k, v in args.__dict__.items() if not k.startswith('_') and v is not None}
        if 'model_name_or_path' in args_as_dict:
            model_name = args_as_dict['model_name_or_path']
            if not os.path.isdir(model_name):
                data['model_name'] = model_name
        if 'dataset_name' in args_as_dict:
            data['dataset_name'] = args_as_dict['dataset_name']
        elif 'task_name' in args_as_dict:
            script_name = example_name.replace('tf_', '').replace('flax_', '').replace('run_', '')
            script_name = script_name.replace('_no_trainer', '')
            # double-quote the f-string so the nested single-quoted key parses
            # on Python < 3.12
            data['dataset_name'] = f"{script_name}-{args_as_dict['task_name']}"
    send_telemetry(topic='examples', library_name='transformers', library_version=__version__, user_agent=http_user_agent(data))
Sends telemetry that helps tracking the examples use. Args: example_name (`str`): The name of the example. *example_args (dataclasses or `argparse.ArgumentParser`): The arguments to the script. This function will only try to extract the model and dataset name from those. Nothing else is tracked. framework (`str`, *optional*, defaults to `"pytorch"`): The framework for the example.
github-repos
def __init__(self, location, resource_pool): self.location = location self.pool = resource_pool
Create a package repository. Args: location (str): A string specifying the location of the repository. This could be a filesystem path, or a database uri, etc. resource_pool (`ResourcePool`): The pool used to manage package resources.
juraj-google-style
def value(self): raise NotImplementedError
Returns the last snapshot of this variable. You usually do not need to call this method as all ops that need the value of the variable call it automatically through a `convert_to_tensor()` call. Returns a `Tensor` which holds the value of the variable. You can not assign a new value to this tensor as it is not a reference to the variable. To avoid copies, if the consumer of the returned value is on the same device as the variable, this actually returns the live value of the variable, not a copy. Updates to the variable are seen by the consumer. If the consumer is on a different device it will get a copy of the variable. Returns: A `Tensor` containing the value of the variable.
github-repos
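A small eager-mode illustration of the snapshot semantics (TF2 assumed):

import tensorflow as tf

v = tf.Variable(3.0)
snapshot = v.value()     # a Tensor holding the current value; not assignable
v.assign(5.0)
print(float(v.value()))  # 5.0 -- a fresh read sees the update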
def training_job_summaries(self, force_refresh=False): if force_refresh: self.clear_cache() if self._training_job_summaries is not None: return self._training_job_summaries output = [] next_args = {} for count in range(100): logging.debug("Calling list_training_jobs_for_hyper_parameter_tuning_job %d" % count) raw_result = self._sage_client.list_training_jobs_for_hyper_parameter_tuning_job( HyperParameterTuningJobName=self.name, MaxResults=100, **next_args ) new_output = raw_result['TrainingJobSummaries'] output.extend(new_output) logging.debug("Got %d more TrainingJobs. Total so far: %d" % (len(new_output), len(output))) if ('NextToken' in raw_result) and (len(new_output) > 0): next_args['NextToken'] = raw_result['NextToken'] else: break self._training_job_summaries = output return output
A (paginated) list of everything from ``ListTrainingJobsForHyperParameterTuningJob``. Args: force_refresh (bool): Set to True to fetch the latest data from SageMaker API. Returns: list: The ``TrainingJobSummaries`` entries from the Amazon SageMaker response for ``ListTrainingJobsForHyperParameterTuningJob``.
juraj-google-style
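The hand-rolled NextToken loop above follows the standard AWS pagination contract; assuming boto3 exposes a paginator for this operation (as it does for most paginated SageMaker calls), the same listing can be written more compactly (the tuning-job name is a placeholder):

import boto3

sm = boto3.client("sagemaker")
paginator = sm.get_paginator("list_training_jobs_for_hyper_parameter_tuning_job")

summaries = []
for page in paginator.paginate(HyperParameterTuningJobName="my-tuning-job",
                               MaxResults=100):
    summaries.extend(page["TrainingJobSummaries"])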
def GetFormattedMessages(self, event): event_formatter = self.GetEventFormatter(event) if (not event_formatter): return (None, None) return event_formatter.GetMessages(self._formatter_mediator, event)
Retrieves the formatted messages related to the event. Args: event (EventObject): event. Returns: tuple: containing: str: full message string or None if no event formatter was found. str: short message string or None if no event formatter was found.
codesearchnet
def query(botcust2, message):
    logger.debug("Getting Mitsuku reply")
    params = {
        'botid': 'f6a012073e345a08',
        'amp;skin': 'chat'  # literal 'amp;' kept from the source (an HTML-escaped '&')
    }
    headers = {
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.8',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Content-Length': str(len(message) + 34),
        'Content-Type': 'application/x-www-form-urlencoded',
        'Cookie': 'botcust2=' + botcust2,
        'DNT': '1',
        'Host': 'kakko.pandorabots.com',
        # the next two values were truncated in the source; restored from the
        # Host header above
        'Origin': 'https://kakko.pandorabots.com',
        'Referer': 'https://kakko.pandorabots.com/',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/58.0.3029.110 Safari/537.36'
    }
    data = {
        'botcust2': botcust2,
        'message': message
    }
    logger.debug("Sending POST request")
    response = requests.post(  # `url` is a module-level constant defined elsewhere
        url,
        params=params,
        headers=headers,
        data=data
    )
    logger.debug("POST response {}".format(response))
    parsed = lxml.html.parse(io.StringIO(response.text)).getroot()
    try:
        result = parsed[1][2][0][2].tail[1:]
        logger.debug("Getting botcust2 successful")
    except IndexError:
        result = False
        logger.critical("Getting botcust2 from html failed")
    return result
Sends a message to Mitsuku and retrieves the reply Args: botcust2 (str): The botcust2 identifier message (str): The message to send to Mitsuku Returns: reply (str): The message Mitsuku sent back
juraj-google-style
def restore(cdiff, a): left = (a.splitlines(1) if isinstance(a, string_types) else a) lrest = [] iline = 0 for (i, line) in enumerate(left): if (iline not in cdiff): lrest.append((' ' + line)) iline += 1 else: cs = [l[0] for l in cdiff[iline]] add = (cs.count('+') - cs.count('-')) lrest.extend(cdiff[iline]) iline += (add + 1) for i in sorted(cdiff.keys()): if (i >= len(left)): lrest.extend(cdiff[i]) from difflib import restore return list(restore(lrest, 2))
Restores the full text of the edited version from the compressed diff and the original text. Args: cdiff (dict): compressed diff returned by :func:`~acorn.logging.diff.compress`. a (str or list): *original* string or list of strings to use as a reference to restore the edited version. Returns: list: lines of the restored, edited text.
codesearchnet
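The standard-library primitive this function builds on is `difflib.restore`, which recovers either side of an `ndiff` delta; a quick round-trip check:

import difflib

a = "one\ntwo\nthree\n".splitlines(keepends=True)
b = "one\n2\nthree\nfour\n".splitlines(keepends=True)
delta = list(difflib.ndiff(a, b))

assert "".join(difflib.restore(delta, 1)) == "".join(a)  # which=1: left side
assert "".join(difflib.restore(delta, 2)) == "".join(b)  # which=2: right side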
def ParseFileObject(self, parser_mediator, file_object): file_offset = file_object.get_offset() file_size = file_object.get_size() while file_offset < file_size: try: self._ParseRecord(parser_mediator, file_object) except errors.ParseError as exception: if file_offset == 0: raise errors.UnableToParseFile( 'Unable to parse first event record with error: {0!s}'.format( exception)) file_offset = file_object.get_offset()
Parses a BSM file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
juraj-google-style
def _countdown(self, waitTime=0, printString="Waiting %*d seconds...", verbose=True): if waitTime <= 0: waitTime = self.__retryDelay for remaining in range(waitTime, 0, -1): _vPrint(verbose, "\r" + printString % (len(str(waitTime)), remaining), end="", flush=True) time.sleep(1) if verbose: _vPrint(verbose, "\r" + printString % (len(str(waitTime)), 0))
Makes a pretty countdown. Args: waitTime (Optional[int]): Number of seconds to count down from. If not positive, defaults to the instance's retry delay. printString (Optional[str]): A counter message to display. Defaults to 'Waiting %*d seconds...' verbose (Optional[bool]): If False, all extra printouts will be suppressed. Defaults to True.
juraj-google-style
def _reload_config(self, reload_original_config): if reload_original_config: self.original_config = self.running_config self.original_config.set_name('original') paths = self.running_config.get_paths() self.running_config = FortiConfig('running', vdom=self.vdom) for path in paths: self.load_config(path, empty_candidate=True)
This command will update the running config from the live device. Args: * reload_original_config: * If ``True`` the original config will be replaced by the current running config before the running config is reloaded from the device. * If ``False`` the original config will remain untouched.
codesearchnet
def create(cls, session, record, endpoint_override=None, out_type=None, **add_params): cls._check_implements('create') data = record.to_api() params = {'reload': True} params.update(**add_params) data.update(params) return cls((endpoint_override or ('/%s.json' % cls.__endpoint__)), data=data, request_type=RequestPaginator.POST, singleton=True, session=session, out_type=out_type)
Create an object on HelpScout. Args: session (requests.sessions.Session): Authenticated session. record (helpscout.BaseModel): The record to be created. endpoint_override (str, optional): Override the default endpoint using this. out_type (helpscout.BaseModel, optional): The type of record to output. This should be provided by child classes, by calling super. **add_params (mixed): Add these to the request parameters. Returns: helpscout.models.BaseModel: Newly created record. Will be of the ``out_type``, if one was provided.
codesearchnet
def chat_postEphemeral(self, *, channel: str, user: str, **kwargs) -> SlackResponse: kwargs.update({'channel': channel, 'user': user}) return self.api_call('chat.postEphemeral', json=kwargs)
Sends an ephemeral message to a user in a channel. Args: channel (str): The channel id. e.g. 'C1234567890' user (str): The id of user who should see the message. e.g. 'U0BPQUNTA' text (str): The message you'd like to share. e.g. 'Hello world' text is not required when presenting blocks. blocks (list): A dictionary list of blocks. Blocks are required when not presenting text. e.g. [{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
codesearchnet
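A hypothetical call against slackclient v2's `WebClient` (the token and ids are placeholders):

from slack import WebClient

client = WebClient(token="xoxb-your-token")
response = client.chat_postEphemeral(
    channel="C1234567890",
    user="U0BPQUNTA",
    text="Only you can see this message",
)
assert response["ok"]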
def __init__(self, callback): self._callback = callback self._ras = brocade_ras(callback=pynos.utilities.return_xml)
RAS init method. Args: callback: Callback function that will be called for each action. Returns: RAS Object Raises: None
juraj-google-style
def get_configured_consensus_module(block_id, state_view): settings_view = SettingsView(state_view) default_consensus = \ 'genesis' if block_id == NULL_BLOCK_IDENTIFIER else 'devmode' consensus_module_name = settings_view.get_setting( 'sawtooth.consensus.algorithm', default_value=default_consensus) return ConsensusFactory.get_consensus_module( consensus_module_name)
Returns the consensus_module based on the consensus module set by the "sawtooth_settings" transaction family. Args: block_id (str): the block id associated with the current state_view state_view (:obj:`StateView`): the current state view to use for setting values Raises: UnknownConsensusModuleError: Thrown when an invalid consensus module has been configured.
juraj-google-style
def normalize_cell_value(value): if (isinstance(value, dict) or isinstance(value, list)): return json.dumps(value) return value
Process value for writing into a cell. Args: value: any type of variable Returns: json serialized value if value is list or dict, else value
codesearchnet
def VerifyStructure(self, parser_mediator, lines): return ((re.match(self._VERIFICATION_REGEX, lines) or re.match(self._CHROMEOS_VERIFICATION_REGEX, lines)) is not None)
Verifies that this is a syslog-formatted file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. lines (str): one or more lines from the text file. Returns: bool: True if this is the correct parser, False otherwise.
codesearchnet
def __init__(self, resolver_context): super(FileIO, self).__init__() self._is_cached = False self._is_open = False self._resolver_context = resolver_context
Initializes a file-like object. Args: resolver_context (Context): resolver context.
juraj-google-style
def xslt_transformation(xml, template): transformer = ET.XSLT(_read_template(template)) newdom = transformer(_read_marcxml(xml)) return ET.tostring(newdom, pretty_print=True, encoding='utf-8')
Transform `xml` using XSLT `template`. Args: xml (str): Filename or XML string. Must not contain ``\\n`` if it is a filename. template (str): Filename or XML string. Must not contain ``\\n`` if it is a filename. Returns: str: Transformed `xml` as string.
codesearchnet
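A minimal end-to-end run with inline documents instead of files (lxml only; the stylesheet is made up):

from lxml import etree as ET

stylesheet = ET.XML(b"""<xsl:stylesheet version="1.0"
    xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
  <xsl:template match="/"><out><xsl:value-of select="/in"/></out></xsl:template>
</xsl:stylesheet>""")

transformer = ET.XSLT(stylesheet)
newdom = transformer(ET.XML(b"<in>hello</in>"))
print(ET.tostring(newdom, pretty_print=True, encoding="utf-8"))  # b'<out>hello</out>\n'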
def dim_reduce_data(data, d):
    genes, cells = data.shape
    distances = np.zeros((cells, cells))
    for i in range(cells):
        for j in range(cells):
            distances[i, j] = poisson_dist(data[:, i], data[:, j])
    proximity = distances**2
    J = np.eye(cells) - 1./cells
    B = -0.5*np.dot(J, np.dot(proximity, J))
    e_val, e_vec = np.linalg.eigh(B)
    # eigh returns eigenpairs in ascending order; take the d largest and
    # reorder them to descending before building the embedding
    lam = np.diag(e_val[-d:][::-1])
    E = e_vec[:, -d:][:, ::-1]
    X = np.dot(E, lam**0.5)
    return X
Performs MDS on the data directly, not on the means. Args: data (array): genes x cells d (int): desired dimensionality Returns: X, a cells x d matrix
juraj-google-style
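The same double-centering recipe on a toy distance matrix, with the eigen-ordering made explicit (numpy only; the distances are made up):

import numpy as np

D = np.array([[0., 1., 2.],
              [1., 0., 1.],
              [2., 1., 0.]])            # cells x cells pairwise distances
n, d = D.shape[0], 2

J = np.eye(n) - np.ones((n, n)) / n     # centering matrix
B = -0.5 * J @ (D ** 2) @ J             # double-centered Gram matrix
w, V = np.linalg.eigh(B)                # eigenvalues in ascending order

idx = np.argsort(w)[::-1][:d]           # indices of the d largest eigenvalues
X = V[:, idx] * np.sqrt(np.maximum(w[idx], 0.0))  # clip tiny negatives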