Columns: code (string, lengths 20 to 4.93k) · docstring (string, lengths 33 to 1.27k) · source (string, 3 classes)
def getNext(self, dataset, requires_initialization=False, shared_name=None): def ta_wrapper(gn): def _wrapper(): r = gn() if isinstance(r, tensor_array_ops.TensorArray): return r.stack() else: return r return _wrapper if context.executing_eagerly() or ops.inside_function(): iterator = iter(dataset) return ta_wrapper(iterator._next_internal) else: if requires_initialization: iterator = dataset_ops.make_initializable_iterator(dataset, shared_name) self.evaluate(iterator.initializer) else: iterator = dataset_ops.make_one_shot_iterator(dataset) get_next = iterator.get_next() return ta_wrapper(lambda: get_next)
Returns a callable that returns the next element of the dataset. Example use: ```python # In both graph and eager modes dataset = ... get_next = self.getNext(dataset) result = self.evaluate(get_next()) ``` Args: dataset: A dataset whose elements will be returned. requires_initialization: Indicates that when the test is executed in graph mode, it should use an initializable iterator to iterate through the dataset (e.g. when it contains stateful nodes). Defaults to False. shared_name: (Optional.) If non-empty, the returned iterator will be shared under the given name across multiple sessions that share the same devices (e.g. when using a remote server). Returns: A callable that returns the next element of `dataset`. Any `TensorArray` objects `dataset` outputs are stacked.
github-repos
def gray2bgr(img): img = img[..., None] if img.ndim == 2 else img out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) return out_img
Convert a grayscale image to BGR image. Args: img (ndarray or str): The input image. Returns: ndarray: The converted BGR image.
juraj-google-style
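For context, a minimal usage sketch of the `gray2bgr` sample above (illustrative only, not part of the dataset; it assumes OpenCV and NumPy are installed):

```python
import cv2
import numpy as np

# A 4x4 single-channel image; gray2bgr would pass this straight to cvtColor.
gray = np.full((4, 4), 128, dtype=np.uint8)
bgr = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)  # same call the sample wraps
assert bgr.shape == (4, 4, 3)  # three identical channels
```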
def monitoring(line, cell=None): parser = datalab.utils.commands.CommandParser(prog='monitoring', description=( 'Execute various Monitoring-related operations. Use "%monitoring ' '<command> -h" for help on a specific command.')) list_parser = parser.subcommand( 'list', 'List the metrics or resource types in a monitored project.') list_metric_parser = list_parser.subcommand( 'metrics', 'List the metrics that are available through the Monitoring API.') list_metric_parser.add_argument( '-t', '--type', help='The type of metric(s) to list; can include wildchars.') list_metric_parser.add_argument( '-p', '--project', help='The project on which to execute the request.') list_metric_parser.set_defaults(func=_list_metric_descriptors) list_resource_parser = list_parser.subcommand( 'resource_types', ('List the monitored resource types that are available through the ' 'Monitoring API.')) list_resource_parser.add_argument( '-p', '--project', help='The project on which to execute the request.') list_resource_parser.add_argument( '-t', '--type', help='The resource type(s) to list; can include wildchars.') list_resource_parser.set_defaults(func=_list_resource_descriptors) list_group_parser = list_parser.subcommand( 'groups', ('List the Stackdriver groups in this project.')) list_group_parser.add_argument( '-p', '--project', help='The project on which to execute the request.') list_group_parser.add_argument( '-n', '--name', help='The name of the group(s) to list; can include wildchars.') list_group_parser.set_defaults(func=_list_groups) return datalab.utils.commands.handle_magic_line(line, cell, parser)
Implements the monitoring cell magic for IPython notebooks. Args: line: the contents of the %monitoring line. cell: the optional contents of the cell body. Returns: The results of executing the cell.
juraj-google-style
def _build_request_factory(cls, session: AppSession): def request_factory(*args, **kwargs): request = session.factory.class_map['Request'](*args, **kwargs) user_agent = (session.args.user_agent or session.default_user_agent) request.fields['User-Agent'] = user_agent if session.args.referer: request.fields['Referer'] = session.args.referer for header_string in session.args.header: request.fields.parse(header_string) if session.args.http_compression: request.fields['Accept-Encoding'] = 'gzip, deflate' if session.args.no_cache: request.fields['Cache-Control'] = 'no-cache, must-revalidate' request.fields['Pragma'] = 'no-cache' return request return request_factory
Create the request factory. A request factory is any callable object that returns a :class:`.http.Request`. The callable must accept the same arguments as Request. Returns: A callable object
codesearchnet
def get(self): if len(self._queue) == 0: return float('nan') with warnings.catch_warnings(record=False): warnings.simplefilter('ignore') return np.nanmean(self._queue)
Calculates and returns the mean of the current sliding window. Returns: float: The mean of the values in the current sliding window. Returns NaN if the window is empty.
github-repos
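The `get()` sample above references a `_queue` attribute that is not shown; the following self-contained sketch of the presumed surrounding sliding-window class is an assumption (the deque-based buffer and `push` method are not in the original):

```python
from collections import deque
import warnings

import numpy as np


class SlidingWindowMean:
    """Minimal stand-in for the class the get() sample appears to belong to."""

    def __init__(self, window_size):
        self._queue = deque(maxlen=window_size)  # assumed buffer attribute

    def push(self, value):
        self._queue.append(value)

    def get(self):
        # Same body as the dataset sample above.
        if len(self._queue) == 0:
            return float('nan')
        with warnings.catch_warnings(record=False):
            warnings.simplefilter('ignore')
            return np.nanmean(self._queue)


window = SlidingWindowMean(3)
for value in (1.0, 2.0, float('nan')):
    window.push(value)
print(window.get())  # 1.5 -- the NaN entry is ignored by nanmean
```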
def save_graph(graph_str, dest_file, fmt=None, image_ratio=None): g = pydot.graph_from_dot_data(graph_str) if (fmt is None): fmt = (os.path.splitext(dest_file)[1].lower().strip('.') or 'png') if hasattr(g, ('write_' + fmt)): write_fn = getattr(g, ('write_' + fmt)) else: raise Exception(("Unsupported graph format: '%s'" % fmt)) if image_ratio: g.set_ratio(str(image_ratio)) write_fn(dest_file) return fmt
Render a graph to an image file. Args: graph_str (str): Dot-language graph string. dest_file (str): Filepath to save the graph to. fmt (str): Format, eg "png", "jpg". image_ratio (float): Image ratio. Returns: String representing format that was written, such as 'png'.
codesearchnet
def scale(reader, writer, column, start, stop, multiple): for i, row in enumerate(reader): if i >= start and i <= stop: row[column] = type(multiple)(row[column]) * multiple writer.appendRecord(row)
Multiplies a value over a range of rows. Args: reader: A FileRecordStream object with input data. writer: A FileRecordStream object to write output data to. column: The column of data to modify. start: The first row in the range to modify. stop: The last row in the range to modify. multiple: The value to scale/multiply by.
juraj-google-style
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Funnel sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
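A plain-Python sketch of the token layout the Funnel sample above produces, using made-up IDs (101 for `[CLS]`, 102 for `[SEP]`) purely for illustration:

```python
CLS, SEP = 101, 102  # hypothetical special-token IDs

def build_inputs(token_ids_0, token_ids_1=None):
    # Mirrors the sample: [CLS] X [SEP] or [CLS] A [SEP] B [SEP]
    output = [CLS] + token_ids_0 + [SEP]
    if token_ids_1 is not None:
        output += token_ids_1 + [SEP]
    return output

assert build_inputs([5, 6]) == [101, 5, 6, 102]
assert build_inputs([5, 6], [7]) == [101, 5, 6, 102, 7, 102]
```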
def StartProfiling(self, configuration, identifier, process_information): if not configuration: return if configuration.HaveProfileParsers(): identifier = '{0:s}-parsers'.format(identifier) self._cpu_time_profiler = profilers.CPUTimeProfiler( identifier, configuration) self._cpu_time_profiler.Start() self._memory_profiler = profilers.MemoryProfiler( identifier, configuration) self._memory_profiler.Start() self._process_information = process_information
Starts profiling. Args: configuration (ProfilingConfiguration): profiling configuration. identifier (str): identifier of the profiling session used to create the sample filename. process_information (ProcessInfo): process information.
juraj-google-style
def _patch_expand_paths(self, settings, name, value): return [self._patch_expand_path(settings, name, item) for item in value]
Apply ``SettingsPostProcessor._patch_expand_path`` to each element in list. Args: settings (dict): Current settings. name (str): Setting name. value (list): List of paths to patch. Returns: list: List of paths, each patched to an absolute path.
juraj-google-style
def dbmin_mean(self, value=None): if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `dbmin_mean`'.format(value)) self._dbmin_mean = value
Corresponds to IDD Field `dbmin_mean` Mean of extreme annual minimum dry-bulb temperature Args: value (float): value for IDD Field `dbmin_mean` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def __init__(self, receive_port, logdir, always_flush=False): debugger_directory = os.path.join( os.path.expanduser(logdir), constants.DEBUGGER_DATA_DIRECTORY_NAME) if not tf.io.gfile.exists(debugger_directory): try: tf.io.gfile.makedirs(debugger_directory) logger.info("Created directory for debugger data: %s", debugger_directory) except tf.errors.OpError as e: logger.fatal( "Could not make directory for debugger data: %s. Error: %s", debugger_directory, e) self._events_writer_manager = events_writer_manager_lib.EventsWriterManager( events_directory=debugger_directory, always_flush=always_flush) try: self._events_writer_manager.write_event( tf.compat.v1.Event( wall_time=0, step=0, file_version=constants.EVENTS_VERSION)) except IOError as e: logger.error( "Writing to %s failed: %s", self._events_writer_manager.get_current_file_name(), e) self._registry_backup_file_path = os.path.join( debugger_directory, constants.ALERT_REGISTRY_BACKUP_FILE_NAME) initial_data = None if tf.io.gfile.exists(self._registry_backup_file_path): with tf.io.gfile.GFile(self._registry_backup_file_path, "r") as backup_file: try: initial_data = json.load(backup_file) except ValueError as err: logger.error( "Could not parse contents of %s: %s", self._registry_backup_file_path, err) self._numerics_alert_registry = numerics_alert.NumericsAlertRegistry( initialization_list=initial_data) self._numerics_alert_lock = threading.Lock() curried_handler_constructor = functools.partial( DebuggerDataStreamHandler, self._events_writer_manager, self._numerics_alert_callback) grpc_debug_server.EventListenerBaseServicer.__init__( self, receive_port, curried_handler_constructor)
Receives health pills from a debugger and writes them to disk. Args: receive_port: The port at which to receive health pills from the TensorFlow debugger. logdir: The directory in which to write events files that TensorBoard will read. always_flush: A boolean indicating whether the EventsWriter will be flushed after every write. Can be used for testing.
juraj-google-style
def add_graph(self, graph, global_step=None, graph_def=None): if graph is not None and graph_def is not None: raise ValueError('Please pass only graph, or graph_def (deprecated), but not both.') if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph): if not isinstance(graph, ops.Graph): logging.warning('When passing a `Graph` object, please use the `graph` named argument instead of `graph_def`.') graph = graph_def true_graph_def = graph.as_graph_def(add_shapes=True) self._write_plugin_assets(graph) elif isinstance(graph, graph_pb2.GraphDef) or isinstance(graph_def, graph_pb2.GraphDef): logging.warning('Passing a `GraphDef` to the SummaryWriter is deprecated. Pass a `Graph` object instead, such as `sess.graph`.') if isinstance(graph, graph_pb2.GraphDef): true_graph_def = graph else: true_graph_def = graph_def else: raise TypeError('The passed graph must be an instance of `Graph` or the deprecated `GraphDef`') self._add_graph_def(true_graph_def, global_step)
Adds a `Graph` to the event file. The graph described by the protocol buffer will be displayed by TensorBoard. Most users pass a graph in the constructor instead. Args: graph: A `Graph` object, such as `sess.graph`. global_step: Number. Optional global step counter to record with the graph. graph_def: DEPRECATED. Use the `graph` parameter instead. Raises: ValueError: If both graph and graph_def are passed to the method.
github-repos
def _AddDependencyEdges(self, rdf_artifact): artifact_dependencies = artifact_registry.GetArtifactPathDependencies(rdf_artifact) if artifact_dependencies: for attribute in artifact_dependencies: self._AddEdge(attribute, rdf_artifact.name) else: self.reachable_nodes.add(rdf_artifact.name) self.graph[rdf_artifact.name].is_provided = True
Add an edge for every dependency of the given artifact. This method gets the attribute names for a given artifact and for every attribute it adds a directed edge from the attribute node to the artifact node. If an artifact does not have any dependencies it is added to the set of reachable nodes. Args: rdf_artifact: The artifact object.
codesearchnet
def _serialize_linear_biases(linear, nodelist): linear_bytes = struct.pack(('<' + ('d' * len(linear))), *[linear[i] for i in nodelist]) return base64.b64encode(linear_bytes).decode('utf-8')
Serializes the linear biases. Args: linear: an iterable object where linear[v] is the bias associated with v. nodelist (list): an ordered iterable containing the nodes. Returns: str: base 64 encoded string of little endian 8 byte floats, one for each of the biases in linear. Ordered according to nodelist. Examples: >>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [1, 2, 3]) 'AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA' >>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [3, 2, 1]) 'AAAAAAAAAAAAAAAAAADwPwAAAAAAAPC/'
codesearchnet
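A standard-library round-trip check of the encoding described above (little-endian float64 values, base64 encoded), mirroring the docstring's first example:

```python
import base64
import struct

linear, nodelist = {1: -1, 2: 1, 3: 0}, [1, 2, 3]
raw = struct.pack('<' + 'd' * len(nodelist), *[linear[i] for i in nodelist])
encoded = base64.b64encode(raw).decode('utf-8')
assert encoded == 'AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA'
# Decoding recovers the biases in nodelist order.
assert struct.unpack('<3d', base64.b64decode(encoded)) == (-1.0, 1.0, 0.0)
```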
def setMeterPassword(self, new_pwd, pwd='00000000'): result = False self.setContext('setMeterPassword') try: if ((len(new_pwd) != 8) or (len(pwd) != 8)): self.writeCmdMsg('Passwords must be exactly eight characters.') self.setContext('') return result if (not self.request(False)): self.writeCmdMsg('Pre command read failed: check serial line.') elif (not self.serialCmdPwdAuth(pwd)): self.writeCmdMsg('Password failure') else: req_pwd = binascii.hexlify(new_pwd.zfill(8)) req_str = (('015731023030323028' + req_pwd) + '2903') req_str += self.calc_crc16(req_str[2:].decode('hex')) self.m_serial_port.write(req_str.decode('hex')) if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'): self.writeCmdMsg('Success(setMeterPassword): 06 returned.') result = True self.serialPostEnd() except: ekm_log(traceback.format_exc(sys.exc_info())) self.setContext('') return result
Serial Call to set meter password. USE WITH CAUTION. Args: new_pwd (str): 8 digit numeric password to set pwd (str): Old 8 digit numeric password. Returns: bool: True on completion with ACK.
codesearchnet
def write_filter(script, filter_xml): if isinstance(script, mlx.FilterScript): script.filters.append(filter_xml) elif isinstance(script, str): script_file = open(script, 'a') script_file.write(filter_xml) script_file.close() else: print(filter_xml) return None
Write filter to FilterScript object or filename Args: script (FilterScript object or filename str): the FilterScript object or script filename to write the filter to. filter_xml (str): the xml filter string
codesearchnet
def escalatee(self, main_type, sub_type, unique_id, escalatee_id, action='GET', params=None): params = params or {} url = '/v2/{}/{}/{}/escalatees/{}'.format(main_type, sub_type, unique_id, escalatee_id) if action == 'GET': return self.tcex.session.get(url, params=params) if action == 'DELETE': return self.tcex.session.delete(url) if action == 'ADD': return self.tcex.session.post(url) return None
Args: main_type: sub_type: unique_id: escalatee_id: action: params: Return:
juraj-google-style
def CacheFileSystem(self, path_spec, file_system): identifier = self._GetFileSystemCacheIdentifier(path_spec) self._file_system_cache.CacheObject(identifier, file_system)
Caches a file system object based on a path specification. Args: path_spec (PathSpec): path specification. file_system (FileSystem): file system object.
juraj-google-style
def concatenate(inputs, axis=-1, **kwargs): return Concatenate(axis=axis, **kwargs)(inputs)
Functional interface to the `Concatenate` layer. Args: inputs: A list of input tensors. axis: Concatenation axis. **kwargs: Standard layer keyword arguments. Returns: A tensor, the concatenation of the inputs alongside axis `axis`.
github-repos
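A hedged usage sketch for the functional `concatenate()` helper above; the `tensorflow.keras` import path is an assumption and may vary across Keras/TensorFlow versions:

```python
from tensorflow.keras import Input, Model, layers

a = Input(shape=(4,))
b = Input(shape=(6,))
merged = layers.concatenate([a, b], axis=-1)  # output shape: (None, 10)
model = Model(inputs=[a, b], outputs=merged)
model.summary()
```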
def _build_watermark_updates(runner_execution_context: execution.FnApiRunnerExecutionContext, stage_inputs: Iterable[str], expected_timers: Iterable[translations.TimerFamilyId], pcolls_with_da: Set[str], transforms_w_splits: Set[str], watermarks_by_transform_and_timer_family: Dict[translations.TimerFamilyId, timestamp.Timestamp]) -> Dict[Union[str, translations.TimerFamilyId], timestamp.Timestamp]: updates: Dict[Union[str, translations.TimerFamilyId], timestamp.Timestamp] = {} def get_pcoll_id(transform_id): buffer_id = runner_execution_context.input_transform_to_buffer_id[transform_id] if buffer_id == translations.IMPULSE_BUFFER: pcollection_id = transform_id else: _, pcollection_id = translations.split_buffer_id(buffer_id) return pcollection_id for pcoll in pcolls_with_da: updates[pcoll] = timestamp.MIN_TIMESTAMP for tr in transforms_w_splits: pcoll_id = get_pcoll_id(tr) updates[pcoll_id] = timestamp.MIN_TIMESTAMP for timer_pcoll_id in expected_timers: updates[timer_pcoll_id] = watermarks_by_transform_and_timer_family.get(timer_pcoll_id, timestamp.MAX_TIMESTAMP) for transform_id in stage_inputs: pcoll_id = get_pcoll_id(transform_id) if pcoll_id not in updates: updates[pcoll_id] = timestamp.MAX_TIMESTAMP return updates
Builds a dictionary of PCollection (or TimerFamilyId) to timestamp. Args: stage_inputs: represent the set of expected input PCollections for a stage. These do not include timers. expected_timers: represent the set of TimerFamilyIds that the stage can expect to receive as inputs. pcolls_with_da: represent the set of stage input PCollections that had delayed applications. transforms_w_splits: represent the set of transforms in the stage that had input splits. watermarks_by_transform_and_timer_family: represent the set of watermark holds to be added for each timer family.
github-repos
def start_router(router_class, router_name): handle = router_class.remote(router_name) ray.experimental.register_actor(router_name, handle) handle.start.remote() return handle
Wrapper for starting a router and registering it. Args: router_class: The router class to instantiate. router_name: The name to give to the router. Returns: A handle to the newly started router actor.
codesearchnet
def authenticate_credentials(self, token): try: user_info = self.get_user_info(token) except UserInfoRetrievalFailed: msg = 'Failed to retrieve user info. Unable to authenticate.' logger.error(msg) raise exceptions.AuthenticationFailed(msg) user, __ = get_user_model().objects.get_or_create(username=user_info['username'], defaults=user_info) if not user.is_active: raise exceptions.AuthenticationFailed('User inactive or deleted.') return user, token
Validate the bearer token against the OAuth provider. Arguments: token (str): Access token to validate Returns: (tuple): tuple containing: user (User): User associated with the access token access_token (str): Access token Raises: AuthenticationFailed: The user is inactive, or retrieval of user info failed.
juraj-google-style
def get_angle_degrees(self, indices): coords = ['x', 'y', 'z'] if isinstance(indices, pd.DataFrame): i_pos = self.loc[indices.index, coords].values b_pos = self.loc[indices.loc[:, 'b'], coords].values a_pos = self.loc[indices.loc[:, 'a'], coords].values else: indices = np.array(indices) if len(indices.shape) == 1: indices = indices[None, :] i_pos = self.loc[indices[:, 0], coords].values b_pos = self.loc[indices[:, 1], coords].values a_pos = self.loc[indices[:, 2], coords].values BI, BA = i_pos - b_pos, a_pos - b_pos bi, ba = [v / np.linalg.norm(v, axis=1)[:, None] for v in (BI, BA)] dot_product = np.sum(bi * ba, axis=1) dot_product[dot_product > 1] = 1 dot_product[dot_product < -1] = -1 angles = np.degrees(np.arccos(dot_product)) return angles
Return the angles between given atoms. Calculates the angle in degrees between the atoms with indices ``i, b, a``. The indices can be given in three ways: * As a simple list ``[i, b, a]`` * As a list of lists: ``[[i1, b1, a1], [i2, b2, a2]...]`` * As a :class:`pd.DataFrame` where ``i`` is taken from the index and ``b`` and ``a`` from the respective columns ``'b'`` and ``'a'``. Args: indices (list): Returns: :class:`numpy.ndarray`: Vector of angles in degrees.
codesearchnet
def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_value: Optional[ZambaHybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, transformer_hidden_states: Optional[torch.Tensor]=None, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = hidden_states + transformer_hidden_states if transformer_hidden_states is not None else hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states = self.mamba(hidden_states=hidden_states, cache_params=past_key_value, attention_mask=attention_mask) self_attn_weights = None hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (past_key_value,) return outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence.
github-repos
def _in_place_subclassed_model_reset(model): assert not model._is_graph_network version_utils.swap_class(model.__class__, training.Model, training_v1.Model, ops.executing_eagerly_outside_functions()) attributes_cache = {} for name in dir(model): if name == 'submodules' or name == '_self_tracked_trackables': continue try: value = getattr(model, name) except (AttributeError, ValueError, TypeError): continue if isinstance(value, Layer): attributes_cache[name] = value assert value in model.layers if hasattr(value, 'layers') and value.layers: raise ValueError('We do not support the use of nested layers in `model_to_estimator` at this time. Found nested layer: %s' % value) elif isinstance(value, (list, tuple)) and name not in ('layers', '_layers', 'metrics', '_compile_metric_functions', '_output_loss_metrics'): if value and all((isinstance(val, Layer) for val in value)): raise ValueError('We do not support the use of list-of-layers attributes in subclassed models used with `model_to_estimator` at this time. Found list model: %s' % name) layers_to_names = {value: key for key, value in attributes_cache.items()} original_layers = list(model._flatten_layers(include_self=False, recursive=False)) setattr_tracking = model._setattr_tracking model._setattr_tracking = False model._self_tracked_trackables = [] for layer in original_layers: config = layer.get_config() if isinstance(layer, training.Model) and (not layer._is_graph_network): raise ValueError('We do not support the use of nested subclassed models in `model_to_estimator` at this time. Found nested model: %s' % layer) fresh_layer = layer.__class__.from_config(config) name = layers_to_names[layer] setattr(model, name, fresh_layer) model._self_tracked_trackables.append(fresh_layer) if not hasattr(model, '_original_attributes_cache') or model._original_attributes_cache is None: if model.built: attributes_to_cache = ['inputs', 'outputs', 'total_loss', 'optimizer', 'train_function', 'test_function', 'predict_function', '_training_endpoints', '_collected_trainable_weights', '_feed_inputs', '_feed_input_names', '_feed_input_shapes'] for name in attributes_to_cache: attributes_cache[name] = getattr(model, name) model._original_attributes_cache = attributes_cache _reset_build_compile_trackers(model) model._setattr_tracking = setattr_tracking
Substitute for model cloning that works for subclassed models. Subclassed models cannot be cloned because their topology is not serializable. To "instantiate" an identical model in a new TF graph, we reuse the original model object, but we clear its state. After calling this function on a model instance, you can use the model instance as if it were a model clone (in particular you can use it in a new graph). This method clears the state of the input model. It is thus destructive. However the original state can be restored fully by calling `_in_place_subclassed_model_state_restoration`. Args: model: Instance of a Keras model created via subclassing. Raises: ValueError: In case the model uses a subclassed model as inner layer.
github-repos
def position(self, partition): if not isinstance(partition, TopicPartition): raise TypeError('partition must be a TopicPartition namedtuple') assert self._subscription.is_assigned(partition), 'Partition is not assigned' offset = self._subscription.assignment[partition].position if offset is None: self._update_fetch_positions([partition]) offset = self._subscription.assignment[partition].position return offset
Get the offset of the next record that will be fetched Arguments: partition (TopicPartition): Partition to check Returns: int: Offset
juraj-google-style
def notify( self, method_name: str, *args: Any, trim_log_values: Optional[bool] = None, validate_against_schema: Optional[bool] = None, **kwargs: Any ) -> Response: return self.send( Notification(method_name, *args, **kwargs), trim_log_values=trim_log_values, validate_against_schema=validate_against_schema, )
Send a JSON-RPC request, without expecting a response. Args: method_name: The remote procedure's method name. args: Positional arguments passed to the remote procedure. kwargs: Keyword arguments passed to the remote procedure. trim_log_values: Abbreviate the log entries of requests and responses. validate_against_schema: Validate response against the JSON-RPC schema.
juraj-google-style
def tomography_data(results, name, tomoset): labels = tomography_circuit_names(tomoset, name) circuits = tomoset['circuits'] data = [] prep = None for j, _ in enumerate(labels): counts = marginal_counts(results.get_counts(labels[j]), tomoset['qubits']) shots = sum(counts.values()) meas = circuits[j]['meas'] prep = circuits[j].get('prep', None) meas_qubits = sorted(meas.keys()) if prep: prep_qubits = sorted(prep.keys()) circuit = {} for c in counts.keys(): circuit[c] = {} circuit[c]['meas'] = [(meas[meas_qubits[k]], int(c[-1 - k])) for k in range(len(meas_qubits))] if prep: circuit[c]['prep'] = [prep[prep_qubits[k]] for k in range(len(prep_qubits))] data.append({'counts': counts, 'shots': shots, 'circuit': circuit}) ret = {'data': data, 'meas_basis': tomoset['meas_basis']} if prep: ret['prep_basis'] = tomoset['prep_basis'] return ret
Return a results dict for a state or process tomography experiment. Args: results (Result): Results from execution of a process tomography circuits on a backend. name (string): The name of the circuit being reconstructed. tomoset (tomography_set): the dict of tomography configurations. Returns: list: A list of dicts for the outcome of each process tomography measurement circuit.
juraj-google-style
def as_matrix(self, depth=0): if (depth in self._matrix_cache): return self._matrix_cache[depth] self._matrix_cache[depth] = matrix = Matrix(self, depth=depth) return matrix
Create a matrix with self as node, cache it, return it. Args: depth (int): depth of the matrix. Returns: Matrix: an instance of Matrix.
codesearchnet
def DeregisterDefinition(self, data_type_definition): name = data_type_definition.name.lower() if (name not in self._definitions): raise KeyError('Definition not set for name: {0:s}.'.format(data_type_definition.name)) del self._definitions[name]
Deregisters a data type definition. The data type definitions are identified based on their lower case name. Args: data_type_definition (DataTypeDefinition): data type definition. Raises: KeyError: if a data type definition is not set for the corresponding name.
codesearchnet
def vibrational_free_energy(self, temperature, volume): y = self.debye_temperature(volume) / temperature return self.kb * self.natoms * temperature * ( 9./8. * y + 3 * np.log(1 - np.exp(-y)) - self.debye_integral(y))
Vibrational Helmholtz free energy, A_vib(V, T). Eq(4) in doi.org/10.1016/j.comphy.2003.12.001 Args: temperature (float): temperature in K volume (float) Returns: float: vibrational free energy in eV
juraj-google-style
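For readability, the expression the sample above evaluates can be restated as follows, with D(y) standing for the `debye_integral` term and n for `natoms` (a transcription of the code, not an independent derivation):

```latex
A_{\mathrm{vib}}(V, T) = n\, k_B\, T \left[ \tfrac{9}{8}\, y
    + 3 \ln\!\left(1 - e^{-y}\right) - D(y) \right],
\qquad y = \frac{\Theta_D(V)}{T}
```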
def _inter_df_op_handler(self, func, other, **kwargs): axis = kwargs.get('axis', 0) axis = (pandas.DataFrame()._get_axis_number(axis) if (axis is not None) else 0) if isinstance(other, type(self)): return self._inter_manager_operations(other, 'outer', (lambda x, y: func(x, y, **kwargs))) else: return self._scalar_operations(axis, other, (lambda df: func(df, other, **kwargs)))
Helper method for inter-manager and scalar operations. Args: func: The function to use on the Manager/scalar. other: The other Manager/scalar. Returns: New DataManager with new data and index.
codesearchnet
def __init__( self, name: str, dtype: type, unique: bool, validators: t.List[VALIDATOR_FUNCTION], recoders: t.List[RECODER_FUNCTION],) -> None: if validators is None: validators = [] if recoders is None: recoders = [] self.name = name self.dtype = dtype self.unique = unique self.validators = self._dict_of_funcs(validators) self.recoders = self._dict_of_funcs(recoders)
Construct a new `Column` object. Args: name (str): The exact name of the column in a ``pd.DataFrame``. dtype (type): The type that each member of the recoded column must belong to. unique (bool): Whether values are allowed to recur in this column. validators (list): A list of validator functions. recoders (list): A list of recoder functions.
juraj-google-style
def select_top_predictions(self, predictions): scores = predictions.get_field('scores') keep = torch.nonzero((scores > self.confidence_threshold)).squeeze(1) predictions = predictions[keep] scores = predictions.get_field('scores') (_, idx) = scores.sort(0, descending=True) return predictions[idx]
Select only predictions which have a `score` > self.confidence_threshold, and return them in descending order of score. Arguments: predictions (BoxList): the result of the computation by the model. It should contain the field `scores`. Returns: prediction (BoxList): the detected objects. Additional information of the detection properties can be found in the fields of the BoxList via `prediction.fields()`
codesearchnet
def stop(pid): if psutil.pid_exists(pid): try: p = psutil.Process(pid) p.kill() except Exception: pass
Shut down a specific process. Args: pid: the pid of the process to shut down.
codesearchnet
def add_stream(self, stream, path, compress, flags): self.data_fileobj.seek(self.last_offset) if compress == 'bz2': stream = bz2_compress_stream(stream) elif compress == 'xz': stream = xz_compress_stream(stream) elif compress is None: pass else: raise ValueError('Unsupported compression type: {}'.format(compress)) size = write_to_file(stream, self.data_fileobj) if os.sep == '\\': path = path.replace('\\', '/') e = dict( name=six.u(path), offset=self.last_offset, size=size, flags=flags, ) self.entries.append(e) self.last_offset += e['size']
Add the contents of an iterable to the MAR file. Args: stream (iterable): yields blocks of data path (str): name of this file in the MAR file compress (str): One of 'xz', 'bz2', or None. Defaults to None. flags (int): permission of this file in the MAR file
juraj-google-style
def visualize_conv_activations(activation, name): import math with tf.name_scope('visualize_act_' + name): _, h, w, c = activation.get_shape().as_list() rows = [] c_per_row = int(math.sqrt(c)) for y in range(0, c - c_per_row, c_per_row): row = activation[:, :, :, y:y + c_per_row] cols = tf.unstack(row, axis=3) row = tf.concat(cols, 1) rows.append(row) viz = tf.concat(rows, 2) tf.summary.image('visualize_act_' + name, tf.expand_dims(viz, -1))
Visualize activations for convolution layers. Remarks: This tries to place all activations into a square. Args: activation: tensor with the activation [B,H,W,C] name: label for tensorboard Returns: image of almost all activations
codesearchnet
def get_data(self, columns, type='ndarray', with_index=False): res = self.select_columns(columns) if type == 'ndarray': if with_index: return res.reset_index().values else: return res.values elif type == 'list': if with_index: return res.reset_index().values.tolist() else: return res.values.tolist() elif type == 'dataframe': if with_index: return res.reset_index() else: return res
Get data in the requested format. Arguments: columns {str or list} -- the column(s) to select Keyword Arguments: type {str} -- output format, one of 'ndarray', 'list' or 'dataframe' (default: {'ndarray'}) with_index {bool} -- whether to include the index in the result (default: {False}) Returns: ndarray, list or DataFrame -- the selected data in the requested format
juraj-google-style
def _parse(json_str: str, primitive_cls: Type[Time]) -> Time: try: time = datetime.datetime.strptime(json_str, '%H:%M:%S').time() return _primitive_time_utils.build_time(time, _primitive_time_utils.TimePrecision.SECOND, primitive_cls) except ValueError: pass try: time = datetime.datetime.strptime(json_str, '%H:%M:%S.%f').time() if _primitive_time_utils.PRECISION_PATTERN_MILLISECOND.search(json_str) is not None: return _primitive_time_utils.build_time(time, _primitive_time_utils.TimePrecision.MILLISECOND, primitive_cls) elif _primitive_time_utils.PRECISION_PATTERN_MICROSECOND.search(json_str) is not None: return _primitive_time_utils.build_time(time, _primitive_time_utils.TimePrecision.MICROSECOND, primitive_cls) except ValueError: pass raise fhir_errors.InvalidFhirError(f'Invalid Time: {json_str!r}.')
Parses the json_str into a Time FHIR primitive. Args: json_str: The raw JSON string to parse. primitive_cls: The FHIR primitive to parse into. Returns: A FHIR primitive Time instance. Raises: fhir_errors.InvalidFhirError: In the event that no FHIR primitive Time format was able to properly parse the json_str.
github-repos
def _gauss(mean: int, sigma: int) -> int: return int(random.gauss(mean, sigma))
Creates a variation from a base value Args: mean: base value sigma: gaussian sigma Returns: random value
juraj-google-style
def _validate_required(self, settings, name, value): if not value: raise SettingsInvalidError(("Required value from setting '{name}' " "must not be " "empty.").format(name=name)) return value
Validate a required setting (value can not be empty) Args: settings (dict): Current settings. name (str): Setting name. value (str): Required value to validate. Raises: boussole.exceptions.SettingsInvalidError: If value is empty. Returns: str: Validated value.
juraj-google-style
def add_error(self, position, e): if self.result != TestResultEnums.TEST_RESULT_FAIL: self.result = TestResultEnums.TEST_RESULT_ERROR if position in self.extra_errors: raise Error('An exception is already recorded with position "%s", cannot reuse.' % position) if isinstance(e, ExceptionRecord): self.extra_errors[position] = e else: self.extra_errors[position] = ExceptionRecord(e, position=position)
Add extra error happened during a test. If the test has passed or been skipped, this will mark the test result as ERROR. If an error is added to the test record, the record's result is equivalent to the case where an uncaught exception happened. If the test record has not recorded any error, the newly added error becomes the main error of the test record. Otherwise the newly added error is added to the record's extra errors. Args: position: string, where this error occurred, e.g. 'teardown_test'. e: An exception or a `signals.ExceptionRecord` object.
github-repos
def process_function_type_comment(node, op, func, ctx): if not op.annotation: return comment, line = op.annotation if func.signature.annotations: ctx.errorlog.redundant_function_type_comment(op.code.filename, line) return fake_stack = ctx.vm.simple_stack(op.at_line(line)) m = _FUNCTION_TYPE_COMMENT_RE.match(comment) if not m: ctx.errorlog.invalid_function_type_comment(fake_stack, comment) return args, return_type = m.groups() assert args is not None and return_type is not None if args != '...': annot = args.strip() try: ctx.annotation_utils.eval_multi_arg_annotation(node, func, annot, fake_stack) except abstract_utils.ConversionError: ctx.errorlog.invalid_function_type_comment(fake_stack, annot, details='Must be constant.') ret = ctx.convert.build_string(None, return_type) func.signature.set_annotation('return', ctx.annotation_utils.extract_annotation(node, ret, 'return', fake_stack))
Modifies annotations from a function type comment. Checks if a type comment is present for the function. If so, the type comment is used to populate annotations. It is an error to have a type comment when annotations is not empty. Args: node: The current node. op: An opcode (used to determine filename and line number). func: An abstract.InterpreterFunction. ctx: The current context.
github-repos
def is_collection_aligned(self, data_collection): if self._collection_type != data_collection._collection_type: return False elif len(self.values) != len(data_collection.values): return False elif self.datetimes != data_collection.datetimes: return False else: return True
Check if this Data Collection is aligned with another. Aligned Data Collections are of the same Data Collection class, have the same number of values and have matching datetimes. Args: data_collection: The Data Collection which you want to test if this collection is aligned with. Return: True if collections are aligned, False if not aligned
juraj-google-style
def get_metadata(self, resource, keys): self.metadata_service.set_auth(self._token_metadata) return self.metadata_service.get(resource, keys)
Gets the values for given keys associated with the given resource. Args: resource (intern.resource.boss.BossResource) keys (list) Returns: (dictionary) Raises: HTTPErrorList on failure.
juraj-google-style
def from_dict(cls, data): try: fulfillment = _fulfillment_from_details(data['condition']['details']) except KeyError: fulfillment = data['condition']['uri'] try: amount = int(data['amount']) except ValueError: raise AmountError('Invalid amount: %s' % data['amount']) return cls(fulfillment, data['public_keys'], amount)
Transforms a Python dictionary to an Output object. Note: To pass a serialization cycle multiple times, a Cryptoconditions Fulfillment needs to be present in the passed-in dictionary, as Condition URIs are not serializable anymore. Args: data (dict): The dict to be transformed. Returns: :class:`~bigchaindb.common.transaction.Output`
juraj-google-style
def GetFileEntryByPathSpec(self, path_spec): volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec) if volume_index is None: location = getattr(path_spec, 'location', None) if location is None or location != self.LOCATION_ROOT: return None return apfs_container_file_entry.APFSContainerFileEntry( self._resolver_context, self, path_spec, is_root=True, is_virtual=True) if (volume_index < 0 or volume_index >= self._fsapfs_container.number_of_volumes): return None return apfs_container_file_entry.APFSContainerFileEntry( self._resolver_context, self, path_spec)
Retrieves a file entry for a path specification. Args: path_spec (PathSpec): a path specification. Returns: APFSContainerFileEntry: a file entry or None if not exists.
juraj-google-style
def GetName(self, number): value = self._data_type_definition.values_per_number.get(number, None) if not value: return None return value.name
Retrieves the name of an enumeration value by number. Args: number (int): number. Returns: str: name of the enumeration value or None if no corresponding enumeration value was found.
juraj-google-style
def _get_sync(self, url): response = self.session.get(url) if (response.status_code == requests.codes.ok): return response.json() else: raise HTTPError
Internal method used for GET requests Args: url (str): URL to fetch Returns: Individual URL request's response Raises: HTTPError: If HTTP request failed.
codesearchnet
def get_modname_from_modpath(module_fpath): modsubdir_list = get_module_subdir_list(module_fpath) modname = '.'.join(modsubdir_list) modname = modname.replace('.__init__', '').strip() modname = modname.replace('.__main__', '').strip() return modname
Returns an importable module name from a module file path. Args: module_fpath (str): module filepath Returns: str: modname Example: >>> # ENABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> import utool as ut >>> module_fpath = ut.util_path.__file__ >>> modname = ut.get_modname_from_modpath(module_fpath) >>> result = modname >>> print(result) utool.util_path
codesearchnet
def post_request(profile, resource, payload): url = get_url(profile, resource) headers = get_headers(profile) response = requests.post(url, json=payload, headers=headers) return response.json()
Do a POST request to Github's API. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. resource The part of a Github API URL that comes after ``.../:repo/git``. For instance, for ``.../:repo/git/commits``, it's ``/commits``. payload A dict of values to send as the payload of the POST request. The data will be JSON-encoded. Returns: The body of the response, converted from JSON into a Python dict.
codesearchnet
def create_identity_with_nan_gradients_fn(have_nan_gradients): @custom_gradient.custom_gradient def _identity_with_nan_gradients(x): x = array_ops.identity(x) def grad(dx): return cond.cond(have_nan_gradients, lambda: dx * float('NaN'), lambda: dx) return (x, grad) def identity_with_nan_gradients(x): return _identity_with_nan_gradients(x) return identity_with_nan_gradients
Returns a function that optionally has NaN gradients. This serves as a hook to introduce NaN gradients to a model. This returns an identity function. The identity's gradient function will check if the boolean tensor `have_nan_gradients` is True. If so, the gradient will be NaN. Otherwise, the gradient will also be the identity. Args: have_nan_gradients: A scalar boolean tensor. If True, gradients will be NaN. Otherwise, the gradient function is the identity function. Returns: An identity function whose gradient function will return NaNs, if `have_nan_gradients` is True.
github-repos
def update_config_pwd(msg, cfg): msg_type = msg.__class__.__name__.lower() key_fmt = msg.profile + "_" + msg_type if isinstance(msg._auth, (MutableSequence, tuple)): cfg.pwd[key_fmt] = " :: ".join(msg._auth) else: cfg.pwd[key_fmt] = msg._auth
Updates the profile's auth entry with values set by the user. This will overwrite existing values. Args: :msg: (Message class) an instance of a message class. :cfg: (jsonconfig.Config) config instance.
juraj-google-style
def pylint_check(files): files = fs.wrap_paths(files) cfg_path = conf.get_path('lint.pylint_cfg', 'ops/tools/pylint.ini') pylint_cmd = 'pylint --rcfile {} {}'.format(cfg_path, files) return shell.run(pylint_cmd, exit_on_error=False).return_code
Run code checks using pylint. Args: files (list[str]): A list of files to check Returns: int: The pylint return code; **0** means all files passed the checks.
juraj-google-style
def read_undone_from_datastore(self, shard_id=None, num_shards=None): if (shard_id is not None): shards_list = [((i + shard_id) % num_shards) for i in range(num_shards)] else: shards_list = [] shards_list.append(None) for shard in shards_list: self._read_undone_shard_from_datastore(shard) if self._work: return shard return None
Reads undone work from the datastore. If shard_id and num_shards are specified then this method will attempt to read undone work for shard with id shard_id. If no undone work was found then it will try to read shard (shard_id+1) and so on until either a shard with undone work is found or all shards are read. Args: shard_id: Id of the start shard num_shards: total number of shards Returns: id of the shard with undone work which was read. None means that work from the whole datastore was read.
codesearchnet
def zopen(filename, *args, **kwargs): if ((Path is not None) and isinstance(filename, Path)): filename = str(filename) (name, ext) = os.path.splitext(filename) ext = ext.upper() if (ext == '.BZ2'): if (PY_VERSION[0] >= 3): return bz2.open(filename, *args, **kwargs) else: args = list(args) if (len(args) > 0): args[0] = ''.join([c for c in args[0] if (c != 't')]) if ('mode' in kwargs): kwargs['mode'] = ''.join([c for c in kwargs['mode'] if (c != 't')]) return bz2.BZ2File(filename, *args, **kwargs) elif (ext in ('.GZ', '.Z')): return gzip.open(filename, *args, **kwargs) else: return io.open(filename, *args, **kwargs)
This function wraps around the bz2, gzip and standard python's open function to deal intelligently with bzipped, gzipped or standard text files. Args: filename (str/Path): filename or pathlib.Path. \*args: Standard args for python open(..). E.g., 'r' for read, 'w' for write. \*\*kwargs: Standard kwargs for python open(..). Returns: File-like object. Supports with context.
codesearchnet
def update_note(self, note): if ('key' in note): noteid = note.pop('key', None) else: noteid = uuid.uuid4().hex if ('version' in note): version = note.pop('version', None) url = ('%s/i/%s/v/%s?response=1' % (DATA_URL, noteid, version)) else: url = ('%s/i/%s?response=1' % (DATA_URL, noteid)) note = self.__remove_simplenote_api_fields(note) request = Request(url, data=json.dumps(note).encode('utf-8')) request.add_header(self.header, self.get_token()) request.add_header('Content-Type', 'application/json') response = '' try: response = urllib2.urlopen(request) except HTTPError as e: if (e.code == 401): raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.') else: return (e, (- 1)) except IOError as e: return (e, (- 1)) note = json.loads(response.read().decode('utf-8')) note = self.__add_simplenote_api_fields(note, noteid, int(response.info().get('X-Simperium-Version'))) return (note, 0)
Method to update a specific note object. If the note object does not have a "key" field, a new note is created. Arguments: - note (dict): note object to update Returns: A tuple `(note, status)` - note (dict): note object - status (int): 0 on success and -1 otherwise
codesearchnet
def find_all(self, collection): obj = getattr(self.db, collection) result = obj.find() return result
Search a collection for all available items. Args: collection: The db collection. See main class documentation. Returns: List of all items in the collection.
juraj-google-style
def active_futures(ticker: str, dt) -> str: t_info = ticker.split() (prefix, asset) = (' '.join(t_info[:(- 1)]), t_info[(- 1)]) info = const.market_info(f'{prefix[:(- 1)]}1 {asset}') (f1, f2) = (f'{prefix[:(- 1)]}1 {asset}', f'{prefix[:(- 1)]}2 {asset}') fut_2 = fut_ticker(gen_ticker=f2, dt=dt, freq=info['freq']) fut_1 = fut_ticker(gen_ticker=f1, dt=dt, freq=info['freq']) fut_tk = bdp(tickers=[fut_1, fut_2], flds='Last_Tradeable_Dt', cache=True) if (pd.Timestamp(dt).month < pd.Timestamp(fut_tk.last_tradeable_dt[0]).month): return fut_1 d1 = bdib(ticker=f1, dt=dt) d2 = bdib(ticker=f2, dt=dt) return (fut_1 if (d1[f1].volume.sum() > d2[f2].volume.sum()) else fut_2)
Active futures contract Args: ticker: futures ticker, e.g., ESA Index, Z A Index, CLA Comdty, etc. dt: date Returns: str: ticker name
codesearchnet
def _list(self, request, start_response): configs = [] generator = directory_list_generator.DirectoryListGenerator(request) for config in self._config_manager.configs.itervalues(): if (config != self.API_CONFIG): configs.append(config) directory = generator.pretty_print_config_to_json(configs) if (not directory): _logger.error('Failed to get API directory') return util.send_wsgi_not_found_response(start_response) return self._send_success_response(directory, start_response)
Sends HTTP response containing the API directory. This calls start_response and returns the response body. Args: request: An ApiRequest, the transformed request sent to the Discovery API. start_response: A function with semantics defined in PEP-333. Returns: A string containing the response body.
codesearchnet
def wrap_callable(cls, uri, methods, callable_obj): if isinstance(callable_obj, HandlerMeta): callable_obj.base_endpoint = uri callable_obj.is_valid = True return callable_obj if isinstance(callable_obj, types.FunctionType): return cls(uri=uri, methods=methods, callable_obj=callable_obj) raise RouteError('Invalid handler type.')
Wraps function-based callable_obj into a `Route` instance, else proxies a `bottle_neck.handlers.BaseHandler` subclass instance. Args: uri (str): The uri relative path. methods (tuple): A tuple of valid method strings. callable_obj (instance): The callable object. Returns: A route instance. Raises: RouteError for invalid callable object type.
codesearchnet
def get_data_layout(self, data_shape): raise NotImplementedError()
Retrieve the `TensorLayout` for the input data. Args: data_shape: shape for the input data in list or tuple format. Returns: The `TensorLayout` for the data, which can be used by `backend.distribute_value()` to redistribute a input data.
github-repos
def __init__(self, event_count=0, first_timestamp=-1, last_timestamp=-1): self.event_count = event_count self.first_timestamp = first_timestamp self.last_timestamp = last_timestamp
Tracks events for a single category of values. Args: event_count: The initial event count to use. first_timestamp: The timestamp of the first event with this value. last_timestamp: The timestamp of the last event with this category of values.
juraj-google-style
def get_content_metadata(self, enterprise_customer): content_metadata = OrderedDict() if enterprise_customer.catalog: response = self._load_data( self.ENTERPRISE_CUSTOMER_ENDPOINT, detail_resource='courses', resource_id=str(enterprise_customer.uuid), traverse_pagination=True, ) for course in response['results']: for course_run in course['course_runs']: course_run['content_type'] = 'courserun' content_metadata[course_run['key']] = course_run for enterprise_customer_catalog in enterprise_customer.enterprise_customer_catalogs.all(): response = self._load_data( self.ENTERPRISE_CUSTOMER_CATALOGS_ENDPOINT, resource_id=str(enterprise_customer_catalog.uuid), traverse_pagination=True, querystring={'page_size': 1000}, ) for item in response['results']: content_id = utils.get_content_metadata_item_id(item) content_metadata[content_id] = item return content_metadata.values()
Return all content metadata contained in the catalogs associated with the EnterpriseCustomer. Arguments: enterprise_customer (EnterpriseCustomer): The EnterpriseCustomer to return content metadata for. Returns: list: List of dicts containing content metadata.
juraj-google-style
def _get_populate_values(self, instance) -> Tuple[str, str]: return [ ( lang_code, self._get_populate_from_value( instance, self.populate_from, lang_code ), ) for lang_code, _ in settings.LANGUAGES ]
Gets all values (for each language) from the specified instance's `populate_from` field. Arguments: instance: The instance to get the values from. Returns: A list of (lang_code, value) tuples.
juraj-google-style
def filter_sequences(self, seq_type): return DictList((x for x in self.sequences if isinstance(x, seq_type)))
Return a DictList of only specified types in the sequences attribute. Args: seq_type (SeqProp): Object type Returns: DictList: A filtered DictList of specified object type only
codesearchnet
def softplus_inverse(x, name=None): with tf.name_scope(name or "softplus_inverse"): x = tf.convert_to_tensor(value=x, name="x") threshold = np.log(np.finfo(dtype_util.as_numpy_dtype(x.dtype)).eps) + 2. is_too_small = tf.less(x, np.exp(threshold)) is_too_large = tf.greater(x, -threshold) too_small_value = tf.math.log(x) too_large_value = x x = tf.where(tf.logical_or(is_too_small, is_too_large), tf.ones_like(x), x) y = x + tf.math.log(-tf.math.expm1(-x)) return tf.where(is_too_small, too_small_value, tf.where(is_too_large, too_large_value, y))
Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)). Mathematically this op is equivalent to: ```none softplus_inverse = log(exp(x) - 1.) ``` Args: x: `Tensor`. Non-negative (not enforced), floating-point. name: A name for the operation (optional). Returns: `Tensor`. Has the same type/shape as input `x`.
juraj-google-style
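A NumPy-only sanity check of the identity the op implements, `softplus_inverse(softplus(x)) == x`, using the same stable form `x + log(-expm1(-x))`; it deliberately skips the TensorFlow guards for very small/large inputs shown above:

```python
import numpy as np

x = np.array([0.1, 1.0, 5.0, 20.0])
y = np.log1p(np.exp(x))                # softplus(x) = log(1 + e^x)
recovered = y + np.log(-np.expm1(-y))  # stable form of log(e^y - 1)
assert np.allclose(recovered, x)
```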
def __call__(self, *args, **kwargs): for loop, m in self.iter_methods(): coro = m(*args, **kwargs) self.submit_coroutine(coro, loop)
Triggers all stored callbacks (coroutines) Args: *args: Positional arguments to pass to callbacks **kwargs: Keyword arguments to pass to callbacks
juraj-google-style
def map_resources(self): assert not context.executing_eagerly() object_map = object_identity.ObjectIdentityDictionary() tensor_map = object_identity.ObjectIdentityDictionary() asset_info = _AssetInfo(asset_defs=[], asset_initializers_by_resource=object_identity.ObjectIdentityDictionary(), asset_filename_map={}, asset_index={}) for node_id in _dependency_sorted_node_ids(self): obj = self.nodes[node_id] tensors = obj._export_to_saved_model_graph(object_map=object_map, tensor_map=tensor_map, options=self.options) if isinstance(obj, asset.Asset): _add_asset_info(obj, asset_info, tensor_map[obj.asset_path]) if tensors: for tensor in tensors: self.captured_tensor_node_ids[tensor] = node_id return (object_map, tensor_map, asset_info)
Makes new resource handle ops corresponding to existing resource tensors. Creates resource handle ops in the current default graph, whereas `accessible_objects` will be from an eager context. Resource mapping adds resource handle ops to the main GraphDef of a SavedModel, which allows the C++ loader API to interact with resources. Returns: A tuple of (object_map, tensor_map, asset_info): object_map: A dictionary mapping from object in `accessible_objects` to replacement objects created to hold the new resource tensors. tensor_map: A dictionary mapping from resource tensors extracted from `accessible_objects` to newly created resource tensors. asset_info: An _AssetInfo tuple describing external assets referenced from accessible_objects.
github-repos
def filter(self, predicates): tys = [] for col_name, raw_column in self.raw_columns.items(): dtype = str(raw_column.dtype) if dtype == 'object' or dtype == '|S64': weld_type = WeldVec(WeldChar()) else: weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype] tys.append(weld_type) if len(tys) == 1: weld_type = tys[0] else: weld_type = WeldStruct(tys) if isinstance(predicates, SeriesWeld): predicates = predicates.expr return DataFrameWeldExpr( grizzly_impl.filter( grizzly_impl.zip_columns( self.raw_columns.values(), ), predicates ), self.raw_columns.keys(), weld_type )
Filter the rows of this DataFrame expression using the given predicates. Args: predicates (SeriesWeld or Weld expression): Boolean predicate evaluated per row. Returns: DataFrameWeldExpr: A new expression containing only the rows for which the predicate holds.
juraj-google-style
def sampler_to_iterator(dataset, sampler): for sample in sampler: if isinstance(sample, (list, tuple)): (yield [dataset[i] for i in sample]) else: (yield dataset[sample])
Given a batch sampler or sampler, returns examples instead of indices. Args: dataset (torch.utils.data.Dataset): Dataset to sample from. sampler (torch.utils.data.sampler.Sampler): Sampler over the dataset. Returns: generator over dataset examples
codesearchnet
def get_device_name(self, cached=True): if (cached and (self.name is not None)): return self.name device_name = self.get_characteristic_handle_from_uuid(UUID_DEVICE_NAME) if (device_name is None): logger.warn('Failed to find handle for device name') return None self.name = self.dongle._read_attribute(self.conn_handle, device_name) return self.name
Returns the SK8 device BLE name. Args: cached (bool): if True, returns the locally cached copy of the name. If this is set to False, or the name is not cached, it will read from the device instead. Returns: str. The current device name. May be `None` if an error occurs.
codesearchnet
def save_collection(png_filename_base, numpy_data, start_layers_at=1): file_ext = png_filename_base.split('.')[-1] if file_ext in ['png']: file_base = '.'.join(png_filename_base.split('.')[:-1]) else: file_base = png_filename_base file_ext = ".png" file_base_array = file_base.split('*') output_files = [] i = start_layers_at for layer in numpy_data: layer_filename = (str(i).zfill(6)).join(file_base_array) + file_ext output_files.append(save(layer_filename, layer)) i += 1 return output_files
Export a numpy array to a set of png files, with each Z-index 2D array as its own 2D file. Arguments: png_filename_base: A filename template, such as "my-image-*.png" which will lead to a collection of files named "my-image-0.png", "my-image-1.png", etc. numpy_data: The numpy array data to save to png. Returns: Array. A list of expanded filenames that hold png data.
juraj-google-style
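A hedged usage sketch for `save_collection`, assuming NumPy is available and that the module-level `save` helper it calls writes a single PNG per 2D layer; the filename template and array shape are illustrative:

```python
import numpy as np

# A small 3-layer volume of 8-bit values.
volume = np.random.randint(0, 255, size=(3, 64, 64), dtype=np.uint8)

# Writes my-image-000001.png, my-image-000002.png, my-image-000003.png
# (numbering starts at `start_layers_at`, which defaults to 1).
written = save_collection("my-image-*.png", volume)
print(written)
```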
def get_program(self, program_resource_name: str) -> Dict: return self.service.projects().programs().get(name=program_resource_name).execute()
Returns the previously created quantum program. Params: program_resource_name: A string of the form `projects/project_id/programs/program_id`. Returns: A dictionary containing the metadata and the program.
codesearchnet
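A hedged call sketch, assuming `client` is an instance of the class that owns `get_program`; the project and program IDs are placeholders:

```python
# The resource name format is `projects/<project_id>/programs/<program_id>`.
program = client.get_program('projects/my-project/programs/my-program-id')
print(program)  # a dict with the program metadata
```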
def auth_middleware(policy): assert isinstance(policy, AbstractAuthentication) async def _auth_middleware_factory(app, handler): async def _middleware_handler(request): request[POLICY_KEY] = policy response = (await handler(request)) (await policy.process_response(request, response)) return response return _middleware_handler return _auth_middleware_factory
Returns an aiohttp_auth middleware factory for use by the aiohttp application
object.

Args:
    policy: An authentication policy with a base class of
        AbstractAuthentication.
codesearchnet
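A hedged wiring sketch for aiohttp: `MyAuthPolicy` stands in for any concrete `AbstractAuthentication` subclass and is an assumption of this example, not part of the code above:

```python
from aiohttp import web

policy = MyAuthPolicy()  # any AbstractAuthentication implementation
app = web.Application(middlewares=[auth_middleware(policy)])
web.run_app(app)
```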
def insert_json(table=None, bulk_size=1000, concurrency=25, hosts=None, output_fmt=None): if (not hosts): return print_only(table) queries = (to_insert(table, d) for d in dicts_from_stdin()) bulk_queries = as_bulk_queries(queries, bulk_size) print('Executing inserts: bulk_size={} concurrency={}'.format(bulk_size, concurrency), file=sys.stderr) stats = Stats() with clients.client(hosts, concurrency=concurrency) as client: f = partial(aio.measure, stats, client.execute_many) try: aio.run_many(f, bulk_queries, concurrency) except clients.SqlException as e: raise SystemExit(str(e)) try: print(format_stats(stats.get(), output_fmt)) except KeyError: if (not stats.sampler.values): raise SystemExit('No data received via stdin') raise
Insert JSON lines fed into stdin into a Crate cluster.

If no hosts are specified the statements will be printed.

Args:
    table: Target table name.
    bulk_size: Bulk size of the insert statements.
    concurrency: Number of operations to run concurrently.
    hosts: hostname:port pairs of the Crate nodes
    output_fmt: Output format used when printing the insert statistics.
codesearchnet
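A hedged driver sketch; because the function reads JSON lines from stdin, it would typically be invoked as `cat rows.json | python load.py`. Host addresses and the table name are placeholders:

```python
insert_json(table='mytable',
            bulk_size=500,
            concurrency=10,
            hosts=['crate1:4200', 'crate2:4200'])
```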
def ensure_app_cache_dir(appname, *args): from ubelt import util_path dpath = get_app_cache_dir(appname, *args) util_path.ensuredir(dpath) return dpath
Calls `get_app_cache_dir` but ensures the directory exists. Args: appname (str): the name of the application *args: any other subdirectories may be specified SeeAlso: get_app_cache_dir Example: >>> import ubelt as ub >>> dpath = ub.ensure_app_cache_dir('ubelt') >>> assert exists(dpath)
codesearchnet
def downsample_residual(x, output_channels, dim='2d', stride=1, scope='h'):
    with tf.variable_scope(scope):
        if stride > 1:
            avg_pool = CONFIG[dim]['avg_pool']
            x = avg_pool(x, pool_size=(stride, stride), strides=(stride, stride), padding='VALID')
        input_channels = tf.shape(x)[3]
        diff = output_channels - input_channels
        x = tf.pad(
            x, [[0, 0], [0, 0], [0, 0], [diff
        return x
Downsamples 'x' by `stride` using average pooling. Args: x: input tensor of size [N, H, W, C] output_channels: Desired number of output channels. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. stride: What stride to use. Usually 1 or 2. scope: Optional variable scope. Returns: A downsampled tensor of size [N, H/2, W/2, output_channels] if stride is 2, else returns a tensor of size [N, H, W, output_channels] if stride is 1.
juraj-google-style
def make_agent() -> EcommerceAgent: config_path = find_config('tfidf_retrieve') skill = build_model(config_path) agent = EcommerceAgent(skills=[skill]) return agent
Make an E-commerce agent backed by a TF-IDF retrieval skill.

Returns:
    agent: created Ecommerce agent
codesearchnet
def appliance_device_read_community(self): if (not self.__appliance_device_read_community): self.__appliance_device_read_community = ApplianceDeviceReadCommunity(self.__connection) return self.__appliance_device_read_community
Gets the ApplianceDeviceReadCommunity API client. Returns: ApplianceDeviceReadCommunity:
codesearchnet
def __random_density_hs(N, rank=None, seed=None): G = __ginibre_matrix(N, rank, seed) G = G.dot(G.conj().T) return (G / np.trace(G))
Generate a random density matrix from the Hilbert-Schmidt metric.

Args:
    N (int): the dimension of the density matrix.
    rank (int or None): the rank of the density matrix. The default value is
        full-rank.
    seed (int): Optional. To set a random seed.

Returns:
    ndarray: rho, an (N, N) density matrix.
codesearchnet
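A quick property check for the generator above, assuming the (name-mangled) helper is reachable as a plain module-level function where this sketch runs:

```python
import numpy as np

rho = __random_density_hs(4, rank=2, seed=42)

print(np.isclose(np.trace(rho), 1.0))              # unit trace
print(np.allclose(rho, rho.conj().T))              # Hermitian
print(np.all(np.linalg.eigvalsh(rho) >= -1e-10))   # positive semidefinite (up to tolerance)
```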
def Write(self, map_data): self._Begin() written_keys = set() write_offset = 0 try: while 1: entry = map_data.PopItem() for index in self._indices: self._indices[index][str(getattr(entry, index))] = str(write_offset) write_offset += self._WriteData(self.temp_cache_file, entry) written_keys.update(self._ExpectedKeysForEntry(entry)) except KeyError: self.temp_cache_file.flush() except: self._Rollback() raise return written_keys
Write the map to the cache. Warning -- this destroys map_data as it is written. This is done to save memory and keep our peak footprint smaller. We consume memory again on Verify() as we read a new copy of the entries back in. Args: map_data: A Map subclass containing the entire map to be written. Returns: a set of keys written or None on failure.
github-repos
def __call__(self, fn):
    def debug(app, *args, **kwargs):
        data = fn(app, *args, **kwargs)
        app.tcex.log.debug(
            'function: "{}", args: "{}", kwargs: "{}"'.format(
                self.__class__.__name__, args, kwargs
            )
        )
        return data

    return debug
Implement __call__ function for decorator. Args: fn (function): The decorated function. Returns: function: The custom decorator function.
juraj-google-style
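A hedged usage sketch: the decorator's class name is not shown here, so it is assumed to be `Debug`, and the decorated object is assumed to expose `self.tcex.log` as in the TcEx framework:

```python
class MyApp(object):
    def __init__(self, tcex):
        self.tcex = tcex

    @Debug()  # assumed class name for the decorator defined above
    def run(self, playbook_id, dry_run=False):
        return 'done'

# MyApp(tcex).run('pb-1', dry_run=True) now logs the call's args and kwargs
# at debug level before returning 'done'.
```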
def random_strings(self, string_length=1): str_list = [] for path in self.uniform_generate(string_length): str_list.append(self._path_to_str(path)) return str_list
Generate string_length random strings that belong to the automaton.

Args:
    string_length (integer): The size of the random string

Returns:
    list: The generated strings
juraj-google-style
def __getitem__(self, index: Any) -> Rotation: if type(index) is not tuple: index = (index,) if self._rot_mats is not None: rot_mats = self._rot_mats[index + (slice(None), slice(None))] return Rotation(rot_mats=rot_mats) elif self._quats is not None: quats = self._quats[index + (slice(None),)] return Rotation(quats=quats, normalize_quats=False) else: raise ValueError('Both rotations are None')
Allows torch-style indexing over the virtual shape of the rotation object. See documentation for the shape property. Args: index: A torch index. E.g. (1, 3, 2), or (slice(None,)) Returns: The indexed rotation
github-repos
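An indexing sketch, assuming the surrounding `Rotation` class accepts a `[*, 3, 3]` stack of rotation matrices via its `rot_mats` constructor argument:

```python
import torch

# Identity rotations with virtual shape (2, 5).
rot_mats = torch.eye(3).expand(2, 5, 3, 3).clone()
rots = Rotation(rot_mats=rot_mats)

sub = rots[0, :3]  # indexes only the virtual (2, 5) shape; sub wraps a (3, 3, 3) stack
```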
def stack_residual_blocks_v1(x, filters, blocks, stride1=2, name=None): x = residual_block_v1(x, filters, stride=stride1, name=name + '_block1') for i in range(2, blocks + 1): x = residual_block_v1(x, filters, conv_shortcut=False, name=name + '_block' + str(i)) return x
A set of stacked residual blocks. Args: x: Input tensor. filters: Number of filters in the bottleneck layer in a block. blocks: Number of blocks in the stacked blocks. stride1: Stride of the first layer in the first block. Defaults to `2`. name: Stack label. Returns: Output tensor for the stacked blocks.
github-repos
def _get_ngrams_with_counter(segment, max_order): ngram_counts = collections.Counter() for order in xrange(1, max_order + 1): for i in xrange(0, len(segment) - order + 1): ngram = tuple(segment[i:i + order]) ngram_counts[ngram] += 1 return ngram_counts
Extracts all n-grams up to a given maximum order from an input segment.

Args:
    segment: text segment from which n-grams will be extracted.
    max_order: maximum length in tokens of the n-grams returned by this
        method.

Returns:
    The Counter containing all n-grams up to max_order in segment with a count
    of how many times each n-gram occurred.
juraj-google-style
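A small worked example; the helper is written against Python 2's `xrange`, so the sketch aliases it when run under Python 3:

```python
import collections

xrange = range  # only needed on Python 3

counts = _get_ngrams_with_counter(["the", "cat", "sat"], max_order=2)
# Counter({('the',): 1, ('cat',): 1, ('sat',): 1,
#          ('the', 'cat'): 1, ('cat', 'sat'): 1})
print(counts[('the', 'cat')])  # 1
```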
def schedule(cls, mapreduce_spec): task_name = mapreduce_spec.mapreduce_id + "-finalize" finalize_task = taskqueue.Task( name=task_name, url=(mapreduce_spec.params["base_path"] + "/finalizejob_callback/" + mapreduce_spec.mapreduce_id), params={"mapreduce_id": mapreduce_spec.mapreduce_id}, headers=util._get_task_headers(mapreduce_spec.mapreduce_id)) queue_name = util.get_queue_name(None) if not _run_task_hook(mapreduce_spec.get_hooks(), "enqueue_controller_task", finalize_task, queue_name): try: finalize_task.add(queue_name) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError), e: logging.warning("Task %r already exists. %s: %s", task_name, e.__class__, e)
Schedule finalize task. Args: mapreduce_spec: mapreduce specification as MapreduceSpec.
juraj-google-style
def write(self, save_path, options=None): return self._write(save_path, options)
Save the checkpointed variables.

Args:
    save_path: The file prefix of the checkpoint file.
    options: Optional CheckpointOptions instance.

Returns:
    The full path of the checkpoint file.
github-repos
def match(self, f, *args): try: match = f(self.tokenizer, *args) except StopIteration: return if match is None: return if not isinstance(match, grammar.TokenMatch): raise TypeError("Invalid grammar function %r returned %r." % (f, match)) self.matched = match return match
Match grammar function 'f' against next token and set 'self.matched'. Arguments: f: A grammar function - see efilter.parsers.common.grammar. Must return TokenMatch or None. args: Passed to 'f', if any. Returns: Instance of efilter.parsers.common.grammar.TokenMatch or None. Comment: If a match is returned, it will also be stored in self.matched.
juraj-google-style
def __init__(self, max_size=10, max_age=600): super(TimeBasedCache, self).__init__(max_size) self.max_age = max_age def HouseKeeper(): if not time: return now = time.time() for cache in TimeBasedCache.active_caches: with cache.lock: for node in list(itervalues(cache._hash)): timestamp, obj = node.data if timestamp + cache.max_age < now: cache.KillObject(obj) cache._age.Unlink(node) cache._hash.pop(node.key, None) if not TimeBasedCache.house_keeper_thread: TimeBasedCache.active_caches = weakref.WeakSet() TimeBasedCache.house_keeper_thread = InterruptableThread( name="HouseKeeperThread", target=HouseKeeper) TimeBasedCache.house_keeper_thread.start() TimeBasedCache.active_caches.add(self)
Constructor. This cache will refresh the age of the cached object as long as they are accessed within the allowed age. The age refers to the time since it was last touched. Args: max_size: The maximum number of objects held in cache. max_age: The maximum length of time an object is considered alive.
juraj-google-style
def register_entry(self, navbar_kwargs): path = navbar_kwargs.pop('path') if ((not hasattr(path, '__iter__')) or isinstance(path, basestring)): path = [path] entry_group = self.navbar_entries for (name, is_last) in iter_islast(path): kwargs = deepcopy(navbar_kwargs) kwargs['name'] = name for existing_entry in entry_group: if (existing_entry.name == name): entry = existing_entry if is_last: entry.endpoint = kwargs['endpoint'] break else: if (not is_last): kwargs['endpoint'] = None entry = NavbarEntry(**kwargs) entry_group.add(entry) entry_group = entry.children
Register a navbar entry with the copilot. Args: navbar_kwargs (dict): Arguments passed to the :class:`NavbarEntry` instance.
codesearchnet
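A hedged registration sketch, assuming `copilot` is an instance of the class that owns `register_entry` and that each navbar entry is described by a `path` plus an `endpoint`:

```python
# A single-level entry.
copilot.register_entry({'path': 'Dashboard', 'endpoint': 'main.dashboard'})

# A nested path creates (or reuses) a parent entry named 'Admin' and attaches
# a child entry 'Users' pointing at the given endpoint.
copilot.register_entry({'path': ['Admin', 'Users'], 'endpoint': 'admin.users'})
```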
def _model_source_dir(self): return (self.source_dir if self.sagemaker_session.local_mode else self.uploaded_code.s3_prefix)
Get the appropriate value to pass as `source_dir` to the model constructor
when deploying.

Returns:
    str: Either a local or an S3 path pointing to the `source_dir` to be used
    for code by the model to be deployed
codesearchnet
def __init__(self, array): self.array = array
Specify a NumPy array to wrap. Args: array: The NumPy array to save and restore (may be overwritten).
github-repos
def GetMetadata( self, metadata_key='', recursive=True, timeout=None, retry=True): return self._HandleMetadataUpdate( metadata_key=metadata_key, recursive=recursive, wait=False, timeout=timeout, retry=retry)
Retrieve the contents of metadata server for a metadata key. Args: metadata_key: string, the metadata key to watch for changes. recursive: bool, True if we should recursively watch for metadata changes. timeout: int, timeout in seconds for returning metadata output. retry: bool, True if we should retry on failure. Returns: json, the deserialized contents of the metadata server or None if error.
juraj-google-style
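A hedged call sketch, assuming `watcher` is an instance of the metadata-watcher class that owns this method; the metadata key shown is a standard GCE path:

```python
attributes = watcher.GetMetadata(
    metadata_key='instance/attributes', recursive=True, timeout=5)
print(attributes)  # deserialized JSON, or None on error
```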
def _GenClientLibCallback(args, client_func=_GenClientLib): client_path = client_func(args.discovery_doc[0], args.language, args.output, args.build_system) print 'API client library written to %s' % client_path
Generate a client library to file. Args: args: An argparse.Namespace object to extract parameters from client_func: A function that generates client libraries and stores them to files, accepting a path to a discovery doc, a client library language, an output directory, and a build system for the client library language.
juraj-google-style
def exists(self, vars_list: List[str]) -> 'TensorFluent': return self._aggregation_op(tf.reduce_any, self, vars_list)
Returns the TensorFluent for the exists aggregation function. Args: vars_list: The list of variables to be aggregated over. Returns: A TensorFluent wrapping the exists aggregation function.
juraj-google-style
def __send_smtp_email(self, recipients, subject, html_body, text_body): smtp = smtplib.SMTP(dbconfig.get('smtp_server', NS_EMAIL, 'localhost'), dbconfig.get('smtp_port', NS_EMAIL, 25)) source_arn = dbconfig.get('source_arn', NS_EMAIL) return_arn = dbconfig.get('return_path_arn', NS_EMAIL) from_arn = dbconfig.get('from_arn', NS_EMAIL) msg = MIMEMultipart('alternative') if (source_arn and from_arn and return_arn): msg['X-SES-SOURCE-ARN'] = source_arn msg['X-SES-FROM-ARN'] = from_arn msg['X-SES-RETURN-PATH-ARN'] = return_arn msg['Subject'] = subject msg['To'] = ','.join(recipients) msg['From'] = self.sender if html_body: html_part = MIMEText(html_body, 'html') msg.attach(html_part) if text_body: text_part = MIMEText(text_body, 'plain') msg.attach(text_part) if dbconfig.get('smtp_tls', NS_EMAIL, False): smtp.starttls() username = dbconfig.get('smtp_username', NS_EMAIL) password = dbconfig.get('smtp_password', NS_EMAIL) if (username and password): smtp.login(username, password) smtp.sendmail(self.sender, recipients, msg.as_string()) smtp.quit()
Send an email using SMTP Args: recipients (`list` of `str`): List of recipient email addresses subject (str): Subject of the email html_body (str): HTML body of the email text_body (str): Text body of the email Returns: `None`
codesearchnet
def convert_to_qutip(expr, full_space=None, mapping=None): if (full_space is None): full_space = expr.space if (not expr.space.is_tensor_factor_of(full_space)): raise ValueError(("expr '%s' must be in full_space %s" % (expr, full_space))) if (full_space == TrivialSpace): raise AlgebraError('Cannot convert object in TrivialSpace to qutip. You may pass a non-trivial `full_space`') if (mapping is not None): if (expr in mapping): ret = mapping[expr] if isinstance(ret, qutip.Qobj): return ret else: assert callable(ret) return ret(expr) if (expr is IdentityOperator): local_spaces = full_space.local_factors if (len(local_spaces) == 0): raise ValueError(('full_space %s does not have local factors' % full_space)) else: return qutip.tensor(*[qutip.qeye(s.dimension) for s in local_spaces]) elif (expr is ZeroOperator): return qutip.tensor(*[qutip.Qobj(csr_matrix((s.dimension, s.dimension))) for s in full_space.local_factors]) elif isinstance(expr, LocalOperator): return _convert_local_operator_to_qutip(expr, full_space, mapping) elif (isinstance(expr, Operator) and isinstance(expr, Operation)): return _convert_operator_operation_to_qutip(expr, full_space, mapping) elif isinstance(expr, OperatorTrace): raise NotImplementedError('Cannot convert OperatorTrace to qutip') elif isinstance(expr, State): return _convert_ket_to_qutip(expr, full_space, mapping) elif isinstance(expr, SuperOperator): return _convert_superoperator_to_qutip(expr, full_space, mapping) elif isinstance(expr, Operation): return _convert_state_operation_to_qutip(expr, full_space, mapping) elif isinstance(expr, SLH): raise ValueError('SLH objects can only be converted using SLH_to_qutip routine') else: raise ValueError(("Cannot convert '%s' of type %s" % (str(expr), type(expr))))
Convert a QNET expression to a qutip object

Args:
    expr: a QNET expression
    full_space (HilbertSpace): The Hilbert space in which `expr` is defined.
        If not given, ``expr.space`` is used. The Hilbert space must have a
        well-defined basis.
    mapping (dict): A mapping of any (sub-)expression to either a
        `qutip.Qobj` directly, or to a callable that will convert the
        expression into a `qutip.Qobj`. Useful for e.g. supplying objects for
        symbols

Raises:
    ValueError: if `expr` is not in `full_space`, or if `expr` cannot be
        converted.
codesearchnet
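An illustrative sketch only: exact QNET import paths and constructor signatures differ between releases, so the `LocalSpace`/`Destroy` lines below are assumptions rather than a verified API:

```python
import qutip

hs = LocalSpace('q', dimension=3)   # assumed QNET Hilbert-space constructor
a = Destroy(hs=hs)                  # assumed QNET annihilation operator

a_qutip = convert_to_qutip(a)       # a 3x3 qutip.Qobj lowering operator
rho0 = qutip.fock_dm(3, 0)          # a qutip state to use with the converted operator
```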