Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def Images(self, run, tag): accumulator = self.GetAccumulator(run) return accumulator.Images(tag)
Retrieve the image events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.ImageEvents`.
codesearchnet
def __init__(self, uid): message = 'Schema of UID "{}" is unrecognized.'.format(uid) super(UnknownUIDSchema, self).__init__(message)
Exception raised when a schema of a UID is unknown. Args: uid (string): given UID
juraj-google-style
def ensure_list_size(list_, size_): lendiff = (size_) - len(list_) if lendiff > 0: extension = [None for _ in range(lendiff)] list_.extend(extension)
Allocates more space if need be. Ensures len(``list_``) == ``size_``. Args: list_ (list): ``list`` to extend size_ (int): target length to extend ``list_`` to
juraj-google-style
def CopyFromDict(self, attributes): for attribute_name, attribute_value in attributes.items(): if attribute_name[0] == '_': continue setattr(self, attribute_name, attribute_value)
Copies the attribute container from a dictionary. Args: attributes (dict[str, object]): attribute values per name.
juraj-google-style
def __sub__(self, other): try: other = as_dimension(other) except (TypeError, ValueError): return NotImplemented if self._value is None or other.value is None: return Dimension(None) else: return Dimension(self._value - other.value)
Returns the subtraction of `other` from `self`. Dimensions are subtracted as follows: ```python tf.compat.v1.Dimension(m) - tf.compat.v1.Dimension(n) == tf.compat.v1.Dimension(m - n) tf.compat.v1.Dimension(m) - tf.compat.v1.Dimension(None) # equiv. to tf.compat.v1.Dimension(None) tf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(n) # equiv. to tf.compat.v1.Dimension(None) tf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(None) # equiv. to tf.compat.v1.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the subtraction of `other` from `self`.
github-repos
def download_archive(self, id_or_uri, file_path): uri = self.URI + "/archive/" + extract_id_from_uri(id_or_uri) return self._client.download(uri, file_path)
Download the details of the Golden Image capture logs, which have been archived based on the specific attribute ID. Args: id_or_uri: ID or URI of the Golden Image. file_path (str): File name to save the archive. Returns: bool: Success.
juraj-google-style
def _from_keras_log_format(data, **kwargs): data_val = pd.DataFrame(data[['epoch']]) data_val['acc'] = data['val_acc'] data_val['loss'] = data['val_loss'] data_val['data'] = 'validation' data_training = pd.DataFrame(data[['acc', 'loss', 'epoch']]) data_training['data'] = 'training' result = pd.concat([data_training, data_val], sort=False) plot(result, **kwargs)
Plot accuracy and loss from a pandas dataframe. Args: data: Pandas dataframe in the format of the Keras CSV log. output_dir_path: The path to the directory where the resulting plots should end up.
codesearchnet
def alt40mcp(msg): d = hex2bin(data(msg)) if d[0] == '0': return None alt = bin2int(d[1:13]) * 16 return alt
Selected altitude, MCP/FCU Args: msg (String): 28 bytes hexadecimal message (BDS40) string Returns: int: altitude in feet
juraj-google-style
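A minimal, self-contained sketch of the BDS4,0 field extraction performed by `alt40mcp` above, assuming the 56-bit data field is already available as a '0'/'1' string (the library's `data`/`hex2bin` helpers are not reproduced; the example bits are made up):

```python
def mcp_altitude_from_bits(bits):
    # bits: 56-character '0'/'1' string for the BDS4,0 data field (assumption).
    if bits[0] == '0':               # status bit: selected altitude not available
        return None
    return int(bits[1:13], 2) * 16   # 12-bit field, 16 ft resolution

print(mcp_altitude_from_bits('1' + '000011111010' + '0' * 43))  # 250 * 16 = 4000 ft
```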
def _sample_row(self): unis = np.random.uniform(0, 1, self.n_var) first_ind = np.random.randint(0, self.n_var) adj = self.trees[0].get_adjacent_matrix() visited = [] explore = [first_ind] sampled = np.zeros(self.n_var) itr = 0 while explore: current = explore.pop(0) neighbors = np.where((adj[current, :] == 1))[0].tolist() if (itr == 0): new_x = self.ppfs[current](unis[current]) else: for i in range((itr - 1), (- 1), (- 1)): current_ind = (- 1) if (i >= self.truncated): continue current_tree = self.trees[i].edges for edge in current_tree: if (i == 0): if (((edge.L == current) and (edge.R == visited[0])) or ((edge.R == current) and (edge.L == visited[0]))): current_ind = edge.index break elif ((edge.L == current) or (edge.R == current)): condition = set(edge.D) condition.add(edge.L) condition.add(edge.R) visit_set = set(visited) visit_set.add(current) if condition.issubset(visit_set): current_ind = edge.index break if (current_ind != (- 1)): copula_type = current_tree[current_ind].name copula = Bivariate(CopulaTypes(copula_type)) copula.theta = current_tree[current_ind].theta derivative = copula.partial_derivative_scalar if (i == (itr - 1)): tmp = optimize.fminbound(derivative, EPSILON, 1.0, args=(unis[visited[0]], unis[current])) else: tmp = optimize.fminbound(derivative, EPSILON, 1.0, args=(unis[visited[0]], tmp)) tmp = min(max(tmp, EPSILON), 0.99) new_x = self.ppfs[current](tmp) sampled[current] = new_x for s in neighbors: if (s not in visited): explore.insert(0, s) itr += 1 visited.insert(0, current) return sampled
Generate a single sampled row from vine model. Returns: numpy.ndarray
codesearchnet
def build_relative_position(query_layer, key_layer): query_size = query_layer.size(-2) key_size = key_layer.size(-2) q_ids = torch.arange(query_size, dtype=torch.long, device=query_layer.device) k_ids = torch.arange(key_size, dtype=torch.long, device=key_layer.device) rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1) rel_pos_ids = rel_pos_ids[:query_size, :] rel_pos_ids = rel_pos_ids.unsqueeze(0) return rel_pos_ids
Build relative position according to the query and key. We assume the absolute position of query \(P_q\) ranges from (0, query_size) and the absolute position of key \(P_k\) ranges from (0, key_size). The relative positions from query to key are \(R_{q \rightarrow k} = P_q - P_k\). Args: query_size (int): the length of query key_size (int): the length of key Return: `torch.LongTensor`: A tensor with shape [1, query_size, key_size]
github-repos
def role_instance(self, value): if value == self._defaults['ai.cloud.roleInstance'] and 'ai.cloud.roleInstance' in self._values: del self._values['ai.cloud.roleInstance'] else: self._values['ai.cloud.roleInstance'] = value
The role_instance property. Args: value (string): the property value.
juraj-google-style
def get_volume_list(self) -> list: volumes = [] if (not self._manager): raise RuntimeError('Only the Swarm manager node can retrieve all the services.') volume_list = self._client.volumes.list() for v_list in volume_list: volumes.append(v_list.name) return volumes
Get a list of Docker volumes. Only the manager nodes can retrieve all the volumes. Returns: list, all the names of the volumes in the swarm
codesearchnet
def chomp(text, max_len=280, split=None): split = split or '—;,.' while len(text) > max_len: try: text = re.split(r'[' + split + ']', text[::-1], 1)[1][::-1] except IndexError: return text return text
Shorten a string so that it fits under max_len, splitting it at 'split'. Not guaranteed to return a string under max_len, as it may not be possible. Args: text (str): String to shorten max_len (int): maximum length (default 280) split (str): characters to split on (default is common punctuation: "—;,.")
juraj-google-style
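The reverse-and-split trick used by `chomp` above is easy to misread; below is a self-contained sketch of just that step (illustrative only, with the same default punctuation set):

```python
import re

def trim_at_last_punctuation(text, split='—;,.'):
    # Reverse, split once at the first punctuation mark found (i.e. the last
    # one in the original string), then reverse back to drop the tail fragment.
    parts = re.split(r'[' + split + ']', text[::-1], maxsplit=1)
    return parts[1][::-1] if len(parts) > 1 else text

print(trim_at_last_punctuation('one, two, three'))  # 'one, two'
```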
def flatten(weights, start=0, stop=2): for (key, val) in weights.items(): new_shape = ((val.shape[0:start] + ((- 1),)) + val.shape[stop:]) weights[key] = val.reshape(new_shape) return weights
This method reshapes all values in a dictionary. The indices from start to stop will be flattened into a single index. Args: weights: A dictionary mapping keys to numpy arrays. start: The starting index. stop: The ending index.
codesearchnet
def total_clicks(self, url): url = self.clean_url(url) clicks_url = f'{self.api_url}v3/link/clicks' params = { 'link': url, 'access_token': self.api_key, 'format': 'txt' } response = self._get(clicks_url, params=params) if not response.ok: raise BadAPIResponseException(response.content) try: total_clicks = int(response.text) except (KeyError, TypeError) as e: logger.warning('Bad value from total_clicks response: %s', e) return 0 return total_clicks
Total clicks implementation for Bit.ly Args: url: the URL for which you want the total clicks count Returns: An int containing the total clicks count Raises: BadAPIResponseException: If the API returns an error as response
juraj-google-style
def shape_tensor(self, name='shape_tensor'): with self._name_scope(name): if self.shape.is_fully_defined(): return linear_operator_util.shape_tensor(self.shape.as_list()) else: return self._shape_tensor()
Shape of this `LinearOperator`, determined at runtime. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding `[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`. Args: name: A name for this `Op`. Returns: `int32` `Tensor`
github-repos
def jaccard_sims(feature_list): sim_info_list = [] for feature_info in feature_list: md5_source = feature_info['md5'] features_source = feature_info['features'] for feature_info in feature_list: md5_target = feature_info['md5'] features_target = feature_info['features'] if md5_source == md5_target: continue sim = jaccard_sim(features_source, features_target) if sim > .5: sim_info_list.append({'source': md5_source, 'target': md5_target, 'sim': sim}) return sim_info_list
Compute Jaccard similarities between all the observations in the feature list. Args: feature_list: a list of dictionaries, each having structure as { 'md5' : String, 'features': list of Strings } Returns: list of dictionaries with structure as {'source': md5 String, 'target': md5 String, 'sim': Jaccard similarity Number}
juraj-google-style
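`jaccard_sims` above calls a `jaccard_sim` helper that is not shown in this entry; a plausible stand-in, assuming the features are hashable and compared as sets, would be:

```python
def jaccard_sim(features_a, features_b):
    # |A ∩ B| / |A ∪ B| over the two feature lists, treated as sets.
    a, b = set(features_a), set(features_b)
    if not a and not b:
        return 0.0
    return len(a & b) / len(a | b)

print(jaccard_sim(['x', 'y', 'z'], ['y', 'z', 'w']))  # 2 / 4 = 0.5
```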
def flatten(input_layer, preserve_batch=True): if preserve_batch: return reshape(input_layer, [DIM_SAME, -1]) else: return reshape(input_layer, [-1])
Flattens this. If preserve_batch is True, the result is rank 2 and the first dim (batch) is unchanged. Otherwise the result is rank 1. Args: input_layer: The Pretty Tensor object, supplied. preserve_batch: If True (the default), then preserve the first dimension. Returns: A LayerWrapper with the flattened tensor.
juraj-google-style
def _AlignDecodedDataOffset(self, decoded_data_offset): self._file_object.seek(0, os.SEEK_SET) self._decoder = self._GetDecoder() self._decoded_data = b'' encoded_data_offset = 0 encoded_data_size = self._file_object.get_size() while encoded_data_offset < encoded_data_size: read_count = self._ReadEncodedData(self._ENCODED_DATA_BUFFER_SIZE) if read_count == 0: break encoded_data_offset += read_count if decoded_data_offset < self._decoded_data_size: self._decoded_data_offset = decoded_data_offset break decoded_data_offset -= self._decoded_data_size
Aligns the encoded file with the decoded data offset. Args: decoded_data_offset (int): decoded data offset.
juraj-google-style
def run_multiple(self, eventLoops): self.nruns += len(eventLoops) return self.communicationChannel.put_multiple(eventLoops)
Run the event loops in the background. Args: eventLoops (list): a list of event loops to run
juraj-google-style
def Wget(src_url, tgt_name, tgt_root=None): if (tgt_root is None): tgt_root = str(CFG['tmp_dir']) from benchbuild.utils.cmd import wget tgt_file = (local.path(tgt_root) / tgt_name) if (not source_required(tgt_file)): Copy(tgt_file, '.') return wget(src_url, '-O', tgt_file) update_hash(tgt_file) Copy(tgt_file, '.')
Download url, if required. Args: src_url (str): Our SOURCE url. tgt_name (str): The filename we want to have on disk. tgt_root (str): The TARGET directory for the download. Defaults to ``CFG["tmp_dir"]``.
codesearchnet
def fit_truncated_gaussian(samples, lower_bounds, upper_bounds): if (len(samples.shape) == 1): return _TruncatedNormalFitter()((samples, lower_bounds, upper_bounds)) def item_generator(): for ind in range(samples.shape[0]): if is_scalar(lower_bounds): lower_bound = lower_bounds else: lower_bound = lower_bounds[ind] if is_scalar(upper_bounds): upper_bound = upper_bounds else: upper_bound = upper_bounds[ind] (yield (samples[ind], lower_bound, upper_bound)) results = np.array(multiprocess_mapping(_TruncatedNormalFitter(), item_generator())) return (results[:, 0], results[:, 1])
Fits a truncated gaussian distribution on the given samples. This will do a maximum likelihood estimation of a truncated Gaussian on the provided samples, with the truncation points given by the lower and upper bounds. Args: samples (ndarray): a one or two dimensional array. If one dimensional we fit the truncated Gaussian on all values. If two dimensional, we calculate the truncated Gaussian for every set of samples over the first dimension. lower_bounds (ndarray or float): the lower bound, either a scalar or a lower bound per problem (first index of samples) upper_bounds (ndarray or float): the upper bound, either a scalar or an upper bound per problem (first index of samples) Returns: mean, std: the mean and std of the fitted truncated Gaussian
codesearchnet
def xmon_op_from_proto_dict(proto_dict: Dict) -> ops.Operation: def raise_missing_fields(gate_name: str): raise ValueError( '{} missing required fields: {}'.format(gate_name, proto_dict)) param = _parameterized_value_from_proto_dict qubit = devices.GridQubit.from_proto_dict if 'exp_w' in proto_dict: exp_w = proto_dict['exp_w'] if ('half_turns' not in exp_w or 'axis_half_turns' not in exp_w or 'target' not in exp_w): raise_missing_fields('ExpW') return ops.PhasedXPowGate( exponent=param(exp_w['half_turns']), phase_exponent=param(exp_w['axis_half_turns']), ).on(qubit(exp_w['target'])) elif 'exp_z' in proto_dict: exp_z = proto_dict['exp_z'] if 'half_turns' not in exp_z or 'target' not in exp_z: raise_missing_fields('ExpZ') return ops.Z(qubit(exp_z['target']))**param(exp_z['half_turns']) elif 'exp_11' in proto_dict: exp_11 = proto_dict['exp_11'] if ('half_turns' not in exp_11 or 'target1' not in exp_11 or 'target2' not in exp_11): raise_missing_fields('Exp11') return ops.CZ(qubit(exp_11['target1']), qubit(exp_11['target2']))**param(exp_11['half_turns']) elif 'measurement' in proto_dict: meas = proto_dict['measurement'] invert_mask = cast(Tuple[Any, ...], ()) if 'invert_mask' in meas: invert_mask = tuple(json.loads(x) for x in meas['invert_mask']) if 'key' not in meas or 'targets' not in meas: raise_missing_fields('Measurement') return ops.MeasurementGate( num_qubits=len(meas['targets']), key=meas['key'], invert_mask=invert_mask ).on(*[qubit(q) for q in meas['targets']]) else: raise ValueError('invalid operation: {}'.format(proto_dict))
Convert the proto dictionary to the corresponding operation. See protos in api/google/v1 for specification of the protos. Args: proto_dict: Dictionary representing the proto. Keys are always strings, but values may be types correspond to a raw proto type or another dictionary (for messages). Returns: The operation. Raises: ValueError if the dictionary does not contain required values corresponding to the proto.
juraj-google-style
def query(self, object_class=None, json=None, **kwargs): path = '/directory-sync-service/v1/{}'.format(object_class) r = self._httpclient.request(method='POST', url=self.url, json=json, path=path, **kwargs) return r
Query data stored in directory. Retrieves directory data by querying a Directory Sync Service cloud-based instance. The directory data is stored with the Directory Sync Service instance using an agent that is installed in the customer's network. This agent retrieves directory data from the customer's Active Directory, and then sends it to the cloud-based Directory Sync Service instance. Args: object_class (str): Directory object class. json (dict): Payload/request body. **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters. Returns: requests.Response: Requests Response() object. Examples: Coming soon.
codesearchnet
def two_point_effective_mass( cartesian_k_points, eigenvalues ): assert( cartesian_k_points.shape[0] == 2 ) assert( eigenvalues.size == 2 ) dk = cartesian_k_points[ 1 ] - cartesian_k_points[ 0 ] mod_dk = np.sqrt( np.dot( dk, dk ) ) delta_e = ( eigenvalues[ 1 ] - eigenvalues[ 0 ] ) * ev_to_hartree * 2.0 effective_mass = mod_dk * mod_dk / delta_e return effective_mass
Calculate the effective mass given eigenvalues at two k-points. Reimplemented from Aron Walsh's original effective mass Fortran code. Args: cartesian_k_points (np.array): 2D numpy array containing the k-points in (reciprocal) Cartesian coordinates. eigenvalues (np.array): numpy array containing the eigenvalues at each k-point. Returns: (float): The effective mass
juraj-google-style
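A numeric sketch of the two-point finite-difference formula used above, m* ≈ |Δk|² / (2 ΔE) in atomic units. The eV-to-Hartree factor is the standard 1/27.2114; the k-points and eigenvalues below are made up for illustration, and unit handling of the k-points is left to the caller, as in the original:

```python
import numpy as np

EV_TO_HARTREE = 1.0 / 27.211386

k_points = np.array([[0.00, 0.0, 0.0],
                     [0.05, 0.0, 0.0]])   # Cartesian reciprocal coordinates (made up)
eigenvalues = np.array([0.000, 0.010])    # eV (made up)

dk = k_points[1] - k_points[0]
delta_e = (eigenvalues[1] - eigenvalues[0]) * EV_TO_HARTREE * 2.0  # 2 * ΔE in hartree
effective_mass = np.dot(dk, dk) / delta_e
print(effective_mass)
```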
def name_scope_only_in_function_or_graph(name): if not context.executing_eagerly(): return ops.name_scope_v1(name) else: return NullContextmanager()
Internal-only entry point for `name_scope*`. Enters a compat.v1.name_scope only when in a function or graph, not when running fully eagerly. Args: name: The name argument that is passed to the op function. Returns: `name_scope*` context manager.
github-repos
def add_string_pairs_from_attributed_ui_element(results, ui_element, comment_prefix): attributed_strings = ui_element.getElementsByTagName('attributedString') if (attributed_strings.length == 0): return False attributed_element = attributed_strings[0] fragment_index = 1 for fragment in attributed_element.getElementsByTagName('fragment'): try: label_entry_key = fragment.attributes['content'].value except KeyError: label_entry_key = fragment.getElementsByTagName('string')[0].firstChild.nodeValue comment = ('%s Part %d' % (comment_prefix, fragment_index)) results.append((label_entry_key, comment)) fragment_index += 1 return (fragment_index > 1)
Adds string pairs from a UI element with attributed text. Args: results (list): The list to add the results to. ui_element (element): The element from the xib to extract the fragments from. comment_prefix (str): The prefix of the comment to use for extracted strings (will be appended with "Part X" suffixes). Returns: bool: Whether or not an attributed string was found.
codesearchnet
def replace(self, pattern, replacement): for i, line in enumerate(self): if pattern in line: self[i] = line.replace(pattern, replacement)
Replace all instances of a pattern with a replacement. Args: pattern (str): Pattern to replace replacement (str): Text to insert
juraj-google-style
def _paths_referenced_by(node: AbstractSyntaxTree) -> Tuple[Optional[str], Collection[str]]: if isinstance(node, Identifier): if node.value in ('$this', '$index', '$total'): return (None, ()) else: return (node.value, ()) if not node.children: return (None, ()) context, paths = _paths_referenced_by(node.children[0]) if isinstance(node, Function): context = None if isinstance(node, Invocation) and isinstance(node.rhs, Identifier): context = _append_path_to_context(context, node.rhs.value) return (context, paths) if context is not None: paths = paths + (context,) if isinstance(node, Invocation): child_paths = _get_paths_from_children_except_first(node) child_paths = tuple((_append_path_to_context(context, child_path) for child_path in child_paths)) return (context, paths + child_paths) child_paths = _get_paths_from_children_except_first(node) return (context, paths + child_paths)
Finds paths for any fields referenced in the given tree. Recursively builds paths by visiting the tree's nodes depth-first, in-order. Returns a tuple of (context, paths) where `context` is an identifier which may be part of a dotted path completed by its parent and `paths` are the full dotted paths found so far. Callers are responsible for attempting to either continue chaining successive identifiers from invocations to the context or acknowledging it as completed and adding it to `paths` if the caller has no identifiers to add to the chain. Args: node: The abstract syntax tree to search for paths. Returns: A tuple of (context, paths) as described above.
github-repos
def _placeholder_value(like, shape_invariant, original=None): if like is None: return (original, None) elif isinstance(like, (variables.Undefined, variables.UndefinedReturnValue)): return (original, None) elif isinstance(like, (int, float, bool)): return (type(like)(0), None) elif tensor_util.is_tf_type(like): like_shape = shape_invariant if shape_invariant is not None else like.shape if like_shape is None or like_shape.rank is None: return (array_ops.zeros((), like.dtype), like_shape) placeholder_shape = [] has_dynamic_dims = False for s, i in zip(like.shape, like_shape): if i is None: like_dim = 0 elif isinstance(i, tensor_shape.Dimension): if i.value is None: like_dim = 0 else: like_dim = i.value else: like_dim = i if s is None: placeholder_shape.append(like_dim) has_dynamic_dims = True elif isinstance(s, tensor_shape.Dimension): if s.value is None: placeholder_shape.append(like_dim) has_dynamic_dims = True else: placeholder_shape.append(s.value) else: placeholder_shape.append(s) if has_dynamic_dims: invariant = like_shape else: invariant = None return (array_ops.zeros(placeholder_shape, like.dtype), invariant) elif isinstance(like, (list, tuple, dict)): if shape_invariant is None: zipped = nest.map_structure(lambda v: _placeholder_value(v, None), nest.flatten(like)) else: zipped = nest.map_structure(_placeholder_value, nest.flatten(like), nest.flatten(shape_invariant)) vals, invars = zip(*zipped) return (nest.pack_sequence_as(like, vals), nest.pack_sequence_as(like, invars)) raise TypeError("Found an unsupported type '{}' while creating placeholder for {}. Supported types include Tensor, int, float, bool, list, tuple or dict.".format(type(like).__name__, like))
Constructs a (dummy) placeholder value for a loop-initialized variable. Args: like: Any object. The value created by the first iteration of the loop. If a Python scalar, the placeholder will be the zero value of that type. If a Tensor, the placeholder will be a zero tensor of matching shape and dtype. If a list, dict or tuple, the placeholder will be an identical structure of placeholders. shape_invariant: The shape invariant specified by the user (or None, if nothing was specified) for the respective variable. original: Any object. The value of the variable prior to entering the loop. Typically, this is one of the special "Undefined" value, because that's when a placeholder is needed. Returns: Either a zero value of structure, shape and dtype matching 'like', or 'original', if no such zero value could be created.
github-repos
def map_kegg_all_genes(organism_code, target_db): mapping = bs_kegg.conv(target_db, organism_code) new_mapping = {} for k,v in mapping.items(): new_mapping[k.replace(organism_code + ':', '')] = str(v.split(':')[1]) return new_mapping
Map all of an organism's gene IDs to the target database. This is faster than supplying a specific list of genes to map, plus there seems to be a limit on the number you can map with a manual REST query anyway. Args: organism_code: the three letter KEGG code of your organism target_db: ncbi-proteinid | ncbi-geneid | uniprot Returns: Dictionary of ID mapping
juraj-google-style
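The only transformation `map_kegg_all_genes` applies on top of the KEGG conv query is stripping the ID prefixes; a self-contained sketch of that reshaping on a made-up conv-style mapping (the `bs_kegg.conv` call itself is not reproduced, and the IDs below are illustrative):

```python
# Made-up example of a raw mapping as returned by a KEGG "conv" query.
raw_mapping = {'eco:b0002': 'ncbi-geneid:945803',
               'eco:b0003': 'ncbi-geneid:947498'}

organism_code = 'eco'
clean = {k.replace(organism_code + ':', ''): v.split(':')[1]
         for k, v in raw_mapping.items()}
print(clean)  # {'b0002': '945803', 'b0003': '947498'}
```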
def _run_graph(self, device, output_shape, variable, num_outputs, axis): graph = ops.Graph() with graph.as_default(): if not variable: if axis == 0: input_shape = [output_shape[0] * num_outputs, output_shape[1]] sizes = [output_shape[0] for _ in range(num_outputs)] else: input_shape = [output_shape[0], output_shape[1] * num_outputs] sizes = [output_shape[1] for _ in range(num_outputs)] else: sizes = np.random.randint(low=max(1, output_shape[axis] - 2), high=output_shape[axis] + 2, size=num_outputs) total_size = np.sum(sizes) if axis == 0: input_shape = [total_size, output_shape[1]] else: input_shape = [output_shape[0], total_size] outputs = build_graph(device, input_shape, sizes, axis) config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(optimizer_options=config_pb2.OptimizerOptions(opt_level=config_pb2.OptimizerOptions.L0))) with session_lib.Session(graph=graph, config=config) as session: logging.set_verbosity('info') variables.global_variables_initializer().run() bench = benchmark.TensorFlowBenchmark() bench.run_op_benchmark(session, outputs, mbs=input_shape[0] * input_shape[1] * 4 * 2 * 100 / 1000000.0, extras={'input_shape': input_shape, 'variable': variable, 'axis': axis})
Run the graph and print its execution time. Args: device: string, the device to run on. output_shape: shape of each output tensors. variable: whether or not the output shape should be fixed num_outputs: the number of outputs to split the input into axis: axis to be split Returns: The duration of the run in seconds.
github-repos
def recipe_fred_regional_to_bigquery(config, auth, fred_api_key, fred_series_group, fred_region_type, fred_units, fred_frequency, fred_season, fred_aggregation_method, project, dataset): fred(config, {'auth': auth, 'api_key': fred_api_key, 'frequency': fred_frequency, 'region_type': fred_region_type, 'regions': [{'series_group': fred_series_group, 'units': fred_units, 'season': fred_season, 'aggregation_method': fred_aggregation_method}], 'out': {'bigquery': {'project': project, 'dataset': dataset}}})
Download Federal Reserve regional data. Args: auth (authentication) - Credentials used for writing data. fred_api_key (string) - 32 character alpha-numeric lowercase string. fred_series_group (string) - The ID for a group of series found in GeoFRED. fred_region_type (choice) - The region you want to pull data for. fred_units (choice) - A key that indicates a data value transformation. fred_frequency (choice) - An optional parameter that indicates a lower frequency to aggregate values to. fred_season (choice) - The seasonality of the series group. fred_aggregation_method (choice) - A key that indicates the aggregation method used for frequency aggregation. project (string) - Existing BigQuery project. dataset (string) - Existing BigQuery dataset.
github-repos
def join(self) -> None: self._is_thread_joined = True self._thread.join() if self._exception is not None: self._testcase.fail('Error in checkedThread: %s' % str(self._exception))
Blocks until the thread terminates. Raises: self._testcase.failureException: If the thread terminates due to an exception.
github-repos
def compute_predicted_aligned_error(logits: torch.Tensor, max_bin: int=31, no_bins: int=64, **kwargs) -> Dict[str, torch.Tensor]: boundaries = torch.linspace(0, max_bin, steps=no_bins - 1, device=logits.device) aligned_confidence_probs = torch.nn.functional.softmax(logits, dim=-1) predicted_aligned_error, max_predicted_aligned_error = _calculate_expected_aligned_error(alignment_confidence_breaks=boundaries, aligned_distance_error_probs=aligned_confidence_probs) return {'aligned_confidence_probs': aligned_confidence_probs, 'predicted_aligned_error': predicted_aligned_error, 'max_predicted_aligned_error': max_predicted_aligned_error}
Computes aligned confidence metrics from logits. Args: logits: [*, num_res, num_res, num_bins] the logits output from PredictedAlignedErrorHead. max_bin: Maximum bin value no_bins: Number of bins Returns: aligned_confidence_probs: [*, num_res, num_res, num_bins] the predicted aligned error probabilities over bins for each residue pair. predicted_aligned_error: [*, num_res, num_res] the expected aligned distance error for each pair of residues. max_predicted_aligned_error: [*] the maximum predicted error possible.
github-repos
def set_xlim(self, xlim): if len(xlim) != 2: raise ValueError("xlim must contain two elements") if xlim[1] < xlim[0]: raise ValueError("Min must be less than Max") self.options["min_x"] = xlim[0] self.options["max_x"] = xlim[1]
Set x-axis limits. Accepts a two-element list to set the x-axis limits. Args: xlim (list): lower and upper bounds Raises: ValueError: xlim must contain two elements ValueError: Min must be less than max
juraj-google-style
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool=False): residual = hidden_states hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Input to the layer. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Attention mask. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings, to be added to `hidden_states`. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes of the backbone feature maps. level_start_index (`torch.LongTensor`, *optional*): Level start index. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def uniform(self, low: float, high: float) -> float: return float(lib.TCOD_random_get_double(self.random_c, low, high))
Return a random floating-point number in the range: low <= n <= high. Args: low (float): The lower bound of the random range. high (float): The upper bound of the random range. Returns: float: A random float.
juraj-google-style
def get_resource(self, resource_key, **variables): handle = self.make_resource_handle(resource_key, **variables) return self.get_resource_from_handle(handle, verify_repo=False)
Get a resource. Attempts to get and return a cached version of the resource if available, otherwise a new resource object is created and returned. Args: resource_key (`str`): Name of the type of `Resources` to find variables: data to identify / store on the resource Returns: `PackageRepositoryResource` instance.
juraj-google-style
def schema(self): def _info2columns(info): return tuple(((column['name'], column['type']) for column in info)) def _table2tuple(table): return (table, _info2columns(self.table_info(table))) return [_table2tuple(table) for table in self.tables]
Returns the schema of all tables. For each table, return the name, and a list of tuples representing the columns. Each column tuple consists of a (name, type) pair. Note that additional metadata, such as whether a column may be null, or whether a column is a primary key, is not returned. Example: >>> db.schema [("bar", (("id", "integer"), ("name", "table")))] Returns: list of tuples: Each tuple has the format (name, columns), where "columns" is a list of tuples of the form (name, type).
codesearchnet
def WriteArtifactsFile(self, artifacts, filename): with open(filename, 'w') as file_object: file_object.write(self.FormatArtifacts(artifacts))
Writes artifact definitions to a file. Args: artifacts (list[ArtifactDefinition]): artifact definitions to be written. filename (str): name of the file to write artifacts to.
juraj-google-style
def DecodeValueFromAttribute(self, attribute_name, value, ts): try: attribute = Attribute.PREDICATES[attribute_name] cls = attribute.attribute_type self._AddAttributeToCache(attribute, LazyDecoder(cls, value, ts), self.synced_attributes) except KeyError: pass except (ValueError, rdfvalue.DecodeError): logging.debug('%s: %s invalid encoding. Skipping.', self.urn, attribute_name)
Given a serialized value, decode the attribute. Only attributes which have been previously defined are permitted. Args: attribute_name: The string name of the attribute. value: The serialized attribute value. ts: The timestamp of this attribute.
codesearchnet
def write_bit(self, registeraddress, value, functioncode=5): _checkFunctioncode(functioncode, [5, 15]) _checkInt(value, minvalue=0, maxvalue=1, description='input value') self._genericCommand(functioncode, registeraddress, value)
Write one bit to the slave. Args: * registeraddress (int): The slave register address (use decimal numbers, not hex). * value (int): 0 or 1 * functioncode (int): Modbus function code. Can be 5 or 15. Returns: None Raises: ValueError, TypeError, IOError
codesearchnet
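This `write_bit` looks like the minimalmodbus-style API; if so, a typical call might look like the sketch below. The serial port and slave address are placeholders, and this is a usage sketch under that assumption rather than a tested example:

```python
import minimalmodbus

# Placeholder port and slave address; adjust for the actual device.
instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 1)

instrument.write_bit(71, 1)                   # set coil 71 using function code 5 (default)
instrument.write_bit(71, 0, functioncode=15)  # clear it via Write Multiple Coils
```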
def get_all_pipelines(self): pipelines = list(map(self.get_pipeline, self.pipeline_dict.keys())) self._construct_solid_defs(pipelines) return pipelines
Return all pipelines as a list Returns: List[PipelineDefinition]:
codesearchnet
def subproc_call(cmd, timeout=None): try: output = subprocess.check_output( cmd, stderr=subprocess.STDOUT, shell=True, timeout=timeout) return output, 0 except subprocess.TimeoutExpired as e: logger.warn("Command '{}' timeout!".format(cmd)) logger.warn(e.output.decode('utf-8')) return e.output, -1 except subprocess.CalledProcessError as e: logger.warn("Command '{}' failed, return code={}".format(cmd, e.returncode)) logger.warn(e.output.decode('utf-8')) return e.output, e.returncode except Exception: logger.warn("Command '{}' failed to run.".format(cmd)) return "", -2
Execute a command with timeout, and return STDOUT and STDERR Args: cmd(str): the command to execute. timeout(float): timeout in seconds. Returns: output(bytes), retcode(int). If timeout, retcode is -1.
juraj-google-style
def open_if_needed(self, mode=None): was_open = self.is_open() if (not was_open): self.open(mode=mode) try: (yield self) finally: if (not was_open): self.close()
Convenience context-manager for the use with ``with``. Opens the container if not already done. Only closes the container if it was opened within this context. Args: mode (str): Either 'r' for read-only, 'w' for truncate and write or 'a' for append. (default: 'a'). If ``None``, uses ``self.mode``.
codesearchnet
def parse_json(self, values_json): values_map = json.loads(values_json) return self.override_from_dict(values_map)
Override existing hyperparameter values, parsing new values from a json object. Args: values_json: String containing a json object of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_json` doesn't exist. ValueError: If `values_json` cannot be parsed.
juraj-google-style
async def send_message(self, segments, image_file=None, image_id=None, image_user_id=None): async with self._send_message_lock: if image_file: try: uploaded_image = (await self._client.upload_image(image_file, return_uploaded_image=True)) except exceptions.NetworkError as e: logger.warning('Failed to upload image: {}'.format(e)) raise image_id = uploaded_image.image_id try: request = hangouts_pb2.SendChatMessageRequest(request_header=self._client.get_request_header(), event_request_header=self._get_event_request_header(), message_content=hangouts_pb2.MessageContent(segment=[seg.serialize() for seg in segments])) if (image_id is not None): request.existing_media.photo.photo_id = image_id if (image_user_id is not None): request.existing_media.photo.user_id = image_user_id request.existing_media.photo.is_custom_user_id = True (await self._client.send_chat_message(request)) except exceptions.NetworkError as e: logger.warning('Failed to send message: {}'.format(e)) raise
Send a message to this conversation. A per-conversation lock is acquired to ensure that messages are sent in the correct order when this method is called multiple times asynchronously. Args: segments: List of :class:`.ChatMessageSegment` objects to include in the message. image_file: (optional) File-like object containing an image to be attached to the message. image_id: (optional) ID of an Picasa photo to be attached to the message. If you specify both ``image_file`` and ``image_id`` together, ``image_file`` takes precedence and ``image_id`` will be ignored. image_user_id: (optional) Picasa user ID, required only if ``image_id`` refers to an image from a different Picasa user, such as Google's sticker user. Raises: .NetworkError: If the message cannot be sent.
codesearchnet
def _sign_simple_signature_fulfillment(cls, input_, message, key_pairs): input_ = deepcopy(input_) public_key = input_.owners_before[0] message = sha3_256(message.encode()) if input_.fulfills: message.update('{}{}'.format(input_.fulfills.txid, input_.fulfills.output).encode()) try: input_.fulfillment.sign(message.digest(), base58.b58decode(key_pairs[public_key].encode())) except KeyError: raise KeypairMismatchException('Public key {} is not a pair to any of the private keys'.format(public_key)) return input_
Signs a Ed25519Fulfillment. Args: input_ (:class:`~bigchaindb.common.transaction. Input`) The input to be signed. message (str): The message to be signed key_pairs (dict): The keys to sign the Transaction with.
codesearchnet
def save(self, path): self.clip.write_videofile(path, audio_fps=self.clip.audio.fps)
Save source video to file. Args: path (str): Filename to save to. Notes: Saves entire source video to file, not just currently selected frames.
juraj-google-style
def print_matrix(self, format=None, output=sys.stdout, depth=0, **kwargs): matrix = self.as_matrix(depth=depth) matrix.print(format=format, output=output, **kwargs)
Print the matrix for self's nodes. Args: format (str): output format (csv, json or text). output (file): file descriptor on which to write. depth (int): depth of the matrix.
juraj-google-style
def _dominant_task_for_jobs(tasks): per_job = _group_tasks_by_jobid(tasks) ret = [] for job_id in per_job.keys(): tasks_in_salience_order = sorted(per_job[job_id], key=_importance_of_task) ret.append(tasks_in_salience_order[0]) return ret
A list with, for each job, its dominant task. The dominant task is the one that exemplifies its job's status. It is either: - the first (FAILURE or CANCELED) task, or if none - the first RUNNING task, or if none - the first SUCCESS task. Args: tasks: a list of tasks to consider Returns: A list with, for each job, its dominant task.
juraj-google-style
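`_dominant_task_for_jobs` above depends on `_group_tasks_by_jobid` and `_importance_of_task`, which are not shown in this entry; a self-contained sketch of the same idea, with a made-up task shape and status ordering:

```python
from collections import defaultdict

# Made-up salience ordering: lower rank = more important status.
_STATUS_RANK = {'FAILURE': 0, 'CANCELED': 0, 'RUNNING': 1, 'SUCCESS': 2}

def dominant_task_per_job(tasks):
    per_job = defaultdict(list)
    for task in tasks:                       # group tasks by their job id
        per_job[task['job-id']].append(task)
    # For each job, pick the task whose status is most salient.
    return [min(group, key=lambda t: _STATUS_RANK[t['status']])
            for group in per_job.values()]

tasks = [{'job-id': 'j1', 'status': 'SUCCESS'},
         {'job-id': 'j1', 'status': 'RUNNING'},
         {'job-id': 'j2', 'status': 'FAILURE'}]
print(dominant_task_per_job(tasks))  # the RUNNING task for j1, the FAILURE task for j2
```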
def __init__(self, conf, map_name, automount_mountpoint=None): super(FilesCache, self).__init__(conf, map_name, automount_mountpoint=automount_mountpoint) self.cache_filename_suffix = conf.get('cache_filename_suffix', 'cache') self._indices = {} if hasattr(self, '_INDEX_ATTRIBUTES'): for index in self._INDEX_ATTRIBUTES: self._indices[index] = {}
Create a handler for the given map type. Args: conf: a configuration object map_name: a string representing the type of map we are automount_mountpoint: A string containing the automount mountpoint, used only by automount maps.
github-repos
def speed_clustering(clf, points, min_time): changepoints = detect_changepoints(points, min_time) cp_info = [] for i in range(0, len(changepoints) - 1): from_index = changepoints[i] to_index = changepoints[i+1] info = classify(clf, points[from_index:to_index], min_time, from_index, to_index) if info: cp_info.append(info) return group_modes(cp_info)
Transportation mode inference, based on changepoint segmentation Args: clf (:obj:`Classifier`): Classifier to use points (:obj:`list` of :obj:`Point`) min_time (float): Min time, in seconds, before doing another segmentation Returns: :obj:`list` of :obj:`dict`
juraj-google-style
def expand_role(self, role): if '/' in role: return role else: return self.boto_session.resource('iam').Role(role).arn
Expand an IAM role name into an ARN. If the role is already in the form of an ARN, then the role is simply returned. Otherwise we retrieve the full ARN and return it. Args: role (str): An AWS IAM role (either name or full ARN). Returns: str: The corresponding AWS IAM role ARN.
juraj-google-style
def to_df(self, variables=None, format='wide', fillna=np.nan, **kwargs): if (variables is None): variables = list(self.variables.keys()) if (not isinstance(variables[0], BIDSVariable)): variables = [v for v in self.variables.values() if (v.name in variables)] dfs = [v.to_df(**kwargs) for v in variables] df = pd.concat(dfs, axis=0, sort=True) if (format == 'long'): return df.reset_index(drop=True).fillna(fillna) ind_cols = list((set(df.columns) - {'condition', 'amplitude'})) df['amplitude'] = df['amplitude'].fillna('n/a') df = df.pivot_table(index=ind_cols, columns='condition', values='amplitude', aggfunc='first') df = df.reset_index().replace('n/a', fillna) df.columns.name = None return df
Merge variables into a single pandas DataFrame. Args: variables (list): Optional list of column names to retain; if None, all variables are returned. format (str): Whether to return a DataFrame in 'wide' or 'long' format. In 'wide' format, each row is defined by a unique onset/duration, and each variable is in a separate column. In 'long' format, each row is a unique combination of onset, duration, and variable name, and a single 'amplitude' column provides the value. fillna: Replace missing values with the specified value. kwargs: Optional keyword arguments to pass onto each Variable's to_df() call (e.g., condition, entities, and timing). Returns: A pandas DataFrame.
codesearchnet
def CopyToProto(self, proto): if ((self.file is not None) and (self._serialized_start is not None) and (self._serialized_end is not None)): proto.ParseFromString(self.file.serialized_pb[self._serialized_start:self._serialized_end]) else: raise Error('Descriptor does not contain serialization.')
Copies this to the matching proto in descriptor_pb2. Args: proto: An empty proto instance from descriptor_pb2. Raises: Error: If self couldn't be serialized, due to too few constructor arguments.
codesearchnet
def _set_unknown_flag(self, name, value): setter = self.__dict__['__set_unknown'] if setter: try: setter(name, value) return value except (TypeError, ValueError): raise _exceptions.IllegalFlagValueError( '"{1}" is not valid for --{0}' .format(name, value)) except NameError: pass raise _exceptions.UnrecognizedFlagError(name, value)
Returns value if setting flag |name| to |value| returned True. Args: name: str, name of the flag to set. value: Value to set. Returns: Flag value on successful call. Raises: UnrecognizedFlagError IllegalFlagValueError
juraj-google-style
def GetHasherNamesFromString(cls, hasher_names_string): hasher_names = [] if ((not hasher_names_string) or (hasher_names_string.strip() == 'none')): return hasher_names if (hasher_names_string.strip() == 'all'): return cls.GetHasherNames() for hasher_name in hasher_names_string.split(','): hasher_name = hasher_name.strip() if (not hasher_name): continue hasher_name = hasher_name.lower() if (hasher_name in cls._hasher_classes): hasher_names.append(hasher_name) return hasher_names
Retrieves a list of hasher names from a comma separated string. Takes a string of comma separated hasher names and transforms it to a list of hasher names. Args: hasher_names_string (str): comma separated names of hashers to enable, the string 'all' to enable all hashers or 'none' to disable all hashers. Returns: list[str]: names of valid hashers from the string, or an empty list if no valid names are found.
codesearchnet
def _lsb_release_info(self): if (not self.include_lsb): return {} with open(os.devnull, 'w') as devnull: try: cmd = ('lsb_release', '-a') stdout = subprocess.check_output(cmd, stderr=devnull) except OSError: return {} content = stdout.decode(sys.getfilesystemencoding()).splitlines() return self._parse_lsb_release_content(content)
Get the information items from the lsb_release command output. Returns: A dictionary containing all information items.
codesearchnet
def _add_partition(self, connection, partition): logger.debug('Creating virtual table for partition.\n partition: {}'.format(partition.name)) sqlite_med.add_partition(connection, partition.datafile, partition.vid+'_vt')
Creates sqlite virtual table for mpr file of the given partition. Args: connection: connection to the sqlite db who stores mpr data. partition (orm.Partition):
juraj-google-style
def save_csv(X, y, path): if sparse.issparse(X): X = X.todense() np.savetxt(path, np.hstack((y.reshape((-1, 1)), X)), delimiter=',')
Save data as a CSV file. Args: X (numpy or scipy sparse matrix): Data matrix y (numpy array): Target vector. path (str): Path to the CSV file to save data.
juraj-google-style
def run_fetches_info(self): output = self._run_fetches_info return output[0] if len(output) == 1 else output
Get a str representation of the fetches used in the Session.run() call. Returns: If the information is available from one `Session.run` call, a `str` obtained from `repr(fetches)`. If the information is available from multiple `Session.run` calls, a `list` of `str` from `repr(fetches)`. If the information is not available, `None`.
github-repos
def split_input(cls, mapper_spec): params = _get_params(mapper_spec) blob_keys = params[cls.BLOB_KEYS_PARAM] if isinstance(blob_keys, basestring): blob_keys = blob_keys.split(',') blob_sizes = {} for blob_key in blob_keys: blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key)) blob_sizes[blob_key] = blob_info.size shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count) shards_per_blob = (shard_count // len(blob_keys)) if (shards_per_blob == 0): shards_per_blob = 1 chunks = [] for (blob_key, blob_size) in blob_sizes.items(): blob_chunk_size = (blob_size // shards_per_blob) for i in xrange((shards_per_blob - 1)): chunks.append(BlobstoreLineInputReader.from_json({cls.BLOB_KEY_PARAM: blob_key, cls.INITIAL_POSITION_PARAM: (blob_chunk_size * i), cls.END_POSITION_PARAM: (blob_chunk_size * (i + 1))})) chunks.append(BlobstoreLineInputReader.from_json({cls.BLOB_KEY_PARAM: blob_key, cls.INITIAL_POSITION_PARAM: (blob_chunk_size * (shards_per_blob - 1)), cls.END_POSITION_PARAM: blob_size})) return chunks
Returns a list of shard_count input_spec_shards for input_spec. Args: mapper_spec: The mapper specification to split from. Must contain 'blob_keys' parameter with one or more blob keys. Returns: A list of BlobstoreInputReaders corresponding to the specified shards.
codesearchnet
def get(self, item, default=None): if hasattr(self, item): return getattr(self, item) try: return self.__getitem__(item) except KeyError: return default
Returns the value ``item`` from the host or hosts group variables. Arguments: item(``str``): The variable to get default(``any``): Return value if item not found
juraj-google-style
def pairwise_alignment_stats(reference_seq_aln, other_seq_aln): if len(reference_seq_aln) != len(other_seq_aln): raise ValueError('Sequence lengths not equal - was an alignment run?') reference_seq_aln = ssbio.protein.sequence.utils.cast_to_str(reference_seq_aln) other_seq_aln = ssbio.protein.sequence.utils.cast_to_str(other_seq_aln) infodict = {} stats_percent_ident = get_percent_identity(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln) infodict['percent_identity'] = stats_percent_ident aln_df = get_alignment_df(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln) infodict['deletions'] = get_deletions(aln_df) infodict['insertions'] = get_insertions(aln_df) infodict['mutations'] = get_mutations(aln_df) infodict['unresolved'] = get_unresolved(aln_df) return infodict
Get a report of a pairwise alignment. Args: reference_seq_aln (str, Seq, SeqRecord): Reference sequence, alignment form other_seq_aln (str, Seq, SeqRecord): Other sequence, alignment form Returns: dict: Dictionary of information on mutations, insertions, sequence identity, etc.
juraj-google-style
def d_step(self, true_frames, gen_frames): hparam_to_disc_loss = {'least_squares': gan_losses.least_squares_discriminator_loss, 'cross_entropy': gan_losses.modified_discriminator_loss, 'wasserstein': gan_losses.wasserstein_discriminator_loss} (_, batch_size, _, _, _) = common_layers.shape_list(true_frames) all_frames = tf.concat([true_frames, tf.stop_gradient(gen_frames)], axis=1) all_logits = self.discriminator(all_frames) (true_logits, fake_logits_stop) = (all_logits[:batch_size], all_logits[batch_size:]) mean_true_logits = tf.reduce_mean(true_logits) tf.summary.scalar('mean_true_logits', mean_true_logits) mean_fake_logits_stop = tf.reduce_mean(fake_logits_stop) tf.summary.scalar('mean_fake_logits_stop', mean_fake_logits_stop) discriminator_loss_func = hparam_to_disc_loss[self.hparams.gan_loss] gan_d_loss = discriminator_loss_func(discriminator_real_outputs=true_logits, discriminator_gen_outputs=fake_logits_stop, add_summaries=True) return (gan_d_loss, true_logits, fake_logits_stop)
Performs the discriminator step in computing the GAN loss. Applies stop-gradient to the generated frames while computing the discriminator loss to make sure that the gradients are not back-propagated to the generator. This makes sure that only the discriminator is updated. Args: true_frames: True outputs gen_frames: Generated frames. Returns: d_loss: Loss component due to the discriminator.
codesearchnet
def _generate_matrix(self, hash_bytes): half_columns = self.columns // 2 + self.columns % 2 cells = self.rows * half_columns matrix = [[False] * self.columns for _ in range(self.rows)] for cell in range(cells): if self._get_bit(cell, hash_bytes[1:]): column = cell // self.rows row = cell % self.rows matrix[row][column] = True matrix[row][self.columns - column - 1] = True return matrix
Generates matrix that describes which blocks should be coloured. Arguments: hash_bytes - List of hash byte values for which the identicon is being generated. Each element of the list should be an integer from 0 to 255. Returns: List of rows, where each element in a row is boolean. True means the foreground colour should be used, False means a background colour should be used.
juraj-google-style
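A self-contained sketch of the mirrored-matrix idea described above: only the left half of the columns is filled from the hash bits, then mirrored horizontally. The bit test is simplified (the library's `_get_bit` is not reproduced) and the 5x5 geometry and input bytes are illustrative:

```python
def mirrored_matrix(hash_bytes, rows=5, columns=5):
    half_columns = columns // 2 + columns % 2           # fill the left half, then mirror
    matrix = [[False] * columns for _ in range(rows)]
    for cell in range(rows * half_columns):
        byte_index, bit_index = divmod(cell, 8)
        if (hash_bytes[byte_index % len(hash_bytes)] >> bit_index) & 1:  # simplified bit test
            row, column = cell % rows, cell // rows
            matrix[row][column] = True
            matrix[row][columns - column - 1] = True     # horizontal mirror
    return matrix

for line in mirrored_matrix(bytes(range(16))):
    print(''.join('#' if v else '.' for v in line))
```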
def push(self, x): raise NotImplementedError()
Push a new value to the tracker. Args: x: The value to be pushed.
github-repos
def _broadcast(value, target): return tf.broadcast_to(tf.convert_to_tensor(value=value, dtype=target.dtype), distribution_util.prefer_static_shape(target)[:(- 1)])
Broadcast a value to match the batching dimensions of a target. If necessary the value is converted into a tensor. Both value and target should be of the same dtype. Args: value: A value to broadcast. target: A `Tensor` of shape [b1, ..., bn, d]. Returns: A `Tensor` of shape [b1, ..., bn] and same dtype as the target.
codesearchnet
def __init__(self, request, scalars_plugin_instance): self._request = request self._scalars_plugin_instance = scalars_plugin_instance
Constructor. Args: request: A ListSessionGroupsRequest protobuf. scalars_plugin_instance: A scalars_plugin.ScalarsPlugin.
juraj-google-style
def save_libsvm(X, y, path): dump_svmlight_file(X, y, path, zero_based=False)
Save data as a LibSVM file. Args: X (numpy or scipy sparse matrix): Data matrix y (numpy array): Target vector. path (str): Path to the LibSVM file to save data.
juraj-google-style
async def wasSet(self, node, oldv): for func in self.onsets: try: await s_coro.ornot(func, node, oldv) except asyncio.CancelledError: raise except Exception: logger.exception('onset() error for %s' % (self.full,))
Fire the onset() handlers for this property. Args: node (synapse.lib.node.Node): The node whose property was set. oldv (obj): The previous value of the property.
juraj-google-style
def from_dict(self, dictionary): for (remote_name, remote_value) in dictionary.items(): local_name = next((name for (name, attribute) in self._attributes.items() if (attribute.remote_name == remote_name)), None) if local_name: setattr(self, local_name, remote_value) else: pass
Sets all the exposed ReST attributes from the given dictionary Args: dictionary (dict): dictionary containing the raw object attributes and their values. Example: >>> info = {"name": "my group", "private": False} >>> group = NUGroup() >>> group.from_dict(info) >>> print "name: %s - private: %s" % (group.name, group.private) "name: my group - private: False"
codesearchnet
def sparse(self, rows: np.ndarray=None, cols: np.ndarray=None, layer: str=None) -> scipy.sparse.coo_matrix: if (layer is None): return self.layers[''].sparse(rows=rows, cols=cols) else: return self.layers[layer].sparse(rows=rows, cols=cols)
Return the main matrix or specified layer as a scipy.sparse.coo_matrix, without loading dense matrix in RAM Args: rows: Rows to include, or None to include all cols: Columns to include, or None to include all layer: Layer to return, or None to return the default layer Returns: Sparse matrix (:class:`scipy.sparse.coo_matrix`)
codesearchnet
def compile_from_config(self, config): has_overridden_compile = self.__class__.compile != Trainer.compile if has_overridden_compile: warnings.warn("`compile()` was not called as part of model loading because the model's `compile()` method is custom. All subclassed Models that have `compile()` overridden should also override `get_compile_config()` and `compile_from_config(config)`. Alternatively, you can call `compile()` manually after loading.", stacklevel=2) return config = serialization_lib.deserialize_keras_object(config) self.compile(**config) if hasattr(self, 'optimizer') and self.built: self.optimizer.build(self.trainable_variables)
Compiles the model with the information given in config. This method uses the information in the config (optimizer, loss, metrics, etc.) to compile the model. Args: config: Dict containing information for compiling the model.
github-repos
def custom_returnvalue(self, printer, desc=None): self.return_info = ReturnInfo(None, printer, True, desc)
Use a custom function to print the return value. Args: printer (callable): A function that should take in the return value and convert it to a string. desc (str): An optional description of the return value.
juraj-google-style
def assert_existing_objects_matched(self): for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes): trackable = self._checkpoint.object_by_proto_id.get(node_id, None) if trackable is not None and trackable._update_uid < self._checkpoint.restore_uid: raise AssertionError(f'Object {node} not assigned a value from checkpoint.') for trackable_object in util.list_objects(self._object_graph_view, self._options.experimental_skip_slot_variables): if isinstance(trackable_object, data_structures.TrackableDataStructure) and (not trackable_object._trackable_children(save_type=base.SaveType.CHECKPOINT)): continue self._checkpoint.all_python_objects.add(trackable_object) unused_python_objects = object_identity.ObjectIdentitySet(_objects_with_attributes(self._checkpoint.all_python_objects)) - object_identity.ObjectIdentitySet(self._checkpoint.object_by_proto_id.values()) if unused_python_objects: num_unused_python_objects = len(list(unused_python_objects)) num_variables_to_show = min(10, num_unused_python_objects) raise AssertionError(f'Found {num_unused_python_objects} Python objects that were not bound to checkpointed values, likely due to changes in the Python program. Showing {num_variables_to_show} of {num_unused_python_objects} unmatched objects: {list(unused_python_objects)[:num_variables_to_show]}') return self
Asserts that trackable Python objects have been matched. Note that this is a weaker assertion than `assert_consumed`. It will only fail for existing Python objects which are (transitive) dependencies of the root object and which do not have an entry in the checkpoint. It will not fail, for example, if a `tf.keras.Layer` object has not yet been built and so has not created any `tf.Variable` objects. Returns: `self` for chaining. Raises: AssertionError: If a Python object exists in the transitive dependencies of the root object but does not have a value in the checkpoint.
github-repos
def mtr_lm_dense(sz): n = 2 ** sz hparams = mtf_unitransformer_base() hparams.d_model = 1024 hparams.max_length = 1024 hparams.batch_size = 128 hparams.num_hidden_layers = 6 hparams.d_ff = 8192 * n hparams.d_kv = 256 hparams.num_heads = 8 * n hparams.learning_rate_decay_steps = 65536 hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model" hparams.mesh_shape = "batch:32" return hparams
Series of architectures for language modeling. We assume infinite training data, so no dropout necessary. You can use languagemodel_wiki_noref_v32k_l1k. (1 epoch = ~46000 steps). TODO(noam): find a large enough dataset for these experiments. Args: sz: an integer Returns: a hparams
juraj-google-style
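As a quick check on the scaling above, only d_ff and num_heads grow with the size knob; the loop below simply reproduces that arithmetic.

# Illustrative only: how sz widens the feed-forward and attention dimensions.
for sz in (0, 1, 2):
    n = 2 ** sz
    print(sz, 8192 * n, 8 * n)   # d_ff, num_heads
# -> 0 8192 8
# -> 1 16384 16
# -> 2 32768 32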
def _fulfillment_from_details(data, _depth=0): if _depth == 100: raise ThresholdTooDeep() if data['type'] == 'ed25519-sha-256': public_key = base58.b58decode(data['public_key']) return Ed25519Sha256(public_key=public_key) if data['type'] == 'threshold-sha-256': threshold = ThresholdSha256(data['threshold']) for cond in data['subconditions']: cond = _fulfillment_from_details(cond, _depth+1) threshold.add_subfulfillment(cond) return threshold raise UnsupportedTypeError(data.get('type'))
Load a fulfillment for a signing spec dictionary. Args: data: tx.output[].condition.details dictionary Raises: ThresholdTooDeep: If subconditions are nested more than 100 levels deep. UnsupportedTypeError: If the condition type is not recognized.
juraj-google-style
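A minimal sketch of the details dictionaries the helper accepts, inferred directly from the branches above; the 32-byte key is a dummy value used only so the base58 decoding succeeds.

import base58

demo_public_key = base58.b58encode(b"\x01" * 32).decode()   # placeholder Ed25519 key

ed25519_details = {
    'type': 'ed25519-sha-256',
    'public_key': demo_public_key,
}
fulfillment = _fulfillment_from_details(ed25519_details)

threshold_details = {
    'type': 'threshold-sha-256',
    'threshold': 1,
    'subconditions': [ed25519_details],
}
threshold_fulfillment = _fulfillment_from_details(threshold_details)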
def _randomInts(self, shape, low, high): val = np.random.randint(low=low, high=high, size=shape) return constant_op.constant(val, dtype=dtypes.int32)
Generate a tensor of random 32-bit integer values. Note that we use numpy to generate random numbers and then feed the result through a constant op to avoid the re-rolling of TensorFlow random ops on each run in graph mode. Args: shape: The output shape. low: Lower bound of random numbers generated, inclusive. high: Upper bound of random numbers generated, exclusive. Returns: A random tensor
github-repos
def generate_pyi(src, options=None, loader=None): options = options or config.Options.create() ret = generate_pyi_ast(src, options, loader) return (ret, _output_ast(ret.ast, options))
Run the inferencer on a string of source code, producing output. Args: src: The source code. options: config.Options object. loader: A load_pytd.Loader instance. Returns: A tuple, (analyze.Analysis, pyi ast as string). Raises: CompileError: If we couldn't parse the input file. UsageError: If the input filepath is invalid.
github-repos
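A hedged sketch of running the inferencer on an in-memory snippet, relying on the default Options and loader that the signature above falls back to.

src = """\
def add(x, y):
    return x + y
"""

ret, pyi_text = generate_pyi(src)   # (analysis result, .pyi stub as a string)
print(pyi_text)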
def create_subtask(self, cor, name=None, stop_timeout=1.0): if self.stopped: raise InternalError('Cannot add a subtask to a parent that is already stopped') subtask = BackgroundTask(cor, name, loop=self._loop, stop_timeout=stop_timeout) self.add_subtask(subtask) return subtask
Create and add a subtask from a coroutine. This function will create a BackgroundTask and then call self.add_subtask() on it. Args: cor (coroutine): The coroutine that should be wrapped in a background task. name (str): An optional name for the task. stop_timeout (float): The maximum time to wait for this subtask to die after stopping it. Returns: BackgroundTask: The created subtask.
codesearchnet
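A brief sketch; `parent` is assumed to be a running BackgroundTask from the same module, and the coroutine is illustrative.

import asyncio

async def poll_sensor():
    while True:
        await asyncio.sleep(1.0)   # stand-in for real periodic work

# Assumed: `parent` is a BackgroundTask that has not been stopped yet.
subtask = parent.create_subtask(poll_sensor(), name="sensor-poller", stop_timeout=2.0)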
def _format_csv(content, delimiter): reader = csv_reader(StringIO(content), delimiter=builtin_str(delimiter)) rows = [row for row in reader] max_widths = [max(map(len, column)) for column in zip(*rows)] lines = [' '.join(('{entry:{width}}'.format(entry=entry, width=(width + 2)) for (entry, width) in zip(row, max_widths))) for row in rows] return '\n'.join(lines)
Format delimited text to have same column width. Args: content (str): The content of a metric. delimiter (str): Value separator Returns: str: Formatted content. Example: >>> content = ( "value_mse,deviation_mse,data_set\n" "0.421601,0.173461,train\n" "0.67528,0.289545,testing\n" "0.671502,0.297848,validation\n" ) >>> _format_csv(content, ",") "value_mse deviation_mse data_set\n" "0.421601 0.173461 train\n" "0.67528 0.289545 testing\n" "0.671502 0.297848 validation\n"
codesearchnet
def make(self, path, metadata=None): if self.mode != 'w': raise ValueError('`make` is only allowed in write mode.') if not isinstance(metadata, (dict, type(None))): raise ValueError(f'`metadata` should be a dict or `None`. Received: {metadata}') self._h5_entry_path = path if metadata: self._create_h5_group(path, metadata=metadata) else: self._h5_entry_group = {} self._h5_entry_initialized = False return self
Make a new H5 entry group. This method is only available in write mode. It defers the creation of the H5 entry group until `__setitem__` is called, preventing the creation of empty groups. Args: path: `str`. The variable path. metadata: Optional `dict`. The metadata to save with the H5 entry group. Defaults to `None`.
github-repos
def limit_replace(self, accountID, orderID, **kwargs): return self.replace( accountID, orderID, order=LimitOrderRequest(**kwargs) )
Shortcut to replace a pending Limit Order in an Account Args: accountID : The ID of the Account orderID : The ID of the Limit Order to replace kwargs : The arguments to create a LimitOrderRequest Returns: v20.response.Response containing the results from submitting the request
juraj-google-style
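A hedged example of the shortcut, assuming the usual v20 context layout (ctx.order.limit_replace) and a LimitOrderRequest built from instrument/units/price; the credentials and IDs are placeholders.

import v20

ctx = v20.Context("api-fxpractice.oanda.com", 443, token="YOUR_TOKEN")  # placeholder token
response = ctx.order.limit_replace(
    "101-001-1234567-001",       # accountID (placeholder)
    "42",                        # orderID of the pending Limit Order (placeholder)
    instrument="EUR_USD",        # assumed LimitOrderRequest fields
    units=100,
    price=1.2500,
)
print(response.status)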
def sg_cast(tensor, opt):
    assert opt.dtype is not None, 'dtype is mandatory.'
    return tf.cast(tensor, opt.dtype, name=opt.name)
Casts a tensor to a new type. See `tf.cast()` in tensorflow. Args: tensor: A `Tensor` or `SparseTensor` (automatically given by chain). opt: dtype: The destination type. name: If provided, it replaces the current tensor's name. Returns: A `Tensor` or `SparseTensor` with same shape as `tensor`.
juraj-google-style
def _read_tags(self): tags = self._config.get('tags', {}) logging.info('Tags:') for tag_name in tags.keys(): tag = {} tag['Key'] = tag_name tag['Value'] = tags[tag_name] self._tags.append(tag) logging.info('{} = {}'.format(tag_name, tags[tag_name])) logging.debug(json.dumps(self._tags, indent=2, sort_keys=True)) return True
Fill in the _tags list from the tags section of the config. Args: None Returns: True Todo: Figure out what could go wrong and at least acknowledge the fact that Murphy was an optimist.
codesearchnet
def _create_hparam_extractor(hparam_name): def extractor_fn(session_group): if hparam_name in session_group.hparams: return _value_to_python(session_group.hparams[hparam_name]) return None return extractor_fn
Returns an extractor function that extracts an hparam from a session group. Args: hparam_name: str. Identifies the hparam to extract from the session group. Returns: A function that takes a tensorboard.hparams.SessionGroup protocol buffer and returns the value, as a native Python object, of the hparam identified by 'hparam_name'.
juraj-google-style
def _internal_kv_put(key, value, overwrite=False): worker = ray.worker.get_global_worker() if (worker.mode == ray.worker.LOCAL_MODE): exists = (key in _local) if ((not exists) or overwrite): _local[key] = value return exists if overwrite: updated = worker.redis_client.hset(key, 'value', value) else: updated = worker.redis_client.hsetnx(key, 'value', value) return (updated == 0)
Globally associates a value with a given binary key. Unless `overwrite` is True, this only has an effect if the key does not already have a value. Returns: already_exists (bool): whether the key already had a value.
codesearchnet
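The call pattern follows directly from the code above; Ray must already be initialized, and the key/value bytes are illustrative.

import ray
ray.init()

already_there = _internal_kv_put(b"config:threshold", b"0.75")
assert already_there is False    # first write stores the value

already_there = _internal_kv_put(b"config:threshold", b"0.90")
assert already_there is True     # without overwrite, the existing value is kept

_internal_kv_put(b"config:threshold", b"0.90", overwrite=True)   # force the update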
def _compile_control_flow_expression(self, expr: Expression, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[List[tf.Tensor]]=None) -> TensorFluent: etype = expr.etype args = expr.args if (etype[1] == 'if'): condition = self._compile_expression(args[0], scope, batch_size, noise) true_case = self._compile_expression(args[1], scope, batch_size, noise) false_case = self._compile_expression(args[2], scope, batch_size, noise) fluent = TensorFluent.if_then_else(condition, true_case, false_case) else: raise ValueError('Invalid control flow expression:\n{}'.format(expr)) return fluent
Compile a control flow expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL control flow expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
codesearchnet
def __init__(self, rom_path): rom = ROM(rom_path) if rom.prg_rom_size == 0: raise ValueError('ROM has no PRG-ROM banks.') if rom.has_trainer: raise ValueError('ROM has trainer. trainer is not supported.') _ = rom.prg_rom _ = rom.chr_rom if rom.is_pal: raise ValueError('ROM is PAL. PAL is not supported.') elif rom.mapper not in {0, 1, 2, 3}: msg = 'ROM has an unsupported mapper number {}.' raise ValueError(msg.format(rom.mapper)) self.np_random = np.random.RandomState() self._rom_path = rom_path self._env = _LIB.Initialize(self._rom_path) self.viewer = None self._has_backup = False self.done = True self.controllers = [self._controller_buffer(port) for port in range(2)] self.screen = self._screen_buffer() self.ram = self._ram_buffer()
Create a new NES environment. Args: rom_path (str): the path to the ROM for the environment Returns: None
juraj-google-style
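A construction-only sketch, assuming this is the nes-py style NESEnv class; the ROM path is a placeholder and must point at a cartridge using one of the supported mappers checked above.

env = NESEnv("roms/super-mario-bros.nes")   # hypothetical ROM path
print(env.screen.shape)        # the screen buffer wired up in __init__
print(len(env.controllers))    # two controller buffers, one per port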
def ParseRecord(self, parser_mediator, key, structure): if key not in ( 'log_entry', 'log_entry_at_end', 'log_entry_offset', 'log_entry_offset_at_end'): raise errors.ParseError( 'Unable to parse record, unknown structure: {0:s}'.format(key)) try: date_time_string = self._GetISO8601String(structure) except ValueError as exception: parser_mediator.ProduceExtractionWarning( 'unable to determine date time string with error: {0!s}'.format( exception)) fraction_of_second_length = len(structure.fraction_of_second) if fraction_of_second_length == 3: date_time = dfdatetime_time_elements.TimeElementsInMilliseconds() elif fraction_of_second_length in (6, 7): date_time = dfdatetime_time_elements.TimeElementsInMicroseconds() try: date_time.CopyFromStringISO8601(date_time_string) except ValueError as exception: parser_mediator.ProduceExtractionWarning( 'unable to parse date time value: {0:s} with error: {1!s}'.format( date_time_string, exception)) return event_data = SCCMLogEventData() event_data.component = structure.component event_data.offset = 0 event_data.text = structure.text event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Parse the record and produce an SCCM log event. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the parsed structure. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. Raises: ParseError: when the structure type is unknown.
juraj-google-style
def add_snippet_client(self, name, package): if name in self._snippet_clients: raise Error( self, 'Name "%s" is already registered with package "%s", it cannot ' 'be used again.' % (name, self._snippet_clients[name].client.package)) for snippet_name, client in self._snippet_clients.items(): if package == client.package: raise Error( self, 'Snippet package "%s" has already been loaded under name' ' "%s".' % (package, snippet_name)) client = snippet_client.SnippetClient(package=package, ad=self._device) client.start_app_and_connect() self._snippet_clients[name] = client
Adds a snippet client to the management. Args: name: string, the attribute name to which to attach the snippet client. E.g. `name='maps'` attaches the snippet client to `ad.maps`. package: string, the package name of the snippet apk to connect to. Raises: Error, if a duplicated name or package is passed in.
juraj-google-style
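A hedged registration sketch; `mgr` is assumed to be an instance of the snippet management class above, already attached to a connected AndroidDevice, and the package names are illustrative.

mgr.add_snippet_client("maps", "com.example.snippets.maps")

# Re-using either the name or the package raises Error, per the checks above.
try:
    mgr.add_snippet_client("maps", "com.example.snippets.other")
except Error:
    pass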
def groups(self, group_type=None, filters=None, params=None): group = self._tcex.ti.group(group_type) for g in self.tc_requests.groups_from_tag(group, self.name, filters=filters, params=params): yield g
Gets all groups associated with this tag. Args: group_type: The type of group to retrieve. filters: Optional filters to apply to the request. params: Optional query parameters for the request. Yields: Group objects associated with the tag.
juraj-google-style
def load(cls, pkid_or_path=None): path = pkid_or_path if isinstance(path, (int, np.int32, np.int64)): raise NotImplementedError('Lookup via CMS not implemented.') elif not os.path.isfile(path): raise FileNotFoundError('File {} not found.'.format(path)) kwargs = {} fields = defaultdict(dict) with pd.HDFStore(path) as store: for key in store.keys(): if 'kwargs' in key: kwargs.update(store.get_storer(key).attrs.metadata) elif "FIELD" in key: name, dname = "_".join(key.split("_")[1:]).split("/") dname = dname.replace('values', '') fields[name][dname] = store[key] else: name = str(key[1:]) kwargs[name] = store[key] for name, field_data in fields.items(): fps = field_data.pop('data') kwargs[name] = Field(fps, field_values=[field_data[str(arr)] for arr in sorted(map(int, field_data.keys()))]) return cls(**kwargs)
Load a container object from a persistent location or file path. Args: pkid_or_path: Integer pkid corresponding to the container table or file path Returns: container: The saved container object
juraj-google-style
def export_as_package(self, package_path, cv_source): if os.path.exists(package_path): raise exceptions.UserError('{} already exists'.format(package_path)) package_name = os.path.basename(os.path.normpath(package_path)) os.makedirs(package_path) with open(os.path.join(package_path, '__init__.py'), 'wb') as f: f.write('from {}.builder import xcessiv_ensemble'.format(package_name).encode('utf8')) os.makedirs(os.path.join(package_path, 'baselearners')) open(os.path.join(package_path, 'baselearners', '__init__.py'), 'a').close() for idx, base_learner in enumerate(self.base_learners): base_learner.export_as_file(os.path.join(package_path, 'baselearners', 'baselearner' + str(idx))) self.base_learner_origin.export_as_file( os.path.join(package_path, 'metalearner'), self.secondary_learner_hyperparameters ) with open(os.path.join(package_path, 'cv.py'), 'wb') as f: f.write(cv_source.encode('utf8')) ensemble_source = '' stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py') with open(stacker_file_loc) as f: ensemble_source += f.read() ensemble_source += '\n\n' \ ' def {}(self, X):\n' \ ' return self._process_using_' \ 'meta_feature_generator(X, "{}")\n\n'\ .format(self.base_learner_origin.meta_feature_generator, self.base_learner_origin.meta_feature_generator) with open(os.path.join(package_path, 'stacker.py'), 'wb') as f: f.write(ensemble_source.encode('utf8')) builder_source = '' for idx, base_learner in enumerate(self.base_learners): builder_source += 'from {}.baselearners import baselearner{}\n'.format(package_name, idx) builder_source += 'from {}.cv import return_splits_iterable\n'.format(package_name) builder_source += 'from {} import metalearner\n'.format(package_name) builder_source += 'from {}.stacker import XcessivStackedEnsemble\n'.format(package_name) builder_source += '\nbase_learners = [\n' for idx, base_learner in enumerate(self.base_learners): builder_source += ' baselearner{}.base_learner,\n'.format(idx) builder_source += ']\n' builder_source += '\nmeta_feature_generators = [\n' for idx, base_learner in enumerate(self.base_learners): builder_source += ' baselearner{}.meta_feature_generator,\n'.format(idx) builder_source += ']\n' builder_source += '\nxcessiv_ensemble = XcessivStackedEnsemble(base_learners=base_learners,' \ ' meta_feature_generators=meta_feature_generators,' \ ' secondary_learner=metalearner.base_learner,' \ ' cv_function=return_splits_iterable)\n' with open(os.path.join(package_path, 'builder.py'), 'wb') as f: f.write(builder_source.encode('utf8'))
Exports the ensemble as a Python package and saves it to `package_path`. Args: package_path (str, unicode): Absolute/local path of place to save package in cv_source (str, unicode): String containing actual code for base learner cross-validation used to generate secondary meta-features. Raises: exceptions.UserError: If os.path.join(path, name) already exists.
juraj-google-style
def get_variation_from_key(self, experiment_key, variation_key): variation_map = self.variation_key_map.get(experiment_key) if variation_map: variation = variation_map.get(variation_key) if variation: return variation else: self.logger.error('Variation key "%s" is not in datafile.' % variation_key) self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION_ERROR)) return None self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key) self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR)) return None
Get variation given experiment and variation key. Args: experiment_key: Key representing the parent experiment of the variation. variation_key: Key representing the variation. Returns: Object representing the variation, or None if the experiment or variation key is not found in the datafile.
juraj-google-style
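A typical lookup, assuming `project_config` is an instance of the config class above built from a valid Optimizely datafile; the keys are illustrative.

variation = project_config.get_variation_from_key("checkout_experiment", "variation_b")
if variation is not None:
    print(variation.key)   # unknown keys are logged, handled, and returned as None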
def sanitize_git_path(self, uri, ref=None): if uri.endswith('.git'): dir_name = uri[:(- 4)] else: dir_name = uri dir_name = self.sanitize_uri_path(dir_name) if (ref is not None): dir_name += ('-%s' % ref) return dir_name
Take a git URI and ref and convert them to a directory-safe path. Args: uri (string): git URI (e.g. git@github.com:foo/bar.git) ref (string): optional git ref to be appended to the path Returns: str: Directory name for the supplied uri
codesearchnet
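A rough illustration; `handler` stands in for an instance of the class defining this method, and the exact cleaned form of the URI body depends on sanitize_uri_path(), which is not shown.

name = handler.sanitize_git_path("git@github.com:foo/bar.git", ref="v1.2.0")
# ".git" is stripped and "-v1.2.0" appended, yielding something like
# "git_github.com_foo_bar-v1.2.0" depending on sanitize_uri_path().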
def decode_iter_request(data: dict) -> Optional[Union[(str, int)]]: if ('response_metadata' in data): return data['response_metadata'].get('next_cursor') elif ('paging' in data): current_page = int(data['paging'].get('page', 1)) max_page = int(data['paging'].get('pages', 1)) if (current_page < max_page): return (current_page + 1) elif (('has_more' in data) and data['has_more'] and ('latest' in data)): return data['messages'][(- 1)]['ts'] return None
Decode an incoming response from an iteration request. Args: data: Response data Returns: The next iteration value (cursor, page number, or message timestamp), or None if there are no further pages.
codesearchnet
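Because the helper above is a pure function of the response payload, its behaviour can be shown directly on abbreviated Slack-style responses.

# Cursor-based pagination.
assert decode_iter_request(
    {"response_metadata": {"next_cursor": "dXNlcjpVMEc5V0ZYTlo="}}
) == "dXNlcjpVMEc5V0ZYTlo="

# Page-based pagination: returns the next page until the last one is reached.
assert decode_iter_request({"paging": {"page": 1, "pages": 3}}) == 2
assert decode_iter_request({"paging": {"page": 3, "pages": 3}}) is None

# Timestamp-based history pagination.
assert decode_iter_request(
    {"has_more": True, "latest": "now", "messages": [{"ts": "1503435956.000247"}]}
) == "1503435956.000247"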