Columns:
  code (string, lengths 20 to 4.93k)
  docstring (string, lengths 33 to 1.27k)
  source (string, 3 classes: codesearchnet, juraj-google-style, github-repos)
def segment_text(text, seg_regex=SEG_REGEX):
    for m in seg_regex.finditer(text):
        yield m.group(0)
Return an iterator of segments in the text. Args: text (unicode): string of IPA Unicode text seg_regex (_regex.Pattern): compiled regex defining a segment (base + modifiers) Return: generator: segments in the input text
codesearchnet
def send_peers(self, connection_id): with self._lock: peer_endpoints = list(self._peers.values()) if self._endpoint: peer_endpoints.append(self._endpoint) peers_response = GetPeersResponse(peer_endpoints=peer_endpoints) try: self._network.send( validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE, peers_response.SerializeToString(), connection_id, one_way=True) except ValueError: LOGGER.debug("Connection disconnected: %s", connection_id)
Sends a message containing our peers to the connection identified by connection_id. Args: connection_id (str): A unique identifier which identifies a connection on the network server socket.
juraj-google-style
def _find_reader_dataset(self, dataset_key, **dfilter): too_many = False for (reader_name, reader_instance) in self.readers.items(): try: ds_id = reader_instance.get_dataset_key(dataset_key, **dfilter) except TooManyResults: LOG.trace('Too many datasets matching key {} in reader {}'.format(dataset_key, reader_name)) too_many = True continue except KeyError: LOG.trace("Can't find dataset %s in reader %s", str(dataset_key), reader_name) continue LOG.trace('Found {} in reader {} when asking for {}'.format(str(ds_id), reader_name, repr(dataset_key))) try: return self.getitem(ds_id) except KeyError: return Node(ds_id, {'reader_name': reader_name}) if too_many: raise TooManyResults('Too many keys matching: {}'.format(dataset_key))
Attempt to find a `DatasetID` in the available readers. Args: dataset_key (str, float, DatasetID): Dataset name, wavelength, or a combination of `DatasetID` parameters to use in searching for the dataset from the available readers. **dfilter (list or str): `DatasetID` parameters besides `name` and `wavelength` to use to filter the available datasets. Passed directly to `get_dataset_key` of the readers, see that method for more information.
codesearchnet
def NetshStaticIp(interface, ip=u'127.0.0.9', subnet=u'255.255.255.255', gw=u'127.0.0.1'): args = ['/c', 'netsh', 'interface', 'ip', 'set', 'address', interface, 'static', ip, subnet, gw, '1'] res = client_utils_common.Execute('cmd', args, time_limit=(- 1), bypass_whitelist=True) return res
Changes interface to a statically set IP. Sets IP configs to local if no parameters passed. Args: interface: Name of the interface. ip: IP address. subnet: Subnet mask. gw: IP address of the default gateway. Returns: A tuple of stdout, stderr, exit_status.
codesearchnet
def _CalculateYLines(self, dists): tot_dist = sum(dists) if tot_dist > 0: pixel_dist = [float(d * (self._gheight-20))/tot_dist for d in dists] pixel_grid = [0]+[int(pd + sum(pixel_dist[0:i])) for i,pd in enumerate(pixel_dist)] else: pixel_grid = [] return pixel_grid
Builds a list with y-coordinates for the horizontal lines in the graph. Args: # One integer for each pair of stations # indicating the approximate distance dists: [0,33,140, ... ,X] Returns: # One integer y-coordinate for each station normalized between # 0 and X, where X is the height of the graph in pixels [0, 33, 140, ... , X]
juraj-google-style
def display_arr(screen, arr, video_size, transpose): if transpose: pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1)) else: pyg_img = arr pyg_img = pygame.transform.scale(pyg_img, video_size) screen.blit(pyg_img, (0, 0))
Display an image to the pygame screen. Args: screen (pygame.Surface): the pygame surface to write frames to arr (np.ndarray): numpy array representing a single frame of gameplay video_size (tuple): the size to render the frame as transpose (bool): whether to transpose the frame before displaying Returns: None
juraj-google-style
def _get_run_debug_urls(self):
    # A single file:// URL is assumed here, built from a hypothetical
    # self._dump_root attribute; the original literal is truncated.
    return ['file://' + self._dump_root]
Get the debug_urls value for the current run() call. Returns: debug_urls: (list of str) Debug URLs for the current run() call. Currently, the list consists of only one URL that is a file:// URL.
github-repos
def _deduplicate_indexed_slices(values, indices):
    unique_indices, new_index_positions = array_ops.unique(indices)
    summed_values = math_ops.unsorted_segment_sum(
        values, new_index_positions, array_ops.shape(unique_indices)[0])
    return (summed_values, unique_indices)
Sums `values` associated with any non-unique `indices`. Args: values: A `Tensor` with rank >= 1. indices: A one-dimensional integer `Tensor`, indexing into the first dimension of `values` (as in an IndexedSlices object). Returns: A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a de-duplicated version of `indices` and `summed_values` contains the sum of `values` slices associated with each unique index.
github-repos
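A minimal NumPy sketch of the de-duplication semantics implemented by _deduplicate_indexed_slices above; the arrays are illustrative values, not part of the original code.

import numpy as np

values = np.array([[1., 1.], [2., 2.], [3., 3.]])
indices = np.array([0, 0, 2])  # index 0 appears twice

unique_indices, new_positions = np.unique(indices, return_inverse=True)
summed = np.zeros((unique_indices.shape[0], values.shape[1]))
np.add.at(summed, new_positions, values)  # sum rows that share an index

print(unique_indices)  # [0 2]
print(summed)          # [[3. 3.] [3. 3.]]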
def gmeta_pop(gmeta, info=False):
    if type(gmeta) is GlobusHTTPResponse:
        gmeta = json.loads(gmeta.text)
    elif type(gmeta) is str:
        gmeta = json.loads(gmeta)
    elif type(gmeta) is not dict:
        raise TypeError('gmeta must be dict, GlobusHTTPResponse, or JSON string')
    results = []
    for res in gmeta['gmeta']:
        for con in res['content']:
            results.append(con)
    if info:
        fyi = {'total_query_matches': gmeta.get('total')}
        return (results, fyi)
    else:
        return results
Remove GMeta wrapping from a Globus Search result. This function can be called on the raw GlobusHTTPResponse that Search returns, or a string or dictionary representation of it. Arguments: gmeta (dict, str, or GlobusHTTPResponse): The Globus Search result to unwrap. info (bool): If ``False``, will return a list of the results and discard the metadata. If ``True``, will return a tuple containing the results list, and other information about the query. **Default**: ``False``. Returns: list (if ``info=False``): The unwrapped results. tuple (if ``info=True``): The unwrapped results, and a dictionary of query information.
codesearchnet
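A small usage sketch for gmeta_pop above, fed a hand-built dictionary in the GMeta shape instead of a live Globus Search response.

raw = {
    'gmeta': [
        {'content': [{'title': 'first hit'}]},
        {'content': [{'title': 'second hit'}, {'title': 'third hit'}]},
    ],
    'total': 3,
}

results, fyi = gmeta_pop(raw, info=True)
print(len(results))                # 3
print(fyi['total_query_matches'])  # 3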
def has_chosen(state, correct, msgs): if (not issubclass(type(correct), int)): raise InstructorError('Inside `has_chosen()`, the argument `correct` should be an integer.') student_process = state.student_process if (not isDefinedInProcess(MC_VAR_NAME, student_process)): raise InstructorError('Option not available in the student process') else: selected_option = getOptionFromProcess(student_process, MC_VAR_NAME) if (not issubclass(type(selected_option), int)): raise InstructorError('selected_option should be an integer') if ((selected_option < 1) or (correct < 1)): raise InstructorError('selected_option and correct should be greater than zero') if ((selected_option > len(msgs)) or (correct > len(msgs))): raise InstructorError('there are not enough feedback messages defined') feedback_msg = msgs[(selected_option - 1)] state.reporter.success_msg = msgs[(correct - 1)] state.do_test(EqualTest(selected_option, correct, feedback_msg))
Test multiple choice exercise. Test for a MultipleChoiceExercise. The correct answer (as an integer) and feedback messages are passed to this function. Args: correct (int): the index of the correct answer (should be an integer). Starts at 1. msgs (list(str)): a list containing all feedback messages belonging to each choice of the student. The list should have the same length as the number of options.
codesearchnet
def read_data_event(self, whence, complete=False, can_flush=False): return Transition(None, _read_data_handler(whence, self, complete, can_flush))
Creates a transition to a co-routine for retrieving data as bytes. Args: whence (Coroutine): The co-routine to return to after the data is satisfied. complete (Optional[bool]): True if STREAM_END should be emitted if no bytes are read or available; False if INCOMPLETE should be emitted in that case. can_flush (Optional[bool]): True if NEXT may be requested after INCOMPLETE is emitted as a result of this data request.
codesearchnet
def plot_residuals(self, plot=None): if plot is None: import matplotlib.pyplot as plot x = numpy.arange(1, len(self.residuals) + 1) y = _gvar.mean(self.residuals) yerr = _gvar.sdev(self.residuals) plot.errorbar(x=x, y=y, yerr=yerr, fmt='o', color='b') plot.ylabel('normalized residuals') xr = [x[0], x[-1]] plot.plot([x[0], x[-1]], [0, 0], 'r-') plot.fill_between( x=xr, y1=[-1,-1], y2=[1,1], color='r', alpha=0.075 ) return plot
Plot normalized fit residuals. The sum of the squares of the residuals equals ``self.chi2``. Individual residuals should be distributed about zero, in a Gaussian distribution with unit standard deviation. Args: plot: :mod:`matplotlib` plotter. If ``None``, uses ``matplotlib.pyplot``. Returns: Plotter ``plot``.
juraj-google-style
def _ParseApplicationPasswordRecord(self, parser_mediator, record): key = record.get('_key_', None) if ((not key) or (not key.startswith(b'ssgp'))): raise errors.ParseError('Unsupported application password record key value does not start with: "ssgp".') event_data = KeychainApplicationRecordEventData() event_data.account_name = self._ParseBinaryDataAsString(parser_mediator, record['acct']) event_data.comments = self._ParseBinaryDataAsString(parser_mediator, record['crtr']) event_data.entry_name = self._ParseBinaryDataAsString(parser_mediator, record['PrintName']) ssgp_hash = codecs.encode(key[4:], 'hex') event_data.ssgp_hash = codecs.decode(ssgp_hash, 'utf-8') event_data.text_description = self._ParseBinaryDataAsString(parser_mediator, record['desc']) date_time = self._ParseDateTimeValue(parser_mediator, record['cdat']) if date_time: event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) date_time = self._ParseDateTimeValue(parser_mediator, record['mdat']) if date_time: event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts the information from an application password record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. record (dict[str, object]): database record. Raises: ParseError: if the application password record cannot be parsed.
codesearchnet
def add_physical_qubit(self, physical_qubit): if (not isinstance(physical_qubit, int)): raise CouplingError('Physical qubits should be integers.') if (physical_qubit in self.physical_qubits): raise CouplingError(('The physical qubit %s is already in the coupling graph' % physical_qubit)) self.graph.add_node(physical_qubit) self._dist_matrix = None self._qubit_list = None
Add a physical qubit to the coupling graph as a node. physical_qubit (int): An integer representing a physical qubit. Raises: CouplingError: if trying to add duplicate qubit
codesearchnet
def get_last_next(self, date): (past, future) = ((None, None), (None, None)) for (mjd, value) in reversed(self.data): if (mjd <= date): past = (mjd, value) break future = (mjd, value) return (past, future)
Provide the last and next leap-second events relative to a date Args: date (float): Date in MJD Return: tuple:
codesearchnet
def get_csv(filename):
    check_if_this_file_exist(filename)
    filename = os.path.abspath(filename)
    s = command_line(['exiftool', '-G', '-csv', '-sort', filename])
    if s:
        s = s.decode('utf-8')
        return s
    else:
        return 0
Return a CSV representation of the EXIF data. Takes a filename and returns a unicode string in CSV format. Arguments: filename {string} -- your filename Returns: [unicode] -- unicode string
codesearchnet
def _read_template(template): template = _read_content_or_path(template) file_obj = StringIO.StringIO(template) return ET.parse(file_obj)
Read XSLT template. Args: template (str): Filename or XML string. Don't use ``\\n`` in case of filename. Returns: obj: Required XML parsed with ``lxml.etree``.
juraj-google-style
def _calibrate_ir(radiance, coefs): logger.debug('Calibrating to brightness temperature') n = coefs['n'] bteff = ((C2 * n) / xu.log((1 + ((C1 * (n ** 3)) / radiance.where((radiance > 0)))))) bt = xr.DataArray(((bteff * coefs['b']) + coefs['a'])) return bt.where(xu.logical_and((bt >= coefs['btmin']), (bt <= coefs['btmax'])))
Convert IR radiance to brightness temperature Reference: [IR] Args: radiance: Radiance [mW m-2 cm-1 sr-1] coefs: Dictionary of calibration coefficients. Keys: n: The channel's central wavenumber [cm-1] a: Offset [K] b: Slope [1] btmin: Minimum brightness temperature threshold [K] btmax: Maximum brightness temperature threshold [K] Returns: Brightness temperature [K]
codesearchnet
def seed(self, seed):
    if seed is None:
        self.env.seed = round(time.time())
    else:
        self.env.seed = seed
    return self.env.seed
Sets the random seed of the environment to the given value (current time, if seed=None). Naturally deterministic Environments (e.g. ALE or some gym Envs) don't have to implement this method. Args: seed (int): The seed to use for initializing the pseudo-random number generator (default=epoch time in sec). Returns: The actual seed (int) used OR None if Environment did not override this method (no seeding supported).
juraj-google-style
def tscore(sample1, sample2):
    if len(sample1) != len(sample2):
        raise ValueError("different number of values")
    error = pooled_sample_variance(sample1, sample2) / len(sample1)
    diff = statistics.mean(sample1) - statistics.mean(sample2)
    return diff / math.sqrt(error * 2)
Calculate a t-test score for the difference between two samples. Args: sample1: one sample. sample2: the other sample. Returns: The t-test score, as a float.
juraj-google-style
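A short usage sketch for tscore above; it assumes the function and its pooled_sample_variance helper are importable from a module named perf_stats (a hypothetical name).

from perf_stats import tscore  # hypothetical module name

baseline = [10.2, 10.4, 10.1, 10.3, 10.5]
candidate = [9.8, 9.9, 10.0, 9.7, 10.1]

t = tscore(baseline, candidate)
# As a rough rule of thumb, |t| well above ~2 suggests the difference
# between the samples is unlikely to be noise at this sample size.
print(round(t, 2))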
def unarchive_user(self, user_id): url = self.record_url + "/unarchive" res = requests.patch(url=url, json={"user_id": user_id}, headers=HEADERS, verify=False) self.write_response_html_to_file(res,"bob.html") res.raise_for_status()
Unarchives the user with the specified user ID. Args: user_id: `int`. The ID of the user to unarchive. Returns: `NoneType`: None.
juraj-google-style
def _preserve_bonds(self, sliced_cartesian, use_lookup=None): if (use_lookup is None): use_lookup = settings['defaults']['use_lookup'] included_atoms_set = set(sliced_cartesian.index) assert included_atoms_set.issubset(set(self.index)), 'The sliced Cartesian has to be a subset of the bigger frame' bond_dic = self.get_bonds(use_lookup=use_lookup) new_atoms = set([]) for atom in included_atoms_set: new_atoms = (new_atoms | bond_dic[atom]) new_atoms = (new_atoms - included_atoms_set) while (not (new_atoms == set([]))): index_of_interest = new_atoms.pop() included_atoms_set = (included_atoms_set | self.get_coordination_sphere(index_of_interest, n_sphere=float('inf'), only_surface=False, exclude=included_atoms_set, give_only_index=True, use_lookup=use_lookup)) new_atoms = (new_atoms - included_atoms_set) molecule = self.loc[(included_atoms_set, :)] return molecule
Is called after cutting geometric shapes. If you want to change the rules how bonds are preserved, when applying e.g. :meth:`Cartesian.cut_sphere` this is the function you have to modify. It is recommended to inherit from the Cartesian class to tailor it for your project, instead of modifying the source code of ChemCoord. Args: sliced_frame (Cartesian): use_lookup (bool): Use a lookup variable for :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified in ``settings['defaults']['use_lookup']`` Returns: Cartesian:
codesearchnet
def get_value(data, key):
    ref = data
    try:
        for subkey in key.split('.'):
            if isinstance(ref, dict):
                ref = ref[subkey]
            else:
                print('CRITICAL: Cannot use subkey %s on non-dictionary element' % subkey)
                return None
        return ref
    except KeyError:
        return None
Follow the dot notation to get the proper field, then perform the action Args: data: the data as a dictionary (required to be a dictionary) key: the key (as dot notation) into the data that gives the field (IP.src) Returns: the value of the field(subfield) if it exist, otherwise None
codesearchnet
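A quick usage sketch for get_value above, with a made-up packet-like dictionary.

packet = {'IP': {'src': '10.0.0.5', 'dst': '10.0.0.9'}, 'length': 60}

print(get_value(packet, 'IP.src'))  # 10.0.0.5
print(get_value(packet, 'IP.ttl'))  # None (missing subfield)
print(get_value(packet, 'length'))  # 60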
def add_moving_summary(*args, **kwargs): decay = kwargs.pop('decay', 0.95) coll = kwargs.pop('collection', MOVING_SUMMARY_OPS_KEY) summ_coll = kwargs.pop('summary_collections', None) assert (len(kwargs) == 0), ('Unknown arguments: ' + str(kwargs)) ctx = get_current_tower_context() if ((ctx is not None) and (not ctx.is_main_training_tower)): return [] graph = tf.get_default_graph() try: control_flow_ctx = graph._get_control_flow_context() if ((control_flow_ctx is not None) and control_flow_ctx.IsXLAContext()): return except Exception: pass if (tf.get_variable_scope().reuse is True): logger.warn('add_moving_summary() called under reuse=True scope, ignored.') return [] for x in args: assert isinstance(x, (tf.Tensor, tf.Variable)), x assert (x.get_shape().ndims == 0), 'add_moving_summary() only accepts scalar tensor! Got one with {}'.format(x.get_shape()) ema_ops = [] for c in args: name = re.sub('tower[0-9]+/', '', c.op.name) with tf.name_scope(None): if (not c.dtype.is_floating): c = tf.cast(c, tf.float32) with _enter_vs_reuse_ns('EMA') as vs: ema_var = tf.get_variable(name, shape=c.shape, dtype=c.dtype, initializer=tf.constant_initializer(), trainable=False) ns = vs.original_name_scope with tf.name_scope(ns): ema_op = moving_averages.assign_moving_average(ema_var, c, decay, zero_debias=True, name=(name + '_EMA_apply')) ema_ops.append(ema_op) with tf.name_scope(None): tf.summary.scalar((name + '-summary'), ema_op, collections=summ_coll) if (coll is not None): for op in ema_ops: tf.add_to_collection(coll, op) return ema_ops
Summarize the moving average for scalar tensors. This function is a no-op if not calling from main training tower. Args: args: scalar tensors to summarize decay (float): the decay rate. Defaults to 0.95. collection (str or None): the name of the collection to add EMA-maintaining ops. The default will work together with the default :class:`MovingAverageSummary` callback. summary_collections ([str]): the names of collections to add the summary op. Default is TF's default (`tf.GraphKeys.SUMMARIES`). Returns: [tf.Tensor]: list of tensors returned by assign_moving_average, which can be used to maintain the EMA.
codesearchnet
async def export_image(self, name: str): response = (await self.docker._query('images/{name}/get'.format(name=name), 'GET')) return response.content
Get a tarball of an image by name or id. Args: name: name/id of the image to be exported Returns: Streamreader of tarball image
codesearchnet
def print_args(output=sys.stdout): def decorator(func): 'The decorator function.\n ' @wraps(func) def _(*args, **kwargs): 'The decorated function.\n ' output.write('Args: {0}, KwArgs: {1}\n'.format(str(args), str(kwargs))) return func(*args, **kwargs) return _ return decorator
Decorate a function so that print arguments before calling it. Args: output: writable to print args. (Default: sys.stdout)
codesearchnet
def _rmsprop(self, grads, cache=None, decay_rate=0.95):
    if cache is None:
        cache = np.zeros_like(grads)
    cache = decay_rate * cache + (1 - decay_rate) * grads ** 2
    step = -grads / np.sqrt(cache + K.epsilon())
    return step, cache
Uses RMSProp to compute step from gradients. Args: grads: numpy array of gradients. cache: numpy array of same shape as `grads` as RMSProp cache decay_rate: How fast to decay cache Returns: A tuple of step: numpy array of the same shape as `grads` giving the step. Note that this does not yet take the learning rate into account. cache: Updated RMSProp cache.
juraj-google-style
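A stand-alone NumPy rendering of the same RMSProp update as _rmsprop above, with a small fixed epsilon standing in for K.epsilon(); the gradient values are illustrative.

import numpy as np

def rmsprop_step(grads, cache=None, decay_rate=0.95, eps=1e-7):
    # Mirrors the update rule above without the Keras backend.
    if cache is None:
        cache = np.zeros_like(grads)
    cache = decay_rate * cache + (1 - decay_rate) * grads ** 2
    step = -grads / np.sqrt(cache + eps)
    return step, cache

grads = np.array([0.5, -0.2])
step, cache = rmsprop_step(grads)
step, cache = rmsprop_step(grads, cache)  # cache carries over between calls
print(step, cache)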
def open(self, host, port=23): self._telnet_client.open(host, port) config_str = self._telnet_client.cmd("MN?") if config_str.startswith("MN="): config_str = config_str[len("MN="):] self.properties = dict( zip(['model', 'max_freq', 'max_atten'], config_str.split("-", 2))) self.max_atten = float(self.properties['max_atten'])
Opens a telnet connection to the desired AttenuatorDevice and queries basic information. Args: host: A valid hostname (IP address or DNS-resolvable name) to an MC-DAT attenuator instrument. port: An optional port number (defaults to telnet default 23)
juraj-google-style
def jacobian_s(nodes, degree, dimension):
    # Number of nodes of the degree-(d - 1) Jacobian surface; the "// 2"
    # is reconstructed from the loop bounds below.
    num_nodes = (degree * (degree + 1)) // 2
    result = np.empty((dimension, num_nodes), order="F")
    index = 0
    i = 0
    for num_vals in six.moves.xrange(degree, 0, -1):
        for _ in six.moves.xrange(num_vals):
            result[:, index] = nodes[:, i + 1] - nodes[:, i]
            index += 1
            i += 1
        i += 1
    return float(degree) * result
r"""Compute :math:`\frac{\partial B}{\partial s}`. .. note:: This is a helper for :func:`_jacobian_both`, which has an equivalent Fortran implementation. Args: nodes (numpy.ndarray): Array of nodes in a surface. degree (int): The degree of the surface. dimension (int): The dimension the surface lives in. Returns: numpy.ndarray: Nodes of the Jacobian surface in B |eacute| zier form.
juraj-google-style
def set_label_list(self, label_lists): if isinstance(label_lists, annotations.LabelList): label_lists = [label_lists] for label_list in label_lists: if label_list.idx is None: label_list.idx = 'default' label_list.utterance = self self.label_lists[label_list.idx] = label_list
Set the given label-list for this utterance. If the label-list-idx is not set, ``default`` is used. If there is already a label-list with the given idx, it will be overridden. Args: label_list (LabelList, list): A single label-list or a list of label-lists to add.
juraj-google-style
def needle_statistics(infile):
    alignments = list(AlignIO.parse(infile, 'emboss'))
    alignment_properties = defaultdict(dict)
    with open(infile) as f:
        line = f.readline()
        for i in range(len(alignments)):
            # EMBOSS needle reports start each record with a '#===...' header;
            # the exact string literals below are assumed, since they are
            # truncated in the original.
            while line.rstrip() != '#=======================================':
                line = f.readline()
                if not line:
                    raise StopIteration
            while line[0] == '#':
                parts = line[1:].split(':', 1)
                key = parts[0].lower().strip()
                if key == '1':
                    a_id = parts[1].strip()
                if key == '2':
                    b_id = parts[1].strip()
                if key == 'identity':
                    ident_parse = parts[1].strip().replace('(', '').replace(')', '').replace('%', '').split()
                    ident_num = int(ident_parse[0].split('/')[0])
                    ident_percent = float(ident_parse[1])
                    alignment_properties[a_id + '_' + b_id]['identity'] = ident_num
                    alignment_properties[a_id + '_' + b_id]['percent_identity'] = ident_percent
                if key == 'similarity':
                    sim_parse = parts[1].strip().replace('(', '').replace(')', '').replace('%', '').split()
                    sim_num = int(sim_parse[0].split('/')[0])
                    sim_percent = float(sim_parse[1])
                    alignment_properties[a_id + '_' + b_id]['similarity'] = sim_num
                    alignment_properties[a_id + '_' + b_id]['percent_similarity'] = sim_percent
                if key == 'gaps':
                    gap_parse = parts[1].strip().replace('(', '').replace(')', '').replace('%', '').split()
                    gap_num = int(gap_parse[0].split('/')[0])
                    gap_percent = float(gap_parse[1])
                    alignment_properties[a_id + '_' + b_id]['gaps'] = gap_num
                    alignment_properties[a_id + '_' + b_id]['percent_gaps'] = gap_percent
                if key == 'score':
                    score = float(parts[1].strip())
                    alignment_properties[a_id + '_' + b_id]['score'] = score
                line = f.readline()
    return alignment_properties
Reads in a needle alignment file and spits out statistics of the alignment. Args: infile (str): Alignment file name Returns: dict: alignment_properties - a dictionary telling you the number of gaps, identity, etc.
codesearchnet
def __init__(self, outer_index, inner_index): if outer_index.batch_dims != inner_index.batch_dims: raise ValueError('outer_index.batch_dims and inner_index.batch_dims must be the same.') super().__init__(indices=inner_index.indices + outer_index.indices * inner_index.num_segments, num_segments=inner_index.num_segments * outer_index.num_segments, batch_dims=inner_index.batch_dims) self.outer_index = outer_index self.inner_index = inner_index
Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has *num_segments* equal to *outer_index.num_segments* * *inner_index.num_segments* Args: outer_index (`IndexMap`): IndexMap. inner_index (`IndexMap`): IndexMap, must have the same shape as *outer_index*.
github-repos
def seq_int_arr(seqs): return np.array([[NT_TO_INT[c] for c in x.upper()] for x in seqs])
Convert list of ACGT strings to matrix of 1-4 ints Args: seqs (list of str): nucleotide sequences with only 'ACGT' characters Returns: numpy.array of int: matrix of integers from 1 to 4 inclusive representing A, C, G, and T str: nucleotide sequence string
codesearchnet
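A self-contained sketch of seq_int_arr above; the NT_TO_INT mapping is an assumption consistent with the docstring (A, C, G, T mapped to 1 through 4).

import numpy as np

NT_TO_INT = {'A': 1, 'C': 2, 'G': 3, 'T': 4}  # assumed mapping

def seq_int_arr(seqs):
    return np.array([[NT_TO_INT[c] for c in x.upper()] for x in seqs])

print(seq_int_arr(['acgt', 'TTAA']))
# [[1 2 3 4]
#  [4 4 1 1]]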
def _handle_is_dag_stopped(self, request): return Response(success=True, uid=request.uid, payload={'is_stopped': (request.payload['dag_name'] in self._stop_dags)})
The handler for the dag_stopped request. The dag_stopped request checks whether a dag is flagged to be terminated. Args: request (Request): Reference to a request object containing the incoming request. The payload has to contain the following fields: 'dag_name': the name of the dag that should be checked Returns: Response: A response object containing the following fields: - is_stopped: True if the dag is flagged to be stopped.
codesearchnet
def flags(cls): assert cls.__bases__ == (object,) d = dict(cls.__dict__) new_type = type(cls.__name__, (int,), d) new_type.__module__ = cls.__module__ map_ = {} for key, value in iteritems(d): if key.upper() == key and isinstance(value, integer_types): value_instance = new_type(value) setattr(new_type, key, value_instance) map_[value] = key def str_(self): value = int(self) matches = [] for k, v in map_.items(): if value & k: matches.append("%s.%s" % (type(self).__name__, v)) value &= ~k if value != 0 or not matches: matches.append(text_type(value)) return " | ".join(matches) def repr_(self): return "<%s: %d>" % (str(self), int(self)) setattr(new_type, "__repr__", repr_) setattr(new_type, "__str__", str_) return new_type
A decorator for creating an int flags class. Makes the values a subclass of the type and implements repr/str. The new class will be a subclass of int. Args: cls (type): The class to convert to an flags Returns: type: A new class :: @flags class Foo(object): FOO = 1 BAR = 2
juraj-google-style
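A brief usage sketch for the flags decorator above, following the example in its docstring; note that the six helpers it uses (iteritems, integer_types, text_type) must be available, as in the original module.

@flags
class Permission(object):
    READ = 1
    WRITE = 2
    EXECUTE = 4

value = Permission(Permission.READ | Permission.EXECUTE)
print(int(value))  # 5
print(str(value))  # e.g. "Permission.READ | Permission.EXECUTE"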
def add_graph( self, y, x_label=None, y_label="", title="", x_run=None, y_run=None, svg_size_px=None, key_position="bottom right", ): if x_run is None: x_run = self.default_x_run if y_run is None: y_run = self.default_y_run if svg_size_px is None: svg_size_px = self.default_svg_size_px for panel in self.panels: x_run = self._load_x_run(x_run) y_run = self._load_y_run(y_run) svg_size_px = self._load_svg_size_px(svg_size_px) panel.add_graph( y=y, x_run=x_run, y_run=y_run, svg_size_px=svg_size_px, y_label=y_label, x_label=x_label if x_label is not None else self.default_x_label, title=title, key_position=key_position, )
Add a new graph to the overlap report. Args: y (str): Value plotted on y-axis. x_label (str): Label on x-axis. y_label (str): Label on y-axis. title (str): Title of the plot. x_run ((float,float)): x-range. y_run ((int,int)): y-rang. svg_size_px ((int,int): Size of SVG image in pixels. key_position (str): GnuPlot position of the legend.
juraj-google-style
def _maybe_partial_apply_variables(fn, args, kwargs): def is_distributed_var(x): flat = nest.flatten(x) return flat and isinstance(flat[0], values.DistributedVariable) var_kwargs = {} nonvar_kwargs = {} if kwargs: var_kwargs = {k: v for k, v in kwargs.items() if is_distributed_var(v)} if var_kwargs: nonvar_kwargs = {k: v for k, v in kwargs.items() if not is_distributed_var(v)} positional_args = [] index_of_star_args = None for i, p in enumerate(tf_inspect.signature(fn).parameters.values()): if i == 0 and p.name == 'self': continue if p.kind == tf_inspect.Parameter.POSITIONAL_OR_KEYWORD: positional_args.append(p.name) elif p.kind == tf_inspect.Parameter.VAR_POSITIONAL: index_of_star_args = i elif p.kind == tf_inspect.Parameter.POSITIONAL_ONLY: if var_kwargs or any((is_distributed_var(a) for a in args)): raise ValueError(f'Mixing Variables and positional-only parameters not supported by TPUStrategy. Received {len(var_kwargs)} DistributedVariables in **kwargs and {sum((is_distributed_var(a) for a in args))} in *args, expected zero for both.') return (fn, args, kwargs) star_args = [] have_seen_var_arg = False for i, a in enumerate(args): if is_distributed_var(a): if index_of_star_args is not None and i >= index_of_star_args: raise ValueError('TPUStrategy.run() cannot handle Variables passed to *args. Either name the function argument, or capture the Variable implicitly.') if len(positional_args) <= i: raise ValueError('Too many positional arguments passed to call to TPUStrategy.run().') var_kwargs[positional_args[i]] = a have_seen_var_arg = True else: if index_of_star_args is not None and i >= index_of_star_args: if have_seen_var_arg: raise ValueError('TPUStrategy.run() cannot handle both Variables and a mix of positional args and *args. Either remove the *args, or capture the Variable implicitly.') else: star_args.append(a) continue if len(positional_args) <= i: raise ValueError('Too many positional arguments passed to call to TPUStrategy.run().') nonvar_kwargs[positional_args[i]] = a if var_kwargs: return (functools.partial(fn, **var_kwargs), star_args, nonvar_kwargs) return (fn, args, kwargs)
Inspects arguments to partially apply any DistributedVariable. This avoids an automatic cast of the current variable value to tensor. Note that a variable may be captured implicitly with Python scope instead of passing it to run(), but supporting run() keeps behavior consistent with MirroredStrategy. Since positional arguments must be applied from left to right, this function does some tricky function inspection to move variable positional arguments into kwargs. As a result of this, we can't support passing Variables as *args, nor as args to functions which combine both explicit positional arguments and *args. Args: fn: The function to run, as passed to run(). args: Positional arguments to fn, as passed to run(). kwargs: Keyword arguments to fn, as passed to run(). Returns: A tuple of the function (possibly wrapped), args, kwargs (both possibly filtered, with members of args possibly moved to kwargs). If no variables are found, this function is a noop. Raises: ValueError: If the function signature makes unsupported use of *args, or if too many arguments are passed.
github-repos
def __init__(self, name, description=None): super(ArtifactDefinition, self).__init__() self.conditions = [] self.description = description self.name = name self.labels = [] self.provides = [] self.sources = [] self.supported_os = [] self.urls = []
Initializes an artifact definition. Args: name (str): name that uniquely identifies the artifact definition. description (Optional[str]): description of the artifact definition.
juraj-google-style
def intersect(self, other):
    lowest_stop = min(self.stop_hz, other.stop_hz)
    highest_start = max(self.start_hz, other.start_hz)
    return FrequencyBand(highest_start, lowest_stop)
Return the intersection between this frequency band and another. Args: other (FrequencyBand): the instance to intersect with Examples:: >>> import zounds >>> b1 = zounds.FrequencyBand(500, 1000) >>> b2 = zounds.FrequencyBand(900, 2000) >>> intersection = b1.intersect(b2) >>> intersection.start_hz, intersection.stop_hz (900, 1000)
codesearchnet
def plot_weight_posteriors(names, qm_vals, qs_vals, fname): fig = figure.Figure(figsize=(6, 3)) canvas = backend_agg.FigureCanvasAgg(fig) ax = fig.add_subplot(1, 2, 1) for (n, qm) in zip(names, qm_vals): sns.distplot(qm.flatten(), ax=ax, label=n) ax.set_title('weight means') ax.set_xlim([(- 1.5), 1.5]) ax.legend() ax = fig.add_subplot(1, 2, 2) for (n, qs) in zip(names, qs_vals): sns.distplot(qs.flatten(), ax=ax) ax.set_title('weight stddevs') ax.set_xlim([0, 1.0]) fig.tight_layout() canvas.print_figure(fname, format='png') print('saved {}'.format(fname))
Save a PNG plot with histograms of weight means and stddevs. Args: names: A Python `iterable` of `str` variable names. qm_vals: A Python `iterable`, the same length as `names`, whose elements are Numpy `array`s, of any shape, containing posterior means of weight variables. qs_vals: A Python `iterable`, the same length as `names`, whose elements are Numpy `array`s, of any shape, containing posterior standard deviations of weight variables. fname: Python `str` filename to save the plot to.
codesearchnet
def call(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: TFBaseModelOutput | None=None, past_key_values: Tuple[Tuple[tf.Tensor]] | None=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: bool | None=None, output_attentions: bool | None=None, output_hidden_states: bool | None=None, return_dict: bool | None=None, labels: tf.Tensor | None=None, training: bool=False) -> Tuple[tf.Tensor] | TFSeq2SeqLMOutput: if labels is not None: labels = tf.where(labels == self.config.pad_token_id, tf.fill(shape_list(labels), tf.cast(-100, labels.dtype)), labels) use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True) lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns:
github-repos
def set_callback(self, property_name, callback): if property_name not in self._config: raise KeyError('%s is not a valid property name.' % property_name) if not callable(callback): raise TypeError('The callback object provided is not callable.') self._set_callbacks[property_name] = callback
Set a set-callback for given property. Args: property_name: Name of the property. callback: The callback as a `callable` of signature: def cbk(config): where config is the config after it is set to the new value. The callback is invoked each time the set() method is called with the matching property_name. Raises: KeyError: If property_name does not exist. TypeError: If `callback` is not callable.
github-repos
def get_clinvar_id(self, submission_id): submission_obj = self.clinvar_submission_collection.find_one({'_id': ObjectId(submission_id)}) clinvar_subm_id = submission_obj.get('clinvar_subm_id') return clinvar_subm_id
Returns the official Clinvar submission ID for a submission object Args: submission_id(str): id of the submission Returns: clinvar_subm_id(str): a string with the format SUB[0-9]. It is obtained from the clinvar portal when starting a new submission
codesearchnet
def optimize(self, sess, batch_index): feed_dict = {self._batch_index: batch_index, self._per_device_batch_size: self._loaded_per_device_batch_size, self._max_seq_len: self._loaded_max_seq_len} for tower in self._towers: feed_dict.update(tower.loss_graph.extra_compute_grad_feed_dict()) fetches = {'train': self._train_op} for tower in self._towers: fetches.update(tower.loss_graph.extra_compute_grad_fetches()) return sess.run(fetches, feed_dict=feed_dict)
Run a single step of SGD. Runs a SGD step over a slice of the preloaded batch with size given by self._loaded_per_device_batch_size and offset given by the batch_index argument. Updates shared model weights based on the averaged per-device gradients. Args: sess: TensorFlow session. batch_index: Offset into the preloaded data. This value must be between `0` and `tuples_per_device`. The amount of data to process is at most `max_per_device_batch_size`. Returns: The outputs of extra_ops evaluated over the batch.
codesearchnet
def preprocess_JPEG(self, image, **kwargs): save_kwargs = { 'progressive': VERSATILEIMAGEFIELD_PROGRESSIVE_JPEG, 'quality': QUAL } if image.mode != 'RGB': image = image.convert('RGB') return (image, save_kwargs)
Receive a PIL Image instance of a JPEG and returns 2-tuple. Args: * [0]: Image instance, converted to RGB * [1]: Dict with a quality key (mapped to the value of `QUAL` as defined by the `VERSATILEIMAGEFIELD_JPEG_RESIZE_QUALITY` setting)
juraj-google-style
def waitForEvent(self, event_name, predicate, timeout=DEFAULT_TIMEOUT): deadline = (time.time() + timeout) while (time.time() <= deadline): rpc_timeout = (deadline - time.time()) if (rpc_timeout < 0): break rpc_timeout = min(rpc_timeout, MAX_TIMEOUT) try: event = self.waitAndGet(event_name, rpc_timeout) except TimeoutError: break if predicate(event): return event raise TimeoutError(self._ad, ('Timed out after %ss waiting for an "%s" event that satisfies the predicate "%s".' % (timeout, event_name, predicate.__name__)))
Wait for an event of a specific name that satisfies the predicate. This call will block until the expected event has been received or time out. The predicate function defines the condition the event is expected to satisfy. It takes an event and returns True if the condition is satisfied, False otherwise. Note all events of the same name that are received but don't satisfy the predicate will be discarded and not be available for further consumption. Args: event_name: string, the name of the event to wait for. predicate: function, a function that takes an event (dictionary) and returns a bool. timeout: float, default is 120s. Returns: dictionary, the event that satisfies the predicate if received. Raises: TimeoutError: raised if no event that satisfies the predicate is received after timeout seconds.
codesearchnet
def total_stored(self, wanted, slots=None): if slots is None: slots = self.window.slots wanted = make_slot_check(wanted) return sum(slot.amount for slot in slots if wanted(slot))
Calculates the total number of items of that type in the current window or given slot range. Args: wanted: function(Slot) or Slot or itemID or (itemID, metadata)
juraj-google-style
def convert(self, value): if self._type is str: return str(value) elif self._type is int: try: return int(value) except (UnicodeError, ValueError): raise WorkflowArgumentError('Cannot convert {} to int'.format(value)) elif self._type is float: try: return float(value) except (UnicodeError, ValueError): raise WorkflowArgumentError('Cannot convert {} to float'.format(value)) elif self._type is bool: if isinstance(value, bool): return bool(value) value = value.lower() if value in ('true', '1', 'yes', 'y'): return True elif value in ('false', '0', 'no', 'n'): return False raise WorkflowArgumentError('Cannot convert {} to bool'.format(value)) else: return value
Convert the specified value to the type of the option. Args: value: The value that should be converted. Returns: The value with the type given by the option.
juraj-google-style
def ReceiveMessages(self, client_id, messages): if data_store.RelationalDBEnabled(): return self.ReceiveMessagesRelationalFlows(client_id, messages) now = time.time() with queue_manager.QueueManager(token=self.token) as manager: for (session_id, msgs) in iteritems(collection.Group(messages, operator.attrgetter('session_id'))): leftover_msgs = self.HandleWellKnownFlows(msgs) unprocessed_msgs = [] for msg in leftover_msgs: if ((msg.auth_state == msg.AuthorizationState.AUTHENTICATED) or (msg.session_id == self.unauth_allowed_session_id)): unprocessed_msgs.append(msg) if (len(unprocessed_msgs) < len(leftover_msgs)): logging.info('Dropped %d unauthenticated messages for %s', (len(leftover_msgs) - len(unprocessed_msgs)), client_id) if (not unprocessed_msgs): continue for msg in unprocessed_msgs: manager.QueueResponse(msg) for msg in unprocessed_msgs: if (msg.request_id == 0): manager.QueueNotification(session_id=msg.session_id) break elif (msg.type == rdf_flows.GrrMessage.Type.STATUS): if msg.HasTaskID(): manager.DeQueueClientRequest(msg) manager.QueueNotification(session_id=msg.session_id, last_status=msg.request_id) stat = rdf_flows.GrrStatus(msg.payload) if (stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED): crash_details = rdf_client.ClientCrash(client_id=client_id, session_id=session_id, backtrace=stat.backtrace, crash_message=stat.error_message, nanny_status=stat.nanny_status, timestamp=rdfvalue.RDFDatetime.Now()) events.Events.PublishEvent('ClientCrash', crash_details, token=self.token) logging.debug('Received %s messages from %s in %s sec', len(messages), client_id, (time.time() - now))
Receives and processes the messages from the source. For each message we update the request object, and place the response in that request's queue. If the request is complete, we send a message to the worker. Args: client_id: The client which sent the messages. messages: A list of GrrMessage RDFValues.
codesearchnet
def generate_host_passthrough(self, vcpu_num): cpu = ET.Element('cpu', mode='host-passthrough') cpu.append(self.generate_topology(vcpu_num)) if vcpu_num > 1: cpu.append(self.generate_numa(vcpu_num)) return cpu
Generate host-passthrough XML cpu node Args: vcpu_num(str): number of virtual CPUs Returns: lxml.etree.Element: CPU XML node
juraj-google-style
def _ReadParserPresetValues(self, preset_definition_values): if not preset_definition_values: raise errors.MalformedPresetError('Missing preset definition values.') name = preset_definition_values.get('name', None) if not name: raise errors.MalformedPresetError( 'Invalid preset definition missing name.') parsers = preset_definition_values.get('parsers', None) if not parsers: raise errors.MalformedPresetError( 'Invalid preset definition missing parsers.') parser_preset = ParserPreset(name, parsers) for operating_system_values in preset_definition_values.get( 'operating_systems', []): operating_system = self._ReadOperatingSystemArtifactValues( operating_system_values) parser_preset.operating_systems.append(operating_system) return parser_preset
Reads a parser preset from a dictionary. Args: preset_definition_values (dict[str, object]): preset definition values. Returns: ParserPreset: a parser preset. Raises: MalformedPresetError: if the format of the preset definition is not set or incorrect, or the preset of a specific operating system has already been set.
juraj-google-style
def __init__(self, sess): _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession)) self.session = sess
Constructor. Args: sess: A tensorflow Session object.
github-repos
def on_raw_update( self=None, group: int = 0 ) -> callable: def decorator(func: callable) -> Tuple[Handler, int]: if isinstance(func, tuple): func = func[0].callback handler = pyrogram.RawUpdateHandler(func) if isinstance(self, int): return handler, group if self is None else group if self is not None: self.add_handler(handler, group) return handler, group return decorator
Use this decorator to automatically register a function for handling raw updates. This does the same thing as :meth:`add_handler` using the :class:`RawUpdateHandler`. Args: group (``int``, *optional*): The group identifier, defaults to 0.
juraj-google-style
def read_model(input_tflite_file): if not gfile.Exists(input_tflite_file): raise RuntimeError('Input file not found at %r\n' % input_tflite_file) with gfile.GFile(input_tflite_file, 'rb') as input_file_handle: model_bytearray = bytearray(input_file_handle.read()) return read_model_from_bytearray(model_bytearray)
Reads a tflite model as a python object. Args: input_tflite_file: Full path name to the input tflite file Raises: RuntimeError: If input_tflite_file path is invalid. IOError: If input_tflite_file cannot be opened. Returns: A python object corresponding to the input tflite file.
github-repos
def _GetTypeIndicators(cls, signature_scanner, specification_store, remainder_list, path_spec, resolver_context=None): type_indicator_list = [] file_object = resolver.Resolver.OpenFileObject(path_spec, resolver_context=resolver_context) scan_state = pysigscan.scan_state() try: signature_scanner.scan_file_object(scan_state, file_object) for scan_result in iter(scan_state.scan_results): format_specification = specification_store.GetSpecificationBySignature(scan_result.identifier) if (format_specification.identifier not in type_indicator_list): type_indicator_list.append(format_specification.identifier) for analyzer_helper in remainder_list: result = analyzer_helper.AnalyzeFileObject(file_object) if (result is not None): type_indicator_list.append(result) finally: file_object.close() return type_indicator_list
Determines if a file contains a supported format types. Args: signature_scanner (pysigscan.scanner): signature scanner. specification_store (FormatSpecificationStore): specification store. remainder_list (list[AnalyzerHelper]): remaining analyzer helpers that do not have a format specification. path_spec (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built-in context which is not multi process safe. Returns: list[str]: supported format type indicators.
codesearchnet
def delete_resource_group(access_token, subscription_id, rgname): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '?api-version=', RESOURCE_API]) return do_delete(endpoint, access_token)
Delete the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response.
juraj-google-style
def parse_pyc_string(data): return pyc.loads(data)
Parse pyc data from a string. Args: data: pyc data Returns: An instance of pycnite.types.CodeTypeBase.
github-repos
def get_address_coords(self, address):
    # The endpoint literal is truncated in the original; the public Google
    # Geocoding API endpoint is assumed here, with the address URL-quoted.
    url = ('https://maps.googleapis.com/maps/api/geocode/json?address='
           + requests.utils.quote(address))
    r = requests.get(url)
    r.raise_for_status()
    results = r.json()['results']
    lat = results[0]['geometry']['location']['lat']
    lng = results[0]['geometry']['location']['lng']
    return (lat, lng)
Use the google geocoder to get latitude and longitude for an address string Args: address: any address string Returns: A tuple of (lat,lng)
codesearchnet
def __init__(self, latent_size): super(ProbabilisticGrammarVariational, self).__init__() self.latent_size = latent_size self.encoder_net = tf.keras.Sequential([ tf.keras.layers.Conv1D(64, 3, padding="SAME"), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation(tf.nn.elu), tf.keras.layers.Conv1D(128, 3, padding="SAME"), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation(tf.nn.elu), tf.keras.layers.Dropout(0.1), tf.keras.layers.GlobalAveragePooling1D(), tf.keras.layers.Dense(latent_size * 2, activation=None), ])
Constructs a variational posterior for a probabilistic grammar. Args: latent_size: Number of dimensions in the latent code.
juraj-google-style
def resolve(node, source_info, graphs, resolver): visitor = FunctionVisitor(source_info, graphs, resolver) node = visitor.visit(node) return node
Performs type inference. Args: node: ast.AST source_info: transformer.SourceInfo graphs: Dict[ast.FunctionDef, cfg.Graph] resolver: Resolver Returns: ast.AST
github-repos
def complain(distribution_name): try: pkg_resources.get_distribution(distribution_name) warnings.warn( "The {pkg} distribution is now obsolete. " "Please `pip uninstall {pkg}`. " "In the future, this warning will become an ImportError.".format( pkg=distribution_name ), DeprecationWarning, ) except pkg_resources.DistributionNotFound: pass
Issue a warning if `distribution_name` is installed. In a future release, this method will be updated to raise ImportError rather than just send a warning. Args: distribution_name (str): The name of the obsolete distribution.
juraj-google-style
def create_migration_template(name): assert name, 'Name of the migration can not be empty.' from . import migrations package = migrations prefix = package.__name__ + '.' all_versions = [] for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix): version = int(modname.split('.')[-1].split('_')[0]) all_versions.append(version) next_number = max(all_versions) + 1 next_migration_name = '{}_{}.py'.format(next_number, name) migration_fullname = os.path.join(package.__path__[0], next_migration_name) with open(migration_fullname, 'w') as f: f.write(MIGRATION_TEMPLATE) return migration_fullname
Creates migration file. Returns created file name. Args: name (str): name of the migration. Returns: str: name of the migration file.
juraj-google-style
def ipv4_lstrip_zeros(address):
    obj = address.strip().split('.')
    for x, y in enumerate(obj):
        obj[x] = y.split('/')[0].lstrip('0')
        if obj[x] in ['', None]:
            obj[x] = '0'
    return '.'.join(obj)
The function to strip leading zeros in each octet of an IPv4 address. Args: address (:obj:`str`): An IPv4 address. Returns: str: The modified IPv4 address.
juraj-google-style
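A quick usage sketch for ipv4_lstrip_zeros above.

print(ipv4_lstrip_zeros('192.168.001.007'))    # 192.168.1.7
print(ipv4_lstrip_zeros(' 010.000.000.001 '))  # 10.0.0.1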
def std(x, axis=None, keepdims=False): if x.dtype.base_dtype == dtypes_module.bool: x = math_ops.cast(x, floatx()) return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)
Standard deviation of a tensor, alongside the specified axis. It is an alias to `tf.math.reduce_std`. Args: x: A tensor or variable. It should have numerical dtypes. Boolean type inputs will be converted to float. axis: An integer, the axis to compute the standard deviation. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(x), rank(x))`. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with the standard deviation of elements of `x` with same dtype. Boolean type input will be converted to float.
github-repos
def contains(self, time: datetime.datetime, inclusive: bool = True) -> bool:
    if inclusive:
        return self.start <= time <= self.end
    else:
        return self.start < time < self.end
Does the interval contain a momentary time? Args: time: the ``datetime.datetime`` to check inclusive: use inclusive rather than exclusive range checks?
juraj-google-style
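A short illustration of the interval-containment check above, using a minimal hypothetical Interval class that carries the same method.

import datetime

class Interval:
    # Hypothetical minimal class mirroring the method above.
    def __init__(self, start: datetime.datetime, end: datetime.datetime):
        self.start = start
        self.end = end

    def contains(self, time: datetime.datetime, inclusive: bool = True) -> bool:
        if inclusive:
            return self.start <= time <= self.end
        return self.start < time < self.end

day = Interval(datetime.datetime(2024, 1, 1), datetime.datetime(2024, 1, 2))
print(day.contains(datetime.datetime(2024, 1, 1, 12)))               # True
print(day.contains(datetime.datetime(2024, 1, 2), inclusive=False))  # False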
def add(self, key, value): if isinstance(value, list): for val in value: self._add_arg_python(key, val) elif isinstance(value, dict): err = 'Dictionary types are not currently supported for field.' print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err)) else: mask = False env_var = re.compile(r'^\$env\.(.*)$') envs_var = re.compile(r'^\$envs\.(.*)$') if env_var.match(str(value)): env_key = env_var.match(str(value)).groups()[0] value = os.environ.get(env_key, value) elif envs_var.match(str(value)): env_key = envs_var.match(str(value)).groups()[0] value = os.environ.get(env_key, value) mask = True self._add_arg(key, value, mask)
Add CLI Arg to the list of values. Args: key (string): The CLI Args key (e.g., --name). value (string): The CLI Args value (e.g., bob).
juraj-google-style
def gen_sl_transform_matricies(area_multiple):
    # The upper bound on j is reconstructed as area_multiple // i (truncated
    # in the original); each matrix then has determinant area_multiple.
    return [np.array(((i, j), (0, area_multiple / i)))
            for i in get_factors(area_multiple)
            for j in range(area_multiple // i)]
Generates the transformation matrices that convert a set of 2D vectors into a super lattice of integer area multiple as proven in Cassels: Cassels, John William Scott. An introduction to the geometry of numbers. Springer Science & Business Media, 2012. Args: area_multiple(int): integer multiple of unit cell area for super lattice area Returns: matrix_list: transformation matrices to convert unit vectors to super lattice vectors
juraj-google-style
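A worked illustration of the enumeration above for area_multiple = 2; get_factors is assumed to return the positive divisors, and the function body restates the one above so the sketch runs on its own.

import numpy as np

def get_factors(n):
    # Assumed helper: positive divisors of n.
    return [i for i in range(1, n + 1) if n % i == 0]

def gen_sl_transform_matricies(area_multiple):
    return [np.array(((i, j), (0, area_multiple / i)))
            for i in get_factors(area_multiple)
            for j in range(area_multiple // i)]

for m in gen_sl_transform_matricies(2):
    print(m)
# Three matrices, each with determinant 2:
# [[1, 0], [0, 2]], [[1, 1], [0, 2]], [[2, 0], [0, 1]]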
def read(self, nodes=None, **kwargs): if nodes is None: required_nodes = self.wishlist - set(self.datasets.keys()) nodes = self.dep_tree.leaves(nodes=required_nodes) return self._read_datasets(nodes, **kwargs)
Load datasets from the necessary reader. Args: nodes (iterable): DependencyTree Node objects **kwargs: Keyword arguments to pass to the reader's `load` method. Returns: DatasetDict of loaded datasets
juraj-google-style
def cctop_submit(seq_str): url = 'http: r = requests.post(url) jobid = r.text.split('ID: ')[1] return jobid
Submit a protein sequence string to CCTOP and return the job ID. Args: seq_str (str): Protein sequence as a string Returns: str: Job ID on the CCTOP server
codesearchnet
def KernelVersion(): rtl_osversioninfoexw = RtlOSVersionInfoExw() try: RtlGetVersion(rtl_osversioninfoexw) except OSError: return 'unknown' return ('%d.%d.%d' % (rtl_osversioninfoexw.dwMajorVersion, rtl_osversioninfoexw.dwMinorVersion, rtl_osversioninfoexw.dwBuildNumber))
Gets the kernel version as string, eg. "5.1.2600". Returns: The kernel version, or "unknown" in the case of failure.
codesearchnet
def __init__(self, resolver_context, file_system, path_spec, is_root=False): location = getattr(path_spec, 'location', None) is_windows_device = False if platform.system() == 'Windows' and location: try: is_windows_device = pysmdev.check_device(location) except IOError: pass stat_info = None if not is_windows_device and location: try: stat_info = os.lstat(location) except OSError as exception: raise errors.BackEndError( 'Unable to retrieve stat object with error: {0!s}'.format( exception)) super(OSFileEntry, self).__init__( resolver_context, file_system, path_spec, is_root=is_root, is_virtual=False) self._is_windows_device = is_windows_device self._name = None self._stat_info = stat_info if is_windows_device: self.entry_type = definitions.FILE_ENTRY_TYPE_DEVICE elif stat_info: is_link = os.path.islink(location) if stat.S_ISLNK(stat_info.st_mode) or is_link: self.entry_type = definitions.FILE_ENTRY_TYPE_LINK elif stat.S_ISREG(stat_info.st_mode): self.entry_type = definitions.FILE_ENTRY_TYPE_FILE elif stat.S_ISDIR(stat_info.st_mode): self.entry_type = definitions.FILE_ENTRY_TYPE_DIRECTORY elif (stat.S_ISCHR(stat_info.st_mode) or stat.S_ISBLK(stat_info.st_mode)): self.entry_type = definitions.FILE_ENTRY_TYPE_DEVICE elif stat.S_ISFIFO(stat_info.st_mode): self.entry_type = definitions.FILE_ENTRY_TYPE_PIPE elif stat.S_ISSOCK(stat_info.st_mode): self.entry_type = definitions.FILE_ENTRY_TYPE_SOCKET
Initializes a file entry. Args: resolver_context (Context): resolver context. file_system (FileSystem): file system. path_spec (PathSpec): path specification. is_root (Optional[bool]): True if the file entry is the root file entry of the corresponding file system. Raises: BackEndError: If an OSError comes up it is caught and an BackEndError error is raised instead.
juraj-google-style
def IsNTFS(self): tsk_fs_type = self.GetFsType() return (tsk_fs_type in [pytsk3.TSK_FS_TYPE_NTFS, pytsk3.TSK_FS_TYPE_NTFS_DETECT])
Determines if the file system is NTFS. Returns: bool: True if the file system is NTFS.
codesearchnet
def _package_path(package): from os import path confdir = config_dir() return path.join(confdir, '{}.cfg'.format(package))
Returns the full path to the default package configuration file. Args: package (str): name of the python package to return a path for.
codesearchnet
def prepend_block(self, node, reverse=False): if (not isinstance(node, grammar.STATEMENTS)): raise ValueError if reverse: self.to_prepend_block[(- 1)].appendleft(node) else: self.to_prepend_block[(- 1)].append(node)
Prepend a statement to the current block. Args: node: The statement to prepend. reverse: When called multiple times, this flag determines whether the statement should be prepended or appended to the already inserted statements. Raises: ValueError: If the given node is not a statement.
codesearchnet
def __init__(self, swap, expiry_date=None, dtype=None, name=None): self._name = name or 'swaption' with tf.name_scope(self._name): self._dtype = dtype self._expiry_date = dates.convert_to_date_tensor(expiry_date) self._swap = swap
Initialize a batch of European swaptions.

Args:
    swap: An instance of `InterestRateSwap` specifying the interest rate
        swaps underlying the swaptions. The batch size of the swaptions
        being created would be the same as the batch size of the `swap`.
    expiry_date: An optional rank 1 `DateTensor` specifying the expiry
        dates for each swaption. The shape of the input should be the same
        as the batch size of the `swap` input.
        Default value: None in which case the option expiry date is the
        same as the start date of each underlying swap.
    dtype: `tf.Dtype`. If supplied, the dtype for the real variables or ops
        either supplied to the Swaption object or created by the Swaption
        object.
        Default value: None which maps to the default dtype inferred by
        TensorFlow.
    name: Python str. The name to give to the ops created by this class.
        Default value: `None` which maps to 'swaption'.
github-repos
def _music_lib_search(self, search, start, max_items):
    response = self.contentDirectory.Browse([
        ('ObjectID', search),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    metadata = {}
    for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
        metadata[camel_to_underscore(tag)] = int(response[tag])
    return response, metadata
Perform a music library search and extract search numbers.

You can get an overview of all the relevant search prefixes (like 'A:')
and their meaning with the request:

.. code ::

    response = device.contentDirectory.Browse([
        ('ObjectID', '0'),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', 0),
        ('RequestedCount', 100),
        ('SortCriteria', '')
    ])

Args:
    search (str): The ID to search.
    start (int): The index of the first item to return.
    max_items (int): The maximum number of items to return.

Returns:
    tuple: (response, metadata) where response is the raw metadata returned
        by the Browse call and metadata is a dict with the 'number_returned',
        'total_matches' and 'update_id' integers
codesearchnet
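A hedged usage sketch, assuming `device` is a connected speaker object exposing this (internal) method; 'A:ARTIST' is one of the library prefixes mentioned above.

response, metadata = device._music_lib_search('A:ARTIST', 0, 100)
print(metadata['total_matches'], 'artists in the library')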
def get_metric_values(self):
    group_names = self.properties.get('metric-groups', None)
    if not group_names:
        group_names = self.manager.get_metric_values_group_names()
    ret = []
    for group_name in group_names:
        try:
            mo_val = self.manager.get_metric_values(group_name)
            ret_item = (group_name, mo_val)
            ret.append(ret_item)
        except ValueError:
            pass
    return ret
Get the faked metrics, for all metric groups and all resources that have been prepared on the manager object of this context object.

Returns:
    iterable of tuple (group_name, iterable of values): The faked metrics,
        in the order they had been added, where:

        group_name (string): Metric group name.

        values (:class:`~zhmcclient.FakedMetricObjectValues`): The metric
            values for one resource at one point in time.
codesearchnet
def emit_code_from_ir(sql_query_tree, compiler_metadata): context = CompilationContext( query_path_to_selectable=dict(), query_path_to_location_info=sql_query_tree.query_path_to_location_info, query_path_to_output_fields=sql_query_tree.query_path_to_output_fields, query_path_to_filters=sql_query_tree.query_path_to_filters, query_path_to_node=sql_query_tree.query_path_to_node, compiler_metadata=compiler_metadata, ) return _query_tree_to_query(sql_query_tree.root, context)
Return a SQLAlchemy Query from a passed SqlQueryTree. Args: sql_query_tree: SqlQueryTree, tree representation of the query to emit. compiler_metadata: SqlMetadata, SQLAlchemy specific metadata. Returns: SQLAlchemy Query
juraj-google-style
def get_paginated_catalogs(self, querystring=None):
    return self._load_data(
        self.CATALOGS_ENDPOINT,
        default=[],
        querystring=querystring,
        traverse_pagination=False,
        many=False)
Return a paginated list of course catalogs, including name and ID. Returns: dict: Paginated response containing catalogs available for the user.
codesearchnet
def _find_mapreduce_yaml(start, checked): dir = start while dir not in checked: checked.add(dir) for mr_yaml_name in MR_YAML_NAMES: yaml_path = os.path.join(dir, mr_yaml_name) if os.path.exists(yaml_path): return yaml_path dir = os.path.dirname(dir) return None
Traverse the directory tree identified by start until a directory already in checked is encountered or the path of mapreduce.yaml is found. Checked is present both to make loop termination easy to reason about and so that the same directories do not get rechecked. Args: start: the path to start in and work upward from checked: the set of already examined directories Returns: the path of mapreduce.yaml file or None if not found.
juraj-google-style
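A small illustrative call, assuming the helper is invoked from application code; the start directory is arbitrary.

import os

yaml_path = _find_mapreduce_yaml(os.getcwd(), set())
if yaml_path:
    print('Found mapreduce.yaml at', yaml_path)
else:
    print('mapreduce.yaml not found')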
def set_calibration(self, enabled, imus):
    if len(imus) == 0:
        imus = list(range(MAX_IMUS))
    for i in imus:
        if i < 0 or i >= MAX_IMUS:
            logger.warn('Invalid IMU index {} in set_calibration'.format(i))
            continue
        self.imus[i]._use_calibration = enabled
Set calibration state for attached IMUs. Args: enabled (bool): True to apply calibration to IMU data (if available). False to output uncalibrated data. imus (list): indicates which IMUs the calibration state should be set on. Empty list or [0, 1, 2, 3, 4] will apply to all IMUs, [0, 1] only to first 2 IMUs, etc.
codesearchnet
def write_byte(self, value):
    if isinstance(value, bytes):
        self.stream.write(value)
    elif isinstance(value, str):
        self.stream.write(value.encode('utf-8'))
    elif isinstance(value, int):
        self.stream.write(bytes([value]))
Write a single byte to the stream. Args: value (bytes, str or int): value to write to the stream.
codesearchnet
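A short sketch of the three accepted value types, assuming `writer` is an instance of the class exposing this method.

writer.write_byte(b'\x01')   # bytes are written as-is
writer.write_byte('A')       # str is UTF-8 encoded first
writer.write_byte(65)        # int is wrapped in a single-byte bytes object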
def SetPreferredLanguageIdentifier(self, language_identifier): if not isinstance(language_identifier, py2to3.STRING_TYPES): raise ValueError('Language identifier is not a string.') values = language_ids.LANGUAGE_IDENTIFIERS.get( language_identifier.lower(), None) if not values: raise KeyError('Language identifier: {0:s} is not defined.'.format( language_identifier)) self._language_identifier = language_identifier self._lcid = values[0]
Sets the preferred language identifier. Args: language_identifier (str): language identifier string such as "en-US" for US English or "is-IS" for Icelandic. Raises: KeyError: if the language identifier is not defined. ValueError: if the language identifier is not a string type.
juraj-google-style
def combine_with_wd_noise(f_n, amp_n, f_n_wd, amp_n_wd): amp_n_wd_interp = interpolate.interp1d(f_n_wd, amp_n_wd, bounds_error=False, fill_value=1e-30) amp_n_wd = amp_n_wd_interp(f_n) amp_n = amp_n*(amp_n >= amp_n_wd) + amp_n_wd*(amp_n < amp_n_wd) return f_n, amp_n
Combine noise with wd noise. Combines noise and white dwarf background noise based on greater amplitude value at each noise curve step. Args: f_n (float array): Frequencies of noise curve. amp_n (float array): Amplitude values of noise curve. f_n_wd (float array): Frequencies of wd noise. amp_n_wd (float array): Amplitude values of wd noise. Returns: (tuple of float arrays): Amplitude values of combined noise curve.
juraj-google-style
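A small numerical sketch with made-up noise curves, assuming numpy and scipy.interpolate are available as in the snippet.

import numpy as np

f = np.logspace(-4, 0, 500)                   # noise-curve frequencies (Hz)
amp = 1e-20 * np.sqrt(1.0 + (1e-3 / f)**4)    # made-up instrument noise amplitude
f_wd = np.logspace(-4, -1, 200)               # white dwarf background frequencies
amp_wd = 5e-21 * np.ones_like(f_wd)           # made-up flat background level

f_comb, amp_comb = combine_with_wd_noise(f, amp, f_wd, amp_wd)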
def open(self, mode=None): if mode is None: mode = self.mode elif mode not in ['r', 'w', 'a']: raise ValueError('Invalid mode! Modes: [\'a\', \'r\', \'w\']') if self._file is None: self._file = h5py.File(self.path, mode=mode)
Open the container file. Args: mode (str): Either 'r' for read-only, 'w' for truncate and write or 'a' for append. (default: 'a'). If ``None``, uses ``self.mode``.
juraj-google-style
def write_gff_file(self, outfile, force_rerun=False): if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun): with open(outfile, "w") as out_handle: GFF.write([self], out_handle) self.feature_path = outfile
Write a GFF file for the protein features; ``features`` will now load directly from this file.

Args:
    outfile (str): Path to new GFF file to be written to
    force_rerun (bool): If an existing file should be overwritten
juraj-google-style
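A hedged usage sketch, assuming `protein` is an instance of the class exposing this method; the output filename is a placeholder.

protein.write_gff_file('protein_features.gff', force_rerun=True)
print(protein.feature_path)   # now points at the newly written GFF file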
def get_sari_score(source_ids, prediction_ids, list_of_targets, max_gram_size=4, beta_for_deletion=0):
    addition_scores = []
    keep_scores = []
    deletion_scores = []
    for n in range(1, max_gram_size + 1):
        source_counts = _get_ngram_counter(source_ids, n)
        prediction_counts = _get_ngram_counter(prediction_ids, n)
        target_counts = collections.Counter()
        weighted_target_counts = collections.Counter()
        num_nonempty_targets = 0
        for target_ids_i in list_of_targets:
            target_counts_i = _get_ngram_counter(target_ids_i, n)
            if target_counts_i:
                weighted_target_counts += target_counts_i
                num_nonempty_targets += 1
        for gram in weighted_target_counts.keys():
            weighted_target_counts[gram] /= num_nonempty_targets
            target_counts[gram] = 1
        keep_scores.append(get_keep_score(source_counts, prediction_counts,
                                          weighted_target_counts))
        deletion_scores.append(get_deletion_score(source_counts, prediction_counts,
                                                  weighted_target_counts,
                                                  beta_for_deletion))
        addition_scores.append(get_addition_score(source_counts, prediction_counts,
                                                  target_counts))
    avg_keep_score = sum(keep_scores) / max_gram_size
    avg_addition_score = sum(addition_scores) / max_gram_size
    avg_deletion_score = sum(deletion_scores) / max_gram_size
    sari = (avg_keep_score + avg_addition_score + avg_deletion_score) / 3.0
    return sari, avg_keep_score, avg_addition_score, avg_deletion_score
Compute the SARI score for a single prediction and one or more targets. Args: source_ids: a list / np.array of SentencePiece IDs prediction_ids: a list / np.array of SentencePiece IDs list_of_targets: a list of target ID lists / np.arrays max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams, bigrams, and trigrams) beta_for_deletion: beta for deletion F score. Returns: the SARI score and its three components: add, keep, and deletion scores
codesearchnet
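An illustrative call with toy token IDs, assuming the helper functions referenced in the snippet are in scope.

source_ids = [5, 8, 13, 21, 4]
prediction_ids = [5, 8, 21, 4]
targets = [[5, 8, 21, 4], [5, 8, 13, 4]]

sari, keep, addition, deletion = get_sari_score(
    source_ids, prediction_ids, targets, max_gram_size=4)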
def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = text_outputs[0][:, 0, :] text_features = self.text_projection(pooled_output) return text_features
Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the final [CLS] hidden state of Text-Transformer. Examples: ```python >>> from transformers import AutoTokenizer, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) ```
github-repos
def add_update_users(self, users, capacity=None): if not isinstance(users, list): raise HDXError('Users should be a list!') for user in users: self.add_update_user(user, capacity)
Add new or update existing users in organization with new metadata. Capacity eg. member, admin must be supplied either within the User object or dictionary or using the capacity argument (which takes precedence). Args: users (List[Union[User,Dict,str]]): A list of either user ids or users metadata from User objects or dictionaries capacity (Optional[str]): Capacity of users eg. member, admin. Defaults to None. Returns: None
juraj-google-style
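A hedged sketch, assuming `org` is an HDX Organization instance; user names and capacities are placeholders.

org.add_update_users(['user1', 'user2'], capacity='member')
org.add_update_users([{'name': 'admin_user', 'capacity': 'admin'}])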
def replace_drive_enclosure(self, information): uri = "{}/replaceDriveEnclosure".format(self.data["uri"]) result = self._helper.create(information, uri) self.refresh() return result
When a drive enclosure has been physically replaced, initiate the replacement operation that enables the new drive enclosure to take over as a replacement for the prior drive enclosure. The request requires specification of both the serial numbers of the original drive enclosure and its replacement to be provided. Args: information: Options to replace the drive enclosure. Returns: dict: SAS Logical Interconnect.
juraj-google-style
def load(nifti_filename):
    nifti_filename = os.path.expanduser(nifti_filename)
    try:
        data = nib.load(nifti_filename)
        img = data.get_data()
    except Exception:
        raise ValueError("Could not load file {0} for conversion."
                         .format(nifti_filename))
    return img
Import a nifti file into a numpy array. TODO: Currently only transfers raw data for compatibility with annotation and ND formats Arguments: nifti_filename (str): A string filename of a nifti datafile Returns: A numpy array with data from the nifti file
juraj-google-style
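A minimal usage sketch, assuming nibabel is installed; the file path is a placeholder.

img = load('~/data/subject01_T1w.nii.gz')   # hypothetical path
print(img.shape)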
def confirm(question):
    if FORCE_YES:
        return True
    while True:
        answer = input(question + ' <Yes|No>').lower()
        if answer == 'yes' or answer == 'y':
            confirmed = True
            break
        if answer == 'no' or answer == 'n':
            confirmed = False
            break
    return confirmed
Ask the user whether they really want something to happen.

Args:
    question (str): What can happen

Returns:
    (boolean): Confirmed or not
codesearchnet
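A short usage sketch of the prompt helper.

if confirm('Delete all cached files?'):
    print('Proceeding with deletion')
else:
    print('Aborted')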
def set_size(self, height=220, width=350, height_threshold=120, width_threshold=160):
    self.set_integer('height', height)
    self.set_integer('width', width)
    self.set_integer('small_height_threshold', height_threshold)
    self.set_integer('small_width_threshold', width_threshold)
Set the size of the chart.

Args:
    height (int): height in pixels.
    width (int): width in pixels.
    height_threshold (int): height threshold in pixels.
    width_threshold (int): width threshold in pixels.
codesearchnet
def _lookup_tensor_name(self, tensor): return self._tensor_aliases.get(tensor.name, tensor.name)
Look up the name of a graph tensor. This method maps the name of a debugger-generated Identity or DebugIdentityV2 tensor to the name of the original instrumented tensor, if `tensor` is such a debugger-created tensor. Otherwise, it returns the name of `tensor` as is. Args: tensor: The graph tensor to look up the name for. Returns: Name of the original instrumented tensor as known to the debugger.
github-repos
def get_variantid(variant_obj, family_id): new_id = parse_document_id( chrom=variant_obj['chromosome'], pos=str(variant_obj['position']), ref=variant_obj['reference'], alt=variant_obj['alternative'], variant_type=variant_obj['variant_type'], case_id=family_id, ) return new_id
Create a new variant id. Args: variant_obj(dict) family_id(str) Returns: new_id(str): The new variant id
juraj-google-style
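An illustrative call with a minimal variant dictionary containing only the keys the function reads; values and the family ID are placeholders, and `parse_document_id` is assumed to be in scope.

variant = {
    'chromosome': '7',
    'position': 117199644,
    'reference': 'A',
    'alternative': 'G',
    'variant_type': 'clinical',
}
new_id = get_variantid(variant, family_id='internal_id_1')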
def __is_json_error(self, status, headers):
    content_header = headers.get('content-type', '')
    content_type, unused_params = cgi.parse_header(content_header)
    return status.startswith('400') and content_type.lower() in _ALL_JSON_CONTENT_TYPES
Determine if response is an error. Args: status: HTTP status code. headers: Dictionary of (lowercase) header name to value. Returns: True if the response was an error, else False.
codesearchnet
def get_size(self, value=None): if value is None: return sum(cls_val.get_size(obj_val) for obj_val, cls_val in self._get_attributes()) elif isinstance(value, type(self)): return value.get_size() else: msg = "{} is not an instance of {}".format(value, type(self).__name__) raise PackException(msg)
Calculate the total struct size in bytes. For each struct attribute, sum the result of each one's ``get_size()`` method. Args: value: In structs, the user can assign other value instead of a class' instance. Returns: int: Total number of bytes used by the struct. Raises: Exception: If the struct is not valid.
juraj-google-style
def _GetNetworkInfo(self, signatures_key): network_info = {} for category in signatures_key.GetSubkeys(): for signature in category.GetSubkeys(): profile_guid_value = signature.GetValueByName('ProfileGuid') if profile_guid_value: profile_guid = profile_guid_value.GetDataAsObject() else: continue default_gateway_mac_value = signature.GetValueByName( 'DefaultGatewayMac') if default_gateway_mac_value: default_gateway_mac = ':'.join([ '{0:02x}'.format(octet) for octet in bytearray(default_gateway_mac_value.data)]) else: default_gateway_mac = None dns_suffix_value = signature.GetValueByName('DnsSuffix') if dns_suffix_value: dns_suffix = dns_suffix_value.GetDataAsObject() else: dns_suffix = None network_info[profile_guid] = (default_gateway_mac, dns_suffix) return network_info
Retrieves the network info within the signatures subkey. Args: signatures_key (dfwinreg.WinRegistryKey): a Windows Registry key. Returns: dict[str, tuple]: a tuple of default_gateway_mac and dns_suffix per profile identifier (GUID).
juraj-google-style
def setup(self, check_all=None, exclude_private=None, exclude_uppercase=None, exclude_capitalized=None, exclude_unsupported=None, excluded_names=None, minmax=None, dataframe_format=None): assert self.shellwidget is not None self.check_all = check_all self.exclude_private = exclude_private self.exclude_uppercase = exclude_uppercase self.exclude_capitalized = exclude_capitalized self.exclude_unsupported = exclude_unsupported self.excluded_names = excluded_names self.minmax = minmax self.dataframe_format = dataframe_format if self.editor is not None: self.editor.setup_menu(minmax) self.editor.set_dataframe_format(dataframe_format) self.exclude_private_action.setChecked(exclude_private) self.exclude_uppercase_action.setChecked(exclude_uppercase) self.exclude_capitalized_action.setChecked(exclude_capitalized) self.exclude_unsupported_action.setChecked(exclude_unsupported) self.refresh_table() return self.editor = RemoteCollectionsEditorTableView( self, data=None, minmax=minmax, shellwidget=self.shellwidget, dataframe_format=dataframe_format) self.editor.sig_option_changed.connect(self.sig_option_changed.emit) self.editor.sig_files_dropped.connect(self.import_data) self.editor.sig_free_memory.connect(self.sig_free_memory.emit) self.setup_option_actions(exclude_private, exclude_uppercase, exclude_capitalized, exclude_unsupported) self.tools_layout = QHBoxLayout() toolbar = self.setup_toolbar() for widget in toolbar: self.tools_layout.addWidget(widget) self.tools_layout.addStretch() self.setup_options_button() layout = create_plugin_layout(self.tools_layout, self.editor) self.setLayout(layout) self.sig_option_changed.connect(self.option_changed)
Setup the namespace browser with provided settings. Args: dataframe_format (string): default floating-point format for DataFrame editor
juraj-google-style
def _verify_static_batch_size_equality(tensors, columns): expected_batch_size = None for i in range(0, len(tensors)): if tensors[i].shape.dims[0].value is not None: if expected_batch_size is None: bath_size_column_index = i expected_batch_size = tensors[i].shape.dims[0] elif not expected_batch_size.is_compatible_with(tensors[i].shape.dims[0]): raise ValueError('Batch size (first dimension) of each feature must be same. Batch size of columns ({}, {}): ({}, {})'.format(columns[bath_size_column_index].name, columns[i].name, expected_batch_size, tensors[i].shape.dims[0]))
Validates that the first dim (batch size) of all tensors are equal or None. Args: tensors: list of tensors to check. columns: list of feature columns matching tensors. Will be used for error messaging. Raises: ValueError: if one of the tensors has a variant batch size
github-repos