Columns: code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (string, 3 classes: juraj-google-style, codesearchnet, github-repos)
def _viz_prototype(self, vis_fn): def _viz_logger(*args, **kwargs): self.win = vis_fn(*args, win=self.win, env=self.env, opts=self.opts, **kwargs) return _viz_logger
Outputs a function which will log the arguments to Visdom in an appropriate way. Args: vis_fn: A function, such as self.vis.image
juraj-google-style
def get_golden_chunk_records(): pattern = os.path.join(fsdb.golden_chunk_dir(), '*.zz') return sorted(tf.gfile.Glob(pattern), reverse=True)[:FLAGS.window_size]
Return up to num_records of golden chunks to train on. Returns: A list of golden chunks up to num_records in length, sorted by path.
codesearchnet
def print_debug(*args, **kwargs): if WTF_CONFIG_READER.get("debug", False) == True: print(*args, **kwargs)
Print if and only if the debug flag is set true in the config.yaml file. Args: args : var args of print arguments.
juraj-google-style
def delete_idx_status(self, rdf_class): sparql_template = rdf_types = [rdf_class.uri] + [item.uri for item in rdf_class.subclasses] sparql = sparql_template.format("\n\t\t".join(rdf_types)) log.warn("Deleting index status for %s", rdf_class.uri) return self.tstore_conn.update_query(sparql)
Removes all of the index status triples from the datastore Args: ----- rdf_class: The class of items to remove the status from
juraj-google-style
def __init__(self, volume, layers=None): if isinstance(volume, string_types): volume = nb.load(volume) self.volume = volume data = self.volume.get_data() self.dims = data.shape self.vox_dims = self.get_header().get_zooms() self.full = np.float64(data.ravel()) self.global_mask = np.where(self.full) self.reset() if layers is not None: self.add(layers)
Initialize a new Masker. Args: volume: A volume indicating the global space within which all subsequent layers must reside. Any voxel in the mask with a non-zero value is considered valid for analyses. Can be either an image filename or a NiBabel image. layers: Optional masking layers to add; see docstring for add().
juraj-google-style
def lu_slogdet(LU): LU = (asarray(LU[0], float), asarray(LU[1], float)) adet = _sum(log(_abs(LU[0].diagonal()))) s = prod(sign(LU[0].diagonal())) nrows_exchange = LU[1].size - _sum(LU[1] == arange(LU[1].size, dtype="int32")) odd = nrows_exchange % 2 == 1 if odd: s *= -1.0 return (s, adet)
Natural logarithm of an LU decomposition. Args: LU (tuple): LU decomposition. Returns: tuple: sign and log-determinant.
juraj-google-style
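A hedged, self-contained check of the `lu_slogdet` logic above, assuming the `LU` tuple comes from `scipy.linalg.lu_factor` and that the `_sum`/`_abs` aliases are the usual NumPy functions; the result should agree with `numpy.linalg.slogdet`.

```python
import numpy as np
from scipy.linalg import lu_factor

def lu_slogdet(LU):
    lu, piv = np.asarray(LU[0], float), np.asarray(LU[1])
    adet = np.sum(np.log(np.abs(lu.diagonal())))   # log|det| from U's diagonal
    s = np.prod(np.sign(lu.diagonal()))            # sign contribution of U
    # each pivot entry that differs from its index is one row exchange
    if (piv.size - np.sum(piv == np.arange(piv.size))) % 2 == 1:
        s *= -1.0
    return s, adet

A = np.array([[4.0, 1.0], [2.0, 3.0]])
print(lu_slogdet(lu_factor(A)))   # ~(1.0, log(10))
print(np.linalg.slogdet(A))       # same pair
```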
def reindex(self): _map = dict(zip(self.micro_indices, reindex(self.micro_indices))) partition = tuple((tuple((_map[index] for index in group)) for group in self.partition)) output_indices = tuple((_map[i] for i in self.output_indices)) return Blackbox(partition, output_indices)
Squeeze the indices of this blackboxing to ``0..n``. Returns: Blackbox: a new, reindexed |Blackbox|. Example: >>> partition = ((3,), (2, 4)) >>> output_indices = (2, 3) >>> blackbox = Blackbox(partition, output_indices) >>> blackbox.reindex() Blackbox(partition=((1,), (0, 2)), output_indices=(0, 1))
codesearchnet
def create_volume(self, volume_name: str, driver_spec: str=None): if driver_spec: driver = driver_spec else: driver = 'local' if (not self._manager): raise RuntimeError('Volumes can only be created on swarm manager nodes') self._client.volumes.create(name=volume_name, driver=driver)
Create new docker volumes. Only the manager nodes can create a volume Args: volume_name (string): Name for the new docker volume driver_spec (string): Driver for the docker volume
codesearchnet
def write_double(self, value, little_endian=True): if little_endian: endian = "<" else: endian = ">" return self.pack('%sd' % endian, value)
Pack the value as a double and write 8 bytes to the stream. Args: value (number): the value to write to the stream. little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
juraj-google-style
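A minimal sketch of the endianness selection above using the standard `struct` module (the stream's `pack` helper presumably builds on the same format codes).

```python
import struct

value = 3.141592653589793
little = struct.pack('<d', value)   # 8-byte little-endian IEEE-754 double
big = struct.pack('>d', value)      # 8-byte big-endian double
assert len(little) == 8 and len(big) == 8
assert little == big[::-1]          # same bytes in reversed order
```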
def get_signature_def_map(saved_model_dir, tag_set): meta_graph = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set) return meta_graph.signature_def
Gets SignatureDef map from a MetaGraphDef in a SavedModel. Returns the SignatureDef map for the given tag-set in the SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect or execute. tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in string format, separated by ','. If the tag-set contains multiple tags, all tags must be passed in. Returns: A SignatureDef map that maps from string keys to SignatureDefs.
github-repos
def _geodetic_to_cartesian(cls, lat, lon, alt): C = Earth.r / np.sqrt(1 - (Earth.e * np.sin(lat)) ** 2) S = Earth.r * (1 - Earth.e ** 2) / np.sqrt(1 - (Earth.e * np.sin(lat)) ** 2) r_d = (C + alt) * np.cos(lat) r_k = (S + alt) * np.sin(lat) norm = np.sqrt(r_d ** 2 + r_k ** 2) return norm * np.array([ np.cos(lat) * np.cos(lon), np.cos(lat) * np.sin(lon), np.sin(lat) ])
Conversion from latitude, longitude and altitude coordinates to cartesian with respect to an ellipsoid Args: lat (float): Latitude in radians lon (float): Longitude in radians alt (float): Altitude to sea level in meters Return: numpy.array: 3D element (in meters)
juraj-google-style
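A hedged standalone version of the same conversion with explicit WGS84 values filled in for `Earth.r` (equatorial radius) and `Earth.e` (first eccentricity); these constants are assumptions, not taken from the original module.

```python
import numpy as np

R = 6378137.0              # WGS84 equatorial radius [m] (assumed)
E = 0.0818191908426215     # WGS84 first eccentricity (assumed)

def geodetic_to_cartesian(lat, lon, alt):
    C = R / np.sqrt(1 - (E * np.sin(lat)) ** 2)
    S = R * (1 - E ** 2) / np.sqrt(1 - (E * np.sin(lat)) ** 2)
    r_d = (C + alt) * np.cos(lat)
    r_k = (S + alt) * np.sin(lat)
    norm = np.sqrt(r_d ** 2 + r_k ** 2)
    return norm * np.array([np.cos(lat) * np.cos(lon),
                            np.cos(lat) * np.sin(lon),
                            np.sin(lat)])

p = geodetic_to_cartesian(np.radians(51.4778), 0.0, 0.0)   # Greenwich, sea level
print(p, np.linalg.norm(p))   # distance from Earth's centre is ~6.37e6 m
```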
def _eager_metrics_fn(model, outputs, targets, sample_weights=None, masks=None): outputs = nest.flatten(outputs) targets = nest.flatten(targets) metric_results = [] if targets: if len(model._targets) != len(targets): new_targets = [None if t is None else targets.pop(0) for t in model._targets] targets = new_targets metric_results = model._handle_metrics(outputs, targets=targets, sample_weights=sample_weights, masks=masks, return_weighted_and_unweighted_metrics=True, skip_target_masks=model._prepare_skip_target_masks()) metric_results.extend([m.result() for m in model.metrics if m not in model._compile_metric_functions]) return metric_results
Calculates the metrics for each output of the given model. Args: model: The model on which metrics are being calculated. outputs: The outputs of the given model. targets: The predictions or targets of the given model. sample_weights: Optional list of sample weights for each output. masks: Optional list of masks for each output. Returns: Returns the metric results for each output of the model.
github-repos
def Write(self, string): try: encoded_string = codecs.encode(string, self._encoding, self._errors) except UnicodeEncodeError: if self._errors == 'strict': logger.error( 'Unable to properly write output due to encoding error. ' 'Switching to error tolerant encoding which can result in ' 'non Basic Latin (C0) characters to be replaced with "?" or ' '"\\ufffd".') self._errors = 'replace' encoded_string = codecs.encode(string, self._encoding, self._errors) self._file_object.write(encoded_string)
Writes a string to the output. Args: string (str): output.
juraj-google-style
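A small illustration of the fallback the writer performs: strict encoding raises on unencodable characters, while switching to 'replace' substitutes them instead.

```python
import codecs

text = 'naïve café'
try:
    codecs.encode(text, 'ascii', 'strict')
except UnicodeEncodeError:
    print(codecs.encode(text, 'ascii', 'replace'))   # b'na?ve caf?'
```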
def get_room_messages(self, room_id, token, direction, limit=10, to=None): query = { "roomId": room_id, "from": token, "dir": direction, "limit": limit, } if to: query["to"] = to return self._send("GET", "/rooms/{}/messages".format(quote(room_id)), query_params=query, api_path="/_matrix/client/r0")
Perform GET /rooms/{roomId}/messages. Args: room_id (str): The room's id. token (str): The token to start returning events from. direction (str): The direction to return events from. One of: ["b", "f"]. limit (int): The maximum number of events to return. to (str): The token to stop returning events at.
juraj-google-style
def signbit(x): if any_symbolic_tensors((x,)): return Signbit().symbolic_call(x) return backend.numpy.signbit(x)
Return the sign bit of the elements of `x`. The output boolean tensor contains `True` where the sign of `x` is negative, and `False` otherwise. Args: x: Input tensor. Returns: Output boolean tensor of same shape as `x`.
github-repos
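A plain-NumPy illustration of the documented semantics; `np.signbit` is assumed to match the backend behaviour, including for negative zero.

```python
import numpy as np

x = np.array([-1.5, 0.0, -0.0, 2.0, -np.inf])
print(np.signbit(x))   # [ True False  True False  True]
```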
def __init__(self, model): self._model_id = None if model is not None: self._model_id = model.id
Create a new base event. Args: model (Model) : a Bokeh model to register event callbacks on
juraj-google-style
def add_exit_callback_to_default_func_graph(fn) -> None: default_graph = get_default_graph() if not default_graph._building_function: raise RuntimeError('Cannot add scope exit callbacks when not building a function. Default graph: {}'.format(default_graph)) default_graph._add_scope_exit_callback(fn)
Add a callback to run when the default function graph goes out of scope. Usage: ```python @tf.function def fn(x, v): expensive = expensive_object(v) add_exit_callback_to_default_func_graph(lambda: expensive.release()) return g(x, expensive) fn(x=tf.constant(...), v=...) # `expensive` has been released. ``` Args: fn: A callable that takes no arguments and whose output is ignored. To be executed when exiting func graph scope. Raises: RuntimeError: If executed when the current default graph is not a FuncGraph, or not currently executing in function creation mode (e.g., if inside an init_scope).
github-repos
def minimize(self, session=None, feed_dict=None, fetches=None, step_callback=None, loss_callback=None, **run_kwargs): session = (session or ops.get_default_session()) feed_dict = (feed_dict or {}) fetches = (fetches or []) loss_callback = (loss_callback or (lambda *fetches: None)) step_callback = (step_callback or (lambda xk: None)) self._initialize_updated_shapes(session) loss_grad_func = self._make_eval_func([self._loss, self._packed_loss_grad], session, feed_dict, fetches, loss_callback) equality_funcs = self._make_eval_funcs(self._equalities, session, feed_dict, fetches) equality_grad_funcs = self._make_eval_funcs(self._packed_equality_grads, session, feed_dict, fetches) inequality_funcs = self._make_eval_funcs(self._inequalities, session, feed_dict, fetches) inequality_grad_funcs = self._make_eval_funcs(self._packed_inequality_grads, session, feed_dict, fetches) initial_packed_var_val = session.run(self._packed_var) packed_var_val = self._minimize(initial_val=initial_packed_var_val, loss_grad_func=loss_grad_func, equality_funcs=equality_funcs, equality_grad_funcs=equality_grad_funcs, inequality_funcs=inequality_funcs, inequality_grad_funcs=inequality_grad_funcs, packed_bounds=self._packed_bounds, step_callback=step_callback, optimizer_kwargs=self.optimizer_kwargs) var_vals = [packed_var_val[packing_slice] for packing_slice in self._packing_slices] session.run(self._var_updates, feed_dict=dict(zip(self._update_placeholders, var_vals)), **run_kwargs)
Minimize a scalar `Tensor`. Variables subject to optimization are updated in-place at the end of optimization. Note that this method does *not* just return a minimization `Op`, unlike `Optimizer.minimize()`; instead it actually performs minimization by executing commands to control a `Session`. Args: session: A `Session` instance. feed_dict: A feed dict to be passed to calls to `session.run`. fetches: A list of `Tensor`s to fetch and supply to `loss_callback` as positional arguments. step_callback: A function to be called at each optimization step; arguments are the current values of all optimization variables flattened into a single vector. loss_callback: A function to be called every time the loss and gradients are computed, with evaluated fetches supplied as positional arguments. **run_kwargs: kwargs to pass to `session.run`.
codesearchnet
def _extractPayload(response, slaveaddress, mode, functioncode): BYTEPOSITION_FOR_ASCII_HEADER = 0 BYTEPOSITION_FOR_SLAVEADDRESS = 0 BYTEPOSITION_FOR_FUNCTIONCODE = 1 NUMBER_OF_RESPONSE_STARTBYTES = 2 NUMBER_OF_CRC_BYTES = 2 NUMBER_OF_LRC_BYTES = 1 BITNUMBER_FUNCTIONCODE_ERRORINDICATION = 7 MINIMAL_RESPONSE_LENGTH_RTU = (NUMBER_OF_RESPONSE_STARTBYTES + NUMBER_OF_CRC_BYTES) MINIMAL_RESPONSE_LENGTH_ASCII = 9 _checkString(response, description='response') _checkSlaveaddress(slaveaddress) _checkMode(mode) _checkFunctioncode(functioncode, None) plainresponse = response if (mode == MODE_ASCII): if (len(response) < MINIMAL_RESPONSE_LENGTH_ASCII): raise ValueError('Too short Modbus ASCII response (minimum length {} bytes). Response: {!r}'.format(MINIMAL_RESPONSE_LENGTH_ASCII, response)) elif (len(response) < MINIMAL_RESPONSE_LENGTH_RTU): raise ValueError('Too short Modbus RTU response (minimum length {} bytes). Response: {!r}'.format(MINIMAL_RESPONSE_LENGTH_RTU, response)) if (mode == MODE_ASCII): if (response[BYTEPOSITION_FOR_ASCII_HEADER] != _ASCII_HEADER): raise ValueError('Did not find header ({!r}) as start of ASCII response. The plain response is: {!r}'.format(_ASCII_HEADER, response)) elif (response[(- len(_ASCII_FOOTER)):] != _ASCII_FOOTER): raise ValueError('Did not find footer ({!r}) as end of ASCII response. The plain response is: {!r}'.format(_ASCII_FOOTER, response)) response = response[1:(- 2)] if ((len(response) % 2) != 0): template = ('Stripped ASCII frames should have an even number of bytes, but is {} bytes. ' + 'The stripped response is: {!r} (plain response: {!r})') raise ValueError(template.format(len(response), response, plainresponse)) response = _hexdecode(response) if (mode == MODE_ASCII): calculateChecksum = _calculateLrcString numberOfChecksumBytes = NUMBER_OF_LRC_BYTES else: calculateChecksum = _calculateCrcString numberOfChecksumBytes = NUMBER_OF_CRC_BYTES receivedChecksum = response[(- numberOfChecksumBytes):] responseWithoutChecksum = response[0:(len(response) - numberOfChecksumBytes)] calculatedChecksum = calculateChecksum(responseWithoutChecksum) if (receivedChecksum != calculatedChecksum): template = 'Checksum error in {} mode: {!r} instead of {!r} . The response is: {!r} (plain response: {!r})' text = template.format(mode, receivedChecksum, calculatedChecksum, response, plainresponse) raise ValueError(text) responseaddress = ord(response[BYTEPOSITION_FOR_SLAVEADDRESS]) if (responseaddress != slaveaddress): raise ValueError('Wrong return slave address: {} instead of {}. The response is: {!r}'.format(responseaddress, slaveaddress, response)) receivedFunctioncode = ord(response[BYTEPOSITION_FOR_FUNCTIONCODE]) if (receivedFunctioncode == _setBitOn(functioncode, BITNUMBER_FUNCTIONCODE_ERRORINDICATION)): raise ValueError('The slave is indicating an error. The response is: {!r}'.format(response)) elif (receivedFunctioncode != functioncode): raise ValueError('Wrong functioncode: {} instead of {}. The response is: {!r}'.format(receivedFunctioncode, functioncode, response)) firstDatabyteNumber = NUMBER_OF_RESPONSE_STARTBYTES if (mode == MODE_ASCII): lastDatabyteNumber = (len(response) - NUMBER_OF_LRC_BYTES) else: lastDatabyteNumber = (len(response) - NUMBER_OF_CRC_BYTES) payload = response[firstDatabyteNumber:lastDatabyteNumber] return payload
Extract the payload data part from the slave's response. Args: * response (str): The raw response byte string from the slave. * slaveaddress (int): The address of the slave. Used here for error checking only. * mode (str): The modbus protocol mode (MODE_RTU or MODE_ASCII) * functioncode (int): Used here for error checking only. Returns: The payload part of the *response* string. Raises: ValueError, TypeError. Raises an exception if there is any problem with the received address, the functioncode or the CRC. The received response should have the format: * RTU Mode: slaveaddress byte + functioncode byte + payloaddata + CRC (which is two bytes) * ASCII Mode: header (:) + slaveaddress byte + functioncode byte + payloaddata + LRC (which is two characters) + footer (CRLF) For development purposes, this function can also be used to extract the payload from the request sent TO the slave.
codesearchnet
def list(self, pattern='*'): if self._group_dict is None: self._group_dict = collections.OrderedDict( (group.id, group) for group in self._client.list_groups()) return [group for group in self._group_dict.values() if fnmatch.fnmatch(group.display_name, pattern)]
Returns a list of groups that match the filters. Args: pattern: An optional pattern to filter the groups based on their display name. This can include Unix shell-style wildcards. E.g. ``"Production*"``. Returns: A list of Group objects that match the filters.
juraj-google-style
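The wildcard matching used by `list` above, in isolation: `fnmatch` applies Unix shell-style patterns to the display names.

```python
import fnmatch

display_names = ['Production frontend', 'Production db', 'Staging db']
print([n for n in display_names if fnmatch.fnmatch(n, 'Production*')])
# ['Production frontend', 'Production db']
```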
def _get_rest_doc(self, request, start_response): api = request.body_json['api'] version = request.body_json['version'] generator = discovery_generator.DiscoveryGenerator(request=request) services = [s for s in self._backend.api_services if ((s.api_info.name == api) and (s.api_info.api_version == version))] doc = generator.pretty_print_config_to_json(services) if (not doc): error_msg = ('Failed to convert .api to discovery doc for version %s of api %s' % (version, api)) _logger.error('%s', error_msg) return util.send_wsgi_error_response(error_msg, start_response) return self._send_success_response(doc, start_response)
Sends back HTTP response with API directory. This calls start_response and returns the response body. It will return the discovery doc for the requested api/version. Args: request: An ApiRequest, the transformed request sent to the Discovery API. start_response: A function with semantics defined in PEP-333. Returns: A string, the response body.
codesearchnet
def _data_from_df(df): _df = df.copy() if isinstance(df.columns, pd.MultiIndex): try: _df.columns = ['_'.join(col) for col in _df.columns.values] except TypeError: raise TypeError('Could not flatten MultiIndex columns. ' 'use string column names or flatten manually') if isinstance(df.columns, pd.CategoricalIndex): _df.columns = df.columns.tolist() index_name = ColumnDataSource._df_index_name(df) if index_name == 'index': _df.index = pd.Index(_df.index.values) else: _df.index = pd.Index(_df.index.values, name=index_name) _df.reset_index(inplace=True) tmp_data = {c: v.values for c, v in _df.iteritems()} new_data = {} for k, v in tmp_data.items(): new_data[k] = v return new_data
Create a ``dict`` of columns from a Pandas ``DataFrame``, suitable for creating a ColumnDataSource. Args: df (DataFrame) : data to convert Returns: dict[str, np.array]
juraj-google-style
def cdot(L, out=None): L = asarray(L, float) layout_error = 'Wrong matrix layout.' if (L.ndim != 2): raise ValueError(layout_error) if (L.shape[0] != L.shape[1]): raise ValueError(layout_error) if (out is None): out = empty((L.shape[0], L.shape[1]), float) return einsum('ij,kj->ik', L, L, out=out)
Product of a Cholesky matrix with itself transposed. Args: L (array_like): Cholesky matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`.
codesearchnet
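A quick check of the identity computed by `cdot`: `einsum('ij,kj->ik', L, L)` is `L @ L.T`, which recovers the original matrix from its Cholesky factor.

```python
import numpy as np

A = np.array([[4.0, 2.0], [2.0, 3.0]])   # symmetric positive definite
L = np.linalg.cholesky(A)                # lower-triangular factor
out = np.einsum('ij,kj->ik', L, L)
assert np.allclose(out, L @ L.T)
assert np.allclose(out, A)
```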
def _sendMouseEvent(ev, x, y, dwData=0): assert x != None and y != None, 'x and y cannot be set to None' width, height = _size() convertedX = 65536 * x convertedY = 65536 * y ctypes.windll.user32.mouse_event(ev, ctypes.c_long(convertedX), ctypes.c_long(convertedY), dwData, 0)
The helper function that actually makes the call to the mouse_event() win32 function. Args: ev (int): The win32 code for the mouse event. Use one of the MOUSEEVENTF_* constants for this argument. x (int): The x position of the mouse event. y (int): The y position of the mouse event. dwData (int): The argument for mouse_event()'s dwData parameter. So far this is only used by mouse scrolling. Returns: None
juraj-google-style
def get_type(name, env, non_generic): if (name in env): if isinstance(env[name], MultiType): return clone(env[name]) return fresh(env[name], non_generic) else: print('W: Undefined symbol {0}'.format(name)) return TypeVariable()
Get the type of identifier name from the type environment env. Args: name: The identifier name env: The type environment mapping from identifier names to types non_generic: A set of non-generic TypeVariables Raises: ParseError: Raised if name is an undefined symbol in the type environment.
codesearchnet
def _stride(stride_spec): if (stride_spec is None): return [1, 1, 1, 1] elif isinstance(stride_spec, tf.compat.integral_types): return [1, stride_spec, stride_spec, 1] elif (len(stride_spec) == 1): return [1, stride_spec[0], stride_spec[0], 1] elif (len(stride_spec) == 2): return [1, stride_spec[0], stride_spec[1], 1] else: assert (len(stride_spec) == 4) return stride_spec
Expands the stride spec into a length 4 list. Args: stride_spec: If length 0, 1 or 2 then assign the inner dimensions, otherwise return stride_spec if it is length 4. Returns: A length 4 list.
codesearchnet
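A hedged, TensorFlow-free restatement of the same expansion rule, handy for seeing what each input shape produces.

```python
def expand_stride(stride_spec):
    if stride_spec is None:
        return [1, 1, 1, 1]
    if isinstance(stride_spec, int):
        return [1, stride_spec, stride_spec, 1]
    if len(stride_spec) == 1:
        return [1, stride_spec[0], stride_spec[0], 1]
    if len(stride_spec) == 2:
        return [1, stride_spec[0], stride_spec[1], 1]
    assert len(stride_spec) == 4
    return stride_spec

print(expand_stride(2))        # [1, 2, 2, 1]
print(expand_stride((2, 3)))   # [1, 2, 3, 1]
print(expand_stride(None))     # [1, 1, 1, 1]
```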
def parse_sv_frequencies(variant): frequency_keys = ['clingen_cgh_benignAF', 'clingen_cgh_benign', 'clingen_cgh_pathogenicAF', 'clingen_cgh_pathogenic', 'clingen_ngi', 'clingen_ngiAF', 'swegen', 'swegenAF', 'decipherAF', 'decipher'] sv_frequencies = {} for key in frequency_keys: value = variant.INFO.get(key, 0) if ('AF' in key): value = float(value) else: value = int(value) if (value > 0): sv_frequencies[key] = value return sv_frequencies
Parsing of some custom sv frequencies These are very specific at the moment, this will hopefully get better over time when the field of structural variants is more developed. Args: variant(cyvcf2.Variant) Returns: sv_frequencies(dict)
codesearchnet
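A hedged usage sketch with a minimal stand-in for `cyvcf2.Variant` (only the `INFO.get` interface is exercised); it assumes the `parse_sv_frequencies` function shown above is importable.

```python
class FakeVariant:
    """Stand-in exposing only the INFO mapping used by the parser."""
    def __init__(self, info):
        self.INFO = info

variant = FakeVariant({'swegen': '12', 'swegenAF': '0.01', 'decipher': '0'})
# keys containing 'AF' are cast to float, the rest to int; zero values are dropped
print(parse_sv_frequencies(variant))   # {'swegen': 12, 'swegenAF': 0.01}
```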
def prompt(self, message, text_input=False, timeout_s=None, cli_color=''): self.start_prompt(message, text_input, cli_color) return self.wait_for_prompt(timeout_s)
Display a prompt and wait for a response. Args: message: A string to be presented to the user. text_input: A boolean indicating whether the user must respond with text. timeout_s: Seconds to wait before raising a PromptUnansweredError. cli_color: An ANSI color code, or the empty string. Returns: A string response, or the empty string if text_input was False. Raises: MultiplePromptsError: There was already an existing prompt. PromptUnansweredError: Timed out waiting for the user to respond.
codesearchnet
def __init__(self, pos_filename, interval=2): if not pos_filename: pos_filename = os.path.join(os.getcwd(), 'mysqlbinlog2blinker.binlog.pos') self.pos_storage_filename = pos_filename assert self.pos_storage_filename self.interval = interval self._log_file = None self._log_pos = None self._pos_changed = False self.save_log_pos_thread_stop_flag = threading.Event() self.save_log_pos_thread = \ threading.Thread(target=self._save_log_pos_thread_runner) self.save_log_pos_thread.daemon = True
Create instance of FileBasedBinlogPosMemory Args: pos_filename (str|None): position storage file. None will create *mysqlbinlog2blinker.binlog.pos* in the current working dir interval (float): the interval in seconds
juraj-google-style
def shapes_match(a, b): if isinstance(a, (tuple, list)) and isinstance(b, (tuple, list)): if len(a) != len(b): return False return all([shapes_match(ia, ib) for ia, ib in zip(a, b)]) elif isinstance(a, dict) and isinstance(b, dict): if len(a) != len(b): return False match = True for (ak, av), (bk, bv) in zip(a.items(), b.items()): match = match and all([ak == bk and shapes_match(av, bv)]) return match else: shape_checker = shape_checkers[(type(a), type(b))] return shape_checker(a, b)
Recursively check if shapes of object `a` and `b` match. Will walk lists, tuples and dicts. Args: a: object of type (numpy.ndarray,tf.Tensor,list,tuple,dict) to check for matching shapes against `b`. b: object to check for matching shape against `a`. Returns: A boolean indicating whether the shapes of `a` and `b` match.
juraj-google-style
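A hedged, NumPy-only reduction of the recursive walk above; the original dispatches leaf comparisons through a `shape_checkers` registry, which is replaced here by a direct `np.shape` comparison.

```python
import numpy as np

def shapes_match(a, b):
    if isinstance(a, (tuple, list)) and isinstance(b, (tuple, list)):
        return len(a) == len(b) and all(shapes_match(ia, ib) for ia, ib in zip(a, b))
    if isinstance(a, dict) and isinstance(b, dict):
        return len(a) == len(b) and all(
            ak == bk and shapes_match(av, bv)
            for (ak, av), (bk, bv) in zip(a.items(), b.items()))
    return np.shape(a) == np.shape(b)   # leaf case

print(shapes_match({'x': np.zeros((2, 3))}, {'x': np.ones((2, 3))}))   # True
print(shapes_match([np.zeros(2)], [np.zeros(3)]))                      # False
```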
def bootstrap_results(self, init_state): with tf.compat.v1.name_scope( name=mcmc_util.make_name(self.name, 'remc', 'bootstrap_results'), values=[init_state]): replica_results = [ self.replica_kernels[i].bootstrap_results(init_state) for i in range(self.num_replica) ] init_state_parts = ( list(init_state) if mcmc_util.is_list_like(init_state) else [init_state]) replica_states = [[ tf.convert_to_tensor(value=s) for s in init_state_parts ] for i in range(self.num_replica)] if not mcmc_util.is_list_like(init_state): replica_states = [s[0] for s in replica_states] return ReplicaExchangeMCKernelResults( replica_states=replica_states, replica_results=replica_results, sampled_replica_states=replica_states, sampled_replica_results=replica_results, )
Returns an object with the same type as returned by `one_step`. Args: init_state: `Tensor` or Python `list` of `Tensor`s representing the initial state(s) of the Markov chain(s). Returns: kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. This includes replica states.
juraj-google-style
def dec(self, byts): envl = s_msgpack.un(byts) iv = envl.get('iv', b'') asscd = envl.get('asscd', b'') data = envl.get('data', b'') decryptor = AESGCM(self.ekey) try: data = decryptor.decrypt(iv, data, asscd) except Exception: logger.exception('Error decrypting data') return None return data
Decode an envelope dict and decrypt the given bytes. Args: byts (bytes): Bytes to decrypt. Returns: bytes: Decrypted message.
juraj-google-style
def scan_storage(self, area_name, callable, start=0, stop=None): if (area_name == u'storage'): data = self.storage_data elif (area_name == u'streaming'): data = self.streaming_data else: raise ArgumentError(('Unknown area name in scan_storage (%s) should be storage or streaming' % area_name)) if (len(data) == 0): return 0 if (stop is None): stop = (len(data) - 1) elif (stop >= len(data)): raise ArgumentError('Given stop offset is greater than the highest offset supported', length=len(data), stop_offset=stop) scanned = 0 for i in range(start, (stop + 1)): scanned += 1 should_break = callable(i, data[i]) if (should_break is True): break return scanned
Iterate over streaming or storage areas, calling callable. Args: area_name (str): Either 'storage' or 'streaming' to indicate which storage area to scan. callable (callable): A function that will be called as (offset, reading) for each reading between start_offset and end_offset (inclusive). If the scan function wants to stop early it can return True. If it returns anything else (including False or None), scanning will continue. start (int): Optional offset to start at (included in scan). stop (int): Optional offset to end at (included in scan). Returns: int: The number of entries scanned.
codesearchnet
def contains(self, x: int, y: int) -> bool: return ( self.x <= x < self.x + self.width and self.y <= y < self.y + self.height )
Returns True if this node contains these coordinates. Args: x (int): X position to check. y (int): Y position to check. Returns: bool: True if this node contains these coordinates. Otherwise False.
juraj-google-style
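A minimal standalone illustration of the half-open bounds check: the left and top edges count as inside, the right and bottom edges do not.

```python
class Rect:
    def __init__(self, x, y, width, height):
        self.x, self.y, self.width, self.height = x, y, width, height

    def contains(self, x, y):
        return (self.x <= x < self.x + self.width
                and self.y <= y < self.y + self.height)

r = Rect(2, 3, 4, 5)
print(r.contains(2, 3))   # True: top-left corner is included
print(r.contains(6, 3))   # False: x == x + width is excluded
```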
def set_generation_type(self, num_processors=(- 1), num_splits=1000, verbose=(- 1)): self.parallel_input.num_processors = num_processors self.parallel_input.num_splits = num_splits self.parallel_input.verbose = verbose return
Change generation type. Choose whether to generate the data in parallel or on a single processor. Args: num_processors (int or None, optional): Number of parallel processors to use. If ``num_processors==-1``, this will use multiprocessing module and use available cpus. If single generation is desired, num_processors is set to ``None``. Default is -1. num_splits (int, optional): Number of binaries to run during each process. Default is 1000. verbose (int, optional): Describes the notification of when parallel processes are finished. Value describes cadence of process completion notifications. If ``verbose == -1``, no notifications are given. Default is -1.
codesearchnet
def __init__(self, name: Text, num_replicas: int, pivot: ops.Operation): super(TPUReplicateContext, self).__init__() self._num_replicas = num_replicas self._outer_device_function_stack = None self._oc_dev_fn_stack = None self._outside_compilation_cluster = None self._is_map_outside_compilation = False self._outside_compilation_v2_context = None self._outside_compilation_counter = 0 self._in_gradient_colocation = None self._gradient_colocation_stack = [] self._host_compute_core = [] self._name = name self._tpu_replicate_attr = attr_value_pb2.AttrValue(s=compat.as_bytes(self._name)) self._unsupported_ops = [] self._pivot = pivot self._replicated_vars = {}
Builds a new TPUReplicateContext. Args: name: a unique name for the context, used to populate the `_tpu_replicate` attribute. num_replicas: an integer that gives the number of replicas for the computation. pivot: a pivot node. Nodes in the TPUReplicateContext that do not have any inputs will have a control dependency on the pivot node. This ensures that nodes are correctly included in any enclosing control flow contexts.
github-repos
def categorical_accuracy(y_true, y_pred): return math_ops.cast(math_ops.equal(math_ops.argmax(y_true, axis=-1), math_ops.argmax(y_pred, axis=-1)), backend.floatx())
Calculates how often predictions match one-hot labels. Standalone usage: >>> y_true = [[0, 0, 1], [0, 1, 0]] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred) >>> assert m.shape == (2,) >>> m.numpy() array([0., 1.], dtype=float32) You can provide logits of classes as `y_pred`, since argmax of logits and probabilities are same. Args: y_true: One-hot ground truth values. y_pred: The prediction values. Returns: Categorical accuracy values.
github-repos
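The same comparison in plain NumPy: argmax of the one-hot labels against argmax of the predictions, cast to float, reproducing the values from the docstring example.

```python
import numpy as np

y_true = np.array([[0, 0, 1], [0, 1, 0]])
y_pred = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0.0]])
acc = (np.argmax(y_true, axis=-1) == np.argmax(y_pred, axis=-1)).astype(np.float32)
print(acc)   # [0. 1.]
```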
def build_album_art_full_uri(self, url): if (not url.startswith(('http:', 'https:'))): url = ((('http: return url
Ensure an Album Art URI is an absolute URI. Args: url (str): the album art URI. Returns: str: An absolute URI.
codesearchnet
def verify(self, message, signature): message = _helpers._to_bytes(message, encoding='utf-8') try: return rsa.pkcs1.verify(message, signature, self._pubkey) except (ValueError, rsa.pkcs1.VerificationError): return False
Verifies a message against a signature. Args: message: string or bytes, The message to verify. If string, will be encoded to bytes as utf-8. signature: string or bytes, The signature on the message. If string, will be encoded to bytes as utf-8. Returns: True if message was signed by the private key associated with the public key that this object was constructed with.
juraj-google-style
def load_supported_categories(categories_path: str): global _load_supported_categories if _load_supported_categories: return with open(categories_path, encoding='utf-8') as supported_categories: yaml_object = yaml.load(supported_categories.read(), Loader=yaml.SafeLoader) Tag.Config.supported_categories = yaml_object[TagFields.categories] _load_supported_categories = True
Load the list of supported categories from categories_path file into Tag model config Args: categories_path: path to the file with categories.
github-repos
def detect_format(program, attributes) -> str: def fmt(attr): '\n For internal use only.\n ' return ((attr.array_length * attr.dimension), attr.shape) return ' '.join((('%d%s' % fmt(program[a])) for a in attributes))
Detect format for vertex attributes. The format returned does not contain padding. Args: program (Program): The program. attributes (list): A list of attribute names. Returns: str
codesearchnet
def clear_cached_modules(modules: py_utils.StrOrStrList, *, recursive: bool=True, verbose: bool=False, invalidate: bool=True) -> None: modules_to_clear = get_module_names(modules, recursive=recursive) if not modules_to_clear: return modules = set(py_utils.normalize_str_to_list(modules)) for module_name in modules_to_clear: if verbose: print(f'Clearing {module_name}') invalidate_curr = invalidate and (not module_name.startswith('etils')) if invalidate_curr or module_name in modules: _clear_parent_module_attr(module_name) if invalidate_curr: _invalidate_module(sys.modules[module_name]) del sys.modules[module_name] for cleanup in typing._cleanups: cleanup()
Clear the `sys.modules` cache. Helpful for interactive development to reload from Jupyter notebook the code we're currently editing (without having to restart the notebook kernel). Usage: ```python ecolab.clear_cached_modules(['visu3d', 'other_module.submodule']) import visu3d import other_module.submodule ``` Args: modules: List of modules to clear recursive: Whether submodules are cleared too verbose: Whether to display the list of modules cleared. invalidate: If `True` (default), the instances of the module will raise an error when used (to avoid using 2 versions of a module at the same time)
github-repos
def BuildFindSpecs(self, environment_variables=None): path_attributes = {} if environment_variables: for environment_variable in environment_variables: attribute_name = environment_variable.name.lower() attribute_value = environment_variable.value if (not isinstance(attribute_value, py2to3.STRING_TYPES)): continue if ((len(attribute_value) > 2) and (attribute_value[1] == ':')): (_, _, attribute_value) = attribute_value.rpartition(':') if attribute_value.startswith('\\'): attribute_value = attribute_value.replace('\\', '/') path_attributes[attribute_name] = attribute_value find_specs = [] with open(self._path, 'r') as file_object: for line in file_object: line = line.strip() if line.startswith('#'): continue if path_attributes: try: line = line.format(**path_attributes) except KeyError as exception: logger.error('Unable to expand path filter: {0:s} with error: {1!s}'.format(line, exception)) continue if (not line.startswith('/')): logger.warning('The path filter must be defined as an absolute path: {0:s}'.format(line)) continue path_segments = line.split('/') path_segments.pop(0) if (not path_segments[(- 1)]): logger.warning('Empty last path segment in path filter: {0:s}'.format(line)) continue find_spec = file_system_searcher.FindSpec(location_regex=path_segments, case_sensitive=False) find_specs.append(find_spec) return find_specs
Build find specification from a filter file. Args: environment_variables (Optional[list[EnvironmentVariableArtifact]]): environment variables. Returns: list[dfvfs.FindSpec]: find specification.
codesearchnet
def __init__(self, key_dtype, value_dtype): self._key_dtype = dtypes.as_dtype(key_dtype) self._value_dtype = dtypes.as_dtype(value_dtype) super(LookupInterface, self).__init__()
Construct a lookup table interface. Args: key_dtype: The table key type. value_dtype: The table value type.
github-repos
def market(self, accountID, **kwargs): return self.create( accountID, order=MarketOrderRequest(**kwargs) )
Shortcut to create a Market Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a MarketOrderRequest Returns: v20.response.Response containing the results from submitting the request
juraj-google-style
def sget_timestamp(self, cycle, step, dataset_number=None): dataset_number = self._validate_dataset_number(dataset_number) if (dataset_number is None): self._report_empty_dataset() return cycle_index_header = self.headers_normal.cycle_index_txt timestamp_header = self.headers_normal.test_time_txt step_index_header = self.headers_normal.step_index_txt test = self.datasets[dataset_number].dfdata if isinstance(step, (list, tuple)): warnings.warn(f'The variable step is a list. Should be an integer: {step}') step = step[0] c = test[((test[cycle_index_header] == cycle) & (test[step_index_header] == step))] if (not self.is_empty(c)): t = c[timestamp_header] return t else: return pd.Series()
Returns timestamp for cycle, step. Convenience function; same as issuing dfdata[(dfdata[cycle_index_header] == cycle) & (dfdata[step_index_header] == step)][timestamp_header] Args: cycle: cycle number step: step number dataset_number: the dataset number (automatic selection if None) Returns: pandas.Series
codesearchnet
def assert_no_text(self, *args, **kwargs): query = TextQuery(*args, **kwargs) @self.synchronize(wait=query.wait) def assert_no_text(): count = query.resolve_for(self) if (matches_count(count, query.options) and ((count > 0) or expects_none(query.options))): raise ExpectationNotMet(query.negative_failure_message) return True return assert_no_text()
Asserts that the page or current node doesn't have the given text content, ignoring any HTML tags. Args: *args: Variable length argument list for :class:`TextQuery`. **kwargs: Arbitrary keyword arguments for :class:`TextQuery`. Returns: True Raises: ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
codesearchnet
def create_ondemand_streaming_locator(access_token, encoded_asset_id, pid, starttime=None): path = '/Locators' endpoint = ''.join([ams_rest_endpoint, path]) if starttime is None: body = '{ \ "AccessPolicyId":"' + pid + '", \ "AssetId":"' + encoded_asset_id + '", \ "Type": "2" \ }' else: body = '{ \ "AccessPolicyId":"' + pid + '", \ "AssetId":"' + encoded_asset_id + '", \ "StartTime":"' + str(starttime) + '", \ "Type": "2" \ }' return do_ams_post(endpoint, path, body, access_token, "json_only")
Create Media Service OnDemand Streaming Locator. Args: access_token (str): A valid Azure authentication token. encoded_asset_id (str): A Media Service Encoded Asset ID. pid (str): A Media Service Encoded PID. starttime (str): A Media Service Starttime. Returns: HTTP response. JSON body.
juraj-google-style
def load_all_yamls(cls, directories): yaml_files = [] loaded_yamls = {} for d in directories: if d.startswith('/home') and not os.path.exists(d): os.makedirs(d) for dirname, subdirs, files in os.walk(d): yaml_files.extend(map(lambda x: os.path.join(dirname, x), filter(lambda x: x.endswith('.yaml'), files))) for f in yaml_files: loaded_yamls[f] = cls.load_yaml_by_path(f) return loaded_yamls
Loads yaml files from all given directories. Args: directories: list of directories to search Returns: dict of {fullpath: loaded_yaml_structure}
juraj-google-style
async def client_event_handler(self, client_id, event_tuple, user_data): (conn_string, event_name, _event) = event_tuple self._logger.debug('Ignoring event %s from device %s forwarded for client %s', event_name, conn_string, client_id) return None
Method called to actually send an event to a client. Users of this class should override this method to actually forward device events to their clients. It is called with the client_id passed to (or returned from) :meth:`setup_client` as well as the user_data object that was included there. The event tuple is a 3-tuple of: - connection string - event name - event object If you override this to be a coroutine, it will be awaited. The default implementation just logs the event. Args: client_id (str): The client_id that this event should be forwarded to. event_tuple (tuple): The connection_string, event_name and event_object that should be forwarded. user_data (object): Any user data that was passed to setup_client.
codesearchnet
class FlaxSampleOutput(ModelOutput): sequences: Optional[jnp.ndarray] = None
Flax Base class for outputs of decoder-only generation models using sampling. Args: sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): The generated sequences.
github-repos
def riak_multi_get(self, key_list_tuple): pool = PyokoMG() objs = self._client.multiget(key_list_tuple, pool=pool) pool.stop() return objs
Sends the given list of tuples to the multiget method and collects each Riak object's key and data. For each multiget call, a separate pool is used and stopped after execution. Args: key_list_tuple(list of tuples): [('bucket_type','bucket','riak_key')] Example: [('models','personel','McAPchPZzB6RVJ8QI2XSVQk4mUR')] Returns: objs(tuple): obj's key and obj's value
juraj-google-style
def heightmap_add_hill( hm: np.ndarray, x: float, y: float, radius: float, height: float ) -> None: lib.TCOD_heightmap_add_hill(_heightmap_cdata(hm), x, y, radius, height)
Add a hill (a half spheroid) at given position. If height == radius or -radius, the hill is a half-sphere. Args: hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions. x (float): The x position at the center of the new hill. y (float): The y position at the center of the new hill. radius (float): The size of the new hill. height (float): The height or depth of the new hill.
juraj-google-style
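A hedged pure-NumPy sketch of the docstring's "half spheroid" profile, i.e. z = height·sqrt(1 − d²/r²) inside the radius; this illustrates the described shape only and is not the libtcod C implementation.

```python
import numpy as np

def add_hill(hm, x, y, radius, height):
    ys, xs = np.indices(hm.shape)
    d2 = (xs - x) ** 2 + (ys - y) ** 2        # squared distance to the hill centre
    inside = d2 < radius ** 2
    hm[inside] += height * np.sqrt(1.0 - d2[inside] / radius ** 2)

hm = np.zeros((5, 5))
add_hill(hm, 2.0, 2.0, 2.5, 1.0)
print(hm.round(2))   # highest at (2, 2), tapering to 0 at the radius
```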
def __init__(self, name, freevars, extra_locals): self._name = name self._freevars = freevars self._extra_locals = extra_locals self._unbound_factory = None self.module = None self.source_map = None
Creates a new factory for a Python function. Args: name: The function name. freevars: The list of non-global free variables for the function. extra_locals: Dict[Text, Any], names and values for custom variables that are accessible to the generated code as local variables.
github-repos
def get_cached_or_new(url, new=False): garbage_collection() old_req = DATABASE.get(url) if old_req and not new: return old_req if not (url.startswith("http://") or url.startswith("https://")): raise ValueError("Invalid URL `%s`!" % url) req = RequestInfo(url=url) DATABASE[url] = req return req
Look into the database and return :class:`RequestInfo` if the `url` was already analyzed, or create and return new instance, if not. If the `new` is set to True, always create new instance. Args: url (str): URL of the analyzed resource. new (bool, default False): Force new instance? Returns: obj: :class:`RequestInfo` instance.
juraj-google-style
def fill_rect(self, rect): check_int_err(lib.SDL_RenderFillRect(self._ptr, rect._ptr))
Fill a rectangle on the current rendering target with the drawing color. Args: rect (Rect): The destination rectangle, or None to fill the entire rendering target. Raises: SDLError: If an error is encountered.
codesearchnet
def EnsureAstName(ast, module_name, fix=False): raw_ast = ast.ast if fix and module_name != raw_ast.name: ast = ast.Replace(class_type_nodes=None) ast = ast.Replace(ast=raw_ast.Visit(visitors.RenameModuleVisitor(raw_ast.name, module_name))) else: assert module_name == raw_ast.name return ast
Verify that serializable_ast has the name module_name, or repair it. Args: ast: An instance of SerializableAst. module_name: The name under which ast.ast should be loaded. fix: If this function should repair the wrong name. Returns: The updated SerializableAst.
github-repos
def _show_tag_sets(saved_model_dir): tag_sets = saved_model_utils.get_saved_model_tag_sets(saved_model_dir) print('The given SavedModel contains the following tag-sets:') for tag_set in sorted(tag_sets): print('%r' % ', '.join(sorted(tag_set)))
Prints the tag-sets stored in SavedModel directory. Prints all the tag-sets for MetaGraphs stored in SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect.
github-repos
def transmute_sites(self, old_site_label, new_site_label, n_sites_to_change): selected_sites = self.select_sites(old_site_label) for site in random.sample(selected_sites, n_sites_to_change): site.label = new_site_label self.site_labels = set([site.label for site in self.sites])
Selects a random subset of sites with a specific label and gives them a different label. Args: old_site_label (String or List(String)): Site label(s) of the sites to be modified. new_site_label (String): Site label to be applied to the modified sites. n_sites_to_change (Int): Number of sites to modify. Returns: None
codesearchnet
def output_types(self): return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), self._element_spec)
Returns the type of each component of an element of this iterator. Returns: A (nested) structure of `tf.DType` objects corresponding to each component of an element of this dataset.
github-repos
def _CheckFormatTokenSubtypes(self, llines, list_of_expected): actual = [] for lline in llines: filtered_values = [(ft.value, ft.subtypes) for ft in lline.tokens if ft.name not in pytree_utils.NONSEMANTIC_TOKENS] if filtered_values: actual.append(filtered_values) self.assertEqual(list_of_expected, actual)
Check that the tokens in the LogicalLines have the expected subtypes. Args: llines: list of LogicalLine. list_of_expected: list of (name, subtype) pairs. Non-semantic tokens are filtered out from the expected values.
github-repos
def _dynamic_range_quantize(src_saved_model_path: str, dst_saved_model_path: str, quantization_options: _QuantizationOptions) -> autotrackable.AutoTrackable: mode_str = 'dynamic-range quantization' if _is_qat_saved_model(src_saved_model_path): raise ValueError('The models trained with quantization-aware training (QAT) is not supported for %s.' % mode_str) logging.info('Running post-training %s on model: %s', mode_str, src_saved_model_path) logging.info('QuantizationOptions: \n%s', quantization_options) signature_def_map = save_model.get_signatures_from_saved_model(src_saved_model_path, quantization_options.signature_keys, quantization_options.tags) pywrap_quantize_model.quantize_ptq_dynamic_range(src_saved_model_path, dst_saved_model_path, quantization_options_serialized=quantization_options.SerializeToString(), signature_keys=list(quantization_options.signature_keys), signature_def_map_serialized=_serialize_signature_def_map(signature_def_map), py_function_library=py_function_lib.PyFunctionLibrary()) return saved_model_load.load(dst_saved_model_path)
Quantizes the given SavedModel via post-training dynamic range quantization. Args: src_saved_model_path: Path to the saved model. dst_saved_model_path: The path to save the output SavedModel. The directory will be overwritten if not empty. quantization_options: QuantizationOptions proto describing quantization related config. Returns: A SavedModel object with TF quantization applied. Raises: ValueError: when the model is QAT model.
github-repos
def _attempt_shard_retry(self, shard_state, tstate): shard_attempts = shard_state.retries + 1 if shard_attempts >= parameters.config.SHARD_MAX_ATTEMPTS: logging.warning( "Shard attempt %s exceeded %s max attempts.", shard_attempts, parameters.config.SHARD_MAX_ATTEMPTS) return self._TASK_DIRECTIVE.FAIL_TASK if tstate.output_writer and ( not tstate.output_writer._supports_shard_retry(tstate)): logging.warning("Output writer %s does not support shard retry.", tstate.output_writer.__class__.__name__) return self._TASK_DIRECTIVE.FAIL_TASK shard_state.reset_for_retry() logging.warning("Shard %s attempt %s failed with up to %s attempts.", shard_state.shard_id, shard_state.retries, parameters.config.SHARD_MAX_ATTEMPTS) output_writer = None if tstate.output_writer: output_writer = tstate.output_writer.create( tstate.mapreduce_spec, shard_state.shard_number, shard_attempts + 1) tstate.reset_for_retry(output_writer) return self._TASK_DIRECTIVE.RETRY_SHARD
Whether to retry shard. This method may modify shard_state and tstate to prepare for retry or fail. Args: shard_state: model.ShardState for current shard. tstate: model.TransientShardState for current shard. Returns: A _TASK_DIRECTIVE enum. RETRY_SHARD if shard should be retried. FAIL_TASK otherwise.
juraj-google-style
def on_predict_batch_begin(self, batch, logs=None):
Called at the beginning of a batch in `predict` methods. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of `model.predict_step`, it typically returns a dict with a key 'outputs' containing the model's outputs.
github-repos
def __init__(self, path_elements: List[Union[str, int]], parent: Optional['Key']=None, project: Optional[str]=None, namespace: Optional[str]=None): self.path_elements = tuple(path_elements) self.parent = parent self.namespace = namespace self.project = project
Represents a Datastore key. The partition ID is represented by its components: namespace and project. If key has a parent, project and namespace should either be unset or match the parent's. Args: path_elements: (list of str and int) Key path: an alternating sequence of kind and identifier. The kind must be of type ``str`` and identifier may be a ``str`` or an ``int``. If the last identifier is omitted this is an incomplete key, which is unsupported in ``WriteToDatastore`` and ``DeleteFromDatastore``. See :class:`google.cloud.datastore.key.Key` for more details. parent: (:class:`~apache_beam.io.gcp.datastore.v1new.types.Key`) (optional) Parent for this key. project: (str) Project ID. Required unless set by parent. namespace: (str) (optional) Namespace ID
github-repos
def vec_size(nodes, s_val): result_vec = evaluate_multi(nodes, np.asfortranarray([s_val])) return np.linalg.norm(result_vec[:, 0], ord=2)
Compute :math:`\|B(s)\|_2`. .. note:: This is a helper for :func:`_compute_length` and does not have a Fortran speedup. Intended to be used with ``functools.partial`` to fill in the value of ``nodes`` and create a callable that only accepts ``s_val``. Args: nodes (numpy.ndarray): The nodes defining a curve. s_val (float): Parameter to compute :math:`B(s)`. Returns: float: The norm of :math:`B(s)`.
juraj-google-style
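A hedged sketch of what the helper computes, with `evaluate_multi` stood in by an explicit Bernstein-basis evaluation of a quadratic Bézier curve (the real package's evaluator is assumed elsewhere).

```python
import numpy as np

def evaluate_multi(nodes, s_vals):
    # quadratic Bernstein basis; nodes is dimension x 3 (one column per control point)
    basis = np.array([(1 - s_vals) ** 2, 2 * s_vals * (1 - s_vals), s_vals ** 2])
    return nodes @ basis

nodes = np.asfortranarray([[0.0, 1.0, 2.0],
                           [0.0, 2.0, 0.0]])
point = evaluate_multi(nodes, np.asfortranarray([0.5]))
print(np.linalg.norm(point[:, 0], ord=2))   # B(0.5) = (1, 1), so the norm is sqrt(2)
```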
def pre_ref_resolution_callback(self, other_model): filename = other_model._tx_filename assert (filename) other_model._tx_model_repository = \ GlobalModelRepository(self.all_models) self.all_models.filename_to_model[filename] = other_model
(internal: used to store a model after parsing into the repository) Args: other_model: the parsed model Returns: nothing
juraj-google-style
def get_indices(self, axis=0, index_func=None, old_blocks=None): ErrorMessage.catch_bugs_and_request_email((not callable(index_func))) func = self.preprocess_func(index_func) if (axis == 0): new_indices = ([idx.apply(func).get() for idx in self._partitions_cache.T[0]] if len(self._partitions_cache.T) else []) if (old_blocks is not None): cumulative_block_lengths = np.array(old_blocks.block_lengths).cumsum() else: cumulative_block_lengths = np.array(self.block_lengths).cumsum() else: new_indices = ([idx.apply(func).get() for idx in self._partitions_cache[0]] if len(self._partitions_cache) else []) if (old_blocks is not None): cumulative_block_lengths = np.array(old_blocks.block_widths).cumsum() else: cumulative_block_lengths = np.array(self.block_widths).cumsum() full_indices = (new_indices[0] if len(new_indices) else new_indices) if (old_blocks is not None): for i in range(len(new_indices)): if ((i == 0) or (len(new_indices[i]) == 0)): continue try: append_val = (new_indices[i] + cumulative_block_lengths[(i - 1)]) except TypeError: append_val = new_indices[i] full_indices = full_indices.append(append_val) else: full_indices = full_indices.append(new_indices[1:]) return full_indices
This gets the internal indices stored in the partitions. Note: These are the global indices of the object. This is mostly useful when you have deleted rows/columns internally, but do not know which ones were deleted. Args: axis: The axis to extract the labels from (0 - index, 1 - columns). index_func: The function to be used to extract the indices. old_blocks: An optional previous object that this object was created from. This is used to compute the correct offsets. Returns: A Pandas Index object.
codesearchnet
def InnermostClass(self): for i in range(len(self.stack), 0, (- 1)): classinfo = self.stack[(i - 1)] if isinstance(classinfo, _ClassInfo): return classinfo return None
Get class info on the top of the stack. Returns: A _ClassInfo object if we are inside a class, or None otherwise.
codesearchnet
def get(self, uri: str) -> Optional[_T]: resource = self.resources_by_uri.get(uri) if resource is None: return None if isinstance(resource, self.proto_cls): return resource parsed = self._parse_resource(uri, resource) self.resources_by_uri[uri] = parsed return parsed
Retrieves a protocol buffer for the resource with the given uri. Args: uri: URI of the resource to retrieve. Returns: A protocol buffer for the resource or `None` if the `uri` is not present in the ResourceCollection. Raises: RuntimeError: The resource could not be found or the retrieved resource did not have the expected URL. The .zip file may have changed on disk.
github-repos
def _read_arg(arg): if (arg is None): arg_out = arg else: if ((len(arg) == 1) and os.path.exists(arg[0])): arg_out = grp.read(arg[0]) else: arg_out = arg assert isinstance(arg_out, list), 'arg_out must be a list.' assert (type(arg_out[0]) == str), 'arg_out must be a list of strings.' return arg_out
If arg is a list with 1 element that corresponds to a valid file path, use set_io.grp to read the grp file. Otherwise, check that arg is a list of strings. Args: arg (list or None) Returns: arg_out (list or None)
codesearchnet
def folderExist(self, name, folders): if name is not None and name != '': folderID = None for folder in folders: if folder['title'].lower() == name.lower(): return True del folders return folderID else: return False
Determines if a folder exists, case insensitively. Args: name (str): The name of the folder to check. folders (list): A list of folder dicts to check against. The dicts must contain the key:value pair ``title``. Returns: bool: ``True`` if the folder exists in the list, ``False`` otherwise.
juraj-google-style
def save(self, config=None): if (config is not None): clist = [config] else: clist = [self._system_config, self._global_config, self._repo_config, self._local_config] for conf in clist: if (conf.filename is None): continue try: logger.debug("Writing '{}'.".format(conf.filename)) dname = os.path.dirname(os.path.abspath(conf.filename)) try: os.makedirs(dname) except OSError as exc: if (exc.errno != errno.EEXIST): raise conf.write() except Exception as exc: msg = "failed to write config '{}'".format(conf.filename) raise ConfigError(msg, exc)
Saves config to config files. Args: config (configobj.ConfigObj): optional config object to save. Raises: dvc.config.ConfigError: thrown if failed to write config file.
codesearchnet
def lineno(self): return self.first.lineno
Return the line number of this logical line. Returns: The line number of the first token in this logical line.
github-repos
def convert_md_to_rst(md_path, rst_temp_path): command = "pandoc --write=rst --output=%s %s" % (rst_temp_path, md_path) print("converting with pandoc: %s to %s\n-->%s" % (md_path, rst_temp_path, command)) if os.path.exists(rst_temp_path): os.remove(rst_temp_path) os.system(command) if not os.path.exists(rst_temp_path): s = ("Error running: %s\n" " Did you install pandoc per the %s docstring?" % (command, __file__)) sys.exit(s) return read(rst_temp_path)
Convert the contents of a file from Markdown to reStructuredText. Returns the converted text as a Unicode string. Arguments: md_path: a path to a UTF-8 encoded Markdown file to convert. rst_temp_path: a temporary path to which to write the converted contents.
juraj-google-style
def close(self): if self.reuse: logger.debug('Ipcontroller not shutting down: reuse enabled') return if (self.mode == 'manual'): logger.debug('Ipcontroller not shutting down: Manual mode') return try: pgid = os.getpgid(self.proc.pid) os.killpg(pgid, signal.SIGTERM) time.sleep(0.2) os.killpg(pgid, signal.SIGKILL) try: self.proc.wait(timeout=1) x = self.proc.returncode if (x == 0): logger.debug('Controller exited with {0}'.format(x)) else: logger.error('Controller exited with {0}. May require manual cleanup'.format(x)) except subprocess.TimeoutExpired: logger.warn('Ipcontroller process:{0} cleanup failed. May require manual cleanup'.format(self.proc.pid)) except Exception as e: logger.warn('Failed to kill the ipcontroller process[{0}]: {1}'.format(self.proc.pid, e))
Terminate the controller process and its child processes. Args: - None
codesearchnet
def no_company_with_insufficient_companies_house_data(value): for prefix, name in company_types_with_insufficient_companies_house_data: if value.upper().startswith(prefix): raise ValidationError( MESSAGE_INSUFFICIENT_DATA, params={'name': name} )
Confirms that the company number is not for for a company that Companies House does not hold information on. Args: value (string): The company number to check. Raises: django.forms.ValidationError
juraj-google-style
def _poll_once(self, timeout_ms, max_records): self._coordinator.poll() if (not self._subscription.has_all_fetch_positions()): self._update_fetch_positions(self._subscription.missing_fetch_positions()) (records, partial) = self._fetcher.fetched_records(max_records) if records: if (not partial): self._fetcher.send_fetches() return records self._fetcher.send_fetches() timeout_ms = min(timeout_ms, (self._coordinator.time_to_next_poll() * 1000)) self._client.poll(timeout_ms=timeout_ms) if self._coordinator.need_rejoin(): return {} (records, _) = self._fetcher.fetched_records(max_records) return records
Do one round of polling. In addition to checking for new data, this does any needed heart-beating, auto-commits, and offset updates. Arguments: timeout_ms (int): The maximum time in milliseconds to block. Returns: dict: Map of topic to list of records (may be empty).
codesearchnet
def listen_tcp(cls, host='', port=0, echo=False): return cls(TCPServerSocketChannel(host, port), echo=echo)
Set up a :class:`TCPServerSocketChannel` and create a :class:`Flow` instance for it. Args: host(str): The hostname or IP address to bind to. port(int): The port number to listen on. echo(bool): Whether to echo read/written data to stdout by default. Returns: :class:`Flow`: A Flow instance initialised with the TCP socket channel.
juraj-google-style
def getModelSummaryAsGeoJson(self, session, withStreamNetwork=True, withNodes=False):
    watershedMaskCard = self.getCard('WATERSHED_MASK')
    maskFilename = watershedMaskCard.value
    maskExtension = maskFilename.strip('"').split('.')[1]
    maskMap = session.query(RasterMapFile).\
        filter(RasterMapFile.projectFile == self).\
        filter(RasterMapFile.fileExtension == maskExtension).\
        one()
    # NOTE: the SQL template string literal is elided in this snippet; the
    # original formats a query against maskMap.tableName for the given id.
    statement = .format('raster', maskMap.tableName, maskMap.id)
    result = session.execute(statement)
    maskMapJsonPolygon = ''
    for row in result:
        maskMapJsonPolygon = row.polygon
    jsonString = maskMapJsonPolygon
    if withStreamNetwork:
        channelInputFile = self.channelInputFile
        if channelInputFile is not None:
            jsonStreamNetwork = channelInputFile.getStreamNetworkAsGeoJson(session=session, withNodes=withNodes)
            featureCollection = json.loads(jsonStreamNetwork)
            jsonMaskMapObjects = json.loads(maskMapJsonPolygon)
            maskFeature = {"type": "Feature",
                           "geometry": jsonMaskMapObjects,
                           "properties": {},
                           "id": maskMap.id}
            tempFeatures = featureCollection['features']
            tempFeatures.append(maskFeature)
            featureCollection['features'] = tempFeatures
            jsonString = json.dumps(featureCollection)
    return jsonString
Retrieve a GeoJSON representation of the model. Includes vectorized mask map and stream network. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database withStreamNetwork (bool, optional): Include stream network. Defaults to True. withNodes (bool, optional): Include nodes. Defaults to False. Returns: str: GeoJSON string
juraj-google-style
def _CheckKeyPath(self, registry_key, search_depth):
    if (self._key_path_segments is None):
        return False
    if ((search_depth < 0) or (search_depth > self._number_of_key_path_segments)):
        return False
    if (search_depth == 0):
        segment_name = ''
    else:
        segment_name = self._key_path_segments[(search_depth - 1)]
        if self._is_regex:
            if isinstance(segment_name, py2to3.STRING_TYPES):
                flags = ((re.DOTALL | re.IGNORECASE) | re.UNICODE)
                try:
                    segment_name = '^{0:s}$'.format(segment_name)
                    segment_name = re.compile(segment_name, flags=flags)
                except sre_constants.error:
                    return False
                self._key_path_segments[(search_depth - 1)] = segment_name
        else:
            segment_name = segment_name.lower()
            self._key_path_segments[(search_depth - 1)] = segment_name
    if (search_depth > 0):
        if self._is_regex:
            if (not segment_name.match(registry_key.name)):
                return False
        elif (segment_name != registry_key.name.lower()):
            return False
    return True
Checks the key path find specification. Args: registry_key (WinRegistryKey): Windows Registry key. search_depth (int): number of key path segments to compare. Returns: bool: True if the Windows Registry key matches the find specification, False if not.
codesearchnet
def ensure_files(self, filenames):
    logger.debug('Testing {0} for the following files: {1}'.format(self.working_dir, filenames))
    dircontent = os.listdir(self.working_dir)
    for fname in filenames:
        if (fname not in dircontent):
            return False
    return True
Checks the student submission for specific files. Args: filenames (tuple): The list of file names to be checked for. Returns: bool: Indicator if all files are found in the student archive.
codesearchnet
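For context, here is a standalone version of the same presence check using only the standard library; the directory path and file names are made up for illustration.

import os

def ensure_files(working_dir, filenames):
    # True only if every expected file name is present in the directory listing.
    dircontent = os.listdir(working_dir)
    return all(fname in dircontent for fname in filenames)

print(ensure_files('/tmp', ('solution.py', 'README.md')))  # illustrative paths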
def __init__(self, base: ModelHandler[ExampleT, PredictionT, ModelT],
             postprocess_fn: Callable[[PredictionT], PostProcessT]):
    self._base = base
    self._env_vars = getattr(base, '_env_vars', {})
    self._postprocess_fn = postprocess_fn
A ModelHandler that has a postprocessing function associated with it. Args: base: An implementation of the underlying model handler. postprocess_fn: the postprocessing function to use.
github-repos
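A hedged sketch of how this wrapper is usually reached from user code, via ModelHandler.with_postprocess_fn in Apache Beam's RunInference API; the sklearn handler and model path are assumptions used only for illustration and the pipeline needs a real pickled model to run.

import apache_beam as beam
import numpy as np
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy

def keep_only_prediction(result):
    # PredictionResult -> bare inference value
    return result.inference

handler = SklearnModelHandlerNumpy(model_uri='/tmp/model.pkl')  # hypothetical path
handler = handler.with_postprocess_fn(keep_only_prediction)

with beam.Pipeline() as p:
    _ = (p
         | beam.Create([np.array([1.0, 2.0]), np.array([3.0, 4.0])])
         | RunInference(handler)
         | beam.Map(print))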
def import_file_object(filename):
    try:
        handle = open(filename, 'r')
        file_obj = handle.read()
        dict_obj = json.loads(file_obj)
    except IOError as e:
        logger.critical(
            'import_file_object: %s error opening %s' % (str(e), str(filename))
        )
        raise e
    except ValueError:
        logger.info(
            '%s: import_file_object: %s not json. file object returned' %
            (inspect.stack()[0][3], str(filename))
        )
        return file_obj
    return dict_obj
Summary: Imports a file object from the block filesystem. Args: :filename (str): path to the block filesystem object, expected to contain JSON. Returns: dictionary object if the file is valid JSON; otherwise the raw file data object is returned.
juraj-google-style
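A minimal stdlib-only sketch of the same fallback behaviour, parsing JSON when possible and otherwise handing back the raw text; the file name is illustrative.

import json

def load_json_or_text(filename):
    with open(filename, 'r') as handle:
        raw = handle.read()
    try:
        return json.loads(raw)   # dict (or list) for valid JSON
    except ValueError:
        return raw               # plain text fallback

config = load_json_or_text('settings.json')  # hypothetical file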
def __init__(self, config, input_size=None):
    super().__init__()
    dim = config.hidden_size
    num_heads = config.num_attention_heads
    self.num_heads = num_heads
    head_dim = dim    self.scale = head_dim ** (-0.5)
    self.qkv = nn.Linear(dim, dim * 3, bias=config.qkv_bias)
    self.proj = nn.Linear(dim, dim)
    self.use_relative_position_embeddings = config.use_relative_position_embeddings
    if self.use_relative_position_embeddings:
        self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
        self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
Args: config (`VitDetConfig`): Model configuration. input_size (`Tuple[int]`, *optional*): Input resolution, only required in case relative position embeddings are added.
github-repos
def from_json(cls, json_data):
    if not isinstance(json_data, dict):
        json_data = json.loads(_helpers._from_bytes(json_data))
    private_key_pkcs8_pem = None
    pkcs12_val = json_data.get(_PKCS12_KEY)
    password = None
    if pkcs12_val is None:
        private_key_pkcs8_pem = json_data['_private_key_pkcs8_pem']
        signer = crypt.Signer.from_string(private_key_pkcs8_pem)
    else:
        pkcs12_val = base64.b64decode(pkcs12_val)
        password = json_data['_private_key_password']
        signer = crypt.Signer.from_string(pkcs12_val, password)
    credentials = cls(
        json_data['_service_account_email'],
        signer,
        scopes=json_data['_scopes'],
        private_key_id=json_data['_private_key_id'],
        client_id=json_data['client_id'],
        user_agent=json_data['_user_agent'],
        **json_data['_kwargs']
    )
    if private_key_pkcs8_pem is not None:
        credentials._private_key_pkcs8_pem = private_key_pkcs8_pem
    if pkcs12_val is not None:
        credentials._private_key_pkcs12 = pkcs12_val
    if password is not None:
        credentials._private_key_password = password
    credentials.invalid = json_data['invalid']
    credentials.access_token = json_data['access_token']
    credentials.token_uri = json_data['token_uri']
    credentials.revoke_uri = json_data['revoke_uri']
    token_expiry = json_data.get('token_expiry', None)
    if token_expiry is not None:
        credentials.token_expiry = datetime.datetime.strptime(
            token_expiry, client.EXPIRY_FORMAT)
    return credentials
Deserialize a JSON-serialized instance. Inverse to :meth:`to_json`. Args: json_data: dict or string, Serialized JSON (as a string or an already parsed dictionary) representing a credential. Returns: ServiceAccountCredentials from the serialized data.
juraj-google-style
def wind44(msg):
    d = hex2bin(data(msg))
    status = int(d[4])
    if (not status):
        return None
    speed = bin2int(d[5:14])
    direction = ((bin2int(d[14:23]) * 180.0) / 256.0)
    return (round(speed, 0), round(direction, 1))
Wind speed and direction. Args: msg (String): 28 bytes hexadecimal message string Returns: (int, float): speed (kt), direction (degree)
codesearchnet
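For context, a self-contained illustration of the bit slicing used above; bin2int is reimplemented here and the 56-bit data field is synthetic, not a decoded Mode S capture.

def bin2int(binstr):
    return int(binstr, 2)

# status bit at index 4, then 9 bits of wind speed, then 9 bits of direction.
d = '1' + '0' * 3 + '1' + format(120, '09b') + format(128, '09b') + '0' * 33

status = int(d[4])
speed = bin2int(d[5:14])                        # 9-bit wind speed in knots
direction = bin2int(d[14:23]) * 180.0 / 256.0   # 9 bits scaled to degrees
print(status, speed, direction)                 # 1 120 90.0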
def es_mapping(cls, base_class=None, role='rdf_class', **kwargs):
    def _prop_filter(prop, value, **kwargs):
        try:
            use_prop = len(set(value.owl_inverseOf) - parent_props) > 0
        except AttributeError:
            use_prop = True
        if prop in nested_props and use_prop:
            return True
        return False

    if not base_class:
        base_class = cls
    es_map = {}
    if kwargs.get("depth"):
        kwargs['depth'] += 1
        initial = False
    else:
        initial = True
        kwargs['depth'] = 1
        kwargs['class'] = cls.__name__
        kwargs['class_obj'] = cls
    if kwargs.get('class_obj'):
        parent_props = set(cls.properties)
    else:
        parent_props = set()
    if role == 'rdf_class':
        es_map = {}
        es_map = {prop: value.es_mapping(base_class) \
                  for prop, value in cls.properties.items()}
    elif role == 'es_Nested':
        if cls == base_class:
            nested_props = LABEL_FIELDS
        else:
            nested_props = cls.es_defs.get('kds_esNestedProps',
                                           list(cls.properties.keys()))
        es_map = {prop: value.es_mapping(base_class, **kwargs) \
                  for prop, value in cls.properties.items() \
                  if _prop_filter(prop, value, **kwargs)}
    ref_map = {
        "type": "keyword"
    }
    lower_map = {
        "type": "text",
        "fields": {
            "lower": es_idx_types['es_Lower']['lower'],
            'keyword': {'type': 'keyword'}
        }
    }
    ignore_map = {
        "index": False,
        "type": "text"
    }
    if cls == base_class:
        es_map['label'] = ref_map
        es_map['value'] = lower_map
    if cls.cls_defs.get('kds_storageType', [None])[0] != "blanknode" \
            and cls == base_class:
        es_map['id'] = ref_map
        es_map['uri'] = ref_map
    rml_procs = cls.es_defs.get("kds_esRmlProcessor", [])
    rml_procs = [proc for proc in rml_procs
                 if role == 'rdf_class' or proc['force']]
    if rml_procs:
        rml_maps = {}
        for rml in rml_procs:
            rml_maps[rml['name']] = ignore_map
        if rml_maps:
            es_map['rml_map'] = {"properties": rml_maps}
    return es_map
Returns the Elasticsearch mapping for the class args: ----- base_class: The root class being indexed role: states how the class should be mapped, depending upon whether it is used as a subject or an object. Options are 'es_Nested' or 'rdf_class'
juraj-google-style
def VerifyMaps(self, conf):
    retval = 0
    for map_name in conf.maps:
        self.log.info('Verifying map: %s.', map_name)
        if map_name == config.MAP_NETGROUP:
            self.log.info('The netgroup map does not support enumeration, skipping.')
            continue
        if map_name == config.MAP_AUTOMOUNT:
            self.log.info('The automount map does not support enumeration, skipping.')
            continue
        try:
            nss_map = nss.GetMap(map_name)
        except error.UnsupportedMap:
            self.log.warning('Verification of %s map is unsupported!', map_name)
            continue
        self.log.debug('built NSS map of %d entries', len(nss_map))
        cache_options = conf.options[map_name].cache
        cache = cache_factory.Create(cache_options, map_name)
        try:
            cache_map = cache.GetMap()
        except error.CacheNotFound:
            self.log.error('Cache missing!')
            retval += 1
            continue
        self.log.debug('built cache map of %d entries', len(cache_map))
        missing_entries = 0
        for map_entry in cache_map:
            if map_entry not in nss_map:
                self.log.info('The following entry is present in the cache but not availible via NSS! %s', map_entry.name)
                self.log.debug('missing entry data: %s', map_entry)
                missing_entries += 1
        if missing_entries > 0:
            self.log.warning('Missing %d entries in %s map', missing_entries, map_name)
            retval += 1
    return retval
Compare each configured map against data retrieved from NSS. For each configured map, build a Map object from NSS and compare it against a Map object retrieved directly from the cache. We expect the cache Map to be a subset of the nss Map due to possible inclusion of other NSS map types (e.g. files, nis, ldap, etc). This could be done via series of get*nam calls, however at this time it appears to be more efficient to grab them in bulk and use the Map.__contains__() membership test. Args: conf: nss_cache.config.Config object Returns: count of failures when verifying
github-repos
def to_string(cls, error_code):
    if error_code == cls.ILLEGAL_COMMAND:
        return 'Failed to erase sector.'
    return super(JLinkEraseErrors, cls).to_string(error_code)
Returns the string message for the given ``error_code``. Args: cls (JLinkEraseErrors): the ``JLinkEraseErrors`` class error_code (int): error code to convert Returns: An error string corresponding to the error code. Raises: ValueError: if the error code is invalid.
juraj-google-style
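A minimal standalone sketch of the same pattern, in which a subclass maps its own codes to messages and defers everything else to its parent; the class names and code values here are invented for illustration.

class Errors(object):
    UNKNOWN = -1

    @classmethod
    def to_string(cls, error_code):
        return 'Unspecified error.'

class EraseErrors(Errors):
    ILLEGAL_COMMAND = -5

    @classmethod
    def to_string(cls, error_code):
        if error_code == cls.ILLEGAL_COMMAND:
            return 'Failed to erase sector.'
        return super(EraseErrors, cls).to_string(error_code)

print(EraseErrors.to_string(-5))   # Failed to erase sector.
print(EraseErrors.to_string(-1))   # Unspecified error.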
def __init__(self, session_creator, hooks, should_recover, stop_grace_period_secs=120):
    self._graph_was_finalized = ops.get_default_graph().finalized
    self._hooks = hooks or []
    for h in self._hooks:
        h.begin()
    worker_context = distribute_coordinator_context.get_current_worker_context()
    if not session_creator and worker_context:
        session_creator = worker_context.session_creator()
    self._coordinated_creator = self._CoordinatedSessionCreator(
        session_creator=session_creator or ChiefSessionCreator(),
        hooks=self._hooks,
        stop_grace_period_secs=stop_grace_period_secs)
    if should_recover:
        self._sess = _RecoverableSession(self._coordinated_creator)
    else:
        self._sess = self._coordinated_creator.create_session()
Sets up a Monitored or Hooked Session. Args: session_creator: A factory object to create a session. Typically a `ChiefSessionCreator` or a `WorkerSessionCreator`. hooks: An iterable of `SessionRunHook` objects. should_recover: A bool. Indicates whether to recover from `AbortedError` and `UnavailableError` or not. stop_grace_period_secs: Number of seconds given to threads to stop after `close()` has been called.
github-repos
def get(self, key, mem_map=True):
    self.raise_error_if_not_open()
    if key in self._file:
        data = self._file[key]
        if not mem_map:
            data = data[()]
        return data
    else:
        return None
Read and return the data stored for the given key. Args: key (str): The key to read the data from. mem_map (bool): If ``True`` returns the data as memory-mapped array, otherwise a copy is returned. Note: The container has to be opened in advance. Returns: numpy.ndarray: The stored data.
juraj-google-style
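A hedged sketch of the memory-mapping distinction using plain h5py, which this container appears to build on; the file name and key are made up. Indexing the file returns a lazy Dataset that reads on demand, while [()] copies the whole dataset into memory, which is what the mem_map flag toggles above.

import h5py
import numpy as np

with h5py.File('container.h5', 'w') as f:            # hypothetical file
    f.create_dataset('utt-1', data=np.arange(10.0))

with h5py.File('container.h5', 'r') as f:
    lazy = f['utt-1']            # h5py Dataset, reads on demand
    in_memory = f['utt-1'][()]   # full numpy copy
    print(type(lazy), type(in_memory), in_memory[:3])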
def setFilter(self, search):
    if (not isinstance(search, DataSearch)):
        raise TypeError('The given parameter must an `qtpandas.DataSearch` object')
    self._search = search
    self.layoutAboutToBeChanged.emit()
    if (self._dataFrameOriginal is not None):
        self._dataFrame = self._dataFrameOriginal
    self._dataFrameOriginal = self._dataFrame.copy()
    self._search.setDataFrame(self._dataFrame)
    (searchIndex, valid) = self._search.search()
    if valid:
        self._dataFrame = self._dataFrame[searchIndex]
        self.layoutChanged.emit()
    else:
        self.clearFilter()
        self.layoutChanged.emit()
    self.dataFrameChanged.emit()
Apply a filter and hide rows. The filter must be a `DataSearch` object, which evaluates a python expression. If there was an error while parsing the expression, the data will remain unfiltered. Args: search(qtpandas.DataSearch): data search object to use. Raises: TypeError: An error is raised, if the given parameter is not a `DataSearch` object.
codesearchnet
def check_dihedral(self, construction_table):
    c_table = construction_table
    angles = self.get_angle_degrees(c_table.iloc[3:, :].values)
    problem_index = np.nonzero((175 < angles) | (angles < 5))[0]
    rename = dict(enumerate(c_table.index[3:]))
    problem_index = [rename[i] for i in problem_index]
    return problem_index
Checks if the dihedral-defining atom is collinear. Checks for each index starting from the third row of the ``construction_table``, if the reference atoms are collinear. Args: construction_table (pd.DataFrame): Returns: list: A list of problematic indices.
juraj-google-style
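A standalone numpy sketch of the collinearity test used above, flagging any reference angle within 5 degrees of 0 or 180; the angle values are made up.

import numpy as np

angles = np.array([92.0, 178.6, 3.1, 120.0])
problem_positions = np.nonzero((175 < angles) | (angles < 5))[0]
print(problem_positions)   # [1 2]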
def _AddTokenOnNewline(self, dry_run, must_split):
    current = self.next_token
    previous = current.previous_token
    self.column = self._GetNewlineColumn()
    if not dry_run:
        indent_level = self.line.depth
        spaces = self.column
        if spaces:
            spaces -= indent_level * style.Get('INDENT_WIDTH')
        current.AddWhitespacePrefix(newlines_before=1, spaces=spaces, indent_level=indent_level)
    if not current.is_comment:
        self.stack[-1].last_space = self.column
    self.lowest_level_on_line = self.paren_level
    if previous.OpensScope() or (previous.is_comment and previous.previous_token is not None and previous.previous_token.OpensScope()):
        dedent = (style.Get('CONTINUATION_INDENT_WIDTH'), 0)[style.Get('INDENT_CLOSING_BRACKETS')]
        self.stack[-1].closing_scope_indent = max(0, self.stack[-1].indent - dedent)
        self.stack[-1].split_before_closing_bracket = True
    penalty = current.split_penalty
    if must_split:
        return penalty
    if previous.is_pseudo and previous.value == '(':
        penalty += 50
    if current.value not in {'if', 'for'}:
        last = self.stack[-1]
        last.num_line_splits += 1
        penalty += style.Get('SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT') * last.num_line_splits
    if current.OpensScope() and previous.OpensScope():
        pprev = previous.previous_token
        if not pprev or not pprev.is_name:
            penalty += 10
    return penalty + 10
Adds a line break and necessary indentation. Appends the next token to the state and updates information necessary for indentation. Arguments: dry_run: (bool) Don't commit whitespace changes to the FormatToken if True. must_split: (bool) A newline was required before this token. Returns: The split penalty for splitting after the current state.
github-repos
def from_string(key, password=b'notasecret'):
    key = _helpers._to_bytes(key)
    parsed_pem_key = _helpers._parse_pem_key(key)
    if parsed_pem_key:
        pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, parsed_pem_key)
    else:
        password = _helpers._to_bytes(password, encoding='utf-8')
        pkey = crypto.load_pkcs12(key, password).get_privatekey()
    return OpenSSLSigner(pkey)
Construct a Signer instance from a string. Args: key: string, private key in PKCS12 or PEM format. password: string, password for the private key file. Returns: Signer instance. Raises: OpenSSL.crypto.Error if the key can't be parsed.
juraj-google-style
def UploadSignedConfigBlob(content, aff4_path, client_context=None, limit=None, token=None):
    if limit is None:
        limit = config.CONFIG["Datastore.maximum_blob_size"]
    if client_context is None:
        client_context = ["Platform:Windows", "Client Context"]
    config.CONFIG.Validate(
        parameters="PrivateKeys.executable_signing_private_key")
    signing_key = config.CONFIG.Get(
        "PrivateKeys.executable_signing_private_key", context=client_context)
    verification_key = config.CONFIG.Get(
        "Client.executable_signing_public_key", context=client_context)
    signed_binary_utils.WriteSignedBinary(
        rdfvalue.RDFURN(aff4_path),
        content,
        signing_key,
        public_key=verification_key,
        chunk_size=limit,
        token=token)
    logging.info("Uploaded to %s", aff4_path)
Upload a signed blob into the datastore. Args: content: File content to upload. aff4_path: aff4 path to upload to. client_context: The configuration contexts to use. limit: The maximum size of the chunk to use. token: A security token. Raises: IOError: On failure to write.
juraj-google-style
def helper_list(access_token, oid, path):
    if oid != "":
        path = ''.join([path, "('", oid, "')"])
    endpoint = ''.join([ams_rest_endpoint, path])
    return do_ams_get(endpoint, path, access_token)
Helper Function to list a URL path. Args: access_token (str): A valid Azure authentication token. oid (str): An OID. path (str): A URL Path. Returns: HTTP response. JSON body.
juraj-google-style
def run(self, fn, args=None, kwargs=None):
    _check_initialization()
    multi_process_lib.Process()
    if self._runner is None:
        self._start()
    fn = dill.dumps(fn, dill.HIGHEST_PROTOCOL)
    for conn in self._conn.values():
        conn.send((fn, args or [], kwargs or {}))
    process_statuses = []
    for (task_type, task_id), conn in self._conn.items():
        logging.info('Waiting for the result from %s-%d', task_type, task_id)
        try:
            process_statuses.append(conn.recv())
        except EOFError:
            self.shutdown()
            raise RuntimeError('Unexpected EOF. Worker process may have died. Please report a bug')
    return_values = []
    for process_status in process_statuses:
        assert isinstance(process_status, _ProcessStatusInfo)
        if not process_status.is_successful:
            six.reraise(*process_status.exc_info)
        if process_status.return_value is not None:
            return_values.append(process_status.return_value)
    return return_values
Runs `fn` with `args` and `kwargs` on all jobs. Args: fn: The function to be run. args: Optional positional arguments to be supplied in `fn`. kwargs: Optional keyword arguments to be supplied in `fn`. Returns: A list of return values.
github-repos
def __init__(self, num_experts, gates):
    self._gates = gates
    self._num_experts = num_experts
    where = tf.to_int32(tf.where(tf.transpose(gates) > 0))
    self._expert_index, self._batch_index = tf.unstack(where, num=2, axis=1)
    self._part_sizes_tensor = tf.reduce_sum(tf.to_int32(gates > 0), [0])
    self._nonzero_gates = tf.gather(
        tf.reshape(self._gates, [-1]),
        self._batch_index * num_experts + self._expert_index)
Create a SparseDispatcher. Args: num_experts: an integer. gates: a `Tensor` of shape `[batch_size, num_experts]`. Returns: a SparseDispatcher
juraj-google-style
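A numpy illustration of the bookkeeping above, using a made-up 3x2 gates matrix (batch_size=3, num_experts=2). Transposing before the nonzero test groups positions by expert, which is what lets each expert later receive one contiguous slice of the batch.

import numpy as np

gates = np.array([[0.7, 0.0],
                  [0.0, 0.3],
                  [0.5, 0.5]])
expert_index, batch_index = np.nonzero(gates.T > 0)
part_sizes = (gates > 0).sum(axis=0)
nonzero_gates = gates.reshape(-1)[batch_index * gates.shape[1] + expert_index]
print(expert_index)   # [0 0 1 1]
print(batch_index)    # [0 2 1 2]
print(part_sizes)     # [2 2]
print(nonzero_gates)  # [0.7 0.5 0.3 0.5]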