Dataset columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def custom_apply(self, path: utils.KeyPath, value_spec: pg_typing.ValueSpec, allow_partial: bool, child_transform: Optional[Callable[[utils.KeyPath, pg_typing.Field, Any], Any]]=None) -> Tuple[bool, 'List']: proceed_with_standard_apply = True if self._value_spec: if value_spec and (not value_spec.is_compatible(self._value_spec)): raise ValueError(utils.message_on_path(f'List (spec={self._value_spec!r}) cannot be assigned to an incompatible field (spec={value_spec!r}).', path)) if self._allow_partial == allow_partial: proceed_with_standard_apply = False else: self._allow_partial = allow_partial elif isinstance(value_spec, pg_typing.List): self._value_spec = value_spec return (proceed_with_standard_apply, self)
Implement pg.typing.CustomTyping interface. Args: path: KeyPath of current object. value_spec: Origin value spec of the field. allow_partial: Whether to allow a partial object to be created. child_transform: Function to transform child node values in dict_obj into their final values. Transform function is called on leaf nodes first, then on their containers, recursively. Returns: A tuple (proceed_with_standard_apply, transformed value)
github-repos
def Lookup(self, name): if not self._name2item: self._InitCache() return self._name2item[name]
Convenience function: Look up a given name in the class namespace. Tries to find a method or constant by this name in the class. Args: name: Name to look up. Returns: A Constant or Function instance. Raises: KeyError: if this identifier doesn't exist in this class.
github-repos
def register_validator(flag_name, checker, message='Flag validation failed', flag_values=_flagvalues.FLAGS): v = SingleFlagValidator(flag_name, checker, message) _add_validator(flag_values, v)
Adds a constraint, which will be enforced during program execution. The constraint is validated when flags are initially parsed, and after each change of the corresponding flag's value. Args: flag_name: str, name of the flag to be checked. checker: callable, a function to validate the flag. input - A single positional argument: The value of the corresponding flag (string, boolean, etc. This value will be passed to checker by the library). output - bool, True if validator constraint is satisfied. If constraint is not satisfied, it should either return False or raise flags.ValidationError(desired_error_message). message: str, error text to be shown to the user if checker returns False. If checker raises flags.ValidationError, message from the raised error will be shown. flag_values: flags.FlagValues, optional FlagValues instance to validate against. Raises: AttributeError: Raised when flag_name is not registered as a valid flag name.
codesearchnet
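An illustrative usage sketch for register_validator via the public absl.flags API; the flag name and range below are assumptions, not part of the original snippet.

from absl import flags

flags.DEFINE_integer('port', 8080, 'Port to listen on.')

def _port_in_range(value):
    # True if the constraint holds; returning False triggers the message below.
    return 1024 <= value <= 65535

flags.register_validator('port', _port_in_range,
                         message='--port must be between 1024 and 65535')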
def copy_raw_block(self): ctable = [] (r, c) = (0, 0) try: for row_index in range(self.start[0], self.end[0]): r = row_index row = [] ctable.append(row) for column_index in range(self.start[1], self.end[1]): c = column_index row.append(self.table[row_index][column_index]) except IndexError: raise InvalidBlockError(('Missing table element at [%d, %d]' % (r, c))) return ctable
Copies the block as it was originally specified by start and end into a new table. Returns: A copy of the block with no block transformations.
codesearchnet
def decode_row(line, fields=None): cols = line.rstrip('\n').split(_field_delimiter) cols = list(map(unescape, cols)) if (fields is not None): if (len(cols) != len(fields)): raise ItsdbError('Wrong number of fields: {} != {}'.format(len(cols), len(fields))) for i in range(len(cols)): col = cols[i] if col: field = fields[i] col = _cast_to_datatype(col, field) cols[i] = col return cols
Decode a raw line from a profile into a list of column values. Decoding involves splitting the line by the field delimiter (`"@"` by default) and unescaping special characters. If *fields* is given, cast the values into the datatype given by their respective Field object. Args: line: a raw line from a [incr tsdb()] profile. fields: a list or Relation object of Fields for the row Returns: A list of column values.
codesearchnet
def quantize_flow(flow, max_val=0.02, norm=True): h, w, _ = flow.shape dx = flow[..., 0] dy = flow[..., 1] if norm: dx = dx / w dy = dy / h flow_comps = [ quantize(d, -max_val, max_val, 255, np.uint8) for d in [dx, dy] ] return tuple(flow_comps)
Quantize flow to [0, 255]. After this step, the size of flow will be much smaller, and can be dumped as jpeg images. Args: flow (ndarray): (h, w, 2) array of optical flow. max_val (float): Maximum value of flow, values beyond [-max_val, max_val] will be truncated. norm (bool): Whether to divide flow values by image width/height. Returns: tuple[ndarray]: Quantized dx and dy.
juraj-google-style
def fpn_map_rois_to_levels(boxes): sqrtarea = tf.sqrt(tf_area(boxes)) level = tf.cast(tf.floor((4 + (tf.log(((sqrtarea * (1.0 / 224)) + 1e-06)) * (1.0 / np.log(2))))), tf.int32) level_ids = [tf.where((level <= 2)), tf.where(tf.equal(level, 3)), tf.where(tf.equal(level, 4)), tf.where((level >= 5))] level_ids = [tf.reshape(x, [(- 1)], name='roi_level{}_id'.format((i + 2))) for (i, x) in enumerate(level_ids)] num_in_levels = [tf.size(x, name='num_roi_level{}'.format((i + 2))) for (i, x) in enumerate(level_ids)] add_moving_summary(*num_in_levels) level_boxes = [tf.gather(boxes, ids) for ids in level_ids] return (level_ids, level_boxes)
Assign boxes to level 2~5. Args: boxes (nx4): Returns: [tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level. [tf.Tensor]: 4 tensors, the gathered boxes in each level. Be careful that the returned tensor could be empty.
codesearchnet
def nb_ll(data, P, R): genes, cells = data.shape clusters = P.shape[1] lls = np.zeros((cells, clusters)) for c in range(clusters): P_c = P[:,c].reshape((genes, 1)) R_c = R[:,c].reshape((genes, 1)) ll = gammaln(R_c + data) - gammaln(R_c) ll += data*np.log(P_c) + xlog1py(R_c, -P_c) lls[:,c] = ll.sum(0) return lls
Returns the negative binomial log-likelihood of the data. Args: data (array): genes x cells P (array): NB success probability param - genes x clusters R (array): NB stopping param - genes x clusters Returns: cells x clusters array of log-likelihoods
juraj-google-style
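A small shape-check sketch for nb_ll, assuming the function above and its scipy.special helpers (gammaln, xlog1py) are in scope; the array sizes are illustrative.

import numpy as np
from scipy.special import gammaln, xlog1py  # used inside nb_ll

genes, cells, clusters = 5, 10, 3
data = np.random.poisson(2.0, size=(genes, cells))
P = np.random.uniform(0.1, 0.9, size=(genes, clusters))
R = np.random.uniform(1.0, 5.0, size=(genes, clusters))
lls = nb_ll(data, P, R)   # negative binomial log-likelihoods
print(lls.shape)          # (10, 3) -> cells x clusters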
def set_style(self, column, style): column_idx = None while len(self.headers) > len(self.__style_list): self.__style_list.append(None) if isinstance(column, six.integer_types): column_idx = column elif isinstance(column, six.string_types): try: column_idx = self.headers.index(column) except ValueError: pass if column_idx is not None: self.__style_list[column_idx] = style self.__clear_preprocess() self._dp_extractor.format_flags_list = [ _ts_to_flag[self.__get_thousand_separator(col_idx)] for col_idx in range(len(self.__style_list)) ] return raise ValueError("column must be an int or string: actual={}".format(column))
Set |Style| for a specific column. Args: column (|int| or |str|): Column specifier. column index or header name correlated with the column. style (|Style|): Style value to be set to the column. Raises: ValueError: If the column specifier is invalid.
juraj-google-style
def run_without_tensor_float_32(description: str) -> Callable[[Callable[..., Any]], Callable[..., None]]: def decorator(f: Callable[..., Any]) -> Callable[..., None]: @functools.wraps(f) def decorated(*args, **kwargs): allowed = config.tensor_float_32_execution_enabled() try: config.enable_tensor_float_32_execution(False) f(*args, **kwargs) finally: config.enable_tensor_float_32_execution(allowed) return tf_decorator.make_decorator(f, decorated) return decorator
Execute test with TensorFloat-32 disabled. While almost every real-world deep learning model runs fine with TensorFloat-32, many tests use assertAllClose or similar methods. TensorFloat-32 matmuls typically will cause such methods to fail with the default tolerances. Args: description: A description used for documentation purposes, describing why the test requires TensorFloat-32 to be disabled. Returns: Decorator which runs a test with TensorFloat-32 disabled.
github-repos
def _find_longest_parent_path(path_set, path): while (path not in path_set): if (not path): return None path = os.path.dirname(path) return path
Finds the longest "parent-path" of 'path' in 'path_set'. This function takes and returns "path-like" strings, which are strings made of strings separated by os.sep. No file access is performed here, so these strings need not correspond to actual files in some file system. This function returns the longest ancestor path. For example, for path_set=["/foo/bar", "/foo", "/bar/foo"] and path="/foo/bar/sub_dir", returns "/foo/bar". Args: path_set: set of path-like strings -- e.g. a list of strings separated by os.sep. No actual disk-access is performed here, so these need not correspond to actual files. path: a path-like string. Returns: The element in path_set which is the longest parent directory of 'path'.
codesearchnet
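A quick check of the lookup, mirroring the docstring's own example; it assumes the function above is in scope.

path_set = {"/foo/bar", "/foo", "/bar/foo"}
print(_find_longest_parent_path(path_set, "/foo/bar/sub_dir"))  # /foo/bar
print(_find_longest_parent_path(path_set, "/foo/baz"))          # /foo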
def profile(self, num): baseuri = self._BASE_URI + "company/{}".format(num) res = self.session.get(baseuri) self.handle_http_error(res) return res
Search for company profile by company number. Args: num (str): Company number to search on.
juraj-google-style
def from_json(cls, data): required_keys = ('name', 'day_type', 'location', 'dry_bulb_condition', 'humidity_condition', 'wind_condition', 'sky_condition') for key in required_keys: assert key in data, 'Required key "{}" is missing!'.format(key) return cls(data['name'], data['day_type'], Location.from_json(data['location']), DryBulbCondition.from_json(data['dry_bulb_condition']), HumidityCondition.from_json(data['humidity_condition']), WindCondition.from_json(data['wind_condition']), SkyCondition.from_json(data['sky_condition']))
Create a Design Day from a dictionary. Args: data = { "name": string, "day_type": string, "location": ladybug Location schema, "dry_bulb_condition": ladybug DryBulbCondition schema, "humidity_condition": ladybug HumidityCondition schema, "wind_condition": ladybug WindCondition schema, "sky_condition": ladybug SkyCondition schema}
juraj-google-style
def request_stop(self, ex=None): with self._lock: ex = self._filter_exception(ex) if self._joined: if isinstance(ex, tuple): _, ex_instance, _ = ex raise ex_instance elif ex is not None: _, ex_instance, _ = sys.exc_info() raise ex_instance if not self._stop_event.is_set(): if ex and self._exc_info_to_raise is None: if isinstance(ex, tuple): logging.info('Error reported to Coordinator: %s', compat.as_str_any(ex[1]), exc_info=ex) self._exc_info_to_raise = ex else: logging.info('Error reported to Coordinator: %s, %s', type(ex), compat.as_str_any(ex)) self._exc_info_to_raise = sys.exc_info() if len(self._exc_info_to_raise) != 3 or not self._exc_info_to_raise[0] or (not self._exc_info_to_raise[1]): try: raise ValueError('ex must be a tuple or sys.exc_info must return the current exception: %s' % self._exc_info_to_raise) except ValueError: self._exc_info_to_raise = sys.exc_info() self._stop_event.set()
Request that the threads stop. After this is called, calls to `should_stop()` will return `True`. Note: If an exception is being passed in, it must be in the context of handling the exception (i.e. `try: ... except Exception as ex: ...`) and not a newly created one. Args: ex: Optional `Exception`, or Python `exc_info` tuple as returned by `sys.exc_info()`. If this is the first call to `request_stop()` the corresponding exception is recorded and re-raised from `join()`.
github-repos
def _on_channel_close(self, channel, reply_code_or_reason, reply_text=None): if isinstance(reply_code_or_reason, pika_errs.ChannelClosed): reply_code = reply_code_or_reason.reply_code reply_text = reply_code_or_reason.reply_text elif isinstance(reply_code_or_reason, int): reply_code = reply_code_or_reason else: reply_code = 0 reply_text = str(reply_code_or_reason) _log.info("Channel %r closed (%d): %s", channel, reply_code, reply_text) self._channel = None
Callback invoked when the channel is closed. Args: channel (pika.channel.Channel): The channel that got closed. reply_code_or_reason (int|Exception): The reason why the channel was closed. In older versions of pika, this is the AMQP code. reply_text (str): The human-readable reason for the channel's closure (only in older versions of pika).
juraj-google-style
def Current(): return Architecture._MACHINE_TO_ARCHITECTURE.get(platform.machine().lower())
Determines the current system architecture. Returns: ArchitectureTuple, One of the Architecture constants or None if it cannot be determined.
github-repos
def loadnetcdf(filename, copy=True): filename = str(Path(filename).expanduser()) if copy: dataarray = xr.open_dataarray(filename).copy() else: dataarray = xr.open_dataarray(filename, chunks={}) if (dataarray.name is None): dataarray.name = filename.rstrip('.nc') for (key, val) in dataarray.coords.items(): if (val.dtype.kind == 'S'): dataarray[key] = val.astype('U') elif (val.dtype == np.int32): dataarray[key] = val.astype('i8') return dataarray
Load a dataarray from a NetCDF file. Args: filename (str): Filename (*.nc). copy (bool): If True, dataarray is copied in memory. Default is True. Returns: dataarray (xarray.DataArray): Loaded dataarray.
codesearchnet
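A round-trip sketch, assuming the function above is in scope together with its imports (pathlib.Path, numpy, xarray) and that a NetCDF backend such as netCDF4 is installed; the filename is illustrative.

import numpy as np
import xarray as xr

xr.DataArray(np.arange(6).reshape(2, 3), name="demo").to_netcdf("demo.nc")
da = loadnetcdf("demo.nc", copy=True)
print(da.name, da.shape)  # demo (2, 3)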
def _find_docstring_line_for_no_body(self, start): tracked = sorted(list(self._tokenized_triple_quotes.keys())) for i in tracked: if (min(start, i) == start): return i return None
Find the docstring associated with a definition with no body in the node. In these cases, the provided start and end line numbers for that element are the same, so we must get the docstring based on the sequential position of known docstrings. Args: start: the row where the class / function starts. Returns: int: the row number where the docstring is found.
codesearchnet
def get(self, key=None): if key: key = ub_to_str(key) if settings.ENABLE_CACHING: return self.get_from_cache(key) or self.set_to_cache(self._get_from_riak(key)) else: return self._get_from_riak(key) else: self._exec_query() if not self._solr_cache['docs']: raise ObjectDoesNotExist("%s %s" % (self.index_name, self.compiled_query)) if self.count() > 1: raise MultipleObjectsReturned( "%s objects returned for %s" % (self.count(), self._model_class.__name__)) return self._get_from_riak(self._solr_cache['docs'][0]['_yz_rk'])
If key is not None, tries to get obj from cache first. If not found, tries to get from riak and sets to cache. If key is None, then executes the solr query and checks the result. Returns obj data and key tuple or raises exception ObjectDoesNotExist or MultipleObjectsReturned. Args: key(str): obj key Return: (tuple): obj data dict, obj key
juraj-google-style
def serialize_to_normalized_compact_json(py_obj): return json.dumps( py_obj, sort_keys=True, separators=(',', ':'), cls=ToJsonCompatibleTypes )
Serialize a native object to normalized, compact JSON. The JSON string is normalized by sorting any dictionary keys. It will be on a single line without whitespace between elements. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. Returns: str: normalized, compact JSON string.
juraj-google-style
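The normalization behaviour can be reproduced with the standard json module alone; ToJsonCompatibleTypes is a project-specific encoder (for datetimes and similar), so it is omitted in this approximation.

import json

py_obj = {"b": 2, "a": [1, 2, 3]}
compact = json.dumps(py_obj, sort_keys=True, separators=(',', ':'))
print(compact)  # {"a":[1,2,3],"b":2}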
def id_pools_vsn_ranges(self): if (not self.__id_pools_vsn_ranges): self.__id_pools_vsn_ranges = IdPoolsRanges('vsn', self.__connection) return self.__id_pools_vsn_ranges
Gets the IdPoolsRanges API Client for VSN Ranges. Returns: IdPoolsRanges:
codesearchnet
def percent_point(self, U): self.check_fit() return scipy.optimize.brentq(self._brentq_cdf(U), -1000.0, 1000.0)
Given a cdf value, returns a value in original space. Args: U(numpy.array): cdf values in [0,1] Returns: numpy.array: value in original space
juraj-google-style
def attribute(self, attribute_id, action='GET', params=None): if params is None: params = {} if not self.can_update(): self._tcex.handle_error(910, [self.type]) if action == 'GET': return self.tc_requests.get_attribute( self.api_type, self.api_sub_type, self.unique_id, attribute_id, owner=self.owner, params=params, ) if action == 'DELETE': return self.tc_requests.delete_attribute( self.api_type, self.api_sub_type, self.unique_id, attribute_id, owner=self.owner ) self._tcex.handle_error(925, ['action', 'attribute', 'action', 'action', action]) return None
Gets the attribute from a Group/Indicator or Victim Args: action: params: attribute_id: Returns: attribute json
juraj-google-style
def makeDoubleLinked(dom, parent=None): dom.parent = parent for child in dom.childs: child.parent = dom makeDoubleLinked(child, dom)
Standard output from `dhtmlparser` is a single-linked tree. This will make it double-linked. Args: dom (obj): :class:`.HTMLElement` instance. parent (obj, default None): Don't use this, it is used in recursive calls.
codesearchnet
def clone(self, to_namespace, to_name): r = fapi.clone_workspace(self.namespace, self.name, to_namespace, to_name, self.api_url) fapi._check_response_code(r, 201) return Workspace(to_namespace, to_name, self.api_url)
Clone this workspace. Args: to_namespace (str): Target workspace namespace to_name (str): Target workspace name
juraj-google-style
def project_surface(surface, angle=DEFAULT_ANGLE): z_coef = np.sin(np.radians(angle)) y_coef = np.cos(np.radians(angle)) (surface_height, surface_width) = surface.shape slope = np.tile(np.linspace(0.0, 1.0, surface_height), [surface_width, 1]).T return ((slope * y_coef) + (surface * z_coef))
Returns the height of the surface when projected at the given angle. Args: surface (surface): the surface to project angle (float): the angle at which to project the surface Returns: surface: A projected surface.
codesearchnet
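A minimal numpy sketch of the projection, assuming the function above is in scope; DEFAULT_ANGLE is not shown in the snippet, so an explicit angle is passed.

import numpy as np

surface = np.random.rand(32, 48)                # heights in [0, 1]
projected = project_surface(surface, angle=30.0)
print(projected.shape)                          # (32, 48)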
def extract_paths(self, paths, ignore_nopath): try: if self._has_tar_and_gzip(): self._extract_paths_tar_gz(paths, ignore_nopath) else: self._extract_paths_scp(paths, ignore_nopath) except (ssh.LagoSSHTimeoutException, LagoVMNotRunningError): raise ExtractPathError( 'Unable to extract paths from {0}: unreachable with SSH'. format(self.vm.name()) )
Extract the given paths from the domain Args: paths(list of str): paths to extract ignore_nopath(boolean): if True, will ignore non-existing paths. Returns: None Raises: :exc:`~lago.plugins.vm.ExtractPathNoPathError`: if a non-existing path was found on the VM and ``ignore_nopath`` is False. :exc:`~lago.plugins.vm.ExtractPathError`: on all other failures.
juraj-google-style
def check_or_generate_pyi(options) -> AnalysisResult: loader = load_pytd.create_loader(options) compiler_error = None other_error_info = '' src = '' try: src = read_source_file(options.input, options.open_function) if options.check: ctx = check_py(src=src, options=options, loader=loader).context ast, result = (None, None) else: ret, result = generate_pyi(src=src, options=options, loader=loader) ctx = ret.context ast = ret.ast except utils.UsageError: raise except pyc.CompileError as e: compiler_error = (options.input, e.line, e.error) except constant_folding.ConstantError as e: compiler_error = (options.input, e.lineno, e.message) except IndentationError as e: compiler_error = (options.input, e.lineno, e.msg) except libcst.ParserSyntaxError as e: compiler_error = (options.input, e.raw_line, e.message) except SyntaxError as e: compiler_error = (options.input, e.lineno, e.msg) except directors.SkipFileError: other_error_info = '  # skip-file found, file not analyzed' except Exception as e: if options.nofail: log.warning('***Caught exception: %s', str(e), exc_info=True) if not options.check: other_error_info = '  # Caught error in pytype: ' + str(e).replace('\n', '\n# ') else: prefix = str(e.args[0]) if e.args else '' e.args = (f'{prefix}\nFile: {options.input}',) + e.args[1:] raise else: return AnalysisResult(ctx, ast, result) ctx = context.Context(options, loader, src=src) if compiler_error: ctx.errorlog.python_compiler_error(*compiler_error) ast = pytd_builtins.GetDefaultAst(parser.PyiOptions.from_toplevel_options(options)) result = pytd_builtins.DEFAULT_SRC + other_error_info return AnalysisResult(ctx, ast, result)
Returns results from running pytype. Args: options: config.Options object. Returns: An AnalysisResult.
github-repos
def reports_progress(reporter): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): with progress_reporter(reporter): return func(*args, **kwargs) return wrapper return decorator
A decorator factory to mark functions which report progress. Args: reporter: A zero-argument callable to report progress. The callable provided should have the means to both retrieve and display current progress information.
codesearchnet
def get_hash(self, handle): handle = os.path.expanduser(os.path.expandvars(handle)) with open(self._prefixed('%s.hash' % handle)) as f: return f.read()
Returns the associated hash for the given handle, the hash file must exist (``handle + '.hash'``). Args: handle (str): Path to the template to get the hash from Returns: str: Hash for the given handle
juraj-google-style
def decode(self, spec, encoded_value): raise NotImplementedError(f'{type(self).__name__}.decode')
Decodes `value` from a batchable tensor encoding. Args: spec: The TypeSpec for the result value. If encoded values with spec `s` were batched, then `spec` should be `s.batch(batch_size)`; or if encoded values with spec `s` were unbatched, then `spec` should be `s.unbatch()`. encoded_value: A nest of values returned by `encode`; or a nest of values that was formed by stacking, unstacking, or concatenating the corresponding elements of values returned by `encode`. Returns: A value compatible with `type_spec`.
github-repos
def download_patric_genomes(self, ids, force_rerun=False): ids = ssbio.utils.force_list(ids) counter = 0 log.info('Downloading sequences from PATRIC...') for patric_id in tqdm(ids): f = ssbio.databases.patric.download_coding_sequences(patric_id=patric_id, seqtype='protein', outdir=self.sequences_by_organism_dir, force_rerun=force_rerun) if f: self.load_strain(patric_id, f) counter += 1 log.debug('{}: downloaded sequence'.format(patric_id)) else: log.warning('{}: unable to download sequence'.format(patric_id)) log.info('Created {} new strain GEM-PROs, accessible at "strains" attribute'.format(counter))
Download genome files from PATRIC given a list of PATRIC genome IDs and load them as strains. Args: ids (str, list): PATRIC ID or list of PATRIC IDs force_rerun (bool): If genome files should be downloaded again even if they exist
codesearchnet
def to_string(cls, error_code): if error_code == cls.EMU_NO_CONNECTION: return 'No connection to emulator.' elif error_code == cls.EMU_COMM_ERROR: return 'Emulator connection error.' elif error_code == cls.DLL_NOT_OPEN: return 'DLL has not been opened. Did you call \'.connect()\'?' elif error_code == cls.VCC_FAILURE: return 'Target system has no power.' elif error_code == cls.INVALID_HANDLE: return 'Given file / memory handle is invalid.' elif error_code == cls.NO_CPU_FOUND: return 'Could not find supported CPU.' elif error_code == cls.EMU_FEATURE_UNSUPPORTED: return 'Emulator does not support the selected feature.' elif error_code == cls.EMU_NO_MEMORY: return 'Emulator out of memory.' elif error_code == cls.TIF_STATUS_ERROR: return 'Target interface error.' elif error_code == cls.FLASH_PROG_COMPARE_FAILED: return 'Programmed data differs from source data.' elif error_code == cls.FLASH_PROG_PROGRAM_FAILED: return 'Programming error occured.' elif error_code == cls.FLASH_PROG_VERIFY_FAILED: return 'Error while verifying programmed data.' elif error_code == cls.OPEN_FILE_FAILED: return 'Specified file could not be opened.' elif error_code == cls.UNKNOWN_FILE_FORMAT: return 'File format of selected file is not supported.' elif error_code == cls.WRITE_TARGET_MEMORY_FAILED: return 'Could not write target memory.' elif error_code == cls.DEVICE_FEATURE_NOT_SUPPORTED: return 'Feature not supported by connected device.' elif error_code == cls.WRONG_USER_CONFIG: return 'User configured DLL parameters incorrectly.' elif error_code == cls.NO_TARGET_DEVICE_SELECTED: return 'User did not specify core to connect to.' elif error_code == cls.CPU_IN_LOW_POWER_MODE: return 'Target CPU is in low power mode.' elif error_code == cls.UNSPECIFIED_ERROR: return 'Unspecified error.' raise ValueError('Invalid error code: %d' % error_code)
Returns the string message for the given ``error_code``. Args: cls (JlinkGlobalErrors): the ``JLinkGlobalErrors`` class error_code (int): error code to convert Returns: An error string corresponding to the error code. Raises: ValueError: if the error code is invalid.
juraj-google-style
def _get_upload_cmd(self, mirror=False): if mirror: dest_uri = self.s3_mirror_uri else: dest_uri = self.s3_version_uri cmd = 'aws s3 sync {} {} --delete --exact-timestamps --profile {}'.format(self.artifact_path, dest_uri, self.env) return cmd
Generate the S3 CLI upload command Args: mirror (bool): If true, uses a flat directory structure instead of nesting under a version. Returns: str: The full CLI command to run.
codesearchnet
def info(self, server_id): result = self._storage[server_id].info() result['id'] = server_id return result
Return a dictionary object with info about the server. Args: server_id - server identity
juraj-google-style
def unreferenced_vert(script): if script.ml_version == '1.3.4BETA': filter_xml = ' <filter name="Remove Unreferenced Vertex"/>\n' else: filter_xml = ' <filter name="Remove Unreferenced Vertices"/>\n' util.write_filter(script, filter_xml) return None
Check every vertex on the mesh: if it is NOT referenced by a face, remove it. Args: script: the FilterScript object or script filename to write the filter to. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
juraj-google-style
def avg_branch_length(self, terminal=True, internal=True): if (not isinstance(terminal, bool)): raise TypeError('terminal must be a bool') if (not isinstance(internal, bool)): raise TypeError('internal must be a bool') if ((not internal) and (not terminal)): raise RuntimeError('Must select either internal or terminal branches (or both)') tot = 0.0 num = 0 for node in self.traverse_preorder(): if ((node.edge_length is not None) and ((internal and (not node.is_leaf())) or (terminal and node.is_leaf()))): tot += node.edge_length num += 1 return (tot / num)
Compute the average length of the selected branches of this ``Tree``. Edges with length ``None`` will be treated as 0-length Args: ``terminal`` (``bool``): ``True`` to include terminal branches, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal branches, otherwise ``False`` Returns: The average length of the selected branches
codesearchnet
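A usage sketch assuming this method lives on treeswift's Tree class, as its API suggests; the newick string is illustrative.

import treeswift

tree = treeswift.read_tree_newick('((A:1,B:2):0.5,C:3);')
print(tree.avg_branch_length(terminal=True, internal=False))   # 2.0 (leaf branches only)
print(tree.avg_branch_length(terminal=False, internal=True))   # 0.5 (internal branches only)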
def _ReadElementSequenceDataTypeDefinition(self, definitions_registry, definition_values, data_type_definition_class, definition_name, supported_definition_values): unsupported_definition_values = set(definition_values.keys()).difference(supported_definition_values) if unsupported_definition_values: error_message = 'unsupported definition values: {0:s}'.format(', '.join(unsupported_definition_values)) raise errors.DefinitionReaderError(definition_name, error_message) element_data_type = definition_values.get('element_data_type', None) if (not element_data_type): error_message = 'missing element data type' raise errors.DefinitionReaderError(definition_name, error_message) elements_data_size = definition_values.get('elements_data_size', None) elements_terminator = definition_values.get('elements_terminator', None) number_of_elements = definition_values.get('number_of_elements', None) size_values = (elements_data_size, elements_terminator, number_of_elements) size_values = [value for value in size_values if (value is not None)] if (not size_values): error_message = 'missing element data size, elements terminator and number of elements' raise errors.DefinitionReaderError(definition_name, error_message) if (len(size_values) > 1): error_message = 'element data size, elements terminator and number of elements not allowed to be set at the same time' raise errors.DefinitionReaderError(definition_name, error_message) element_data_type_definition = definitions_registry.GetDefinitionByName(element_data_type) if (not element_data_type_definition): error_message = 'undefined element data type: {0:s}.'.format(element_data_type) raise errors.DefinitionReaderError(definition_name, error_message) element_byte_size = element_data_type_definition.GetByteSize() element_type_indicator = element_data_type_definition.TYPE_INDICATOR if ((not element_byte_size) and (element_type_indicator != definitions.TYPE_INDICATOR_STRING)): error_message = 'unsupported variable size element data type: {0:s}'.format(element_data_type) raise errors.DefinitionReaderError(definition_name, error_message) aliases = definition_values.get('aliases', None) description = definition_values.get('description', None) urls = definition_values.get('urls', None) definition_object = data_type_definition_class(definition_name, element_data_type_definition, aliases=aliases, data_type=element_data_type, description=description, urls=urls) if (elements_data_size is not None): try: definition_object.elements_data_size = int(elements_data_size) except ValueError: definition_object.elements_data_size_expression = elements_data_size elif (elements_terminator is not None): if isinstance(elements_terminator, py2to3.UNICODE_TYPE): elements_terminator = elements_terminator.encode('ascii') definition_object.elements_terminator = elements_terminator elif (number_of_elements is not None): try: definition_object.number_of_elements = int(number_of_elements) except ValueError: definition_object.number_of_elements_expression = number_of_elements return definition_object
Reads an element sequence data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. data_type_definition_class (str): data type definition class. definition_name (str): name of the definition. supported_definition_values (set[str]): names of the supported definition values. Returns: SequenceDefinition: sequence data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect.
codesearchnet
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0): super(ApplicationSpecificInformation, self).read(istream, kmip_version=kmip_version) tstream = BytearrayStream(istream.read(self.length)) self.application_namespace.read(tstream, kmip_version=kmip_version) self.application_data.read(tstream, kmip_version=kmip_version) self.is_oversized(tstream) self.validate()
Read the data encoding the ApplicationSpecificInformation object and decode it into its constituent parts. Args: istream (Stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
codesearchnet
def effect_emd(d1, d2): return sum((abs((marginal_zero(d1, i) - marginal_zero(d2, i))) for i in range(d1.ndim)))
Compute the EMD between two effect repertoires. Because the nodes are independent, the EMD between effect repertoires is equal to the sum of the EMDs between the marginal distributions of each node, and the EMD between marginal distribution for a node is the absolute difference in the probabilities that the node is OFF. Args: d1 (np.ndarray): The first repertoire. d2 (np.ndarray): The second repertoire. Returns: float: The EMD between ``d1`` and ``d2``.
codesearchnet
def _datetime_from_json(value, field): if _not_null(value, field): if "." in value: return datetime.datetime.strptime(value, _RFC3339_MICROS_NO_ZULU) else: return datetime.datetime.strptime(value, _RFC3339_NO_FRACTION) else: return None
Coerce 'value' to a datetime, if set or not nullable. Args: value (str): The timestamp. field (.SchemaField): The field corresponding to the value. Returns: Optional[datetime.datetime]: The parsed datetime object from ``value`` if the ``field`` is not null (otherwise it is :data:`None`).
juraj-google-style
def _create_and_save_vocab_table_lookup_qat_model_tf1(self, output_path: str, tags: Collection[str], signature_def_key: str) -> Tuple[Mapping[str, core.Tensor], Mapping[str, core.Tensor]]: with session.Session(graph=ops.Graph()) as sess: input_vocabs_placeholder, lookup_tensor, output_tensor = self._create_vocab_table_lookup_qat_model_tf1(sess) inputs = {'input_vocabs': input_vocabs_placeholder} outputs = {'lookup': lookup_tensor, 'output': output_tensor} self._save_tf1_model(sess, output_path, signature_def_key, tags, inputs=inputs, outputs=outputs, init_op=lookup_ops.tables_initializer(), assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)) return (inputs, outputs)
Creates and saves a simple QAT model that uses a vocab table. Args: output_path: Path to the directory to save the created model. tags: Set of strings that identifies the saved meta graph. signature_def_key: Name of the SignatureDef. Used to identify the SignatureDef within the meta graph. Returns: inputs: A mapping of input_key -> input_tensor (placeholder). The input key is "input_vocabs". outputs: A mapping of output_key -> output_tensor. The output keys are "lookup" and "output".
github-repos
def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None): return self.__StreamMedia( callback=callback, finish_callback=finish_callback, additional_headers=additional_headers, use_chunks=False)
Send this resumable upload in a single request. Args: callback: Progress callback function with inputs (http_wrapper.Response, transfer.Upload) finish_callback: Final callback function with inputs (http_wrapper.Response, transfer.Upload) additional_headers: Dict of headers to include with the upload http_wrapper.Request. Returns: http_wrapper.Response of final response.
juraj-google-style
class PerceiverClassificationPostprocessor(nn.Module): def __init__(self, config: PerceiverConfig, in_channels: int) -> None: super().__init__() self.classifier = nn.Linear(in_channels, config.num_labels) def forward(self, inputs, pos: Optional[torch.Tensor]=None, modality_sizes=None) -> torch.Tensor: logits = self.classifier(inputs) return logits[:, 0, :]
Classification postprocessing for Perceiver. Can be used to convert the decoder output to classification logits. Args: config ([*PerceiverConfig*]): Model configuration. in_channels (`int`): Number of channels in the input.
github-repos
def register_magics(store_name='_ampl_cells', ampl_object=None): from IPython.core.magic import Magics, magics_class, cell_magic, line_magic @magics_class class StoreAMPL(Magics): def __init__(self, shell=None, **kwargs): Magics.__init__(self, shell=shell, **kwargs) self._store = [] shell.user_ns[store_name] = self._store @cell_magic def ampl(self, line, cell): 'Store the cell in the store' self._store.append(cell) @cell_magic def ampl_eval(self, line, cell): 'Evaluate the cell' ampl_object.eval(cell) @line_magic def get_ampl(self, line): 'Retrieve the store' return self._store get_ipython().register_magics(StoreAMPL)
Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``. Args: store_name: Name of the store where ``%%ampl cells`` will be stored. ampl_object: Object used to evaluate ``%%ampl_eval`` cells.
codesearchnet
def _get_loss_object(self, loss): if loss is None: return None loss = losses_mod.get(loss) if not isinstance(loss, losses_mod.Loss): loss_name = get_custom_object_name(loss) if loss_name is None: raise ValueError('Loss should be a callable, found: {}'.format(loss)) loss = losses_mod.LossFunctionWrapper(loss, name=loss_name) loss._allow_sum_over_batch_size = True return loss
Returns a `Loss` object. Converts the user-supplied loss to a `Loss` object. Also allows `SUM_OVER_BATCH_SIZE` reduction to be used for this loss. Args: loss: A string, function, or `Loss` object. Returns: A `Loss` object.
github-repos
def interact_GxG(pheno, snps1, snps2=None, K=None, covs=None): N = snps1.shape[0] if (K is None): K = SP.eye(N) if (snps2 is None): snps2 = snps1 return interact_GxE(snps=snps1, pheno=pheno, env=snps2, covs=covs, K=K)
Epistasis test between two sets of SNPs Args: pheno: [N x 1] SP.array of 1 phenotype for N individuals snps1: [N x S1] SP.array of S1 SNPs for N individuals snps2: [N x S2] SP.array of S2 SNPs for N individuals K: [N x N] SP.array of LMM-covariance/kinship coefficients (optional) If not provided, then linear regression analysis is performed covs: [N x D] SP.array of D covariates for N individuals Returns: pv: [S2 x S1] SP.array of P values for epistasis tests between all SNPs in snps1 and snps2
codesearchnet
def dtime(sdat, tstart=None, tend=None): tseries = sdat.tseries_between(tstart, tend) time = tseries['t'].values return ((time[1:] - time[:(- 1)]), time[:(- 1)])
Time increment dt. Compute dt as a function of time. Args: sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance. tstart (float): time at which the computation should start. Use the beginning of the time series data if set to None. tend (float): time at which the computation should end. Use the end of the time series data if set to None. Returns: tuple of :class:`numpy.array`: dt and time arrays.
codesearchnet
def order_verification(self, institute, case, user, link, variant): LOG.info('Creating event for ordering validation for variant {0}'.format(variant['display_name'])) updated_variant = self.variant_collection.find_one_and_update({'_id': variant['_id']}, {'$set': {'sanger_ordered': True}}, return_document=pymongo.ReturnDocument.AFTER) self.create_event(institute=institute, case=case, user=user, link=link, category='variant', verb='sanger', variant=variant, subject=variant['display_name']) LOG.info('Creating event for ordering sanger for case {0}'.format(case['display_name'])) self.create_event(institute=institute, case=case, user=user, link=link, category='case', verb='sanger', variant=variant, subject=variant['display_name']) return updated_variant
Create an event for ordering verification of a variant, and a corresponding verification event on the case Arguments: institute (dict): A Institute object case (dict): Case object user (dict): A User object link (str): The url to be used in the event variant (dict): A variant object Returns: updated_variant(dict)
codesearchnet
def merge(self, ref_name: str): if self.is_dirty(): LOGGER.error('repository is dirty; cannot merge: %s', ref_name) sys.exit((- 1)) LOGGER.info('merging ref: "%s" into branch: %s', ref_name, self.get_current_branch()) self.repo.git.merge(ref_name)
Merges two refs Args: ref_name: ref to merge in the current one
codesearchnet
def push_file(self, source, dest_dir): local_dest = dest_dir + '/' + os.path.basename(source) if os.path.dirname(source) != dest_dir: try: shutil.copyfile(source, local_dest) os.chmod(local_dest, 0o777) except OSError as e: raise FileCopyException(e, self.hostname) return local_dest
If the source file's dirpath is the same as dest_dir, a copy is not necessary and nothing is done. Otherwise a copy is made. Args: - source (string) : Path to the source file - dest_dir (string) : Path to the directory to which the file is to be copied Returns: - destination_path (String) : Absolute path of the destination file Raises: - FileCopyException : If file copy failed.
juraj-google-style
def get_concept(self, conceptId, lang='en'): url = urljoin((self.concept_service + '/'), conceptId) (res, status_code) = self.get(url, params={'lang': lang}) if (status_code != 200): logger.debug('Fetch concept failed.') return (self.decode(res), status_code)
Fetch the concept from the Knowledge base Args: id (str): The concept id to be fetched; it can be a Wikipedia page id or a Wikidata id. Returns: dict, int: A dict containing the concept information; an integer representing the response code.
codesearchnet
def get(self, key): self._create_file_if_none_exists() with open(self.filename, 'rb') as file_object: cache_pickle = pickle.load(file_object) val = cache_pickle.get(key, None) return val
Gets a value by a key. Args: key (str): Key to retrieve the value. Returns: Retrieved value.
codesearchnet
def draw_points(self, *points): point_array = ffi.new('SDL_Point[]', len(points)) for i, p in enumerate(points): point_array[i] = p._ptr[0] check_int_err(lib.SDL_RenderDrawPoints(self._ptr, point_array, len(points)))
Draw multiple points on the current rendering target. Args: *points (Point): The points to draw. Raises: SDLError: If an error is encountered.
juraj-google-style
def _hash_sequence(self, sighash_type, anyone_can_pay): if anyone_can_pay or sighash_type == shared.SIGHASH_SINGLE: return b'\x00' * 32 else: sequences = ByteData() for tx_in in self.tx_ins: sequences += tx_in.sequence return utils.hash256(sequences.to_bytes())
BIP143 hashSequence implementation Args: sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL anyone_can_pay (bool): true if ANYONECANPAY should be set Returns: (bytes): the hashSequence, a 32 byte hash
juraj-google-style
def _load_tmp_fact(filepath): from hamster_lib import Fact try: with open(filepath, 'rb') as fobj: fact = pickle.load(fobj) except IOError: fact = False else: if (not isinstance(fact, Fact)): raise TypeError(_("Something went wrong. It seems our pickled file does not contain valid Fact instance. [Content: '{content}'; Type: {type}".format(content=fact, type=type(fact)))) return fact
Load an 'ongoing fact' from a given location. Args: filepath: Full path to the tmpfile location. Returns: hamster_lib.Fact: ``Fact`` representing the 'ongoing fact'. Returns ``False`` if no file was found. Raises: TypeError: If for some reason our stored instance is not an instance of ``hamster_lib.Fact``.
codesearchnet
def validate(request: Union[Dict, List], schema: dict) -> Union[Dict, List]: jsonschema_validate(request, schema) return request
Wraps jsonschema.validate, returning the same object passed in. Args: request: The deserialized-from-json request. schema: The jsonschema schema to validate against. Raises: jsonschema.ValidationError
juraj-google-style
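A minimal sketch using jsonschema directly; the request and schema here are illustrative.

from jsonschema import validate as jsonschema_validate

request = {"jsonrpc": "2.0", "method": "ping", "id": 1}
schema = {
    "type": "object",
    "required": ["jsonrpc", "method"],
    "properties": {"jsonrpc": {"type": "string"}, "method": {"type": "string"}},
}
jsonschema_validate(request, schema)  # raises ValidationError if the request is invalid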
def as_dataframe(self, max_rows=None): max_rows = (len(self._timeseries_list) if (max_rows is None) else max_rows) headers = [{'resource': ts.resource._asdict(), 'metric': ts.metric._asdict()} for ts in self._timeseries_list[:max_rows]] if (not headers): return pandas.DataFrame() dataframe = pandas.io.json.json_normalize(headers) dataframe.columns = pandas.MultiIndex.from_tuples([((col, '') if (col == 'resource.type') else col.rsplit('.', 1)) for col in dataframe.columns]) resource_keys = google.cloud.monitoring._dataframe._sorted_resource_labels(dataframe['resource.labels'].columns) sorted_columns = [('resource.type', '')] sorted_columns += [('resource.labels', key) for key in resource_keys] sorted_columns += sorted((col for col in dataframe.columns if (col[0] == 'metric.labels'))) dataframe = dataframe[sorted_columns] dataframe = dataframe.sort_values(sorted_columns) dataframe = dataframe.reset_index(drop=True).fillna('') return dataframe
Creates a pandas dataframe from the query metadata. Args: max_rows: The maximum number of timeseries metadata to return. If None, return all. Returns: A pandas dataframe containing the resource type, resource labels and metric labels. Each row in this dataframe corresponds to the metadata from one time series.
codesearchnet
def load_user_config(vcs): config_path = os.path.join(vcs.path, 'eci.yaml') if not os.path.exists(config_path): raise ConfigNotFoundError with open(config_path, 'r') as f: try: config = yaml.safe_load(f) except yaml.YAMLError: raise ConfigFormatError if not isinstance(config, dict): raise ConfigFormatError for k, v in _default_config.iteritems(): config.setdefault(k, v) for k, v in _config_types.iteritems(): if not isinstance(config[k], v): raise ConfigFormatError return config
Load the user config Args: vcs (easyci.vcs.base.Vcs) - the vcs object for the current project Returns: dict - the config Raises: ConfigFormatError ConfigNotFoundError
juraj-google-style
def test_encode_with_non_root_fhir_path_constraint_succeeds(self, fhir_path_expression: str, expected_sql_expression: str, expected_fhir_path_sql_expression: str, expected_fields_referenced: List[str]): self.maxDiff = None constraint = self.build_constraint(fhir_path_expression=fhir_path_expression) self.assert_constraint_is_equal_to_expression(base_id='Hospital', element_definition_id='Hospital.patients', constraint=constraint, expected_sql_expression=expected_sql_expression, expected_fhir_path_sql_expression=expected_fhir_path_sql_expression, expected_fields_referenced=expected_fields_referenced)
Tests that a "transitive constraint" is properly encoded. A "transitive constraint" is a constraint defined relative to a resource elsewhere in the FHIR resource graph than what we're querying against. Args: fhir_path_expression: The FHIRPath expression to encode. expected_sql_expression: The expected generated Standard SQL. expected_fhir_path_sql_expression: The expected generated Standard SQL without any contextual subqueries. expected_fields_referenced: The expected fields_referenced_by_expression attribute on the resulting constraint.
github-repos
def variable_summaries(vars_, groups=None, scope='weights'): groups = (groups or {'all': '.*'}) grouped = collections.defaultdict(list) for var in vars_: for (name, pattern) in groups.items(): if re.match(pattern, var.name): name = re.sub(pattern, name, var.name) grouped[name].append(var) for name in groups: if (name not in grouped): tf.logging.warn("No variables matching '{}' group.".format(name)) summaries = [] for (name, vars_) in grouped.items(): vars_ = [tf.reshape(var, [(- 1)]) for var in vars_] vars_ = tf.concat(vars_, 0) summaries.append(tf.summary.histogram(((scope + '/') + name), vars_)) return tf.summary.merge(summaries)
Create histogram summaries for the provided variables. Summaries can be grouped via regexes matching variables names. Args: vars_: List of variables to summarize. groups: Mapping of name to regex for grouping summaries. scope: Name scope for this operation. Returns: Summary tensor.
codesearchnet
def from_filenames(filenames, transformations=None, primitive=True, extend_collection=False): allcifs = [] for fname in filenames: with open(fname, "r") as f: allcifs.append(f.read()) return CifTransmuter("\n".join(allcifs), transformations, primitive=primitive, extend_collection=extend_collection)
Generates a TransformedStructureCollection from a cif, possibly containing multiple structures. Args: filenames: List of strings of the cif files transformations: New transformations to be applied to all structures primitive: Same meaning as in __init__. extend_collection: Same meaning as in __init__.
juraj-google-style
def extract(self, destination, format='csv', csv_delimiter=None, csv_header=True, compress=False): job = self.extract_async(destination, format=format, csv_delimiter=csv_delimiter, csv_header=csv_header, compress=compress) if (job is not None): job.wait() return job
Exports the table to GCS; blocks until complete. Args: destination: the destination URI(s). Can be a single URI or a list. format: the format to use for the exported data; one of 'csv', 'json', or 'avro' (default 'csv'). csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ',' csv_header: for CSV exports, whether to include an initial header line. Default true. compress: whether to compress the data on export. Compression is not supported for AVRO format. Defaults to False. Returns: A Job object for the completed export Job if it was started successfully; else None.
codesearchnet
def add_chain_ids(self, chains): chains = ssbio.utils.force_list(chains) for c in chains: if self.chains.has_id(c): log.debug('{}: chain already present'.format(c)) else: chain_prop = ChainProp(ident=c, pdb_parent=self.id) self.chains.append(chain_prop) log.debug('{}: added to chains list'.format(c))
Add chains by ID into the chains attribute Args: chains (str, list): Chain ID or list of IDs
codesearchnet
async def _handle_set_typing_notification(self, set_typing_notification): conv_id = set_typing_notification.conversation_id.id res = parsers.parse_typing_status_message(set_typing_notification) await self.on_typing.fire(res) try: conv = await self._get_or_fetch_conversation(conv_id) except exceptions.NetworkError: logger.warning( 'Failed to fetch conversation for typing notification: %s', conv_id ) else: await conv.on_typing.fire(res)
Receive SetTypingNotification and update the conversation. Args: set_typing_notification: hangouts_pb2.SetTypingNotification instance
juraj-google-style
def _parse_example_raw(serialized, names, params, name): if params.num_features == 0: raise ValueError('Must provide at least one feature key.') with ops.name_scope(name, 'ParseExample', [serialized, names]): names = [] if names is None else names serialized = ops.convert_to_tensor(serialized, name='serialized') if params.ragged_keys and serialized.shape.ndims is None: raise ValueError('serialized must have statically-known rank to parse ragged features.') outputs = gen_parsing_ops.parse_example_v2(serialized=serialized, names=names, sparse_keys=params.sparse_keys, dense_keys=params.dense_keys, ragged_keys=params.ragged_keys, dense_defaults=params.dense_defaults_vec, num_sparse=len(params.sparse_keys), sparse_types=params.sparse_types, ragged_value_types=params.ragged_value_types, ragged_split_types=params.ragged_split_types, dense_shapes=params.dense_shapes_as_proto, name=name) sparse_indices, sparse_values, sparse_shapes, dense_values, ragged_values, ragged_row_splits = outputs ragged_tensors = parsing_config._build_ragged_tensors(serialized.shape, ragged_values, ragged_row_splits) sparse_tensors = [sparse_tensor.SparseTensor(ix, val, shape) for ix, val, shape in zip(sparse_indices, sparse_values, sparse_shapes)] return dict(zip(params.sparse_keys + params.dense_keys + params.ragged_keys, sparse_tensors + dense_values + ragged_tensors))
Parses `Example` protos. Args: serialized: A vector (1-D Tensor) of strings, a batch of binary serialized `Example` protos. names: A vector (1-D Tensor) of strings (optional), the names of the serialized protos. params: A `ParseOpParams` containing the parameters for the parse op. name: A name for this operation (optional). Returns: A `dict` mapping keys to `Tensor`s and `SparseTensor`s and `RaggedTensor`s.
github-repos
def add_comment(self, comment): if not comment: return self.__comments[comment.name] = comment self.comment_added_signal(self, comment)
Add a comment to the database. Args: comment (hotdoc.core.Comment): comment to add
juraj-google-style
def _pack_images(images, rows, cols): shape = onp.shape(images) width, height, depth = shape[-3:] images = onp.reshape(images, (-1, width, height, depth)) batch = onp.shape(images)[0] rows = onp.minimum(rows, batch) cols = onp.minimum(batch // rows, cols) images = images[:rows * cols] images = onp.reshape(images, (rows, cols, width, height, depth)) images = onp.transpose(images, [0, 2, 1, 3, 4]) images = onp.reshape(images, [rows * width, cols * height, depth]) return images
Helper utility to make a tiled field of images from numpy arrays. Args: images: Image tensor in shape [N, W, H, C]. rows: Number of images per row in tiled image. cols: Number of images per column in tiled image. Returns: A tiled image of shape [W * rows, H * cols, C]. Truncates incomplete rows.
juraj-google-style
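A shape check for the tiling helper, assuming the function above is in scope with numpy imported as onp (as in the snippet); the sizes are illustrative.

import numpy as onp

images = onp.zeros((6, 8, 10, 3))          # N=6 images, 8x10, 3 channels
tiled = _pack_images(images, rows=2, cols=3)
print(tiled.shape)                         # (16, 30, 3) -> (rows*W, cols*H, C)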
def export_to_dir(network, export_dir): package_path = ding0.__path__[0] network.export_to_csv_folder(os.path.join(package_path, 'output', 'debug', 'grid', export_dir))
Exports the PyPSA network as CSV files to a directory Args: network: pypsa.Network export_dir: str Sub-directory in output/debug/grid/ where CSV files of the PyPSA network are exported to.
juraj-google-style
def hashed(field_name, percent, fields=None, count=0): if field_name is None: raise Exception('Hash field must be specified') def _hashed_sampling(sql): projection = Sampling._create_projection(fields) sql = 'SELECT %s FROM (%s) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(%s AS STRING))), 100) < %d' % \ (projection, sql, field_name, percent) if count != 0: sql = '%s LIMIT %d' % (sql, count) return sql return _hashed_sampling
Provides a sampling strategy based on hashing and selecting a percentage of data. Args: field_name: the name of the field to hash. percent: the percentage of the resulting hashes to select. fields: an optional list of field names to retrieve. count: optional maximum count of rows to pick. Returns: A sampling function that can be applied to get a hash-based sampling.
juraj-google-style
def create_detector(self, detector): resp = self._post(self._u(self._DETECTOR_ENDPOINT_SUFFIX), data=detector) resp.raise_for_status() return resp.json()
Creates a new detector. Args: detector (object): the detector model object. Will be serialized as JSON. Returns: dictionary of the response (created detector model).
juraj-google-style
def dummy_inputs(self): input_ids = tf.constant(DUMMY_INPUTS, dtype=tf.int32) batch_size, seq_len = input_ids.shape VISION_DUMMY_INPUTS = tf.random.uniform(shape=(batch_size, self.config.vision_config.num_channels, self.config.vision_config.image_size, self.config.vision_config.image_size), dtype=tf.float32) pixel_values = tf.constant(VISION_DUMMY_INPUTS) dummy = {'pixel_values': pixel_values, 'input_ids': input_ids} return dummy
Dummy inputs to build the network. Returns: `Dict[str, tf.Tensor]`: The dummy inputs.
github-repos
def __init__(self, shape, layout_rules): self._shape = convert_to_shape(shape) self._layout_rules = convert_to_layout_rules(layout_rules)
Creates a mesh implementation. Args: shape: Shape. layout_rules: LayoutRules.
juraj-google-style
def MapByteStream(self, byte_stream, byte_offset=0, **kwargs): byte_stream = super(StringMap, self).MapByteStream( byte_stream, byte_offset=byte_offset, **kwargs) if self._HasElementsTerminator(): elements_terminator = self._data_type_definition.elements_terminator elements_terminator_size = len(elements_terminator) byte_offset = 0 byte_stream_size = len(byte_stream) while byte_offset < byte_stream_size: end_offset = byte_offset + elements_terminator_size if byte_stream[byte_offset:end_offset] == elements_terminator: break byte_offset += elements_terminator_size byte_stream = byte_stream[:byte_offset] try: return byte_stream.decode(self._data_type_definition.encoding) except Exception as exception: error_string = ( 'Unable to read: {0:s} from byte stream at offset: {1:d} ' 'with error: {2!s}').format( self._data_type_definition.name, byte_offset, exception) raise errors.MappingError(error_string)
Maps the data type on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. Returns: str: mapped values. Raises: MappingError: if the data type definition cannot be mapped on the byte stream.
juraj-google-style
def _get_profile_data_generator(self): node_to_traceback = defaultdict(list) node_to_op_type = defaultdict(str) for op in self._graph.get_operations(): node_to_traceback[op.name] = op.traceback node_to_op_type[op.name] = op.type def profile_data_generator(device_step_stats): for node_stats in device_step_stats.node_stats: if node_stats.node_name == '_SOURCE' or node_stats.node_name == '_SINK': continue yield ProfileDatum(node_stats, node_to_op_type[node_stats.node_name], node_to_traceback[node_stats.node_name]) return profile_data_generator
Get function that generates `ProfileDatum` objects. Returns: A function that generates `ProfileDatum` objects.
github-repos
def ones(shape, dtype=None): return backend.numpy.ones(shape, dtype=dtype)
Return a new tensor of given shape and type, filled with ones. Args: shape: Shape of the new tensor. dtype: Desired data type of the tensor. Returns: Tensor of ones with the given shape and dtype.
github-repos
def add_get_parameters(url, parameters, percent_encode=True): url_parts = list(parse.urlparse(url)) query = dict(parse.parse_qs(url_parts[4])) query.update(parameters) if percent_encode: url_parts[4] = parse.urlencode(query) else: url_parts[4] = "&".join([key + "=" + value for key, value in query.items()]) return parse.urlunparse(url_parts)
Utility function to add GET parameters to an existing URL. Args: parameters A dictionary of the parameters that should be added. percent_encode Whether the query parameters should be percent encoded. Returns: The updated URL.
juraj-google-style
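A standalone usage sketch, assuming the function above is in scope; the URL and parameter are illustrative.

from urllib import parse  # alias used by the snippet above

url = add_get_parameters("https://example.com/search", {"page": "2"})
print(url)  # https://example.com/search?page=2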
def _FindKeys(self, key, names, matches): for (name, subkey) in iter(key.items()): if (name in names): matches.append((name, subkey)) if isinstance(subkey, dict): self._FindKeys(subkey, names, matches)
Searches the plist key hierarchy for keys with matching names. If a match is found a tuple of the key name and value is added to the matches list. Args: key (dict[str, object]): plist key. names (list[str]): names of the keys to match. matches (list[str]): keys with matching names.
codesearchnet
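A standalone sketch of the same recursive search over nested dicts; the names here are illustrative, not the original parser API.

def find_keys(node, names, matches):
    # Collect (name, value) pairs whose key matches, then recurse into nested dicts.
    for name, value in node.items():
        if name in names:
            matches.append((name, value))
        if isinstance(value, dict):
            find_keys(value, names, matches)

plist = {"root": {"Accounts": {"user": "a"}, "Other": {"Accounts": {"user": "b"}}}}
found = []
find_keys(plist, {"Accounts"}, found)
print(found)  # both nested "Accounts" dicts are reported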
def __init__(self, pattern, flags=0): self.regex = re.compile(pattern, flags=flags)
Initialize. Args: pattern: str, the regular expression to search for. flags: int, flags passed to the re.compile function as the second argument.
juraj-google-style
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0): local_buffer = utils.BytearrayStream() if self._unique_identifier: self._unique_identifier.write(local_buffer, kmip_version=kmip_version) else: raise exceptions.InvalidField('The GetAttributeList response payload is missing the unique identifier field.') if self._attribute_names: if (kmip_version < enums.KMIPVersion.KMIP_2_0): for attribute_name in self._attribute_names: attribute_name.write(local_buffer, kmip_version=kmip_version) else: for attribute_name in self._attribute_names: t = enums.convert_attribute_name_to_tag(attribute_name.value) e = primitives.Enumeration(enums.Tags, value=t, tag=enums.Tags.ATTRIBUTE_REFERENCE) e.write(local_buffer, kmip_version=kmip_version) else: raise exceptions.InvalidField('The GetAttributeList response payload is missing the attribute names field.') self.length = local_buffer.length() super(GetAttributeListResponsePayload, self).write(output_buffer, kmip_version=kmip_version) output_buffer.write(local_buffer.buffer)
Write the data encoding the GetAttributeList response payload to a stream. Args: output_buffer (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: InvalidField: Raised if the unique identifier or attribute name are not defined.
codesearchnet
def _construct_graph(self, vertex_dict, edge_dict, default_vertex_attrs, default_edge_attrs): with self._lock: self._graph = pydot.Dot() if default_vertex_attrs: self._graph.set_node_defaults(**default_vertex_attrs) if default_edge_attrs: self._graph.set_edge_defaults(**default_edge_attrs) self._vertex_refs = {} self._edge_refs = {} for vertex, vertex_attrs in vertex_dict.items(): vertex_ref = pydot.Node(vertex, **vertex_attrs) self._vertex_refs[vertex] = vertex_ref self._graph.add_node(vertex_ref) for edge, edge_attrs in edge_dict.items(): vertex_src = self._vertex_refs[edge[0]] vertex_dst = self._vertex_refs[edge[1]] edge_ref = pydot.Edge(vertex_src, vertex_dst, **edge_attrs) self._edge_refs[edge] = edge_ref self._graph.add_edge(edge_ref)
Constructs the pydot.Dot object for the pipeline graph. Args: vertex_dict: (Dict[str, Dict[str, str]]) maps vertex names to attributes edge_dict: (Dict[(str, str), Dict[str, str]]) maps vertex name pairs to attributes default_vertex_attrs: (Dict[str, str]) default attributes applied to every vertex default_edge_attrs: (Dict[str, str]) default attributes applied to every edge
github-repos
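A minimal pydot sketch of the same construction, assuming the pydot package is installed; the vertex and edge attributes are made up.

import pydot

graph = pydot.Dot()
graph.set_node_defaults(shape="box")
graph.set_edge_defaults(color="gray")

vertex_refs = {}
for vertex, attrs in {"Read": {}, "Map": {"color": "blue"}}.items():
    vertex_refs[vertex] = pydot.Node(vertex, **attrs)
    graph.add_node(vertex_refs[vertex])

graph.add_edge(pydot.Edge(vertex_refs["Read"], vertex_refs["Map"], label="pcoll"))
print(graph.to_string())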
def vectorize(self, token_list):
    sentence_list = [token_list]
    test_observed_arr = self.__setup_dataset(sentence_list, self.__token_master_list)
    pred_arr = self.__controller.inference(test_observed_arr)
    return self.__controller.get_feature_points()
Vectorize the token list. Args: token_list: The list of tokens. Returns: [vector of token, vector of token, vector of token, ...]
juraj-google-style
def tournament_selection(population, fitnesses, num_competitors=2, diversity_weight=0.0):
    if diversity_weight <= 0.0:
        fitness_pop = list(zip(fitnesses, population))
        return [max(random.sample(fitness_pop, num_competitors))[1]
                for _ in range(len(population))]
    else:
        indices = range(len(population))
        selected_solutions = []
        for _ in range(len(population)):
            competitor_indices = random.sample(indices, num_competitors)
            if random.uniform(0.0, 1.0) < (1.0 / (1.0 + diversity_weight)):
                selected_solutions.append(
                    max(zip([fitnesses[i] for i in competitor_indices],
                            [population[i] for i in competitor_indices]))[-1])
            else:
                selected_solutions.append(
                    max(zip([_diversity_metric(population[i], selected_solutions)
                             for i in competitor_indices],
                            [fitnesses[i] for i in competitor_indices],
                            [population[i] for i in competitor_indices]))[-1])
        return selected_solutions
Create a list of parents with tournament selection. Args: population: A list of solutions. fitnesses: A list of fitness values corresponding to solutions in population. num_competitors: Number of solutions to compare every round. Best solution among competitors is selected. diversity_weight: Weight of diversity metric. Determines how frequently diversity is used to select tournament winners. Note that fitness is given a weight of 1.0. diversity_weight == 1.0 gives equal weight to diversity and fitness.
codesearchnet
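A quick, hedged sketch of the fitness-only branch; the (fitness, solution) pairs are materialised into a list so random.sample can draw from them repeatedly.

import random

def tournament_select(population, fitnesses, num_competitors=2):
    # Each round: sample competitors as (fitness, solution) pairs and keep the fittest.
    paired = list(zip(fitnesses, population))
    return [max(random.sample(paired, num_competitors))[1]
            for _ in range(len(population))]

population = [[0, 1], [1, 1], [1, 0], [0, 0]]
fitnesses = [1.0, 2.0, 1.0, 0.0]
print(tournament_select(population, fitnesses))  # fitter solutions appear more often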
def __init__(self, sess, watch_fn=None, thread_name_filter=None, pass_through_operrors=False): BaseDebugWrapperSession.__init__(self, sess, thread_name_filter=thread_name_filter, pass_through_operrors=pass_through_operrors) self._watch_fn = None if watch_fn is not None: if not callable(watch_fn): raise TypeError('watch_fn is not callable') self._watch_fn = watch_fn
Constructor of NonInteractiveDebugWrapperSession. Args: sess: The TensorFlow `Session` object being wrapped. watch_fn: (`Callable`) A Callable that maps the fetches and feeds of a debugged `Session.run()` call to `WatchOptions.` * Args: * `fetches`: the fetches to the `Session.run()` call. * `feeds`: the feeds to the `Session.run()` call. * Returns: (`tf_debug.WatchOptions`) An object containing debug options including the debug ops to use, the node names, op types and/or tensor data types to watch, etc. See the documentation of `tf_debug.WatchOptions` for more details. thread_name_filter: Regular-expression white list for threads on which the wrapper session will be active. See doc of `BaseDebugWrapperSession` for more details. pass_through_operrors: If true, all captured OpErrors will be propagated. By default this captures all OpErrors. Raises: TypeError: If a non-None `watch_fn` is specified and it is not callable.
github-repos
def write_to_file(self, file_path): with gfile.Open(file_path, 'w') as f: for line in self._lines: f.write(line + '\n')
Write the object itself to file, in a plain format. The font_attr_segs and annotations are ignored. Args: file_path: (str) path of the file to write to.
github-repos
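A minimal sketch of the same plain-text dump, with gfile.Open replaced by the built-in open as an assumption.

def write_lines(lines, file_path):
    # One buffered line per output line; font attributes and annotations are dropped.
    with open(file_path, "w") as f:
        for line in lines:
            f.write(line + "\n")

write_lines(["step 0: loss=1.2", "step 1: loss=0.9"], "/tmp/debug_dump.txt")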
def call(command, collect_missing=False, silent=True):
    return (_execCommand if silent else execCommand)(shlex.split(command), collect_missing)
r"""Calls a task, as if it were called from the command line. Args: command (str): A route followed by params (as if it were entered in the shell). collect_missing (bool): Collects any missing argument for the command through the shell. Defaults to False. Returns: The return value of the called command.
juraj-google-style
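A hedged sketch of the command-splitting step; _execCommand and execCommand belong to the original task framework, so a stand-in dispatcher is used here.

import shlex

def exec_command(args, collect_missing):
    # Stand-in for the framework's dispatcher: just report what would be routed.
    return ("route", args, collect_missing)

def call(command, collect_missing=False):
    # shlex.split honours quoting, so the quoted phrase stays a single argument.
    return exec_command(shlex.split(command), collect_missing)

print(call('deploy --msg "hello world"'))
# ('route', ['deploy', '--msg', 'hello world'], False)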
def ParsePageVisitedRow( self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs): query_hash = hash(query) from_visit = self._GetRowValue(query_hash, row, 'from_visit') hidden = self._GetRowValue(query_hash, row, 'hidden') rev_host = self._GetRowValue(query_hash, row, 'rev_host') typed = self._GetRowValue(query_hash, row, 'typed') extras = [] if from_visit: extras.append('visited from: {0:s}'.format( self._GetUrl(from_visit, cache, database))) if hidden == '1': extras.append('(url hidden)') if typed == '1': extras.append('(directly typed)') else: extras.append('(URL not typed directly)') event_data = FirefoxPlacesPageVisitedEventData() event_data.host = self._ReverseHostname(rev_host) event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.title = self._GetRowValue(query_hash, row, 'title') event_data.url = self._GetRowValue(query_hash, row, 'url') event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count') event_data.visit_type = self._GetRowValue(query_hash, row, 'visit_type') if extras: event_data.extra = extras timestamp = self._GetRowValue(query_hash, row, 'visit_date') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a page visited row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. cache (Optional[SQLiteCache]): cache. database (Optional[SQLiteDatabase]): database.
juraj-google-style
def load_parameters(self, path): nn.load_parameters(path) for v in self.get_modules(): if not isinstance(v, tuple): continue prefix, module = v for k, v in module.__dict__.items(): if not isinstance(v, nn.Variable): continue pname = k name = "{}/{}".format(prefix, pname) param0 = v param1 = nn.parameter.pop_parameter(name) if param0 is None: raise ValueError( "Model does not have {} parameter.".format(name)) param0.d = param1.d.copy() nn.logger.info("`{}` loaded.)".format(name))
Load parameters from a file with the specified format. Args: path : path or file object
juraj-google-style
def _get_hash(self, file_obj):
    size = 0
    hash_builder = self.hash_builder()
    for piece in self._get_file_iterator(file_obj):
        hash_builder.update(piece)
        size += len(piece)
    file_obj.seek(0)
    return "%s_%x" % (hash_builder.hexdigest(), size)
Compute hash for the `file_obj`. Args: file_obj (obj): File-like object supporting ``.read()`` and ``.seek()``. Returns: str: Hexdigest of the hash with the data size appended in hex (``<hexdigest>_<size>``).
codesearchnet
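A standalone sketch of the same chunked-hash-plus-size fingerprint using hashlib; the chunk size and the SHA-256 choice are assumptions.

import hashlib
import io

def file_fingerprint(file_obj, chunk_size=65536):
    # Hash the stream in chunks, append the byte count in hex, then rewind.
    hasher = hashlib.sha256()
    size = 0
    for piece in iter(lambda: file_obj.read(chunk_size), b""):
        hasher.update(piece)
        size += len(piece)
    file_obj.seek(0)
    return "%s_%x" % (hasher.hexdigest(), size)

print(file_fingerprint(io.BytesIO(b"hello world")))  # <sha256 hexdigest>_b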
class PerceiverClassificationDecoder(PerceiverAbstractDecoder): def __init__(self, config, **decoder_kwargs): super().__init__() self.num_labels = config.num_labels self.decoder = PerceiverBasicDecoder(config, output_num_channels=self.num_labels, output_index_dims=1, **decoder_kwargs) @property def num_query_channels(self) -> int: return self.decoder.num_query_channels def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): return self.decoder.decoder_query(inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_points) def forward(self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> PerceiverDecoderOutput: decoder_outputs = self.decoder(query, z, output_attentions=output_attentions) logits = decoder_outputs.logits[:, 0, :] return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions)
Cross-attention based classification decoder. Light-weight wrapper of [`PerceiverBasicDecoder`] for logit output. Will turn the output of the Perceiver encoder which is of shape (batch_size, num_latents, d_latents) to a tensor of shape (batch_size, num_labels). The queries are of shape (batch_size, 1, num_labels). Args: config ([`PerceiverConfig`]): Model configuration.
github-repos
def mark_done(task_id):
    task = Task.get_by_id(task_id)
    if task is None:
        raise ValueError('Task with id %d does not exist' % task_id)
    task.done = True
    task.put()
Marks a task as done. Args: task_id: The integer id of the task to update. Raises: ValueError: if the requested task doesn't exist.
codesearchnet
def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):
    for i, ext in enumerate(exclude_exts):
        if not ext.strip().startswith('.'):
            exclude_exts[i] = '.' + ext.strip()
    paths = []
    for fname in os.listdir(dirname):
        root, ext = os.path.splitext(fname)
        path = os.path.join(dirname, fname)
        if (ext in exclude_exts or fname in exclude_fnames or
                fname.startswith('.') or not os.path.isfile(path)):
            continue
        paths.append(path)
    pseudos = []
    for path in paths:
        try:
            pseudo = self.parse(path)
        except Exception:
            pseudo = None
        if pseudo is not None:
            pseudos.append(pseudo)
            self._parsed_paths.append(path)
        else:
            self._wrong_paths.append(path)
    return pseudos
Analyze the files contained in directory dirname. Args: dirname: directory path exclude_exts: list of file extensions that should be skipped. exclude_fnames: list of file names that should be skipped. Returns: List of pseudopotential objects.
codesearchnet
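A simplified, hedged sketch of the filtering step only; the pseudopotential parsing itself is project-specific and left out.

import os

def collect_paths(dirname, exclude_exts=(), exclude_fnames=()):
    # Normalise extensions so both "pyc" and ".pyc" are accepted.
    exclude_exts = tuple(e if e.startswith(".") else "." + e for e in exclude_exts)
    paths = []
    for fname in os.listdir(dirname):
        ext = os.path.splitext(fname)[1]
        path = os.path.join(dirname, fname)
        if (ext in exclude_exts or fname in exclude_fnames
                or fname.startswith(".") or not os.path.isfile(path)):
            continue
        paths.append(path)
    return paths

print(collect_paths(".", exclude_exts=("pyc",)))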
def indicators(self, indicator_type=None, filters=None, params=None):
    indicator = self._tcex.ti.indicator(indicator_type)
    for i in self.tc_requests.indicators_from_tag(
        indicator, self.name, filters=filters, params=params
    ):
        yield i
Gets all indicators from a tag. Args: indicator_type: The type of indicators to retrieve. filters: Filters to apply to the request. params: Additional query parameters for the request. Yields: Indicators associated with the tag.
juraj-google-style
def use_test_undeclared_outputs_dir(self): return self.is_flag_on(FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR)
Decides the output directory of the report and trace files. Args: None. Returns: True if the output files should be written to the test-undeclared-outputs-directory defined via an env variable.
github-repos
def VFSMultiOpen(pathspecs, progress_callback=None):
    precondition.AssertIterableType(pathspecs, rdf_paths.PathSpec)
    vfs_open = functools.partial(VFSOpen, progress_callback=progress_callback)
    return context.MultiContext(map(vfs_open, pathspecs))
Opens multiple files specified by given path-specs. See documentation for `VFSOpen` for more information. Args: pathspecs: A list of pathspec instances of files to open. progress_callback: A callback function to call to notify about progress Returns: A context manager yielding file-like objects.
codesearchnet
def jax_gather(params, indices, batch_dims=2):
    def _jax_gather(params, indices):
        return params[indices]
    for _ in range(batch_dims):
        _jax_gather = jax.vmap(_jax_gather, in_axes=(0, 0))
    return _jax_gather(params, indices)
Gather the indices from params correctly (equivalent to tf.gather but with modifications) Args: params: (bsz, n_heads, num_blocks, block_size, head_dim) indices: (<num_blocks, 1)
github-repos
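A hedged shape check for the gather above, assuming JAX is installed; the dimensions are illustrative.

import jax
import jax.numpy as jnp

def jax_gather(params, indices, batch_dims=2):
    def _gather(params, indices):
        return params[indices]
    for _ in range(batch_dims):
        _gather = jax.vmap(_gather, in_axes=(0, 0))
    return _gather(params, indices)

params = jnp.zeros((2, 3, 4, 5, 6))              # (bsz, n_heads, num_blocks, block_size, head_dim)
indices = jnp.zeros((2, 3, 7), dtype=jnp.int32)  # 7 block ids per (batch, head), values < num_blocks
print(jax_gather(params, indices).shape)         # (2, 3, 7, 5, 6)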
def save(self, path): self.clip.write_videofile(path, audio_fps=self.clip.audio.fps)
Save source video to file. Args: path (str): Filename to save to. Notes: Saves entire source video to file, not just currently selected frames.
codesearchnet
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    super(BigInteger, self).read(istream, kmip_version=kmip_version)
    if self.length % 8:
        raise exceptions.InvalidPrimitiveLength(
            'invalid big integer length read; expected: multiple of 8, '
            'observed: {0}'.format(self.length))
    sign = 1
    binary = ''
    for _ in range(self.length):
        byte = struct.unpack('!B', istream.read(1))[0]
        bits = '{0:b}'.format(byte)
        pad = len(bits) % 8
        if pad:
            bits = ('0' * (8 - pad)) + bits
        binary += bits
    if binary[0] == '1':
        sign = -1
        binary = binary.replace('1', 'i')
        binary = binary.replace('0', '1')
        binary = binary.replace('i', '0')
        pivot = binary.rfind('0')
        binary = binary[0:pivot] + '1' + ('0' * len(binary[pivot + 1:]))
    self.value = int(binary, 2) * sign
Read the encoding of the BigInteger from the input stream. Args: istream (stream): A buffer containing the encoded bytes of the value of a BigInteger. Usually a BytearrayStream object. Required. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: InvalidPrimitiveLength: if the big integer encoding read in has an invalid encoded length.
codesearchnet
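The bit twiddling above hand-rolls big-endian two's-complement decoding; as a cross-check (not the library's own code), int.from_bytes gives the same result.

def decode_big_integer(data):
    # KMIP big integers must be a multiple of 8 bytes, mirroring the check above.
    if len(data) % 8:
        raise ValueError("invalid big integer length: %d" % len(data))
    return int.from_bytes(data, byteorder="big", signed=True)

print(decode_big_integer(b"\x00\x00\x00\x00\x00\x00\x01\x00"))  # 256
print(decode_big_integer(b"\xff\xff\xff\xff\xff\xff\xff\xfe"))  # -2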
def parse_args(test: ArgList=None) -> argparse.Namespace: parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('encoded_train_data', help='File path for the encoded training data.') parser.add_argument('-o', '--output', help=f'Output file path for the learned weights. (default: {DEFAULT_OUTPUT_NAME})', type=str, default=DEFAULT_OUTPUT_NAME) parser.add_argument('--log', help=f'Output file path for the training log. (default: {DEFAULT_LOG_NAME})', type=str, default=DEFAULT_LOG_NAME) parser.add_argument('--feature-thres', help=f'Threshold value of the minimum feature frequency. (default: {DEFAULT_FEATURE_THRES})', type=int, default=DEFAULT_FEATURE_THRES) parser.add_argument('--iter', help=f'Number of iterations for training. (default: {DEFAULT_ITERATION})', type=int, default=DEFAULT_ITERATION) parser.add_argument('--out-span', help=f'Iteration span to output metrics and weights. (default: {DEFAULT_OUT_SPAN})', type=int, default=DEFAULT_OUT_SPAN) parser.add_argument('--val-data', help='File path for the encoded validation data.', type=str) if test is None: return parser.parse_args() else: return parser.parse_args(test)
Parses commandline arguments. Args: test (typing.Optional[typing.List[str]], optional): Commandline args for testing. Defaults to None. Returns: argparse.Namespace: Parsed data of args.
github-repos
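A trimmed, hedged mirror of the parser above showing why the optional test argument is handy: an explicit list bypasses sys.argv, so the parser is unit-testable.

import argparse

def parse_args(test=None):
    parser = argparse.ArgumentParser(description="train")
    parser.add_argument("encoded_train_data")
    parser.add_argument("--iter", type=int, default=10000)
    # With test=None the real command line is parsed; otherwise the given list is used.
    return parser.parse_args(test) if test is not None else parser.parse_args()

args = parse_args(["train.txt", "--iter", "500"])
print(args.encoded_train_data, args.iter)  # train.txt 500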
def _VerifyValues(self, input_sizes=None, filter_sizes=None, strides=None, dilations=None, padding=None, data_format_src='NHWC', data_format_dst='NHWC', expected=None, op_name='Conv2D'): total_size_1 = np.prod(input_sizes) total_size_2 = np.prod(filter_sizes) x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes) x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(filter_sizes) strides = [1] + strides + [1] if dilations is None: dilations = [1, 1] dilations = [1] + dilations + [1] expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src, data_format_dst) x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src, data_format_dst) input_sizes = test_utils.PermuteDimsBetweenDataFormats(input_sizes, data_format_src, data_format_dst) strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src, data_format_dst) dilations = test_utils.PermuteDimsBetweenDataFormats(dilations, data_format_src, data_format_dst) with self.session() as sess: t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes) t2 = array_ops.placeholder(dtypes.float32, shape=filter_sizes) with self.test_scope(): if op_name == 'Conv': conv_format = 'CHANNELS_LAST' if data_format_dst == 'NHWC' else 'CHANNELS_FIRST' out = gen_nn_ops.conv(t1, t2, strides=strides, padding=padding, data_format=conv_format, dilations=dilations) elif op_name == 'Conv2D': out = nn_ops.conv2d(t1, t2, strides=strides, padding=padding, data_format=data_format_dst, dilations=dilations) else: raise ValueError('Invalid op name: %s' % op_name) value = sess.run(out, {t1: x1, t2: x2}) self.assertAllClose(expected, value, 0.001)
Tests that tf.nn.conv2d produces the expected value. Args: input_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. strides: Strides. dilations: RHS dilations. padding: Padding type. data_format_src: Data format input is in. data_format_dst: Data format verification will run and input is converted to. expected: Expected output. op_name: Name of operation to test (Conv/Conv2D)
github-repos