Columns: code (string, 20 to 4.93k characters), docstring (string, 33 to 1.27k characters), source (string, 3 classes: codesearchnet, juraj-google-style, github-repos). Each record below gives a function's code, its docstring, and the source dataset it was drawn from.
def failure_reason(self, failure_index=None): (phase, _) = self._get_failed_phase(failure_index) return phase.failure_reason
Get the reason for a failure. Args: failure_index: Index of the failure to return the reason for (can be negative). If None, the most appropriate failure is chosen according to these rules: - If the fail is cyclic, the most recent fail (the one containing the cycle) is used; - If a callback has caused a failure, the most recent fail is used; - Otherwise, the first fail is used. Returns: A `FailureReason` subclass instance describing the failure.
codesearchnet
def SetTimezone(self, timezone): if (not timezone): return try: self._timezone = pytz.timezone(timezone) except pytz.UnknownTimeZoneError: raise ValueError('Unsupported timezone: {0:s}'.format(timezone))
Sets the timezone. Args: timezone (str): timezone. Raises: ValueError: if the timezone is not supported.
codesearchnet
def malware(self, malware, password, file_name): if not self.can_update(): self._tcex.handle_error(910, [self.type]) self._data['malware'] = malware self._data['password'] = password self._data['fileName'] = file_name request = {'malware': malware, 'password': password, 'fileName': file_name} return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)
Uploads a sample to the malware vault. Args: malware: The malware sample payload to upload. password: Password protecting the uploaded archive. file_name: Name to store the uploaded file under.
juraj-google-style
def bbox_rotate(bbox, angle, rows, cols, interpolation): scale = cols / float(rows) x = np.array([bbox[0], bbox[2], bbox[2], bbox[0]]) y = np.array([bbox[1], bbox[1], bbox[3], bbox[3]]) x = x - 0.5 y = y - 0.5 angle = np.deg2rad(angle) x_t = (np.cos(angle) * x * scale + np.sin(angle) * y) / scale y_t = (-np.sin(angle) * x * scale + np.cos(angle) * y) x_t = x_t + 0.5 y_t = y_t + 0.5 return [min(x_t), min(y_t), max(x_t), max(y_t)]
Rotates a bounding box by angle degrees. Args: bbox (tuple): A tuple (x_min, y_min, x_max, y_max). angle (int): Angle of rotation in degrees. rows (int): Image rows. cols (int): Image cols. interpolation (int): Interpolation method. Returns: A tuple (x_min, y_min, x_max, y_max).
juraj-google-style
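A short usage sketch for bbox_rotate may help make the coordinate handling concrete. It assumes the function above is importable, that numpy is available, and that box coordinates are normalized to [0, 1] (which is what the 0.5 centering shift implies); the interpolation argument appears unused by the body and is passed only for signature compatibility.

import numpy as np  # required by bbox_rotate itself

# 90-degree rotation on a square image, so the aspect-ratio scale factor is 1.
box = (0.1, 0.2, 0.4, 0.5)
rotated = bbox_rotate(box, angle=90, rows=512, cols=512, interpolation=1)
# Expected, up to floating-point noise: [0.2, 0.6, 0.5, 0.9]
print([round(v, 6) for v in rotated])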
def delete_channel(self, channel_name, project_name, dataset_name): return self.resources.delete_channel(channel_name, project_name, dataset_name)
Deletes a channel given its name, the name of its project, and the name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: bool: True if channel deleted, False if not
juraj-google-style
def index_data(self, data, index_name, doc_type):
    if not isinstance(data, dict):
        raise RuntimeError('Index failed, data needs to be a dict!')
    try:
        self.els_search.index(index=index_name, doc_type=doc_type, body=data)
    except Exception as error:
        # Surface the underlying Elasticsearch error before re-raising.
        print('Index failed: %s' % str(error))
        raise RuntimeError('Index failed: %s' % str(error))
Take an arbitrary dictionary of data and index it with ELS. Args: data: data to be Indexed. Should be a dictionary. index_name: Name of the index. doc_type: The type of the document. Raises: RuntimeError: When the Indexing fails.
juraj-google-style
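A hypothetical call to index_data, assuming an instance (named workbench here purely for illustration) whose els_search attribute is a connected Elasticsearch client:

# Index a simple document; non-dict payloads are rejected before any network call.
doc = {'user': 'alice', 'score': 3, 'tags': ['demo']}
workbench.index_data(doc, index_name='events', doc_type='event')

try:
    workbench.index_data(['not', 'a', 'dict'], index_name='events', doc_type='event')
except RuntimeError as err:
    print(err)  # Index failed, data needs to be a dict!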
def set_device_name(self, new_name): device_name = self.get_characteristic_handle_from_uuid(UUID_DEVICE_NAME) if (device_name is None): logger.warn('Failed to find handle for device name') return False if (len(new_name) > MAX_DEVICE_NAME_LEN): logger.error('Device name exceeds maximum length ({} > {})'.format(len(new_name), MAX_DEVICE_NAME_LEN)) return False if self.dongle._write_attribute(self.conn_handle, device_name, new_name.encode('ascii')): self.name = new_name return True return False
Sets a new BLE device name for this SK8. Args: new_name (str): the new device name as an ASCII string, max 20 characters. Returns: True if the name was updated successfully, False otherwise.
codesearchnet
def add_to_tensor(self, mat, name='add_to_tensor'): with self._name_scope(name): mat = tensor_conversion.convert_to_tensor_v2_with_dispatch(mat, name='mat') mat_diag = array_ops.matrix_diag_part(mat) new_diag = 1 + mat_diag return array_ops.matrix_set_diag(mat, new_diag)
Add matrix represented by this operator to `mat`. Equiv to `I + mat`. Args: mat: `Tensor` with same `dtype` and shape broadcastable to `self`. name: A name to give this `Op`. Returns: A `Tensor` with broadcast shape and same `dtype` as `self`.
github-repos
def _as_indexed_slices(x, optimize=True): if not isinstance(x, (tensor_lib.Tensor, indexed_slices.IndexedSlices)): raise TypeError(f'Not a Tensor or IndexedSlices: {type(x)}.') if isinstance(x, indexed_slices.IndexedSlices): return x x_shape = array_ops.shape_internal(x, optimize=optimize) return indexed_slices.IndexedSlices(x, range(0, x_shape[0]), x_shape)
Convert 'x' to IndexedSlices. Convert a dense Tensor to a block-sparse IndexedSlices. Args: x: Either a Tensor object, or an IndexedSlices object. optimize: if true, attempt to optimize the conversion of 'x'. Returns: An IndexedSlices object. Raises: TypeError: If 'x' is not a Tensor or an IndexedSlices object.
github-repos
def register_event(self, *names): for name in names: if name in self.__events: continue self.__events[name] = Event(name)
Registers new events after instance creation Args: *names (str): Name or names of the events to register
juraj-google-style
def _maintain_LC(self, obj, slice_id, last_slice=False, begin_slice=True, shard_ctx=None, slice_ctx=None): if ((obj is None) or (not isinstance(obj, shard_life_cycle._ShardLifeCycle))): return shard_context = (shard_ctx or self.shard_context) slice_context = (slice_ctx or self.slice_context) if begin_slice: if (slice_id == 0): obj.begin_shard(shard_context) obj.begin_slice(slice_context) else: obj.end_slice(slice_context) if last_slice: obj.end_shard(shard_context)
Makes sure shard life cycle interface are respected. Args: obj: the obj that may have implemented _ShardLifeCycle. slice_id: current slice_id last_slice: whether this is the last slice. begin_slice: whether this is the beginning or the end of a slice. shard_ctx: shard ctx for dependency injection. If None, it will be read from self. slice_ctx: slice ctx for dependency injection. If None, it will be read from self.
codesearchnet
def destringize(self, string): m = segment_destr_pattern.match(string) self.genome_id = int(m.group(1)) self.chr_id = int(m.group(2)) self.direction = m.group(3) self.left = int(m.group(4)) self.right = int(m.group(5))
Get RNF values for this segment from its textual representation and save them into this object. Args: string (str): Textual representation of a segment.
codesearchnet
def absolute_hinge_difference(arr1, arr2, min_diff=10, dtype=np.uint8): diff = np.abs((arr1.astype(np.int) - arr2), dtype=np.int) return np.maximum((diff - min_diff), 0).astype(dtype)
Point-wise, hinge loss-like, difference between arrays. Args: arr1: integer array to compare. arr2: integer array to compare. min_diff: minimal difference taken into consideration. dtype: dtype of returned array. Returns: Array of max(|arr1 - arr2| - min_diff, 0), cast to dtype.
codesearchnet
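A small worked example for absolute_hinge_difference, assuming the function above is importable (note that its use of the legacy np.int alias ties it to older NumPy releases):

import numpy as np

a = np.array([0, 5, 30], dtype=np.uint8)
b = np.array([0, 0, 0], dtype=np.uint8)
# |a - b| = [0, 5, 30]; subtract min_diff and clip at zero.
print(absolute_hinge_difference(a, b))              # [ 0  0 20]
print(absolute_hinge_difference(a, b, min_diff=3))  # [ 0  2 27]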
def set_wd_noise(self, wd_noise): if isinstance(wd_noise, bool): wd_noise = str(wd_noise) if ((wd_noise.lower() == 'yes') or (wd_noise.lower() == 'true')): wd_noise = 'True' elif ((wd_noise.lower() == 'no') or (wd_noise.lower() == 'false')): wd_noise = 'False' elif (wd_noise.lower() == 'both'): wd_noise = 'Both' else: raise ValueError('wd_noise must be yes, no, True, False, or Both.') self.sensitivity_input.add_wd_noise = wd_noise return
Add White Dwarf Background Noise This adds the White Dwarf (WD) Background noise. This can either do calculations with, without, or with and without WD noise. Args: wd_noise (bool or str, optional): Add or remove WD background noise. First option is to have only calculations with the wd_noise. For this, use `yes` or True. Second option is no WD noise. For this, use `no` or False. For both calculations with and without WD noise, use `both`. Raises: ValueError: Input value is not one of the options.
codesearchnet
def register(cls, name: str, plugin: Type[ConnectionPlugin]) -> None: existing_plugin = cls.available.get(name) if (existing_plugin is None): cls.available[name] = plugin elif (existing_plugin != plugin): raise ConnectionPluginAlreadyRegistered(f"Connection plugin {plugin.__name__} can't be registered as {name!r} because plugin {existing_plugin.__name__} was already registered under this name")
Registers a connection plugin with a specified name Args: name: name of the connection plugin to register plugin: defined connection plugin class Raises: :obj:`nornir.core.exceptions.ConnectionPluginAlreadyRegistered` if another plugin with the specified name was already registered
codesearchnet
def validate(self, value): cast_callback = (self.cast_callback if self.cast_callback else self.cast_type) try: return (value if isinstance(value, self.cast_type) else cast_callback(value)) except Exception: raise NodeTypeError('Invalid value `{}` for {}.'.format(value, self.cast_type))
Base validation method. Check if type is valid, or try brute casting. Args: value (object): A value for validation. Returns: Base_type instance. Raises: SchemaError, if validation or type casting fails.
codesearchnet
def parse_html(self, text): bs = BeautifulSoup(text, "html5lib") file_reg = re.compile(MARKDOWN_IMAGE_REGEX, flags=re.IGNORECASE) tags = bs.findAll('img') for tag in tags: src_text = tag.get("src") or "" formatted_src_match = file_reg.search(src_text) src_text = formatted_src_match.group(2) if formatted_src_match else src_text alt_text = tag.get("alt") or "" tag.replaceWith("![{alt}]({src})".format(alt=alt_text, src=src_text)) return html.unescape(bs.find('body').renderContents().decode('utf-8'))
parse_html: Properly formats any img tags that might be in content Args: text (str): text to parse Returns: string with properly formatted images
juraj-google-style
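A hypothetical call to parse_html, assuming an object (called chef here only for illustration) that defines the method, with BeautifulSoup/html5lib installed and MARKDOWN_IMAGE_REGEX defined in the module:

snippet = '<p>Logo: <img src="images/logo.png" alt="Site logo"></p>'
print(chef.parse_html(snippet))
# Roughly: <p>Logo: ![Site logo](images/logo.png)</p>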
def make_mutant_tuples(example_protos, original_feature, index_to_mutate, viz_params): mutant_features = make_mutant_features(original_feature, index_to_mutate, viz_params) mutant_examples = [] for example_proto in example_protos: for mutant_feature in mutant_features: copied_example = copy.deepcopy(example_proto) feature_name = mutant_feature.original_feature.feature_name try: feature_list = proto_value_for_feature(copied_example, feature_name) if (index_to_mutate is None): new_values = mutant_feature.mutant_value else: new_values = list(feature_list) new_values[index_to_mutate] = mutant_feature.mutant_value del feature_list[:] feature_list.extend(new_values) mutant_examples.append(copied_example) except (ValueError, IndexError): mutant_examples.append(copied_example) return (mutant_features, mutant_examples)
Return a list of `MutantFeatureValue`s and a list of mutant Examples. Args: example_protos: The examples to mutate. original_feature: A `OriginalFeatureList` that encapsulates the feature to mutate. index_to_mutate: The index of the int64_list or float_list to mutate. viz_params: A `VizParams` object that contains the UI state of the request. Returns: A list of `MutantFeatureValue`s and a list of mutant examples.
codesearchnet
def children(self, sourcepath, recursive=True):
    return self._get_recursive_dependancies(
        self._CHILDREN_MAP,
        sourcepath,
        recursive=recursive  # honour the caller's flag instead of forcing True
    )
Recursively find all children that are imported from the given source path. Args: sourcepath (str): Source file path to search for. Keyword Arguments: recursive (bool): Switch to enable recursive finding (if True). Default to True. Returns: set: Set of found children paths.
juraj-google-style
def __init__(self, cache: PagedAttentionCache, config: PretrainedConfig, generation_config: GenerationConfig, input_queue: queue.Queue, output_queue: queue.Queue, stop_event: threading.Event, model_device: torch.device, model_dtype: torch.dtype, scheduler: Scheduler, streaming: bool=False, manual_eviction: bool=False): self.cache = cache self.config = config self.generation_config = generation_config self.input_queue = input_queue self.output_queue = output_queue self.stop_event = stop_event self.model_device = model_device self.model_dtype = model_dtype self.scheduler = scheduler self.streaming = streaming self.manual_eviction = manual_eviction self.requests_in_batch: List[RequestState] = [] self._configure_batch_parameters() self.metrics = ContinuousBatchProcessorMetrics(self.max_batch_tokens) self.setup_static_tensors()
Initialize the continuous batch processor. Args: cache: The paged attention cache to use config: The pretrained model configuration generation_config: The generation configuration input_queue: Queue for incoming requests output_queue: Queue for outgoing results stop_event: Event to signal processing should stop model_device: Device for model inputs/outputs model_dtype: Data type for model inputs/outputs scheduler: Scheduler that decides which requests enter the next batch streaming: Whether to stream tokens as they're generated manual_eviction: Whether cache eviction is handled manually by the caller
github-repos
def distribute_dataset(self, dataset): raise NotImplementedError()
Create a distributed dataset instance from the original user dataset. Args: dataset: the original global dataset instance. Only `tf.data.Dataset` is supported at the moment. Returns: a sharded `tf.data.Dataset` instance, which will produce data for the current local worker/process.
github-repos
def create_parser(default_name: str) -> argparse.ArgumentParser: argparser = argparse.ArgumentParser(fromfile_prefix_chars='@') argparser.add_argument('-H', '--host', help='Host to which the app binds. [%(default)s]', default='0.0.0.0') argparser.add_argument('-p', '--port', help='Port to which the app binds. [%(default)s]', default=5000, type=int) argparser.add_argument('-o', '--output', help='Logging output. [%(default)s]') argparser.add_argument('-n', '--name', help='Service name. This will be used as prefix for all endpoints. [%(default)s]', default=default_name) argparser.add_argument('--debug', help='Run the app in debug mode. [%(default)s]', action='store_true') argparser.add_argument('--eventbus-host', help='Hostname at which the eventbus can be reached [%(default)s]', default='eventbus') argparser.add_argument('--eventbus-port', help='Port at which the eventbus can be reached [%(default)s]', default=5672, type=int) return argparser
Creates the default brewblox_service ArgumentParser. Service-agnostic arguments are added. The parser allows calling code to add additional arguments before using it in create_app() Args: default_name (str): default value for the --name commandline argument. Returns: argparse.ArgumentParser: a Python ArgumentParser with defaults set.
codesearchnet
def ctc_label_dense_to_sparse(labels, label_lengths): label_shape = array_ops.shape(labels) num_batches_tns = array_ops_stack.stack([label_shape[0]]) max_num_labels_tns = array_ops_stack.stack([label_shape[1]]) def range_less_than(old_input, current_input): return array_ops.expand_dims(math_ops.range(array_ops.shape(old_input)[1]), 0) < array_ops.fill(max_num_labels_tns, current_input) init = math_ops.cast(array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool) dense_mask = functional_ops.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1) dense_mask = dense_mask[:, 0, :] label_array = array_ops.reshape(array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns), label_shape) label_ind = array_ops.boolean_mask(label_array, dense_mask) batch_array = array_ops.transpose(array_ops.reshape(array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns), reverse(label_shape, 0))) batch_ind = array_ops.boolean_mask(batch_array, dense_mask) indices = array_ops.transpose(array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1])) vals_sparse = array_ops.gather_nd(labels, indices) return sparse_tensor.SparseTensor(math_ops.cast(indices, dtypes_module.int64), vals_sparse, math_ops.cast(label_shape, dtypes_module.int64))
Converts CTC labels from dense to sparse. Args: labels: dense CTC labels. label_lengths: length of the labels. Returns: A sparse tensor representation of the labels.
github-repos
def codepointsInNamelist(namFilename, unique_glyphs=False, cache=None): key = ('charset' if (not unique_glyphs) else 'ownCharset') internals_dir = os.path.dirname(os.path.abspath(__file__)) target = os.path.join(internals_dir, namFilename) result = readNamelist(target, unique_glyphs, cache) return result[key]
Returns the set of codepoints contained in a given Namelist file. This is a replacement for CodepointsInSubset and implements the "#$ include" header format. Args: namFilename: The path to the Namelist file. unique_glyphs: Optional, whether to only include glyphs unique to subset. cache: Optional cache object passed through to readNamelist. Returns: A set containing the glyphs in the subset.
codesearchnet
def populate_readme(revision, rtd_version, **extra_kwargs): with open(TEMPLATE_FILE, 'r') as file_obj: template = file_obj.read() img_prefix = IMG_PREFIX.format(revision=revision) extra_links = EXTRA_LINKS.format(rtd_version=rtd_version, revision=revision) docs_img = DOCS_IMG.format(rtd_version=rtd_version) bernstein_basis = BERNSTEIN_BASIS_PLAIN.format(img_prefix=img_prefix) bezier_defn = BEZIER_DEFN_PLAIN.format(img_prefix=img_prefix) sum_to_unity = SUM_TO_UNITY_PLAIN.format(img_prefix=img_prefix) template_kwargs = {'code_block1': PLAIN_CODE_BLOCK, 'code_block2': PLAIN_CODE_BLOCK, 'code_block3': PLAIN_CODE_BLOCK, 'testcleanup': '', 'toctree': '', 'bernstein_basis': bernstein_basis, 'bezier_defn': bezier_defn, 'sum_to_unity': sum_to_unity, 'img_prefix': img_prefix, 'extra_links': extra_links, 'docs': '|docs| ', 'docs_img': docs_img, 'pypi': '\n\n|pypi| ', 'pypi_img': PYPI_IMG, 'versions': '|versions|\n\n', 'versions_img': VERSIONS_IMG, 'rtd_version': rtd_version, 'revision': revision, 'circleci_badge': CIRCLECI_BADGE, 'circleci_path': '', 'travis_badge': TRAVIS_BADGE, 'travis_path': '', 'appveyor_badge': APPVEYOR_BADGE, 'appveyor_path': '', 'coveralls_badge': COVERALLS_BADGE, 'coveralls_path': COVERALLS_PATH, 'zenodo': '|zenodo|', 'zenodo_img': ZENODO_IMG, 'joss': ' |JOSS|', 'joss_img': JOSS_IMG} template_kwargs.update(**extra_kwargs) readme_contents = template.format(**template_kwargs) readme_contents = INLINE_MATH_EXPR.sub(inline_math, readme_contents) sphinx_modules = [] to_replace = functools.partial(mod_replace, sphinx_modules=sphinx_modules) readme_contents = MOD_EXPR.sub(to_replace, readme_contents) if (sphinx_modules != ['bezier.curve', 'bezier.surface']): raise ValueError('Unexpected sphinx_modules', sphinx_modules) sphinx_docs = [] to_replace = functools.partial(doc_replace, sphinx_docs=sphinx_docs) readme_contents = DOC_EXPR.sub(to_replace, readme_contents) if (sphinx_docs != ['python/reference/bezier', 'development']): raise ValueError('Unexpected sphinx_docs', sphinx_docs) return readme_contents
Populate README template with values. Args: revision (str): The branch, commit, etc. being referred to (e.g. ``master``). rtd_version (str): The version to use for RTD (Read the Docs) links (e.g. ``latest``). extra_kwargs (Dict[str, str]): Over-ride for template arguments. Returns: str: The populated README contents. Raises: ValueError: If the ``sphinx_modules`` encountered are not as expected. ValueError: If the ``sphinx_docs`` encountered are not as expected.
codesearchnet
def generate_password_hash(password, salt, N=(1 << 14), r=8, p=1, buflen=64): if PYTHON2: password = password.encode('utf-8') salt = salt.encode('utf-8') pw_hash = scrypt_hash(password, salt, N, r, p, buflen) return enbase64(pw_hash)
Generate password hash given the password string and salt. Args: - ``password``: Password string. - ``salt`` : Random base64 encoded string. Optional args: - ``N`` : the CPU cost, must be a power of 2 greater than 1, defaults to 1 << 14. - ``r`` : the memory cost, defaults to 8. - ``p`` : the parallelization parameter, defaults to 1. The parameters r, p, and buflen must satisfy r * p < 2^30 and buflen <= (2^32 - 1) * 32. The recommended parameters for interactive logins as of 2009 are N=16384, r=8, p=1. Remember to use a good random salt. Returns: - base64 encoded scrypt hash.
codesearchnet
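A usage sketch for generate_password_hash, assuming the module's scrypt_hash and enbase64 helpers are importable and working on your Python version; the salt handling follows the docstring's advice of a random base64-encoded string:

import base64
import os

salt = base64.b64encode(os.urandom(16)).decode('ascii')
# The 2009-era interactive-login parameters mentioned in the docstring.
pw_hash = generate_password_hash('correct horse battery staple', salt, N=1 << 14, r=8, p=1)
print(pw_hash)  # base64-encoded scrypt digest; store it alongside the salt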
def create_container_instance_group(access_token, subscription_id, resource_group, container_group_name, container_list, location, ostype='Linux', port=80, iptype='public'): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups/', container_group_name, '?api-version=', CONTAINER_API]) container_group_body = {'location': location} properties = {'osType': ostype} properties['containers'] = container_list ipport = {'protocol': 'TCP'} ipport['port'] = port ipaddress = {'ports': [ipport]} ipaddress['type'] = iptype properties['ipAddress'] = ipaddress container_group_body['properties'] = properties body = json.dumps(container_group_body) return do_put(endpoint, body, access_token)
Create a new container group with a list of containers specified by container_list. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. container_list (list): A list of container properties. Use create_container_definition to create each container property set. location (str): Azure data center location. E.g. westus. ostype (str): Container operating system type. Linux or Windows. port (int): TCP port number. E.g. 8080. iptype (str): Type of IP address. E.g. public. Returns: HTTP response with JSON body of container group.
codesearchnet
def method(*args, **kwargs): assert len(args) == 0 assert len(kwargs) == 1 assert "num_return_vals" in kwargs num_return_vals = kwargs["num_return_vals"] def annotate_method(method): method.__ray_num_return_vals__ = num_return_vals return method return annotate_method
Annotate an actor method. .. code-block:: python @ray.remote class Foo(object): @ray.method(num_return_vals=2) def bar(self): return 1, 2 f = Foo.remote() _, _ = f.bar.remote() Args: num_return_vals: The number of object IDs that should be returned by invocations of this actor method.
juraj-google-style
def deploy_template_uri_param_uri(access_token, subscription_id, resource_group, deployment_name, template_uri, parameters_uri): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.Resources/deployments/', deployment_name, '?api-version=', DEPLOYMENTS_API]) properties = {'templateLink': {'uri': template_uri}} properties['mode'] = 'Incremental' properties['parametersLink'] = {'uri': parameters_uri} template_body = {'properties': properties} body = json.dumps(template_body) return do_put(endpoint, body, access_token)
Deploy a template with both template and parameters referenced by URIs. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. deployment_name (str): A name you give to the deployment. template_uri (str): URI which points to a JSON template (e.g. github raw location). parameters_uri (str): URI which points to a JSON parameters file (e.g. github raw location). Returns: HTTP response.
codesearchnet
def temp44(msg):
    d = hex2bin(data(msg))
    sign = int(d[23])
    value = bin2int(d[24:34])
    if sign:
        value = value - 1024

    temp = value * 0.25                  # primary scale
    temp = round(temp, 2)

    temp_alternative = value * 0.125     # alternative scale
    temp_alternative = round(temp_alternative, 3)

    return temp, temp_alternative
Static air temperature. Args: msg (String): 28 bytes hexadecimal message string Returns: float, float: temperature and alternative temperature in Celsius degree. Note: Two values are returned due to what seems to be an inconsistency error in ICAO 9871 (2008) Appendix A-67.
juraj-google-style
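The sign handling and the two candidate scales in temp44 can be illustrated without a real ADS-B message; the sketch below mirrors only the arithmetic, leaving out the helpers (hex2bin, data, bin2int) used above:

def decode_temp_field(sign_bit, value):
    # 10-bit field: subtract 1024 when the sign bit is set (two's-complement style).
    if sign_bit:
        value -= 1024
    return round(value * 0.25, 2), round(value * 0.125, 3)

print(decode_temp_field(0, 100))   # (25.0, 12.5)
print(decode_temp_field(1, 1000))  # (-6.0, -3.0)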
def regularize_cost_from_collection(name='regularize_cost'): ctx = get_current_tower_context() if (not ctx.is_training): return tf.constant(0, dtype=tf.float32, name=('empty_' + name)) if ctx.has_own_variables: losses = ctx.get_collection_in_tower(tfv1.GraphKeys.REGULARIZATION_LOSSES) else: losses = tfv1.get_collection(tfv1.GraphKeys.REGULARIZATION_LOSSES) if (len(losses) > 0): logger.info('regularize_cost_from_collection() found {} regularizers in REGULARIZATION_LOSSES collection.'.format(len(losses))) def maploss(l): assert l.dtype.is_floating, l if (l.dtype != tf.float32): l = tf.cast(l, tf.float32) return l losses = [maploss(l) for l in losses] reg_loss = tf.add_n(losses, name=name) return reg_loss else: return tf.constant(0, dtype=tf.float32, name=('empty_' + name))
Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``. If in replicated mode, will only regularize variables created within the current tower. Args: name (str): the name of the returned tensor Returns: tf.Tensor: a scalar, the total regularization cost.
codesearchnet
def register_views(self, app): self.add_resource(LoginRedirectView, '/auth/login') self.add_resource(LogoutRedirectView, '/auth/logout') for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.auth']['plugins']: cls = entry_point.load() app.available_auth_systems[cls.name] = cls if app.register_auth_system(cls): for vcls in cls.views: self.add_resource(vcls, *vcls.URLS) logger.debug('Registered auth system view {} for paths: {}'.format( cls.__name__, ', '.join(vcls.URLS) )) if not app.active_auth_system: logger.error('No auth systems active, please enable an auth system and then start the system again') sys.exit(-1) for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.views']['plugins']: view = entry_point.load() self.add_resource(view, *view.URLS) app.register_menu_item(view.MENU_ITEMS) logger.debug('Registered view {} for paths: {}'.format(view.__name__, ', '.join(view.URLS)))
Iterates all entry points for views and auth systems and dynamically load and register the routes with Flask Args: app (`CINQFlask`): CINQFlask object to register views for Returns: `None`
juraj-google-style
def __init__(self, scope, parent, expression=None): CodeStatement.__init__(self, scope, parent) self.expression = expression
Constructor for expression statements. Args: scope (CodeEntity): The program scope where this object belongs. parent (CodeEntity): This object's parent in the program tree. Kwargs: expression (CodeExpression): The expression of this statement.
juraj-google-style
def find_executable_batch_size(function: Optional[callable]=None, starting_batch_size: int=128, auto_find_batch_size: bool=False): if function is None: return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size, auto_find_batch_size=auto_find_batch_size) if auto_find_batch_size: requires_backends(find_executable_batch_size, 'accelerate') from accelerate.utils import find_executable_batch_size as accelerate_find_executable_batch_size return accelerate_find_executable_batch_size(function=function, starting_batch_size=starting_batch_size) return functools.partial(function, batch_size=starting_batch_size)
A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or CUDNN, the batch size is cut in half and passed to `function`. `function` must take in a `batch_size` parameter as its first argument. Args: function (`callable`, *optional*): A function to wrap. starting_batch_size (`int`, *optional*): The batch size to try and fit into memory. auto_find_batch_size (`bool`, *optional*): If False, will just execute `function`.
github-repos
def visualize_training(images_val, reconstructed_images_val, random_images_val, log_dir, prefix, viz_n=10): save_imgs(images_val[:viz_n], os.path.join(log_dir, '{}_inputs.png'.format(prefix))) save_imgs(reconstructed_images_val[:viz_n], os.path.join(log_dir, '{}_reconstructions.png'.format(prefix))) if (random_images_val is not None): save_imgs(random_images_val[:viz_n], os.path.join(log_dir, '{}_prior_samples.png'.format(prefix)))
Helper method to save images visualizing model reconstructions. Args: images_val: Numpy array containing a batch of input images. reconstructed_images_val: Numpy array giving the expected output (mean) of the decoder. random_images_val: Optionally, a Numpy array giving the expected output (mean) of decoding samples from the prior, or `None`. log_dir: The directory to write images (Python `str`). prefix: A specific label for the saved visualizations, which determines their filenames (Python `str`). viz_n: The number of images from each batch to visualize (Python `int`).
codesearchnet
def from_string(cls, s, name=None, modules=None, active=None): r = cls(name=name, modules=modules, active=active) _parse_repp(s.splitlines(), r, None) return r
Instantiate a REPP from a string. Args: name (str, optional): the name of the REPP module modules (dict, optional): a mapping from identifiers to REPP modules active (iterable, optional): an iterable of default module activations
juraj-google-style
def success(channel, title, datapacks): gui = ui_embed.UI( channel, title, "", modulename=modulename, datapacks=datapacks ) return gui
Creates an embed UI containing the help message Args: channel (discord.Channel): The Discord channel to bind the embed to title (str): The title of the embed datapacks (list): The hex value Returns: ui (ui_embed.UI): The embed UI object
juraj-google-style
def convert(self, vroot, entry_variables): self.graph_info = GraphInfo(vroot) self.entry_variables = entry_variables with nn.parameter_scope(self.name): for func in self.graph_info.funcs: o = self._identity_conversion(func) self.end_variable = o return self.end_variable
All functions are replaced with the same `new` function. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
juraj-google-style
def refresh_access_token(self, refresh_token): request = self._get_request() response = request.post(self.OAUTH_TOKEN_URL, {'grant_type': 'refresh_token', 'refresh_token': refresh_token}) self.auth = HSAccessTokenAuth.from_response(response) return self.auth.access_token
Refreshes the current access token. Gets a new access token, updates client auth and returns it. Args: refresh_token (str): Refresh token to use Returns: The new access token
codesearchnet
def min_row_dist_sum_idx(dists): row_sums = np.apply_along_axis(arr=dists, axis=0, func1d=np.sum) return row_sums.argmin()
Find the index of the row with the minimum row distance sum This should return the index of the row index with the least distance overall to all other rows. Args: dists (np.array): must be square distance matrix Returns: int: index of row with min dist row sum
juraj-google-style
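A quick check of min_row_dist_sum_idx on a small symmetric distance matrix, assuming the function above is importable:

import numpy as np

dists = np.array([[0, 1, 5],
                  [1, 0, 2],
                  [5, 2, 0]])
# Row sums are 6, 3 and 7, so the most central row is index 1.
print(min_row_dist_sum_idx(dists))  # 1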
def highlight_code(text, lexer_name='python', **kwargs): lexer_name = {'py': 'python', 'h': 'cpp', 'cpp': 'cpp', 'cxx': 'cpp', 'c': 'cpp'}.get(lexer_name.replace('.', ''), lexer_name) try: import pygments import pygments.lexers import pygments.formatters import pygments.formatters.terminal if sys.platform.startswith('win32'): import colorama colorama.init() formater = pygments.formatters.terminal.TerminalFormatter(bg='dark') lexer = pygments.lexers.get_lexer_by_name(lexer_name, **kwargs) new_text = pygments.highlight(text, lexer, formater) except ImportError: import warnings warnings.warn('pygments is not installed, code will not be highlighted') new_text = text return new_text
Highlights a block of text using ANSI tags based on language syntax. Args: text (str): plain text to highlight lexer_name (str): name of language **kwargs: passed to pygments.lexers.get_lexer_by_name Returns: str: text : highlighted text If pygments is not installed, the plain text is returned. CommandLine: python -c "import pygments.formatters; print(list(pygments.formatters.get_all_formatters()))" Example: >>> import ubelt as ub >>> text = 'import ubelt as ub; print(ub)' >>> new_text = ub.highlight_code(text) >>> print(new_text)
codesearchnet
def get(self, workflow_id): try: db = self._client[self.database] fs = GridFSProxy(GridFS(db.unproxied_object)) return DataStoreDocument(db[WORKFLOW_DATA_COLLECTION_NAME], fs, workflow_id) except ConnectionFailure: raise DataStoreNotConnected()
Returns the document for the given workflow id. Args: workflow_id (str): The id of the document that represents a workflow run. Raises: DataStoreNotConnected: If the data store is not connected to the server. Returns: DataStoreDocument: The document for the given workflow id.
codesearchnet
def _BatchNormWithGlobalNormalizationGrad(op: ops.Operation, grad): dx, dm, dv, db, dg = gen_nn_ops.batch_norm_with_global_normalization_grad(op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad, op.get_attr('variance_epsilon'), op.get_attr('scale_after_normalization')) return (dx, dm, dv, db, dg)
Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization. We do not backprop anything for the mean and var intentionally as they are not being trained with backprop in the operation. Args: op: The BatchNormOp for which we need to generate gradients. grad: Tensor. The gradients passed to the BatchNormOp. Returns: dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon))) dm: Backprop for mean, which is sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon)) dv: Backprop for variance, which is sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2) db: Backprop for beta, which is grad reduced in all except the last dimension. dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
github-repos
def get_resize_output_image_size(image, size, input_data_format) -> Tuple[int, int]: height, width = get_image_size(image, channel_dim=input_data_format) min_len = size['shortest_edge'] max_len = size['longest_edge'] aspect_ratio = width / height if width >= height and width > max_len: width = max_len height = int(width / aspect_ratio) elif height > width and height > max_len: height = max_len width = int(height * aspect_ratio) height = max(height, min_len) width = max(width, min_len) return (height, width)
Get the output size of the image after resizing given a dictionary specifying the max and min sizes. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image containing the keys "shortest_edge" and "longest_edge". input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: The output size of the image after resizing.
github-repos
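The resizing rule in get_resize_output_image_size, written out for one concrete case and with the library's image helpers left out; this is only a walk-through of the arithmetic, not a substitute for the function:

height, width = 1000, 2000                      # input image, height x width
min_len, max_len = 100, 500                     # shortest_edge, longest_edge
aspect_ratio = width / height                   # 2.0
# width is the longer side and exceeds longest_edge, so it is capped first:
width = max_len                                 # 500
height = int(width / aspect_ratio)              # 250
# both sides are then floored at shortest_edge:
height, width = max(height, min_len), max(width, min_len)
print((height, width))                          # (250, 500)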
def tokenize(self, text):
    output_tokens = []
    for token in whitespace_tokenize(text):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            output_tokens.append(self.unk_token)
            continue
        is_bad = False
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end])
                if start > 0:
                    # Continuation pieces carry the "##" prefix, as in the docstring example.
                    substr = '##' + substr
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                is_bad = True
                break
            sub_tokens.append(cur_substr)
            start = end
        if is_bad:
            output_tokens.append(self.unk_token)
        else:
            output_tokens.extend(sub_tokens)
    return output_tokens
Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens.
github-repos
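A usage sketch for the WordPiece tokenize method above; the constructor shown is an assumption (a class taking vocab, unk_token and max_input_chars_per_word, as in common WordPiece implementations), so adjust it to whatever class actually hosts the method:

vocab = {"un", "##aff", "##able", "runn", "##ing"}
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]", max_input_chars_per_word=100)

print(tokenizer.tokenize("unaffable"))        # ['un', '##aff', '##able']
print(tokenizer.tokenize("unaffable xyzzy"))  # ['un', '##aff', '##able', '[UNK]']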
def add_file_locations(self, file_locations=[]): if (not hasattr(self, '__file_locations__')): self.__file_locations__ = copy.copy(file_locations) else: self.__file_locations__ += copy.copy(file_locations)
Adds a list of file locations to the current list Args: file_locations: list of file location tuples
codesearchnet
def mark_parent_tasks_as_failed(self, task_name, flush_logs=False): for existing_task_name in self.tasks: if existing_task_name == task_name: break if flush_logs: self.tasks[existing_task_name].clear() self.tasks[existing_task_name].failed = True self.mark_main_tasks_as_failed()
Marks all the parent tasks as failed Args: task_name (str): Name of the child task flush_logs (bool): If ``True`` will discard all the logs from parent tasks Returns: None
juraj-google-style
def sync_results(vcs, signature): results_directory = _get_results_directory(vcs, signature) if (not os.path.exists(results_directory)): raise ResultsNotFoundError with open(os.path.join(results_directory, 'patterns'), 'r') as f: patterns = f.read().strip().split() includes = ['--include={}'.format(x) for x in patterns] cmd = ((['rsync', '-r'] + includes) + ['--exclude=*', os.path.join(results_directory, 'results', ''), os.path.join(vcs.path, '')]) subprocess.check_call(cmd)
Sync the saved results for `signature` back to the project. Args: vcs (easyci.vcs.base.Vcs) signature (str) Raises: ResultsNotFoundError
codesearchnet
def predict(self, X=None, **kwargs): context = {'X': X} context.update(kwargs) last_block_name = list(self.blocks.keys())[(- 1)] for (block_name, block) in self.blocks.items(): LOGGER.debug('Producing block %s', block_name) try: produce_args = self._get_block_args(block_name, block.produce_args, context) outputs = block.produce(**produce_args) if (block_name != last_block_name): output_dict = self._get_outputs(block_name, outputs, block.produce_output) context.update(output_dict) except Exception: LOGGER.exception('Exception caught producing MLBlock %s', block_name) raise return outputs
Produce predictions using the blocks of this pipeline. Sequentially call the `produce` method of each block, capturing the outputs before calling the next one. During the whole process a context dictionary is built, where both the passed arguments and the captured outputs of the `produce` methods are stored, and from which the arguments for the next `produce` calls will be taken. Args: X: Data which the pipeline will use to make predictions. **kwargs: Any additional keyword arguments will be directly added to the context dictionary and available for the blocks.
codesearchnet
def _get_target_encoder(self, x, y): assert len(x) == len(y) df = pd.DataFrame({y.name: y, x.name: x.fillna(NAN_INT)}) return df.groupby(x.name)[y.name].mean().to_dict()
Return a mapping from categories to average target values. Args: x (pandas.Series): a categorical column to encode. y (pandas.Series): the target column Returns: target_encoder (dict): mapping from categories to average target values
juraj-google-style
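A small example of the mapping _get_target_encoder builds, assuming enc is an instance of the class that defines it and that the module-level NAN_INT constant exists (it is only used to bucket missing category values):

import pandas as pd

x = pd.Series(['a', 'a', 'b', 'b', 'b'], name='city')
y = pd.Series([1, 0, 1, 1, 0], name='clicked')
print(enc._get_target_encoder(x, y))  # {'a': 0.5, 'b': 0.6666666666666666}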
def unit_pos_to_spot(unit_pos) -> ParkingSpot: min_ = 50 res = None for airport in parkings: for spot in parkings[airport]: spot_pos = parkings[airport][spot] dist = math.hypot((unit_pos[0] - spot_pos[0]), (unit_pos[1] - spot_pos[1])) if (dist < min_): min_ = dist res = ParkingSpot(airport=airport, spot=spot) return res
Translates a unit position to a known parking spot Args: unit_pos: unit position as Vec2 Returns: ParkingSpot object
codesearchnet
def write_build_statement(self, module, action, deps, imports, suffix): output = path_utils.join(self.pyi_dir, _module_to_output_path(module) + '.pyi' + suffix) logging.info('%s %s\n imports: %s\n deps: %s\n output: %s', action, module.name, imports, deps, output) if deps: deps = ' | ' + ' '.join((escape_ninja_path(dep) for dep in deps)) else: deps = '' with open(self.ninja_file, 'a') as f: f.write('build {output}: {action} {input}{deps}\n imports = {imports}\n module = {module}\n'.format(output=escape_ninja_path(output), action=action, input=escape_ninja_path(module.full_path), deps=deps, imports=escape_ninja_path(imports), module=module.name)) return output
Write a build statement for the given module. Args: module: A module_utils.Module object. action: An Action object. deps: The module's dependencies. imports: An imports file. suffix: An output file suffix. Returns: The expected output of the build statement.
github-repos
def process_data_fn(self, inputs: dict[str, common_types.ConsistentTensorType]) -> dict[str, common_types.ConsistentTensorType]: outputs = inputs.copy() for transform in self.transforms: columns = transform.columns for col in columns: intermediate_result = transform(outputs[col], output_column_name=col) for key, value in intermediate_result.items(): outputs[key] = value return outputs
This method is used in the AnalyzeAndTransformDataset step. It applies the transforms to the `inputs` in sequential order on the columns provided for a given transform. Args: inputs: A dictionary of column names and data. Returns: A dictionary of column names and transformed data.
github-repos
def resolve(node, source_info, graphs): visitor = TreeAnnotator(source_info, graphs) node = visitor.visit(node) return node
Resolves reaching definitions for each symbol. Args: node: ast.AST source_info: transformer.SourceInfo graphs: Dict[ast.FunctionDef, cfg.Graph] Returns: ast.AST
github-repos
def remove(self, force=False): return self.client.api.remove_node(self.id, force=force)
Remove this node from the swarm. Args: force (bool): Force remove an active node. Default: `False` Returns: `True` if the request was successful. Raises: :py:class:`docker.errors.NotFound` If the node doesn't exist in the swarm. :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def l2_loss(tensor, weight=1.0, scope=None): with tf.name_scope(scope, 'L2Loss', [tensor]): weight = tf.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='loss_weight') loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value') tf.add_to_collection(LOSSES_COLLECTION, loss) return loss
Define a L2Loss, useful for regularize, i.e. weight decay. Args: tensor: tensor to regularize. weight: an optional weight to modulate the loss. scope: Optional scope for name_scope. Returns: the L2 loss op.
juraj-google-style
def __call__(self, shape, dtype=dtypes.float32, **kwargs): self._validate_kwargs(kwargs) dtype = dtypes.as_dtype(dtype) if not dtype.is_numpy_compatible or dtype == dtypes.string: raise ValueError(f'Argument `dtype` expected to be numeric or boolean. Received {dtype}.') if _PARTITION_SHAPE in kwargs: shape = kwargs[_PARTITION_SHAPE] return array_ops.ones(shape, dtype)
Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not numeric or boolean.
github-repos
def getUrlMeta(self, url): return self.conn("GET", SkypeConnection.API_URL, params={"url": url}, auth=SkypeConnection.Auth.Authorize).json()
Retrieve various metadata associated with a URL, as seen by Skype. Args: url (str): address to ping for info Returns: dict: metadata for the website queried
juraj-google-style
def merge(profile, head, base, commit_message=None): if (not commit_message): commit_message = (((('Merged ' + head) + ' into ') + base) + '.') payload = {'base': base, 'head': head, 'commit_message': commit_message} response = api.post_merge_request(profile, payload) data = None if (response.status_code == 201): json_data = response.json() data = prepare(json_data) return data
Merge the head of a branch into the base branch. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. head The head to merge. It can be a SHA, or a branch name. base The name of the branch to merge the specified head into. commit_message The message to give for the commit. Returns: A dict with data about the merge.
codesearchnet
def add(self, profile_datum):
    self.total_op_time += profile_datum.op_time
    self.total_exec_time += profile_datum.exec_time
    device_and_node = '%s:%s' % (profile_datum.device_name,
                                 profile_datum.node_exec_stats.node_name)
    if device_and_node in self._node_to_exec_count:
        self._node_to_exec_count[device_and_node] += 1
    else:
        self._node_to_exec_count[device_and_node] = 1
Accumulate a new instance of ProfileDatum. Args: profile_datum: (`ProfileDatum`) an instance of `ProfileDatum` to accumulate to this object.
github-repos
def _WaitForStartup(self, deadline): start = time.time() sleep = 0.05 def Elapsed(): return time.time() - start while True: try: response, _ = self._http.request(self._host) if response.status == 200: logging.info('emulator responded after %f seconds', Elapsed()) return True except (socket.error, httplib.ResponseNotReady): pass if Elapsed() >= deadline: return False else: time.sleep(sleep) sleep *= 2
Waits for the emulator to start. Args: deadline: deadline in seconds Returns: True if the emulator responds within the deadline, False otherwise.
juraj-google-style
def Initialize(self, filename=None, data=None, fd=None, reset=True, must_exist=False, parser=ConfigFileParser): self.FlushCache() if reset: self.raw_data = collections.OrderedDict() self.writeback_data = collections.OrderedDict() self.writeback = None self.initialized = False if (fd is not None): self.parser = self.LoadSecondaryConfig(parser=parser(fd=fd)) elif (filename is not None): self.parser = self.LoadSecondaryConfig(filename) if (must_exist and (not self.parser.parsed)): raise ConfigFormatError(('Unable to parse config file %s' % filename)) elif (data is not None): self.parser = self.LoadSecondaryConfig(parser=parser(data=data)) elif must_exist: raise RuntimeError('Registry path not provided.') self.initialized = True
Initializes the config manager. This method is used to add more config options to the manager. The config can be given as one of the parameters as described in the Args section. Args: filename: The name of the configuration file to use. data: The configuration given directly as a long string of data. fd: A file descriptor of a configuration file. reset: If true, the previous configuration will be erased. must_exist: If true the data source must exist and be a valid configuration file, or we raise an exception. parser: The parser class to use (i.e. the format of the file). If not specified, guess from the filename. Raises: RuntimeError: No configuration was passed in any of the parameters. ConfigFormatError: Raised when the configuration file is invalid or does not exist.
codesearchnet
def write_additional(self, productversion, channel): self.fileobj.seek(self.additional_offset) extras = extras_header.build(dict(count=1, sections=[dict(channel=six.u(channel), productversion=six.u(productversion), size=(((len(channel) + len(productversion)) + 2) + 8), padding=b'')])) self.fileobj.write(extras) self.last_offset = self.fileobj.tell()
Write the additional information to the MAR header. Args: productversion (str): product and version string channel (str): channel string
codesearchnet
def inv(self, q_data, max_iterations=100, tollerance=1e-05): q_data = numpy.asfarray(q_data) assert numpy.all(((q_data >= 0) & (q_data <= 1))), 'sanitize your inputs!' shape = q_data.shape q_data = q_data.reshape(len(self), (- 1)) x_data = evaluation.evaluate_inverse(self, q_data) (lower, upper) = evaluation.evaluate_bound(self, x_data) x_data = numpy.clip(x_data, a_min=lower, a_max=upper) x_data = x_data.reshape(shape) return x_data
Inverse Rosenblatt transformation. If possible the transformation is done analytically. If not possible, transformation is approximated using an algorithm that alternates between Newton-Raphson and binary search. Args: q_data (numpy.ndarray): Probabilities to be inverted. If any values are outside ``[0, 1]``, an error will be raised. ``q_data.shape`` must be compatible with distribution shape. max_iterations (int): If approximation is used, this sets the maximum number of allowed iterations in the Newton-Raphson algorithm. tollerance (float): If approximation is used, this sets the error tolerance level required to define a sample as converged. Returns: (numpy.ndarray): Inverted probability values where ``out.shape == q_data.shape``.
codesearchnet
def one_step(self, current_state, previous_kernel_results): previous_step_size_assign = ([] if (self.step_size_update_fn is None) else (previous_kernel_results.extra.step_size_assign if mcmc_util.is_list_like(previous_kernel_results.extra.step_size_assign) else [previous_kernel_results.extra.step_size_assign])) with tf.control_dependencies(previous_step_size_assign): (next_state, kernel_results) = self._impl.one_step(current_state, previous_kernel_results) if (self.step_size_update_fn is not None): step_size_assign = self.step_size_update_fn(self.step_size, kernel_results) kernel_results = kernel_results._replace(extra=HamiltonianMonteCarloExtraKernelResults(step_size_assign=step_size_assign)) return (next_state, kernel_results)
Runs one iteration of Hamiltonian Monte Carlo. Args: current_state: `Tensor` or Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). The first `r` dimensions index independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`. previous_kernel_results: `collections.namedtuple` containing `Tensor`s representing values from previous calls to this function (or from the `bootstrap_results` function.) Returns: next_state: Tensor or Python list of `Tensor`s representing the state(s) of the Markov chain(s) after taking exactly one step. Has same type and shape as `current_state`. kernel_results: `collections.namedtuple` of internal calculations used to advance the chain. Raises: ValueError: if there isn't one `step_size` or a list with same length as `current_state`.
codesearchnet
def easeInOutBack(n, s=1.70158): _checkRange(n) n = (n * 2) if (n < 1): s *= 1.525 return (0.5 * ((n * n) * (((s + 1) * n) - s))) else: n -= 2 s *= 1.525 return (0.5 * (((n * n) * (((s + 1) * n) + s)) + 2))
A "back-in" tween function that overshoots both the start and destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. s (float): The amount of overshoot. Defaults to 1.70158. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
codesearchnet
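A few sample values of easeInOutBack, assuming it and its _checkRange guard are importable; the mid-progress dip below 0 and overshoot above 1 are the "back" effect the docstring describes:

for t in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(t, round(easeInOutBack(t), 4))
# 0.0 -> 0.0, 0.25 -> about -0.0997, 0.5 -> 0.5, 0.75 -> about 1.0997, 1.0 -> 1.0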
def download_sifts_xml(pdb_id, outdir='', force_rerun=False): baseURL = 'ftp: filename = '{}.xml.gz'.format(pdb_id.lower()) outfile = op.join(outdir, (filename.split('.')[0] + '.sifts.xml')) if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): response = urlopen((baseURL + filename)) with open(outfile, 'wb') as f: f.write(gzip.decompress(response.read())) return outfile
Download the SIFTS file for a PDB ID. Args: pdb_id (str): PDB ID outdir (str): Output directory, current working directory if not specified. force_rerun (bool): If the file should be downloaded again even if it exists Returns: str: Path to downloaded file
codesearchnet
def wrap_warnings(logger): def decorator(func): @functools.wraps(func) def new_func(*args, **kwargs): showwarning = warnings.showwarning warnings.showwarning = warn_logging(logger) try: return func(*args, **kwargs) finally: warnings.showwarning = showwarning return new_func return decorator
Have the function patch `warnings.showwarning` with the given logger. Arguments: logger (~logging.logger): the logger to wrap warnings with when the decorated function is called. Returns: `function`: a decorator function.
codesearchnet
def search(self, search_phrase, limit=None): query, query_params = self._make_query_from_terms(search_phrase, limit=limit) self._parsed_query = (str(query), query_params) if query is not None: self.backend.library.database.set_connection_search_path() results = self.execute(query, **query_params) for result in results: vid, dataset_vid, score = result yield PartitionSearchResult( vid=vid, dataset_vid=dataset_vid, score=score)
Finds partitions by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to generate. None means without limit. Generates: PartitionSearchResult instances.
juraj-google-style
def flatten(index, name='segmented_flatten'): batch_size = tf.reduce_prod(index.batch_shape()) offset = tf.range(batch_size) * index.num_segments offset = tf.reshape(offset, index.batch_shape()) for _ in range(index.batch_dims, index.indices.shape.rank): offset = tf.expand_dims(offset, -1) indices = tf.cast(offset, index.indices.dtype) + index.indices return IndexMap(indices=tf.reshape(indices, [-1]), num_segments=index.num_segments * batch_size, batch_dims=0)
Flattens a batched index map to a 1d index map. This operation relabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by `num_segments` * (k - 1). The result is a tensor with `num_segments` multiplied by the number of elements in the batch. Args: index: IndexMap to flatten. name: Name for the TensorFlow operation. Returns: The flattened IndexMap.
github-repos
def transform_to_mods_multimono(marc_xml, uuid, url): marc_xml = _read_content_or_path(marc_xml) transformed = xslt_transformation( marc_xml, _absolute_template_path("MARC21toMultiMonographTitle.xsl") ) return _apply_postprocessing( marc_xml=marc_xml, xml=transformed, func=mods_postprocessor.postprocess_multi_mono, uuid=uuid, url=url, )
Convert `marc_xml` to multimonograph MODS data format. Args: marc_xml (str): Filename or XML string. Don't use ``\\n`` in case of filename. uuid (str): UUID string giving the package ID. url (str): URL of the publication (public or not). Returns: list: Collection of transformed xml strings.
juraj-google-style
def stop(self, threads=None, close_summary_writer=True, ignore_live_threads=False): self._coord.request_stop() try: self._coord.join(threads, stop_grace_period_secs=self._stop_grace_secs, ignore_live_threads=ignore_live_threads) finally: if close_summary_writer and self._summary_writer: self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP)) self._summary_writer.close() self._graph_added_to_summary = False
Stop the services and the coordinator. This does not close the session. Args: threads: Optional list of threads to join with the coordinator. If `None`, defaults to the threads running the standard services, the threads started for `QueueRunners`, and the threads started by the `loop()` method. To wait on additional threads, pass the list in this parameter. close_summary_writer: Whether to close the `summary_writer`. Defaults to `True` if the summary writer was created by the supervisor, `False` otherwise. ignore_live_threads: If `True` ignores threads that remain running after a grace period when joining threads via the coordinator, instead of raising a RuntimeError.
github-repos
class RTDetrHungarianMatcher(nn.Module): def __init__(self, config): super().__init__() requires_backends(self, ['scipy']) self.class_cost = config.matcher_class_cost self.bbox_cost = config.matcher_bbox_cost self.giou_cost = config.matcher_giou_cost self.use_focal_loss = config.use_focal_loss self.alpha = config.matcher_alpha self.gamma = config.matcher_gamma if self.class_cost == self.bbox_cost == self.giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): batch_size, num_queries = outputs['logits'].shape[:2] out_bbox = outputs['pred_boxes'].flatten(0, 1) target_ids = torch.cat([v['class_labels'] for v in targets]) target_bbox = torch.cat([v['boxes'] for v in targets]) if self.use_focal_loss: out_prob = F.sigmoid(outputs['logits'].flatten(0, 1)) out_prob = out_prob[:, target_ids] neg_cost_class = (1 - self.alpha) * out_prob ** self.gamma * -(1 - out_prob + 1e-08).log() pos_cost_class = self.alpha * (1 - out_prob) ** self.gamma * -(out_prob + 1e-08).log() class_cost = pos_cost_class - neg_cost_class else: out_prob = outputs['logits'].flatten(0, 1).softmax(-1) class_cost = -out_prob[:, target_ids] bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v['boxes']) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
This class computes an assignment between the targets and the predictions of the network For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: config: RTDetrConfig
github-repos
def add_options(cls, parser): kwargs = {'action': 'store', 'default': '', 'parse_from_config': True, 'comma_separated_list': True} for num in range(cls.min_check, cls.max_check): parser.add_option(None, "--filename_check{}".format(num), **kwargs)
Required by flake8; adds the possible options. Called first. Args: parser (OptionsManager): The flake8 options manager to register options with.
juraj-google-style
def __init__(self, header, metadata, content): self.header = header self.metadata = metadata self.content = content self._buffers = []
Initialize a new message from header, metadata, and content dictionaries. To assemble a message from existing JSON fragments, use the ``assemble`` method. To create new messages with automatically generated headers, use subclass ``create`` methods. Args: header (JSON-like) : metadata (JSON-like) : content (JSON-like) :
juraj-google-style
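A small sketch of constructing a message directly from the three dictionaries; the header and content values are placeholders, and in practice the subclass create methods generate the header.

header = {"msgid": "1001", "msgtype": "PATCH-DOC"}  # placeholder values
metadata = {}
content = {"events": []}
msg = Message(header, metadata, content)
assert msg.content is content and msg._buffers == []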
def NamedPlaceholders(iterable): placeholders = ', '.join(('%({})s'.format(key) for key in sorted(iterable))) return '({})'.format(placeholders)
Returns named placeholders from all elements of the given iterable. Use this function for VALUES of MySQL INSERTs. To account for Iterables with undefined order (dicts before Python 3.6), this function sorts column names. Examples: >>> NamedPlaceholders({"password": "foo", "name": "bar"}) u'(%(name)s, %(password)s)' Args: iterable: The iterable of strings to be used as placeholder keys. Returns: A string containing a tuple of comma-separated, sorted, named, placeholders.
codesearchnet
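A usage sketch for NamedPlaceholders in a MySQL INSERT; the table name, column values, and cursor are assumptions.

row = {"name": "bar", "password": "foo"}
query = "INSERT INTO users (name, password) VALUES " + NamedPlaceholders(row)
# query == "INSERT INTO users (name, password) VALUES (%(name)s, %(password)s)"
cursor.execute(query, row)  # cursor is an assumed MySQLdb/pymysql cursor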
def __init__(self, resolver_context): super(FVDEFileSystem, self).__init__(resolver_context) self._fvde_volume = None self._file_object = None
Initializes a file system. Args: resolver_context (Context): resolver context.
juraj-google-style
def find_replace_string(obj, find, replace):
    try:
        strobj = str(obj)
        # str.replace is used instead of the removed Python 2 string.replace module function.
        newStr = strobj.replace(find, replace)
        if newStr == strobj:
            return obj
        else:
            return newStr
    except:
        line, filename, synerror = trace()
        raise ArcRestHelperError({
            "function": "find_replace_string",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        }
        )
    finally:
        pass
Performs a string.replace() on the input object. Args: obj (object): The object to find/replace. It will be cast to ``str``. find (str): The string to search for. replace (str): The string to replace with. Returns: str: The replaced string.
juraj-google-style
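Two illustrative calls for the helper above; the values are invented.

find_replace_string("/maps/old_service/config", "old_service", "new_service")
# -> "/maps/new_service/config"
find_replace_string(42, "old", "new")
# -> 42 (no match, so the original object is returned unchanged)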
def add_unique_id(self): uid = 0 for feature in self._data['features']: if feature['properties'].get('id'): raise Exception('one of the features already had an id field') feature['properties']['id'] = uid uid += 1
Adds a unique id property to each feature. Raises: - An Exception if any of the features already have an "id" field.
codesearchnet
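A sketch of the effect on a GeoJSON-style feature collection; `layer` stands in for whatever object carries the `_data` attribute, and the feature contents are invented.

layer._data = {"features": [
    {"properties": {"name": "park"}},
    {"properties": {"name": "river"}},
]}
layer.add_unique_id()
# layer._data["features"][0]["properties"]["id"] == 0
# layer._data["features"][1]["properties"]["id"] == 1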
def register_backend(name, backend, allow_overwrite=False): if hasattr(Circuit, "run_with_" + name): if allow_overwrite: warnings.warn(f"Circuit has attribute `run_with_{name}`.") else: raise ValueError(f"Circuit has attribute `run_with_{name}`.") if not allow_overwrite: if name in BACKENDS: raise ValueError(f"Backend '{name}' is already registered as backend.") BACKENDS[name] = backend
Register new backend.

    Args:
        name (str): The name of the backend.
        backend (type): The backend class (or instance) to register.
        allow_overwrite (bool, optional): If True, allow overwriting an existing
            backend. Otherwise, raise a ValueError.

    Raises:
        ValueError: The name is duplicated with an existing backend.
            When `allow_overwrite=True`, this error is not raised.
juraj-google-style
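A hedged sketch of registering a custom backend; the backend class body and the way the host library later invokes it are assumptions.

class MyBackend:
    """Placeholder backend; the real interface is defined by the host library."""
    def run(self, gates, n_qubits, **kwargs):
        ...

register_backend("mybackend", MyBackend)
# A second registration without allow_overwrite raises ValueError:
# register_backend("mybackend", MyBackend)                       # ValueError
register_backend("mybackend", MyBackend, allow_overwrite=True)   # allowed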
def P(value, bits=None, endian=None, target=None): return globals()[('P%d' % _get_bits(bits, target))](value, endian=endian, target=target)
Pack an unsigned pointer for a given target.

    Args:
        value(int): The value to pack.
        bits(:class:`~pwnypack.target.Target.Bits`): Override the default word size.
            If ``None`` it will look at the word size of ``target``.
        endian(:class:`~pwnypack.target.Target.Endian`): Override the default byte
            order. If ``None``, it will look at the byte order of the ``target`` argument.
        target(:class:`~pwnypack.target.Target`): Override the default target.
            If ``None``, it will use the global :data:`~pwnypack.target.target`.

    Returns:
        bytes: The packed pointer.
codesearchnet
def __init__(self, *, start_msg: Optional[str] = None, end_msg: Optional[str] = None, start_no_nl: bool = False): if start_msg is None and end_msg is None: raise ValueError( "At least one of 'start_msg' and 'end_msg' must be specified.") self._raise_if_not_none_nor_string(start_msg, "start_msg") self._raise_if_not_none_nor_string(end_msg, "end_msg") self._start_msg = start_msg self._end_msg = end_msg self._start_no_nl = start_no_nl
Note that all arguments are keyword-only arguments.

    Args:
        start_msg: A message to print before the function runs.
        end_msg: A message to print after the function has finished.
        start_no_nl: If True, no newline is appended after the start_msg.
juraj-google-style
def scrape(text, ptype=None): for ruletype, rule, info in scrape_types: if ptype and ptype != ruletype: continue regx = regexes.get(ruletype) for valu in regx.findall(text): yield (ruletype, valu)
Scrape types from a blob of text and return node tuples.

    Args:
        text (str): Text to scrape.
        ptype (str): Optional ptype to scrape. If present, only scrape rules
            which match the provided type.

    Yields:
        (str, str): Tuples of (type, valu) strings.
juraj-google-style
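An illustrative call for the scraper above; the exact type names yielded depend on the library's rule set, so treat them as assumptions.

text = "Contact admin@example.com or see http://example.com/report"
for ptype, valu in scrape(text):
    print(ptype, valu)
# e.g. inet:email admin@example.com
#      inet:url   http://example.com/report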
def block_reducible(cm, nodes1, nodes2): if ((not nodes1) or (not nodes2)): return True cm = cm[np.ix_(nodes1, nodes2)] if ((not cm.sum(0).all()) or (not cm.sum(1).all())): return True if ((len(nodes1) > 1) and (len(nodes2) > 1)): return block_cm(cm) return False
Return whether connections from ``nodes1`` to ``nodes2`` are reducible. Args: cm (np.ndarray): The network's connectivity matrix. nodes1 (tuple[int]): Source nodes nodes2 (tuple[int]): Sink nodes
codesearchnet
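A small worked example for block_reducible with a 3-node connectivity matrix; the values are chosen purely for illustration.

import numpy as np

cm = np.array([[1, 0, 0],
               [0, 1, 0],
               [0, 0, 1]])
block_reducible(cm, (0, 1), (2,))  # True: nodes 0 and 1 send nothing to node 2
block_reducible(cm, (0,), (0,))    # False: the single self-connection is intact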
def has_access(user, required_roles, match_all=True): if (ROLE_ADMIN in user.roles): return True if isinstance(required_roles, str): if (required_roles in user.roles): return True return False if match_all: for role in required_roles: if (role not in user.roles): return False return True else: for role in required_roles: if (role in user.roles): return True return False
Check if the user meets the role requirements. If `match_all` is True, all of the provided roles must apply.

    Args:
        user (:obj:`User`): User object
        required_roles (`list` of `str`): List of roles that the user must have applied
        match_all (`bool`): If true, all the required_roles must be applied to the user,
            else any one match will return `True`

    Returns:
        `bool`
codesearchnet
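An illustrative check using a stand-in user object; the role names are invented.

from types import SimpleNamespace

user = SimpleNamespace(roles=["User", "Auditor"])
has_access(user, "Auditor")                            # True (single role as a string)
has_access(user, ["Auditor", "NOC"])                   # False, every role is required
has_access(user, ["Auditor", "NOC"], match_all=False)  # True, any one role suffices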
def save_csv(X, y, path): if sparse.issparse(X): X = X.todense() np.savetxt(path, np.hstack((y.reshape(((- 1), 1)), X)), delimiter=',')
Save data as a CSV file. Args: X (numpy or scipy sparse matrix): Data matrix y (numpy array): Target vector. path (str): Path to the CSV file to save data.
codesearchnet
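A quick usage sketch for save_csv with random data; the output path is an assumption.

import numpy as np

X = np.random.rand(100, 5)
y = np.random.randint(0, 2, size=100)
save_csv(X, y, "train.csv")  # writes rows of the form "label,feat1,...,feat5"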
def CreateDynamicDisplayAdSettings(client, opener):
    media_service = client.GetService('MediaService', 'v201809')
    logo = {
        'xsi_type': 'Image',
        # The logo image URL was truncated in the source; substitute the full URL here.
        'mediaId': _CreateImage(media_service, opener, 'https:'),
    }
    dynamic_settings = {
        'landscapeLogoImage': logo,
        'pricePrefix': 'as low as',
        'promoText': 'Free shipping!'
    }
    return dynamic_settings
Creates dynamic display ad settings. Args: client: an AdWordsClient instance. opener: an OpenerDirector instance. Returns: A dict containing the dynamic display ad settings.
juraj-google-style
def tf_step(self, x, iteration, deltas, improvement, last_improvement, estimated_improvement): (x, next_iteration, deltas, improvement, last_improvement, estimated_improvement) = super(LineSearch, self).tf_step(x, iteration, deltas, improvement, last_improvement, estimated_improvement) next_x = [(t + delta) for (t, delta) in zip(x, deltas)] if (self.mode == 'linear'): next_deltas = deltas next_estimated_improvement = (estimated_improvement + self.estimated_incr) elif (self.mode == 'exponential'): next_deltas = [(delta * self.parameter) for delta in deltas] next_estimated_improvement = (estimated_improvement * self.parameter) target_value = self.fn_x(next_deltas) next_improvement = tf.divide(x=(target_value - self.base_value), y=tf.maximum(x=next_estimated_improvement, y=util.epsilon)) return (next_x, next_iteration, next_deltas, next_improvement, improvement, next_estimated_improvement)
Iteration loop body of the line search algorithm. Args: x: Current solution estimate $x_t$. iteration: Current iteration counter $t$. deltas: Current difference $x_t - x'$. improvement: Current improvement $(f(x_t) - f(x')) / v'$. last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$. estimated_improvement: Current estimated value $v'$. Returns: Updated arguments for next iteration.
codesearchnet
def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides, padding): x1 = np.random.rand(*tensor_in_sizes).astype(np.float32) x2 = np.random.rand(*filter_in_sizes).astype(np.float32) def _SetupVal(data_format, use_gpu): with test_util.device(use_gpu): t1 = constant_op.constant(x1, shape=tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) strides = [1] + conv_strides + [1] if data_format == 'NCHW': t1 = test_util.NHWCToNCHW(t1) strides = test_util.NHWCToNCHW(strides) conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding, data_format=data_format) if data_format == 'NCHW': conv = test_util.NCHWToNHWC(conv) return conv tensors = [] for data_format, use_gpu in GetTestConfigs(): tensors.append(_SetupVal(data_format, use_gpu)) values = self.evaluate(tensors) for i in range(1, len(values)): self.assertAllClose(values[0], values[i], rtol=0.001, atol=0.001)
Verifies that CPU and GPU produce the same values.

    Args:
      tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth].
      conv_strides: [row_stride, col_stride] for the convolution.
      padding: Padding type.
github-repos
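A sketch of how a test case might invoke the comparison helper above; the shapes are arbitrary but consistent (input depth 3 in both tensors).

# Inside a tf.test.TestCase-derived test method:
self._CompareFwdValues(
    tensor_in_sizes=[1, 4, 4, 3],   # batch, rows, cols, depth
    filter_in_sizes=[2, 2, 3, 8],   # kernel rows, kernel cols, in depth, out depth
    conv_strides=[1, 1],
    padding="SAME")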
def from_yaml(cls, yaml_path, filename=None): if filename: yaml_path = os.path.join(os.path.dirname(yaml_path), filename) assert yaml_path.endswith('.yaml'), ('Expected a/path/to/<yamlname>.yaml, got %r' % yaml_path) yamlname = os.path.basename(yaml_path)[:(- 5)] log.debug('Parsing %s', yaml_path) with open(yaml_path) as f: text = f.read() ds = yaml.load(text, Loader=yaml.RoundTripLoader) docstring = None sections = [] for d in ds: assert (len(d) == 1), ('Expected section length 1, got %d' % len(d)) lineno = (d._yaml_line_col.line + 1) name = list(d)[0] sections.append(cls(yaml_path, lineno, name, d[name])) if (name == 'builtin.defines.docstring'): docstring = d[name]['value'] return (sections, yamlname, docstring)
Split a YAML dictionary into parameters, controllers, parts, blocks, and defines sections.

    Args:
        yaml_path (str): File path to YAML file, or a file in the same dir
        filename (str): If given, use this filename as the last element in
            the yaml_path (so yaml_path can be __file__)

    Returns:
        tuple: (sections, yamlname, docstring) where sections is a list of
            created sections
codesearchnet
def read_var_str(self, max_size=sys.maxsize): length = self.read_var_int(max_size) return self.unpack(str(length) + 's', length)
Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator.

    Args:
        max_size (int): (Optional) maximum number of bytes to read.

    Returns:
        bytes: The bytes read from the stream.
juraj-google-style
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0): local_buffer = utils.BytearrayStream() if self._unique_identifier: self._unique_identifier.write(local_buffer, kmip_version=kmip_version) else: raise exceptions.InvalidField('The GetAttributes response payload is missing the unique identifier field.') if (kmip_version < enums.KMIPVersion.KMIP_2_0): for attribute in self._attributes: attribute.write(local_buffer, kmip_version=kmip_version) elif self._attributes: template_attribute = objects.TemplateAttribute(attributes=self.attributes) attributes = objects.convert_template_attribute_to_attributes(template_attribute) attributes.write(local_buffer, kmip_version=kmip_version) else: raise exceptions.InvalidField('The GetAttributes response payload is missing the attributes list.') self.length = local_buffer.length() super(GetAttributesResponsePayload, self).write(output_buffer, kmip_version=kmip_version) output_buffer.write(local_buffer.buffer)
Write the data encoding the GetAttributes response payload to a stream. Args: output_buffer (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
codesearchnet
def submit(self, port_id, tuple_): port_index = self._splpy_output_ports[port_id] ec._submit(self, port_index, tuple_)
Submit a tuple to the output port.

    The value to be submitted (``tuple_``) can be ``None`` (nothing will be
    submitted), a ``tuple``, ``dict`` or ``list`` of those types. For details
    on how ``tuple_`` is mapped to an SPL tuple see :ref:`submit-from-python`.

    Args:
        port_id: Identifier of the port specified in the ``output_ports``
            parameter of the ``@spl.primitive_operator`` decorator.
        tuple_: Tuple (or tuples) to be submitted to the output port.
codesearchnet
def _RetryLoop(self, func, timeout=None): timeout = (timeout or self.DEFAULT_TIMEOUT) deadline = (time.time() + timeout) sleep = 1 while True: try: return func(timeout) except grpc.RpcError: if ((time.time() + sleep) > deadline): raise time.sleep(sleep) sleep *= 2 timeout = (deadline - time.time())
Retries an operation until success or deadline.

    Args:
        func: The function to run. Must take a timeout, in seconds, as a single
            parameter. If it raises grpc.RpcError and the deadline has not been
            reached, it will be run again.
        timeout: Retries will continue until timeout seconds have passed.

    Returns:
        The return value of `func` on the first successful call.
codesearchnet
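A hedged sketch of wrapping a gRPC call with the retry loop above; the stub and request objects are assumptions about the surrounding client.

def _do_fetch(timeout):
    # `stub` and `request` are assumed gRPC objects from the surrounding client.
    return stub.FetchResults(request, timeout=timeout)

results = self._RetryLoop(_do_fetch, timeout=120)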
def fasta_format_check(fasta_path, logger): header_count = 0 line_count = 1 nt_count = 0 with open(fasta_path) as f: for l in f: l = l.strip() if l == '': continue if l[0] == '>': header_count += 1 continue if header_count == 0 and l[0] != '>': error_msg = 'First non-blank line (L:{line_count}) does not contain FASTA header. Line beginning with ">" expected.' \ .format(line_count=line_count) logger.error(error_msg) raise Exception(error_msg) non_nucleotide_chars_in_line = set(l) - VALID_NUCLEOTIDES if len(non_nucleotide_chars_in_line) > 0: error_msg = 'Line {line} contains the following non-nucleotide characters: {non_nt_chars}' \ .format(line=line_count, non_nt_chars=', '.join([x for x in non_nucleotide_chars_in_line])) logger.error(error_msg) raise Exception(error_msg) nt_count += len(l) line_count += 1 if nt_count == 0: error_msg = 'File "{}" does not contain any nucleotide sequence.'.format(fasta_path) logger.error(error_msg) raise Exception(error_msg) logger.info('Valid FASTA format "{}" ({} bp)'.format(fasta_path, nt_count))
Check that a file is valid FASTA format.

    - First non-blank line needs to begin with a '>' header character.
    - Sequence can only contain valid IUPAC nucleotide characters

    Args:
        fasta_path (str): Path to the FASTA file to check
        logger (logging.Logger): Logger used to report errors

    Raises:
        Exception: If invalid FASTA format
juraj-google-style
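A minimal invocation of the FASTA check; the file path is a placeholder.

import logging

logger = logging.getLogger("fasta_check")
fasta_format_check("assembly.fasta", logger)  # raises Exception on malformed input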
def button_number(self): if (self.type != EventType.TABLET_PAD_BUTTON): raise AttributeError(_wrong_prop.format(self.type)) return self._libinput.libinput_event_tablet_pad_get_button_number(self._handle)
The button number that triggered this event, starting at 0. For events that are not of type :attr:`~libinput.constant.Event.TABLET_PAD_BUTTON`, this property raises :exc:`AttributeError`. Note that the number returned is a generic sequential button number and not a semantic button code as defined in ``linux/input.h``. See `Tablet pad button numbers`_ for more details. Returns: int: The button triggering this event. Raises: AttributeError
codesearchnet
def ProcessCompletedRequests(self, notification, thread_pool): with queue_manager.QueueManager(token=self.token) as manager: for request, _ in manager.FetchCompletedRequests( self.session_id, timestamp=(0, notification.timestamp)): if request.HasField("request"): manager.DeQueueClientRequest(request.request) processing = [] while True: try: for request, responses in self.queue_manager.FetchCompletedResponses( self.session_id, timestamp=(0, notification.timestamp)): if request.id == 0 or not responses: continue if len(responses) != responses[-1].response_id: if request.transmission_count < 5: stats_collector_instance.Get().IncrementCounter( "grr_request_retransmission_count") request.transmission_count += 1 self.QueueRequest(request) break self.hunt_obj.HeartBeat() self._Process( request, responses, thread_pool=thread_pool, events=processing) self.queue_manager.DeleteRequest(request) self.context.next_processed_request += 1 return except queue_manager.MoreDataException: for event in processing: event.wait() self.FlushMessages() self.hunt_obj.Flush() continue finally: for event in processing: event.wait()
Go through the list of requests and process the completed ones. We take a snapshot in time of all requests and responses for this hunt. We then process as many completed requests as possible. If responses are not quite here we leave it for next time. Args: notification: The notification object that triggered this processing. thread_pool: The thread pool to process the responses on.
juraj-google-style
def update_mongo_compound_variants(self, bulk): requests = [] for var_id in bulk: var_obj = bulk[var_id] if (not var_obj.get('compounds')): continue operation = pymongo.UpdateOne({'_id': var_obj['_id']}, {'$set': {'compounds': var_obj['compounds']}}) requests.append(operation) if (not requests): return try: self.variant_collection.bulk_write(requests, ordered=False) except BulkWriteError as err: LOG.warning('Updating compounds failed') raise err
Update the compound information for a bulk of variants in the database Args: bulk(dict): {'_id': scout.models.Variant}
codesearchnet
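A sketch of the expected bulk payload for the compound update; `adapter` stands in for the object exposing this method and the variant documents are invented.

bulk = {
    "var_1": {"_id": "var_1", "compounds": [{"variant": "var_2", "combined_score": 12}]},
    "var_2": {"_id": "var_2"},  # no 'compounds' key, so this document is skipped
}
adapter.update_mongo_compound_variants(bulk)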
def request(url, args=None, data=None, headers=None, method=None, credentials=None, raw_response=False, stats=None): if (headers is None): headers = {} headers['user-agent'] = 'GoogleCloudDataLab/1.0' if (args is not None): qs = urllib.parse.urlencode(args) url = ((url + '?') + qs) if (data is not None): if (method is None): method = 'POST' if (data != ''): if ('Content-Type' not in headers): data = json.dumps(data) headers['Content-Type'] = 'application/json' headers['Content-Length'] = str(len(data)) elif (method == 'POST'): headers['Content-Length'] = '0' if (method is None): method = 'GET' http = Http.http if (credentials is not None): http = copy.copy(http) http = google_auth_httplib2.AuthorizedHttp(credentials) if (stats is not None): stats['duration'] = datetime.datetime.utcnow() response = None try: log.debug(('request: method[%(method)s], url[%(url)s], body[%(data)s]' % locals())) (response, content) = http.request(url, method=method, body=data, headers=headers) if (200 <= response.status < 300): if raw_response: return content if (type(content) == str): return json.loads(content) else: return json.loads(str(content, encoding='UTF-8')) else: raise RequestException(response.status, content) except ValueError: raise Exception('Failed to process HTTP response.') except httplib2.HttpLib2Error: raise Exception('Failed to send HTTP request.') finally: if (stats is not None): stats['data_size'] = len(data) stats['status'] = response.status stats['duration'] = (datetime.datetime.utcnow() - stats['duration']).total_seconds()
Issues HTTP requests.

    Args:
        url: the URL to request.
        args: optional query string arguments.
        data: optional data to be sent within the request.
        headers: optional headers to include in the request.
        method: optional HTTP method to use. If unspecified this is inferred
            (GET or POST) based on the existence of request data.
        credentials: optional set of credentials to authorize the request.
        raw_response: whether the raw response content should be returned as-is.
        stats: an optional dictionary that, if provided, will be populated with some
            useful info about the request, like 'duration' in seconds and 'data_size'
            in bytes. These may be useful for optimizing access to rate-limited APIs.

    Returns:
        The parsed response object.

    Raises:
        Exception when the HTTP request fails or the response cannot be processed.
codesearchnet
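A hedged example of a JSON POST with request statistics; the endpoint URL is a placeholder and `creds` is an assumed google-auth credentials object.

stats = {}
result = request(
    "https://example.com/api/v1/items",  # placeholder endpoint
    data={"name": "sample"},             # dict payload is JSON-encoded and sent as POST
    credentials=creds,                   # assumed google-auth credentials
    stats=stats)
print(stats["status"], stats["duration"], stats["data_size"])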
def _project_to_part_level(hist: Hist, outliers_removal_axis: OutliersRemovalAxis) -> Hist: import ROOT if isinstance(hist, (ROOT.TH2, ROOT.TH3)): projection_information: Dict[(str, Any)] = {} output_object = _OutputObject(None) projector = projectors.HistProjector(observable_to_project_from=hist, output_observable=output_object, output_attribute_name='output', projection_name_format='outliers_removal_hist', projection_information=projection_information) projector.projection_axes.append(projectors.HistAxisRange(axis_type=outliers_removal_axis, axis_range_name='outliers_removal_axis', min_val=projectors.HistAxisRange.apply_func_to_find_bin(None, 1), max_val=projectors.HistAxisRange.apply_func_to_find_bin(ROOT.TAxis.GetNbins))) projector.project() return output_object.output return hist
Project the input histogram to the particle level axis.

    Args:
        hist: Histogram to check for outliers.
        outliers_removal_axis: Axis along which outliers removal will be performed.
            Usually the particle level axis.

    Returns:
        The histogram to check for outliers.
codesearchnet