Columns: code (string, 20 to 4.93k characters), docstring (string, 33 to 1.27k characters), source (string, 3 classes)
def list_fastboot_devices(): out = fastboot.FastbootProxy().devices() return parse_device_list(out)
List all android devices connected to the computer that are in fastboot mode. These are detected by fastboot. This function doesn't raise any error if the `fastboot` binary doesn't exist, because `FastbootProxy` itself doesn't raise any error. Returns: A list of android device serials. Empty if there's none.
github-repos
def set_user(self, user): self.session['user_id'] = user.key self.session['user_data'] = user.clean_value() role = self.get_role() self.session['role_id'] = role.key self.current.role_id = role.key self.current.user_id = user.key self.session['permissions'] = role.get_permissions()
Writes user data to session. Args: user: User object
juraj-google-style
def get_search_space(ss_indicator):
    info = nats_bench.search_space_info('nats-bench', ss_indicator)
    if ss_indicator == 'tss':
        total = info['num_nodes'] * (info['num_nodes'] - 1)
        return model_tss_spc(pg.sublist_of(total, info['op_names'], choices_distinct=False), info['num_nodes'])
    elif ss_indicator == 'sss':
        return model_sss_spc(pg.sublist_of(info['num_layers'], info['candidates'], choices_distinct=False))
The default search space in NATS-Bench. Args: ss_indicator: tss or sss, indicating the topology or size search space. Returns: A hyper model object that represents a search space.
github-repos
def find_existing_record(env, zone_id, dns_name, check_key=None, check_value=None):
    client = boto3.Session(profile_name=env).client('route53')
    pager = client.get_paginator('list_resource_record_sets')
    existingrecord = None
    for rset in pager.paginate(HostedZoneId=zone_id):
        for record in rset['ResourceRecordSets']:
            if check_key:
                if ((record['Name'].rstrip('.') == dns_name) and (record.get(check_key) == check_value)):
                    LOG.info('Found existing record: %s', record)
                    existingrecord = record
                    break
    return existingrecord
Check if a specific DNS record exists. Args: env (str): Deployment environment. zone_id (str): Route53 zone id. dns_name (str): FQDN of application's dns entry to add/update. check_key (str): Key to look for in record. Example: "Type" check_value (str): Value to look for with check_key. Example: "CNAME" Returns: json: Found record, or None if no record was found.
codesearchnet
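A minimal usage sketch for find_existing_record above. The profile name, hosted zone id, and FQDN are hypothetical placeholders, and the call assumes AWS credentials exist for the named boto3 profile.

# Hypothetical inputs -- substitute a real profile, zone id and FQDN.
record = find_existing_record(
    env='dev',                     # boto3 profile name (assumed)
    zone_id='Z0000000000000',      # Route53 hosted zone id (placeholder)
    dns_name='myapp.example.com',  # FQDN to look up
    check_key='Type',              # only match record sets whose...
    check_value='CNAME',           # ...'Type' equals 'CNAME'
)
if record is None:
    print('no matching record set')
else:
    print(record['Name'], record['Type'])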
def __init__(self, parameter_name, value, type_name): super(BasicTypeParameterError, self).__init__(parameter_name, value) self.type_name = type_name
Constructor for BasicTypeParameterError. Args: parameter_name: String; the name of the parameter which had a value rejected. value: The actual value passed in for the enum. Usually string. type_name: Descriptive name of the data type expected.
juraj-google-style
def __init__(self, query_builder, field): self._field = field self._query_builder = query_builder self._awql = None
Creates the WHERE builder with specified query builder and field. This class should be instantiated through _QueryBuilder.Where. Don't call this constructor directly. Args: query_builder: The query builder that this WHERE builder links to. field: The field to be used in the WHERE condition. Returns: The WHERE builder.
juraj-google-style
def _get_css_files(cls, extra_files): packager = Packager() css_packages = getattr(cls, 'css_packages', {}) return dict( (media_target, cls._get_media_files(packager=packager, media_packages=media_packages, media_type='css', extra_files=extra_files.get(media_target, []))) for media_target, media_packages in six.iteritems(css_packages) )
Return all CSS files from the Media class. Args: extra_files (dict): The contents of the Media class's original :py:attr:`css` attribute, if one was provided. Returns: dict: The CSS media types and files to return for the :py:attr:`css` attribute.
juraj-google-style
def __init__( self, session, output_file, storage_type=definitions.STORAGE_TYPE_SESSION, task=None): super(StorageFileWriter, self).__init__( session, storage_type=storage_type, task=task) self._merge_task_storage_path = '' self._output_file = output_file self._processed_task_storage_path = '' self._storage_file = None self._task_storage_path = None
Initializes a storage writer. Args: session (Session): session the storage changes are part of. output_file (str): path to the output file. storage_type (Optional[str]): storage type. task(Optional[Task]): task.
juraj-google-style
def plugin_privileges(self, name): params = {'remote': name} headers = {} (registry, repo_name) = auth.resolve_repository_name(name) header = auth.get_config_header(self, registry) if header: headers['X-Registry-Auth'] = header url = self._url('/plugins/privileges') return self._result(self._get(url, params=params, headers=headers), True)
Retrieve list of privileges to be granted to a plugin. Args: name (string): Name of the remote plugin to examine. The ``:latest`` tag is optional, and is the default if omitted. Returns: A list of dictionaries representing the plugin's permissions
codesearchnet
def save_image(figure, filename): path = os.path.join(IMAGES_DIR, filename) figure.savefig(path, bbox_inches="tight") plt.close(figure)
Save an image to the docs images directory. Args: filename (str): The name of the file (not containing directory info).
juraj-google-style
def _ParseOrMerge(self, lines, message): tokenizer = Tokenizer(lines) while (not tokenizer.AtEnd()): self._MergeField(tokenizer, message)
Converts a text representation of a protocol message into a message. Args: lines: Lines of a message's text representation. message: A protocol buffer message to merge into. Raises: ParseError: On text parsing problems.
codesearchnet
def is_greater(a, b):
    a_chrom = CHROM_TO_INT.get(a.chrom, 0)
    b_chrom = CHROM_TO_INT.get(b.chrom, 0)
    if (a_chrom == 0 or b_chrom == 0):
        return False
    if a_chrom > b_chrom:
        return True
    if a_chrom == b_chrom:
        if a.pos > b.pos:
            return True
    return False
Check if position a is greater than position b This will look at chromosome and position. For example a position where chrom = 2 and pos = 300 is greater than a position where chrom = 1 and pos = 1000 If any of the chromosomes is outside [1-22,X,Y,MT] we cannot say which is greater. Args: a, b (Position) Returns: bool: True if a is greater than b
juraj-google-style
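A small sketch of the ordering rule described above. The namedtuple is only a stand-in for the real Position object, and it assumes CHROM_TO_INT maps plain chromosome names such as '1' and '2'; under those assumptions, chromosome is compared before position.

from collections import namedtuple

Position = namedtuple('Position', ['chrom', 'pos'])  # stand-in for the real class

a = Position(chrom='2', pos=300)
b = Position(chrom='1', pos=1000)
print(is_greater(a, b))   # True: chromosome 2 sorts after chromosome 1

c = Position(chrom='1', pos=500)
print(is_greater(c, b))   # False: same chromosome, 500 < 1000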
def log_first_n(level, msg, n, *args): count = _GetNextLogCountPerToken(_GetFileAndLine()) log_if(level, msg, count < n, *args)
Log 'msg % args' at level 'level' only first 'n' times. Not threadsafe. Args: level: The level at which to log. msg: The message to be logged. n: The number of times this should be called before it is logged. *args: The args to be substituted into the msg.
github-repos
def _uniquify_fetches(fetch_mappers):
    unique_fetches = []
    value_indices = []
    seen_fetches = {}
    for m in fetch_mappers:
        m_value_indices = []
        for f in m.unique_fetches():
            j = seen_fetches.get(id(f))
            if j is None:
                j = len(seen_fetches)
                seen_fetches[id(f)] = j
                unique_fetches.append(f)
            m_value_indices.append(j)
        value_indices.append(m_value_indices)
    return (unique_fetches, value_indices)
Uniquifies fetches from a list of fetch_mappers. This is a utility function used by _ListFetchMapper and _DictFetchMapper. It gathers all the unique fetches from a list of mappers and builds a list containing all of them but without duplicates (unique_fetches). It also returns a 2-D list of integers (values_indices) indicating at which index in unique_fetches the fetches of the mappers are located. This list is as follows: values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index Args: fetch_mappers: list of fetch mappers. Returns: A list of fetches. A 2-D list of integers.
github-repos
def Log(self, frame): if not self._log_message: return {'isError': True, 'description': {'format': LOG_ACTION_NOT_SUPPORTED}} if self._quota_recovery_start_time: ms_elapsed = (time.time() - self._quota_recovery_start_time) * 1000 if ms_elapsed > self.quota_recovery_ms: self._quota_recovery_start_time = None else: return message = 'LOGPOINT: ' + _FormatMessage( self._definition.get('logMessageFormat', ''), self._EvaluateExpressions(frame)) line = self._definition['location']['line'] cdbg_logging_location = (NormalizePath(frame.f_code.co_filename), line, _GetFrameCodeObjectName(frame)) if native.ApplyDynamicLogsQuota(len(message)): self._log_message(message) else: self._quota_recovery_start_time = time.time() self._log_message(DYNAMIC_LOG_OUT_OF_QUOTA) del cdbg_logging_location return None
Captures the minimal application state, formats it, and logs the message. Args: frame: Python stack frame of breakpoint hit. Returns: None on success or status message on error.
juraj-google-style
def no_results(channel): gui = ui_embed.UI( channel, "No results", ":c", modulename=modulename, colour=0xFF8800 ) return gui
Creates an embed UI for when there were no results Args: channel (discord.Channel): The Discord channel to bind the embed to Returns: ui (ui_embed.UI): The embed UI object
juraj-google-style
def FlatbufferToDict(fb, preserve_as_numpy):
    if isinstance(fb, int) or isinstance(fb, float) or isinstance(fb, str):
        return fb
    elif hasattr(fb, '__dict__'):
        result = {}
        for attribute_name in dir(fb):
            attribute = fb.__getattribute__(attribute_name)
            if not callable(attribute) and attribute_name[0] != '_':
                snake_name = CamelCaseToSnakeCase(attribute_name)
                preserve = True if attribute_name == 'buffers' else preserve_as_numpy
                result[snake_name] = FlatbufferToDict(attribute, preserve)
        return result
    elif isinstance(fb, np.ndarray):
        return fb if preserve_as_numpy else fb.tolist()
    elif hasattr(fb, '__len__'):
        return [FlatbufferToDict(entry, preserve_as_numpy) for entry in fb]
    else:
        return fb
Converts a hierarchy of FB objects into a nested dict. We avoid transforming big parts of the flat buffer into python arrays. This speeds conversion from ten minutes to a few seconds on big graphs. Args: fb: a flat buffer structure. (i.e. ModelT) preserve_as_numpy: True if all downstream np.arrays should be preserved; False if all downstream np.arrays should become python lists. Returns: A dictionary representing the flatbuffer rather than a flatbuffer object.
github-repos
def can_handle(x, y=None): raise NotImplementedError
Whether the current DataAdapter could handle the input x and y. Structure-wise, x and y can be a single object, a list of objects if there are multiple inputs/outputs, or a dictionary of objects when the inputs/outputs are named. Args: x: input features. y: target labels. Note that y could be None in the case of prediction. Returns: boolean
github-repos
def run_local(self, commands): process = subprocess.Popen( commands.get('cli_command'), shell=self.shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) out, err = process.communicate() self.run_display_app_output(out) self.run_display_app_errors(err) return self.run_exit_code(process.returncode)
Run the App on local system. Args: commands (dict): A dictionary of the CLI commands. Returns: int: The exit code of the subprocess command.
juraj-google-style
def add_subtask(self, subtask):
    if self.stopped:
        raise InternalError('Cannot add a subtask to a parent that is already stopped')
    if (not isinstance(subtask, BackgroundTask)):
        raise ArgumentError('Subtasks must inherit from BackgroundTask, task={}'.format(subtask))
    if (subtask._loop != self._loop):
        raise ArgumentError('Subtasks must run in the same BackgroundEventLoop as their parent', subtask=subtask, parent=self)
    self.subtasks.append(subtask)
Link a subtask to this parent task. This will cause stop() to block until the subtask has also finished. Calling stop will not directly cancel the subtask. It is expected that your finalizer for this parent task will cancel or otherwise stop the subtask. Args: subtask (BackgroundTask): Another task that will be stopped when this task is stopped.
codesearchnet
def init(module_paths, work_db, config):
    operator_names = cosmic_ray.plugins.operator_names()
    work_db.set_config(config=config)
    work_db.clear()
    for module_path in module_paths:
        module_ast = get_ast(module_path, python_version=config.python_version)
        for op_name in operator_names:
            operator = get_operator(op_name)(config.python_version)
            visitor = WorkDBInitVisitor(module_path, op_name, work_db, operator)
            visitor.walk(module_ast)
    apply_interceptors(work_db, config.sub('interceptors').get('enabled', ()))
Clear and initialize a work-db with work items. Any existing data in the work-db will be cleared and replaced with entirely new work orders. In particular, this means that any results in the db are removed. Args: module_paths: iterable of pathlib.Paths of modules to mutate. work_db: A `WorkDB` instance into which the work orders will be saved. config: The configuration for the new session.
juraj-google-style
def delete(self, project_id): self.logger.debug('Deleting project by id: ' + project_id) url = '%(base_url)s/%(project_id)s' % { 'base_url': self.base_url, 'project_id': project_id } r = self.gbdx_connection.delete(url) r.raise_for_status()
Deletes a project by id Args: project_id: The project id to delete Returns: Nothing
juraj-google-style
def enum_from_yaml(cls: Type[T_EnumFromYAML], constructor: Constructor, node: ruamel.yaml.nodes.ScalarNode) -> T_EnumFromYAML: return cls[node.value]
Decode YAML representation. This is a mixin method for reading enum values from YAML. It needs to be added to the enum as a classmethod. See the module docstring for further information on this approach and how to implement it. Note: This method assumes that the name of the enumeration value was stored as a scalar node. Args: constructor: Constructor from the YAML object. node: Scalar node extracted from the YAML being read. Returns: The constructed YAML value from the name of the enumerated value.
codesearchnet
def _validate_state_spec(cell_state_sizes, init_state_specs):
    validation_error = ValueError('An `initial_state` was passed that is not compatible with `cell.state_size`. Received `state_spec`={}; however `cell.state_size` is {}'.format(init_state_specs, cell_state_sizes))
    flat_cell_state_sizes = nest.flatten(cell_state_sizes)
    flat_state_specs = nest.flatten(init_state_specs)
    if len(flat_cell_state_sizes) != len(flat_state_specs):
        raise validation_error
    for cell_state_spec, cell_state_size in zip(flat_state_specs, flat_cell_state_sizes):
        if not tensor_shape.TensorShape(cell_state_spec.shape[1:]).is_compatible_with(tensor_shape.TensorShape(cell_state_size)):
            raise validation_error
Validate the state spec between the initial_state and the state_size. Args: cell_state_sizes: list, the `state_size` attribute from the cell. init_state_specs: list, the `state_spec` from the initial_state that is passed in `call()`. Raises: ValueError: When initial state spec is not compatible with the state size.
github-repos
def create_chebyshev_samples(order, dim=1): x_data = ((0.5 * numpy.cos(((numpy.arange(order, 0, (- 1)) * numpy.pi) / (order + 1)))) + 0.5) x_data = chaospy.quad.combine(([x_data] * dim)) return x_data.T
Chebyshev sampling function. Args: order (int): The number of samples to create along each axis. dim (int): The number of dimensions to create samples for. Returns: samples following Chebyshev sampling scheme mapped to the ``[0, 1]^dim`` hyper-cube and ``shape == (dim, order)``.
codesearchnet
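A quick re-derivation of the one-dimensional rule used in the function body, with plain numpy; the multi-dimensional combination done by chaospy.quad.combine is left out. This is a sketch, not a call into chaospy.

import numpy

order = 3
# Cosine-spaced Chebyshev points mapped onto [0, 1], as in the function body.
nodes = 0.5 * numpy.cos(numpy.arange(order, 0, -1) * numpy.pi / (order + 1)) + 0.5
print(nodes)  # approximately [0.1464, 0.5, 0.8536]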
def dump(voevent, file, pretty_print=True, xml_declaration=True): file.write(dumps(voevent, pretty_print, xml_declaration))
Writes the voevent to the file object. e.g.:: with open('/tmp/myvoevent.xml','wb') as f: voeventparse.dump(v, f) Args: voevent(:class:`Voevent`): Root node of the VOevent etree. file (io.IOBase): An open (binary mode) file object for writing. pretty_print(bool): See :func:`dumps` xml_declaration(bool): See :func:`dumps`
juraj-google-style
def master(self, task_type=None, task_id=None, rpc_layer=None):
    task_type = task_type if task_type is not None else self.task_type
    task_id = task_id if task_id is not None else self.task_id
    if task_type is not None and task_id is not None:
        return format_master_url(self.cluster_spec().task_address(task_type, task_id), rpc_layer or self.rpc_layer)
    return ''
Returns the master address to use when creating a session. You must have set the task_type and task_id object properties before calling this function, or pass in the `task_type` and `task_id` parameters when using this function. If you do both, the function parameters will override the object properties. Note: this is only useful for TensorFlow 1.x. Args: task_type: (Optional) The type of the TensorFlow task of the master. task_id: (Optional) The index of the TensorFlow task of the master. rpc_layer: (Optional) The RPC protocol for the given cluster. Returns: The name or URL of the session master.
github-repos
def AddArguments(cls, argument_group): argument_group.add_argument( '--viper-hash', '--viper_hash', dest='viper_hash', type=str, action='store', choices=viper.ViperAnalyzer.SUPPORTED_HASHES, default=cls._DEFAULT_HASH, metavar='HASH', help=( 'Type of hash to use to query the Viper server, the default is: ' '{0:s}. Supported options: {1:s}').format( cls._DEFAULT_HASH, ', '.join( viper.ViperAnalyzer.SUPPORTED_HASHES))) argument_group.add_argument( '--viper-host', '--viper_host', dest='viper_host', type=str, action='store', default=cls._DEFAULT_HOST, metavar='HOST', help=( 'Hostname of the Viper server to query, the default is: ' '{0:s}'.format(cls._DEFAULT_HOST))) argument_group.add_argument( '--viper-port', '--viper_port', dest='viper_port', type=int, action='store', default=cls._DEFAULT_PORT, metavar='PORT', help=( 'Port of the Viper server to query, the default is: {0:d}.'.format( cls._DEFAULT_PORT))) argument_group.add_argument( '--viper-protocol', '--viper_protocol', dest='viper_protocol', type=str, choices=viper.ViperAnalyzer.SUPPORTED_PROTOCOLS, action='store', default=cls._DEFAULT_PROTOCOL, metavar='PROTOCOL', help=( 'Protocol to use to query Viper, the default is: {0:s}. ' 'Supported options: {1:s}').format( cls._DEFAULT_PROTOCOL, ', '.join( viper.ViperAnalyzer.SUPPORTED_PROTOCOLS)))
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
juraj-google-style
def forward(self, key_value_states: torch.Tensor, attn_mask: Optional[torch.Tensor]=None): batch_size, num_patches = (key_value_states.shape[0], key_value_states.shape[1]) if num_patches not in self.patch_to_query_dict.keys(): raise KeyError(f'Number of patches {num_patches} not found in patch_to_query_dict amongst possible values {self.patch_to_query_dict.keys()}.') query_num = self.patch_to_query_dict[num_patches] queries = self.query[:query_num].unsqueeze(0).repeat(batch_size, 1, 1) if attn_mask is not None: attn_mask = attn_mask.repeat_interleave(self.num_heads, 0) attn_mask = attn_mask.unsqueeze(1).expand(-1, queries.size(1), -1) attention_out = self.cross_attn(key_value_states, queries, attn_mask=attn_mask) out = self.feed_forward(self.layer_norm(attention_out)) return out
Forward pass of the Projector module. Args: key_value_states (`torch.Tensor`): Input tensor of shape (batch_size, num_patches, kv_dim). attn_mask (`torch.Tensor`, *optional*, default is None): Attention mask. Returns: `torch.Tensor`: Output tensor of shape (batch_size, query_number, output_dim).
github-repos
def download_links(self, dir_path):
    links = self.links
    if not path.exists(dir_path):
        makedirs(dir_path)
    for i, url in enumerate(links):
        if 'start' in self.cseargs:
            i += int(self.cseargs['start'])
        ext = self.cseargs['fileType']
        ext = '.html' if ext == '' else '.' + ext
        file_name = self.cseargs['q'].replace(' ', '_') + '_' + str(i) + ext
        file_path = path.join(dir_path, file_name)
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            with open(file_path, 'wb') as f:
                r.raw.decode_content = True
                shutil.copyfileobj(r.raw, f)
Download web pages or images from search result links. Args: dir_path (str): Path of directory to save downloads of :class:`api.results`.links
juraj-google-style
def write_libraries(dir, libraries): files = [open(os.path.join(dir, k), 'w') for (k, _) in libraries] for (f, (_, v)) in zip(files, libraries): v.write_markdown_to_file(f) for (f, (_, v)) in zip(files, libraries): v.write_other_members(f) f.close()
Write a list of libraries to disk. Args: dir: Output directory. libraries: List of (filename, library) pairs.
codesearchnet
def scale_vmss(access_token, subscription_id, resource_group, vmss_name, capacity): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API]) body = (('{"sku":{"capacity":"' + str(capacity)) + '"}}') return do_patch(endpoint, body, access_token)
Change the instance count of an existing VM Scale Set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. capacity (int): New number of VMs. Returns: HTTP response.
codesearchnet
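The PATCH body assembled by scale_vmss is just a sku fragment; the snippet below reproduces the string concatenation from the function body for a hypothetical capacity of 5, without touching the Azure endpoint.

capacity = 5
body = ('{"sku":{"capacity":"' + str(capacity)) + '"}}'
print(body)  # {"sku":{"capacity":"5"}}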
def set_job(self, key, func, args): (res, pk) = key (jobs, lock) = self._jobs task = _tasks.UpdateTask(func(*args), key) with lock: job = jobs[res].get(pk) had = bool(job) if (not job): job = task jobs[res][pk] = job else: task.cancel() self._log.debug('Scheduling: %s-%s (%s)', res.tag, pk, ('new task' if (not had) else 'dup')) return job
Get a scheduled task, or set one if none exists. Returns: - task coroutine/continuation
codesearchnet
def get_uniform_frame_indices(total_num_frames: int, num_frames: Optional[int]=None):
    if num_frames is not None:
        indices = np.arange(0, total_num_frames, total_num_frames / num_frames).astype(int)
    else:
        indices = np.arange(0, total_num_frames).astype(int)
    return indices
Creates a numpy array for uniform sampling of `num_frame` frames from `total_num_frames` when loading a video. Args: total_num_frames (`int`): Total number of frames that a video has. num_frames (`int`, *optional*): Number of frames to sample uniformly. If not specified, all frames are sampled. Returns: np.ndarray: np array of frame indices that will be sampled.
github-repos
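A worked example of the num_frames branch above: sampling 4 frames uniformly from a 10-frame video gives a stride of 2.5, which truncates to the indices below.

import numpy as np

indices = np.arange(0, 10, 10 / 4).astype(int)  # same arithmetic as the function
print(indices)  # [0 2 5 7]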
def add_class_error(self, test_record): test_record.update_record() self.error.append(test_record)
Add a record to indicate a test class has failed before any test could execute. This is only called before any test is actually executed. So it only adds an error entry that describes why the class failed to the tally and does not affect the total number of tests requested or executed. Args: test_record: A TestResultRecord object for the test class.
github-repos
def guarantee_const(input, name=None): return gen_array_ops.guarantee_const(input=input, name=name)
Promise to the TF runtime that the input tensor is a constant. The runtime is then free to make optimizations based on this. Returns the input tensor without modification. Args: input: A `Tensor`. name: A name for this operation. Returns: A `Tensor`. Has the same dtype as `input`.
github-repos
def mv(src, dst):
    if (not exists(src)):
        raise File404(src)
    try:
        shutil.move(src, dst)
    except Exception as e:
        raise IOError(str(e))
Move a file or directory. If the destination already exists, this will attempt to overwrite it. Arguments: src (string): path to the source file or directory. dst (string): path to the destination file or directory. Raises: File404: if source does not exist. IOError: in case of error.
codesearchnet
def add_continue_node(self, ast_node, section_id, guards): node = self._add_jump_node(ast_node, guards) self.continues[section_id].add(node)
Grows the graph by adding a reentry node. This node causes control flow to go back to the loop section's entry. Args: ast_node: ast.AST section_id: Hashable, the node for which ast_node should be considered to be an exit node guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
github-repos
def ComputeRoot(hashes):
    if not len(hashes):
        raise Exception('Hashes must have length')
    if len(hashes) == 1:
        return hashes[0]
    tree = MerkleTree(hashes)
    return tree.Root.Hash
Compute the root hash. Args: hashes (list): the list of hashes to build the root from. Returns: bytes: the root hash.
juraj-google-style
def make_list_of_images(images, expected_ndims: int=3) -> list[ImageInput]:
    if is_batched(images):
        return images
    if is_pil_image(images):
        return [images]
    if is_valid_image(images):
        if images.ndim == expected_ndims + 1:
            images = list(images)
        elif images.ndim == expected_ndims:
            images = [images]
        else:
            raise ValueError(f'Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got {images.ndim} dimensions.')
        return images
    raise ValueError(f'Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray, but got {type(images)}.')
Ensure that the output is a list of images. If the input is a single image, it is converted to a list of length 1. If the input is a batch of images, it is converted to a list of images. Args: images (`ImageInput`): Image or images to turn into a list of images. expected_ndims (`int`, *optional*, defaults to 3): Expected number of dimensions for a single input image. If the input image has a different number of dimensions, an error is raised.
github-repos
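A short sketch of the two numpy paths described above, assuming make_list_of_images is importable from the same image utilities; a 3-D array is wrapped into a single-element list, while a 4-D array is split into one entry per image.

import numpy as np

single = np.zeros((224, 224, 3))    # one HWC image
batch = np.zeros((2, 224, 224, 3))  # two stacked HWC images

print(len(make_list_of_images(single)))  # 1
print(len(make_list_of_images(batch)))   # 2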
def get_weights_of_nn_sites(self, structure, n): return [e['weight'] for e in self.get_nn_info(structure, n)]
Get weight associated with each near neighbor of site with index n in structure. Args: structure (Structure): input structure. n (integer): index of site for which to determine the weights. Returns: weights (list of floats): near-neighbor weights.
juraj-google-style
def _exec_query(self): if (not self._solr_locked): if (not self.compiled_query): self._compile_query() try: solr_params = self._process_params() if settings.DEBUG: t1 = time.time() self._solr_cache = self.bucket.search(self.compiled_query, self.index_name, **solr_params) if (settings.DEBUG and (settings.DEBUG_LEVEL >= 5)): print(('QRY => %s\nSOLR_PARAMS => %s' % (self.compiled_query, solr_params))) except riak.RiakError as err: err.value += self._get_debug_data() raise self._solr_locked = True return self._solr_cache['docs']
Executes solr query if it hasn't already executed. Returns: Self.
codesearchnet
def update_reorders_v2(output_file_path): spec = tf_upgrade_v2.TFAPIChangeSpec() reordered_function_names = spec.reordered_function_names need_kwargs_function_names = spec.function_transformers.keys() function_renames = spec.symbol_renames all_reorders = collect_function_arg_names(reordered_function_names, need_kwargs_function_names, function_renames) rename_lines = [get_reorder_line(name, arg_names) for name, arg_names in all_reorders.items()] renames_file_text = '%sreorders = {\n%s\n}\n' % (_FILE_HEADER, ',\n'.join(sorted(rename_lines))) file_io.write_string_to_file(output_file_path, renames_file_text)
Writes a Python dictionary mapping function name to argument order. Args: output_file_path: File path to write output to. Any existing contents would be replaced.
github-repos
def from_string(cls, string_input): correlation_grid = {} Exc_DFT_option = {} COHSEX_options = {} GW_options = {} BSE_TDDFT_options = {} lines = string_input.strip().split("\n") lines.pop(0) l = lines.pop(0).strip() toks = l.split() nat = toks[0] nsp = toks[1] lines.pop(0) l = lines.pop(0).strip() toks = l.split() nvbands = toks[0] lines.pop(0) l = lines.pop(0).strip() toks = l.split() correlation_grid['n_grid'] = toks[0] correlation_grid['dE_grid'] = toks[1] lines.pop(0) l = lines.pop(0).strip() toks = l.split() Exc_DFT_option['rdVxcpsi'] = toks[0] lines.pop(0) l = lines.pop(0).strip() toks = l.split() COHSEX_options['nv_cohsex'] = toks[0] COHSEX_options['nc_cohsex'] = toks[1] COHSEX_options['eigMethod'] = toks[2] lines.pop(0) l = lines.pop(0).strip() toks = l.split() COHSEX_options['nit_cohsex'] = toks[0] COHSEX_options['resMethod'] = toks[1] COHSEX_options['scf_cohsex_wf'] = toks[2] COHSEX_options['mix_cohsex'] = toks[3] lines.pop(0) l = lines.pop(0).strip() toks = l.split() GW_options['nv_corr'] = toks[0] GW_options['nc_corr'] = toks[1] lines.pop(0) l = lines.pop(0).strip() toks = l.split() GW_options['nit_gw'] = toks[0] lines.pop(0) l = lines.pop(0).strip() toks = l.split() BSE_TDDFT_options['do_bse'] = toks[0] BSE_TDDFT_options['do_tddft'] = toks[1] lines.pop(0) l = lines.pop(0).strip() toks = l.split() BSE_TDDFT_options['nv_bse'] = toks[0] BSE_TDDFT_options['nc_bse'] = toks[1] lines.pop(0) l = lines.pop(0).strip() toks = l.split() BSE_TDDFT_options['npsi_bse'] = toks[0] BSE_TDDFT_options['nit_bse'] = toks[1] lines.pop(0) atname = [] i = int(nsp) while i != 0: l = lines.pop(0).strip() toks = l.split() atname.append(toks[0]) i -= 1 lines.pop(0) l = lines.pop(0).strip() toks = l.split() scale = toks[0] lines.pop(0) species = [] coords = [] i = int(nat) while i != 0: l = lines.pop(0).strip() toks = l.split() coords.append([float(j) for j in toks[0:3]]) species.append(atname[int(toks[3]) - 1]) i -= 1 mol = Molecule(species, coords) return FiestaInput(mol=mol, correlation_grid=correlation_grid, Exc_DFT_option=Exc_DFT_option, COHSEX_options=COHSEX_options, GW_options=GW_options, BSE_TDDFT_options=BSE_TDDFT_options)
Read a FiestaInput from a string. Currently tested to work with files generated from this class itself. Args: string_input: string_input to parse. Returns: FiestaInput object
juraj-google-style
def _ParseLine(self, parser_mediator, structure): month, day_of_month, year, hours, minutes, seconds, milliseconds = ( structure.date_time) year += 2000 time_elements_tuple = ( year, month, day_of_month, hours, minutes, seconds, milliseconds) try: date_time = dfdatetime_time_elements.TimeElementsInMilliseconds( time_elements_tuple=time_elements_tuple) except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid date time value: {0!s}'.format(structure.date_time)) return event_data = SkyDriveLogEventData() event_data.detail = structure.detail.replace('\n', ' ') event_data.log_level = structure.log_level event_data.module = structure.module event_data.source_code = structure.source_code event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a log line and stores the appropriate attributes. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file.
juraj-google-style
def insert(self, i, species, coords, validate_proximity=False, properties=None):
    new_site = Site(species, coords, properties=properties)
    if validate_proximity:
        for site in self:
            if (site.distance(new_site) < self.DISTANCE_TOLERANCE):
                raise ValueError('New site is too close to an existing site!')
    self._sites.insert(i, new_site)
Insert a site into the molecule. Args: i (int): Index to insert site species: species of inserted site coords (3x1 array): coordinates of inserted site validate_proximity (bool): Whether to check if inserted site is too close to an existing site. Defaults to False. properties (dict): Dict of properties for the Site. Returns: New molecule with inserted site.
codesearchnet
def get_phonopy_structure(pmg_structure): symbols = [site.specie.symbol for site in pmg_structure] return PhonopyAtoms(symbols=symbols, cell=pmg_structure.lattice.matrix, scaled_positions=pmg_structure.frac_coords)
Convert a pymatgen Structure object to a PhonopyAtoms object. Args: pmg_structure (pymatgen Structure): A Pymatgen structure object.
codesearchnet
def get_remote_info(url_id):
    try:
        data = _send_request(url_id)
    except Exception as e:
        sys.stderr.write('Seeder GET error: ')
        sys.stderr.write(str(e.message))
        return None
    return _convert_to_wakat_format(data)
Download data and convert them to dict used in frontend. Args: url_id (str): ID used as identification in Seeder. Returns: dict: Dict with data for frontend or None in case of error.
codesearchnet
def validate(self, tags, confidence): (intent, tags) = self.validate_with_tags(tags, confidence) return intent
Using this method removes tags from the result of validate_with_tags. Returns: intent (intent): Results from validate_with_tags
codesearchnet
def CredibleInterval(self, percentage=90): prob = (1 - percentage / 100.0) / 2 interval = self.Value(prob), self.Value(1 - prob) return interval
Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high
juraj-google-style
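The percentile arithmetic behind CredibleInterval, spelled out for the default 90% interval; `self.Value` is assumed to be the distribution's inverse-CDF lookup.

percentage = 90
prob = (1 - percentage / 100.0) / 2   # 0.05, up to floating point
# interval = (self.Value(0.05), self.Value(0.95)),
# i.e. the 5th and 95th percentiles bound the central 90% of the mass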
def create_course_completion(self, user_id, payload): url = (self.enterprise_configuration.sapsf_base_url + self.global_sap_config.completion_status_api_path) return self._call_post_with_user_override(user_id, url, payload)
Send a completion status payload to the SuccessFactors OCN Completion Status endpoint Args: user_id (str): The sap user id that the completion status is being sent for. payload (str): JSON encoded object (serialized from SapSuccessFactorsLearnerDataTransmissionAudit) containing completion status fields per SuccessFactors documentation. Returns: The body of the response from SAP SuccessFactors, if successful Raises: HTTPError: if we received a failure response code from SAP SuccessFactors
codesearchnet
def lunr(ref, fields, documents, languages=None): if ((languages is not None) and lang.LANGUAGE_SUPPORT): if isinstance(languages, basestring): languages = [languages] unsupported_languages = (set(languages) - set(lang.SUPPORTED_LANGUAGES)) if unsupported_languages: raise RuntimeError('The specified languages {} are not supported, please choose one of {}'.format(', '.join(unsupported_languages), ', '.join(lang.SUPPORTED_LANGUAGES.keys()))) builder = lang.get_nltk_builder(languages) else: builder = Builder() builder.pipeline.add(trimmer, stop_word_filter, stemmer) builder.search_pipeline.add(stemmer) builder.ref(ref) for field in fields: if isinstance(field, dict): builder.field(**field) else: builder.field(field) for document in documents: if isinstance(document, (tuple, list)): builder.add(document[0], attributes=document[1]) else: builder.add(document) return builder.build()
A convenience function to configure and construct a lunr.Index. Args: ref (str): The key in the documents to be used as the reference. fields (list): A list of strings defining fields in the documents to index. Optionally a list of dictionaries with three keys: `field_name` defining the document's field, `boost` an integer defining a boost to be applied to the field, and `extractor` a callable taking the document as a single argument and returning a string located in the document in a particular way. documents (list): The list of dictionaries representing the documents to index. Optionally a 2-tuple of dicts, the first one being the document and the second the associated attributes to it. languages (str or list, optional): The languages to use if using NLTK language support, ignored if NLTK is not available. Returns: Index: The populated Index ready to search against.
codesearchnet
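A minimal usage sketch for the convenience function above, following the lunr.py style of API; the document contents are made up, and the exact shape of each search result follows lunr.py's Index.search.

documents = [
    {'id': 'a', 'title': 'Mr. Green kills Colonel Mustard', 'body': 'in the study with the candlestick'},
    {'id': 'b', 'title': 'Professor Plumb waters the plant', 'body': 'in the kitchen'},
]

idx = lunr(ref='id', fields=['title', 'body'], documents=documents)
results = idx.search('plant')  # list of matches, each carrying a 'ref' back into documents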
def convert_attribute_name_to_tag(value):
    if not isinstance(value, six.string_types):
        raise ValueError("The attribute name must be a string.")
    for entry in attribute_name_tag_table:
        if value == entry[0]:
            return entry[1]
    raise ValueError("Unrecognized attribute name: '{}'".format(value))
A utility function that converts an attribute name string into the corresponding attribute tag. For example: 'State' -> enums.Tags.STATE Args: value (string): The string name of the attribute. Returns: enum: The Tags enumeration value that corresponds to the attribute name string. Raises: ValueError: if the attribute name string is not a string or if it is an unrecognized attribute name
juraj-google-style
async def get_auth(request):
    auth_val = request.get(AUTH_KEY)
    if auth_val:
        return auth_val
    auth_policy = request.get(POLICY_KEY)
    if (auth_policy is None):
        raise RuntimeError('auth_middleware not installed')
    request[AUTH_KEY] = (await auth_policy.get(request))
    return request[AUTH_KEY]
Returns the user_id associated with a particular request. Args: request: aiohttp Request object. Returns: The user_id associated with the request, or None if no user is associated with the request. Raises: RuntimeError: Middleware is not installed
codesearchnet
def spherical_vert(script, radius=1.0, center_pt=(0.0, 0.0, 0.0)): function = 'sqrt((x-{})^2+(y-{})^2+(z-{})^2)<={}'.format( center_pt[0], center_pt[1], center_pt[2], radius) vert_function(script, function=function) return None
Select all vertices within a spherical radius Args: radius (float): radius of the sphere center_pt (3 coordinate tuple or list): center point of the sphere Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
juraj-google-style
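The selection condition string built for the default arguments can be inspected without a MeshLab session; the snippet simply reproduces the format call from the function body.

radius = 1.0
center_pt = (0.0, 0.0, 0.0)
function = 'sqrt((x-{})^2+(y-{})^2+(z-{})^2)<={}'.format(
    center_pt[0], center_pt[1], center_pt[2], radius)
print(function)  # sqrt((x-0.0)^2+(y-0.0)^2+(z-0.0)^2)<=1.0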
def input_mask(self):
    inputs = self.input
    if isinstance(inputs, list):
        return [getattr(x, '_keras_mask', None) for x in inputs]
    else:
        return getattr(inputs, '_keras_mask', None)
Retrieves the input mask tensor(s) of a layer. Only applicable if the layer has exactly one inbound node, i.e. if it is connected to one incoming layer. Returns: Input mask tensor (potentially None) or list of input mask tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers.
github-repos
def transform(self, col): out = pd.DataFrame() column = col[self.col_name].replace({np.nan: None}) out[self.col_name] = column.apply(self.get_val) return out
Prepare the transformer to convert data and return the processed table. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame
juraj-google-style
def get_values(self, field_name: str) -> List[object]: result = list() if self.validate_field(field_name): for value_key in self._kg.get(field_name): result.append(value_key["value"]) return result
Get a list of all the values of a field. Args: field_name: the name of the field to look up. Returns: the list of values (not the keys)
juraj-google-style
def get_commands_in_namespace(namespace=None, level=1): from ..command import Command commands = {} if (namespace is None): frame = inspect.stack()[level][0] namespace = frame.f_globals elif inspect.ismodule(namespace): namespace = vars(namespace) for name in namespace: obj = namespace[name] if isinstance(obj, Command): commands[name] = obj return OrderedDict(((name, commands[name]) for name in sorted(commands)))
Get commands in namespace. Args: namespace (dict|module): Typically a module. If not passed, the globals from the call site will be used. level (int): If not called from the global scope, set this appropriately to account for the call stack. Returns: OrderedDict: The commands found in the namespace, ordered by name. Can be used to create ``__all__`` lists:: __all__ = list(get_commands_in_namespace())
codesearchnet
def make_batched_videos(videos) -> List[Union['np.ndarray', 'torch.Tensor']]: if not valid_videos: raise ValueError(f'Invalid video input. Expected either a list of video frames or an input of 4 or 5 dimensions, but got type {type(videos)}.') if is_batched_video(videos): pass elif is_valid_video(videos): videos = [videos] elif is_valid_image(videos): videos = [np.array(videos)[None, ...]] elif isinstance(videos[0], (list, tuple)) and is_valid_video(videos[0][0]): videos = [video for sublist in videos for video in sublist] return convert_pil_frames_to_video(videos)
Ensure that the input is a list of videos. If the input is a single video, it is converted to a list of length 1. If the input is a batch of videos, it is converted to a list of 4D video arrays. Videos passed as list `PIL.Image` frames are converted to 4D arrays. We assume that all inputs in the list are in the same format, based on the type of the first element. Args: videos (`VideoInput`): Video inputs to turn into a list of videos.
github-repos
def SetCACertificatesPath(self, ca_certificates_path):
    if not ca_certificates_path:
        return
    if not os.path.exists(ca_certificates_path):
        raise errors.BadConfigOption('No such certificate file: {0:s}.'.format(ca_certificates_path))
    self._ca_certs = ca_certificates_path
    logger.debug('Elasticsearch ca_certs: {0!s}'.format(ca_certificates_path))
Sets the path to the CA certificates. Args: ca_certificates_path (str): path to file containing a list of root certificates to trust. Raises: BadConfigOption: if the CA certificates file does not exist.
juraj-google-style
def _ReadAppJsonFile(self, relative_path):
    try:
        with open(os.path.join(sys.path[0], relative_path), 'r') as f:
            return json.load(f)
    except (IOError, ValueError):
        return None
Reads JSON file from an application directory. Args: relative_path: file name relative to application root directory. Returns: Parsed JSON data, or None if the file does not exist, can't be read, or is not valid JSON.
juraj-google-style
def resolve_trust_remote_code(trust_remote_code, model_name, has_local_code, has_remote_code, error_message=None, upstream_repo=None): if error_message is None: if upstream_repo is not None: error_message = f'The repository {model_name} references custom code contained in {upstream_repo} which must be executed to correctly load the model. You can inspect the repository content at https: elif os.path.isdir(model_name): error_message = f'The repository {model_name} contains custom code which must be executed to correctly load the model. You can inspect the repository content at {os.path.abspath(model_name)} .\n' else: error_message = f'The repository {model_name} contains custom code which must be executed to correctly load the model. You can inspect the repository content at https: if trust_remote_code is None: if has_local_code: trust_remote_code = False elif has_remote_code and TIME_OUT_REMOTE_CODE > 0: prev_sig_handler = None try: prev_sig_handler = signal.signal(signal.SIGALRM, _raise_timeout_error) signal.alarm(TIME_OUT_REMOTE_CODE) while trust_remote_code is None: answer = input(f'{error_message} You can inspect the repository content at https: if answer.lower() in ['yes', 'y', '1']: trust_remote_code = True elif answer.lower() in ['no', 'n', '0', '']: trust_remote_code = False signal.alarm(0) except Exception: raise ValueError(f'{error_message} You can inspect the repository content at https: finally: if prev_sig_handler is not None: signal.signal(signal.SIGALRM, prev_sig_handler) signal.alarm(0) elif has_remote_code: _raise_timeout_error(None, None) if has_remote_code and (not has_local_code) and (not trust_remote_code): raise ValueError(f'{error_message} You can inspect the repository content at https: return trust_remote_code
Resolves the `trust_remote_code` argument. If there is remote code to be loaded, the user must opt-in to loading it. Args: trust_remote_code (`bool` or `None`): User-defined `trust_remote_code` value. model_name (`str`): The name of the model repository in huggingface.co. has_local_code (`bool`): Whether the model has local code. has_remote_code (`bool`): Whether the model has remote code. error_message (`str`, *optional*): Custom error message to display if there is remote code to load and the user didn't opt-in. If unset, the error message will be regarding loading a model with custom code. Returns: The resolved `trust_remote_code` value.
github-repos
def get_config_path():
    try:
        return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]
    except KeyError:
        pass
    if (os.name != 'nt'):
        return os.path.join(os.path.expanduser('~'), '.config', _CONFIG_DIRECTORY)
    else:
        try:
            return os.path.join(os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR], _CONFIG_DIRECTORY)
        except KeyError:
            drive = os.environ.get('SystemDrive', 'C:')
            return os.path.join(drive, '\\', _CONFIG_DIRECTORY)
Returns the absolute path to the Cloud SDK's configuration directory. Returns: str: The Cloud SDK config path.
codesearchnet
def api_server(connection, server_class): return server_class(link=xbahn.connection.link.Link(receive=connection, respond=connection))
Establishes an API Server on the supplied connection Arguments: - connection (xbahn.connection.Connection) - server_class (xbahn.api.Server) Returns: - server_class: server instance
codesearchnet
def Deserialize(self, reader): super(StorageItem, self).Deserialize(reader) self.Value = reader.ReadVarBytes()
Deserialize full object. Args: reader (neocore.IO.BinaryReader):
juraj-google-style
def tetragonal(a: float, c: float): return Lattice.from_parameters(a, a, c, 90, 90, 90)
Convenience constructor for a tetragonal lattice. Args: a (float): *a* lattice parameter of the tetragonal cell. c (float): *c* lattice parameter of the tetragonal cell. Returns: Tetragonal lattice of dimensions a x a x c.
juraj-google-style
def unsubscribe(self, subscription, max=None):
    if max is None:
        self._send('UNSUB %d' % subscription.sid)
        self._subscriptions.pop(subscription.sid)
    else:
        subscription.max = max
        self._send('UNSUB %d %s' % (subscription.sid, max))
Unsubscribe will remove interest in the given subject. If max is provided, the server will automatically unsubscribe after max messages have been received. Args: subscription (pynats.Subscription): a Subscription object max (int=None): number of messages
juraj-google-style
def build_individual(ind): try: ind_obj = dict(individual_id=ind['individual_id']) log.info('Building Individual with id:{0}'.format(ind['individual_id'])) except KeyError as err: raise PedigreeError('Individual is missing individual_id') ind_obj['display_name'] = ind.get('display_name', ind_obj['individual_id']) sex = ind.get('sex', 'unknown') try: int(sex) ind_obj['sex'] = str(sex) except ValueError as err: try: ind_obj['sex'] = REV_SEX_MAP[sex] except KeyError as err: raise PedigreeError(('Unknown sex: %s' % sex)) phenotype = ind.get('phenotype', 'unknown') try: ped_phenotype = REV_PHENOTYPE_MAP[phenotype] if (ped_phenotype == (- 9)): ped_phenotype = 0 ind_obj['phenotype'] = ped_phenotype except KeyError as err: raise PedigreeError(('Unknown phenotype: %s' % phenotype)) ind_obj['father'] = ind.get('father') ind_obj['mother'] = ind.get('mother') ind_obj['capture_kits'] = ind.get('capture_kits', []) ind_obj['bam_file'] = ind.get('bam_file') ind_obj['mt_bam'] = ind.get('mt_bam') ind_obj['vcf2cytosure'] = ind.get('vcf2cytosure') ind_obj['confirmed_sex'] = ind.get('confirmed_sex') ind_obj['confirmed_parent'] = ind.get('confirmed_parent') ind_obj['predicted_ancestry'] = ind.get('predicted_ancestry') analysis_type = ind.get('analysis_type', 'unknown') if (not (analysis_type in ANALYSIS_TYPES)): raise PedigreeError('Analysis type %s not allowed', analysis_type) ind_obj['analysis_type'] = analysis_type if ('tmb' in ind): ind_obj['tmb'] = ind['tmb'] if ('msi' in ind): ind_obj['msi'] = ind['msi'] if ('tumor_purity' in ind): ind_obj['tumor_purity'] = ind['tumor_purity'] if ('tumor_type' in ind): ind_obj['tumor_type'] = ind['tumor_type'] return ind_obj
Build a Individual object Args: ind (dict): A dictionary with individual information Returns: ind_obj (dict): A Individual object dict( individual_id = str, # required display_name = str, sex = str, phenotype = int, father = str, # Individual id of father mother = str, # Individual id of mother capture_kits = list, # List of names of capture kits bam_file = str, # Path to bam file vcf2cytosure = str, # Path to CGH file analysis_type = str, # choices=ANALYSIS_TYPES )
codesearchnet
def raster_dilation(rasterfile): if is_string(rasterfile): origin_raster = RasterUtilClass.read_raster(str(rasterfile)) elif isinstance(rasterfile, Raster): origin_raster = rasterfile.data elif isinstance(rasterfile, numpy.ndarray): origin_raster = rasterfile else: return 'Your rasterfile has a wrong type. Type must be string or ' \ 'numpy.array or class Raster in pygeoc.' min_value_raster = origin_raster.min() dilation_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1])) add_row = numpy.full((1, origin_raster.shape[1]), min_value_raster) temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row)) add_col = numpy.full((origin_raster.shape[0] + 2, 1), min_value_raster) expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col)) for i in range(origin_raster.shape[0]): for j in range(origin_raster.shape[1]): max_pixel_value = min_value_raster for k in range(3): for l in range(3): if expand_origin_raster[i + k, j + l] >= max_pixel_value: max_pixel_value = expand_origin_raster[i + k, j + l] dilation_raster[i, j] = max_pixel_value return dilation_raster
Dilate the raster image. Find the max pixel's value in 8-neighborhood. Then change the compute pixel's value into the max pixel's value. Args: rasterfile: input original raster image, type can be filename(string, like "test1.tif"), rasterfile(class Raster) or numpy.ndarray. Returns: dilation_raster: raster image after dilation, type is numpy.ndarray.
juraj-google-style
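A toy check of the 3x3 max-filter behaviour described above, passing a plain numpy array (one of the accepted input types). Because the padding value is the raster minimum (0 here), the single bright pixel spreads to every cell of a 3x3 input.

import numpy

raster = numpy.zeros((3, 3))
raster[1, 1] = 5.0

dilated = raster_dilation(raster)
print(dilated)  # a 3x3 array filled with 5.0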
def add_dir(self, path, compress):
    if not os.path.isdir(path):
        raise ValueError('{} is not a directory'.format(path))
    for root, dirs, files in os.walk(path):
        for f in files:
            self.add_file(os.path.join(root, f), compress)
Add all files under directory `path` to the MAR file. Args: path (str): path to directory to add to this MAR file compress (str): One of 'xz', 'bz2', or None. Defaults to None.
juraj-google-style
def __call__(self, utterances: list, batch_history: list, *responses: list) -> list: result = [random.choice([t for t, sc in r if t]) for r in zip(*responses)] return result
Selects result of a random skill for each utterance. Args: utterances: Not used. batch_history: Not used. responses: Each response positional argument corresponds to response of one of Agent skills and is represented by batch (list) of (response, confidence) tuple structures. Returns: result: A batch of responses corresponding to the utterance batch received by the agent.
juraj-google-style
def split_input(cls, mapper_spec): params = _get_params(mapper_spec) entity_kind_name = params[cls.ENTITY_KIND_PARAM] batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE)) shard_count = mapper_spec.shard_count namespace = params.get(cls.NAMESPACE_PARAM) app = params.get(cls._APP_PARAM) filters = params.get(cls.FILTERS_PARAM) if (namespace is None): namespace_query = datastore.Query('__namespace__', keys_only=True, _app=app) namespace_keys = namespace_query.Get(limit=(cls.MAX_NAMESPACES_FOR_KEY_SHARD + 1)) if (len(namespace_keys) > cls.MAX_NAMESPACES_FOR_KEY_SHARD): ns_ranges = namespace_range.NamespaceRange.split(n=shard_count, contiguous=True, _app=app) return [cls(entity_kind_name, key_ranges=None, ns_range=ns_range, batch_size=batch_size, filters=filters) for ns_range in ns_ranges] elif (not namespace_keys): return [cls(entity_kind_name, key_ranges=None, ns_range=namespace_range.NamespaceRange(_app=app), batch_size=shard_count, filters=filters)] else: namespaces = [(namespace_key.name() or '') for namespace_key in namespace_keys] else: namespaces = [namespace] readers = cls._split_input_from_params(app, namespaces, entity_kind_name, params, shard_count) if filters: for reader in readers: reader._filters = filters return readers
Splits query into shards without fetching query results. Tries as best as it can to split the whole query result set into equal shards. Due to difficulty of making the perfect split, resulting shards' sizes might differ significantly from each other. Args: mapper_spec: MapperSpec with params containing 'entity_kind'. May have 'namespace' in the params as a string containing a single namespace. If specified then the input reader will only yield values in the given namespace. If 'namespace' is not given then values from all namespaces will be yielded. May also have 'batch_size' in the params to specify the number of entities to process in each batch. Returns: A list of InputReader objects. If the query results are empty then the empty list will be returned. Otherwise, the list will always have a length equal to number_of_shards but may be padded with Nones if there are too few results for effective sharding.
codesearchnet
def get_appliances(self, location_id): url = 'https: headers = self.__gen_headers() headers['Content-Type'] = 'application/json' params = {'locationId': location_id} url = self.__append_url_params(url, params) r = requests.get(url, headers=headers) return r.json()
Get the appliances added for a specified location. Args: location_id (string): identifying string of the location Returns: list: dictionary objects containing appliances data
codesearchnet
def predict_features(self, df_features, df_target, idx=0, **kwargs): estimator = SVR(kernel='linear') selector = RFECV(estimator, step=1) selector = selector.fit(df_features.values, df_target.values[:, 0]) return selector.grid_scores_
For one variable, predict its neighbouring nodes. Args: df_features (pandas.DataFrame): df_target (pandas.Series): idx (int): (optional) for printing purposes kwargs (dict): additional options for algorithms Returns: list: scores of each feature relatively to the target
juraj-google-style
def __init__(self, campaign_db, campaign_runner, check_repo=True): self.db = campaign_db self.runner = campaign_runner self.check_repo = check_repo if self.check_repo: self.check_repo_ok()
Initialize the Simulation Execution Manager, using the provided CampaignManager and SimulationRunner instances. This method should never be used on its own, but only as a constructor from the new and load @classmethods. Args: campaign_db (DatabaseManager): the DatabaseManager object to associate to this campaign. campaign_runner (SimulationRunner): the SimulationRunner object to associate to this campaign.
juraj-google-style
def __init__(self, init_args, init_func, next_func, finalize_func, output_signature, name=None): self._init_args = init_args self._init_structure = structure.type_spec_from_value(init_args) self._init_func = structured_function.StructuredFunctionWrapper(init_func, self._transformation_name(), input_structure=self._init_structure) self._next_func = structured_function.StructuredFunctionWrapper(next_func, self._transformation_name(), input_structure=self._init_func.output_structure) self._finalize_func = structured_function.StructuredFunctionWrapper(finalize_func, self._transformation_name(), input_structure=self._init_func.output_structure) self._output_signature = output_signature self._name = name variant_tensor = gen_dataset_ops.generator_dataset(structure.to_tensor_list(self._init_structure, self._init_args) + self._init_func.function.captured_inputs, self._next_func.function.captured_inputs, self._finalize_func.function.captured_inputs, init_func=self._init_func.function, next_func=self._next_func.function, finalize_func=self._finalize_func.function, **self._common_args) super().__init__(variant_tensor)
Constructs a `_GeneratorDataset`. Args: init_args: A (nested) structure representing the arguments to `init_func`. init_func: A TensorFlow function that will be called on `init_args` each time a C++ iterator over this dataset is constructed. Returns a (nested) structure representing the "state" of the dataset. next_func: A TensorFlow function that will be called on the result of `init_func` to produce each element, and that raises `OutOfRangeError` to terminate iteration. finalize_func: A TensorFlow function that will be called on the result of `init_func` immediately before a C++ iterator over this dataset is destroyed. The return value is ignored. output_signature: A (nested) structure of `tf.TypeSpec` objects describing the output of `next_func`. name: Optional. A name for the tf.data transformation.
github-repos
def _generate_G_points(self, kpoint): gpoints = [] for i in range(((2 * self._nbmax[2]) + 1)): i3 = (((i - (2 * self._nbmax[2])) - 1) if (i > self._nbmax[2]) else i) for j in range(((2 * self._nbmax[1]) + 1)): j2 = (((j - (2 * self._nbmax[1])) - 1) if (j > self._nbmax[1]) else j) for k in range(((2 * self._nbmax[0]) + 1)): k1 = (((k - (2 * self._nbmax[0])) - 1) if (k > self._nbmax[0]) else k) G = np.array([k1, j2, i3]) v = (kpoint + G) g = np.linalg.norm(np.dot(v, self.b)) E = ((g ** 2) / self._C) if (E < self.encut): gpoints.append(G) return np.array(gpoints, dtype=np.float64)
Helper function to generate G-points based on nbmax. This function iterates over possible G-point values and determines if the energy is less than G_{cut}. Valid values are appended to the output array. This function should not be called outside of initialization. Args: kpoint (np.array): the array containing the current k-point value Returns: a list containing valid G-points
codesearchnet
def openning(input_rasterfilename, times):
    input_raster = RasterUtilClass.read_raster(input_rasterfilename)
    openning_raster = input_raster
    for i in range(times):
        openning_raster = RasterUtilClass.raster_erosion(openning_raster)
    for i in range(times):
        openning_raster = RasterUtilClass.raster_dilation(openning_raster)
    return openning_raster
Do morphological opening: erode first, then dilate. Args: input_rasterfilename: input original raster image filename. times: erode and dilate times. Returns: openning_raster: raster image after opening.
juraj-google-style
def __write_to_hdf5_light(self, filename_out, *args, **kwargs): block_size = 0 with h5py.File(filename_out, 'w') as h5: h5.attrs[b'CLASS'] = b'FILTERBANK' h5.attrs[b'VERSION'] = b'1.0' if HAS_BITSHUFFLE: bs_compression = bitshuffle.h5.H5FILTER bs_compression_opts = (block_size, bitshuffle.h5.H5_COMPRESS_LZ4) else: bs_compression = None bs_compression_opts = None logger.warning("Warning: bitshuffle not found. No compression applied.") dset = h5.create_dataset('data', data=self.data, compression=bs_compression, compression_opts=bs_compression_opts) dset_mask = h5.create_dataset('mask', shape=self.file_shape, compression=bs_compression, compression_opts=bs_compression_opts, dtype='uint8') dset.dims[0].label = b"frequency" dset.dims[1].label = b"feed_id" dset.dims[2].label = b"time" dset_mask.dims[0].label = b"frequency" dset_mask.dims[1].label = b"feed_id" dset_mask.dims[2].label = b"time" for key, value in self.header.items(): dset.attrs[key] = value
Write data to HDF5 file in one go. Args: filename_out (str): Name of output file
juraj-google-style
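For reference, a standalone h5py sketch (assumed layout, no bitshuffle compression) of the same structure: a 'data' and a 'mask' dataset, dimension labels, and header values copied into dataset attributes.

import h5py
import numpy as np

data = np.zeros((16, 1, 32), dtype=np.float32)  # (frequency, feed_id, time)
with h5py.File('example.h5', 'w') as h5:
    h5.attrs['CLASS'] = 'FILTERBANK'
    h5.attrs['VERSION'] = '1.0'
    dset = h5.create_dataset('data', data=data)
    h5.create_dataset('mask', shape=data.shape, dtype='uint8')
    dset.dims[0].label = 'frequency'
    dset.dims[1].label = 'feed_id'
    dset.dims[2].label = 'time'
    dset.attrs['tsamp'] = 1e-6  # hypothetical header key/value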
def __init__(self, input_reader=None, output_writer=None): super(ExtractionTool, self).__init__( input_reader=input_reader, output_writer=output_writer) self._artifacts_registry = None self._buffer_size = 0 self._mount_path = None self._operating_system = None self._parser_filter_expression = None self._preferred_year = None self._presets_file = None self._process_archives = False self._process_compressed_streams = True self._process_memory_limit = None self._queue_size = self._DEFAULT_QUEUE_SIZE self._resolver_context = dfvfs_context.Context() self._single_process_mode = False self._storage_file_path = None self._storage_format = definitions.STORAGE_FORMAT_SQLITE self._temporary_directory = None self._text_prepend = None self._use_zeromq = True self._yara_rules_string = None
Initializes a CLI tool. Args: input_reader (Optional[InputReader]): input reader, where None indicates that the stdin input reader should be used. output_writer (Optional[OutputWriter]): output writer, where None indicates that the stdout output writer should be used.
juraj-google-style
def write(self, output): view_str = output.encode('ascii', 'ignore') if (len(view_str) > 0): self.m_ser.write(view_str) self.m_ser.flush() self.m_ser.reset_input_buffer() time.sleep(self.m_force_wait) pass
Passthrough for pyserial Serial.write(). Args: output (str): Block to write to port
juraj-google-style
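A hedged pyserial sketch (port name, baud rate and payload are placeholders) of the same write/flush/clear/wait pattern used above.

import time
import serial

ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
ser.write(b'request\r\n')   # send the ASCII block
ser.flush()                 # wait until the output buffer is drained
ser.reset_input_buffer()    # discard any stale incoming bytes
time.sleep(0.1)             # short forced wait before the next command
ser.close()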
def decode_prob(self, class_probabilities): results = [] for row in class_probabilities: entries = [] for (i, prob) in enumerate(row): entries.append({'index': i, 'name': str(i), 'prob': prob}) entries = sorted(entries, key=itemgetter('prob'), reverse=True)[:self.top_probs] for entry in entries: entry['prob'] = '{:.3f}'.format(entry['prob']) results.append(entries) return results
Given predicted class probabilities for a set of examples, annotate each probability with a class name. By default, we name each class using its index in the logits array. Args: class_probabilities (array): Class probabilities as output by `self.predict`, i.e., a numpy array of shape (num_examples, num_classes). Returns: Annotated class probabilities for each input example, as a list of dicts where each dict is formatted as: { 'index': class_index, 'name': class_name, 'prob': class_probability }
codesearchnet
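A small self-contained example (illustrative values) of the annotate-and-sort step for one row of class probabilities with top_probs=2.

from operator import itemgetter

row = [0.1, 0.7, 0.2]
entries = [{'index': i, 'name': str(i), 'prob': p} for i, p in enumerate(row)]
top = sorted(entries, key=itemgetter('prob'), reverse=True)[:2]
print([(e['name'], '{:.3f}'.format(e['prob'])) for e in top])
# [('1', '0.700'), ('2', '0.200')]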
def execute(self, triple_map, output, **kwargs): sparql = (PREFIX + triple_map.logicalSource.query.format(**kwargs)) bindings = self.__get_bindings__(sparql) iterator = str(triple_map.logicalSource.iterator) for binding in bindings: entity_dict = binding.get(iterator) if isinstance(entity_dict, rdflib.term.Node): entity = entity_dict elif isinstance(entity_dict, dict): raw_value = entity_dict.get('value') if entity_dict.get('type').startswith('bnode'): entity = rdflib.BNode(raw_value) else: entity = rdflib.URIRef(raw_value) if (triple_map.subjectMap.class_ is not None): output.add((entity, rdflib.RDF.type, triple_map.subjectMap.class_)) sparql_query = self.__construct_compound_query__(triple_map).format(**kwargs) properties = self.__get_bindings__(sparql_query) for pred_obj_map in triple_map.predicateObjectMap: predicate = pred_obj_map.predicate if (pred_obj_map.constant is not None): output.add((entity, predicate, pred_obj_map.constant)) continue if (' key = str(predicate).split(' else: key = str(predicate).split('/')[(- 1)] for property_ in properties: if (key in property_.keys()): info = {'about': property_.get(key)} object_ = __get_object__(info) output.add((entity, predicate, object_))
Iterates through the triple map's predicate object maps and processes the query. Args: triple_map (SimpleNamespace): triple map to process output (rdflib.Graph): graph that receives the generated triples **kwargs: keyword values substituted into the logical source SPARQL query
codesearchnet
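A minimal rdflib sketch (URIs are placeholders, not from the corpus) of the output side of the mapping: typing an entity and adding one predicate/object triple to the output graph.

import rdflib

output = rdflib.Graph()
entity = rdflib.URIRef('http://example.org/item/1')
klass = rdflib.URIRef('http://example.org/Item')
predicate = rdflib.URIRef('http://example.org/label')

output.add((entity, rdflib.RDF.type, klass))                # class assertion
output.add((entity, predicate, rdflib.Literal('example')))  # predicate/object pair
print(len(output))  # 2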
def pytd_type_to_value(self, typ: pytd.Type) -> abstract.BaseValue: if typ not in self._cache.types: self._cache.types[typ] = self._pytd_type_to_value(typ) return self._cache.types[typ]
Converts a pytd type to an abstract value. Args: typ: The type. Returns: The abstract representation of the type. For example, when passed `pytd.ClassType(pytd.Class(int))`, this function returns `abstract.SimpleClass(int)`.
github-repos
def write_dict_to_new_file(file_name, localization_key_to_comment): output_file_descriptor = open_strings_file(file_name, 'w') for (entry_key, entry_comment) in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)): write_entry_to_file(output_file_descriptor, entry_comment, entry_key) output_file_descriptor.write(u'\n') output_file_descriptor.close()
Writes a dictionary of localization keys and comments to a new file. Args: file_name (str): The path of the file to write to. localization_key_to_comment (dict): A mapping between localization keys and comments.
codesearchnet
def set_property_filter(filter_proto, name, op, value): filter_proto.Clear() pf = filter_proto.property_filter pf.property.name = name pf.op = op set_value(pf.value, value) return filter_proto
Set property filter constraint in the given datastore.Filter proto message. Args: filter_proto: datastore.Filter proto message name: property name op: datastore.PropertyFilter.Operation value: property value Returns: the same datastore.Filter. Usage: >>> set_property_filter(filter_proto, 'foo', ... datastore.PropertyFilter.EQUAL, 'a') # WHERE 'foo' = 'a'
codesearchnet
def meas_gate(self, circuit, qreg, op): if self.meas_fun is None: pass else: self.meas_fun(circuit, qreg, op)
Add measurement gates to a circuit. Args: circuit (QuantumCircuit): circuit to add measurement to. qreg (tuple(QuantumRegister,int)): quantum register being measured. op (str): the basis label for the measurement.
juraj-google-style
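A hedged Qiskit sketch of what a meas_fun callback might append: a Z-basis measurement of one qubit into one classical bit (register sizes are arbitrary here).

from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister

qreg = QuantumRegister(1, 'q')
creg = ClassicalRegister(1, 'c')
circuit = QuantumCircuit(qreg, creg)
circuit.measure(qreg[0], creg[0])  # measure qubit 0 into classical bit 0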
def add_op(self, graph_op_creation_digest): if graph_op_creation_digest.op_name in self._op_by_name: raise ValueError('Duplicate op name: %s (op type: %s)' % (graph_op_creation_digest.op_name, graph_op_creation_digest.op_type)) self._op_by_name[graph_op_creation_digest.op_name] = graph_op_creation_digest
Add an op creation data object. Args: graph_op_creation_digest: A GraphOpCreationDigest data object describing the creation of an op inside this graph.
github-repos
def __init__(self, mimeType=PandasCellMimeType): super(MimeData, self).__init__() self._mimeType = mimeType
Create a new MimeData object. Args: mimeType (str): the mime type.
juraj-google-style
def aggr(array, op, initial_value, ty): weld_obj = WeldObject(encoder_, decoder_) array_var = weld_obj.update(array) if isinstance(array, WeldObject): array_var = array.obj_id weld_obj.dependencies[array_var] = array weld_template = '\n result(\n for(\n %(array)s,\n merger[%(ty)s,%(op)s],\n |b, i, e| merge(b, e)\n )\n )\n ' weld_obj.weld_code = (weld_template % {'array': array_var, 'ty': ty, 'op': op}) return weld_obj
Computes the aggregate of elements in the array. Args: array (WeldObject / Numpy.ndarray): Input array to aggregate op (str): Op string used to aggregate the array (+ / *) initial_value (int): Initial value for the aggregation (not used by the generated Weld code above) ty (WeldType): Type of each element in the input array Returns: A WeldObject representing this computation
codesearchnet
def get_parents_graph(self, item_ids, language=None): def _parents(item_ids): if item_ids is None: items = Item.objects.filter(active=True).prefetch_related('parents') else: item_ids = [ii for iis in item_ids.values() for ii in iis] items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('parents') return {item.id: sorted([_item.id for _item in item.parents.all()]) for item in items} if item_ids is None: return self._reachable_graph(None, _parents, language=language) else: graph = self.get_parents_graph(None, language) return self._subset_graph(graph, item_ids)
Get a subgraph of items reachable from the given set of items through the 'parent' relation. Args: item_ids (list): items which are taken as roots for the reachability language (str): if specified, filter out items which are not available in the given language Returns: dict: item id -> list of items (parent items), root items are referenced by None key
juraj-google-style
def _CheckSignature(self, value_data): signature_map = self._GetDataTypeMap('uint32le') try: signature = self._ReadStructureFromByteStream( value_data, 0, signature_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError( 'Unable to parse signature value with error: {0!s}'.format( exception)) format_type = self._HEADER_SIGNATURES.get(signature, None) if format_type == self._FORMAT_TYPE_2003: return self._FORMAT_TYPE_2003 if format_type == self._FORMAT_TYPE_8: cached_entry_signature = value_data[signature:signature + 4] if cached_entry_signature in ( self._CACHED_ENTRY_SIGNATURE_8_0, self._CACHED_ENTRY_SIGNATURE_8_1): return self._FORMAT_TYPE_8 elif format_type == self._FORMAT_TYPE_10: cached_entry_signature = value_data[signature:signature + 4] if cached_entry_signature == self._CACHED_ENTRY_SIGNATURE_8_1: return self._FORMAT_TYPE_10 return format_type
Parses and validates the signature. Args: value_data (bytes): value data. Returns: int: format type or None if format could not be determined. Raises: ParseError: if the value data could not be parsed.
juraj-google-style
def remove_bucket_list_item(self, id, collection, item): if (type(id) is not ObjectId): id = ObjectId(id) obj = getattr(self.db, collection) result = obj.update({'_id': id}, {'$pull': {'bucket_list': item}}) return result
Removes an item from the bucket list Args: id: the CRITs object id of the TLO collection: The db collection. See main class documentation. item: the bucket list item to remove Returns: The mongodb result
codesearchnet
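An equivalent pymongo sketch using the modern update_one API; the database name, collection and ObjectId are assumptions, while the $pull document mirrors the snippet above.

from bson import ObjectId
from pymongo import MongoClient

db = MongoClient()['crits']
result = db['indicators'].update_one(
    {'_id': ObjectId('0123456789ab0123456789ab')},
    {'$pull': {'bucket_list': 'stale-tag'}})
print(result.modified_count)  # 1 if the item was removed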
def _create_table_init_from_file_model_tf1(self, sess: session.Session) -> Tuple[core.Tensor, core.Tensor, core.Tensor]: asset_dir = self.create_tempdir('assets').full_path asset_file = os.path.join(asset_dir, 'vocab_file.txt') content = '\n'.join(['static', 'range', 'quantization']) file_io.write_string_to_file(filename=asset_file, file_content=content) init = lookup_ops.TextFileInitializer(filename=asset_file, key_dtype=dtypes.string, key_index=lookup_ops.TextFileIndex.WHOLE_LINE, value_dtype=dtypes.int64, value_index=lookup_ops.TextFileIndex.LINE_NUMBER) table = lookup_ops.StaticHashTable(init, default_value=-1) input_vocabs_placeholder = array_ops.placeholder(dtypes.string, shape=(None,), name='input_vocabs') lookup_vals = math_ops.cast(table.lookup(input_vocabs_placeholder), dtypes.float32) matmul_input = array_ops_stack.stack([lookup_vals, lookup_vals]) weight_row = array_ops.ones(shape=array_ops.shape(input_vocabs_placeholder), dtype=dtypes.float32) weight = array_ops.transpose_v2(array_ops_stack.stack([weight_row, weight_row])) output_tensor = math_ops.matmul(matmul_input, weight) return (input_vocabs_placeholder, lookup_vals, output_tensor)
Creates a simple model that initializes a table from an asset file. This model creates an asset file at "vocab_file.txt" containing comma-separated vocabularies and uses it to initialize a `StaticVocabularyTable`. For inference, the model performs a lookup with a 1D string tensor input vocabs. Args: sess: Tensorflow Session to create the model in. Returns: (input_vocabs_placeholder, lookup_vals, output_tensor), where * input_vocabs_placeholder is a placeholder tensor of 1D strings * lookup_vals is an output tensor that is a direct result of table lookup * output_tensor is a float 2x2 matrix
github-repos
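A self-contained TF2 sketch of the same lookup idea without an asset file, using an in-memory KeyValueTensorInitializer in place of the TextFileInitializer (vocabulary and values mirror the snippet above).

import tensorflow as tf

init = tf.lookup.KeyValueTensorInitializer(
    keys=tf.constant(['static', 'range', 'quantization']),
    values=tf.constant([0, 1, 2], dtype=tf.int64))
table = tf.lookup.StaticHashTable(init, default_value=-1)
print(table.lookup(tf.constant(['range', 'unknown'])).numpy())  # [ 1 -1]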
def gaussian_square(times: np.ndarray, amp: complex, center: float, width: float, sigma: float, zeroed_width: Union[None, float] = None) -> np.ndarray: r square_start = center-width/2 square_stop = center+width/2 if zeroed_width: zeroed_width = min(width, zeroed_width) gauss_zeroed_width = zeroed_width-width else: gauss_zeroed_width = None funclist = [functools.partial(gaussian, amp=amp, center=square_start, sigma=sigma, zeroed_width=gauss_zeroed_width, rescale_amp=True), functools.partial(gaussian, amp=amp, center=square_stop, sigma=sigma, zeroed_width=gauss_zeroed_width, rescale_amp=True), functools.partial(constant, amp=amp)] condlist = [times <= square_start, times >= square_stop] return np.piecewise(times.astype(np.complex_), condlist, funclist)
r"""Continuous gaussian square pulse. Args: times: Times to output pulse for. amp: Pulse amplitude. center: Center of the square pulse component. width: Width of the square pulse component. sigma: Width (standard deviation) of gaussian rise/fall portion of the pulse. zeroed_width: Subtract baseline of gaussian square pulse to enforce $\Omega_{\text{square}}(\text{center} \pm \text{zeroed\_width}/2) = 0$.
juraj-google-style
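A short usage sketch (parameter values are arbitrary; it assumes the `gaussian` and `constant` helpers from the same module are available alongside `gaussian_square`).

import numpy as np

times = np.linspace(0, 100, 101)
samples = gaussian_square(times, amp=0.5 + 0j, center=50, width=60, sigma=5)
print(samples.shape, samples.dtype)  # (101,) complex128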
class ChineseCLIPVisionEncoder(nn.Module): def __init__(self, config: ChineseCLIPConfig): super().__init__() self.config = config self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, output_attentions) else: layer_outputs = encoder_layer(hidden_states, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`ChineseCLIPVisionEncoderLayer`]. Args: config: ChineseCLIPConfig
github-repos
def _GetArgType(arg, spec): if arg in spec.annotations: arg_type = spec.annotations[arg] try: return arg_type.__qualname__ except AttributeError: return repr(arg_type) return ''
Returns a string describing the type of an argument. Args: arg: The name of the argument. spec: An instance of fire.inspectutils.FullArgSpec, containing type and default information about the arguments to a callable. Returns: A string to be used in constructing the help screen for the function, the empty string if the argument type is not available.
github-repos
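A small illustration (hypothetical function, not from the corpus) of reading a type name from annotations, which is what the helper above does via the arg spec.

def greet(name: str, times: int = 1) -> str:
    return ' '.join(['hello', name] * times)

annotations = greet.__annotations__
print(annotations['times'].__qualname__)  # int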
def _from_dict_record(data): return [Schema._get_field_entry(name, value) for (name, value) in list(data.items())]
Infer a BigQuery table schema from a dictionary. If the dictionary has entries that are in turn OrderedDicts these will be turned into RECORD types. Ideally this will be an OrderedDict but it is not required. Args: data: The dict to infer a schema from. Returns: A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a BigQuery Tables resource schema.
codesearchnet
def concat(self, axis, other, **kwargs): return self._append_list_of_managers(other, axis, **kwargs)
Concatenates two objects together. Args: axis: The axis to concatenate along (0 for columns, 1 for index). other: The other object(s) to concatenate with. Returns: Concatenated objects.
juraj-google-style