Dataset columns:
    code        string (lengths 20 – 4.93k)
    docstring   string (lengths 33 – 1.27k)
    source      string (3 classes)
def display_hierarchy_helper(root, parent_id_to_children, depth):
    print '%s%s (%s)' % ('%s+--' % ('|'.join([' '] * depth)), root['name'], root['id'])
    for child in parent_id_to_children.get(root['id'], []):
        display_hierarchy_helper(child, parent_id_to_children, depth + 1)
Recursive helper for displaying the hierarchy. Args: root: The current root ad unit. parent_id_to_children: The overall map of parent ids to children. depth: The current depth.
juraj-google-style
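A minimal usage sketch for the helper above. The ad-unit dicts and the 'parentId' field are illustrative assumptions; the helper itself only reads 'id' and 'name':

# Hypothetical ad units; only 'id' and 'name' are read by the helper.
ad_units = [
    {'id': '1', 'name': 'Root', 'parentId': None},
    {'id': '2', 'name': 'Sports', 'parentId': '1'},
    {'id': '3', 'name': 'Football', 'parentId': '2'},
]

# Build the parent-id -> children map the helper expects.
parent_id_to_children = {}
for unit in ad_units:
    parent_id_to_children.setdefault(unit['parentId'], []).append(unit)

display_hierarchy_helper(ad_units[0], parent_id_to_children, 0)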
def generate_sbi_config(num_pbs: int=3, project: str='sip', programme_block: str='sip_demos',
                        pb_config: Union[dict, List[dict]]=None,
                        workflow_config: Union[dict, List[dict]]=None,
                        register_workflows=False) -> dict:
    if isinstance(workflow_config, dict):
        workflow_config = [workflow_config]
    if isinstance(pb_config, dict):
        pb_config = [pb_config]
    utc_now = datetime.datetime.utcnow()
    pb_list = []
    for i in range(num_pbs):
        pb_id = ProcessingBlock.get_id(utc_now)
        if workflow_config is not None:
            _workflow_config = workflow_config[i]
        else:
            _workflow_config = None
        if pb_config is not None:
            _pb_config = pb_config[i]
        else:
            _pb_config = None
        pb_dict = generate_pb_config(pb_id, _pb_config, _workflow_config)
        pb_list.append(pb_dict)
    sbi_config = dict(id=SchedulingBlockInstance.get_id(utc_now, project),
                      version=__sbi_version__,
                      scheduling_block=generate_sb(utc_now, project, programme_block),
                      processing_blocks=pb_list)
    if register_workflows:
        add_workflow_definitions(sbi_config)
    return sbi_config
Generate a SBI configuration dictionary. Args: num_pbs (int, optional): Number of Processing Blocks (default = 3) project (str, optional): Project to associate the SBI with. programme_block (str, optional): SBI programme block pb_config (dict, List[dict], optional): PB configuration workflow_config (dict, List[dict], optional): Workflow configuration register_workflows (bool, optional): If true also register workflows. Returns: dict, SBI configuration dictionary
codesearchnet
def copy_to_mesh(tensor: Any, layout: layout_lib.Layout, source_layout: Optional[layout_lib.Layout]=None) -> tensor_lib.Tensor: del source_layout return relayout(tensor, layout)
Copies a tf.Tensor onto the DTensor device with the given layout. Copies a regular tf.Tensor onto the DTensor device. Use the mesh attached to `layout` as target mesh. This method currently only supports replicated layouts, or one-to-one copies for sharded layouts. Args: tensor: A regular tf.Tensor to be copied as a DTensor. layout: Target layout (and mesh) for the result DTensor. source_layout: Source layout of the tensor before copy. This argument is deprecated. Returns: A DTensor on the DTensor device with the given layout.
github-repos
def _Conv2DBackpropInputGrad(op: ops.Operation, grad): return [None, gen_nn_ops.conv2d_backprop_filter(grad, array_ops.shape(op.inputs[1]), op.inputs[2], dilations=op.get_attr('dilations'), strides=op.get_attr('strides'), padding=op.get_attr('padding'), explicit_paddings=op.get_attr('explicit_paddings'), use_cudnn_on_gpu=op.get_attr('use_cudnn_on_gpu'), data_format=op.get_attr('data_format').decode()), gen_nn_ops.conv2d(grad, op.inputs[1], dilations=op.get_attr('dilations'), strides=op.get_attr('strides'), padding=op.get_attr('padding'), explicit_paddings=op.get_attr('explicit_paddings'), use_cudnn_on_gpu=op.get_attr('use_cudnn_on_gpu'), data_format=op.get_attr('data_format').decode())]
The derivatives for deconvolution. Args: op: the Deconvolution op. grad: the tensor representing the gradient w.r.t. the output Returns: the gradients w.r.t. the input and the filter
github-repos
def distance(p_a, p_b): return sqrt((((p_a.lat - p_b.lat) ** 2) + ((p_a.lon - p_b.lon) ** 2)))
Euclidean distance, between two points Args: p_a (:obj:`Point`) p_b (:obj:`Point`) Returns: float: distance, in degrees
codesearchnet
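A quick usage sketch; Point can be any object exposing .lat and .lon, and a namedtuple is used here purely for illustration:

from collections import namedtuple

Point = namedtuple('Point', ['lat', 'lon'])

p_a = Point(lat=41.15, lon=-8.61)
p_b = Point(lat=41.16, lon=-8.60)
print(distance(p_a, p_b))  # ~0.0141 degrees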
def _gather_saveables_for_checkpoint(self):
    def _saveable_factory(name=self._common_name):
        return _MirroredSaveable(self, self._primary, name)
    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
Overrides Trackable method. This allows both name-based and object-based save and restore of MirroredVariables. Returns: A dictionary mapping attribute names to `SaveableObject` factories.
github-repos
def preprocess(self, xs): return [self.nesting_field.preprocess(x) for x in super(NestedField, self).preprocess(xs)]
Preprocess a single example. Firstly, tokenization and the supplied preprocessing pipeline is applied. Since this field is always sequential, the result is a list. Then, each element of the list is preprocessed using ``self.nesting_field.preprocess`` and the resulting list is returned. Arguments: xs (list or str): The input to preprocess. Returns: list: The preprocessed list.
juraj-google-style
def begin_block(self, req_begin_block):
    self.abort_if_abci_chain_is_not_synced()
    chain_shift = 0 if self.chain is None else self.chain['height']
    logger.debug('BEGIN BLOCK, height:%s, num_txs:%s',
                 req_begin_block.header.height + chain_shift,
                 req_begin_block.header.num_txs)
    self.block_txn_ids = []
    self.block_transactions = []
    return ResponseBeginBlock()
Initialize list of transactions. Args: req_begin_block: block object which contains block header and block hash.
juraj-google-style
def transfer_project(self, to_project_id, **kwargs): path = ('/groups/%s/projects/%s' % (self.id, to_project_id)) self.manager.gitlab.http_post(path, **kwargs)
Transfer a project to this group. Args: to_project_id (int): ID of the project to transfer **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTransferProjectError: If the project could not be transferred
codesearchnet
class SwinPatchMerging(nn.Module): def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None: super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def maybe_pad(self, input_feature, height, width): should_pad = height % 2 == 1 or width % 2 == 1 if should_pad: pad_values = (0, 0, 0, width % 2, 0, height % 2) input_feature = nn.functional.pad(input_feature, pad_values) return input_feature def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor: height, width = input_dimensions batch_size, dim, num_channels = input_feature.shape input_feature = input_feature.view(batch_size, height, width, num_channels) input_feature = self.maybe_pad(input_feature, height, width) input_feature_0 = input_feature[:, 0::2, 0::2, :] input_feature_1 = input_feature[:, 1::2, 0::2, :] input_feature_2 = input_feature[:, 0::2, 1::2, :] input_feature_3 = input_feature[:, 1::2, 1::2, :] input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) input_feature = input_feature.view(batch_size, -1, 4 * num_channels) input_feature = self.norm(input_feature) input_feature = self.reduction(input_feature) return input_feature
Patch Merging Layer. Args: input_resolution (`Tuple[int]`): Resolution of input feature. dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class.
github-repos
def ProcessGlobalSuppresions(lines):
    for line in lines:
        if _SEARCH_C_FILE.search(line):
            for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
                _global_error_suppressions[category] = True
        if _SEARCH_KERNEL_FILE.search(line):
            for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
                _global_error_suppressions[category] = True
Updates the list of global error suppressions. Parses any lint directives in the file that have global effect. Args: lines: An array of strings, each representing a line of the file, with the last element being empty if the file is terminated with a newline.
juraj-google-style
def HasOutputClass(cls, name):
    if not isinstance(name, py2to3.STRING_TYPES):
        return False
    return name.lower() in cls._output_classes
Determines if a specific output class is registered with the manager. Args: name (str): name of the output module. Returns: bool: True if the output class is registered.
codesearchnet
def collect(manifest=default_manifest, tmp_path=None, compress=False): manifest = load_manifest(manifest) client = manifest.get('client', {}) plugins = manifest.get('plugins', {}) run_strategy = client.get('run_strategy', {'name': 'parallel'}) apply_default_enabled(plugins.get('default_component_enabled', False)) load_packages(plugins.get('packages', [])) apply_blacklist(client.get('blacklist', {})) apply_configs(plugins) to_persist = get_to_persist(client.get('persist', set())) hostname = call('hostname -f', env=SAFE_ENV).strip() suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S') relative_path = ('insights-%s-%s' % (hostname, suffix)) tmp_path = (tmp_path or tempfile.gettempdir()) output_path = os.path.join(tmp_path, relative_path) fs.ensure_path(output_path) fs.touch(os.path.join(output_path, 'insights_archive.txt')) broker = dr.Broker() ctx = create_context(client.get('context', {})) broker[ctx.__class__] = ctx parallel = (run_strategy.get('name') == 'parallel') pool_args = run_strategy.get('args', {}) with get_pool(parallel, pool_args) as pool: h = Hydration(output_path, pool=pool) broker.add_observer(h.make_persister(to_persist)) dr.run_all(broker=broker, pool=pool) if compress: return create_archive(output_path) return output_path
This is the collection entry point. It accepts a manifest, a temporary directory in which to store output, and a boolean for optional compression. Args: manifest (str or dict): json document or dictionary containing the collection manifest. See default_manifest for an example. tmp_path (str): The temporary directory that will be used to create a working directory for storing component output as well as the final tar.gz if one is generated. compress (boolean): True to create a tar.gz and remove the original workspace containing output. False to leave the workspace without creating a tar.gz Returns: The full path to the created tar.gz or workspace.
codesearchnet
def parents(self, as_resources=False):
    parents = [o for (s, p, o) in self.rdf.graph.triples(
        (None, self.rdf.prefixes.fedora.hasParent, None))]
    if as_resources:
        logger.debug('retrieving parent as resource')
        parents = [self.repo.get_resource(parent) for parent in parents]
    return parents
method to return hierarchical parents of this resource Args: as_resources (bool): if True, opens each as appropriate resource type instead of return URI only Returns: (list): list of resources
codesearchnet
def get_dir_size(path: str='.') -> int:
    total = 0
    for root, _, files in os.walk(path):
        for filename in files:
            total += os.path.getsize(os.path.join(root, filename))
    return total
Get the total size of files and sub-directories under the path. Args: path: Path of a directory or a file to calculate the total size. Returns: Total size of the directory or a file.
github-repos
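Usage is a single call; the path below is arbitrary:

size_bytes = get_dir_size('/tmp')
print('{:.1f} MiB'.format(size_bytes / 2 ** 20))  # total size of files under /tmp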
def get_replacement_transform(self, ptransform): raise NotImplementedError
Provides a runner specific override for a given PTransform. Args: ptransform: PTransform to be replaced. Returns: A PTransform that will be the replacement for the PTransform given as an argument.
github-repos
def _handle_error(response): code = response.status_code if (200 <= code < 400): return if (code == 400): sys.stderr.write((response.text + '\n')) raise BadRequest(response) elif (code == 401): sys.stderr.write((response.text + '\n')) raise UnauthorizedAccess(response) elif (code == 403): sys.stderr.write((response.text + '\n')) raise ForbiddenAccess(response) elif (code == 404): sys.stderr.write((response.text + '\n')) raise ResourceNotFound(response) elif (code == 405): sys.stderr.write((response.text + '\n')) raise MethodNotAllowed(response) elif (code == 409): sys.stderr.write((response.text + '\n')) raise ResourceConflict(response) elif (code == 422): sys.stderr.write((response.text + '\n')) raise ResourceInvalid(response) elif (code in (449, 502, 503, 504)): sys.stderr.write((response.text + '\n')) raise RetryWithDelay(response) elif (401 <= code < 500): sys.stderr.write((response.text + '\n')) raise ClientError(response) elif (500 <= code < 600): sys.stderr.write((response.text + '\n')) raise ServerError(response) else: raise ConnectionError(response)
Raise exceptions in response to any http errors Args: response: A Response object Raises: BadRequest: if HTTP error code 400 returned. UnauthorizedAccess: if HTTP error code 401 returned. ForbiddenAccess: if HTTP error code 403 returned. ResourceNotFound: if HTTP error code 404 is returned. MethodNotAllowed: if HTTP error code 405 is returned. ResourceConflict: if HTTP error code 409 is returned. ResourceInvalid: if HTTP error code 422 is returned. ClientError: if HTTP error code falls in 401 - 499. ServerError: if HTTP error code falls in 500 - 599. ConnectionError: if unknown HTTP error code returned.
codesearchnet
def ReceiveMessages(self, client_id, messages): if data_store.RelationalDBEnabled(): return self.ReceiveMessagesRelationalFlows(client_id, messages) now = time.time() with queue_manager.QueueManager(token=self.token) as manager: for session_id, msgs in iteritems( collection.Group(messages, operator.attrgetter("session_id"))): leftover_msgs = self.HandleWellKnownFlows(msgs) unprocessed_msgs = [] for msg in leftover_msgs: if (msg.auth_state == msg.AuthorizationState.AUTHENTICATED or msg.session_id == self.unauth_allowed_session_id): unprocessed_msgs.append(msg) if len(unprocessed_msgs) < len(leftover_msgs): logging.info("Dropped %d unauthenticated messages for %s", len(leftover_msgs) - len(unprocessed_msgs), client_id) if not unprocessed_msgs: continue for msg in unprocessed_msgs: manager.QueueResponse(msg) for msg in unprocessed_msgs: if msg.request_id == 0: manager.QueueNotification(session_id=msg.session_id) break elif msg.type == rdf_flows.GrrMessage.Type.STATUS: if msg.HasTaskID(): manager.DeQueueClientRequest(msg) manager.QueueNotification( session_id=msg.session_id, last_status=msg.request_id) stat = rdf_flows.GrrStatus(msg.payload) if stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED: crash_details = rdf_client.ClientCrash( client_id=client_id, session_id=session_id, backtrace=stat.backtrace, crash_message=stat.error_message, nanny_status=stat.nanny_status, timestamp=rdfvalue.RDFDatetime.Now()) events.Events.PublishEvent( "ClientCrash", crash_details, token=self.token) logging.debug("Received %s messages from %s in %s sec", len(messages), client_id, time.time() - now)
Receives and processes the messages from the source. For each message we update the request object, and place the response in that request's queue. If the request is complete, we send a message to the worker. Args: client_id: The client which sent the messages. messages: A list of GrrMessage RDFValues.
juraj-google-style
def forEach(self) -> 'ColumnExpressionBuilder': return ColumnExpressionBuilder(self._builder, self._column_name, self._children, True, True)
The forEach() function. Unnests the repeated values from a FHIR path. If the FHIR path does not return a collection, we treat that as a collection with a single value. Once this function is called, the FHIR path is sealed to be immutable. Returns: A new ColumnExpressionBuilder with needs_unnest set to True.
github-repos
def __init__(self, object_local_name: str, checkpoint_local_names: Sequence[str], to_shard_layout: Optional[Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]]=None, to_unshard_layout: Optional[Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]]=None): self._object_local_name = object_local_name self._checkpoint_local_names = checkpoint_local_names self._to_shard_layout = to_shard_layout self._to_unshard_layout = to_unshard_layout self._main_checkpoint_name = checkpoint_local_names[0]
Initializes Reshard callback. Args: object_local_name: The local name of the object being restored. checkpoint_local_names: The local names of the checkpoint positions that need to be read. to_shard_layout: (Optional) Target layouts as specified in the embedding being restored. to_unshard_layout: (Optional) Layouts as stored in checkpoint being restored from.
github-repos
def negative(x): if any_symbolic_tensors((x,)): return Negative().symbolic_call(x) return backend.numpy.negative(x)
Numerical negative, element-wise. Args: x: Input tensor. Returns: Output tensor, `y = -x`.
github-repos
def __calculate_boltzmann_factor(self, state_key, next_action_list):
    sigmoid = self.__calculate_sigmoid()
    q_df = self.q_df[self.q_df.state_key == state_key]
    q_df = q_df[q_df.isin(next_action_list)]
    q_df['boltzmann_factor'] = q_df['q_value'] / sigmoid
    q_df['boltzmann_factor'] = q_df['boltzmann_factor'].apply(np.exp)
    q_df['boltzmann_factor'] = q_df['boltzmann_factor'] / q_df['boltzmann_factor'].sum()
    return q_df
Calculate boltzmann factor. Args: state_key: The key of state. next_action_list: The possible action in `self.t+1`. If the length of this list is 0, all action should be possible. Returns: [(`The key of action`, `boltzmann probability`)]
codesearchnet
def prepare_aot(aot: list[str], srcs_dir: str) -> None: for file in aot: if 'external/local_tsl/' in file: copy_file(file, srcs_dir, 'external/local_tsl/') elif 'external/local_xla/' in file: copy_file(file, srcs_dir, 'external/local_xla/') else: copy_file(file, srcs_dir) shutil.move(os.path.join(srcs_dir, 'tensorflow/tools/pip_package/xla_build/CMakeLists.txt'), os.path.join(srcs_dir, 'CMakeLists.txt'))
Rearrange xla_aot files in the target directory. Args: aot: a list of paths to files that should be in the xla_aot directory. srcs_dir: target directory where files are copied to.
github-repos
def _get_resource_list(self, rsrc_dict):
    if 'collections' in rsrc_dict:
        return rsrc_dict['collections']
    if 'experiments' in rsrc_dict:
        return rsrc_dict['experiments']
    if 'channels' in rsrc_dict:
        return rsrc_dict['channels']
    if 'coords' in rsrc_dict:
        return rsrc_dict['coords']
    raise RuntimeError('Invalid list response received from Boss. No known resource type returned.')
Extracts list of resources from the HTTP response. Args: rsrc_dict (dict): HTTP response encoded in a dictionary. Returns: (list[string]): List of a type of resource (collections, experiments, etc). Raises: (RuntimeError): If rsrc_dict does not contain any known resources.
codesearchnet
def pytd_cls_to_instance_var(self, cls, subst=None, node=None, source_sets=None, discard_concrete_values=False): source_sets = source_sets or [[]] node = node or self.ctx.root_node kwargs = {'subst': subst, 'node': node, 'source_sets': source_sets, 'discard_concrete_values': discard_concrete_values} def constant_to_instance_value(new_type): return self.constant_to_value(abstract_utils.AsInstance(new_type), subst, node) if isinstance(cls, pytd.AnythingType): return self.unsolvable.to_variable(node) elif isinstance(cls, pytd.GenericType) and cls.name == 'typing.ClassVar': param, = cls.parameters return self.pytd_cls_to_instance_var(param, **kwargs) var = self.ctx.program.NewVariable() for t in pytd_utils.UnpackUnion(cls): if isinstance(t, pytd.TypeParameter): if not subst or t.full_name not in subst: raise self.TypeParameterError(t.full_name) else: for v in subst[t.full_name].bindings: for source_set in source_sets: if discard_concrete_values: value = self.get_maybe_abstract_instance(v.data) else: value = v.data var.AddBinding(value, source_set + [v], node) elif isinstance(t, pytd.NothingType): pass else: if isinstance(t, pytd.Annotated): typ = constant_to_instance_value(t.base_type) value = self._apply_metadata_annotations(typ, t.annotations) else: value = constant_to_instance_value(t) for source_set in source_sets: var.AddBinding(value, source_set, node) return var
Convert a constant instance to a Variable. This converts a constant to a cfg.Variable. Unlike constant_to_value, it can handle things that need to be represented as a Variable with multiple possible values (i.e., a union type), like pytd.Function. Args: cls: The pytd class to convert. subst: The current type parameters. node: The current CFG node. (For instances) source_sets: An iterator over instances of SourceSet (or just tuples). discard_concrete_values: Whether concrete values should be discarded from type parameters. Returns: A cfg.Variable. Raises: TypeParameterError: if conversion is attempted on a type parameter without a substitution. ValueError: if pytype is not of a known type.
github-repos
def __init__(self, value_type, default: typing.Optional[numbers.Number]=MISSING_VALUE, min_value: typing.Optional[numbers.Number]=None, max_value: typing.Optional[numbers.Number]=None, is_noneable: bool=False, frozen: bool=False): if min_value is not None and max_value is not None and (min_value > max_value): raise ValueError(f'"max_value" must be equal or greater than "min_value". Encountered: min_value={min_value}, max_value={max_value}.') self._min_value = min_value self._max_value = max_value super().__init__(value_type, default, is_noneable=is_noneable, frozen=frozen)
Constructor. Args: value_type: Type of number. default: Default value for this spec. min_value: (Optional) minimum value of acceptable values. max_value: (Optional) maximum value of acceptable values. is_noneable: If True, None is acceptable. frozen: If True, values other than the default value are not acceptable.
github-repos
def should_update(stack):
    if stack.locked:
        if not stack.force:
            logger.debug('Stack %s locked and not in --force list. Refusing to update.', stack.name)
            return False
        else:
            logger.debug('Stack %s locked, but is in --force list.', stack.name)
    return True
Tests whether a stack should be submitted for updates to CF. Args: stack (:class:`stacker.stack.Stack`): The stack object to check. Returns: bool: If the stack should be updated, return True.
codesearchnet
def _summary(self, name, tensor):
    if tensor.shape.ndims == 0:
        return tf.summary.scalar(name, tensor)
    else:
        return tf.summary.histogram(name, tensor)
Create a scalar or histogram summary matching the rank of the tensor. Args: name: Name for the summary. tensor: Tensor to summarize. Returns: Summary tensor.
codesearchnet
def get_extra_locals(self): raise NotImplementedError('subclasses must override this')
Returns extra static local variables to be made to transformed code. Subclasses must override this. Returns: extra_locals: A Dict[Text, Any] containing additional variables to make available to the transformed code.
github-repos
def load_glove(file):
    model = {}
    with open(file, encoding='utf8', errors='ignore') as f:
        for line in f:
            line = line.split(' ')
            word = line[0]
            vector = np.array([float(val) for val in line[1:]])
            model[word] = vector
    return model
Loads GloVe vectors in numpy array. Args: file (str): a path to a glove file. Return: dict: a dict of numpy arrays.
codesearchnet
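A usage sketch, assuming a GloVe text file such as glove.6B.50d.txt is available locally (the filename is illustrative):

import numpy as np

model = load_glove('glove.6B.50d.txt')  # hypothetical local path
king, queen = model['king'], model['queen']

# Cosine similarity between two word vectors.
cos = np.dot(king, queen) / (np.linalg.norm(king) * np.linalg.norm(queen))
print(cos)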
def get_program_type_by_slug(self, slug): return self._load_data( self.PROGRAM_TYPES_ENDPOINT, resource_id=slug, default=None, )
Get a program type by its slug. Arguments: slug (str): The slug to identify the program type. Returns: dict: A program type object.
juraj-google-style
def scatter_mul(self, sparse_delta, use_locking=False, name=None): raise NotImplementedError
Multiply this variable by `tf.IndexedSlices`. Args: sparse_delta: `tf.IndexedSlices` to multiply this variable by. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`.
github-repos
def old_collective_correlation(self):
    if self.has_run:
        return self.atoms.collective_dr_squared() / float(self.number_of_jumps)
    else:
        return None
Returns the collective correlation factor, f_I Args: None Returns: (Float): The collective correlation factor, f_I. Notes: This function assumes that the jump distance between sites has been normalised to a=1. If the jumps distance is not equal to 1 then the value returned by this function should be divided by a^2. Even better, use self.collective_correlation
codesearchnet
def clean_for_serialization(self, data):
    if isinstance(data, dict):
        for k in data.keys():
            if k.startswith('__'):
                del data[k]
            elif isinstance(data[k], bson.objectid.ObjectId):
                del data[k]
            elif isinstance(data[k], datetime.datetime):
                data[k] = data[k].isoformat() + 'Z'
            elif isinstance(data[k], dict):
                data[k] = self.clean_for_serialization(data[k])
            elif isinstance(data[k], list):
                data[k] = [self.clean_for_serialization(item) for item in data[k]]
    return data
Clean data in preparation for serialization. Deletes items whose key starts with '__' or whose value is a BSON ObjectId, converts datetime values to ISO 8601 strings, and recursively cleans nested dicts and lists. Args: data: Sample data to be serialized. Returns: Cleaned data dictionary.
juraj-google-style
def getsize(self, path=None, client_kwargs=None, header=None): return self._getsize_from_header(self.head(path, client_kwargs, header))
Return the size, in bytes, of path. Args: path (str): File path or URL. client_kwargs (dict): Client arguments. header (dict): Object header. Returns: int: Size in bytes.
codesearchnet
def _get_control_flow_context(self): return self._control_flow_context
Returns the current control flow context. Returns: A context object.
github-repos
def run_repair_pdb(self, silent=False, force_rerun=False): foldx_repair_pdb = 'foldx --command=RepairPDB --pdb={}'.format(self.pdb_file) foldx_repair_outfile = '{}_Repair.pdb'.format(op.splitext(self.pdb_file)[0]) ssbio.utils.command_runner(shell_command=foldx_repair_pdb, force_rerun_flag=force_rerun, silent=silent, outfile_checker=foldx_repair_outfile, cwd=self.foldx_dir) self.repaired_pdb_outfile = foldx_repair_outfile
Run FoldX RepairPDB on this PDB file. Original command:: foldx --command=RepairPDB --pdb=4bxi.pdb Args: silent (bool): If FoldX output should be silenced from printing to the shell. force_rerun (bool): If FoldX RepairPDB should be rerun even if a repaired file exists.
juraj-google-style
def __init__(self, a_schedule, b_schedule, merged_schedule, problem_reporter): self.a_schedule = a_schedule self.b_schedule = b_schedule self.merged_schedule = merged_schedule self.a_merge_map = {} self.b_merge_map = {} self.a_zone_map = {} self.b_zone_map = {} self._mergers = [] self._idnum = max(self._FindLargestIdPostfixNumber(self.a_schedule), self._FindLargestIdPostfixNumber(self.b_schedule)) self.problem_reporter = problem_reporter
Initialise the merger. Once this initialiser has been called, a_schedule and b_schedule should not be modified. Args: a_schedule: The old schedule, an instance of transitfeed.Schedule. b_schedule: The new schedule, an instance of transitfeed.Schedule. problem_reporter: The problem reporter, an instance of transitfeed.ProblemReporter.
juraj-google-style
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    return cls + token_ids_0 + sep + token_ids_1 + sep
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A MobileBERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
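A standalone sketch of the same token layout with placeholder special-token ids (101/102 are illustrative, not the actual MobileBERT vocabulary ids):

def build_with_special_tokens(ids_0, ids_1=None, cls_id=101, sep_id=102):
    # [CLS] X [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair.
    if ids_1 is None:
        return [cls_id] + ids_0 + [sep_id]
    return [cls_id] + ids_0 + [sep_id] + ids_1 + [sep_id]

print(build_with_special_tokens([7, 8, 9]))         # [101, 7, 8, 9, 102]
print(build_with_special_tokens([7, 8], [20, 21]))  # [101, 7, 8, 102, 20, 21, 102]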
def point_consensus(self, consensus_type): if "mean" in consensus_type: consensus_data = np.mean(self.data, axis=0) elif "std" in consensus_type: consensus_data = np.std(self.data, axis=0) elif "median" in consensus_type: consensus_data = np.median(self.data, axis=0) elif "max" in consensus_type: consensus_data = np.max(self.data, axis=0) elif "percentile" in consensus_type: percentile = int(consensus_type.split("_")[1]) consensus_data = np.percentile(self.data, percentile, axis=0) else: consensus_data = np.zeros(self.data.shape[1:]) consensus = EnsembleConsensus(consensus_data, consensus_type, self.ensemble_name, self.run_date, self.variable, self.start_date, self.end_date, self.units) return consensus
Calculate grid-point statistics across ensemble members. Args: consensus_type: mean, std, median, max, or percentile_nn Returns: EnsembleConsensus containing point statistic
juraj-google-style
def propagate(self, token, channel):
    if self.get_propagate_status(token, channel) != u'0':
        return
    url = self.url('sd/{}/{}/setPropagate/1/'.format(token, channel))
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataUploadError('Propagate fail: {}'.format(req.text))
    return True
Kick off the propagate function on the remote server. Arguments: token (str): The token to propagate channel (str): The channel to propagate Returns: boolean: Success
juraj-google-style
def listtransactions(self, user_id='', count=10, start_at=0):
    txlist = self.rpc.call('listtransactions', user_id, count, start_at)
    self.logger.debug('Got transaction list for ' + str(user_id))
    return txlist
List all transactions associated with this account. Args: user_id (str): this user's unique identifier count (int): number of transactions to return (default=10) start_at (int): start the list at this transaction (default=0) Returns: list [dict]: transactions associated with this user's account
codesearchnet
def _CreateRouteOptions(self, **kwargs): options = {'proto': self.proto_id, 'scope': 'host'} options.update(kwargs) return options
Create a dictionary of parameters to append to the ip route command. Args: **kwargs: dict, the string parameters to update in the ip route command. Returns: dict, the string parameters to append to the ip route command.
codesearchnet
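The method is a defaults-plus-overrides dict merge; a standalone sketch of the same pattern (the proto id value is a placeholder, not taken from the source):

def create_route_options(proto_id='66', **kwargs):
    # Fixed defaults first, then caller-supplied parameters win on conflict.
    options = {'proto': proto_id, 'scope': 'host'}
    options.update(kwargs)
    return options

print(create_route_options(dev='eth0', metric='100'))
# {'proto': '66', 'scope': 'host', 'dev': 'eth0', 'metric': '100'}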
def delete_with_casper_admin_save(self, pkg):
    if pkg.__class__.__name__ == 'Package':
        package_to_delete = pkg.id
    elif isinstance(pkg, int):
        package_to_delete = pkg
    elif isinstance(pkg, str):
        package_to_delete = self.connection['jss'].Package(pkg).id
    else:
        raise TypeError
    data_dict = {'username': self.connection['jss'].user,
                 'password': self.connection['jss'].password,
                 'deletedPackageID': package_to_delete}
    self.connection['jss'].session.post(url=self.connection['delete_url'], data=data_dict)
Delete a pkg from the distribution server. Args: pkg: Can be a jss.Package object, an int ID of a package, or a filename.
codesearchnet
def set_energy(self, spins, target_energy): spin_energy = self.energy(spins) self.assertions.add(Equals(spin_energy, limitReal(target_energy)))
Set the energy of Theta with spins fixed to target_energy. Args: spins (dict): Spin values for a subset of the variables in Theta. target_energy (float): The desired energy for Theta with spins fixed. Notes: Add equality constraint to assertions.
codesearchnet
def __setitem__(self, key, value):
    if not self._is_valid(value):
        value = self._fix_value(value)
    self._inner.__setitem__(key, value)
Attempt to set the value at position `key` to the `value`. If a value is not the correct type, an attempt will be made to convert it to the correct type. Args: key: An index. value: A value to set.
juraj-google-style
def __init__(self, binaryDirectory=None): if binaryDirectory is None: self._impl = amplpython.Environment() else: self._impl = amplpython.Environment(binaryDirectory)
Constructor with ability to select the location of the AMPL binary. Note that if binaryDirectory is set, the automatic lookup for an AMPL executable will not be executed. Args: binaryDirectory: The directory in which look for the AMPL Binary.
juraj-google-style
def cpfs(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor],
         noise: Optional[Noise]=None) -> Tuple[List[TensorFluent], List[TensorFluent]]:
    scope = self.transition_scope(state, action)
    batch_size = int(state[0].shape[0])
    (interm_fluents, next_state_fluents) = self.compile_cpfs(scope, batch_size, noise)
    interms = [fluent for (_, fluent) in interm_fluents]
    next_state = [fluent for (_, fluent) in next_state_fluents]
    return (interms, next_state)
Compiles the intermediate and next state fluent CPFs given the current `state` and `action`. Args: state (Sequence[tf.Tensor]): A tuple of state tensors. action (Sequence[tf.Tensor]): A tuple of action tensors. Returns: Tuple[List[TensorFluent], List[TensorFluent]]: A pair of lists of TensorFluent representing the intermediate and state CPFs.
codesearchnet
def assimilate(self, path): try: d = self.get_task_doc(path) if ((self.mapi_key is not None) and (d['state'] == 'successful')): self.calculate_stability(d) tid = self._insert_doc(d) return tid except Exception as ex: import traceback logger.error(traceback.format_exc()) return False
Parses vasp runs. Then insert the result into the db. and return the task_id or doc of the insertion. Returns: If in simulate_mode, the entire doc is returned for debugging purposes. Else, only the task_id of the inserted doc is returned.
codesearchnet
def check_requirements_file(req_file, skip_packages): reqs = read_requirements(req_file) if skip_packages is not None: reqs = [req for req in reqs if req.name not in skip_packages] outdated_reqs = filter(None, [check_req(req) for req in reqs]) return outdated_reqs
Return list of outdated requirements. Args: req_file (str): Filename of requirements file skip_packages (list): List of package names to ignore.
juraj-google-style
def _circuit_as_layers(circuit: circuits.Circuit, grouping: _QubitGrouping) -> List[_TransformsThenCzs]: frontier = {q: 0 for q in circuit.all_qubits()} layers = [] while True: any_group_matrices = False group_matrices = [] for g in grouping.groups: start_frontier = {q: frontier[q] for q in g} end_frontier = circuit.reachable_frontier_from(start_frontier) mergeable_ops = circuit.findall_operations_between(start_frontier, end_frontier) for (q, v) in end_frontier.items(): frontier[q] = v group_matrix = np.eye((1 << len(g))).reshape(((2, 2) * len(g))) if mergeable_ops: any_group_matrices = True for (_, op) in mergeable_ops: group_matrix = linalg.targeted_left_multiply(left_matrix=protocols.unitary(op).reshape(((2, 2) * len(op.qubits))), right_target=group_matrix, target_axes=[grouping.loc(q)[1] for q in op.qubits]) group_matrices.append(np.transpose(group_matrix.reshape((1 << len(g)), (1 << len(g))))) end_frontier = circuit.reachable_frontier_from(frontier, is_blocker=(lambda op: grouping.all_in_same_group(*op.qubits))) cz_ops = circuit.findall_operations_between(frontier, end_frontier) frontier = end_frontier cz_indices = [] for (_, cz) in cz_ops: (a, b) = cz.qubits assert (cz == ops.CZ(a, b)) cz_indices.append((grouping.ind(a), grouping.ind(b))) if ((not any_group_matrices) and (not cz_indices)): break layer = _TransformsThenCzs(group_matrices=group_matrices, cz_indices=cz_indices) layers.append(layer) assert (frontier == {q: len(circuit) for q in circuit.all_qubits()}) return layers
Transforms a circuit into a series of GroupMatrix+CZ layers. Args: circuit: The circuit to transform. grouping: How the circuit's qubits are combined into groups. Returns: A list of layers. Each layer has a matrix to apply to each group of qubits, and a list of CZs to apply to pairs of qubits crossing between groups.
codesearchnet
def qualifyContracts(self, *contracts: List[Contract]) -> List[Contract]: return self._run(self.qualifyContractsAsync(*contracts))
Fully qualify the given contracts in-place. This will fill in the missing fields in the contract, especially the conId. Returns a list of contracts that have been successfully qualified. This method is blocking. Args: contracts: Contracts to qualify.
codesearchnet
def __init__(self, name, aliases=None, description=None, urls=None): super(StorageDataTypeDefinition, self).__init__( name, aliases=aliases, description=description, urls=urls) self.byte_order = definitions.BYTE_ORDER_NATIVE
Initializes a storage data type definition. Args: name (str): name. aliases (Optional[list[str]]): aliases. description (Optional[str]): description. urls (Optional[list[str]]): URLs.
juraj-google-style
def recipe_policebot(config, recipe_name): drive(config, {'auth': 'user', 'hour': [], 'copy': {'source': 'https:
A tool that helps enforce CM object name conventions by checking names against a set of client-defined patterns, and emailing violations to appropriate agency teams on a daily basis. Args: recipe_name (string) - Name of document to deploy to.
github-repos
def find_nearest(a, value, index=False):
    i = np.abs(a - value).argmin()
    if index:
        return i
    else:
        return a[i]
Find the array value, or index of the array value, closest to some given value. Args: a (ndarray) value (float) index (bool): whether to return the index instead of the array value. Returns: float. The array value (or index, as int) nearest the specified value.
codesearchnet
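A quick usage sketch:

import numpy as np

a = np.array([1.0, 2.5, 4.0, 7.5])
print(find_nearest(a, 3.0))              # 2.5 (nearest value)
print(find_nearest(a, 3.0, index=True))  # 1   (its index)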
def _check_jwt_claims(jwt_claims):
    current_time = time.time()
    expiration = jwt_claims[u'exp']
    if not isinstance(expiration, INT_TYPES):
        raise suppliers.UnauthenticatedException(u'Malformed claim: "exp" must be an integer')
    if current_time >= expiration:
        raise suppliers.UnauthenticatedException(u'The auth token has already expired')
    if u'nbf' not in jwt_claims:
        return
    not_before_time = jwt_claims[u'nbf']
    if not isinstance(not_before_time, INT_TYPES):
        raise suppliers.UnauthenticatedException(u'Malformed claim: "nbf" must be an integer')
    if current_time < not_before_time:
        raise suppliers.UnauthenticatedException(u'Current time is less than the "nbf" time')
Checks whether the JWT claims should be accepted. Specifically, this method checks the "exp" claim and the "nbf" claim (if present), and raises UnauthenticatedException if 1) the current time is before the time identified by the "nbf" claim, or 2) the current time is equal to or after the time identified by the "exp" claim. Args: jwt_claims: the JWT claims whose expiration is to be checked. Raises: UnauthenticatedException: When the "exp" claim is malformed or the JWT has already expired.
codesearchnet
def FindExtensionByName(self, full_name): full_name = _NormalizeFullyQualifiedName(full_name) message_name, _, extension_name = full_name.rpartition('.') try: scope = self.FindMessageTypeByName(message_name) except KeyError: scope = self.FindFileContainingSymbol(full_name) return scope.extensions_by_name[extension_name]
Loads the named extension descriptor from the pool. Args: full_name: The full name of the extension descriptor to load. Returns: A FieldDescriptor, describing the named extension.
juraj-google-style
def fetch_lid(self, woeid): rss = self._fetch_xml(LID_LOOKUP_URL.format(woeid, 'f')) try: link = rss.find('channel/link').text except AttributeError: return None lid = re.search('[A-Za-z]{4}[0-9]{4}', link).group() return lid
Fetch a location's corresponding LID. Args: woeid: (string) the location's WOEID. Returns: a string containing the requested LID or None if the LID could not be found. Raises: urllib.error.URLError: urllib.request could not open the URL (Python 3). urllib2.URLError: urllib2 could not open the URL (Python 2). xml.etree.ElementTree.ParseError: xml.etree.ElementTree failed to parse the XML document.
codesearchnet
def detect_gpt(self, filename, offset, fs_guid):
    self.logger.debug('Detecting GPT partition type')
    if fs_guid not in self.__gpt_plugins:
        return None
    else:
        plugins = self.__gpt_plugins.get(fs_guid)
        for plugin in plugins:
            if plugin.detect(filename, offset):
                return plugin.get_volume_object()
    return None
Used by rawdisk.session.Session to match GPT partitions against filesystem plugins. Args: filename: device or file that it will read in order to detect the filesystem fs_guid: filesystem GUID to match (ex. {EBD0A0A2-B9E5-4433-87C0-68B6B72699C7}) offset: offset for the filesystem that is being matched Returns: Volume object supplied by matched plugin. If there is no match, None is returned
juraj-google-style
def print_version():
    v = get_version()
    try:
        s = _STR_WIN[v]
    except KeyError:
        s = "Unknow OS"
    print("-----------------------------------------------------------")
    print("Python Version : {}.{}.{}".format(*sys.version_info[:3]))
    print("Windows Version String : {}".format(s))
    print("Windows Major Version : {}".format(v[0]))
    print("Windows Minor Version : {}".format(v[1]))
    print("Windows Service Pack (or Build) Version : {}".format(v[2]))
    print("Is Windows Server : {}".format('Yes' if v[3]==1 else 'No'))
    print("Is Windows 10 (or Windows Server 2016) : {}".format('Yes' if v >= WIN_10 else 'No'))
    print("-----------------------------------------------------------")
Print get_version() return value in a readable format. Params: None Returns: None
juraj-google-style
def _cache_at_least(self, size):
    try:
        while len(self._result_cache) < size:
            self._result_cache.append(next(self._result_iter))
        return True
    except StopIteration:
        return False
Attempts to fill the result cache with at least the given number of results. Returns: bool: Whether the cache contains at least the given size.
codesearchnet
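A standalone sketch of the caching pattern above, with a plain iterator standing in for the query results (class and attribute names here are illustrative):

class LazyResults:
    def __init__(self, iterable):
        self._result_iter = iter(iterable)
        self._result_cache = []

    def _cache_at_least(self, size):
        # Pull from the iterator until the cache holds `size` items,
        # or report False if the iterator runs out first.
        try:
            while len(self._result_cache) < size:
                self._result_cache.append(next(self._result_iter))
            return True
        except StopIteration:
            return False

results = LazyResults(range(3))
print(results._cache_at_least(2))  # True  -> cache is [0, 1]
print(results._cache_at_least(5))  # False -> iterator exhausted after [0, 1, 2]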
def ParseRow(self, parser_mediator, row_offset, row): timestamp = self._ParseTimestamp(parser_mediator, row) if timestamp is None: return try: action = int(row['action'], 10) except (ValueError, TypeError): action = None try: scan_type = int(row['scan_type'], 10) except (ValueError, TypeError): scan_type = None event_data = TrendMicroAVEventData() event_data.action = action event_data.filename = row['filename'] event_data.offset = row_offset event_data.path = row['path'] event_data.scan_type = scan_type event_data.threat = row['threat'] event = time_events.DateTimeValuesEvent( timestamp, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a line of the log file and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row_offset (int): line number of the row. row (dict[str, str]): fields of a single row, as specified in COLUMNS.
juraj-google-style
def forward(self, hidden_states: torch.Tensor): gate_score = self.act(self.w_0(hidden_states)) hidden_states = self.w_1(hidden_states) hidden_states = gate_score * hidden_states return hidden_states
Transform an input tensor from one feature space to another via a nonlinear operation Args: hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`)
github-repos
def image_channel_compress_top(body_output, targets, model_hparams, vocab_size): del targets with tf.variable_scope('image_channel_compress_modality'): hidden_size = model_hparams.hidden_size img_len = model_hparams.img_len channels = 3 batch = common_layers.shape_list(body_output)[0] x = tf.layers.conv2d(body_output, (hidden_size * channels), kernel_size=(1, 1), strides=(1, 1), padding='VALID', activation=tf.nn.relu, name='decompress_conv') x = tf.reshape(x, [batch, img_len, (img_len * channels), hidden_size]) x = common_layers.layer_preprocess(x, model_hparams) x = tf.layers.dense(x, vocab_size, use_bias=True, activation=None, name='output_conv') x = tf.reshape(x, [batch, img_len, img_len, channels, vocab_size]) return x
Transforms body output to return logits. Args: body_output: Tensor of shape [batch, img_len, img_len, depth]. targets: model_hparams: HParams, model hyperparmeters. vocab_size: int, vocabulary size. Returns: Tensor of shape [batch, img_len, img_len, channels, vocab_size].
codesearchnet
def read_config_string_options(obj: Any, parser: ConfigParser, section: str, options: Iterable[str], default: str = None) -> None: for o in options: setattr(obj, o, get_config_string_option(parser, section, o, default=default))
Reads config options and writes them as attributes of ``obj``, with attribute names as per ``options``. Args: obj: the object to modify parser: instance of :class:`ConfigParser` section: section name within config file options: option (variable) names within that section default: value to use for any missing options Returns:
juraj-google-style
def replace(s, pattern, replacement):
    def _replacement(matchobj):
        return replacement
    return re.sub(pattern, _replacement, s)
Replaces occurrences of a match string in a given string and returns the new string. The match string can be a regex expression. Args: s (str): the string to modify pattern (str): the search expression replacement (str): the string to replace each match with
juraj-google-style
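Usage sketch. Because the replacement comes from a callback, backreferences such as \1 in `replacement` are not expanded:

print(replace('2019-05-01', r'\d{4}', 'YYYY'))  # 'YYYY-05-01'
print(replace('a1b2c3', r'\d', '#'))            # 'a#b#c#'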
def prune(self, limit=None, n=None, percentile=None, keep_ends=False): strip = self.copy() if not (limit or n or percentile): m = "You must provide a limit or n or percentile for pruning." raise StriplogError(m) if limit: prune = [i for i, iv in enumerate(strip) if iv.thickness < limit] if n: prune = strip.thinnest(n=n, index=True) if percentile: n = np.floor(len(strip)*percentile/100) prune = strip.thinnest(n=n, index=True) if keep_ends: first, last = 0, len(strip) - 1 if first in prune: prune.remove(first) if last in prune: prune.remove(last) del strip[prune] return strip
Remove intervals below a certain limit thickness. In place. Args: limit (float): Anything thinner than this will be pruned. n (int): The n thinnest beds will be pruned. percentile (float): The thinnest specified percentile will be pruned. keep_ends (bool): Whether to keep the first and last, regardless of whether they meet the pruning criteria.
juraj-google-style
def to_frame(self, **kwargs):
    df = export.write_dataframe(self._values, **kwargs)
    df.name = self.title
    return df
Return a pandas DataFrame loaded from the worksheet data. Args: \**kwargs: passed to ``pandas.read_csv()`` (e.g. ``header``, ``index_col``) Returns: pandas.DataFrame: new ``DataFrame`` instance
juraj-google-style
def _PendingCount(to_ops: list[ops.Operation], from_ops: list[ops.Operation], colocate_gradients_with_ops, func_graphs, xs_set): reached_ops = set() _MarkReachedOps(from_ops, reached_ops, func_graphs) reachable_to_ops = set((op for op in to_ops if op in reached_ops)) between_ops = set() between_op_list = [] queue = collections.deque() queue.extend(to_ops) while queue: op = queue.popleft() if op in reached_ops: between_ops.add(op) between_op_list.append(op) reached_ops.remove(op) for inp in _NonEagerInputs(op, xs_set): queue.append(inp.op) loop_state = control_flow_state.MaybeCreateControlFlowState(between_op_list, between_ops, colocate_gradients_with_ops) pending_count = collections.defaultdict(int) for op in between_op_list: for x in _NonEagerInputs(op, xs_set): if x.op in between_ops: pending_count[x.op] += 1 return (reachable_to_ops, pending_count, loop_state)
Initialize the pending count for ops between two lists of Operations. 'pending_count[op]' indicates the number of backprop inputs to this operation. Args: to_ops: list of Operations. from_ops: list of Operations. colocate_gradients_with_ops: Python bool. See docstring of gradients(). func_graphs: list of FuncGraphs. This method will traverse through these functions if they capture from_ops or any reachable ops. This is useful if to_ops occur in a function and from_ops are in an outer function or graph. xs_set: ObjectIdentitySet of Tensors. Returns: A tuple containing: (1) the subset of to_ops reachable from from_ops by a path of zero or more backpropagatable tensors, (2) a mapping from operation to the number of backprop inputs to that op, and (3) a ControlFlowState object which is not None if the ops between from_ops and to_ops contain control flow loops.
github-repos
def PushBack(self, string='', **unused_kwargs): self.buffer = string + self.buffer self.processed_buffer = self.processed_buffer[:-len(string)]
Push the match back on the stream. Args: string: optional data.
juraj-google-style
def get_ip_prefixes_from_bird(filename):
    prefixes = []
    with open(filename, 'r') as bird_conf:
        lines = bird_conf.read()
    for line in lines.splitlines():
        line = line.strip(', ')
        if valid_ip_prefix(line):
            prefixes.append(line)
    return prefixes
Build a list of IP prefixes found in Bird configuration. Arguments: filename (str): The absolute path of the Bird configuration file. Notes: It can only parse a file with the following format define ACAST_PS_ADVERTISE = [ 10.189.200.155/32, 10.189.200.255/32 ]; Returns: A list of IP prefixes.
juraj-google-style
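A usage sketch that writes a small Bird configuration in the documented format and parses it back; it assumes valid_ip_prefix() accepts strings like '10.189.200.155/32':

import tempfile

conf = (
    "define ACAST_PS_ADVERTISE =\n"
    "    [\n"
    "        10.189.200.155/32,\n"
    "        10.189.200.255/32\n"
    "    ];\n"
)
with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as handle:
    handle.write(conf)
    conf_path = handle.name

print(get_ip_prefixes_from_bird(conf_path))
# ['10.189.200.155/32', '10.189.200.255/32']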
def transpose(self, name=None): if (name is None): name = (self.module_name + '_transpose') if (self._data_format == DATA_FORMAT_NWC): stride = self._stride[1:(- 1)] else: stride = self._stride[2:] return Conv1D(output_channels=(lambda : self.input_channels), kernel_shape=self.kernel_shape, stride=stride, padding=self.padding, use_bias=self._use_bias, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, data_format=self._data_format, custom_getter=self._custom_getter, name=name)
Returns matching `Conv1D` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv1D` module.
codesearchnet
def _GetSignedBinaryMetadata(binary_type, relative_path): root_urn = _GetSignedBlobsRoots()[binary_type] binary_urn = root_urn.Add(relative_path) blob_iterator, timestamp = signed_binary_utils.FetchBlobsForSignedBinary( binary_urn) binary_size = 0 has_valid_signature = True for blob in blob_iterator: binary_size += len(blob.data) if not has_valid_signature: continue try: blob.Verify(config.CONFIG["Client.executable_signing_public_key"]) except rdf_crypto.Error: has_valid_signature = False return ApiGrrBinary( path=relative_path, type=binary_type, size=binary_size, timestamp=timestamp, has_valid_signature=has_valid_signature)
Fetches metadata for the given binary from the datastore. Args: binary_type: ApiGrrBinary.Type of the binary. relative_path: Relative path of the binary, relative to the canonical URN roots for signed binaries (see _GetSignedBlobsRoots()). Returns: An ApiGrrBinary RDFProtoStruct containing metadata for the binary.
juraj-google-style
def _call(sig, *inputs, **kwargs): if len(inputs) != len(sig.input_arg): raise ValueError(f'Expected {len(sig.input_arg):d} arguments, got {len(inputs):d}.') name = kwargs.pop('name', None) g = ops.get_default_graph() func_name = sig.name if name is None: name = func_name attrs = _parse_kwargs_as_attrs(func_name, **kwargs) output_types = [dtypes.DType(x.type) for x in sig.output_arg] op = g._create_op_internal(func_name, list(inputs), output_types, name=name, attrs=attrs, op_def=sig) if op.outputs: if len(op.outputs) == 1: ret = op.outputs[0] else: ret = tuple(op.outputs) else: ret = op return (ret, op)
Adds a node calling a function. This adds a `call` op to the default graph that calls the function of signature `sig`, passing the tensors in `inputs` as arguments. It returns the outputs of the call, which are one or more tensors. `sig` is OpDefArg.a `_DefinedFunction` object. You can pass an optional keyword parameter `name=string` to name the added operation. You can pass an optional keyword parameter `noinline=True|False` to instruct the runtime not to inline the function body into the call site. Args: sig: OpDefArg. The signature of the function. *inputs: arguments to the function. **kwargs: Optional keyword arguments. Can only contain 'name' or 'noinline'. Returns: A 2-element tuple. First element: a Tensor if the function returns a single value; a list of Tensors if the function returns multiple value; the Operation if the function returns no values. Second element: the Operation. Raises: ValueError: if the arguments are invalid.
github-repos
def valid(self, name):
    name = re.sub('[^0-9a-zA-Z_]', '', name)
    if re.match('[0-9]', name):
        name = '_' + name
    return name
Ensure a variable name is valid. Note: Assumes variable names are ASCII, which isn't necessarily true in Python 3. Args: name: A proposed variable name. Returns: A valid version of the name.
codesearchnet
def quality_score(self, tests, alias=None):
    results = self.quality(tests, alias=alias).values()
    if results:
        return sum(results) / len(results)
    return -1
Run a series of tests and return the normalized score. 1.0: Passed all tests. (0-1): Passed a fraction of tests. 0.0: Passed no tests. -1.0: Took no tests. Args: tests (list): a list of functions. alias (dict): a dictionary mapping mnemonics to lists of mnemonics. Returns: float. The fraction of tests passed, or -1 for 'took no tests'.
juraj-google-style
def _get_target(self, target): depth = (target.count('.') + 1) parts = target.split('.', 1) for m in self.modules: if (parts[0] == m.name): if (depth < 3): return m for p in self.packages: if (parts[0] == p.name): if (depth == 1): return p target = p._get_target(parts[1]) if target: return target if (depth < 3): return p return None
Get the Package or Module related to given target. Args: target (str): target to find. Returns: Package/Module: package containing target or corresponding module.
codesearchnet
def pathcase(string): string = snakecase(string) if (not string): return string return re.sub('_', '/', string)
Convert string into path case. Join punctuation with slash. Args: string: String to convert. Returns: string: Path cased string.
codesearchnet
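Usage sketch; the exact output depends on the snakecase() helper, which is assumed here to turn 'FooBarBaz' into 'foo_bar_baz':

print(pathcase('FooBarBaz'))    # 'foo/bar/baz' (assuming snakecase gives 'foo_bar_baz')
print(pathcase('foo_bar_baz'))  # 'foo/bar/baz'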
def LogUpdate(self, data): for hypo in self.Values(): like = self.LogLikelihood(data, hypo) self.Incr(hypo, like)
Updates a suite of hypotheses based on new data. Modifies the suite directly; if you want to keep the original, make a copy. Note: unlike Update, LogUpdate does not normalize. Args: data: any representation of the data
codesearchnet
def ideal_atom_mask(prot: Protein) -> np.ndarray: return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
Computes an ideal atom mask. `Protein.atom_mask` typically is defined according to the atoms that are reported in the PDB. This function computes a mask according to heavy atoms that should be present in the given sequence of amino acids. Args: prot: `Protein` whose fields are `numpy.ndarray` objects. Returns: An ideal atom mask.
github-repos
def rapidfire(self, check_status=True, max_nlaunch=-1, max_loops=1, sleep_time=5, **kwargs): self.check_pid_file() self.set_spectator_mode(False) if check_status: self.check_status() from .launcher import PyLauncher return PyLauncher(self, **kwargs).rapidfire(max_nlaunch=max_nlaunch, max_loops=max_loops, sleep_time=sleep_time)
Use :class:`PyLauncher` to submits tasks in rapidfire mode. kwargs contains the options passed to the launcher. Args: check_status: max_nlaunch: Maximum number of launches. default: no limit. max_loops: Maximum number of loops sleep_time: seconds to sleep between rapidfire loop iterations Return: Number of tasks submitted.
juraj-google-style
def unpack_guid(self, offset):
    o = self._offset + offset
    try:
        _bin = bytes(self._buf[o:o + 16])
    except IndexError:
        raise OverrunBufferException(o, len(self._buf))
    h = [six.indexbytes(_bin, i) for i in range(len(_bin))]
    # The format string did not survive extraction; the standard 4-2-2-2-6
    # hex GUID text layout is assumed here, matching the byte order below.
    return ("{:02x}{:02x}{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-"
            "{:02x}{:02x}-{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}").format(
        h[3], h[2], h[1], h[0],
        h[5], h[4],
        h[7], h[6],
        h[8], h[9],
        h[10], h[11], h[12], h[13], h[14], h[15])
Returns a string containing a GUID starting at the relative offset. Arguments: - `offset`: The relative offset from the start of the block. Throws: - `OverrunBufferException`
juraj-google-style
def start(self, timeout=None):
    assert self.state == STOPPED, 'Process already started'
    self.state = STARTING
    should_publish = self._start_controllers(self._controllers.values(), timeout)
    if should_publish:
        self._publish_controllers(timeout)
    self.state = STARTED
Start the process going Args: timeout (float): Maximum amount of time to wait for each spawned process. None means forever
codesearchnet
def __init__(self, definition): self._definition = definition self.max_value_len = 256 self.max_depth = 2 self.max_list_items = 10 self.max_sublist_items = 5 self.quota_recovery_ms = 500 self._quota_recovery_start_time = None level = self._definition.get('logLevel') if not level or level == 'INFO': self._log_message = log_info_message elif level == 'WARNING': self._log_message = log_warning_message elif level == 'ERROR': self._log_message = log_error_message else: self._log_message = None
Class constructor. Args: definition: breakpoint definition indicating log level, message, etc.
juraj-google-style
def single_lf_summary(Y_p, Y=None): L = sparse.csr_matrix(arraylike_to_numpy(Y_p).reshape((- 1), 1)) return lf_summary(L, Y)
Calculates coverage, overlap, conflicts, and accuracy for a single LF Args: Y_p: a np.array or torch.Tensor of predicted labels Y: a np.array or torch.Tensor of true labels (if known)
codesearchnet
def __init__(self, filename): super(FileNameFileEntryFilter, self).__init__() self._filename = filename.lower()
Initializes a file entry filter. Args: filename (str): name of the file.
juraj-google-style
def do_ams_put(endpoint, path, body, access_token, rformat="json", ds_min_version="3.0;NetFx"): min_ds = dsversion_min content_acceptformat = json_acceptformat if rformat == "json_only": min_ds = ds_min_version content_acceptformat = json_only_acceptformat headers = {"Content-Type": content_acceptformat, "DataServiceVersion": min_ds, "MaxDataServiceVersion": dsversion_max, "Accept": json_acceptformat, "Accept-Charset" : charset, "Authorization": "Bearer " + access_token, "x-ms-version" : xmsversion} response = requests.put(endpoint, data=body, headers=headers, allow_redirects=False) if response.status_code == 301: redirected_url = ''.join([response.headers['location'], path]) response = requests.put(redirected_url, data=body, headers=headers) return response
Do an AMS HTTP PUT request and return JSON. Args: endpoint (str): Azure Media Services Initial Endpoint. path (str): Azure Media Services Endpoint Path. body (str): Azure Media Services Content Body. access_token (str): A valid Azure authentication token. rformat (str): A required JSON Accept Format. ds_min_version (str): A required DS MIN Version. Returns: HTTP response. JSON body.
juraj-google-style
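An illustrative call of `do_ams_put`; the endpoint, path, body, and token below are placeholders rather than real Azure Media Services values:

import json

endpoint = "https://example.media.windows.net/api/Assets('nb:cid:UUID:0000')"
path = "/api/Assets('nb:cid:UUID:0000')"
body = json.dumps({"Name": "renamed-asset"})
access_token = "<bearer-token>"

response = do_ams_put(endpoint, path, body, access_token)
print(response.status_code)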
def convert_collections_to_typing(typ): if hasattr(typ, '__iter__'): if hasattr(typ, '__next__'): typ = typing.Iterator[typ.__args__] elif hasattr(typ, 'send') and hasattr(typ, 'throw'): typ = typing.Generator[typ.__args__] elif _match_is_exactly_iterable(typ): typ = typing.Iterable[typ.__args__] return typ
Converts a given collections.abc type to a typing object. Args: typ: an object inheriting from a collections.abc object Returns: type: The corresponding typing object.
github-repos
def load(source, triples=False, cls=PENMANCodec, **kwargs): decode = cls(**kwargs).iterdecode if hasattr(source, 'read'): return list(decode(source.read())) else: with open(source) as fh: return list(decode(fh.read()))
Deserialize a list of PENMAN-encoded graphs from *source*. Args: source: a filename or file-like object to read from triples: if True, read graphs as triples instead of as PENMAN cls: serialization codec class kwargs: keyword arguments passed to the constructor of *cls* Returns: a list of Graph objects
juraj-google-style
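Because `load` accepts any object with a `read` method, it can be exercised without touching the filesystem by wrapping a small PENMAN string in `io.StringIO` (the graph below is just an example):

import io

penman_text = "(d / drink-01 :ARG0 (c / cat) :ARG1 (w / water))"
graphs = load(io.StringIO(penman_text))
print(len(graphs), graphs[0].top)  # 1 d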
def _canonicalize_jit_arguments(inp): return nest.map_structure(_canonicalize_jit_arg, inp)
Canonicalize arguments to be used for jit. Args: inp: a nested structure of arguments to be canonicalized (i.e. to be converted to Tensors). Only tf_np.ndarray and things accepted by `tf.convert_to_tensor` will be converted. Returns: The canonicalized version.
github-repos
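The underlying pattern, mapping a conversion function over a nested structure, can be sketched with public TensorFlow APIs; this is a conceptual analogue of the helper above, not the internal function itself:

import tensorflow as tf

args = {"x": [1.0, 2.0], "params": (3, {"scale": 0.5})}
canonical = tf.nest.map_structure(tf.convert_to_tensor, args)
print(canonical["params"][1]["scale"].dtype)  # <dtype: 'float32'>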
def get_help(self, prefix='', include_special_flags=True): flags_by_module = self.flags_by_module_dict() if flags_by_module: modules = sorted(flags_by_module) main_module = sys.argv[0] if (main_module in modules): modules.remove(main_module) modules = ([main_module] + modules) return self._get_help_for_modules(modules, prefix, include_special_flags) else: output_lines = [] values = six.itervalues(self._flags()) if include_special_flags: values = itertools.chain(values, six.itervalues(_helpers.SPECIAL_FLAGS._flags())) self._render_flag_list(values, output_lines, prefix) return '\n'.join(output_lines)
Returns a help string for all known flags. Args: prefix: str, per-line output prefix. include_special_flags: bool, whether to include description of SPECIAL_FLAGS, i.e. --flagfile and --undefok. Returns: str, formatted help message.
codesearchnet
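A small usage sketch with the public `absl.flags` API, where this method is available on the global `FLAGS` object; the flag names are made up:

from absl import flags

flags.DEFINE_string("job_name", "demo", "Name of the job to run.")
flags.DEFINE_integer("num_workers", 4, "How many workers to start.")

print(flags.FLAGS.get_help(prefix="  "))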
def __init__(self, device): super(OneDeviceStrategy, self).__init__(OneDeviceExtended(self, device)) distribute_lib.distribution_strategy_gauge.get_cell('V2').set('OneDeviceStrategy')
Creates a `OneDeviceStrategy`. Args: device: Device string identifier for the device on which the variables should be placed. See class docs for more details on how the device is used. Examples: "/cpu:0", "/gpu:0", "/device:CPU:0", "/device:GPU:0"
github-repos
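Typical use of `tf.distribute.OneDeviceStrategy`, which this constructor backs: create the strategy for a single device and build variables inside its scope.

import tensorflow as tf

strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
with strategy.scope():
    v = tf.Variable(1.0)  # placed on /cpu:0

@tf.function
def step(x):
    return v * x

print(strategy.run(step, args=(tf.constant(2.0),)))  # tf.Tensor(2.0, ...)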
def slice_naive(self, key): cls = self.__class__ key = check_key(self, key) return cls(self.loc[key])
Slice a data object based on its index, either by value (.loc) or position (.iloc). Args: key: Single index value, slice, tuple, or list of indices/positionals Returns: data: Slice of self
juraj-google-style
def filepattern(self, data_dir, mode, shard=None): path = os.path.join(data_dir, self.dataset_filename()) shard_str = "-%05d" % shard if shard is not None else "" if mode == DatasetSplit.TRAIN: suffix = "train" elif mode in [DatasetSplit.EVAL, tf.estimator.ModeKeys.PREDICT]: suffix = "dev" else: assert mode == DatasetSplit.TEST suffix = "test" return "%s-%s%s*" % (path, suffix, shard_str)
Get filepattern for data files for mode. Matches mode to a suffix. * DatasetSplit.TRAIN: train * DatasetSplit.EVAL: dev * DatasetSplit.TEST: test * tf.estimator.ModeKeys.PREDICT: dev Args: data_dir: str, data directory. mode: DatasetSplit shard: int, if provided, will only read data from the specified shard. Returns: filepattern str
juraj-google-style
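An illustrative call, assuming `problem` is an instance of the problem class this method belongs to and that its `dataset_filename()` returns `'my_problem'` (both are assumptions made for the example):

import tensorflow as tf

train_pattern = problem.filepattern("/data", DatasetSplit.TRAIN)         # "/data/my_problem-train*"
eval_pattern = problem.filepattern("/data", DatasetSplit.EVAL, shard=0)  # "/data/my_problem-dev-00000*"
files = tf.io.gfile.glob(train_pattern)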
def _info_to_string(info): for key in _TENSORBOARD_INFO_FIELDS: field_type = _TENSORBOARD_INFO_FIELDS[key] if (not isinstance(getattr(info, key), field_type.runtime_type)): raise ValueError(('expected %r of type %s, but found: %r' % (key, field_type.runtime_type, getattr(info, key)))) if (info.version != version.VERSION): raise ValueError(("expected 'version' to be %r, but found: %r" % (version.VERSION, info.version))) json_value = {k: _TENSORBOARD_INFO_FIELDS[k].serialize(getattr(info, k)) for k in _TENSORBOARD_INFO_FIELDS} return json.dumps(json_value, sort_keys=True, indent=4)
Convert a `TensorBoardInfo` to string form to be stored on disk. The format returned by this function is opaque and should only be interpreted by `_info_from_string`. Args: info: A valid `TensorBoardInfo` object. Raises: ValueError: If any field on `info` is not of the correct type. Returns: A string representation of the provided `TensorBoardInfo`.
codesearchnet
def __init__(self, image, segments): self.image = image self.segments = segments self.intercept = {} self.local_exp = {} self.local_pred = None
Init function. Args: image: 3d numpy array segments: 2d numpy array, with the output from skimage.segmentation
juraj-google-style
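A hedged construction sketch: the `segments` argument is typically produced by a scikit-image segmentation routine, and the class name `ImageExplanation` is assumed from context and may differ in the actual library:

import numpy as np
from skimage.segmentation import quickshift

image = np.random.rand(64, 64, 3)                        # 3-d array: height x width x channels
segments = quickshift(image, kernel_size=4, ratio=0.5)   # 2-d array of superpixel labels
explanation = ImageExplanation(image, segments)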
def account(transition, direction=Direction.BIDIRECTIONAL): if direction != Direction.BIDIRECTIONAL: return directed_account(transition, direction) return Account(directed_account(transition, Direction.CAUSE) + directed_account(transition, Direction.EFFECT))
Return the set of all causal links for a |Transition|. Args: transition (Transition): The transition of interest. Keyword Args: direction (Direction): By default the account contains actual causes and actual effects.
juraj-google-style
def clone(self, spec=None, **overrides): settings = dict(self.get_param_values(), **overrides) if (spec is None): spec = (self.name, overrides.get('label', self.label)) if (('label' in overrides) and isinstance(spec, basestring)): spec = (spec, overrides['label']) elif (('label' in overrides) and isinstance(spec, tuple)): if (overrides['label'] != spec[1]): self.param.warning('Using label as supplied by keyword ({!r}), ignoring tuple value {!r}'.format(overrides['label'], spec[1])) spec = (spec[0], overrides['label']) return self.__class__(spec, **{k: v for (k, v) in settings.items() if (k not in ['name', 'label'])})
Clones the Dimension with new parameters Derive a new Dimension that inherits existing parameters except for the supplied, explicit overrides Args: spec (tuple, optional): Dimension tuple specification **overrides: Dimension parameter overrides Returns: Cloned Dimension object
codesearchnet
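A usage sketch assuming this record comes from HoloViews, where `Dimension.clone` is part of the public API:

import holoviews as hv

x = hv.Dimension("distance", label="Distance", unit="m")
x_km = x.clone(unit="km", label="Distance (km)")
print(x_km.name, x_km.label, x_km.unit)  # distance Distance (km) km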
async def send_with_attachments(subject, message, filepaths, config): email_ = MIMEMultipart() email_.attach(MIMEText(message)) email_["Subject"] = subject email_["From"] = get_attribute_from_config(config, EMAIL_SECTION_KEY, USER_KEY) email_["To"] = get_attribute_from_config(config, EMAIL_SECTION_KEY, RECEIVER_KEY) _attach_files(filepaths, email_) await _send_email(email_, config)
Send an email from the user (a Gmail account) to the receiver. Args: subject (str): Subject of the email. message (str): A message. filepaths (list(str)): Filepaths to files to be attached. config (defaultdict): A defaultdict holding the email configuration (user and receiver).
juraj-google-style
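The attachment step that `_attach_files` presumably performs can be sketched with the standard library alone; this is a generic MIME example, not the helper's actual implementation:

from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart()
msg.attach(MIMEText("See the attached report."))
msg["Subject"] = "Weekly report"

with open("report.pdf", "rb") as fh:  # path is illustrative
    part = MIMEApplication(fh.read(), Name="report.pdf")
part["Content-Disposition"] = 'attachment; filename="report.pdf"'
msg.attach(part)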
async def _pb_request(self, endpoint, request_pb, response_pb): logger.debug('Sending Protocol Buffer request %s:\n%s', endpoint, request_pb) res = (await self._base_request('https: try: response_pb.ParseFromString(base64.b64decode(res.body)) except binascii.Error as e: raise exceptions.NetworkError('Failed to decode base64 response: {}'.format(e)) except google.protobuf.message.DecodeError as e: raise exceptions.NetworkError('Failed to decode Protocol Buffer response: {}'.format(e)) logger.debug('Received Protocol Buffer response:\n%s', response_pb) status = response_pb.response_header.status if (status != hangouts_pb2.RESPONSE_STATUS_OK): description = response_pb.response_header.error_description raise exceptions.NetworkError("Request failed with status {}: '{}'".format(status, description))
Send a Protocol Buffer formatted chat API request. Args: endpoint (str): The chat API endpoint to use. request_pb: The request body as a Protocol Buffer message. response_pb: The response body as a Protocol Buffer message. Raises: NetworkError: If the request fails.
codesearchnet