Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def open_file(cls, filename: str, response: BaseResponse, mode='wb+'):
    _logger.debug('Saving file to {0}, mode={1}.', filename, mode)
    dir_path = os.path.dirname(filename)
    if dir_path and not os.path.exists(dir_path):
        os.makedirs(dir_path)
    response.body = Body(open(filename, mode))
Open a file object on to the Response Body. Args: filename: The path where the file is to be saved response: Response mode: The file mode This function will create the directories if they do not exist.
codesearchnet
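A minimal standalone sketch of the directory-creation behaviour in `open_file` above, using a hypothetical path (the library's `BaseResponse`/`Body` types are not needed for this part):

```python
import os

filename = "downloads/reports/2020/summary.pdf"  # hypothetical path
dir_path = os.path.dirname(filename)
if dir_path and not os.path.exists(dir_path):
    os.makedirs(dir_path)          # creates downloads/reports/2020
fh = open(filename, "wb+")         # file object that would back the response body
fh.close()
```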
def _check_consistent_returns(self, node):
    explicit_returns = [_node for _node in self._return_nodes[node.name]
                        if _node.value is not None]
    if not explicit_returns:
        return
    if (len(explicit_returns) == len(self._return_nodes[node.name])
            and self._is_node_return_ended(node)):
        return
    self.add_message('inconsistent-return-statements', node=node)
Check that all return statements inside a function are consistent. Return statements are consistent if: - all returns are explicit and if there is no implicit return; - all returns are empty and if there is, possibly, an implicit return. Args: node (astroid.FunctionDef): the function holding the return statements.
codesearchnet
def lattice_2_lmpbox(lattice, origin=(0, 0, 0)): (a, b, c) = lattice.abc (xlo, ylo, zlo) = origin xhi = (a + xlo) m = lattice.matrix xy = np.dot(m[1], (m[0] / a)) yhi = (np.sqrt(((b ** 2) - (xy ** 2))) + ylo) xz = np.dot(m[2], (m[0] / a)) yz = ((np.dot(m[1], m[2]) - (xy * xz)) / (yhi - ylo)) zhi = (np.sqrt((((c ** 2) - (xz ** 2)) - (yz ** 2))) + zlo) tilt = (None if lattice.is_orthogonal else [xy, xz, yz]) rot_matrix = np.linalg.solve([[(xhi - xlo), 0, 0], [xy, (yhi - ylo), 0], [xz, yz, (zhi - zlo)]], m) bounds = [[xlo, xhi], [ylo, yhi], [zlo, zhi]] symmop = SymmOp.from_rotation_and_translation(rot_matrix, origin) return (LammpsBox(bounds, tilt), symmop)
Converts a lattice object to LammpsBox, and calculates the symmetry operation used. Args: lattice (Lattice): Input lattice. origin: A (3,) array/list of floats setting lower bounds of simulation box. Defaults to (0, 0, 0). Returns: LammpsBox, SymmOp
codesearchnet
def expected_error(self, expected: str) -> str:
    if self.finished:
        return super().expected_error(expected)
    else:
        line_index, character_index, line, pointer = self.current_line()
        return 'Expected {} but found {}\nLine {}, character {}\n\n{}{}'.format(
            expected, repr(self.next_token()), line_index, character_index,
            line, pointer)
Generate a basic error to include the current state. A parser can supply only a representation of what it is expecting to this method and the reader will provide the context, including the line and character positions. Args: expected: A representation of what the parser is currently expecting Returns: A full error message
juraj-google-style
def get(self, key, default_value=__NoDefaultSpecified__):
    os_env_string = ConfigReader.ENV_PREFIX + key
    os_env_string = os_env_string.replace('.', '_')
    if os.getenv(os_env_string) is not None:
        return os.getenv(os_env_string)
    for data_map in self._dataMaps:
        try:
            if '.' in key:
                namespaces = key.split('.')
                temp_var = data_map
                for name in namespaces:
                    temp_var = temp_var[name]
                return temp_var
            else:
                value = data_map[key]
                return value
        except (AttributeError, TypeError, KeyError):
            pass
    if default_value == self.__NoDefaultSpecified__:
        raise KeyError(u("Key '{0}' does not exist").format(key))
    else:
        return default_value
Gets the value from the yaml config based on the key. No type casting is performed, any type casting should be performed by the caller. Args: key (str) - Config setting key. Kwargs: default_value - Default value to return if config is not specified. Returns: Returns value stored in config file.
codesearchnet
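A rough sketch of the lookup order implemented by `get` above, assuming a hypothetical `ENV_PREFIX` of `"APP_"`: an environment variable named `APP_db_host` overrides the dotted key `db.host`, which is otherwise resolved by walking nested dicts in order.

```python
import os

ENV_PREFIX = "APP_"  # assumption; the real prefix comes from ConfigReader.ENV_PREFIX
data_maps = [{"db": {"host": "localhost", "port": 5432}}]

def lookup(key, default=None):
    env_name = (ENV_PREFIX + key).replace(".", "_")
    if os.getenv(env_name) is not None:          # environment wins
        return os.getenv(env_name)
    for data_map in data_maps:                   # then each config map, in order
        try:
            value = data_map
            for part in key.split("."):
                value = value[part]
            return value
        except (KeyError, TypeError):
            pass
    return default

print(lookup("db.host"))   # -> "localhost" unless APP_db_host is set
```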
def load_template(filename):
    template_file = os.path.join(PKG_DIR, 'templates', filename)
    with open(template_file) as fp:
        return fp.read()
Load template from file. The templates are part of the package and must be included as ``package_data`` in project ``setup.py``. Args: filename (str): The template path. Relative to `peltak` package directory. Returns: str: The content of the chosen template.
codesearchnet
def _add_work_if_necessary(self, timers_fired): if timers_fired: return if self._is_executing(): return for applied_ptransform in self._executor.all_nodes: if not self._executor.evaluation_context.is_done(applied_ptransform): pending_bundles = self._executor.node_to_pending_bundles.get(applied_ptransform, []) for bundle in pending_bundles: self._executor.schedule_consumption(applied_ptransform, bundle, [], self._executor.default_completion_callback) self._executor.node_to_pending_bundles[applied_ptransform] = []
Adds more work from the roots if pipeline requires more input. If all active TransformExecutors are in a blocked state, add more work from root nodes that may have additional work. This ensures that if a pipeline has elements available from the root nodes it will add those elements when necessary. Args: timers_fired: True if any timers fired prior to this call.
github-repos
def get_contrib_features(project_root):
    project = Project(project_root)
    contrib = project._resolve('.features.contrib')
    return _get_contrib_features(contrib)
Get contributed features for a project at project_root For a project ``foo``, walks modules within the ``foo.features.contrib`` subpackage. A single object that is an instance of ``ballet.Feature`` is imported if present in each module. The resulting ``Feature`` objects are collected. Args: project_root (str, path-like): Path to project root Returns: List[ballet.Feature]: list of Feature objects
codesearchnet
def _code_search(query, github_user=None): github_client = temple.utils.GithubClient() headers = {'Accept': 'application/vnd.github.v3.text-match+json'} resp = github_client.get('/search/code', params={'q': query, 'per_page': 100}, headers=headers) if resp.status_code == requests.codes.unprocessable_entity and github_user: raise temple.exceptions.InvalidGithubUserError( 'Invalid Github user or org - "{}"'.format(github_user)) resp.raise_for_status() resp_data = resp.json() repositories = collections.defaultdict(dict) while True: repositories.update({ 'git@github.com:{}.git'.format(repo['repository']['full_name']): repo['repository'] for repo in resp_data['items'] }) next_url = _parse_link_header(resp.headers).get('next') if next_url: resp = requests.get(next_url, headers=headers) resp.raise_for_status() resp_data = resp.json() else: break return repositories
Performs a Github API code search Args: query (str): The query sent to Github's code search github_user (str, optional): The Github user being searched in the query string Returns: dict: A dictionary of repository information keyed on the git SSH url Raises: `InvalidGithubUserError`: When ``github_user`` is invalid
juraj-google-style
def lstm(inputs, sequence_length, hparams, train, name, initial_state=None): layers = [_dropout_lstm_cell(hparams, train) for _ in range(hparams.num_hidden_layers)] with tf.variable_scope(name): return tf.nn.dynamic_rnn(tf.nn.rnn_cell.MultiRNNCell(layers), inputs, sequence_length, initial_state=initial_state, dtype=tf.float32, time_major=False)
Adds a stack of LSTM layers on top of input. Args: inputs: The input `Tensor`, shaped `[batch_size, time_steps, hidden_size]`. sequence_length: Lengths of the actual input sequence, excluding padding; a `Tensor` shaped `[batch_size]`. hparams: HParams; hyperparameters. train: bool; `True` when constructing training graph to enable dropout. name: string; Create variable names under this scope. initial_state: tuple of `LSTMStateTuple`s; the initial state of each layer. Returns: A tuple (outputs, states), where: outputs: The output `Tensor`, shaped `[batch_size, time_steps, hidden_size]`. states: A tuple of `LSTMStateTuple`s; the final state of each layer. Bidirectional LSTM returns a concatenation of last forward and backward state, reduced to the original dimensionality.
codesearchnet
def movies_box_office(self, **kwargs):
    path = self._get_path('movies_box_office')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
Gets the top box office earning movies from the API. Sorted by most recent weekend gross ticket sales. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def text_array_to_html(text_arr):
    if not text_arr.shape:
        return plugin_util.markdown_to_safe_html(np.asscalar(text_arr))
    warning = ''
    if len(text_arr.shape) > 2:
        warning = plugin_util.markdown_to_safe_html(
            WARNING_TEMPLATE % len(text_arr.shape))
        text_arr = reduce_to_2d(text_arr)
    html_arr = [plugin_util.markdown_to_safe_html(x)
                for x in text_arr.reshape(-1)]
    html_arr = np.array(html_arr).reshape(text_arr.shape)
    return warning + make_table(html_arr)
Take a numpy.ndarray containing strings, and convert it into html. If the ndarray contains a single scalar string, that string is converted to html via our sanitized markdown parser. If it contains an array of strings, the strings are individually converted to html and then composed into a table using make_table. If the array contains dimensionality greater than 2, all but two of the dimensions are removed, and a warning message is prefixed to the table. Args: text_arr: A numpy.ndarray containing strings. Returns: The array converted to html.
codesearchnet
def attach_profile_to_role(client, role_name='forrest_unicorn_role', profile_name='forrest_unicorn_profile'): current_instance_profiles = resource_action(client, action='list_instance_profiles_for_role', log_format='Found Instance Profiles for %(RoleName)s.', RoleName=role_name)['InstanceProfiles'] for profile in current_instance_profiles: if (profile['InstanceProfileName'] == profile_name): LOG.info('Found Instance Profile attached to Role: %s -> %s', profile_name, role_name) break else: for remove_profile in current_instance_profiles: resource_action(client, action='remove_role_from_instance_profile', log_format='Removed Instance Profile from Role: %(InstanceProfileName)s -> %(RoleName)s', InstanceProfileName=remove_profile['InstanceProfileName'], RoleName=role_name) resource_action(client, action='add_role_to_instance_profile', log_format='Added Instance Profile to Role: %(InstanceProfileName)s -> %(RoleName)s', InstanceProfileName=profile_name, RoleName=role_name) return True
Attach an IAM Instance Profile _profile_name_ to Role _role_name_. Args: role_name (str): Name of Role. profile_name (str): Name of Instance Profile. Returns: True upon successful completion.
codesearchnet
def _get_all_trackables(root, exclude_set): all_trackables = trackable_view.TrackableView(root=root).descendants() trackable_index = 0 while trackable_index < len(all_trackables) and exclude_set: if all_trackables[trackable_index] in exclude_set: exclude_set.discard(all_trackables[trackable_index]) all_trackables.pop(trackable_index) else: trackable_index += 1 def _trackable_needs_to_be_saved(obj): if hasattr(obj, '__dict__'): if '_serialize_to_tensors' in obj.__dict__ or '_gather_saveables_for_checkpoint' in obj.__dict__ or '_copy_trackable_to_cpu' in obj.__dict__: return True for t in type(obj).mro(): if t is base.Trackable: continue elif '_serialize_to_tensors' in t.__dict__ or '_gather_saveables_for_checkpoint' in t.__dict__ or '_copy_trackable_to_cpu' in t.__dict__: return True return False saveable_trackables = [x for x in all_trackables if _trackable_needs_to_be_saved(x)] return (saveable_trackables, all_trackables)
Return the list of checkpointable trackables dependent on `root`. Args: root: The root trackable from where we get all its dependent trackables. exclude_set: An ObjectIdentitySet of Trackables to exclude before returning. Each element in `exclude_set` is a specific instance of a `Trackable` and appears precisely once in `TrackableView(root).descendants()`. Returns: saveable_trackables: All trackables that are saveable in `all_trackables` (see definition of "saveable" in `_trackable_needs_to_be_saved()`). A subset of `all_trackables`. all_trackables: All trackables returned by `TrackableView`'s `descendants()` after excluding `exclude_set`. A superset of `saveable_trackables`.
github-repos
def load_kegg(self, kegg_id, kegg_organism_code=None, kegg_seq_file=None, kegg_metadata_file=None, set_as_representative=False, download=False, outdir=None, force_rerun=False): if download: if (not outdir): outdir = self.sequence_dir if (not outdir): raise ValueError('Output directory must be specified') if kegg_organism_code: kegg_id = ((kegg_organism_code + ':') + kegg_id) if self.sequences.has_id(kegg_id): if force_rerun: existing = self.sequences.get_by_id(kegg_id) self.sequences.remove(existing) else: log.debug('{}: KEGG ID already present in list of sequences'.format(kegg_id)) kegg_prop = self.sequences.get_by_id(kegg_id) if (not self.sequences.has_id(kegg_id)): kegg_prop = KEGGProp(id=kegg_id, seq=None, fasta_path=kegg_seq_file, txt_path=kegg_metadata_file) if download: kegg_prop.download_seq_file(outdir, force_rerun) kegg_prop.download_metadata_file(outdir, force_rerun) if self.representative_sequence: if (not self.representative_sequence.uniprot): if kegg_prop.equal_to(self.representative_sequence): self.representative_sequence.update(kegg_prop.get_dict(), only_keys=['sequence_path', 'metadata_path', 'kegg', 'description', 'taxonomy', 'id', 'pdbs', 'uniprot', 'seq_record', 'gene_name', 'refseq']) else: log.warning('{}: representative sequence does not match mapped KEGG sequence.'.format(self.id)) self.sequences.append(kegg_prop) if set_as_representative: self.representative_sequence = kegg_prop return self.sequences.get_by_id(kegg_id)
Load a KEGG ID, sequence, and metadata files into the sequences attribute. Args: kegg_id (str): KEGG ID kegg_organism_code (str): KEGG organism code to prepend to the kegg_id if not part of it already. Example: ``eco:b1244``, ``eco`` is the organism code kegg_seq_file (str): Path to KEGG FASTA file kegg_metadata_file (str): Path to KEGG metadata file (raw KEGG format) set_as_representative (bool): If this KEGG ID should be set as the representative sequence download (bool): If the KEGG sequence and metadata files should be downloaded if not provided outdir (str): Where the sequence and metadata files should be downloaded to force_rerun (bool): If ID should be reloaded and files redownloaded Returns: KEGGProp: object contained in the sequences attribute
codesearchnet
def get_attached_bytes_map(meta_graph): result = {} if (ATTACHMENT_COLLECTION_SAVED not in meta_graph.collection_def): return result collection_def = meta_graph.collection_def[ATTACHMENT_COLLECTION_SAVED] if (collection_def.WhichOneof('kind') != 'bytes_list'): raise ValueError(('Internal CollectionDef for attached messages has kind %s, expected bytes_list' % collection_def.WhichOneof('kind'))) attachment = module_attachment_pb2.ModuleAttachment() for value in collection_def.bytes_list.value: attachment.ParseFromString(value) result[attachment.key] = attachment.value return result
Returns the dict of ModuleAttachments stored in `meta_graph`. Args: meta_graph: A MetaGraphDef, as built by SavedModelHandler.add_graph_copy() from some graph. Returns: A dict, containing the `(key, bytes)` items passed to `attach_bytes()` when the graph had been built. Raises: ValueError: if `meta_graph` is malformed.
codesearchnet
def parse_napp(napp_id):
    regex = '([a-zA-Z][a-zA-Z0-9_]{2,})/([a-zA-Z][a-zA-Z0-9_]{2,}):?(.+)?'
    compiled_regex = re.compile(regex)
    matched = compiled_regex.fullmatch(napp_id)
    if not matched:
        msg = '"{}" NApp has not the form username/napp_name[:version].'
        raise KytosException(msg.format(napp_id))
    return matched.groups()
Convert a napp_id into a tuple with username, napp name and version. Args: napp_id: String with the form 'username/napp[:version]' (version is optional). If no version is found, it will be None. Returns: tuple: A tuple with (username, napp, version) Raises: KytosException: If a NApp does not have the form _username/name_.
codesearchnet
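A quick check of the regex used by `parse_napp` above, with a hypothetical NApp id:

```python
import re

regex = '([a-zA-Z][a-zA-Z0-9_]{2,})/([a-zA-Z][a-zA-Z0-9_]{2,}):?(.+)?'
print(re.fullmatch(regex, 'kytos/of_core:1.2.0').groups())  # ('kytos', 'of_core', '1.2.0')
print(re.fullmatch(regex, 'kytos/of_core').groups())        # ('kytos', 'of_core', None)
```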
def createTemplate(data):
    conn = Qubole.agent()
    return conn.post(Template.rest_entity_path, data)
Create a new template. Args: `data`: json data required for creating a template Returns: Dictionary containing the details of the template with its ID.
juraj-google-style
def average_precision(truth, recommend):
    if len(truth) == 0:
        if len(recommend) == 0:
            return 1.
        return 0.
    tp = accum = 0.
    for n in range(recommend.size):
        if recommend[n] in truth:
            tp += 1.
            accum += tp / (n + 1.)
    return accum / truth.size
Average Precision (AP). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: AP.
juraj-google-style
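A worked example of the AP computation in `average_precision` above, with hypothetical recommendation lists:

```python
import numpy as np

truth = np.array([1, 2, 3])
recommend = np.array([1, 4, 2, 5])
# hits at ranks 1 and 3: precisions 1/1 and 2/3, so AP = (1.0 + 0.6667) / 3 ≈ 0.556
print(average_precision(truth, recommend))
```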
def HasDateExceptionOn(self, date, exception_type=_EXCEPTION_TYPE_ADD):
    if date in self.date_exceptions:
        return exception_type == self.date_exceptions[date][0]
    return False
Test if this service period has a date exception of the given type. Args: date: a string of form "YYYYMMDD" exception_type: the exception type the date should have. Defaults to _EXCEPTION_TYPE_ADD Returns: True iff this service has service exception of specified type at date.
codesearchnet
def get_replacement_inputs(self, applied_ptransform): return tuple(applied_ptransform.inputs) + tuple((side_input.pvalue for side_input in applied_ptransform.side_inputs))
Provides inputs that will be passed to the replacement PTransform. Args: applied_ptransform: Original AppliedPTransform containing the PTransform to be replaced. Returns: An iterable of PValues that will be passed to the expand() method of the replacement PTransform.
github-repos
def squeeze(x, axis=None):
    if any_symbolic_tensors((x,)):
        return Squeeze(axis=axis).symbolic_call(x)
    return backend.numpy.squeeze(x, axis=axis)
Remove axes of length one from `x`. Args: x: Input tensor. axis: Select a subset of the entries of length one in the shape. Returns: The input tensor with all or a subset of the dimensions of length 1 removed.
github-repos
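For eager (non-symbolic) inputs the `squeeze` call above defers to the backend's NumPy-style `squeeze`; a plain NumPy illustration of that behaviour:

```python
import numpy as np

x = np.zeros((1, 3, 1, 2))
print(np.squeeze(x).shape)          # (3, 2) - all length-1 axes removed
print(np.squeeze(x, axis=0).shape)  # (3, 1, 2) - only the selected axis removed
```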
def __init__(
        self, session, storage_type=definitions.STORAGE_TYPE_SESSION, task=None):
    super(FakeStorageWriter, self).__init__(
        session, storage_type=storage_type, task=task)
    self._event_data = {}
    self._event_sources = []
    self._event_tags = []
    self._events = []
    self._warnings = []
    self._is_open = False
    self._task_storage_writers = {}
    self.analysis_reports = []
    self.session_completion = None
    self.session_start = None
    self.task_completion = None
    self.task_start = None
Initializes a storage writer object. Args: session (Session): session the storage changes are part of. storage_type (Optional[str]): storage type. task(Optional[Task]): task.
juraj-google-style
def get_registered_name(obj):
    if obj in GLOBAL_CUSTOM_NAMES:
        return GLOBAL_CUSTOM_NAMES[obj]
    else:
        return obj.__name__
Returns the name registered to an object within the Keras framework. This function is part of the Keras serialization and deserialization framework. It maps objects to the string names associated with those objects for serialization/deserialization. Args: obj: The object to look up. Returns: The name associated with the object, or the default Python name if the object is not registered.
github-repos
def tree_type_checker(*ref): ref = tuple(ref) if (NeuriteType.all in ref): def check_tree_type(_): 'Always returns true' return True else: def check_tree_type(tree): 'Check whether tree has the same type as ref\n\n Returns:\n True if ref in the same type as tree.type or ref is NeuriteType.all\n ' return (tree.type in ref) return check_tree_type
Tree type checker functor Returns: Functor that takes a tree, and returns true if that tree matches any of NeuriteTypes in ref Ex: >>> from neurom.core.types import NeuriteType, tree_type_checker >>> tree_filter = tree_type_checker(NeuriteType.axon, NeuriteType.basal_dendrite) >>> nrn.i_neurites(tree.isegment, tree_filter=tree_filter)
codesearchnet
def add_dimension(self, dimension, dim_pos, dim_val, vdim=False, **kwargs): if isinstance(dimension, (util.basestring, tuple)): dimension = Dimension(dimension) if (dimension.name in self.kdims): raise Exception('{dim} dimension already defined'.format(dim=dimension.name)) if vdim: dims = self.vdims[:] dims.insert(dim_pos, dimension) dimensions = dict(vdims=dims) dim_pos += self.ndims else: dims = self.kdims[:] dims.insert(dim_pos, dimension) dimensions = dict(kdims=dims) if (issubclass(self.interface, ArrayInterface) and (np.asarray(dim_val).dtype != self.data.dtype)): element = self.clone(datatype=[default_datatype]) data = element.interface.add_dimension(element, dimension, dim_pos, dim_val, vdim) else: data = self.interface.add_dimension(self, dimension, dim_pos, dim_val, vdim) return self.clone(data, **dimensions)
Adds a dimension and its values to the Dataset Requires the dimension name or object, the desired position in the key dimensions and a key value scalar or array of values, matching the length or shape of the Dataset. Args: dimension: Dimension or dimension spec to add dim_pos (int): Integer index to insert dimension at dim_val (scalar or ndarray): Dimension value(s) to add vdim: Disabled, this type does not have value dimensions **kwargs: Keyword arguments passed to the cloned element Returns: Cloned object containing the new dimension
codesearchnet
def _PackArgumentsHelper(self, elem, data, set_type_attrs): if self._packer: data = self._packer.Pack(data, self._version) if isinstance(data, dict): type_override = data.get('xsi_type') if type_override: elem_type = self._DiscoverElementTypeFromLocalname(type_override) else: elem_type = elem.type data_formatted = data.iteritems() packed_result = self._CreateComplexTypeFromData( elem_type, type_override is not None, data_formatted, set_type_attrs) elif isinstance(data, zeep.xsd.CompoundValue): elem_type = data._xsd_type data_formatted = zip(dir(data), [data[k] for k in dir(data)]) packed_result = self._CreateComplexTypeFromData( elem_type, False, data_formatted, set_type_attrs) elif isinstance(data, (list, tuple)): packed_result = [self._PackArgumentsHelper(elem, item, set_type_attrs) for item in data] else: if elem.type.name == 'base64Binary' and self._IsBase64(data): _logger.warn('Passing data to base64 field %s that may ' 'already be encoded. Do not pre-encode base64 ' 'fields with zeep.', elem.name) packed_result = data return packed_result
Recursive helper for PackArguments. Args: elem: The element type we are creating. data: The data to instantiate it with. set_type_attrs: A boolean indicating whether or not attributes that end in .Type should be set. This is only necessary for batch job service. Returns: An instance of type 'elem'.
juraj-google-style
def listen(self):
    logger.info("Listening on port " + str(self.listener.listen_port))
    self.listener.listen()
Starts the client listener to listen for server responses. Args: None Returns: None
juraj-google-style
def get(self, webfont_name, webfont_settings): try: webfont_settings = extend_webfont_settings(webfont_settings) except IcomoonSettingsError as e: msg = "Invalid webfont settings for '{}': {}" self.errors[webfont_name] = msg.format(webfont_name, e.value) return filepath = os.path.join(webfont_settings['fontdir_path'], self.manifest_filename) if os.path.exists(filepath): self.manifests[webfont_name] = self.parse_manifest(filepath) else: msg = 'Filepath for webfont <strong>{name}</strong> does not exists: <code>{filepath}</code>' self.errors[webfont_name] = msg.format(name=webfont_name, filepath=filepath)
Get a manifest file, parse and store it. Args: webfont_name (string): Webfont key name. Used to store manifest and potentially its parser error. webfont_settings (dict): Webfont settings (an item value from ``settings.ICOMOON_WEBFONTS``).
codesearchnet
def validate_email_to_link(email, raw_email=None, message_template=None, ignore_existing=False): raw_email = (raw_email if (raw_email is not None) else email) message_template = (message_template if (message_template is not None) else ValidationMessages.INVALID_EMAIL) try: validate_email(email) except ValidationError: raise ValidationError(message_template.format(argument=raw_email)) existing_record = EnterpriseCustomerUser.objects.get_link_by_email(email) if (existing_record and (not ignore_existing)): raise ValidationError(ValidationMessages.USER_ALREADY_REGISTERED.format(email=email, ec_name=existing_record.enterprise_customer.name)) return (existing_record or False)
Validate email to be linked to Enterprise Customer. Performs two checks: * Checks that email is valid * Checks that it is not already linked to any Enterprise Customer Arguments: email (str): user email to link raw_email (str): raw value as it was passed by user - used in error message. message_template (str): Validation error template string. ignore_existing (bool): If True to skip the check for an existing Enterprise Customer Raises: ValidationError: if email is invalid or already linked to Enterprise Customer. Returns: bool: Whether or not there is an existing record with the same email address.
codesearchnet
def Field( dagster_type, default_value=FIELD_NO_DEFAULT_PROVIDED, is_optional=INFER_OPTIONAL_COMPOSITE_FIELD, is_secret=False, description=None, ): config_type = resolve_to_config_type(dagster_type) if not config_type: raise DagsterInvalidDefinitionError( ( 'Attempted to pass {value_repr} to a Field that expects a valid ' 'dagster type usable in config (e.g. Dict, NamedDict, Int, String et al).' ).format(value_repr=repr(dagster_type)) ) return FieldImpl( config_type=resolve_to_config_type(dagster_type), default_value=default_value, is_optional=is_optional, is_secret=is_secret, description=description, )
The schema for configuration data that describes the type, optionality, defaults, and description. Args: dagster_type (DagsterType): A ``DagsterType`` describing the schema of this field, ie `Dict({'example': Field(String)})` default_value (Any): A default value to use that respects the schema provided via dagster_type is_optional (bool): Whether the presence of this field is optional description (str):
juraj-google-style
def batch_decode(self, sequences, **kwargs): return super().batch_decode(sequences, **kwargs)
Convert a list of lists of token ids into a list of strings by calling decode. Args: sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. If `None`, will default to `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`). use_source_tokenizer (`bool`, *optional*, defaults to `False`): Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence problems). kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `List[str]`: The list of decoded sentences.
github-repos
def get_size(self, value=None):
    if isinstance(value, type(self)):
        return value.get_size()
    return 2 + self.length
Return struct size. Returns: int: Returns the struct size based on inner attributes.
codesearchnet
def sin(cls, x: 'TensorFluent') -> 'TensorFluent': return cls._unary_op(x, tf.sin, tf.float32)
Returns a TensorFluent for the sin function. Args: x: The input fluent. Returns: A TensorFluent wrapping the sin function.
juraj-google-style
def from_dir(cls, top, workdir=None, name=None, manager=None, max_depth=2): from .flows import Flow def find_pickles(dirtop): paths = [] for (dirpath, dirnames, filenames) in os.walk(dirtop): fnames = [f for f in filenames if (f == Flow.PICKLE_FNAME)] paths.extend([os.path.join(dirpath, f) for f in fnames]) return paths if is_string(top): pickle_paths = find_pickles(top) else: pickle_paths = [] for p in top: pickle_paths.extend(find_pickles(p)) workdir = ('batch' if (workdir is None) else workdir) new = cls(workdir, name=name, manager=manager) for path in pickle_paths: new.add_flow(path) return new
Find all flows located within the directory `top` and build the `BatchLauncher`. Args: top: Top level directory or list of directories. workdir: Batch workdir. name: manager: :class:`TaskManager` object. If None, the manager is read from `manager.yml` In this case the YAML file must provide the entry `batch_manager` that defines the queue adapter used to submit the batch script. max_depth: Search in directory only if it is N or fewer levels below top
codesearchnet
def setY(self,Y,standardize=False): assert Y.shape[0]==self.N, 'CVarianceDecomposition:: Incompatible shape' assert Y.shape[1]==self.P, 'CVarianceDecomposition:: Incompatible shape' if standardize: Y=preprocess.standardize(Y) assert (~(SP.isnan(Y).any(axis=1))==self.Iok).all(), 'CVarianceDecomposition:: pattern of missing values needs to match Y given at initialization' self.Y = Y self.vd.setPheno(Y) self.optimum = None self.cache['Sigma'] = None self.cache['Hessian'] = None self.cache['Lparams'] = None self.cache['paramsST']= None
Set phenotype matrix Args: Y: phenotype matrix [N, P] standardize: if True, phenotype is standardized (zero mean, unit variance)
juraj-google-style
def write_layout(_path):
    path.mkdir_uchroot("/etc/portage/metadata")
    path.mkfile_uchroot("/etc/portage/metadata/layout.conf")
    with open(_path, 'w') as layoutconf:
        # The layout.conf content appears to have been elided from this snippet;
        # `lines` must hold the desired configuration text before it is written.
        lines = ''  # placeholder for the elided layout.conf content
        layoutconf.write(lines)
Write a valid gentoo layout file to :path:. Args: path - The output path of the layout.conf
juraj-google-style
def AddDatastore(self, urn):
    if urn not in self._datastores:
        self._datastores.add(urn)
        return True
    return False
Adds a datastore URN as a source. Args: urn: an RDF URN value of the datastore. Returns: True if the datastore is not an already existing source.
juraj-google-style
def from_specification(specification, env_prefix=None, separator='.', parent_names=None): items = {} for item_name, item_info in six.iteritems(specification): names = copy.copy(parent_names) if parent_names else [] items[item_name] = _generate_item(item_name, item_info, env_prefix, separator, names) return items
Used to create YapconfItems from a specification dictionary. Args: specification (dict): The specification used to initialize ``YapconfSpec`` env_prefix (str): Prefix to add to environment names separator (str): Separator for nested items parent_names (list): Parents names of any given item Returns: A dictionary of names to YapconfItems
juraj-google-style
def forward(self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_txt_shape = input_ids.size() elif inputs_embeds is not None: input_txt_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device modal_embeddings = self.modal_encoder(input_modal, start_token=modal_start_tokens, end_token=modal_end_tokens, position_ids=modal_position_ids, token_type_ids=modal_token_type_ids) input_modal_shape = modal_embeddings.size()[:-1] if token_type_ids is None: token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device) txt_embeddings = self.transformer.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1) input_shape = embedding_output.size()[:-1] if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) else: attention_mask = torch.cat([torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(input_shape, device=device) else: encoder_attention_mask = torch.cat([torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.transformer.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.transformer.pooler(sequence_output) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
Returns: Examples: ```python # For example purposes. Not runnable. transformer = BertModel.from_pretrained("google-bert/bert-base-uncased") encoder = ImageEncoder(args) mmbt = MMBTModel(config, transformer, encoder) ```
github-repos
def __move(self, current_pos):
    if self.__move_range is not None:
        next_pos = np.random.randint(current_pos - self.__move_range,
                                     current_pos + self.__move_range)
        if next_pos < 0:
            next_pos = 0
        elif next_pos >= self.var_arr.shape[0] - 1:
            next_pos = self.var_arr.shape[0] - 1
        return next_pos
    else:
        next_pos = np.random.randint(self.var_arr.shape[0] - 1)
        return next_pos
Move in the feature map. Args: current_pos: The current position. Returns: The next position.
codesearchnet
def getMonthsBuffer(self, direction):
    if direction == ReadMonths.kWhReverse:
        return self.m_rev_mons
    return self.m_mons
Get the months tariff SerialBlock for meter. Args: direction (int): A :class:`~ekmmeters.ReadMonths` value. Returns: SerialBlock: Requested months tariffs buffer.
juraj-google-style
def lookupSpatialReferenceID(cls, directory, filename):
    path = os.path.join(directory, filename)
    with open(path, 'r') as f:
        srid = lookupSpatialReferenceID(f.read())
        return srid
Look up spatial reference system using the projection file. Args: directory (str): filename (str): Return: int: Spatial Reference ID
codesearchnet
def GetCommandLineArguments(self):
    command_line_arguments = sys.argv
    if not command_line_arguments:
        return ''
    if isinstance(command_line_arguments[0], py2to3.BYTES_TYPE):
        encoding = sys.stdin.encoding
        if not encoding:
            encoding = self.preferred_encoding
        try:
            command_line_arguments = [
                argument.decode(encoding) for argument in command_line_arguments]
        except UnicodeDecodeError:
            logger.error(
                'Unable to properly read command line input due to encoding '
                'error. Replacing non Basic Latin (C0) characters with "?" or '
                '"\\ufffd".')
            command_line_arguments = [
                argument.decode(encoding, errors='replace')
                for argument in command_line_arguments]
    return ' '.join(command_line_arguments)
Retrieves the command line arguments. Returns: str: command line arguments.
codesearchnet
def GetTableView(cls, format_type, column_names=None, title=None):
    view_class = cls._TABLE_VIEW_FORMAT_CLASSES.get(format_type, None)
    if not view_class:
        raise ValueError('Unsupported format type: {0:s}'.format(format_type))
    return view_class(column_names=column_names, title=title)
Retrieves a table view. Args: format_type (str): table view format type. column_names (Optional[list[str]]): column names. title (Optional[str]): title. Returns: BaseTableView: table view. Raises: ValueError: if the format type is not supported.
codesearchnet
def parse(file_or_string):
    from mysqlparse.grammar.sql_file import sql_file_syntax

    if hasattr(file_or_string, 'read') and hasattr(file_or_string.read, '__call__'):
        return sql_file_syntax.parseString(file_or_string.read())
    elif isinstance(file_or_string, six.string_types):
        return sql_file_syntax.parseString(file_or_string)
    else:
        raise TypeError("Expected file-like or string object, but got '{type_name}' instead.".format(
            type_name=type(file_or_string).__name__,
        ))
Parse a file-like object or string. Args: file_or_string (file, str): File-like object or string. Returns: ParseResults: instance of pyparsing parse results.
juraj-google-style
def List(self, request, global_params=None):
    config = self.GetMethodConfig('List')
    return self._RunMethod(config, request, global_params=global_params)
Lists snapshots. Args: request: (DataflowProjectsSnapshotsListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ListSnapshotsResponse) The response message.
github-repos
def consume(self, msg):
    msg['body'] = crypto.sign(msg['body'], **self.hub.config)
    super(SigningRelayConsumer, self).consume(msg)
Sign the message prior to sending the message. Args: msg (dict): The message to sign and relay.
juraj-google-style
def is_legal_object(self, data_type: str) -> bool:
    data_type = str(data_type)
    ranges = self.included_ranges()
    return (not ranges
            or data_type in ranges
            or (self.super_properties()
                and any(x.is_legal_object(data_type) for x in self.super_properties())))
Do data_type validation according to the rules of the XML xsd schema. Args: data_type: Returns:
codesearchnet
def list_nsgs_all(access_token, subscription_id):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/Microsoft.Network/',
                        'networkSecurityGroups?api-version=', NETWORK_API])
    return do_get(endpoint, access_token)
List all network security groups in a subscription. Args: access_token (str): a valid Azure Authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of all network security groups in a subscription.
juraj-google-style
def files_from_list(*paths):
    ret = []
    for path in paths:
        if isfile(path):
            ret.append(abspath(path))
        elif isdir(path):
            ret += [f for f in ls(path, abspaths=True, recursive=True) if isfile(f)]
        else:
            raise File404(path)
    return ret
Return a list of all file paths from a list of files or directories. For each path in the input: if it is a file, return it; if it is a directory, return a list of files in the directory. Arguments: paths (list of str): List of file and directory paths. Returns: list of str: Absolute file paths. Raises: File404: If any of the paths do not exist.
juraj-google-style
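A standard-library-only sketch of the same expansion done by `files_from_list` above; the original relies on helpers such as `ls`, `isfile`, `abspath`, and `File404` from its own package, so those are replaced here with `os`/`os.path` equivalents and a plain `FileNotFoundError`:

```python
import os

def files_from_paths(*paths):
    result = []
    for path in paths:
        if os.path.isfile(path):
            result.append(os.path.abspath(path))
        elif os.path.isdir(path):
            for root, _dirs, files in os.walk(path):   # recursive listing
                result.extend(os.path.abspath(os.path.join(root, f)) for f in files)
        else:
            raise FileNotFoundError(path)
    return result
```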
def handle_unexpected_exception(exc):
    try:
        write_logfile()
        addendum = 'Please see the log file for more information.'
    except IOError:
        addendum = 'Unable to write log file.'
    try:
        message = str(exc)
        return '{}{}{}'.format(message, '\n' if message else '', addendum)
    except Exception:
        return str(exc)
Return an error message and write a log file if logging was not enabled. Args: exc: The unexpected exception. Returns: A message to display to the user concerning the unexpected exception.
codesearchnet
def __init__(self, campfire, data=None):
    super(CampfireEntity, self).__init__(data)
    self._campfire = campfire
    self._connection = None
    if self._campfire:
        self._connection = self._campfire.get_connection()
Initialize. Args: campfire (:class:`Campfire`): Campfire Instance Kwargs: data (dict): Entity data
juraj-google-style
def GetBudget(self, client_customer_id, budget_id): self.client.SetClientCustomerId(client_customer_id) selector = {'fields': ['BudgetId', 'BudgetName', 'BudgetStatus', 'Amount', 'DeliveryMethod', 'BudgetReferenceCount', 'IsBudgetExplicitlyShared'], 'predicates': [{'field': 'BudgetId', 'operator': 'EQUALS', 'values': [budget_id]}]} budgets = self.client.GetService('BudgetService').get(selector) if (int(budgets['totalNumEntries']) > 0): return budgets['entries'][0] else: return None
Return a Budget with the associated budgetId. Args: client_customer_id: str Client Customer Id to which the budget belongs. budget_id: str id of the budget we want to examine. Returns: Budget A Budget data object.
codesearchnet
def configure( self, accountID, **kwargs ): request = Request( 'PATCH', '/v3/accounts/{accountID}/configuration' ) request.set_path_param( 'accountID', accountID ) body = EntityDict() if 'alias' in kwargs: body.set('alias', kwargs['alias']) if 'marginRate' in kwargs: body.set('marginRate', kwargs['marginRate']) request.set_body_dict(body.dict) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} if str(response.status) == "200": if jbody.get('clientConfigureTransaction') is not None: parsed_body['clientConfigureTransaction'] = \ self.ctx.transaction.ClientConfigureTransaction.from_dict( jbody['clientConfigureTransaction'], self.ctx ) if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') elif str(response.status) == "400": if jbody.get('clientConfigureRejectTransaction') is not None: parsed_body['clientConfigureRejectTransaction'] = \ self.ctx.transaction.ClientConfigureRejectTransaction.from_dict( jbody['clientConfigureRejectTransaction'], self.ctx ) if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "403": if jbody.get('clientConfigureRejectTransaction') is not None: parsed_body['clientConfigureRejectTransaction'] = \ self.ctx.transaction.ClientConfigureRejectTransaction.from_dict( jbody['clientConfigureRejectTransaction'], self.ctx ) if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "404": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') else: parsed_body = jbody response.body = parsed_body return response
Set the client-configurable portions of an Account. Args: accountID: Account Identifier alias: Client-defined alias (name) for the Account marginRate: The string representation of a decimal number. Returns: v20.response.Response containing the results from submitting the request
juraj-google-style
def new(arg_name, annotated_with=None):
    if arg_name.startswith(_PROVIDE_PREFIX):
        binding_key_name = arg_name[_PROVIDE_PREFIX_LEN:]
        provider_indirection = provider_indirections.INDIRECTION
    else:
        binding_key_name = arg_name
        provider_indirection = provider_indirections.NO_INDIRECTION
    binding_key = binding_keys.new(binding_key_name, annotated_with)
    return ArgBindingKey(arg_name, binding_key, provider_indirection)
Creates an ArgBindingKey. Args: arg_name: the name of the bound arg annotated_with: an Annotation, or None to create an unannotated arg binding key Returns: a new ArgBindingKey
codesearchnet
def WriteBytes(self, value, unhex=True):
    if unhex:
        try:
            value = binascii.unhexlify(value)
        except binascii.Error:
            pass
    return self.stream.write(value)
Write a `bytes` type to the stream. Args: value (bytes): array of bytes to write to the stream. unhex (bool): (Default) True. Set to unhexlify the stream. Use when the bytes are not raw bytes; i.e. b'aabb' Returns: int: the number of bytes written.
codesearchnet
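The `unhex` handling in `WriteBytes` above silently falls back to writing the raw bytes when the input is not valid hex text; a small illustration of the two cases:

```python
import binascii

print(binascii.unhexlify(b'aabb'))   # b'\xaa\xbb' - hex text is decoded
try:
    binascii.unhexlify(b'\xaa\xbb')  # already-raw bytes raise binascii.Error
except binascii.Error:
    print('not hex, would be written as-is')
```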
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple[int, int]]]=None) -> 'torch.Tensor': class_queries_logits = outputs.class_queries_logits masks_queries_logits = outputs.masks_queries_logits masks_queries_logits = torch.nn.functional.interpolate(masks_queries_logits, size=(384, 384), mode='bilinear', align_corners=False) masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') semantic_segmentation = [] for idx in range(batch_size): resized_logits = torch.nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation
Converts the output of [`Mask2FormerForUniversalSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`Mask2FormerForUniversalSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
github-repos
def _text_checker(job, interval, _interval_set=False, quiet=False, output=sys.stdout): status = job.status() msg = status.value prev_msg = msg msg_len = len(msg) if (not quiet): print(('\r%s: %s' % ('Job Status', msg)), end='', file=output) while (status.name not in ['DONE', 'CANCELLED', 'ERROR']): time.sleep(interval) status = job.status() msg = status.value if (status.name == 'QUEUED'): msg += (' (%s)' % job.queue_position()) if (not _interval_set): interval = max(job.queue_position(), 2) elif (not _interval_set): interval = 2 if (len(msg) < msg_len): msg += (' ' * (msg_len - len(msg))) elif (len(msg) > msg_len): msg_len = len(msg) if ((msg != prev_msg) and (not quiet)): print(('\r%s: %s' % ('Job Status', msg)), end='', file=output) prev_msg = msg if (not quiet): print('', file=output)
A text-based job status checker Args: job (BaseJob): The job to check. interval (int): The interval at which to check. _interval_set (bool): Was interval time set by user? quiet (bool): If True, do not print status messages. output (file): The file like object to write status messages to. By default this is sys.stdout.
codesearchnet
def halted(self):
    result = int(self._dll.JLINKARM_IsHalted())
    if result < 0:
        raise errors.JLinkException(result)
    return result > 0
Returns whether the CPU core was halted. Args: self (JLink): the ``JLink`` instance Returns: ``True`` if the CPU core is halted, otherwise ``False``. Raises: JLinkException: on device errors.
juraj-google-style
def claim(self, unclaimed_file_readers):
    claimed_vcf_readers = []
    for caller in self._callers:
        (unclaimed_file_readers,
         translated_vcf_readers) = caller.claim(unclaimed_file_readers)
        claimed_vcf_readers.extend(translated_vcf_readers)
    return (unclaimed_file_readers, claimed_vcf_readers)
Allows each caller to claim incoming files as they are recognized. Args: unclaimed_file_readers: Usually, all files in the input dir. Returns: A tuple of unclaimed file readers and claimed VcfReaders. The presence of any unclaimed file readers could indicate stray files in the input dir.
codesearchnet
def __setstate__(self, state):
    superstate, localstate = state
    super(_StorageApi, self).__setstate__(superstate)
    self.api_url = localstate['api_url']
Restore state as part of deserialization/unpickling. Args: state: the tuple from a __getstate__ call
juraj-google-style
def read_folder(directory):
    res = []
    for filename in os.listdir(directory):
        with io.open(os.path.join(directory, filename), encoding='utf-8') as f:
            content = f.read()
            res.append(content)
    return res
Read text files in a directory and return them as an array. Args: directory: where the text files are Returns: Array of text
codesearchnet
def reference_value_to_document(reference_value, client):
    parts = reference_value.split(DOCUMENT_PATH_DELIMITER, 5)
    if len(parts) != 6:
        msg = BAD_REFERENCE_ERROR.format(reference_value)
        raise ValueError(msg)
    document = client.document(parts[-1])
    if document._document_path != reference_value:
        msg = WRONG_APP_REFERENCE.format(reference_value, client._database_string)
        raise ValueError(msg)
    return document
Convert a reference value string to a document. Args: reference_value (str): A document reference value. client (~.firestore_v1beta1.client.Client): A client that has a document factory. Returns: ~.firestore_v1beta1.document.DocumentReference: The document corresponding to ``reference_value``. Raises: ValueError: If the ``reference_value`` is not of the expected format: ``projects/{project}/databases/{database}/documents/...``. ValueError: If the ``reference_value`` does not come from the same project / database combination as the ``client``.
codesearchnet
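The shape check in `reference_value_to_document` above relies on a full reference value splitting into exactly six segments; a quick illustration with a hypothetical document path:

```python
reference_value = 'projects/my-project/databases/(default)/documents/users/alovelace'
parts = reference_value.split('/', 5)
print(len(parts))   # 6
print(parts[-1])    # 'users/alovelace' - the document path handed to client.document()
```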
def matvec(self, x, adjoint=False, name='matvec'): with self._name_scope(name): x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x') self._check_input_dtype(x) self_dim = -2 if adjoint else -1 tensor_shape.dimension_at_index(self.shape, self_dim).assert_is_compatible_with(x.shape[-1]) return self._matvec(x, adjoint=adjoint)
Transform [batch] vector `x` with left multiplication: `x --> Ax`. ```python # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] operator = LinearOperator(...) X = ... # shape [..., N], batch vector Y = operator.matvec(X) Y.shape ==> [..., M] Y[..., :] = sum_j A[..., :, j] X[..., j] ``` Args: x: `Tensor` with compatible shape and same `dtype` as `self`. `x` is treated as a [batch] vector meaning for every set of leading dimensions, the last dimension defines a vector. See class docstring for definition of compatibility. adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`. name: A name for this `Op`. Returns: A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
github-repos
def generate(self, cache_root): generator_cwd = os.path.join(cache_root, 'generated', self.vlnv.sanitized_name) generator_input_file = os.path.join(generator_cwd, self.name+'_input.yml') logger.info('Generating ' + str(self.vlnv)) if not os.path.exists(generator_cwd): os.makedirs(generator_cwd) with open(generator_input_file, 'w') as f: f.write(yaml.dump(self.generator_input)) args = [os.path.join(os.path.abspath(self.generator.root), self.generator.command), generator_input_file] if self.generator.interpreter: args[0:0] = [self.generator.interpreter] Launcher(args[0], args[1:], cwd=generator_cwd).run() cores = [] logger.debug("Looking for generated cores in " + generator_cwd) for root, dirs, files in os.walk(generator_cwd): for f in files: if f.endswith('.core'): try: cores.append(Core(os.path.join(root, f))) except SyntaxError as e: w = "Failed to parse generated core file " + f + ": " + e.msg raise RuntimeError(w) logger.debug("Found " + ', '.join(str(c.name) for c in cores)) return cores
Run a parametrized generator Args: cache_root (str): The directory where to store the generated cores Returns: list: Cores created by the generator
juraj-google-style
def run(self, args):
    jlink = self.create_jlink(args)
    mcu = args.name[0].lower()
    if pylink.unlock(jlink, mcu):
        print('Successfully unlocked device!')
    else:
        print('Failed to unlock device!')
Unlocks the target device. Args: self (UnlockCommand): the ``UnlockCommand`` instance args (Namespace): the arguments passed on the command-line Returns: ``None``
codesearchnet
def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: default_to_square = True if 'shortest_edge' in size: size = size['shortest_edge'] default_to_square = False elif 'height' in size and 'width' in size: size = (size['height'], size['width']) else: raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred.
github-repos
def parse_rank_score(rank_score_entry, case_id):
    rank_score = None
    if rank_score_entry:
        for family_info in rank_score_entry.split(','):
            splitted_info = family_info.split(':')
            if case_id == splitted_info[0]:
                rank_score = float(splitted_info[1])
    return rank_score
Parse the rank score Args: rank_score_entry(str): The raw rank score entry case_id(str) Returns: rank_score(float)
juraj-google-style
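A quick example of the entry format that `parse_rank_score` above expects, using hypothetical case ids:

```python
print(parse_rank_score('internal_id-1:12,internal_id-2:-5', 'internal_id-2'))  # -5.0
print(parse_rank_score('internal_id-1:12', 'unknown_case'))                    # None
print(parse_rank_score('', 'internal_id-1'))                                   # None
```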
def reverse_transform_table(self, table, table_meta, missing=None): if (missing is None): missing = self.missing else: self.missing = missing warnings.warn(DEPRECATION_MESSAGE.format('reverse_transform_table'), DeprecationWarning) result = pd.DataFrame(index=table.index) table_name = table_meta['name'] for field in table_meta['fields']: new_column = self._reverse_transform_column(table, field, table_name) if (new_column is not None): result[field['name']] = new_column return result
Transform a `table` back to its original format. Args: table(pandas.DataFrame): Contents of the table to be transformed. table_meta(dict): Metadata for the given table. missing(bool): Whether or not to use the NullTransformer to handle missing values. Returns: pandas.DataFrame: Table in original format.
codesearchnet
def assertAllLess(self, a, comparison_target):
    a, comparison_target = self.evaluate_if_both_tensors(a, comparison_target)
    a = self._GetNdArray(a)
    self.assertLess(np.max(a), comparison_target)
Assert element values are all less than a target value. Args: a: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). comparison_target: The target value of comparison.
github-repos
def _init_tag_params(self, tag, params):
    self._element = tag
    self.params = params
    self._parseTagName()
    self._istag = True
    self._isendtag = False
    self._isnonpairtag = False
    self._element = self.tagToString()
Alternative constructor used when the tag parameters are added to the HTMLElement (HTMLElement(tag, params)). This method just creates string and then pass it to the :meth:`_init_tag`. Args: tag (str): HTML tag as string. params (dict): HTML tag parameters as dictionary.
juraj-google-style
def _get_model_reference(self, model_id): return ModelReference.from_api_repr( {"projectId": self.project, "datasetId": self.dataset_id, "modelId": model_id} )
Constructs a ModelReference. Args: model_id (str): the ID of the model. Returns: google.cloud.bigquery.model.ModelReference: A ModelReference for a model in this dataset.
juraj-google-style
def CopyFromDateTimeString(self, time_string):
    date_time_values = self._CopyDateTimeFromString(time_string)
    year = date_time_values.get('year', 0)
    month = date_time_values.get('month', 0)
    day_of_month = date_time_values.get('day_of_month', 0)
    hours = date_time_values.get('hours', 0)
    minutes = date_time_values.get('minutes', 0)
    seconds = date_time_values.get('seconds', 0)
    self._timestamp = self._GetNumberOfSecondsFromElements(
        year, month, day_of_month, hours, minutes, seconds)
    self.is_local_time = False
Copies a POSIX timestamp from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC.
juraj-google-style
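A usage sketch for the conversion above. The `PosixTime` class name is an assumption about the class defining this method; only the date-string format from the docstring is relied on.

posix_time = PosixTime()  # hypothetical instantiation of the defining class
posix_time.CopyFromDateTimeString('2021-06-01 12:30:45')

# The internal timestamp now holds the number of seconds since the POSIX
# epoch, and is_local_time has been reset to False.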
def set_description(self, name, action, seqno, value=None, default=False, disable=False):
    commands = ['route-map %s %s %s' % (name, action, seqno)]
    if value is not None:
        commands.append(self.command_builder('description', disable=True))
    commands.append(self.command_builder('description', value=value, default=default, disable=disable))
    return self.configure(commands)
Configures the routemap description Args: name (string): The full name of the routemap. action (string): The action to take for this routemap clause. seqno (integer): The sequence number for the routemap clause. value (string): The value to configure for the routemap description default (bool): Specifies to default the routemap description value disable (bool): Specifies to negate the routemap description Returns: True if the operation succeeds otherwise False is returned
codesearchnet
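An illustrative call to the route-map description setter above, assuming a pyeapi-style API object where the method is reachable as `node.routemaps`; the node and route-map names are assumptions.

# Set a description on sequence 10 of route-map "RM-IN", action "permit".
node.routemaps.set_description('RM-IN', 'permit', 10, value='Inbound customer routes')

# Negate the description again.
node.routemaps.set_description('RM-IN', 'permit', 10, disable=True)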
def get_value_index(self, indices):
    size = self['size'] if self.get('size') else self['dimension']['size']
    ndims = len(size)
    mult = 1
    num = 0
    for idx, dim in enumerate(size):
        mult *= size[ndims - idx] if (idx > 0) else 1
        num += mult * indices[ndims - idx - 1]
    return num
Converts a list of dimensions’ indices into a numeric value index. Args: indices(list): list of dimension's indices. Returns: num(int): numeric value index.
juraj-google-style
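A worked example of the index arithmetic performed above. For dimension sizes [2, 3, 4], the indices [1, 2, 3] map to 3 + 2*4 + 1*(3*4) = 23. The `dataset` object below is hypothetical; only the arithmetic is being illustrated.

# Dimension sizes [2, 3, 4]; indices [1, 2, 3].
# The loop accumulates strides from the last dimension outwards:
#   num = 3 + 2*4 + 1*(3*4) = 3 + 8 + 12 = 23
value_index = dataset.get_value_index([1, 2, 3])  # -> 23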
def build(self, var_list):
    if self.built:
        return
    super().build(var_list)
    self._r = []
    self._c = []
    self._v = []
    for var in var_list:
        if len(var.shape) < 2:
            self._r.append(backend.Variable(0, name=var.name, trainable=False))
            self._c.append(backend.Variable(0, name=var.name, trainable=False))
        elif self._overwrite_variable_with_gradient(var):
            self._r.append(None)
            self._c.append(None)
        else:
            r_shape = var.shape[:-1]
            c_shape = var.shape[:-2] + (var.shape[-1],)
            self._r.append(self.add_variable(shape=r_shape, dtype=var.dtype, name=var.name))
            self._c.append(self.add_variable(shape=c_shape, dtype=var.dtype, name=var.name))
        if self._overwrite_variable_with_gradient(var):
            self._v.append(None)
        else:
            self._v.append(self.add_variable_from_reference(reference_variable=var, name='velocity'))
Initialize optimizer variables. This optimizer keeps three types of variables per model variable: row-factored and column-factored second-moment accumulators (`r`, `c`) and velocities (`v`); `r` and `c` fall back to scalar placeholders for variables with fewer than two dimensions. Args: var_list: list of model variables to build optimizer variables on.
github-repos
def get_func(func_ea):
    if isinstance(func_ea, idaapi.func_t):
        return func_ea
    func = idaapi.get_func(func_ea)
    if func is None:
        raise exceptions.SarkNoFunction("No function at 0x{:08X}".format(func_ea))
    return func
get_func(func_t or ea) -> func_t Take an IDA function (``idaapi.func_t``) or an address (EA) and return an IDA function object. Use this when APIs can take either a function or an address. Args: func_ea: ``idaapi.func_t`` or ea of the function. Returns: An ``idaapi.func_t`` object for the given address. If a ``func_t`` is provided, it is returned.
juraj-google-style
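An illustrative call, assuming an IDA Pro scripting session where `idaapi` is available; the address is made up.

import idaapi  # only available inside an IDA Pro scripting session

func = get_func(0x401000)   # look up by address
same = get_func(func)       # passing a func_t back returns it unchanged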
def GetParent(self):
    if self.root:
        return None
    return PathInfo(components=self.components[:-1], path_type=self.path_type, directory=True)
Constructs a path info corresponding to the parent of current path. The root path (represented by an empty list of components, corresponds to `/` on Unix-like systems) does not have a parent. Returns: Instance of `rdf_objects.PathInfo` or `None` if parent does not exist.
codesearchnet
def leak(self: EventSetOrNode, duration: Duration) -> EventSetOrNode:
    from temporian.core.operators.leak import leak
    return leak(self, duration=duration)
Subtracts a duration from an [`EventSet`][temporian.EventSet]'s timestamps. In other words, shifts the timestamp values backward in time. Note that this operator moves future data into the past, and should be used with caution to prevent unwanted future leakage. For instance, this op should generally not be used to compute the input features of a model. Usage example: ```python >>> a = tp.event_set( ... timestamps=[0, 1, 5, 6], ... features={"value": [0, 1, 5, 6]}, ... ) >>> b = a.leak(tp.duration.seconds(2)) >>> b indexes: ... (4 events): timestamps: [-2. -1. 3. 4.] 'value': [0 1 5 6] ... ``` Args: duration: Duration to leak by. Returns: Leaked EventSet.
github-repos
def handle_response_for_connection(self, should_post=False):
    status_code = self._response.status_code
    data = self._response.data
    if data and ('errors' in data):
        self._response.errors = data['errors']
    if status_code in [HTTP_CODE_SUCCESS, HTTP_CODE_CREATED, HTTP_CODE_EMPTY]:
        return True
    if status_code == HTTP_CODE_MULTIPLE_CHOICES:
        return False
    if status_code in [HTTP_CODE_PERMISSION_DENIED, HTTP_CODE_UNAUTHORIZED]:
        if not should_post:
            return True
        return False
    if status_code in [HTTP_CODE_CONFLICT, HTTP_CODE_NOT_FOUND, HTTP_CODE_BAD_REQUEST, HTTP_CODE_METHOD_NOT_ALLOWED, HTTP_CODE_PRECONDITION_FAILED, HTTP_CODE_SERVICE_UNAVAILABLE]:
        if not should_post:
            return True
        return False
    if status_code == HTTP_CODE_INTERNAL_SERVER_ERROR:
        return False
    if status_code == HTTP_CODE_ZERO:
        bambou_logger.error('NURESTConnection: Connection error with code 0. Sending NUNURESTConnectionFailureNotification notification and exiting.')
        return False
    bambou_logger.error('NURESTConnection: Report this error, because this should not happen: %s' % self._response)
    return False
Check if the response succeeded or not. In case of error, this method also prints messages and sets an array of errors in the response object. Returns: True if the response succeeded, False otherwise.
codesearchnet
def update_hmet_card_file(hmet_card_file_path, new_hmet_data_path):
    hmet_card_file_path_temp = '{0}_tmp'.format(hmet_card_file_path)
    try:
        remove(hmet_card_file_path_temp)
    except OSError:
        pass
    copy(hmet_card_file_path, hmet_card_file_path_temp)
    with io_open(hmet_card_file_path_temp, 'w', newline='\r\n') as out_hmet_list_file:
        with open(hmet_card_file_path) as old_hmet_list_file:
            for date_path in old_hmet_list_file:
                out_hmet_list_file.write(u'{0}\n'.format(path.join(new_hmet_data_path, path.basename(date_path))))
    try:
        remove(hmet_card_file_path)
    except OSError:
        pass
    rename(hmet_card_file_path_temp, hmet_card_file_path)
This function updates the paths in the HMET card file to the new location of the HMET data. This is necessary because the file paths are absolute and will need to be updated if moved. Args: hmet_card_file_path(str): Location of the file used for the HMET_ASCII card. new_hmet_data_path(str): Location where the HMET ASCII files are currently located. Example:: new_hmet_data_path = "E:\\GSSHA\\new_hmet_directory" hmet_card_file_path = "E:\\GSSHA\\hmet_card_file.txt" update_hmet_card_file(hmet_card_file_path, new_hmet_data_path)
codesearchnet
def NextToken(self):
    if len(self.buffer) < 512:
        if (self.Feed() == 0) and (not self.buffer):
            return None
    return Lexer.NextToken(self)
Retrieves the next token. Returns: The next token (instance of Token) or None.
codesearchnet
def minimum(x1, x2):
    if any_symbolic_tensors((x1, x2)):
        return Minimum().symbolic_call(x1, x2)
    return backend.numpy.minimum(x1, x2)
Element-wise minimum of `x1` and `x2`. Args: x1: First tensor. x2: Second tensor. Returns: Output tensor, element-wise minimum of `x1` and `x2`.
github-repos
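A quick example of the element-wise minimum op above, assuming it is exposed through the Keras 3 ops namespace.

import numpy as np
from keras import ops

a = np.array([1.0, 5.0, 2.0])
b = np.array([3.0, 4.0, 2.0])
ops.minimum(a, b)  # -> [1.0, 4.0, 2.0]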
def make_lda_variational(activation, num_topics, layer_sizes):
    encoder_net = tf.keras.Sequential()
    for num_hidden_units in layer_sizes:
        encoder_net.add(
            tf.keras.layers.Dense(
                num_hidden_units,
                activation=activation,
                kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
    encoder_net.add(
        tf.keras.layers.Dense(
            num_topics,
            activation=tf.nn.softplus,
            kernel_initializer=tf.compat.v1.glorot_normal_initializer()))

    def lda_variational(bag_of_words):
        concentration = _clip_dirichlet_parameters(encoder_net(bag_of_words))
        return ed.Dirichlet(concentration=concentration, name="topics_posterior")

    return lda_variational
Creates the variational distribution for LDA. Args: activation: Activation function to use. num_topics: The number of topics. layer_sizes: The number of hidden units per layer in the encoder. Returns: lda_variational: A function that takes a bag-of-words Tensor as input and returns a distribution over topics.
juraj-google-style
def _ReadCompressedData(self, read_size):
    compressed_data = self._file_object.read(read_size)
    read_count = len(compressed_data)
    self._compressed_data = b''.join([self._compressed_data, compressed_data])
    self._uncompressed_data, self._compressed_data = (
        self._decompressor.Decompress(self._compressed_data))
    self._uncompressed_data_size = len(self._uncompressed_data)
    return read_count
Reads compressed data from the file-like object. Args: read_size (int): number of bytes of compressed data to read. Returns: int: number of bytes of compressed data read.
juraj-google-style
def to_hg_scheme_url(cls, url):
    regexes = cls._get_url_scheme_regexes()
    for scheme_key, pattern, regex in regexes:
        match = regex.match(url)
        if match is not None:
            groups = match.groups()
            if len(groups) == 2:
                return u''.join((scheme_key, '://', groups[0], '/', groups[1]))
            elif len(groups) == 1:
                return u''.join((scheme_key, '://', groups[0]))
Convert a URL to local mercurial URL schemes Args: url (str): URL to map to local mercurial URL schemes example:: # schemes.gh = git://github.com/ >> remote_url = 'git://github.com/westurner/dotfiles' >> to_hg_scheme_url(remote_url) << gh://westurner/dotfiles
codesearchnet
def update_add(x, increment):
    return state_ops.assign_add(x, increment)
Update the value of `x` by adding `increment`. Args: x: A Variable. increment: A tensor of same shape as `x`. Returns: The variable `x` updated.
github-repos
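A small sketch of the in-place update helper above, using TF 1.x-style variables; graph and session setup are omitted for brevity.

import tensorflow.compat.v1 as tf

x = tf.Variable([1.0, 2.0, 3.0])
increment = tf.constant([0.5, 0.5, 0.5])
update_op = update_add(x, increment)  # assigns x := x + increment when run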
def has_no_jumps(neuron, max_distance=30.0, axis='z'):
    bad_ids = []
    axis = {'x': COLS.X, 'y': COLS.Y, 'z': COLS.Z}[axis.lower()]
    for neurite in iter_neurites(neuron):
        section_segment = ((sec, seg) for sec in iter_sections(neurite) for seg in iter_segments(sec))
        for sec, (p0, p1) in islice(section_segment, 1, None):
            if max_distance < abs(p0[axis] - p1[axis]):
                bad_ids.append((sec.id, [p0, p1]))
    return CheckResult(len(bad_ids) == 0, bad_ids)
Check if there are jumps (large movements in the `axis`) Arguments: neuron(Neuron): The neuron object to test max_distance(float): value above which consecutive coordinate values along `axis` are considered a jump axis(str): one of x/y/z, which axis to check for jumps Returns: CheckResult with result list of ids of bad sections
juraj-google-style
def set_flowcontrol_send(self, name, value=None, default=False, disable=False):
    return self.set_flowcontrol(name, 'send', value, default, disable)
Configures the interface flowcontrol send value Args: name (string): The interface identifier. It must be a full interface name (ie Ethernet, not Et) value (boolean): True if the interface should enable sending flow control packets, otherwise False default (boolean): Specifies to default the interface flow control send value disable (boolean): Specifies to disable the interface flow control send value Returns: True if the operation succeeds otherwise False is returned
codesearchnet
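An illustrative call, assuming a pyeapi-style API object where the method is reachable as `node.interfaces`; the node and interface names are assumptions.

# Enable sending of flow-control pause frames on Ethernet1.
node.interfaces.set_flowcontrol_send('Ethernet1', value=True)

# Revert the setting to its default.
node.interfaces.set_flowcontrol_send('Ethernet1', default=True)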
def send(self, message_type, task_id, message):
    x = 0
    try:
        buffer = pickle.dumps((self.source_id, int(time.time()), message_type, message))
    except Exception as e:
        print('Exception during pickling {}'.format(e))
        return
    try:
        x = self.sock.sendto(buffer, (self.ip, self.port))
    except socket.timeout:
        print('Could not send message within timeout limit')
        return False
    return x
Sends a message to the UDP receiver. Parameters ---------- message_type: monitoring.MessageType (enum) In this case the message type is most often RESOURCE_INFO. task_id: int Task identifier of the task for which resource monitoring is being reported. message: object Arbitrary pickle-able object that is to be sent. Returns: Number of bytes sent, or False if sending timed out.
codesearchnet
def __init__(self, target='', graph=None, config=None):
    if not config:
        gpu_options = config_pb2.GPUOptions(allow_growth=True)
        config = config_pb2.ConfigProto(gpu_options=gpu_options)
    config.graph_options.place_pruned_graph = True
    super(InteractiveSession, self).__init__(target, graph, config)
    with InteractiveSession._count_lock:
        if InteractiveSession._active_session_count > 0:
            logging.error('An interactive session is already active. This can cause out-of-memory errors or some other unexpected errors (due to the unpredictable timing of garbage collection) in some cases. You must explicitly call `InteractiveSession.close()` to release resources held by the other session(s). Please use `tf.Session()` if you intend to productionize.')
        InteractiveSession._active_session_count += 1
    self._explicitly_closed = False
    self._default_session = self.as_default()
    self._default_session.enforce_nesting = False
    self._default_session.__enter__()
    self._explicit_graph = graph
    if self._explicit_graph is not None:
        self._default_graph = graph.as_default()
        self._default_graph.enforce_nesting = False
        self._default_graph.__enter__()
Creates a new interactive TensorFlow session. If no `graph` argument is specified when constructing the session, the default graph will be launched in the session. If you are using more than one graph (created with `tf.Graph()`) in the same process, you will have to use different sessions for each graph, but each graph can be used in multiple sessions. In this case, it is often clearer to pass the graph to be launched explicitly to the session constructor. Args: target: (Optional.) The execution engine to connect to. Defaults to using an in-process engine. graph: (Optional.) The `Graph` to be launched (described above). config: (Optional) `ConfigProto` proto used to configure the session.
github-repos
def __eq__(self, other):
    if not isinstance(other, FrameSet):
        if not hasattr(other, '__iter__'):
            return NotImplemented
        other = self.from_iterable(other)
    this = hash(self.items) | hash(self.order)
    that = hash(other.items) | hash(other.order)
    return this == that
Check if `self` == `other` via a comparison of the hash of their contents. If `other` is not a :class:`FrameSet`, but is a set, frozenset, or is iterable, it will be cast to a :class:`FrameSet`. Args: other (:class:`FrameSet`): Also accepts an object that can be cast to a :class:`FrameSet` Returns: bool: :class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
juraj-google-style
def parse(cls, args):
    try:
        (options, args) = cls.optparser.parse_args(args)
        if options.db_tap_id is None:
            raise ParseError("db_tap_id is required", cls.optparser.format_help())
        if options.query is None and options.script_location is None:
            raise ParseError("query or script location is required", cls.optparser.format_help())
        if options.script_location is not None:
            if options.query is not None:
                raise ParseError(
                    "Both query and script_location cannot be specified",
                    cls.optparser.format_help())
            if ((options.script_location.find("s3://") != 0) and
                    (options.script_location.find("s3n://") != 0)):
                try:
                    q = open(options.script_location).read()
                except IOError as e:
                    raise ParseError("Unable to open script location: %s" % str(e),
                                     cls.optparser.format_help())
                options.script_location = None
                options.query = q
    except OptionParsingError as e:
        raise ParseError(e.msg, cls.optparser.format_help())
    except OptionParsingExit as e:
        return None
    if options.macros is not None:
        options.macros = json.loads(options.macros)
    v = vars(options)
    v["command_type"] = "DbTapQueryCommand"
    return v
Parse command line arguments to construct a dictionary of command parameters that can be used to create a command Args: `args`: sequence of arguments Returns: Dictionary that can be used in create method Raises: ParseError: when the arguments are not correct
juraj-google-style
def skip_if(expr, reason, extras=None):
    if expr:
        skip(reason, extras)
Skip a test if expression evaluates to True. Args: expr: The expression that is evaluated. reason: The reason this test is skipped. extras: An optional field for extra information to be included in test result.
github-repos
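A sketch of how the helper above is typically used inside a Mobly-style test method; the testbed attribute names are illustrative.

def test_wifi_roaming(self):
    # Skip this test when the testbed has fewer than two access points.
    skip_if(len(self.access_points) < 2,
            'Test requires at least two access points.',
            extras={'num_aps': len(self.access_points)})
    # ... the rest of the test runs only when the condition is False ...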
def FromTrimmedData(data, index):
    header = Header()
    ms = StreamManager.GetStream(data)
    reader = BinaryReader(ms)
    header.DeserializeUnsigned(reader)
    reader.ReadByte()
    witness = Witness()
    witness.Deserialize(reader)
    header.Script = witness
    StreamManager.ReleaseStream(ms)
    return header
Deserialize into a Header object from the provided data. Args: data (bytes): index: UNUSED Returns: Header:
codesearchnet
def Reboot(self, target_mode=b'', timeout_ms=None):
    return self._SimpleCommand(b'reboot', arg=target_mode or None, timeout_ms=timeout_ms)
Reboots the device. Args: target_mode: Normal reboot when unspecified. Can specify other target modes such as 'recovery' or 'bootloader'. timeout_ms: Optional timeout in milliseconds to wait for a response. Returns: Usually the empty string. Depends on the bootloader and the target_mode.
juraj-google-style
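An example of driving the command above from a fastboot-style device object; the `device` variable is hypothetical.

device.Reboot()                                              # normal reboot
device.Reboot(target_mode=b'bootloader', timeout_ms=10000)   # reboot into the bootloader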
def __init__(self, source, lineno, target, what=None):
    self.source = source
    self.lineno = lineno
    self.target = target
    self.what = what
Initialization method. Args: source (Module): source Module. lineno (int): number of line at which import statement occurs. target (str/Module/Package): the target node. what (str): what is imported (optional).
juraj-google-style
def delete_field(self, field_name):
    self._whoosh.remove_field(field_name.strip())
    return self._whoosh.schema
Deletes a single field from the index, via the command MODEL.pw.delete_field(FIELD). Args: field_name (string): Name of the field to delete from the model registered in the index. Returns: (WhooshSchema): The new schema after the field has been deleted.
juraj-google-style
def read_value(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    try:
        value = unpack('!Q', istream.read(self.LENGTH))[0]
    except Exception:
        self.logger.error('Error reading boolean value from buffer')
        raise
    if value == 1:
        self.value = True
    elif value == 0:
        self.value = False
    else:
        raise ValueError('expected: 0 or 1, observed: {0}'.format(value))
    self.validate()
Read the value of the Boolean object from the input stream. Args: istream (Stream): A buffer containing the encoded bytes of the value of a Boolean object. Usually a BytearrayStream object. Required. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: if the read boolean value is not a 0 or 1.
codesearchnet
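A sketch of the wire format the reader above expects: the value is a big-endian unsigned 64-bit integer that must be exactly 0 or 1. The `Boolean` and `BytearrayStream` names follow the PyKMIP-style API described in the docstring and are assumptions here.

from struct import pack

encoded_true = pack('!Q', 1)   # b'\x00\x00\x00\x00\x00\x00\x00\x01'
boolean = Boolean()
boolean.read_value(BytearrayStream(encoded_true))
boolean.value   # -> True

# Any payload other than 0 or 1 raises ValueError.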