Columns: code (string, lengths 20–4.93k), docstring (string, lengths 33–1.27k), source (string, 3 classes)
def _ParseTriggerEndTime(self, parser_mediator, trigger): time_elements_tuple = (trigger.end_date.year, trigger.end_date.month, trigger.end_date.day_of_month, 0, 0, 0) date_time = None if (time_elements_tuple != (0, 0, 0, 0, 0, 0)): try: date_time = dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple) date_time.is_local_time = True date_time._precision = dfdatetime_definitions.PRECISION_1_DAY except ValueError: parser_mediator.ProduceExtractionWarning('invalid trigger end time: {0!s}'.format(time_elements_tuple)) return date_time
Parses the end time from a trigger. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. trigger (job_trigger): a trigger. Returns: dfdatetime.DateTimeValues: trigger end date and time, or None if not available.
codesearchnet
def parse_variant(store, institute_obj, case_obj, variant_obj, update=False, genome_build='37', get_compounds=True): has_changed = False compounds = variant_obj.get('compounds', []) if (compounds and get_compounds): if ('not_loaded' not in compounds[0]): new_compounds = store.update_variant_compounds(variant_obj) variant_obj['compounds'] = new_compounds has_changed = True variant_obj['compounds'] = sorted(variant_obj['compounds'], key=(lambda compound: (- compound['combined_score']))) variant_genes = variant_obj.get('genes') if (variant_genes is not None): for gene_obj in variant_genes: if (not gene_obj['hgnc_id']): continue if (gene_obj.get('hgnc_symbol') is None): hgnc_gene = store.hgnc_gene(gene_obj['hgnc_id'], build=genome_build) if (not hgnc_gene): continue has_changed = True gene_obj['hgnc_symbol'] = hgnc_gene['hgnc_symbol'] if (update and has_changed): variant_obj = store.update_variant(variant_obj) variant_obj['comments'] = store.events(institute_obj, case=case_obj, variant_id=variant_obj['variant_id'], comments=True) if variant_genes: variant_obj.update(get_predictions(variant_genes)) if (variant_obj.get('category') == 'cancer'): variant_obj.update(get_variant_info(variant_genes)) for compound_obj in compounds: compound_obj.update(get_predictions(compound_obj.get('genes', []))) if isinstance(variant_obj.get('acmg_classification'), int): acmg_code = ACMG_MAP[variant_obj['acmg_classification']] variant_obj['acmg_classification'] = ACMG_COMPLETE_MAP[acmg_code] variant_length = variant_obj.get('length') variant_obj['length'] = {100000000000: 'inf', (- 1): 'n.d.'}.get(variant_length, variant_length) if (not ('end_chrom' in variant_obj)): variant_obj['end_chrom'] = variant_obj['chromosome'] return variant_obj
Parse information about variants. - Adds information about compounds - Updates the information about compounds if necessary and 'update=True' Args: store(scout.adapter.MongoAdapter) institute_obj(scout.models.Institute) case_obj(scout.models.Case) variant_obj(scout.models.Variant) update(bool): If variant should be updated in database genome_build(str)
codesearchnet
def _call_method(self, method, req, resp_class): payload = req.SerializeToString() headers = { 'Content-Type': 'application/x-protobuf', 'Content-Length': str(len(payload)), 'X-Goog-Api-Format-Version': '2' } response, content = self._http.request( '%s:%s' % (self._url, method), method='POST', body=payload, headers=headers) if response.status != 200: raise _make_rpc_error(method, response, content) resp = resp_class() resp.ParseFromString(content) return resp
_call_method call the given RPC method over HTTP. It uses the given protobuf message request as the payload and returns the deserialized protobuf message response. Args: method: RPC method name to be called. req: protobuf message for the RPC request. resp_class: protobuf message class for the RPC response. Returns: Deserialized resp_class protobuf message instance. Raises: RPCError: The rpc method call failed.
juraj-google-style
def call_boxes(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]]=None, boxes: Optional[Union[List[List[int]], List[List[List[int]]]]]=None, word_labels: Optional[Union[List[int], List[List[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: def _is_valid_text_input(t): if isinstance(t, str): return True elif isinstance(t, (list, tuple)): if len(t) == 0: return True elif isinstance(t[0], str): return True elif isinstance(t[0], (list, tuple)): return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: if not _is_valid_text_input(text): raise ValueError('text input must of type `str` (single example) or `List[str]` (batch of examples). ') if not isinstance(text_pair, (list, tuple)): raise ValueError('words must of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).') elif not isinstance(text, (list, tuple)): raise ValueError('Words must of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).') if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError('You must provide corresponding bounding boxes') if is_batched: if len(words) != len(boxes): raise ValueError('You must provide words and boxes for an equal amount of examples') for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError('You must provide as many words as there are bounding boxes') elif len(words) != len(boxes): raise ValueError('You must provide as many words as there are bounding boxes') if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.') batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus_boxes(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) else: return self.encode_plus_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, 
add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`List[List[int]]`, `List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`List[int]`, `List[List[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD).
github-repos
def getVersionListCount(self, orgresource): url = nurls['getVersionListCount'] data = {'userid': self.user_id, 'useridx': self.useridx, 'orgresource': orgresource, } r = self.session.post(url = url, data = data) j = json.loads(r.text) if j['message'] != 'success': print("[*] Error getVersionListCount: " + j['message']) return False else: return int(j['resultvalue']['count'])
GetVersionListCount Args: orgresource: File path Returns: Integer number: # of version list False: Failed to get property
juraj-google-style
def _URange(s): a = s.split("..") if len(a) == 1: return [_UInt(a[0])] if len(a) == 2: lo = _UInt(a[0]) hi = _UInt(a[1]) if lo < hi: return range(lo, hi + 1) raise InputError("invalid Unicode range %s" % (s,))
Converts string to Unicode range. '0001..0003' => [1, 2, 3]. '0001' => [1]. Args: s: string to convert Returns: Unicode range Raises: InputError: the string is not a valid Unicode range.
juraj-google-style
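A minimal usage sketch of the range expansion above; `_uint` is a hypothetical stand-in for the `_UInt` helper, assumed here to parse a hexadecimal code point.

def _uint(s):
    # Hypothetical stand-in for _UInt: parse a hexadecimal code point.
    return int(s, 16)

def urange(s):
    parts = s.split("..")
    if len(parts) == 1:
        return [_uint(parts[0])]
    lo, hi = _uint(parts[0]), _uint(parts[1])
    if lo < hi:
        return list(range(lo, hi + 1))
    raise ValueError("invalid Unicode range %s" % s)

print(urange("0001..0003"))  # [1, 2, 3]
print(urange("0041"))        # [65]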
def text(cls, text, *, resize=None, single_use=None, selective=None): return cls(types.KeyboardButton(text), resize=resize, single_use=single_use, selective=selective)
Creates a new button with the given text. Args: resize (`bool`): If present, the entire keyboard will be reconfigured to be resized and be smaller if there are not many buttons. single_use (`bool`): If present, the entire keyboard will be reconfigured to be usable only once before it hides itself. selective (`bool`): If present, the entire keyboard will be reconfigured to be "selective". The keyboard will be shown only to specific users. It will target users that are @mentioned in the text of the message or to the sender of the message you reply to.
codesearchnet
def _get_table(name): item = google.datalab.utils.commands.get_notebook_item(name) if isinstance(item, bigquery.Table): return item try: return _existing_table_cache[name] except KeyError: table = bigquery.Table(name) if table.exists(): _existing_table_cache[name] = table return table return None
Given a variable or table name, get a Table if it exists. Args: name: the name of the Table or a variable referencing the Table. Returns: The Table, if found.
juraj-google-style
def persist_upstream_diagram(self, filepath): assert isinstance(filepath, str), 'Step {} error, filepath must be str. Got {} instead'.format(self.name, type(filepath)) persist_as_png(self.upstream_structure, filepath)
Creates upstream steps diagram and persists it to disk as png file. Pydot graph is created and persisted to disk as png file under the filepath directory. Args: filepath (str): filepath to which the png with steps visualization should be persisted
codesearchnet
def markdown_cell(markdown): import utool as ut markdown_header = ut.codeblock( ) markdown_footer = ut.codeblock( ) return (markdown_header + '\n' + ut.indent(repr_single_for_md(markdown), ' ' * 2) + '\n' + markdown_footer)
r""" Args: markdown (str): Returns: str: json formatted ipython notebook markdown cell CommandLine: python -m ibeis.templates.generate_notebook --exec-markdown_cell Example: >>> # DISABLE_DOCTEST >>> from ibeis.templates.generate_notebook import * # NOQA >>> markdown = '# Title' >>> result = markdown_cell(markdown) >>> print(result)
juraj-google-style
def _get_short_description(self): if (self.description is None): return None lines = [x for x in self.description.split('\n')] if (len(lines) == 1): return lines[0] elif ((len(lines) >= 3) and (lines[1] == '')): return lines[0] return None
Return the first line of a multiline description Returns: string: The short description, otherwise None
codesearchnet
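A standalone sketch of the same short-description rule (return the first line only for a single-line description or a summary line followed by a blank separator); the class wrapper is omitted for brevity.

def short_description(description):
    # First line when it stands alone or is followed by a blank line; otherwise None.
    if description is None:
        return None
    lines = description.split('\n')
    if len(lines) == 1:
        return lines[0]
    if len(lines) >= 3 and lines[1] == '':
        return lines[0]
    return None

print(short_description("Summary line\n\nLonger body text."))  # "Summary line"
print(short_description("Line one\nLine two"))                 # None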
def sign(allocate_quota_request): if (not isinstance(allocate_quota_request, sc_messages.AllocateQuotaRequest)): raise ValueError(u'Invalid request') op = allocate_quota_request.allocateOperation if ((op is None) or (op.methodName is None) or (op.consumerId is None)): logging.error(u'Bad %s: not initialized => not signed', allocate_quota_request) raise ValueError(u'allocate_quota request must be initialized with an operation') md5 = hashlib.md5() md5.update(op.methodName.encode('utf-8')) md5.update(b'\x00') md5.update(op.consumerId.encode('utf-8')) if op.labels: signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels)) for value_set in op.quotaMetrics: md5.update(b'\x00') md5.update(value_set.metricName.encode('utf-8')) for mv in value_set.metricValues: metric_value.update_hash(md5, mv) md5.update(b'\x00') return md5.digest()
Obtains a signature for the operation in an `AllocateQuotaRequest`. Args: allocate_quota_request (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaRequest`): the allocate quota request whose operation is signed. Returns: string: a secure hash generated from the operation
codesearchnet
def define_grid(self, matrix): self.style['grid-template-areas'] = ''.join("'%s'"%(' '.join(x)) for x in matrix)
Populates the Table with a list of tuples of strings. Args: matrix (list): list of iterables of strings (lists or something else). Items in the matrix have to correspond to a key for the children.
juraj-google-style
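A sketch of the CSS string the method builds for `grid-template-areas`; the widget and its `style` dict are left out, so this only shows the string construction.

# Build the grid-template-areas value from a matrix of area names.
matrix = [('header', 'header'),
          ('sidebar', 'content')]
grid_template_areas = ''.join("'%s'" % ' '.join(row) for row in matrix)
print(grid_template_areas)  # 'header header''sidebar content'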
def WinChmod(filename, acl_list, user=None): if (user is None): user = win32api.GetUserName() if (not os.path.exists(filename)): raise RuntimeError(('filename %s does not exist' % filename)) acl_bitmask = 0 for acl in acl_list: acl_bitmask |= getattr(ntsecuritycon, acl) dacl = win32security.ACL() (win_user, _, _) = win32security.LookupAccountName('', user) dacl.AddAccessAllowedAce(win32security.ACL_REVISION, acl_bitmask, win_user) security_descriptor = win32security.GetFileSecurity(filename, win32security.DACL_SECURITY_INFORMATION) security_descriptor.SetSecurityDescriptorDacl(DACL_PRESENT, dacl, DACL_DEFAULT) win32security.SetFileSecurity(filename, win32security.DACL_SECURITY_INFORMATION, security_descriptor)
Provide chmod-like functionality for windows. Doco links: goo.gl/n7YR1 goo.gl/rDv81 goo.gl/hDobb Args: filename: target filename for acl acl_list: list of ntsecuritycon acl strings to be applied with bitwise OR. e.g. ["FILE_GENERIC_READ", "FILE_GENERIC_WRITE"] user: username string. If not specified we use the user we are running as. Raises: AttributeError: if a bad permission is passed RuntimeError: if filename doesn't exist
codesearchnet
def __init__(self, channel): self.GetStepNames = channel.unary_unary( '/gauge.messages.lspService/GetStepNames', request_serializer=messages__pb2.StepNamesRequest.SerializeToString, response_deserializer=messages__pb2.StepNamesResponse.FromString, ) self.CacheFile = channel.unary_unary( '/gauge.messages.lspService/CacheFile', request_serializer=messages__pb2.CacheFileRequest.SerializeToString, response_deserializer=lsp__pb2.Empty.FromString, ) self.GetStepPositions = channel.unary_unary( '/gauge.messages.lspService/GetStepPositions', request_serializer=messages__pb2.StepPositionsRequest.SerializeToString, response_deserializer=messages__pb2.StepPositionsResponse.FromString, ) self.GetImplementationFiles = channel.unary_unary( '/gauge.messages.lspService/GetImplementationFiles', request_serializer=lsp__pb2.Empty.SerializeToString, response_deserializer=messages__pb2.ImplementationFileListResponse.FromString, ) self.ImplementStub = channel.unary_unary( '/gauge.messages.lspService/ImplementStub', request_serializer=messages__pb2.StubImplementationCodeRequest.SerializeToString, response_deserializer=messages__pb2.FileDiff.FromString, ) self.ValidateStep = channel.unary_unary( '/gauge.messages.lspService/ValidateStep', request_serializer=messages__pb2.StepValidateRequest.SerializeToString, response_deserializer=messages__pb2.StepValidateResponse.FromString, ) self.Refactor = channel.unary_unary( '/gauge.messages.lspService/Refactor', request_serializer=messages__pb2.RefactorRequest.SerializeToString, response_deserializer=messages__pb2.RefactorResponse.FromString, ) self.GetStepName = channel.unary_unary( '/gauge.messages.lspService/GetStepName', request_serializer=messages__pb2.StepNameRequest.SerializeToString, response_deserializer=messages__pb2.StepNameResponse.FromString, ) self.GetGlobPatterns = channel.unary_unary( '/gauge.messages.lspService/GetGlobPatterns', request_serializer=lsp__pb2.Empty.SerializeToString, response_deserializer=messages__pb2.ImplementationFileGlobPatternResponse.FromString, ) self.KillProcess = channel.unary_unary( '/gauge.messages.lspService/KillProcess', request_serializer=messages__pb2.KillProcessRequest.SerializeToString, response_deserializer=lsp__pb2.Empty.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def _get_genes(self, variant): transcripts = variant['transcripts'] ensembl_ids = [transcript['ensembl_id'] for transcript in transcripts if transcript['ensembl_id']] hgnc_symbols = [transcript['hgnc_symbol'] for transcript in transcripts if transcript['hgnc_symbol']] genes = get_gene_info(ensembl_ids, hgnc_symbols) return genes
Add the genes for a variant Get the hgnc symbols from all transcripts and add them to the variant. Args: variant (dict): A variant dictionary Returns: genes (list): A list of Genes
juraj-google-style
def cummax(self, axis=None, skipna=True, *args, **kwargs): axis = self._get_axis_number(axis) if axis is not None else 0 if axis: self._validate_dtypes() return self.__constructor__( query_compiler=self._query_compiler.cummax( axis=axis, skipna=skipna, **kwargs ) )
Perform a cumulative maximum across the DataFrame. Args: axis (int): The axis to take maximum on. skipna (bool): True to skip NA values, false otherwise. Returns: The cumulative maximum of the DataFrame.
juraj-google-style
def delete_variants(self, case_id, variant_type, category=None): category = category or '' LOG.info("Deleting old {0} {1} variants for case {2}".format( variant_type, category, case_id)) query = {'case_id': case_id, 'variant_type': variant_type} if category: query['category'] = category result = self.variant_collection.delete_many(query) LOG.info("{0} variants deleted".format(result.deleted_count))
Delete variants of one type for a case This is used when a case is reanalyzed Args: case_id(str): The case id variant_type(str): 'research' or 'clinical' category(str): 'snv', 'sv' or 'cancer'
juraj-google-style
def unstack(x, num=None, axis=0): if any_symbolic_tensors((x,)): return Unstack(num, axis).symbolic_call(x) return backend.core.unstack(x, num=num, axis=axis)
Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors. Args: x: The input tensor. num: The length of the dimension axis. Automatically inferred if `None`. axis: The axis along which to unpack. Returns: A list of tensors unpacked along the given axis. Example: >>> x = keras.ops.array([[1, 2], [3, 4]]) >>> keras.ops.unstack(x, axis=0) [array([1, 2]), array([3, 4])]
github-repos
def tmpdir(suffix='', prefix='tmp', dir=None): tmp = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir) (yield tmp) shutil.rmtree(tmp)
Create a temporary directory with a context manager. The directory is deleted when the context exits. The prefix, suffix, and dir arguments are the same as for mkdtemp(). Args: suffix (str): If suffix is specified, the directory name will end with that suffix, otherwise there will be no suffix. prefix (str): If prefix is specified, the directory name will begin with that prefix; otherwise, a default prefix is used. dir (str): If dir is specified, the directory will be created in that directory; otherwise, a default directory is used. Returns: str: path to the directory
codesearchnet
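A self-contained sketch of the same pattern, assuming the original wraps the generator with `contextlib.contextmanager` (the decorator is not visible in the flattened row above); the cleanup is wrapped in try/finally here so the directory is removed even if the body raises.

import contextlib
import shutil
import tempfile

@contextlib.contextmanager
def tmpdir(suffix='', prefix='tmp', dir=None):
    tmp = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
    try:
        yield tmp
    finally:
        shutil.rmtree(tmp)

with tmpdir(prefix='demo-') as path:
    print('working in', path)  # directory removed on exit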
def _get_next_partition(self) -> tuple[int, float]: rank = self._working_tensor_shape.rank if rank is None or rank == 0: return (0, math.inf) num_elems = self._working_tensor_shape.num_elements() def num_partitions(axis: int) -> float: axis_len = self._working_tensor_shape.dims[axis].value slice_elems = num_elems bytes_per_slice = slice_elems * self._dtype_size slices_per_shard = self._shard_size_remaining if slices_per_shard == 0: return math.inf return math.ceil(axis_len / slices_per_shard) min_parts = num_partitions(0) min_axis = 0 for axis in range(1, rank): parts_along_axis = num_partitions(axis) part_size = num_elems * self._dtype_size / parts_along_axis if parts_along_axis < min_parts and part_size <= self._shard_size_remaining: min_axis, min_parts = (axis, int(parts_along_axis)) return (min_axis, math.ceil(int(self._working_tensor_shape[min_axis]) / min_parts))
Gets tensor partition with size closest to shard_size_remaining. Returns: A tuple containing the axis and size of the next partition.
github-repos
def trajectory(self): traj = np.zeros((2, self.times.size)) for (t, time) in enumerate(self.times): traj[:, t] = self.center_of_mass(time) return traj
Calculates the center of mass for each time step and outputs an array. Returns: numpy.ndarray: Array of shape (2, len(times)) with the center of mass at each time step.
codesearchnet
def infer_element_type(elements): element_type = typehints.Union[[trivial_inference.instance_to_type(e) for e in elements]] return element_type
For internal use only; no backwards-compatibility guarantees. Infer a Beam type for a list of elements. Args: elements (List[Any]): A list of elements for which the type should be inferred. Returns: A Beam type encompassing all elements.
github-repos
def apply_grad_zmat_tensor(grad_C, construction_table, cart_dist): if (construction_table.index != cart_dist.index).any(): message = 'construction_table and cart_dist must use the same index' raise ValueError(message) X_dist = cart_dist.loc[:, ['x', 'y', 'z']].values.T C_dist = np.tensordot(grad_C, X_dist, axes=([3, 2], [0, 1])).T if (C_dist.dtype == np.dtype('i8')): C_dist = C_dist.astype('f8') try: C_dist[:, [1, 2]] = np.rad2deg(C_dist[:, [1, 2]]) except AttributeError: C_dist[:, [1, 2]] = sympy.deg(C_dist[:, [1, 2]]) from chemcoord.internal_coordinates.zmat_class_main import Zmat cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral'] dtypes = ['O', 'i8', 'f8', 'i8', 'f8', 'i8', 'f8'] new = pd.DataFrame(data=np.zeros((len(construction_table), 7)), index=cart_dist.index, columns=cols, dtype='f8') new = new.astype(dict(zip(cols, dtypes))) new.loc[:, ['b', 'a', 'd']] = construction_table new.loc[:, 'atom'] = cart_dist.loc[:, 'atom'] new.loc[:, ['bond', 'angle', 'dihedral']] = C_dist return Zmat(new, _metadata={'last_valid_cartesian': cart_dist})
Apply the gradient for transformation to Zmatrix space onto cart_dist. Args: grad_C (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array. The mathematical details of the index layout is explained in :meth:`~chemcoord.Cartesian.get_grad_zmat()`. construction_table (pandas.DataFrame): Explained in :meth:`~chemcoord.Cartesian.get_construction_table()`. cart_dist (:class:`~chemcoord.Cartesian`): Distortions in cartesian space. Returns: :class:`Zmat`: Distortions in Zmatrix space.
codesearchnet
def diff_is_docstring_only(repo: Repo, branching_point: str, filename: str) -> bool: folder = Path(repo.working_dir) with checkout_commit(repo, branching_point): with open(folder / filename, 'r', encoding='utf-8') as f: old_content = f.read() with open(folder / filename, 'r', encoding='utf-8') as f: new_content = f.read() old_content_clean = clean_code(old_content) new_content_clean = clean_code(new_content) return old_content_clean == new_content_clean
Check if the diff is only in docstrings (or comments and whitespace) in a filename. Args: repo (`git.Repo`): A git repository (for instance the Transformers repo). branching_point (`str`): The commit reference of where to compare for the diff. filename (`str`): The filename where we want to know if the diff is only in docstrings/comments. Returns: `bool`: Whether the diff is docstring/comments only or not.
github-repos
def load_region(adapter, case_id, hgnc_id=None, chrom=None, start=None, end=None): if hgnc_id: gene_obj = adapter.hgnc_gene(hgnc_id) if (not gene_obj): ValueError('Gene {} does not exist in database'.format(hgnc_id)) chrom = gene_obj['chromosome'] start = gene_obj['start'] end = gene_obj['end'] case_obj = adapter.case(case_id=case_id) if (not case_obj): raise ValueError('Case {} does not exist in database'.format(case_id)) log.info('Load clinical SNV variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end)) adapter.load_variants(case_obj=case_obj, variant_type='clinical', category='snv', chrom=chrom, start=start, end=end) vcf_sv_file = case_obj['vcf_files'].get('vcf_sv') if vcf_sv_file: log.info('Load clinical SV variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end)) adapter.load_variants(case_obj=case_obj, variant_type='clinical', category='sv', chrom=chrom, start=start, end=end) vcf_str_file = case_obj['vcf_files'].get('vcf_str') if vcf_str_file: log.info('Load clinical STR variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end)) adapter.load_variants(case_obj=case_obj, variant_type='clinical', category='str', chrom=chrom, start=start, end=end) if case_obj['is_research']: log.info('Load research SNV variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end)) adapter.load_variants(case_obj=case_obj, variant_type='research', category='snv', chrom=chrom, start=start, end=end) vcf_sv_research = case_obj['vcf_files'].get('vcf_sv_research') if vcf_sv_research: log.info('Load research SV variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end)) adapter.load_variants(case_obj=case_obj, variant_type='research', category='sv', chrom=chrom, start=start, end=end)
Load all variants in a region defined by a HGNC id Args: adapter (MongoAdapter) case_id (str): Case id hgnc_id (int): If all variants from a gene should be uploaded chrom (str): If variants from coordinates should be uploaded start (int): Start position for region end (int): Stop position for region
codesearchnet
def _create_conversion_trie(strict): t = pygtrie.CharTrie() for beta, uni in _map.BETACODE_MAP.items(): if strict: t[beta] = uni else: diacritics = beta[1:] perms = itertools.permutations(diacritics) for perm in perms: perm_str = beta[0] + ''.join(perm) t[perm_str.lower()] = uni t[perm_str.upper()] = uni return t
Create the trie for betacode conversion. Args: strict: Flag controlling diacritic order on input; when False, any permutation of the diacritics is accepted. Returns: The trie for conversion.
juraj-google-style
def _create_checkable_action(self, text, conf_name, editorstack_method): def toogle(checked): self.switch_to_plugin() self._toggle_checkable_action(checked, editorstack_method, conf_name) action = create_action(self, text, toggled=toogle) action.setChecked(CONF.get('editor', conf_name)) return action
Helper function to create a checkable action. Args: text (str): Text to be displayed in the action. conf_name (str): configuration setting associated with the action. editorstack_method (str): name of the EditorStack method that will be used to apply the change in each editorstack.
juraj-google-style
def auto_docstring(obj=None, *, custom_intro=None, custom_args=None, checkpoint=None): def auto_docstring_decorator(obj): if len(obj.__qualname__.split('.')) > 1: return auto_method_docstring(obj, custom_args=custom_args, custom_intro=custom_intro, checkpoint=checkpoint) else: return auto_class_docstring(obj, custom_args=custom_args, custom_intro=custom_intro, checkpoint=checkpoint) if obj: return auto_docstring_decorator(obj) return auto_docstring_decorator
Automatically generates docstrings for classes and methods in the Transformers library. This decorator can be used in the following forms: @auto_docstring def my_function(...): ... or @auto_docstring() def my_function(...): ... or @auto_docstring(custom_intro="Custom intro", ...) def my_function(...): ... Args: custom_intro (str, optional): Custom introduction text to add to the docstring. This will replace the default introduction text generated by the decorator before the Args section. checkpoint (str, optional): Checkpoint name to use in the docstring. This should be automatically inferred from the model configuration class, but can be overridden if needed.
github-repos
def get_conversion_factor(self, new_unit): uo_base, ofactor = self.as_base_units un_base, nfactor = Unit(new_unit).as_base_units units_new = sorted(un_base.items(), key=lambda d: _UNAME2UTYPE[d[0]]) units_old = sorted(uo_base.items(), key=lambda d: _UNAME2UTYPE[d[0]]) factor = ofactor / nfactor for uo, un in zip(units_old, units_new): if uo[1] != un[1]: raise UnitError("Units %s and %s are not compatible!" % (uo, un)) c = ALL_UNITS[_UNAME2UTYPE[uo[0]]] factor *= (c[uo[0]] / c[un[0]]) ** uo[1] return factor
Returns a conversion factor between this unit and a new unit. Compound units are supported, but must have the same powers in each unit type. Args: new_unit: The new unit.
juraj-google-style
def adversary(self, name, owner=None, **kwargs): return Adversary(self.tcex, name, owner=owner, **kwargs)
Create the Adversary TI object. Args: owner: name: **kwargs: Return:
codesearchnet
def get_recipe(self, recipe_name): if recipe_name.endswith('.yaml'): recipe = self._recipes.get(RecipeObject.FromFile(recipe_name, self._recipe_actions, self._recipe_resources).name) else: recipe = self._recipes.get(recipe_name) if recipe is None: raise RecipeNotFoundError("Could not find recipe", recipe_name=recipe_name, known_recipes=[x for x in self._recipes.keys()]) return recipe
Get a recipe by name. Args: recipe_name (str): The name of the recipe to fetch. Can be either the yaml file name or the name of the recipe.
juraj-google-style
def __init__(self, temperature=1.0, max_fine_history_length=512, max_fine_input_length=1024, n_fine_codebooks=8, **kwargs): super().__init__(temperature=temperature) self.max_fine_history_length = max_fine_history_length self.max_fine_input_length = max_fine_input_length self.n_fine_codebooks = n_fine_codebooks
Class that holds a generation configuration for [`BarkFineModel`]. [`BarkFineModel`] is an autoencoder model, so should not usually be used for generation. However, under the hood, it uses `temperature` when used by [`BarkModel`] This configuration inherit from [`GenerationConfig`] and can be used to control the model generation. Read the documentation from [`GenerationConfig`] for more information. Args: temperature (`float`, *optional*): The value used to modulate the next token probabilities. max_fine_history_length (`int`, *optional*, defaults to 512): Max length of the fine history vector. max_fine_input_length (`int`, *optional*, defaults to 1024): Max length of fine input vector. n_fine_codebooks (`int`, *optional*, defaults to 8): Number of codebooks used.
github-repos
def handle_app_update(self, task_id, future, memo_cbk=False): if (not self.tasks[task_id]['app_fu'].done()): logger.error('Internal consistency error: app_fu is not done for task {}'.format(task_id)) if (not (self.tasks[task_id]['app_fu'] == future)): logger.error('Internal consistency error: callback future is not the app_fu in task structure, for task {}'.format(task_id)) if (not memo_cbk): self.memoizer.update_memo(task_id, self.tasks[task_id], future) if (self.checkpoint_mode == 'task_exit'): self.checkpoint(tasks=[task_id]) if (self.tasks[task_id]['app_fu'] and self.tasks[task_id]['app_fu'].done() and (self.tasks[task_id]['app_fu'].exception() is None) and (self.tasks[task_id]['executor'] != 'data_manager') and (self.tasks[task_id]['func_name'] != '_ftp_stage_in') and (self.tasks[task_id]['func_name'] != '_http_stage_in')): for dfu in self.tasks[task_id]['app_fu'].outputs: f = dfu.file_obj if (isinstance(f, File) and f.is_remote()): self.data_manager.stage_out(f, self.tasks[task_id]['executor']) return
This function is called as a callback when an AppFuture is in its final state. It will trigger post-app processing such as checkpointing and stageout. Args: task_id (string) : Task id future (Future) : The relevant app future (which should be consistent with the task structure's 'app_fu' entry) Kwargs: memo_cbk (Bool) : Indicates that the call is coming from a memo update, which does not require additional memo updates.
codesearchnet
def handle(self, message, connection): handler = self._handlers.get((message.msgtype, message.revision)) if handler is None: handler = self._handlers.get(message.msgtype) if handler is None: raise ProtocolError("%s not expected on server" % message) try: work = yield handler(message, connection) except Exception as e: log.error("error handling message %r: %r", message, e) log.debug(" message header %r content %r", message.header, message.content, exc_info=1) work = connection.error(message, repr(e)) raise gen.Return(work)
Delegate a received message to the appropriate handler. Args: message (Message) : The message that was receive that needs to be handled connection (ServerConnection) : The connection that received this message Raises: ProtocolError
juraj-google-style
def _set_typeahead(cls, el, value): PlaceholderHandler.reset_placeholder_dropdown(el) if ((not value) and (not el.value)): DropdownHandler.set_dropdown_glyph(el.id, 'glyphicon-alert') return if (len(value) == 1): source = value[0]['source'].strip() dropdown_el = DropdownHandler.set_dropdown_glyph(el.id, 'glyphicon-eye-open') dropdown_content = "<span class='gray_text'>&nbsp;(%s)</span>" if source: dropdown_el.html = (dropdown_content % source[::(- 1)]) el.value = value[0]['val'] return parent_id = el.parent.id if ('typeahead' not in parent_id.lower()): parent_id = el.parent.parent.id if (parent_id in cls._set_by_typeahead): window.destroy_typeahead_tag((' window.make_typeahead_tag((' DropdownHandler.set_dropdown_glyph(el.id, 'glyphicon-menu-down') PlaceholderHandler.set_placeholder_dropdown(el) cls._set_by_typeahead.add(parent_id)
Convert given `el` to typeahead input and set it to `value`. This method also sets the dropdown icons and descriptors. Args: el (obj): Element reference to the input you want to convert to typeahead. value (list): List of dicts with two keys: ``source`` and ``val``.
codesearchnet
def describe_file(module): descriptor = FileDescriptor() descriptor.package = util.get_package_for_module(module) if (not descriptor.package): descriptor.package = None message_descriptors = [] enum_descriptors = [] for name in sorted(dir(module)): value = getattr(module, name) if isinstance(value, type): if issubclass(value, messages.Message): message_descriptors.append(describe_message(value)) elif issubclass(value, messages.Enum): enum_descriptors.append(describe_enum(value)) if message_descriptors: descriptor.message_types = message_descriptors if enum_descriptors: descriptor.enum_types = enum_descriptors return descriptor
Build a file from a specified Python module. Args: module: Python module to describe. Returns: Initialized FileDescriptor instance describing the module.
codesearchnet
def _translate_name(name): underscored = inflection.underscore(name) dasherized = inflection.dasherize(underscored) words = dasherized.split('-') last_word = words.pop() words.append(inflection.pluralize(last_word)) return '-'.join(words)
Translate the class name to the API endpoint. For example, Car would become cars, FastCar would become fast-cars. Args: name (string): Camel case name (singular) Returns: string: A pluralized, dasherized string.
juraj-google-style
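A usage sketch of the name translation with the third-party `inflection` package that the code above depends on:

import inflection

def translate_name(name):
    # CamelCase -> dashed, pluralized endpoint name.
    dasherized = inflection.dasherize(inflection.underscore(name))
    words = dasherized.split('-')
    words[-1] = inflection.pluralize(words[-1])
    return '-'.join(words)

print(translate_name('Car'))      # cars
print(translate_name('FastCar'))  # fast-cars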
def intrusion_sets(self, name, owner=None, **kwargs): return IntrusionSet(self.tcex, name, owner=owner, **kwargs)
Create the Intrusion Set TI object. Args: owner: name: **kwargs: Return:
codesearchnet
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
Create a mask from the two sequences passed to be used in a sequence-pair classification task. MVP does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros.
github-repos
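A plain-list sketch of the mask construction above, with hypothetical special-token ids (0 for the CLS/`<s>` token, 2 for the SEP/`</s>` token) standing in for the tokenizer attributes:

# Hypothetical ids for illustration only.
cls, sep = [0], [2]
token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]

single = len(cls + token_ids_0 + sep) * [0]
pair = len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
print(single)  # [0, 0, 0, 0, 0]
print(pair)    # [0, 0, 0, 0, 0, 0, 0, 0, 0]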
def _ReadSupportedOS(self, definition_values, definition_object, name): supported_os = definition_values.get('supported_os', []) if (not isinstance(supported_os, list)): raise errors.FormatError('Invalid supported_os type: {0!s}'.format(type(supported_os))) undefined_supported_os = set(supported_os).difference(self.supported_os) if undefined_supported_os: error_string = 'Artifact definition: {0:s} undefined supported operating system: {1:s}.'.format(name, ', '.join(undefined_supported_os)) raise errors.FormatError(error_string) definition_object.supported_os = supported_os
Reads the optional artifact or source type supported OS. Args: definition_values (dict[str, object]): artifact definition values. definition_object (ArtifactDefinition|SourceType): the definition object. name (str): name of the artifact definition. Raises: FormatError: if there are undefined supported operating systems.
codesearchnet
def ddot(L, R, left=None, out=None): L = asarray(L, float) R = asarray(R, float) if left is None: ok = min(L.ndim, R.ndim) == 1 and max(L.ndim, R.ndim) == 2 if not ok: msg = "Wrong array layout. One array should have" msg += " ndim=1 and the other one ndim=2." raise ValueError(msg) left = L.ndim == 1 if left: if out is None: out = copy(R) L = L.reshape(list(L.shape) + [1] * (R.ndim - 1)) return multiply(L, R, out=out) else: if out is None: out = copy(L) return multiply(L, R, out=out)
r"""Dot product of a matrix and a diagonal one. Args: L (array_like): Left matrix. R (array_like): Right matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: Resulting matrix.
juraj-google-style
def load_module_functions(module): module_functions = {} for (name, item) in vars(module).items(): if validator.is_function(item): module_functions[name] = item return module_functions
load python module functions. Args: module: python module Returns: dict: functions mapping for specified python module { "func1_name": func1, "func2_name": func2 }
codesearchnet
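A sketch of the same idea using the standard library's `inspect.isfunction` in place of the project's `validator.is_function` helper:

import inspect
import json

def load_module_functions(module):
    # Map function names to function objects bound at module level.
    return {name: item for name, item in vars(module).items() if inspect.isfunction(item)}

funcs = load_module_functions(json)
print('dumps' in funcs and 'loads' in funcs)  # True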
def ExpandGlobs(path, opts = None): precondition.AssertType(path, Text) if not path: raise ValueError("Path is empty") if not _IsAbsolutePath(path, opts): raise ValueError("Path '%s' is not absolute" % path) if opts is not None and opts.pathtype == rdf_paths.PathSpec.PathType.REGISTRY: root_dir, tail = path.replace("\\", "/").lstrip("/").split("/", 1) components = list(ParsePath(tail, opts=opts)) else: drive, tail = os.path.splitdrive(path) root_dir = os.path.join(drive, os.path.sep).upper() components = list(ParsePath(tail[1:], opts=opts)) return _ExpandComponents(root_dir, components)
Performs glob expansion on a given path. Path can contain regular glob elements (such as `**`, `*`, `?`, `[a-z]`). For example, having files `foo`, `bar`, `baz` glob expansion of `ba?` will yield `bar` and `baz`. Args: path: A path to expand. opts: A `PathOpts` object. Returns: Generator over all possible glob expansions of a given path. Raises: ValueError: If given path is empty or relative.
juraj-google-style
def create_sys_dsn(driver: str, **kw) -> bool: attributes = [] for attr in kw.keys(): attributes.append("%s=%s" % (attr, kw[attr])) return bool( ctypes.windll.ODBCCP32.SQLConfigDataSource(0, ODBC_ADD_SYS_DSN, driver, nul.join(attributes)) )
(Windows only.) Create a system ODBC data source name (DSN). Args: driver: ODBC driver name kw: Driver attributes Returns: bool: was the DSN created?
juraj-google-style
def __call__(self, artist, genres, lyrics='', return_tensors='pt') -> BatchEncoding: input_ids = [0, 0, 0] artist = [artist] * len(self.version) genres = [genres] * len(self.version) artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics) artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens) attention_masks = [-INFINITY] * len(full_tokens[-1]) input_ids = [self.convert_to_tensors([input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors) for i in range(len(self.version))] return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks})
Convert the raw string to a list of token ids Args: artist (`str`): Name of the artist. genres (`str`): List of genres that will be mixed to condition the audio lyrics (`str`, *optional*, defaults to `""`): Lyrics used to condition the generation
github-repos
def get_license_from_url(url): if (not url): return split_url = urlsplit(url, scheme='http') if (split_url.netloc.lower() == 'creativecommons.org'): if ('publicdomain' in split_url.path): match = _RE_PUBLIC_DOMAIN_URL.match(split_url.path) if (match is None): license = ['public domain'] else: license = ['CC0'] license.extend((part for part in match.groups() if part)) else: license = ['CC'] match = _RE_LICENSE_URL.match(split_url.path) license.extend((part.upper() for part in match.groups() if part)) elif (split_url.netloc == 'arxiv.org'): license = ['arXiv'] match = _RE_LICENSE_URL.match(split_url.path) license.extend((part for part in match.groups() if part)) else: raise ValueError('Unknown license URL') return u' '.join(license)
Get the license abbreviation from an URL. Args: url(str): canonical url of the license. Returns: str: the corresponding license abbreviation. Raises: ValueError: when the url is not recognized
codesearchnet
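A simplified sketch of the URL-to-license mapping; the regex here is an illustrative stand-in for the module-level `_RE_LICENSE_URL` / `_RE_PUBLIC_DOMAIN_URL` patterns, which are not shown in the row above.

import re
from urllib.parse import urlsplit

# Illustrative pattern: /licenses/<kind>/<version>/ on creativecommons.org.
_LICENSE_RE = re.compile(r'^/licenses/([a-z-]+)/([0-9.]+)')

def license_from_url(url):
    parts = urlsplit(url, scheme='http')
    if parts.netloc.lower() == 'creativecommons.org':
        match = _LICENSE_RE.match(parts.path)
        if match:
            kind, version = match.groups()
            return ' '.join(['CC', kind.upper(), version])
    raise ValueError('Unknown license URL')

print(license_from_url('https://creativecommons.org/licenses/by-sa/4.0/'))  # CC BY-SA 4.0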
def load_default_traditional_chinese_parser() -> Parser: with open(os.path.join(MODEL_DIR, 'zh-hant.json'), encoding='utf-8') as f: model = json.load(f) return Parser(model)
Loads a parser equipped with the default Traditional Chinese model. Returns: A parser (:obj:`budoux.Parser`).
github-repos
def filter_segs(self, segs): def whole_seg(seg): m = self.seg_regex.match(seg) if m and m.group(0) == seg: return True else: return False return list(filter(whole_seg, segs))
Given a list of strings, return only those which are valid segments. Args: segs (list): list of unicode values Returns: list: values in `segs` that are valid segments (according to the definitions of bases and diacritics/modifiers known to the object)
juraj-google-style
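A standalone sketch of the whole-segment filter, using a toy pattern in place of the object's `seg_regex`:

import re

# Toy segment pattern: a lowercase base letter plus an optional combining tilde.
seg_regex = re.compile('[a-z]\u0303?')

def filter_segs(segs):
    def whole_seg(seg):
        # Keep only strings the pattern matches in their entirety.
        m = seg_regex.match(seg)
        return bool(m) and m.group(0) == seg
    return list(filter(whole_seg, segs))

print(filter_segs(['a', 'n\u0303', 'xy', '?']))  # ['a', 'ñ']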
def snake_to_camel(name): ret = ''.join((x.title() for x in name.split('_'))) ret = (ret[0].lower() + ret[1:]) return ret
Takes a snake_field_name and returns a camelCaseFieldName Args: name (str): E.g. snake_field_name or SNAKE_FIELD_NAME Returns: str: camelCase converted name. E.g. capsFieldName
codesearchnet
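A quick usage sketch of the snake-to-camel conversion above:

def snake_to_camel(name):
    ret = ''.join(x.title() for x in name.split('_'))
    return ret[0].lower() + ret[1:]

print(snake_to_camel('snake_field_name'))  # snakeFieldName
print(snake_to_camel('SNAKE_FIELD_NAME'))  # snakeFieldName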
def get_referenced_object_as_list(prev_obj, obj, dot_separated_name, desired_type=None): res = get_referenced_object(prev_obj, obj, dot_separated_name, desired_type) if (res is None): return [] elif (type(res) is list): return res else: return [res]
Same as get_referenced_object, but always returns a list. Args: prev_obj: see get_referenced_object obj: see get_referenced_object dot_separated_name: see get_referenced_object desired_type: see get_referenced_object Returns: same as get_referenced_object, but always returns a list
codesearchnet
def market_if_touched_replace(self, accountID, orderID, **kwargs): return self.replace( accountID, orderID, order=MarketIfTouchedOrderRequest(**kwargs) )
Shortcut to replace a pending MarketIfTouched Order in an Account Args: accountID : The ID of the Account orderID : The ID of the MarketIfTouched Order to replace kwargs : The arguments to create a MarketIfTouchedOrderRequest Returns: v20.response.Response containing the results from submitting the request
juraj-google-style
def split_instance_route(self, route: 'InstanceRoute') -> Optional[Tuple[('InstanceRoute', 'InstanceRoute')]]: sroute = [] sn = self while sn: sroute.append(sn.iname()) sn = sn.data_parent() i = 0 while True: if (not sroute): break inst = sroute.pop() if (inst != route[i].iname()): return None while True: i += 1 if ((i >= len(route)) or isinstance(route[i], MemberName)): break if (not sroute): return (InstanceRoute(route[:i]), InstanceRoute(route[i:])) if (i >= len(route)): return None
Split `route` into the part up to receiver and the rest. Args: route: Absolute instance route (the receiver should correspond to an instance node on this route). Returns: A tuple consisting of - the part of `route` from the root up to and including the instance whose schema node is the receiver, and - the rest of `route`. ``None`` is returned if the receiver is not on the route.
codesearchnet
def build_frontend(self, frontend_node): proxy_name = frontend_node.frontend_header.proxy_name.text service_address_node = frontend_node.frontend_header.service_address config_block_lines = self.__build_config_block( frontend_node.config_block) host, port = '', '' if isinstance(service_address_node, pegnode.ServiceAddress): host = service_address_node.host.text port = service_address_node.port.text else: for line in config_block_lines: if isinstance(line, config.Bind): host, port = line.host, line.port break else: raise Exception( 'Not specify host and port in `frontend` definition') return config.Frontend( name=proxy_name, host=host, port=port, config_block=config_block_lines)
Parse a `frontend` section and return a config.Frontend. Args: frontend_node (TreeNode): parsed `frontend` section node Raises: Exception: if the frontend does not specify a host and port Returns: config.Frontend: the constructed frontend object
juraj-google-style
def query(self, s): s1 = np.sort([self.order[token] for token in s if token in self.order]) logging.debug("{} original tokens and {} tokens after applying " "frequency order.".format(len(s), len(s1))) prefix = self._get_prefix(s1) candidates = set([i for p1, token in enumerate(prefix) for i, p2 in self.index[token] if self.position_filter_func(s1, self.sets[i], p1, p2, self.similarity_threshold)]) logging.debug("{} candidates found.".format(len(candidates))) results = deque([]) for i in candidates: s2 = self.sets[i] sim = self.similarity_func(s1, s2) if sim < self.similarity_threshold: continue results.append((i, sim)) logging.debug("{} verified sets found.".format(len(results))) return list(results)
Query the search index for sets similar to the query set. Args: s (Iterable): the query set. Returns (list): a list of tuples `(index, similarity)` where the index is the index of the matching sets in the original list of sets.
juraj-google-style
def __init__(self, coupling_map, initial_layout=None, trials=20, seed=None): super().__init__() self.coupling_map = coupling_map self.initial_layout = initial_layout self.trials = trials self.seed = seed self.requires.append(BarrierBeforeFinalMeasurements())
Maps a DAGCircuit onto a `coupling_map` using swap gates. Args: coupling_map (CouplingMap): Directed graph representing a coupling map. initial_layout (Layout): initial layout of qubits in mapping trials (int): the number of attempts the randomized algorithm makes. seed (int): initial seed.
juraj-google-style
def typecheck(fn): is_compiled = False if hasattr(fn, '__wrapped__'): signature_fn = fn.__wrapped__ if hasattr(signature_fn, 'is_tp_compiled'): is_compiled = getattr(signature_fn, 'is_tp_compiled') else: signature_fn = fn signature = inspect.signature(signature_fn) @wraps(fn) def wrapper(*args, **kwargs): try: all_args = signature.bind(*args, **kwargs) for arg_key, arg_value in all_args.arguments.items(): trace = _Trace().add_context(f'When checking the argument "{arg_key}" of function "{fn.__name__}".') if arg_key not in signature.parameters: raise ValueError(f'Unexpected argument "{arg_key}"') param = signature.parameters[arg_key] if param.kind in [inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD]: _check_annotation(trace, is_compiled, arg_value, param.annotation) elif param.kind is inspect.Parameter.VAR_POSITIONAL: _check_annotation_list_or_set_or_uniform_tuple(trace, is_compiled, arg_value, [param.annotation]) elif param.kind is inspect.Parameter.VAR_KEYWORD: for sub_key, sub_value in arg_value.items(): _check_annotation(_Trace().add_context(f'When checking the key "{sub_key}" of argument "{arg_key}" of function "{fn.__name__}".'), is_compiled, sub_value, param.annotation) except ValueError as e: if _ERROR_RAISES_EXCEPTION: e.__traceback__ = None raise e else: logging.warning('%s', str(e)) output = fn(*args, **kwargs) try: trace = _Trace().add_context(f'When checking the returned value of function "{fn.__name__}".') _check_annotation(trace, is_compiled, output, signature.return_annotation) except ValueError as e: if _ERROR_RAISES_EXCEPTION: e.__traceback__ = None raise e else: logging.warning('%s', str(e)) return output setattr(wrapper, '_typecheck', True) return wrapper
Annotation that check the arguments and outputs of a function at runtime. @typecheck checks, at runtime, that the type hints of the arguments and output of a function are satisfied. Usage example: ```python @typecheck def f(a, b: int, c: str = "aze") -> List[str]: return ["hello", "world"] f(1, 2, "a") # Ok f(1, 2, 3) # Fails ``` If combined with @compile, @typecheck should be applied after @compile (i.e. place @compile just below @typecheck in the code). This code only support what is required by Temporian API. Does not support typing.GenericTypeAlias e.g. list[int]. Use List[int] instead. Args: fn: Function to instrument. Returns: Instrumented function.
github-repos
def __call__(self, func): if not hasattr(func, "parser"): _LOG.debug("Creating parser for '%s'%s", func.__name__, "/%s" % self._name if self._name else "") (func_args, _, _, defaults) = getargspec(func) self._types, func_args = _check_types(func.__name__, self._types, func_args, defaults) args_and_defaults = _get_args_and_defaults(func_args, defaults) parser = _get_arg_parser(func, self._types, args_and_defaults, self._delimiter_chars) parser.get_name = lambda: self._name func.parser = parser func.parser.call = _get_parser_call_method(func) @wraps(func) def decorated(*args, **kwargs): return func(*args, **kwargs) return decorated
Add an argument parser attribute `parser` to the decorated function. Args: func: the function for which we want to create an argument parser
juraj-google-style
def call(self, inputs, state): _check_rnn_cell_input_dtypes([inputs, state]) sigmoid = math_ops.sigmoid one = constant_op.constant(1, dtype=dtypes.int32) if self._state_is_tuple: c, h = state else: c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one) gate_inputs = math_ops.matmul(array_ops.concat([inputs, h], 1), self._kernel) gate_inputs = nn_ops.bias_add(gate_inputs, self._bias) i, j, f, o = array_ops.split(value=gate_inputs, num_or_size_splits=4, axis=one) forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype) add = math_ops.add multiply = math_ops.multiply new_c = add(multiply(c, sigmoid(add(f, forget_bias_tensor))), multiply(sigmoid(i), self._activation(j))) new_h = multiply(self._activation(new_c), sigmoid(o)) if self._state_is_tuple: new_state = LSTMStateTuple(new_c, new_h) else: new_state = array_ops.concat([new_c, new_h], 1) return (new_h, new_state)
Long short-term memory cell (LSTM). Args: inputs: `2-D` tensor with shape `[batch_size, input_size]`. state: An `LSTMStateTuple` of state tensors, each shaped `[batch_size, num_units]`, if `state_is_tuple` has been set to `True`. Otherwise, a `Tensor` shaped `[batch_size, 2 * num_units]`. Returns: A pair containing the new hidden state, and the new state (either a `LSTMStateTuple` or a concatenated state, depending on `state_is_tuple`).
github-repos
def _num_image_tokens(image_size: Tuple[int, int], patch_size: Tuple[int, int]) -> Tuple[int, int]: height, width = image_size patch_height, patch_width = patch_size if isinstance(patch_size, (tuple, list)) else (patch_size, patch_size) num_width_tokens = (width - 1) // patch_width + 1 num_height_tokens = (height - 1) // patch_height + 1 return (num_height_tokens, num_width_tokens)
Calculate the number of image tokens given the image size and patch size. Args: image_size (`Tuple[int, int]`): The size of the image as `(height, width)`. patch_size (`Tuple[int, int]`): The patch size as `(height, width)`. Returns: `Tuple[int, int]`: The number of image tokens along the height and width dimensions.
github-repos
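A worked example of the patch-count formula as reconstructed above (ceil division of each image dimension by the patch size, so partial patches still count):

import math

def num_image_tokens(image_size, patch_size):
    height, width = image_size
    patch_height, patch_width = patch_size
    # (dim - 1) // patch + 1 equals ceil(dim / patch) for positive integers.
    return ((height - 1) // patch_height + 1, (width - 1) // patch_width + 1)

print(num_image_tokens((224, 224), (16, 16)))  # (14, 14)
print(num_image_tokens((230, 224), (16, 16)))  # (15, 14)
print(math.ceil(230 / 16))                     # 15, matches the ceil interpretation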
def get_events_for_blocks(self, blocks, subscriptions): events = [] for blkw in blocks: events.extend(self.get_events_for_block(blkw, subscriptions)) return events
Get a list of events associated with all the blocks. Args: blocks (list of BlockWrapper): The blocks to search for events that match each subscription. subscriptions (list of EventSubscriptions): EventFilter and event type to filter events. Returns (list of Events): The Events associated with each block id. Raises: KeyError: A receipt is missing from the receipt store.
juraj-google-style
def _retrieve_endpoint(self, endpoint_id: str, location: str, is_private: bool) -> aiplatform.Endpoint: if is_private: endpoint: aiplatform.Endpoint = aiplatform.PrivateEndpoint(endpoint_name=endpoint_id, location=location) LOGGER.debug('Treating endpoint %s as private', endpoint_id) else: endpoint = aiplatform.Endpoint(endpoint_name=endpoint_id, location=location) LOGGER.debug('Treating endpoint %s as public', endpoint_id) try: mod_list = endpoint.list_models() except Exception as e: raise ValueError('Failed to contact endpoint %s, got exception: %s', endpoint_id, e) if len(mod_list) == 0: raise ValueError('Endpoint %s has no models deployed to it.', endpoint_id) return endpoint
Retrieves an AI Platform endpoint and queries it for liveness/deployed models. Args: endpoint_id: the numerical ID of the Vertex AI endpoint to retrieve. location: the GCP location (region) of the Vertex AI endpoint. is_private: a boolean indicating if the Vertex AI endpoint is a private endpoint. Returns: An aiplatform.Endpoint object Raises: ValueError: if endpoint is inactive or has no models deployed to it.
github-repos
def build_request_relationship(type, ids): if (ids is None): return {'data': None} elif isinstance(ids, str): return {'data': {'id': ids, 'type': type}} else: return {'data': [{'id': id, 'type': type} for id in ids]}
Build a relationship list. A relationship list is used to update relationships between two resources. Setting sensors on a label, for example, uses this function to construct the list of sensor ids to pass to the Helium API. Args: type(string): The resource type for the ids in the relationship ids([uuid] or uuid): Just one or a list of resource uuids to use in the relationship Returns: A ready to use relationship JSON object.
codesearchnet
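A quick sketch of the relationship payloads produced for the three input shapes (None, a single id, a list of ids):

def build_request_relationship(type, ids):
    if ids is None:
        return {'data': None}
    if isinstance(ids, str):
        return {'data': {'id': ids, 'type': type}}
    return {'data': [{'id': i, 'type': type} for i in ids]}

print(build_request_relationship('sensor', None))
# {'data': None}
print(build_request_relationship('sensor', 'abc-123'))
# {'data': {'id': 'abc-123', 'type': 'sensor'}}
print(build_request_relationship('sensor', ['a-1', 'b-2']))
# {'data': [{'id': 'a-1', 'type': 'sensor'}, {'id': 'b-2', 'type': 'sensor'}]}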
def unbind(self, devices_to_unbind): if self.entity_api_key == "": return {'status': 'failure', 'response': 'No API key found in request'} url = self.base_url + "api/0.1.0/subscribe/unbind" headers = {"apikey": self.entity_api_key} data = { "exchange": "amq.topic", "keys": devices_to_unbind, "queue": self.entity_id } with self.no_ssl_verification(): r = requests.delete(url, json=data, headers=headers) print(r) response = dict() if "No API key" in str(r.content.decode("utf-8")): response["status"] = "failure" r = json.loads(r.content.decode("utf-8"))['message'] elif 'unbind' in str(r.content.decode("utf-8")): response["status"] = "success" r = r.content.decode("utf-8") else: response["status"] = "failure" r = r.content.decode("utf-8") response["response"] = str(r) return response
This function allows an entity to unbind devices that are already bound. Args: devices_to_unbind (list): an array of devices that are to be unbound (stop listening) Example: unbind(["test10","testDemo105"])
juraj-google-style
def _add_new_tf_operations(self, compute_devices=True) -> list['Operation']: self._check_not_finalized() new_ops = [self._create_op_from_tf_operation(c_op, compute_device=compute_devices) for c_op in self.new_operations()] for op in new_ops: new_control_inputs = self._control_dependencies_for_inputs(op.inputs) op._add_control_inputs(new_control_inputs) op._control_flow_post_processing() return new_ops
Creates `Operations` in this graph for any new TF_Operations. This is useful for when TF_Operations are indirectly created by the C API outside of the Operation constructor (e.g. by TF_ImportGraphDef, TF_FinishWhile). This ensures there are corresponding Operations for all TF_Operations in the underlying TF_Graph. Args: compute_devices: (Optional.) If True, device functions will be executed to compute the device properties of each new Operation. Returns: A list of the new `Operation` objects.
github-repos
def get_coordinate_offset(self, other_reading):
    my_x, my_y = self.reference_source_point
    other_x, other_y = other_reading.reference_source_point
    return my_x - other_x, my_y - other_y
Calculates the offsets between readings' coordinate systems. Args: other_reading: ossos.astrom.SourceReading The reading to compare coordinate systems with. Returns: (offset_x, offset_y): The x and y offsets between this reading and the other reading's coordinate systems.
juraj-google-style
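A minimal sketch of how the offset is used; reading_a and reading_b stand for two already-constructed ossos.astrom.SourceReading objects and the coordinates are made up.

# hypothetical reference_source_point values: (120.5, 80.0) and (118.5, 79.0)
offset_x, offset_y = reading_a.get_coordinate_offset(reading_b)
# offset_x == 2.0, offset_y == 1.0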
def sparse_slice(sp_input, start, size, name=None):
    sp_input = _convert_to_sparse_tensor(sp_input)
    start = ops.convert_to_tensor(start, dtypes.int64)
    size = ops.convert_to_tensor(size, dtypes.int64)
    with ops.name_scope(name, 'SparseSlice', [sp_input]) as name:
        output_indices, output_values, output_shape = gen_sparse_ops.sparse_slice(sp_input.indices, sp_input.values, sp_input.dense_shape, start, size, name=name)
        return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
Slice a `SparseTensor` based on the `start` and `size`.

For example, if the input is

    input_tensor = shape = [2, 7]
    [    a   d e  ]
    [b c          ]

Graphically the output tensors are:

    sparse.slice([0, 0], [2, 4]) = shape = [2, 4]
    [    a  ]
    [b c    ]

    sparse.slice([0, 4], [2, 3]) = shape = [2, 3]
    [ d e  ]
    [      ]

Args:
  sp_input: The `SparseTensor` to split.
  start: 1-D. tensor represents the start of the slice.
  size: 1-D. tensor represents the size of the slice.
  name: A name for the operation (optional).

Returns:
  A `SparseTensor` object resulting from slicing.

Raises:
  TypeError: If `sp_input` is not a `SparseTensor`.
github-repos
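A hedged sketch using the public tf.sparse.slice wrapper, which forwards to the function above; the indices and values are arbitrary toy data.

import tensorflow as tf

sp = tf.sparse.SparseTensor(
    indices=[[0, 4], [0, 5], [1, 0], [1, 1]],
    values=[1, 2, 3, 4],
    dense_shape=[2, 7])
left = tf.sparse.slice(sp, start=[0, 0], size=[2, 4])   # keeps the [1, 0] and [1, 1] entries
right = tf.sparse.slice(sp, start=[0, 4], size=[2, 3])  # keeps the [0, 4] and [0, 5] entries, re-indexed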
def tan(x):
    if any_symbolic_tensors((x,)):
        return Tan().symbolic_call(x)
    return backend.numpy.tan(x)
Compute tangent, element-wise. Args: x: Input tensor. Returns: Output tensor of same shape as `x`.
github-repos
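Assuming this is exposed as keras.ops.tan in the Keras 3 ops namespace, a short numeric check:

import numpy as np
from keras import ops

x = np.array([0.0, np.pi / 4])
ops.tan(x)  # approximately [0.0, 1.0]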
def get_msd_plot(self, plt=None, mode="specie"): from pymatgen.util.plotting import pretty_plot plt = pretty_plot(12, 8, plt=plt) if np.max(self.dt) > 100000: plot_dt = self.dt / 1000 unit = 'ps' else: plot_dt = self.dt unit = 'fs' if mode == "species": for sp in sorted(self.structure.composition.keys()): indices = [i for i, site in enumerate(self.structure) if site.specie == sp] sd = np.average(self.sq_disp_ions[indices, :], axis=0) plt.plot(plot_dt, sd, label=sp.__str__()) plt.legend(loc=2, prop={"size": 20}) elif mode == "sites": for i, site in enumerate(self.structure): sd = self.sq_disp_ions[i, :] plt.plot(plot_dt, sd, label="%s - %d" % ( site.specie.__str__(), i)) plt.legend(loc=2, prop={"size": 20}) elif mode == "mscd": plt.plot(plot_dt, self.mscd, 'r') plt.legend(["Overall"], loc=2, prop={"size": 20}) else: plt.plot(plot_dt, self.msd, 'k') plt.plot(plot_dt, self.msd_components[:, 0], 'r') plt.plot(plot_dt, self.msd_components[:, 1], 'g') plt.plot(plot_dt, self.msd_components[:, 2], 'b') plt.legend(["Overall", "a", "b", "c"], loc=2, prop={"size": 20}) plt.xlabel("Timestep ({})".format(unit)) if mode == "mscd": plt.ylabel("MSCD ($\\AA^2$)") else: plt.ylabel("MSD ($\\AA^2$)") plt.tight_layout() return plt
Get the plot of the smoothed msd vs time graph. Useful for checking convergence. This can be written to an image file. Args: plt: A plot object. Defaults to None, which means one will be generated. mode (str): Determines type of msd plot. By "species", "sites", or direction (default). If mode = "mscd", the smoothed mscd vs. time will be plotted.
juraj-google-style
def mark_streamer(self, index):
    self._logger.debug('Marking streamer %d manually', index)
    if index >= len(self.streamers):
        raise ArgumentError('Invalid streamer index', index=index, num_streamers=len(self.streamers))
    self._manually_triggered_streamers.add(index)
Manually mark a streamer that should trigger. The next time check_streamers is called, the given streamer will be manually marked that it should trigger, which will cause it to trigger unless it has no data. Args: index (int): The index of the streamer that we should mark as manually triggered. Raises: ArgumentError: If the streamer index is invalid.
codesearchnet
def __init__(self, lower=True, num_norm=True, use_char=True, initial_vocab=None): self._num_norm = num_norm self._use_char = use_char self._word_vocab = Vocabulary(lower=lower) self._char_vocab = Vocabulary(lower=False) self._label_vocab = Vocabulary(lower=False, unk_token=False) if initial_vocab: self._word_vocab.add_documents([initial_vocab]) self._char_vocab.add_documents(initial_vocab)
Create a preprocessor object. Args: lower: boolean. Whether to convert the texts to lowercase. use_char: boolean. Whether to use char feature. num_norm: boolean. Whether to normalize text. initial_vocab: Iterable. Initial vocabulary for expanding word_vocab.
juraj-google-style
def get_correct_answer(question, default=None, required=False,
                       answer=None, is_answer_correct=None):
    while 1:
        if default is None:
            msg = u' - No Default Available'
        else:
            msg = (u'\n[DEFAULT] -> {}\nPress Enter To '
                   u'Use Default'.format(default))
        prompt = question + msg + u'\n--> '
        if answer is None:
            answer = six.moves.input(prompt)
        if answer == '' and required and default is not None:
            print(u'You have to enter a value\n\n')
            six.moves.input(u'Press enter to continue')
            print(u'\n\n')
            answer = None
            continue
        if answer == u'' and default is not None:
            answer = default
        _ans = ask_yes_no(u'You entered {}, is this '
                          u'correct?'.format(answer),
                          answer=is_answer_correct)
        if _ans:
            return answer
        else:
            answer = None
Ask user a question and confirm answer

Args:
    question (str): Question to ask user
    default (str): Default answer if no input from user
    required (bool): Require user to input answer
    answer (str): Used for testing
    is_answer_correct (str): Used for testing
juraj-google-style
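A non-interactive sketch of the testing path hinted at in the docstring; passing answer and is_answer_correct avoids the stdin prompts, and the exact strings accepted by ask_yes_no are an assumption here.

value = get_correct_answer('Project name?', default='myapp',
                           answer='', is_answer_correct='yes')
# '' falls back to the default, so value == 'myapp'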
def _check_required_fields(self, object_type, ignore_fields): for field in self.configuration[object_type]['required_fields']: if field not in self.data and field not in ignore_fields: raise HDXError('Field %s is missing in %s!' % (field, object_type))
Helper method to check that metadata for HDX object is complete

Args:
    object_type (str): Type of HDX object whose required fields are checked
    ignore_fields (List[str]): Any fields to ignore in the check

Returns:
    None
juraj-google-style
def as_check_request(self, timer=datetime.utcnow): if (not self.service_name): raise ValueError(u'the service name must be set') if (not self.operation_id): raise ValueError(u'the operation id must be set') if (not self.operation_name): raise ValueError(u'the operation name must be set') op = super(Info, self).as_operation(timer=timer) labels = {} if self.android_cert_fingerprint: labels[_KNOWN_LABELS.SCC_ANDROID_CERT_FINGERPRINT.label_name] = self.android_cert_fingerprint if self.android_package_name: labels[_KNOWN_LABELS.SCC_ANDROID_PACKAGE_NAME.label_name] = self.android_package_name if self.client_ip: labels[_KNOWN_LABELS.SCC_CALLER_IP.label_name] = self.client_ip if self.ios_bundle_id: labels[_KNOWN_LABELS.SCC_IOS_BUNDLE_ID.label_name] = self.ios_bundle_id if self.referer: labels[_KNOWN_LABELS.SCC_REFERER.label_name] = self.referer labels[_KNOWN_LABELS.SCC_SERVICE_AGENT.label_name] = SERVICE_AGENT labels[_KNOWN_LABELS.SCC_USER_AGENT.label_name] = USER_AGENT op.labels = encoding.PyValueToMessage(sc_messages.Operation.LabelsValue, labels) check_request = sc_messages.CheckRequest(operation=op) return sc_messages.ServicecontrolServicesCheckRequest(serviceName=self.service_name, checkRequest=check_request)
Makes a `ServicecontrolServicesCheckRequest` from this instance

Returns:
  a ``ServicecontrolServicesCheckRequest``

Raises:
  ValueError: if the fields in this instance are insufficient to
    create a valid ``ServicecontrolServicesCheckRequest``
codesearchnet
def GetBalance(self, wallet, address, as_string=False): addr = PromptUtils.parse_param(address, wallet) if isinstance(addr, UInt160): addr = addr.Data sb = ScriptBuilder() sb.EmitAppCallWithOperationAndArgs(self.ScriptHash, 'balanceOf', [addr]) (tx, fee, results, num_ops, engine_success) = test_invoke(sb.ToArray(), wallet, []) if engine_success: try: val = results[0].GetBigInteger() precision_divisor = pow(10, self.decimals) balance = (Decimal(val) / Decimal(precision_divisor)) if as_string: formatter_str = ('.%sf' % self.decimals) balance_str = format(balance, formatter_str) return balance_str return balance except Exception as e: logger.error(('could not get balance: %s ' % e)) traceback.print_stack() else: addr_str = Crypto.ToAddress(UInt160(data=addr)) logger.error(f'Could not get balance of address {addr_str} for token contract {self.ScriptHash}. VM execution failed. Make sure the contract exists on the network and that it adheres to the NEP-5 standard') return 0
Get the token balance.

Args:
    wallet (neo.Wallets.Wallet): a wallet instance.
    address (str): public address of the account to get the token balance of.
    as_string (bool): whether the return value should be a string. Default is False, returning an integer.

Returns:
    int/str: token balance value as int (default), token balance as string if `as_string` is set to True. 0 if balance retrieval failed.
codesearchnet
def save_project_id(project_id): try: subprocess.call(['gcloud', 'config', 'set', 'project', project_id]) except: config_file = os.path.join(get_config_dir(), 'config.json') config = {} if os.path.exists(config_file): with open(config_file) as f: config = json.loads(f.read()) config['project_id'] = project_id with open(config_file, 'w') as f: f.write(json.dumps(config))
Save project id to config file. Args: project_id: the project_id to save.
codesearchnet
def maybe_get_common_dtype(arg_list):
    if all(a is None for a in arg_list):
        return None
    return dtype_util.common_dtype(arg_list, tf.float32)
Return common dtype of arg_list, or None. Args: arg_list: an iterable of items which are either `None` or have a `dtype` property. Returns: dtype: The common dtype of items in `arg_list`, or `None` if the list is empty or all items are `None`.
codesearchnet
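A small sketch of the helper's two outcomes; it assumes the usual TensorFlow Probability dtype_util behaviour of returning the shared dtype of the non-None items.

import tensorflow as tf

loc = tf.constant([0.0, 1.0], dtype=tf.float64)
maybe_get_common_dtype([loc, None])   # -> tf.float64
maybe_get_common_dtype([None, None])  # -> None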
def log_every_n(level, msg, n, *args):
    count = _get_next_log_count_per_token(get_absl_logger().findCaller())
    log_if(level, msg, not (count % n), *args)
Logs 'msg % args' at level 'level' once per 'n' times.

Logs the 1st call, (N+1)st call, (2N+1)st call, etc. Not threadsafe.

Args:
  level: int, the absl logging level at which to log.
  msg: str, the message to be logged.
  n: int, the number of times this should be called before it is logged.
  *args: The args to be substituted into the msg.
codesearchnet
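Typical absl usage: only every 100th call below is actually emitted.

from absl import logging

for step in range(1000):
    logging.log_every_n(logging.INFO, 'processed step %d', 100, step)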
def create_host_call(model_dir): graph = tf.get_default_graph() summaries = graph.get_collection(tf.GraphKeys.SUMMARIES) gs_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1]) summary_kwargs = collections.OrderedDict() for t in summaries: if (t.op.type not in ['ScalarSummary']): tf.logging.warn(('Ignoring unsupported tf.Summary type %s' % t.op.type)) continue name = t.op.name tensor = t.op.inputs[1] if (t.op.type == 'ScalarSummary'): assert tensor.shape.is_compatible_with([]) if (tensor.dtype == tf.int64): tensor = tf.to_int32(tensor) summary_kwargs[('ScalarSummary' + name)] = tf.reshape(tensor, [1]) elif (t.op.type == 'ImageSummary'): if (tensor.dtype != tf.float32): tf.logging.warn(('Currently T2T on TPU only supports ImageSummary of tf.float32-type Tensors. Skipping Tensor %s with dtype %s...' % (tensor.name, tensor.dtype))) continue summary_kwargs[('ImageSummary' + name)] = tensor if (not summary_kwargs): return None summary_kwargs['global_step'] = gs_t log_info(('summary_kwargs %s' % str(summary_kwargs))) def host_call_fn(**kwargs): 'Training host call. Creates summaries for training metrics.\n\n Args:\n **kwargs: Dict of {str: Tensor} , with `Tensor` of shape `[batch]`. Must\n contain key "global_step" with value of current global_step Tensor.\n\n Returns:\n List of summary ops to run on the CPU host.\n ' gs = tf.to_int64(kwargs.pop('global_step')[0]) with tf.contrib.summary.create_file_writer(model_dir).as_default(): with tf.contrib.summary.always_record_summaries(): for (name, value) in sorted(six.iteritems(kwargs)): if name.startswith('ScalarSummary'): name = name[len('ScalarSummary'):] tf.contrib.summary.scalar(name, tf.reduce_mean(tf.to_float(value)), step=gs) elif name.startswith('ImageSummary'): name = name[len('ImageSummary'):] tf.contrib.summary.image(name, value, step=gs) return tf.contrib.summary.all_summary_ops() return (host_call_fn, summary_kwargs)
Construct a host_call writing scalar summaries. Args: model_dir: String containing path to train Returns: (fn, args) Pair to be called by TPUEstimator as the host_call.
codesearchnet
def get_default_assets_zip_provider():
    path = os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))), 'webfiles.zip')
    if not os.path.exists(path):
        logger.warning('webfiles.zip static assets not found: %s', path)
        return None
    return lambda: open(path, 'rb')
Opens stock TensorBoard web assets collection. Returns: Returns function that returns a newly opened file handle to zip file containing static assets for stock TensorBoard, or None if webfiles.zip could not be found. The value the callback returns must be closed. The paths inside the zip file are considered absolute paths on the web server.
codesearchnet
def _get_newsfeeds(self, uri, detail_level=None):
    if detail_level:
        if detail_level not in ['ALL', 'CONDENSED']:
            return requests.codes.bad_request, {'success': 'False', 'error': 'detailLevel needs to be provided and field_type needs to be \'ALL\' or \'CONDENSED\''}
        uri += self.detail_level_suffix + detail_level
    return self._req('get', uri)
General purpose function to get newsfeeds.

Args:
    uri: uri for the feed base
    detail_level (str): optional detail level for the request, one of ['ALL', 'CONDENSED']

Returns:
    list of feed dicts to parse at your convenience
juraj-google-style
def xarrayfunc(func): @wraps(func) def wrapper(*args, **kwargs): if any(isinstance(arg, xr.DataArray) for arg in args): newargs = [] for arg in args: if isinstance(arg, xr.DataArray): newargs.append(arg.values) else: newargs.append(arg) return dc.full_like(args[0], func(*newargs, **kwargs)) else: return func(*args, **kwargs) return wrapper
Make a function compatible with xarray.DataArray. This function is intended to be used as a decorator like:: >>> @dc.xarrayfunc >>> def func(array): ... # do something ... return newarray >>> >>> result = func(array) Args: func (function): Function to be wrapped. The first argument of the function must be an array to be processed. Returns: wrapper (function): Wrapped function.
juraj-google-style
def save(self, output_saved_model_dir, save_gpu_specific_engines=True, options=None): assert self._converted if trt_utils.is_experimental_feature_activated('remove_native_segments'): logging.info("'remove_native_segments' experimental feature is enabled during saving of converted SavedModel.") self._converted_func = _remove_native_segments(self._converted_func) self._converted_graph_def = self._converted_func.graph.as_graph_def() if self._need_calibration and (not self._calibrated): raise RuntimeError('A model that requires INT8 calibration has to be built before saving it. Call build() to build and calibrate the TensorRT engines.') engine_asset_dir = tempfile.mkdtemp() resource_map = {} def _serialize_and_track_engine(node): canonical_engine_name = _get_canonical_engine_name(node.name) if canonical_engine_name in resource_map: return filename = os.path.join(engine_asset_dir, 'trt-serialized-engine.' + canonical_engine_name) try: gen_trt_ops.serialize_trt_resource(resource_name=canonical_engine_name, filename=filename, delete_resource=True, save_gpu_specific_engines=save_gpu_specific_engines) except errors.NotFoundError: logging.info('Could not find %s in TF-TRT cache. This can happen if build() is not called, which means TensorRT engines will be built and cached at runtime.', canonical_engine_name) return resource_map[canonical_engine_name] = _TRTEngineResource(canonical_engine_name, filename, self._conversion_params.maximum_cached_engines) self._for_each_trt_node(self._converted_graph_def, _serialize_and_track_engine) trackable = autotrackable.AutoTrackable() if self.freeze else self._saved_model trackable.trt_engine_resources = resource_map if not self._conversion_params.allow_build_at_runtime: def _reset_allow_build_at_runtime(node): node.attr['_allow_build_at_runtime'].b = False self._for_each_trt_node(self._converted_graph_def, _reset_allow_build_at_runtime) reset_converted_func = wrap_function.function_from_graph_def(self._converted_graph_def, [tensor.name for tensor in self._converted_func.inputs], [tensor.name for tensor in self._converted_func.outputs]) reset_converted_func.graph.structured_outputs = nest.pack_sequence_as(self._converted_func.graph.structured_outputs, reset_converted_func.graph.structured_outputs) reset_converted_func.graph.structured_input_signature = self._converted_func.structured_input_signature self._converted_func = reset_converted_func signatures = {self._input_saved_model_signature_key: self._converted_func} save.save(trackable, output_saved_model_dir, signatures, options=options)
Save the converted SavedModel. Args: output_saved_model_dir: directory to saved the converted SavedModel. save_gpu_specific_engines: whether to save TRT engines that have been built. When True, all engines are saved and when False, the engines are not saved and will be rebuilt at inference time. By using save_gpu_specific_engines=False after doing INT8 calibration, inference can be done on different GPUs than the GPU that the model was calibrated and saved on. options: `tf.saved_model.SaveOptions` object for configuring save options. Raises: RuntimeError: if the needed calibration hasn't been done.
github-repos
def _sendline(self, line):
    logging.info('%s: sending line', self.port)
    self._lines = []
    try:
        self._read()
    except socket.error:
        logging.debug('%s: Nothing cleared', self.port)
    print('sending [%s]' % line)
    self._write(line + '\r\n')
    time.sleep(0.1)
Send exactly one line to the device.

Args:
    line (str): data sent to the device
juraj-google-style
def average(self, var): return self._averages.get(var.ref(), None)
Returns the `Variable` holding the average of `var`. Args: var: A `Variable` object. Returns: A `Variable` object or `None` if the moving average of `var` is not maintained.
github-repos
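A sketch assuming the enclosing class is tf.train.ExponentialMovingAverage, which exposes this method.

import tensorflow as tf

var = tf.Variable(0.0)
ema = tf.train.ExponentialMovingAverage(decay=0.99)
ema.apply([var])            # start maintaining a moving average for `var`
avg_var = ema.average(var)  # the shadow Variable, or None if not maintained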
def get_vasp_input(self, vasp_input_set=MPRelaxSet, **kwargs): d = vasp_input_set(self.final_structure, **kwargs).get_vasp_input() d["transformations.json"] = json.dumps(self.as_dict()) return d
Returns VASP input as a dict of vasp objects. Args: vasp_input_set (pymatgen.io.vaspio_set.VaspInputSet): input set to create vasp input files from structures
juraj-google-style
def _project_single_observable(self, **kwargs: Dict[(str, Any)]) -> Hist: assert isinstance(self.output_attribute_name, str) (output_hist, projection_name, projection_name_args) = self._project_observable(input_key='single_observable', input_observable=self.observable_to_project_from, **kwargs) output_hist_args = projection_name_args output_hist_args.update({'output_hist': output_hist, 'projection_name': projection_name}) output_hist = self.output_hist(**output_hist_args) if (not hasattr(self.output_observable, self.output_attribute_name)): raise ValueError(f'Attempted to assign hist to non-existent attribute {self.output_attribute_name} of object {self.output_observable}. Check the attribute name!') setattr(self.output_observable, self.output_attribute_name, output_hist) return output_hist
Driver function for projecting and storing a single observable. Args: kwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...) Returns: The projected histogram. The histogram is also stored in the output specified by ``output_observable``.
codesearchnet
def _new_convolution(self, use_bias): def clean_dict(input_dict): if input_dict and not use_bias: cleaned_dict = input_dict.copy() cleaned_dict.pop("b", None) return cleaned_dict return input_dict return self._conv_class( output_channels=4*self._output_channels, kernel_shape=self._kernel_shape, stride=self._stride, rate=self._rate, padding=self._padding, use_bias=use_bias, initializers=clean_dict(self._initializers), partitioners=clean_dict(self._partitioners), regularizers=clean_dict(self._regularizers), name="conv")
Returns new convolution. Args: use_bias: Use bias in convolutions. If False, clean_dict removes bias entries from initializers, partitioners and regularizers passed to the constructor of the convolution.
juraj-google-style
class BasicRNNCell(LayerRNNCell): def __init__(self, num_units, activation=None, reuse=None, name=None, dtype=None, **kwargs): warnings.warn('`tf.nn.rnn_cell.BasicRNNCell` is deprecated and will be removed in a future version. This class is equivalent as `tf.keras.layers.SimpleRNNCell`, and will be replaced by that in Tensorflow 2.0.') super(BasicRNNCell, self).__init__(_reuse=reuse, name=name, dtype=dtype, **kwargs) _check_supported_dtypes(self.dtype) if context.executing_eagerly() and tf_config.list_logical_devices('GPU'): logging.warning('%s: Note that this cell is not optimized for performance. Please use tf.contrib.cudnn_rnn.CudnnRNNTanh for better performance on GPU.', self) self.input_spec = input_spec.InputSpec(ndim=2) self._num_units = num_units if activation: self._activation = activations.get(activation) else: self._activation = math_ops.tanh @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units @tf_utils.shape_type_conversion def build(self, inputs_shape): if inputs_shape[-1] is None: raise ValueError('Expected inputs.shape[-1] to be known, saw shape: %s' % str(inputs_shape)) _check_supported_dtypes(self.dtype) input_depth = inputs_shape[-1] self._kernel = self.add_variable(_WEIGHTS_VARIABLE_NAME, shape=[input_depth + self._num_units, self._num_units]) self._bias = self.add_variable(_BIAS_VARIABLE_NAME, shape=[self._num_units], initializer=init_ops.zeros_initializer(dtype=self.dtype)) self.built = True def call(self, inputs, state): _check_rnn_cell_input_dtypes([inputs, state]) gate_inputs = math_ops.matmul(array_ops.concat([inputs, state], 1), self._kernel) gate_inputs = nn_ops.bias_add(gate_inputs, self._bias) output = self._activation(gate_inputs) return (output, output) def get_config(self): config = {'num_units': self._num_units, 'activation': activations.serialize(self._activation), 'reuse': self._reuse} base_config = super(BasicRNNCell, self).get_config() return dict(list(base_config.items()) + list(config.items()))
The most basic RNN cell. Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnRNNTanh` for better performance on GPU. Args: num_units: int, The number of units in the RNN cell. activation: Nonlinearity to use. Default: `tanh`. It could also be string that is within Keras activation function names. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. **kwargs: Dict, keyword named properties for common layer attributes, like `trainable` etc when constructing the cell from configs of get_config().
github-repos
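A minimal sketch through the deprecated tf.compat.v1 path; shapes are illustrative only and SimpleRNNCell is the recommended replacement.

import tensorflow as tf

cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(num_units=8)
inputs = tf.zeros([4, 16])                # batch of 4, feature size 16
state = cell.zero_state(4, tf.float32)    # initial state, shape [4, 8]
output, next_state = cell(inputs, state)  # both shaped [4, 8]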
def restore_captures(concrete_function, inputs): bound_inputs = [get_tensor_from_node(obj) for obj in inputs] bound_variables = [obj for obj in inputs if isinstance(obj, (variables_lib.Variable, resource_variable_ops.BaseResourceVariable))] captured_inputs_list = [] concrete_function.set_variables(bound_variables) if bound_inputs: for bound_input, internal_capture in zip(bound_inputs, concrete_function.inputs[-len(bound_inputs):]): if hasattr(bound_input, '__tf_experimental_restore_capture__'): captured_inputs_list.append(bound_input.__tf_experimental_restore_capture__(concrete_function, internal_capture)) else: captured_inputs_list.append(bound_input) concrete_function.graph.replace_capture(bound_input, internal_capture) if internal_capture.dtype == dtypes.resource: if resource_variable_ops.is_resource_variable(bound_input): try: handle = bound_input.handle except ValueError: pass else: handle_data_util.copy_handle_data(handle, internal_capture) else: handle_data_util.copy_handle_data(bound_input, internal_capture) concrete_function.graph.capture(bound_input) if any([inp is None for inp in captured_inputs_list]): warnings.warn("Trying to load ShardedVariables using tf.saved_model.load. This won't work if using a tf.distribute.Strategy, and may use excess memory if not using a Strategy. Ignore this warning if using tf.keras.models.load_model.") concrete_function.set_external_captures(captured_inputs_list) if concrete_function.function_type: concrete_function._function_type = function_type_lib.FunctionType(concrete_function.function_type.parameters.values(), concrete_function.graph.function_captures.capture_types, return_annotation=concrete_function.function_type.output)
Restore captures for the concrete function. Used at deserialization time. For functions that are being deserialized, saved model restores objects that tensors were captured from, but functions only know about their tensors -- object information is destroyed by tracing. This additional logic extracts the tensors which the function originally captured. Args: concrete_function: the concrete function for which to restore captures inputs: a list tensors or other Python objects (such as variables) which contain tensors that were originally captured by the function
github-repos
def builder_from_source(source, filename, system_includes, nonsystem_includes, quiet=False): return ASTBuilder(tokenize.get_tokens(source), filename, system_includes, nonsystem_includes, quiet=quiet)
Utility method that returns an ASTBuilder from source code. Args: source: 'C++ source code' filename: 'file1' Returns: ASTBuilder
codesearchnet
def __init__(self, on_ui_exit=None, config=None): self._on_ui_exit = on_ui_exit self._command_handler_registry = debugger_cli_common.CommandHandlerRegistry() self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry() self._tab_completion_registry.register_tab_comp_context([''], self.CLI_EXIT_COMMANDS + [debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] + debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES) self._config = config or cli_config.CLIConfig() self._config_argparser = argparse.ArgumentParser(description='config command', usage=argparse.SUPPRESS) subparsers = self._config_argparser.add_subparsers() set_parser = subparsers.add_parser('set') set_parser.add_argument('property_name', type=str) set_parser.add_argument('property_value', type=str) set_parser = subparsers.add_parser('show') self.register_command_handler('config', self._config_command_handler, self._config_argparser.format_help(), prefix_aliases=['cfg'])
Constructor of the base class. Args: on_ui_exit: (`Callable`) the callback to be called when the UI exits. config: An instance of `cli_config.CLIConfig()` carrying user-facing configurations.
github-repos
def random_set_distribution(
    rnd: Optional[tcod.random.Random], dist: int
) -> None:
    lib.TCOD_random_set_distribution(rnd.random_c if rnd else ffi.NULL, dist)
Change the distribution mode of a random number generator. Args: rnd (Optional[Random]): A Random instance, or None to use the default. dist (int): The distribution mode to use. Should be DISTRIBUTION_*.
juraj-google-style
def set_circular(self, circular: bool, chain: List[Table] = None) -> None:
    self.circular = circular
    self.circular_chain = chain or []
Mark this table as circular (or not). Args: circular: is it circular? chain: if it's circular, this should be the list of tables participating in the circular chain
juraj-google-style
def time_estimate(self, duration, **kwargs):
    path = '%s/%s/time_estimate' % (self.manager.path, self.get_id())
    data = {'duration': duration}
    return self.manager.gitlab.http_post(path, post_data=data, **kwargs)
Set an estimated time of work for the object. Args: duration (str): Duration in human format (e.g. 3h30) **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTimeTrackingError: If the time tracking update cannot be done
juraj-google-style
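A python-gitlab usage sketch; the URL, token, project id and issue iid below are placeholders.

import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
issue = gl.projects.get(42).issues.get(7)
issue.time_estimate('3h30m')  # sets the estimate on the issue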
def __init__( self, size, weights=None, bias=True, l2_regularization=0.0, l1_regularization=0.0, trainable=True, named_tensors=None, scope='linear', summary_labels=() ): self.size = size self.weights_init = weights self.bias_init = bias self.l2_regularization = l2_regularization self.l1_regularization = l1_regularization self.trainable = trainable super(Linear, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
Linear layer. Args: size: Layer size. weights: Weight initialization, random if None. bias: Bias initialization, random if True, no bias added if False. l2_regularization: L2 regularization weight. l1_regularization: L1 regularization weight.
juraj-google-style
def get_typecast_value(self, value, type):
    if type == entities.Variable.Type.BOOLEAN:
        return value == 'true'
    elif type == entities.Variable.Type.INTEGER:
        return int(value)
    elif type == entities.Variable.Type.DOUBLE:
        return float(value)
    else:
        return value
Helper method to determine actual value based on type of feature variable. Args: value: Value in string form as it was parsed from datafile. type: Type denoting the feature flag type. Return: Value type-casted based on type of feature variable.
codesearchnet
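Illustration of the type casting, assuming the usual Optimizely entities.Variable.Type constants and a hypothetical config instance.

config.get_typecast_value('true', entities.Variable.Type.BOOLEAN)  # True
config.get_typecast_value('42', entities.Variable.Type.INTEGER)    # 42
config.get_typecast_value('3.14', entities.Variable.Type.DOUBLE)   # 3.14
config.get_typecast_value('hi', entities.Variable.Type.STRING)     # 'hi'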
def batch_shape(self): return tensor_shape.as_shape(self._batch_shape())
Shape of a single sample from a single event index as a `TensorShape`. May be partially defined or unknown. The batch dimensions are indexes into independent, non-identical parameterizations of this distribution. Returns: batch_shape: `TensorShape`, possibly unknown.
github-repos
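A sketch with a TensorFlow Probability distribution, which exposes the same property.

import tensorflow_probability as tfp

dist = tfp.distributions.Normal(loc=[0.0, 1.0, 2.0], scale=1.0)
dist.batch_shape  # TensorShape([3]): three independent parameterizations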
def _build_ragged_tensor_from_value_ranges(starts, limits, step, values): if step is None: step = 1 step = ops.convert_to_tensor(step, name='step') if step.dtype.is_integer: step = math_ops.cast(step, starts.dtype) else: raise TypeError('slice strides must be integers or None') value_indices = ragged_math_ops.range(starts, limits, step, row_splits_dtype=starts.dtype) if isinstance(values, ragged_tensor.RaggedTensor): gathered_values = ragged_gather_ops.gather(params=values, indices=value_indices.values) else: gathered_values = array_ops.gather(params=values, indices=value_indices.values) return value_indices.with_values(gathered_values)
Returns a `RaggedTensor` containing the specified sequences of values. Returns a RaggedTensor `output` where: ```python output.shape[0] = starts.shape[0] output[i] = values[starts[i]:limits[i]:step] ``` Requires that `starts.shape == limits.shape` and `0 <= starts[i] <= limits[i] <= values.shape[0]`. Args: starts: 1D integer Tensor specifying the start indices for the sequences of values to include. limits: 1D integer Tensor specifying the limit indices for the sequences of values to include. step: Integer value specifying the step size for strided slices. values: The set of values to select from. Returns: A `RaggedTensor`. Raises: ValueError: Until the prerequisite ops are checked in.
github-repos
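An equivalent public-API sketch: tf.ragged.range builds the per-row index ranges and tf.gather pulls the values, mirroring what the helper does internally.

import tensorflow as tf

values = tf.constant([10, 20, 30, 40, 50])
starts = tf.constant([0, 3], dtype=tf.int64)
limits = tf.constant([2, 5], dtype=tf.int64)
idx = tf.ragged.range(starts, limits)  # [[0, 1], [3, 4]]
out = tf.gather(values, idx)           # [[10, 20], [40, 50]]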
async def getProvStack(self, iden: str): return self.cell.provstor.getProvStack(s_common.uhex(iden))
Return the provenance stack associated with the given iden.

Args:
    iden (str): the iden from splice

Note:
    the iden appears on each splice entry as the 'prov' property
codesearchnet