Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes: codesearchnet, juraj-google-style, github-repos)
def ParseMetadataFile(self, parser_mediator, file_entry, data_stream_name):
    parent_path_spec = getattr(file_entry.path_spec, 'parent', None)
    filename_upper = file_entry.name.upper()
    if (self._mft_parser and parent_path_spec and
            filename_upper in ('$MFT', '$MFTMIRR') and not data_stream_name):
        self._ParseDataStreamWithParser(
            parser_mediator, self._mft_parser, file_entry, '')
    elif (self._usnjrnl_parser and parent_path_spec and
          filename_upper == '$USNJRNL' and data_stream_name == '$J'):
        volume_file_object = path_spec_resolver.Resolver.OpenFileObject(
            parent_path_spec, resolver_context=parser_mediator.resolver_context)
        try:
            self._ParseFileEntryWithParser(
                parser_mediator, self._usnjrnl_parser, file_entry,
                file_object=volume_file_object)
        finally:
            volume_file_object.close()
Parses a metadata file. Args: parser_mediator (ParserMediator): parser mediator. file_entry (dfvfs.FileEntry): file entry. data_stream_name (str): data stream name.
codesearchnet
def _derive_namespaces(self):
    for graph in [self.diffs.overlap, self.diffs.removed, self.diffs.added]:
        for s, p, o in graph:
            try:
                ns_prefix, ns_uri, predicate = graph.compute_qname(p)
                self.update_namespaces.add(ns_uri)
            except Exception:
                # log the term that failed to parse (ns_uri may be unbound here)
                logger.debug('could not parse Predicate URI: %s' % p)
            try:
                ns_prefix, ns_uri, predicate = graph.compute_qname(o)
                self.update_namespaces.add(ns_uri)
            except Exception:
                logger.debug('could not parse Object URI: %s' % o)
    logger.debug(self.update_namespaces)
    for ns_uri in self.update_namespaces:
        for k in self.prefixes.__dict__:
            if str(ns_uri) == str(self.prefixes.__dict__[k]):
                logger.debug('adding prefix %s for uri %s to unique_prefixes' % (k, str(ns_uri)))
                self.update_prefixes[k] = self.prefixes.__dict__[k]
Small method to loop through three graphs in self.diffs, identify unique namespace URIs. Then, loop through provided dictionary of prefixes and pin one to another. Args: None: uses self.prefixes and self.diffs Returns: None: sets self.update_namespaces and self.update_prefixes
juraj-google-style
def dataframe(start_row=0, max_rows=None, use_cache=True):
    output = QueryOutput()
    output._output_type = 'dataframe'
    output._dataframe_start_row = start_row
    output._dataframe_max_rows = max_rows
    output._use_cache = use_cache
    return output
Construct a query output object where the result is a dataframe Args: start_row: the row of the table at which to start the export (default 0). max_rows: an upper limit on the number of rows to export (default None). use_cache: whether to use cached results or not (default True).
codesearchnet
def call_rpc(*inputs, **kwargs):
    rpc_executor = kwargs['rpc_executor']
    output = []
    try:
        value = inputs[1].pop()
        addr = value.value >> 16
        rpc_id = value.value & 65535
        reading_value = rpc_executor.rpc(addr, rpc_id)
        output.append(IOTileReading(0, 0, reading_value))
    except (HardwareError, StreamEmptyError):
        pass
    for input_x in inputs:
        input_x.skip_all()
    return output
Call an RPC based on the encoded value read from input b. The response of the RPC must be a 4 byte value that is used as the output of this call. The encoded RPC must be a 32 bit value encoded as "BBH": B: ignored, should be 0 B: the address of the tile that we should call H: The id of the RPC to call All other readings are then skipped so that there are no readings in any input queue when this function returns Returns: list(IOTileReading)
codesearchnet
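A minimal sketch of the "BBH" encoding described in the call_rpc docstring above; the tile address and RPC id are illustrative assumptions, not values taken from the dataset.

# Illustrative bit layout for call_rpc's encoded input value:
# high byte ignored (0), next byte = tile address, low 16 bits = RPC id.
addr, rpc_id = 11, 0x8000          # assumed example values
value = (addr << 16) | rpc_id      # the ignored high byte stays 0
assert value >> 16 == addr         # matches 'addr = value.value >> 16'
assert value & 65535 == rpc_id     # matches 'rpc_id = value.value & 65535'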
def openResultsInBrowser(res):
    print(emphasis('\n\tOpening URIs in the default web browser...'))
    # The URL literal below was truncated in the source dump; only 'https:' survives.
    urisToBrowser(['https:'])
    time.sleep(2)
    uris = []
    for r in res:
        for att in r['attributes']:
            if att['type'] == 'i3visio.uri':
                uris.append(att['value'])
    urisToBrowser(uris)
Method that collects the URI from a list of entities and opens them Args: ----- res: A list containing several i3visio entities.
codesearchnet
def from_scf_task(cls, scf_task, ddk_tolerance=None, manager=None): if not isinstance(scf_task, ScfTask): raise TypeError("task `%s` does not inherit from ScfTask" % scf_task) new = cls(manager=manager) multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance) ddk_tasks = [] for ddk_inp in multi_ddk: ddk_task = new.register_ddk_task(ddk_inp, deps={scf_task: "WFK"}) ddk_tasks.append(ddk_task) multi_dde = scf_task.input.make_dde_inputs(use_symmetries=False) dde_tasks = [] dde_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks} dde_deps.update({scf_task: "WFK"}) for dde_inp in multi_dde: dde_task = new.register_dde_task(dde_inp, deps=dde_deps) dde_tasks.append(dde_task) dte_deps = {scf_task: "WFK DEN"} dte_deps.update({dde_task: "1WF 1DEN" for dde_task in dde_tasks}) multi_dte = scf_task.input.make_dte_inputs() dte_tasks = [] for dte_inp in multi_dte: dte_task = new.register_dte_task(dte_inp, deps=dte_deps) dte_tasks.append(dte_task) return new
Build a DteWork from a ground-state task. Args: scf_task: ScfTask object. ddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default. manager: :class:`TaskManager` object.
juraj-google-style
def __init__(self, input_file="mol.qin", output_file="mol.qout",
             scf_max_cycles=200, geom_max_cycles=200):
    self.input_file = input_file
    self.output_file = output_file
    self.scf_max_cycles = scf_max_cycles
    self.geom_max_cycles = geom_max_cycles
    self.outdata = None
    self.errors = []
    self.opt_error_history = []
Initializes the error handler from a set of input and output files. Args: input_file (str): Name of the QChem input file. output_file (str): Name of the QChem output file. scf_max_cycles (int): The max iterations to set to fix SCF failure. geom_max_cycles (int): The max iterations to set to fix geometry optimization failure.
juraj-google-style
def lat_id(self, line):
    if self.grid == 'WAC':
        lat = ((1 + self.LINE_PROJECTION_OFFSET - line) * self.MAP_SCALE * 0.001) / self.A_AXIS_RADIUS
        return lat * 180 / np.pi
    else:
        lat = float(self.CENTER_LATITUDE) - (line - float(self.LINE_PROJECTION_OFFSET) - 1) / float(self.MAP_RESOLUTION)
        return lat
Return the corresponding latitude Args: line (int): Line number Returns: Corresponding latitude in degrees
codesearchnet
def caleom(date):
    date = parsefun(date)
    date += datetime.timedelta(days=32 - date.day)
    date -= datetime.timedelta(days=date.day)
    return date
Adjust date to last day of the month, regardless of work days. Args: date (date, datetime or str): Date to be adjusted. Returns: datetime: Adjusted date.
juraj-google-style
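A small self-contained illustration of the end-of-month arithmetic caleom uses; the date is an arbitrary example, not part of the dataset row.

# Add 32 days to jump past the end of the current month, then subtract the
# resulting day-of-month to land on the last day of the original month.
import datetime

d = datetime.date(2020, 2, 10)
d += datetime.timedelta(days=32 - d.day)   # -> 2020-03-03
d -= datetime.timedelta(days=d.day)        # -> 2020-02-29 (leap year)
assert d == datetime.date(2020, 2, 29)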
def CheckDisjointCalendars(self):
    a_service_periods = self.feed_merger.a_schedule.GetServicePeriodList()
    b_service_periods = self.feed_merger.b_schedule.GetServicePeriodList()
    for a_service_period in a_service_periods:
        a_start, a_end = a_service_period.GetDateRange()
        for b_service_period in b_service_periods:
            b_start, b_end = b_service_period.GetDateRange()
            overlap_start = max(a_start, b_start)
            overlap_end = min(a_end, b_end)
            if overlap_end >= overlap_start:
                return False
    return True
Check whether any old service periods intersect with any new ones. This is a rather coarse check based on transitfeed.ServicePeriod.GetDateRange. Returns: True if the calendars are disjoint or False if not.
codesearchnet
def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates, padding, use_gpu, dtype=dtypes.float32): assert image_shape[3] == kernel_shape[2] np.random.seed(1) image = np.random.random_sample(image_shape).astype(np.float32) kernel = np.random.random_sample(kernel_shape).astype(np.float32) strides = [1] + strides + [1] rates = [1] + rates + [1] image_tensor = constant_op.constant(image, shape=image_shape, name='input', dtype=dtype) kernel_tensor = constant_op.constant(kernel, shape=kernel_shape, name='filter', dtype=dtype) def compute_dilation2d(image_tensor, kernel_tensor): return nn_ops.dilation2d(image_tensor, kernel_tensor, strides=strides, rates=rates, padding=padding, name='dilation2d') with test_util.device(use_gpu=use_gpu): with self.cached_session(): err1 = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(lambda x: compute_dilation2d(x, kernel_tensor), [image_tensor])) err2 = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(lambda x: compute_dilation2d(image_tensor, x), [kernel_tensor])) err = max(err1, err2) print('Dilation gradient error = %f' % err) if dtype == dtypes.bfloat16: self.assertLess(err, 4.0) else: self.assertLess(err, 0.0001)
Verifies the gradients of the dilation function. Args: image_shape: Input shape, [batch, in_height, in_width, channels]. kernel_shape: Filter shape, [filter_height, filter_width, channels]. strides: Output strides, specified as [stride_height, stride_width]. rates: Atrous rates, specified as [rate_height, rate_width]. padding: Padding type. use_gpu: Whether we are running on GPU.
github-repos
def _get_function_inputs(f, src_kwargs):
    if hasattr(f, "_func"):
        f = f._func
    try:
        argspec = inspect.getfullargspec(f)
    except AttributeError:
        argspec = inspect.getargspec(f)
    fkwargs = {k: v for k, v in six.iteritems(src_kwargs) if k in argspec.args}
    return fkwargs
Filters inputs to be compatible with function `f`'s signature. Args: f: Function according to whose input signature we filter arguments. src_kwargs: Keyword arguments to filter according to `f`. Returns: kwargs: Dict of key-value pairs in `src_kwargs` which exist in `f`'s signature.
juraj-google-style
def stop_loss_replace(self, accountID, orderID, **kwargs):
    return self.replace(
        accountID, orderID, order=StopLossOrderRequest(**kwargs))
Shortcut to replace a pending Stop Loss Order in an Account Args: accountID : The ID of the Account orderID : The ID of the Stop Loss Order to replace kwargs : The arguments to create a StopLossOrderRequest Returns: v20.response.Response containing the results from submitting the request
codesearchnet
def t2t_train(model_name, dataset_name, data_dir=None, output_dir=None,
              config_file=None, config=None):
    if model_name not in _MODEL_REGISTRY:
        raise ValueError("Model %s not in registry. Available models:\n * %s." %
                         (model_name, "\n * ".join(_MODEL_REGISTRY.keys())))
    model_class = _MODEL_REGISTRY[model_name]()
    gin.bind_parameter("train_fn.model_class", model_class)
    gin.bind_parameter("train_fn.dataset", dataset_name)
    gin.parse_config_files_and_bindings(config_file, config)
    train_fn(data_dir, output_dir=output_dir)
Main function to train the given model on the given dataset. Args: model_name: The name of the model to train. dataset_name: The name of the dataset to train on. data_dir: Directory where the data is located. output_dir: Directory where to put the logs and checkpoints. config_file: the gin configuration file to use. config: string (in gin format) to override gin parameters.
juraj-google-style
def directed_bipartition(seq, nontrivial=False):
    bipartitions = [
        (tuple(seq[i] for i in part0_idx), tuple(seq[j] for j in part1_idx))
        for part0_idx, part1_idx in directed_bipartition_indices(len(seq))
    ]
    if nontrivial:
        return bipartitions[1:-1]
    return bipartitions
Return a list of directed bipartitions for a sequence. Args: seq (Iterable): The sequence to partition. Returns: list[tuple[tuple]]: A list of tuples containing each of the two parts. Example: >>> directed_bipartition((1, 2, 3)) # doctest: +NORMALIZE_WHITESPACE [((), (1, 2, 3)), ((1,), (2, 3)), ((2,), (1, 3)), ((1, 2), (3,)), ((3,), (1, 2)), ((1, 3), (2,)), ((2, 3), (1,)), ((1, 2, 3), ())]
codesearchnet
def summarize(self, test_arr, vectorizable_token, sentence_list, limit=5): if isinstance(vectorizable_token, VectorizableToken) is False: raise TypeError() _ = self.inference(test_arr) _, loss_arr, _ = self.compute_retrospective_loss() loss_list = loss_arr.tolist() abstract_list = [] for i in range(limit): key = loss_arr.argmin() _ = loss_list.pop(key) loss_arr = np.array(loss_list) seq_arr = test_arr[key] token_arr = vectorizable_token.tokenize(seq_arr.tolist()) s = " ".join(token_arr.tolist()) _s = "".join(token_arr.tolist()) for sentence in sentence_list: if s in sentence or _s in sentence: abstract_list.append(sentence) abstract_list = list(set(abstract_list)) if len(abstract_list) >= limit: break return abstract_list
Summarize input document. Args: test_arr: `np.ndarray` of observed data points.. vectorizable_token: is-a `VectorizableToken`. sentence_list: `list` of all sentences. limit: The number of selected abstract sentence. Returns: `list` of `str` of abstract sentences.
juraj-google-style
def get_reduced(self, column_reductions):
    for cr in column_reductions:
        if cr not in self.column_reductions:
            raise ValueError("Column reduction %r is not known to this Aggregator!" % cr)
    return self.reduced_df[column_reductions]
This function gets called by ColumnFunction._apply(). After a ColumnFunction has been passed to Aggregator's constructor, the ColumnFunction can use this function to request the populated, aggregated columns that correspond to its ColumnReductions. Args: column_reduction (list[ColumnReduction]) Returns: pd.DataFrame: A dataframe, where the column names are ColumnReductions.
juraj-google-style
def _SetYaraRules(self, yara_rules_string):
    if not yara_rules_string:
        return
    analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance(
        'yara')
    analyzer_object.SetRules(yara_rules_string)
    self._analyzers.append(analyzer_object)
Sets the Yara rules. Args: yara_rules_string (str): unparsed Yara rule definitions.
juraj-google-style
def readyup_entity(self, label: str, type: str, uid: Union[(int, str)]=None, comment: str=None, definition: str=None, superclass: str=None, synonyms: list=None, existing_ids: List[dict]=None) -> dict: entity = dict(label=label, type=type) if uid: entity['uid'] = uid if definition: entity['definition'] = definition if comment: entity['comment'] = comment if superclass: entity['superclass'] = {'ilx_id': self.fix_ilx(superclass)} if synonyms: entity['synonyms'] = [{'literal': syn} for syn in synonyms] if existing_ids: if (existing_ids[0].get('curie') and existing_ids[0].get('iri')): pass else: exit('Need curie and iri for existing_ids in List[dict] form') entity['existing_ids'] = existing_ids return entity
Setups the entity to be InterLex ready Args: label: name of entity type: entities type Can be any of the following: term, cde, fde, pde, annotation, relationship uid: usually fine and auto completes to api user ID, but if you provide one with a clearance higher than 0 you can make your own custom. Good for mass imports by one person to avoid label collides. definition: entities definition comment: a foot note regarding either the interpretation of the data or the data itself superclass: entity is a sub-part of this entity Example: Organ is a superclass to Brain synonyms: entity synonyms existing_ids: existing curie/iris that link data | couldnt format this easier Returns: dict
codesearchnet
def clean(self, value, *_):
    if not value or not isinstance(value, LocalizedValue):
        return None
    is_all_null = True
    for lang_code, _ in settings.LANGUAGES:
        if value.get(lang_code) is not None:
            is_all_null = False
            break
    if is_all_null and self.null:
        return None
    return value
Cleans the specified value into something we can store in the database. For example, when all the language fields are left empty, and the field is allowed to be null, we will store None instead of empty keys. Arguments: value: The value to clean. Returns: The cleaned value, ready for database storage.
codesearchnet
def autodiff_ast(func, wrt, motion, mode, preserve_result, check_dims, verbose): node = annotate.resolve_calls(func) node = desugar.explicit_loop_indexes(node) fence.validate(node, inspect.getsource(func)) node = anf_.anf(node) if (verbose >= 2): print('ANF') print(quoting.to_source(node)) if (mode == 'reverse'): (node, required, stack) = reverse_ad.reverse_ad(node.body[0], wrt, preserve_result, check_dims) if (verbose >= 2): print('RAW') print(quoting.to_source(node)) if (motion == 'split'): node = reverse_ad.split(node, stack) else: node = reverse_ad.joint(node) if (verbose >= 2): print('MOTION') print(quoting.to_source(node)) elif (mode == 'forward'): (node, required) = forward_ad.forward_ad(node.body[0], wrt, preserve_result, check_dims) return (node, required)
Perform AD on a single function and return the AST. Args: See `grad`. Returns: node: The AST of a module containing the adjoint and primal function definitions. required: A list of non-built in functions that this function called, and of which the primals and adjoints need to be made available in order for the returned function to run.
codesearchnet
def identify_triggers(cfg, sources, sinks, lattice, nosec_lines):
    assignment_nodes = filter_cfg_nodes(cfg, AssignmentNode)
    tainted_nodes = filter_cfg_nodes(cfg, TaintedNode)
    tainted_trigger_nodes = [
        TriggerNode(Source('Framework function URL parameter'), cfg_node=node)
        for node in tainted_nodes
    ]
    sources_in_file = find_triggers(assignment_nodes, sources, nosec_lines)
    sources_in_file.extend(tainted_trigger_nodes)
    find_secondary_sources(assignment_nodes, sources_in_file, lattice)
    sinks_in_file = find_triggers(cfg.nodes, sinks, nosec_lines)
    sanitiser_node_dict = build_sanitiser_node_dict(cfg, sinks_in_file)
    return Triggers(sources_in_file, sinks_in_file, sanitiser_node_dict)
Identify sources, sinks and sanitisers in a CFG. Args: cfg(CFG): CFG to find sources, sinks and sanitisers in. sources(tuple): list of sources, a source is a (source, sanitiser) tuple. sinks(tuple): list of sources, a sink is a (sink, sanitiser) tuple. nosec_lines(set): lines with # nosec whitelisting Returns: Triggers tuple with sink and source nodes and a sanitiser node dict.
juraj-google-style
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    tstream = BytearrayStream()
    if self.unique_identifier is not None:
        self.unique_identifier.write(tstream, kmip_version=kmip_version)
    self.length = tstream.length()
    super(ActivateRequestPayload, self).write(
        ostream, kmip_version=kmip_version)
    ostream.write(tstream.buffer)
Write the data encoding the ActivateRequestPayload object to a stream. Args: ostream (Stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def coerce(self, value):
    if isinstance(value, compat.basestring):
        return value
    return str(value)
Convert any value into a string value. Args: value (any): The value to coerce. Returns: str: The string representation of the value.
juraj-google-style
def match_Signature_against_Signature(self, sig1, sig2, subst, skip_self=False): subst.update({p.type_param: None for p in sig1.template + sig2.template}) params1 = sig1.params params2 = sig2.params if skip_self: assert params1 and params1[0].name == 'self' params1 = params1[1:] if params2 and params2[0].name == 'self': params2 = params2[1:] equalities = [] if len(params1) > len(params2) and (not sig2.has_optional): return booleq.FALSE if sig1.starargs is not None and sig2.starargs is not None: equalities.append(self.match_type_against_type(sig1.starargs.type, sig2.starargs.type, subst)) if sig1.starstarargs is not None and sig2.starstarargs is not None: equalities.append(self.match_type_against_type(sig1.starstarargs.type, sig2.starstarargs.type, subst)) for p1, p2 in zip(params1, params2): if p1.optional and (not p2.optional): return booleq.FALSE for i, p2 in enumerate(params2): if i >= len(params1): if not p2.optional: return booleq.FALSE else: pass else: p1 = params1[i] if p1.name != p2.name and (not (pytd_utils.ANON_PARAM.match(p1.name) or pytd_utils.ANON_PARAM.match(p2.name))): return booleq.FALSE equalities.append(self.match_type_against_type(p1.type, p2.type, subst)) equalities.append(self.match_type_against_type(sig1.return_type, sig2.return_type, subst)) return booleq.And(equalities)
Match a pytd.Signature against another pytd.Signature. Args: sig1: The caller sig2: The callee subst: Current type parameters. skip_self: If True, doesn't compare the first parameter, which is considered (and verified) to be "self". Returns: An instance of booleq.BooleanTerm, i.e. a boolean formula.
github-repos
def interceptable(func):
    @functools.wraps(func)
    def func_wrapped(*args, **kwargs):
        with get_next_interceptor() as interceptor:
            return interceptor(func, *args, **kwargs)
    return func_wrapped
Decorator that wraps `func` so that its execution is intercepted. The wrapper passes `func` to the interceptor for the current thread. If there is no next interceptor, we perform an "immediate" call to `func`. That is, `func` terminates without forwarding its execution to another interceptor. Args: func: Function to wrap. Returns: The decorated function.
juraj-google-style
def element_or_none(self, using, value):
    try:
        return self._execute(Command.FIND_ELEMENT, {
            'using': using,
            'value': value
        })
    except:
        return None
Check if an element in the current context. Support: Android iOS Web(WebView) Args: using(str): The element location strategy. value(str): The value of the location strategy. Returns: Return Element if the element does exists and return None otherwise. Raises: WebDriverException.
juraj-google-style
def error_message(channel, err_title, err_message):
    gui = ui_embed.UI(
        channel,
        err_title,
        err_message,
        modulename=modulename,
        colour=modulecolor_error
    )
    return gui
Creates an embed UI for the topic update Args: channel (discord.Channel): The Discord channel to bind the embed to err_title: The title for the error err_message: The message for the error Returns: embed: The created embed
juraj-google-style
def rand_ascii_str(length):
    letters = [random.choice(ascii_letters_and_digits) for _ in range(length)]
    return ''.join(letters)
Generates a random string of specified length, composed of ascii letters and digits. Args: length: The number of characters in the string. Returns: The random string generated.
github-repos
def avl_split_first(root):
    if root is None:
        raise IndexError('Empty tree has no maximum element')
    root, left, right = avl_release_kids(root)
    if left is None:
        new_root, first_node = right, root
    else:
        new_left, first_node = avl_split_first(left)
        new_root = avl_join(new_left, right, root)
    return (new_root, first_node)
Removes the minimum element from the tree Returns: tuple: new_root, first_node O(log(n)) = O(height(root))
codesearchnet
def multiply(self, other):
    if not isinstance(other, Number):
        raise QiskitError("other is not a number")
    return SuperOp(other * self._data, self.input_dims(), self.output_dims())
Return the QuantumChannel other * self. Args: other (complex): a complex number. Returns: SuperOp: the scalar multiplication other * self as a SuperOp object. Raises: QiskitError: if other is not a valid scalar.
juraj-google-style
def __call__(self, data):
    if isinstance(data, dict):
        return json.dumps({k: _ndarray_to_list(v) for k, v in six.iteritems(data)})
    if hasattr(data, 'read'):
        return _json_serialize_from_buffer(data)
    return json.dumps(_ndarray_to_list(data))
Take data of various formats and serialize them into the expected request body. This uses information about supported input formats for the deployed model. Args: data (object): Data to be serialized. Returns: object: Serialized data used for the request.
juraj-google-style
def _to_bfloat16_unbiased(x, noise):
    x_sign = tf.sign(x)
    x = x * x_sign + 1e-30
    cand1 = tf.to_bfloat16(x)
    cand1_f = tf.to_float(cand1)
    cand2 = tf.to_bfloat16(
        tf.where(tf.greater(x, cand1_f), cand1_f * 1.005, cand1_f * 0.995))
    ret = _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2)
    return ret * tf.to_bfloat16(x_sign)
Convert a float32 to a bfloat16 using randomized roundoff. Args: x: A float32 Tensor. noise: a float32 Tensor with values in [0, 1), broadcastable to tf.shape(x) Returns: A float32 Tensor.
juraj-google-style
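A NumPy analogue (an assumption-level sketch, not the TensorFlow code above) of why randomized roundoff between two candidates is unbiased in expectation.

# Round down or up with probability proportional to the distance to each
# candidate; the expected value of the rounded result equals the input.
import numpy as np

rng = np.random.default_rng(0)
x = 1.3
lo, hi = 1.0, 2.0                      # two representable candidates around x
noise = rng.random(100000)
rounded = np.where(noise < (x - lo) / (hi - lo), hi, lo)
assert abs(rounded.mean() - x) < 0.01  # unbiased in expectation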
def source_required(src_file): if (not src_file.exists()): return True required = True hash_file = src_file.with_suffix('.hash', depth=0) LOG.debug('Hash file location: %s', hash_file) if hash_file.exists(): new_hash = get_hash_of_dirs(src_file) with open(hash_file, 'r') as h_file: old_hash = h_file.readline() required = (not (new_hash == old_hash)) if required: from benchbuild.utils.cmd import rm rm('-r', src_file) rm(hash_file) if required: LOG.info('Source required for: %s', src_file) LOG.debug('Reason: src-exists: %s hash-exists: %s', src_file.exists(), hash_file.exists()) return required
Check, if a download is required. Args: src_file: The filename to check for. src_root: The path we find the file in. Returns: True, if we need to download something, False otherwise.
codesearchnet
def in_to_out(self, in_path, out_path=None): if is_same_file(in_path, out_path): logger.debug('in path and out path are the same file. writing to temp file and then replacing in path with the temp file.') out_path = None logger.debug(f'opening source file: {in_path}') with open(in_path) as infile: obj = self.object_representer.load(infile) if out_path: logger.debug(f'opening destination file for writing: {out_path}') ensure_dir(out_path) with open(out_path, 'w') as outfile: self.object_representer.dump(outfile, self.formatter(obj)) return else: logger.debug('opening temp file for writing...') with NamedTemporaryFile(mode='w+t', dir=os.path.dirname(in_path), delete=False) as outfile: self.object_representer.dump(outfile, self.formatter(obj)) logger.debug(f'moving temp file to: {in_path}') move_temp_file(outfile.name, infile.name)
Load file into object, formats, writes object to out. If in_path and out_path point to the same thing it will in-place edit and overwrite the in path. Even easier, if you do want to edit a file in place, don't specify out_path, or set it to None. Args: in_path: str or path-like. Must refer to a single existing file. out_path: str or path-like. Must refer to a single destination file location. will create directory structure if it doesn't exist. If out_path is not specified or None, will in-place edit and overwrite the in-files. Returns: None.
codesearchnet
def _broadcast_grad(op, accumulated_grad):
    grads = [t for t in accumulated_grad.op.inputs]
    for t in grads:
        _check_device(t)
    with ops.device(op.device):
        return gen_nccl_ops.nccl_reduce(input=grads, reduction='sum')
The gradients for input `Operation` of `broadcast`. Args: op: The `broadcast send` `Operation` that we are differentiating. accumulated_grad: Accumulated gradients with respect to the output of the `broadcast` op. Returns: Gradients with respect to the input of `broadcast`.
github-repos
def get_query_info(sql, con, partition_column):
    engine = create_engine(con)
    if is_table(engine, sql):
        table_metadata = get_table_metadata(engine, sql)
        query = build_query_from_table(sql)
        cols = get_table_columns(table_metadata)
    else:
        check_query(sql)
        query = sql.replace(';', '')
        cols = get_query_columns(engine, query)
    cols_names = list(cols.keys())
    return (cols_names, query)
Return a columns name list and the query string Args: sql: SQL query or table name con: database connection or url string partition_column: column used to share the data between the workers Returns: Columns name list and query string
codesearchnet
def create_latin_hypercube_samples(order, dim=1):
    randoms = numpy.random.random(order * dim).reshape((dim, order))
    for dim_ in range(dim):
        perm = numpy.random.permutation(order)
        randoms[dim_] = (perm + randoms[dim_]) / order
    return randoms
Latin Hypercube sampling. Args: order (int): The order of the latin hyper-cube. Defines the number of samples. dim (int): The number of dimensions in the latin hyper-cube. Returns (numpy.ndarray): Latin hyper-cube with ``shape == (dim, order)``.
codesearchnet
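A standalone sketch mirroring the stratified-sampling step above; the function name latin_hypercube is a local stand-in, not the library's API.

# Each dimension gets one jittered sample per stratum 1/order wide, so the
# result has shape (dim, order) with values in [0, 1).
import numpy

def latin_hypercube(order, dim=1):
    randoms = numpy.random.random(order * dim).reshape((dim, order))
    for d in range(dim):
        perm = numpy.random.permutation(order)       # one stratum per sample
        randoms[d] = (perm + randoms[d]) / order     # jitter inside each stratum
    return randoms

samples = latin_hypercube(order=5, dim=2)
assert samples.shape == (2, 5)
assert ((samples >= 0) & (samples < 1)).all()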
def GetFileEntryByPathSpec(self, path_spec): tsk_vs_part, partition_index = tsk_partition.GetTSKVsPartByPathSpec( self._tsk_volume, path_spec) location = getattr(path_spec, 'location', None) if tsk_vs_part is None: if location is None or location != self.LOCATION_ROOT: return None return tsk_partition_file_entry.TSKPartitionFileEntry( self._resolver_context, self, path_spec, is_root=True, is_virtual=True) if location is None and partition_index is not None: path_spec.location = '/p{0:d}'.format(partition_index) return tsk_partition_file_entry.TSKPartitionFileEntry( self._resolver_context, self, path_spec)
Retrieves a file entry for a path specification. Args: path_spec (PathSpec): a path specification. Returns: TSKPartitionFileEntry: a file entry or None of not available.
juraj-google-style
def __init__(self, step_stats: step_stats_pb2.StepStats, graph: Optional[Any]=None) -> None: self._origin_step_stats = step_stats self._step_stats = None self._graph = graph self._chrome_trace = _ChromeTraceFormatter() self._next_pid = 0 self._device_pids = {} self._tensor_pids = {} self._tensors = {} self._next_flow_id = 0 self._flow_starts = {} self._alloc_times = {} self._allocator_maximums = {}
Constructs a new Timeline. A 'Timeline' is used for visualizing the execution of a TensorFlow computation. It shows the timings and concurrency of execution at the granularity of TensorFlow Ops. This class is not thread safe. Args: step_stats: The 'step_stats_pb2.StepStats' proto recording execution times. graph: (Optional) The 'Graph' that was executed.
github-repos
def tensor_layout(self, tensor_shape, mesh_shape):
    ret = [self.tensor_dimension_to_mesh_axis(d, mesh_shape) for d in tensor_shape]
    not_nones = [a for a in ret if a is not None]
    if len(not_nones) != len(set(not_nones)):
        raise ValueError('Two Tensor Dimensions may not map to the same Mesh Dimension: layout=%s tensor_shape=%s mesh_shape=%s ' % (self, tensor_shape, mesh_shape))
    return TensorLayout(ret)
Computes TensorLayout given a Tensor Shape and a Mesh Shape. Args: tensor_shape: Shape. mesh_shape: Shape. Returns: TensorLayout. Raises: ValueError: If two Tensor Dimensions map to the same Mesh Dimensions.
codesearchnet
def load_image(buf, request_components=0):
    x = ffi.new('int*')
    y = ffi.new('int*')
    n = ffi.new('int*')
    cbuf = ffi.from_buffer(buf)
    bitmap = lib.stbi_load_from_memory(
        ffi.cast('unsigned char*', cbuf), len(buf), x, y, n, request_components)
    pybuffer = ffi.buffer(bitmap, x[0] * y[0] * n[0])
    return (pybuffer, x[0], y[0], n[0])
Load a png or jpeg image into a bitmap buffer. Args: buf (Buffer): Buffer to load request_components (int): If you want to force number of components Returns: A tuple containing: - Bitmap buffer - width of bitmap - height of bitmap - number of components
codesearchnet
def clone(self, deep: bool = False, memo: Optional[Any] = None,
          override: Optional[Dict[str, Any]] = None) -> 'Symbolic':
    return self.sym_clone(deep, memo, override)
Clones current object symbolically. Args: deep: If True, perform deep copy (equivalent to copy.deepcopy). Otherwise shallow copy (equivalent to copy.copy). memo: Memo object for deep clone. override: An optional dict of key path to new values to override cloned value. Returns: A copy of self.
github-repos
def if_then_else(cls, condition: 'TensorFluent',
                 true_case: 'TensorFluent',
                 false_case: 'TensorFluent') -> 'TensorFluent':
    true = TensorFluent.constant(True, tf.bool)
    false = TensorFluent.constant(False, tf.bool)
    ite = (condition == true) * true_case + (condition == false) * false_case
    if true_case.dtype == tf.bool and false_case.dtype == tf.bool:
        ite = ite.cast(tf.bool)
    return ite
Returns a TensorFluent for the control op if-then-else. Args: condition: Boolean fluent for the if condition. true_case: Fluent returned in the true clause. false_case: Fluent returned in the false clause. Returns: A TensorFluent wrapping the if-then-else control statement. Raises: ValueError: If cases don't have same shape.
juraj-google-style
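A NumPy illustration of the indicator-multiplication trick if_then_else relies on; it mirrors the arithmetic only, not the TensorFluent API, and the arrays are made-up examples.

# Multiplying each branch by a 0/1 indicator and summing selects the right
# value elementwise.
import numpy as np

condition = np.array([True, False, True])
true_case = np.array([1.0, 2.0, 3.0])
false_case = np.array([10.0, 20.0, 30.0])

ite = (condition == True) * true_case + (condition == False) * false_case
assert (ite == np.array([1.0, 20.0, 3.0])).all()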
def attention_lm_moe_large():
    hparams = attention_lm_moe_base()
    hparams.num_hidden_layers = 5
    hparams.moe_layers = '3'
    hparams.hidden_size = 1024
    hparams.num_heads = 16
    hparams.filter_size = 4096
    hparams.moe_hidden_sizes = '4096'
    hparams.moe_num_experts = 128
    hparams.layer_prepostprocess_dropout = 0.2
    return hparams
Large model for distributed training. Over 1B parameters, so requires multi-gpu training due to memory requirements. on lm1b_32k: After 45K steps on 8 GPUs (synchronous): eval_log_ppl_per_token = 3.18 eval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9 Returns: an hparams object.
codesearchnet
def __init__(self, text: str, schema_data: SchemaData, mid: ModuleId):
    super().__init__(text)
    self.mid = mid
    self.schema_data = schema_data
Initialize the parser instance. Args: text: Feature expression text. schema_data: Data for the current schema. mid: Identifier of the context module. Raises: ModuleNotRegistered: If `mid` is not registered in the data model.
juraj-google-style
def bulk_actions(objects, index, action):
    assert index != '_all', "index arg must be a valid index name. '_all' is a reserved term."
    logger.info("Creating bulk '%s' actions for '%s'", action, index)
    for obj in objects:
        try:
            logger.debug("Appending '%s' action for '%r'", action, obj)
            yield obj.as_search_action(index=index, action=action)
        except Exception:
            logger.exception('Unable to create search action for %s', obj)
Yield bulk api 'actions' from a collection of objects. The output from this method can be fed in to the bulk api helpers - each document returned by get_documents is decorated with the appropriate bulk api op_type. Args: objects: iterable (queryset, list, ...) of SearchDocumentMixin objects. If the iterable passed in is a generator, then this function will yield the results rather than returning them. index: string, the name of the index to target - the index name is embedded into the return value and is used by the bulk api. action: string ['index' | 'update' | 'delete'] - this decides how the final document is formatted.
codesearchnet
def get_node(self, role: str, default=None) -> BioCNode:
    return next((node for node in self.nodes if node.role == role), default)
Get the first node with role Args: role: role default: node returned instead of raising StopIteration Returns: the first node with role
codesearchnet
def parser(self, column: str, parser: str, error: str, value: Any) -> None:
    log = self._build_parser_message(column, parser, error, value)
    self.queue_log_message(log)
Adds parser error information to base log message and sends it to the logger for writing. Args: * column: column where the rule is applied * parser: parser function that failed and raises this message * error: error that occurred * value: value that fails to parse Returns: * None
github-repos
def earliest_date(dates, full_date=False):
    min_date = min(PartialDate.loads(date) for date in dates)
    if not min_date.month and full_date:
        min_date.month = 1
    if not min_date.day and full_date:
        min_date.day = 1
    return min_date.dumps()
Return the earliest among the schema-compliant dates. This is a convenience wrapper around :ref:`PartialDate`, which should be used instead if more features are needed. Args: dates(list): List of dates from which oldest/earliest one will be returned full_date(bool): Adds month and/or day as "01" if they are missing Returns: str: Earliest date from provided list
codesearchnet
def distances_from_root(self, leaves=True, internal=True, unlabeled=False): if not isinstance(leaves, bool): raise TypeError("leaves must be a bool") if not isinstance(internal, bool): raise TypeError("internal must be a bool") if not isinstance(unlabeled, bool): raise TypeError("unlabeled must be a bool") if leaves or internal: d = dict() for node in self.traverse_preorder(): if node.is_root(): d[node] = 0 else: d[node] = d[node.parent] if node.edge_length is not None: d[node] += node.edge_length if ((leaves and node.is_leaf()) or (internal and not node.is_leaf())) and (unlabeled or node.label is not None): yield (node,d[node])
Generator over the root-to-node distances of this ``Tree``; (node,distance) tuples Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False`` ``unlabeled`` (``bool``): ``True`` to include unlabeled nodes, otherwise ``False``
juraj-google-style
def merge_nodes(self, n1: str, n2: str, same_polarity: bool=True): for p in self.predecessors(n1): for st in self[p][n1]['InfluenceStatements']: if (not same_polarity): st.obj_delta['polarity'] = (- st.obj_delta['polarity']) st.obj.db_refs['UN'][0] = (n2, st.obj.db_refs['UN'][0][1]) if (not self.has_edge(p, n2)): self.add_edge(p, n2) self[p][n2]['InfluenceStatements'] = self[p][n1]['InfluenceStatements'] else: self[p][n2]['InfluenceStatements'] += self[p][n1]['InfluenceStatements'] for s in self.successors(n1): for st in self.edges[(n1, s)]['InfluenceStatements']: if (not same_polarity): st.subj_delta['polarity'] = (- st.subj_delta['polarity']) st.subj.db_refs['UN'][0] = (n2, st.subj.db_refs['UN'][0][1]) if (not self.has_edge(n2, s)): self.add_edge(n2, s) self[n2][s]['InfluenceStatements'] = self[n1][s]['InfluenceStatements'] else: self[n2][s]['InfluenceStatements'] += self[n1][s]['InfluenceStatements'] self.remove_node(n1)
Merge node n1 into node n2, with the option to specify relative polarity. Args: n1 n2 same_polarity
codesearchnet
async def get_matches(self, state: MatchState = MatchState.all_):
    matches = await self.connection(
        'GET',
        'tournaments/{}/matches'.format(self._tournament_id),
        state=state.value,
        participant_id=self._id)
    ms = []
    for m in matches:
        ms.append(await self._tournament.get_match(m['match']['id']))
    return ms
Return the matches of the given state |methcoro| Args: state: see :class:`MatchState` Raises: APIException
codesearchnet
def _find_sequence(self) -> List[GridQubit]:
    tail = self._sequence_search(self._start, [])
    tail.pop(0)
    head = self._sequence_search(self._start, tail)
    head.reverse()
    return self._expand_sequence(head + tail)
Looks for a sequence starting at a given qubit. Search is issued twice from the starting qubit, so that longest possible sequence is found. Starting qubit might not be the first qubit on the returned sequence. Returns: The longest sequence found by this method.
codesearchnet
def conv_input_length(output_length, filter_size, padding, stride):
    if output_length is None:
        return None
    assert padding in {'same', 'valid', 'full'}
    if padding == 'same':
        # integer-divide by 2; the '// 2' was stripped in the source dump
        pad = filter_size // 2
    elif padding == 'valid':
        pad = 0
    elif padding == 'full':
        pad = filter_size - 1
    return (output_length - 1) * stride - 2 * pad + filter_size
Determines input length of a convolution given output length. Args: output_length: integer. filter_size: integer. padding: one of "same", "valid", "full". stride: integer. Returns: The input length (integer).
github-repos
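A worked check of the formula above, assuming the 'same' branch pads by filter_size // 2 as restored in the code; the shapes are arbitrary examples.

def conv_input_length(output_length, filter_size, padding, stride):
    if output_length is None:
        return None
    assert padding in {'same', 'valid', 'full'}
    if padding == 'same':
        pad = filter_size // 2
    elif padding == 'valid':
        pad = 0
    elif padding == 'full':
        pad = filter_size - 1
    return (output_length - 1) * stride - 2 * pad + filter_size

assert conv_input_length(5, 3, 'valid', 2) == 11   # forward: floor((11 - 3) / 2) + 1 == 5
assert conv_input_length(5, 3, 'same', 2) == 9     # forward: ceil(9 / 2) == 5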
def MakeSuiteFromDict(d, name=''):
    suite = Suite(name=name)
    suite.SetDict(d)
    suite.Normalize()
    return suite
Makes a suite from a map from values to probabilities. Args: d: dictionary that maps values to probabilities name: string name for this suite Returns: Suite object
codesearchnet
def get_parser(segmenter, **options):
    if segmenter == 'nlapi':
        return NLAPIParser(**options)
    elif segmenter == 'mecab':
        return MecabParser()
    elif segmenter == 'tinysegmenter':
        return TinysegmenterParser()
    else:
        raise ValueError('Segmenter {} is not supported.'.format(segmenter))
Gets a parser. Args: segmenter (str): Segmenter to use. options (:obj:`dict`, optional): Optional settings. Returns: Parser (:obj:`budou.parser.Parser`) Raises: ValueError: If unsupported segmenter is specified.
codesearchnet
def ReleaseFileObject(self, file_object): identifier, cache_value = self._file_object_cache.GetCacheValueByObject( file_object) if not identifier: raise RuntimeError('Object not cached.') if not cache_value: raise RuntimeError('Invalid cache value.') self._file_object_cache.ReleaseObject(identifier) result = cache_value.IsDereferenced() if result: self._file_object_cache.RemoveObject(identifier) return result
Releases a cached file-like object. Args: file_object (FileIO): file-like object. Returns: bool: True if the file-like object can be closed. Raises: PathSpecError: if the path specification is incorrect. RuntimeError: if the file-like object is not cached or an inconsistency is detected in the cache.
juraj-google-style
def load_strain(self, strain_id, strain_genome_file):
    strain_gp = GEMPRO(gem_name=strain_id, genome_path=strain_genome_file,
                       write_protein_fasta_files=False)
    self.strains.append(strain_gp)
    return self.strains.get_by_id(strain_id)
Load a strain as a new GEM-PRO by its ID and associated genome file. Stored in the ``strains`` attribute. Args: strain_id (str): Strain ID strain_genome_file (str): Path to strain genome file
juraj-google-style
def truncate_rationale(rationale, max_length=MAX_RATIONALE_SIZE_IN_EVENT):
    if isinstance(rationale, basestring) and max_length is not None and len(rationale) > max_length:
        return (rationale[0:max_length], True)
    else:
        return (rationale, False)
Truncates the rationale for analytics event emission if necessary Args: rationale (string): the string value of the rationale max_length (int): the max length for truncation Returns: truncated_value (string): the possibly truncated version of the rationale was_truncated (bool): returns true if the rationale is truncated
codesearchnet
def _sync_content_metadata(self, serialized_data): url = self.enterprise_configuration.sapsf_base_url + self.global_sap_config.course_api_path try: status_code, response_body = self._call_post_with_session(url, serialized_data) except requests.exceptions.RequestException as exc: raise ClientError( 'SAPSuccessFactorsAPIClient request failed: {error} {message}'.format( error=exc.__class__.__name__, message=str(exc) ) ) if status_code >= 400: raise ClientError( 'SAPSuccessFactorsAPIClient request failed with status {status_code}: {message}'.format( status_code=status_code, message=response_body ) )
Create/update/delete content metadata records using the SuccessFactors OCN Course Import API endpoint. Arguments: serialized_data: Serialized JSON string representing a list of content metadata items. Raises: ClientError: If SuccessFactors API call fails.
juraj-google-style
def lower(self, lowering): old_shape = self.inputs[0].shape new_shape = self.outputs[0].shape mesh_impl = lowering.mesh_impl(self) slices = lowering.tensors[self.inputs[0]] mesh_axis_to_cumprod_old = mesh_impl.mesh_axis_to_cumprod(old_shape) mesh_axis_to_cumprod_new = mesh_impl.mesh_axis_to_cumprod(new_shape) mesh_axes_allsplit = [] mesh_axes_allconcat = [] mesh_axes_alltoall = [] for (mesh_axis, (old_cumprod, new_cumprod)) in enumerate(zip(mesh_axis_to_cumprod_old, mesh_axis_to_cumprod_new)): if (new_cumprod != old_cumprod): if (old_cumprod is None): mesh_axes_allsplit.append(mesh_axis) elif (new_cumprod is None): mesh_axes_allconcat.append(mesh_axis) else: mesh_axes_alltoall.append(mesh_axis) laid_out_size = mesh_impl.laid_out_size(old_shape) for mesh_axis in mesh_axes_allsplit: tensor_axis = old_shape.cumprod_to_tensor_axis(mesh_axis_to_cumprod_new[mesh_axis]) if (tensor_axis is None): raise NotImplementedError(('Try first reshaping to insert a new tf dimension, then changing layout. input_shape=%s output_shape=%s' % (self.inputs[0].shape, self.outputs[0].shape))) slices = mesh_impl.allsplit(slices, mesh_axis, tensor_axis) laid_out_size for mesh_axis in mesh_axes_alltoall: split_tensor_axis = old_shape.cumprod_to_tensor_axis(mesh_axis_to_cumprod_new[mesh_axis]) if (split_tensor_axis is None): raise NotImplementedError(('Try first reshaping to insert a new tf dimension, then changing layout. input_shape=%s output_shape=%s' % (self.inputs[0].shape, self.outputs[0].shape))) concat_tensor_axis = old_shape.cumprod_to_tensor_axis(mesh_axis_to_cumprod_old[mesh_axis]) assert (concat_tensor_axis is not None) slices = mesh_impl.alltoall(slices, mesh_axis, split_tensor_axis, concat_tensor_axis) lowering.add_counter(('alltoall/%s/reshape_op' % mesh_axis), laid_out_size) for mesh_axis in mesh_axes_allconcat: tensor_axis = old_shape.cumprod_to_tensor_axis(mesh_axis_to_cumprod_old[mesh_axis]) assert (tensor_axis is not None) slices = mesh_impl.allconcat(slices, mesh_axis, tensor_axis) laid_out_size *= mesh_impl.shape[mesh_axis].size lowering.add_counter(('allconcat/%s/reshape_op' % mesh_axis), laid_out_size) old_slice_shape = mesh_impl.slice_shape(old_shape) new_slice_shape = mesh_impl.slice_shape(new_shape) if (new_slice_shape != old_slice_shape): def reshape_fn(x): return tf.reshape(x, new_slice_shape) slices = mesh_impl.slicewise(reshape_fn, slices) lowering.set_tensor_lowering(self.outputs[0], slices)
Lower the ReshapeOperation. Reshaping can require collective communication between processors. We haven't yet implemented all possible reshapes. We try to handle the common cases here - otherwise we raise a NotImplementedError. Args: lowering: a Lowering Raises: NotImplementedError: if we haven't covered this case
codesearchnet
def handle_message(self, ch, method, properties, body): input = {} headers = {} try: self.sessid = method.routing_key input = json_decode(body) data = input['data'] if 'path' in data: if data['path'] in VIEW_METHODS: data['view'] = data['path'] else: data['wf'] = data['path'] session = Session(self.sessid) headers = {'remote_ip': input['_zops_remote_ip'], 'source': input['_zops_source']} if 'wf' in data: output = self._handle_workflow(session, data, headers) elif 'job' in data: self._handle_job(session, data, headers) return else: output = self._handle_view(session, data, headers) except HTTPError as e: import sys if hasattr(sys, '_called_from_test'): raise output = {"cmd": "error", "error": self._prepare_error_msg(e.message), "code": e.code} log.exception("Http error occurred") except: self.current = Current(session=session, input=data) self.current.headers = headers import sys if hasattr(sys, '_called_from_test'): raise err = traceback.format_exc() output = {"cmd": "error", "error": self._prepare_error_msg(err), "code": 500} log.exception("Worker error occurred with messsage body:\n%s" % body) if 'callbackID' in input: output['callbackID'] = input['callbackID'] log.info("OUTPUT for %s: %s" % (self.sessid, output)) output['reply_timestamp'] = time() self.send_output(output)
this is a pika.basic_consumer callback handles client inputs, runs appropriate workflows and views Args: ch: amqp channel method: amqp method properties: body: message body
juraj-google-style
def icao(msg):
    DF = df(msg)
    if DF in (11, 17, 18):
        addr = msg[2:8]
    elif DF in (0, 4, 5, 16, 20, 21):
        c0 = bin2int(crc(msg, encode=True))
        c1 = hex2int(msg[-6:])
        addr = '%06X' % (c0 ^ c1)
    else:
        addr = None
    return addr
Calculate the ICAO address from an Mode-S message with DF4, DF5, DF20, DF21 Args: msg (String): 28 bytes hexadecimal message string Returns: String: ICAO address in 6 bytes hexadecimal string
juraj-google-style
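A conceptual sketch of the Address/Parity recovery used in the DF0/4/5/16/20/21 branch above; the address and CRC values are made-up illustrations, not pyModeS internals.

# The transponder XORs its 24-bit ICAO address into the CRC parity field, so
# XOR-ing the locally computed remainder with the received parity recovers it.
icao_address = 0x4840D6          # illustrative address
crc_remainder = 0x1A2B3C         # illustrative CRC over the message body

parity_field = crc_remainder ^ icao_address   # what the aircraft transmits
recovered = crc_remainder ^ parity_field      # what the receiver computes
assert recovered == icao_address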
def draw_curve(self, grid_characters: BoxDrawCharacterSet, *, top: bool=False, left: bool=False, right: bool=False, bottom: bool=False, crossing_char: Optional[str]=None): if (not any([top, left, right, bottom])): return sign_top = ((+ 1) if top else ((- 1) if self.top else 0)) sign_bottom = ((+ 1) if bottom else ((- 1) if self.bottom else 0)) sign_left = ((+ 1) if left else ((- 1) if self.left else 0)) sign_right = ((+ 1) if right else ((- 1) if self.right else 0)) if top: self.top = grid_characters.top_bottom if bottom: self.bottom = grid_characters.top_bottom if left: self.left = grid_characters.left_right if right: self.right = grid_characters.left_right if (not all([crossing_char, self.top, self.bottom, self.left, self.right])): crossing_char = box_draw_character(self._prev_curve_grid_chars, grid_characters, top=sign_top, bottom=sign_bottom, left=sign_left, right=sign_right) self.center = (crossing_char or '') self._prev_curve_grid_chars = grid_characters
Draws lines in the box using the given character set. Supports merging the new lines with the lines from a previous call to draw_curve, including when they have different character sets (assuming there exist characters merging the two). Args: grid_characters: The character set to draw the curve with. top: Draw topward leg? left: Draw leftward leg? right: Draw rightward leg? bottom: Draw downward leg? crossing_char: Overrides the all-legs-present character. Useful for ascii diagrams, where the + doesn't always look the clearest.
codesearchnet
def retry(exceptions, tries=5, delay=1, backoff=2, logger=None): def deco_retry(func): @wraps(func) async def f_retry(self, *args, **kwargs): if not iscoroutine(func): f = coroutine(func) else: f = func mtries, mdelay = tries, delay while mtries > 1: try: return await f(self, *args, **kwargs) except exceptions: if logger: logger.info('Retrying %s after %s seconds', f.__name__, mdelay) sleep(mdelay) mtries -= 1 mdelay *= backoff return await f(self, *args, **kwargs) return f_retry return deco_retry
Retry calling the decorated function using an exponential backoff. Args: exceptions: The exception to check. may be a tuple of exceptions to check. tries: Number of times to try (not retry) before giving up. delay: Initial delay between retries in seconds. backoff: Backoff multiplier (e.g. value of 2 will double the delay each retry). logger: Logger to use. If None, print.
juraj-google-style
def shakespeare(chunk_size):
    # The download URL was truncated in the source dump ('http:...').
    file_name = maybe_download('http:')
    with open(file_name) as f:
        shakespeare_full = f.read()
    # Truncate to a whole number of chunks; the '// chunk_size) * chunk_size)'
    # part was stripped in the dump and is restored here, and integer division
    # is assumed throughout (the original is Python-2-era code).
    length = (len(shakespeare_full) // chunk_size) * chunk_size
    if length < len(shakespeare_full):
        shakespeare_full = shakespeare_full[:length]
    arr = np.array([convert_to_int(c) for c in shakespeare_full])[
        0:(len(shakespeare_full) // chunk_size) * chunk_size]
    return arr.reshape((len(arr) // chunk_size, chunk_size))
Downloads Shakespeare, converts it into ASCII codes and chunks it. Args: chunk_size: The dataset is broken down so that it is shaped into batches x chunk_size. Returns: A numpy array of ASCII codes shaped into batches x chunk_size.
codesearchnet
def MatchBestComponentName(self, component): fd = self.OpenAsContainer() file_listing = set(fd.ListNames()) if (component not in file_listing): lower_component = component.lower() for x in file_listing: if (lower_component == x.lower()): component = x break if (fd.supported_pathtype != self.pathspec.pathtype): new_pathspec = rdf_paths.PathSpec(path=component, pathtype=fd.supported_pathtype) else: new_pathspec = self.pathspec.last.Copy() new_pathspec.path = component return new_pathspec
Returns the name of the component which matches best our base listing. In order to do the best case insensitive matching we list the files in the base handler and return the base match for this component. Args: component: A component name which should be present in this directory. Returns: the best component name.
codesearchnet
def from_string(cls, key_pem, is_x509_cert): key_pem = _helpers._to_bytes(key_pem) if is_x509_cert: der = rsa.pem.load_pem(key_pem, 'CERTIFICATE') (asn1_cert, remaining) = decoder.decode(der, asn1Spec=Certificate()) if (remaining != b''): raise ValueError('Unused bytes', remaining) cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo'] key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey']) pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER') else: pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM') return cls(pubkey)
Construct an RsaVerifier instance from a string. Args: key_pem: string, public key in PEM format. is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is expected to be an RSA key in PEM format. Returns: RsaVerifier instance. Raises: ValueError: if the key_pem can't be parsed. In either case, error will begin with 'No PEM start marker'. If ``is_x509_cert`` is True, will fail to find the "-----BEGIN CERTIFICATE-----" error, otherwise fails to find "-----BEGIN RSA PUBLIC KEY-----".
codesearchnet
def resolve_type(arg): arg_type = type(arg) if arg_type == list: assert isinstance(arg, list) sample = arg[:min(4, len(arg))] tentative_type = TentativeType() for sample_item in sample: tentative_type.add(resolve_type(sample_item)) return ListType(tentative_type) elif arg_type == set: assert isinstance(arg, set) sample = [] iterator = iter(arg) for i in range(0, min(4, len(arg))): sample.append(next(iterator)) tentative_type = TentativeType() for sample_item in sample: tentative_type.add(resolve_type(sample_item)) return SetType(tentative_type) elif arg_type == FakeIterator: assert isinstance(arg, FakeIterator) sample = [] iterator = iter(arg) for i in range(0, min(4, len(arg))): sample.append(next(iterator)) tentative_type = TentativeType() for sample_item in sample: tentative_type.add(resolve_type(sample_item)) return IteratorType(tentative_type) elif arg_type == tuple: assert isinstance(arg, tuple) sample = list(arg[:min(10, len(arg))]) return TupleType([resolve_type(sample_item) for sample_item in sample]) elif arg_type == dict: assert isinstance(arg, dict) key_tt = TentativeType() val_tt = TentativeType() for i, (k, v) in enumerate(iteritems(arg)): if i > 4: break key_tt.add(resolve_type(k)) val_tt.add(resolve_type(v)) return DictType(key_tt, val_tt) else: return type(arg)
Resolve object to one of our internal collection types or generic built-in type. Args: arg: object to resolve
juraj-google-style
class MeanAbsolutePercentageError(MeanMetricWrapper):

    def __init__(self, name='mean_absolute_percentage_error', dtype=None):
        super(MeanAbsolutePercentageError, self).__init__(
            mean_absolute_percentage_error, name, dtype=dtype)
Computes the mean absolute percentage error between `y_true` and `y_pred`. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.MeanAbsolutePercentageError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 250000000.0 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 500000000.0 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.MeanAbsolutePercentageError()]) ```
github-repos
def read_geojson(filename): json_file = open(filename) data = json.load(json_file) json_file.close() times = data['properties']['times'] main_data = dict(timesteps=[], masks=[], x=[], y=[], i=[], j=[]) attribute_data = dict() for feature in data['features']: for main_name in main_data.keys(): main_data[main_name].append(np.array(feature['properties'][main_name])) for (k, v) in feature['properties']['attributes'].items(): if (k not in attribute_data.keys()): attribute_data[k] = [np.array(v)] else: attribute_data[k].append(np.array(v)) kwargs = {} for kw in ['dx', 'step', 'u', 'v']: if (kw in data['properties'].keys()): kwargs[kw] = data['properties'][kw] sto = STObject(main_data['timesteps'], main_data['masks'], main_data['x'], main_data['y'], main_data['i'], main_data['j'], times[0], times[(- 1)], **kwargs) for (k, v) in attribute_data.items(): sto.attributes[k] = v return sto
Reads a geojson file containing an STObject and initializes a new STObject from the information in the file. Args: filename: Name of the geojson file Returns: an STObject
codesearchnet
def length_from_embedding(emb):
    return tf.cast(tf.reduce_sum(mask_from_embedding(emb), [1, 2, 3]), tf.int32)
Compute the length of each sequence in the batch. Args: emb: a sequence embedding Tensor with shape [batch, max_time, 1, depth]. Returns: a Tensor with shape [batch].
codesearchnet
def extract_table(tabletag): theadtag = tabletag.find_next('thead') headertags = theadtag.find_all('th') if (len(headertags) == 0): headertags = theadtag.find_all('td') headers = [] for tag in headertags: headers.append(get_text(tag)) tbodytag = tabletag.find_next('tbody') trtags = tbodytag.find_all('tr') table = list() for trtag in trtags: row = dict() tdtags = trtag.find_all('td') for (i, tag) in enumerate(tdtags): row[headers[i]] = get_text(tag) table.append(row) return table
Extract HTML table as list of dictionaries Args: tabletag (Tag): BeautifulSoup tag Returns: str: Text of tag stripped of leading and trailing whitespace and newlines and with &nbsp replaced with space
codesearchnet
class Content:
    text: Optional[str] = None
Container for embeddable content. Add new types as when as necessary. Args: text: Text content to be embedded
github-repos
def get(self, name):
    # The '#' character was stripped in the source dump and is restored here;
    # a '#' prefix selects a tag rather than a regular property.
    if name.startswith('#'):
        return self.tags.get(name[1:])
    return self.props.get(name)
Return a secondary property value from the Node. Args: name (str): The name of a secondary property. Returns: (obj): The secondary property value or None.
juraj-google-style
def matches(self, stream):
    if self.match_type != stream.stream_type:
        return False
    if self.match_id is not None:
        return self.match_id == stream.stream_id
    if self.match_spec == DataStreamSelector.MatchUserOnly:
        return not stream.system
    elif self.match_spec == DataStreamSelector.MatchSystemOnly:
        return stream.system
    elif self.match_spec == DataStreamSelector.MatchUserAndBreaks:
        return (not stream.system) or (stream.system and stream.stream_id in DataStream.KnownBreakStreams)
    return True
Check if this selector matches the given stream Args: stream (DataStream): The stream to check Returns: bool: True if this selector matches the stream
codesearchnet
def _check_conditional_statement(statement, num_collections):
    correct_var = list(ascii_lowercase)[:num_collections]
    st_statement = BaseCollection._remove_operators(statement)
    parsed_st = [s for s in st_statement if s.isalpha()]
    for var in parsed_st:
        if var not in correct_var:
            raise ValueError('Invalid conditional statement: {}\n Statement should be a valid Python statement and the variables should be named as follows: {}'.format(statement, ', '.join(correct_var)))
    return correct_var
Method to check conditional statements to be sure that they are valid. Args: statement: A conditional statement as a string (e.g. a>25 and a%5==0). The variable should always be named as 'a' (without quotations). num_collections: An integer representing the number of data collections that the statement will be evaluating. Return: correct_var: A list of the correct variable names that should be used within the statement (eg. ['a', 'b', 'c'])
codesearchnet
def parse_row(schema, data):

    def parse_value(data_type, value):
        """Parses a value returned from a BigQuery response.

        Args:
            data_type: the type of the value as specified by the schema.
            value: the raw value to return (before casting to data_type).

        Returns:
            The value cast to the data_type.
        """
        if value is not None:
            if value == 'null':
                value = None
            elif data_type == 'INTEGER':
                value = int(value)
            elif data_type == 'FLOAT':
                value = float(value)
            elif data_type == 'TIMESTAMP':
                value = datetime.datetime.utcfromtimestamp(float(value))
            elif data_type == 'BOOLEAN':
                value = (value == 'true')
            elif type(value) != str:
                value = str(value)
        return value

    row = {}
    if data is None:
        return row
    for i, (field, schema_field) in enumerate(zip(data['f'], schema)):
        val = field['v']
        name = schema_field['name']
        data_type = schema_field['type']
        repeated = True if ('mode' in schema_field and schema_field['mode'] == 'REPEATED') else False
        if repeated and val is None:
            row[name] = []
        elif data_type == 'RECORD':
            sub_schema = schema_field['fields']
            if repeated:
                row[name] = [Parser.parse_row(sub_schema, v['v']) for v in val]
            else:
                row[name] = Parser.parse_row(sub_schema, val)
        elif repeated:
            row[name] = [parse_value(data_type, v['v']) for v in val]
        else:
            row[name] = parse_value(data_type, val)
    return row
Parses a row from query results into an equivalent object. Args: schema: the array of fields defining the schema of the data. data: the JSON row from a query result. Returns: The parsed row object.
codesearchnet
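A sketch of how the parser consumes the BigQuery v2 response layout (a list of schema fields plus an `f`/`v` row structure); the values below are illustrative:

```python
schema = [
    {'name': 'user', 'type': 'STRING'},
    {'name': 'visits', 'type': 'INTEGER'},
    {'name': 'active', 'type': 'BOOLEAN'},
]
data = {'f': [{'v': 'ada'}, {'v': '3'}, {'v': 'true'}]}

row = parse_row(schema, data)
# row -> {'user': 'ada', 'visits': 3, 'active': True}
```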
def update_course_runs(self, course_runs, enterprise_customer, enterprise_context):
    updated_course_runs = []
    for course_run in course_runs:
        track_selection_url = utils.get_course_track_selection_url(
            course_run=course_run,
            query_parameters=dict(enterprise_context, **utils.get_enterprise_utm_context(enterprise_customer)))
        enrollment_url = enterprise_customer.get_course_run_enrollment_url(course_run.get('key'))
        course_run.update({'enrollment_url': enrollment_url, 'track_selection_url': track_selection_url})
        marketing_url = course_run.get('marketing_url')
        if marketing_url:
            query_parameters = dict(enterprise_context, **utils.get_enterprise_utm_context(enterprise_customer))
            course_run.update({'marketing_url': utils.update_query_parameters(marketing_url, query_parameters)})
        updated_course_runs.append(course_run)
    return updated_course_runs
Update Marketing urls in course metadata and return updated course. Arguments: course_runs (list): List of course runs. enterprise_customer (EnterpriseCustomer): enterprise customer instance. enterprise_context (dict): The context to inject into URLs. Returns: (dict): Dictionary containing updated course metadata.
codesearchnet
class MaxPooling2D(keras_layers.MaxPooling2D, base.Layer): def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(MaxPooling2D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)
Max pooling layer for 2D inputs (e.g. images). Args: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer.
github-repos
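A minimal sketch of applying the layer to a dummy batch; this is the legacy `tf.compat.v1.layers`-style class, so usage outside a v1 graph may emit deprecation warnings:

```python
import tensorflow as tf

inputs = tf.zeros([1, 4, 4, 3])                        # NHWC batch of one 4x4 RGB image
layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))
outputs = layer(inputs)                                # expected shape: [1, 2, 2, 3]
```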
def import_tsv(self, tsv_file):
    r = fapi.upload_entities_tsv(self.namespace, self.name, tsv_file, self.api_url)
    fapi._check_response_code(r, 201)
Upload entity data to workspace from tsv loadfile. Args: tsv_file (file): Tab-delimited file of entity data
codesearchnet
def sg_train_func(func):
    @wraps(func)
    def wrapper(**kwargs):
        opt = tf.sg_opt(kwargs)
        opt += tf.sg_opt(lr=0.001, save_dir='asset/train', max_ep=1000, ep_size=100000,
                         save_interval=600, log_interval=60, eval_metric=[],
                         max_keep=5, keep_interval=1, tqdm=True)
        epoch, loss = -1, None
        saver = tf.train.Saver(max_to_keep=opt.max_keep, keep_checkpoint_every_n_hours=opt.keep_interval)
        for m in opt.eval_metric:
            tf.sg_summary_metric(m)
        log_dir = opt.save_dir + '/run-%02d%02d-%02d%02d' % tuple(time.localtime(time.time()))[1:5]
        summary_writer = tf.summary.FileWriter(log_dir)

        def console_log(sess_):
            if epoch >= 0:
                tf.sg_info('\tEpoch[%03d:gs=%d] - loss = %s' %
                           (epoch, sess_.run(tf.sg_global_step()),
                            'NA' if loss is None else '%8.6f' % loss))

        sv = tf.train.Supervisor(logdir=opt.save_dir,
                                 saver=saver,
                                 save_model_secs=opt.save_interval,
                                 summary_writer=summary_writer,
                                 save_summaries_secs=opt.log_interval,
                                 global_step=tf.sg_global_step(),
                                 local_init_op=tf.sg_phase().assign(True))

        with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            if not opt.tqdm:
                sv.loop(opt.log_interval, console_log, args=(sess,))

            _step = sess.run(tf.sg_global_step())
            ep = _step // opt.ep_size + 1  # starting epoch derived from the saved global step

            if ep <= opt.max_ep:
                tf.sg_info('Training started from epoch[%03d]-step[%d].' % (ep, _step))
                for ep in range(ep, opt.max_ep + 1):
                    start_step = sess.run(tf.sg_global_step()) % opt.ep_size
                    epoch = ep
                    if opt.tqdm:
                        iterator = tqdm(range(start_step, opt.ep_size), total=opt.ep_size,
                                        initial=start_step, desc='train', ncols=70, unit='b', leave=False)
                    else:
                        iterator = range(start_step, opt.ep_size)
                    for _ in iterator:
                        if sv.should_stop():
                            break
                        batch_loss = func(sess, opt)
                        if batch_loss is not None and \
                                not np.isnan(batch_loss.all()) and not np.isinf(batch_loss.all()):
                            if loss is None:
                                loss = np.mean(batch_loss)
                            else:
                                loss = loss * 0.9 + np.mean(batch_loss) * 0.1
                    console_log(sess)
                    saver.save(sess, opt.save_dir + '/model.ckpt',
                               global_step=sess.run(tf.sg_global_step()))
                tf.sg_info('Training finished at epoch[%d]-step[%d].' % (ep, sess.run(tf.sg_global_step())))
            else:
                tf.sg_info('Training already finished at epoch[%d]-step[%d].' % (ep - 1, sess.run(tf.sg_global_step())))

    return wrapper
Decorates a function `func` as sg_train_func.

Args:
    func: A function to decorate
juraj-google-style
def __init__(self, current): import sys read_existing = set(sys.PYOKO_LOGS['read']) - set(sys.PYOKO_LOGS['new']) current.output = { 'response': "DB Access Stats: {}".format(str(sys.PYOKO_STAT_COUNTER), str(read_existing)), 'http_headers': (('Content-Type', 'text/plain'),), } sys.PYOKO_LOGS = { "save": 0, "update": 0, "read": 0, "count": 0, "search": 0, }
GET method handler.

Args:
    current: Request/response context object.
juraj-google-style
def _set_control_flow_context(self, ctx) -> None: self._control_flow_context = ctx
Sets the current control flow context of this op. Args: ctx: a context object.
github-repos
def _sort_course_modes(self, modes):

    def slug_weight(mode):
        """Assign a weight to the course mode dictionary based on the position of its slug in the sorting list."""
        sorting_slugs = COURSE_MODE_SORT_ORDER
        sorting_slugs_size = len(sorting_slugs)
        if mode['slug'] in sorting_slugs:
            return sorting_slugs_size - sorting_slugs.index(mode['slug'])
        return 0

    return sorted(modes, key=slug_weight, reverse=True)
Sort the course mode dictionaries by slug according to the COURSE_MODE_SORT_ORDER constant. Arguments: modes (list): A list of course mode dictionaries. Returns: list: A list with the course modes dictionaries sorted by slug.
codesearchnet
def _image_url(array, fmt='png', mode="data", quality=90, domain=None):
  supported_modes = ("data",)
  if mode not in supported_modes:
    message = "Unsupported mode '%s', should be one of '%s'."
    raise ValueError(message % (mode, supported_modes))

  image_data = serialize_array(array, fmt=fmt, quality=quality)
  base64_byte_string = base64.b64encode(image_data).decode('ascii')
  return "data:image/" + fmt.upper() + ";base64," + base64_byte_string
Create a data URL representing an image from an array of pixel data.

Args:
  array: a numpy array of image data
  fmt: image format to encode, e.g. 'png' or 'jpeg'
  mode: presently only supports "data" for data URL
  quality: compression quality for lossy formats

Returns:
  URL representing image
juraj-google-style
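A usage sketch, assuming `serialize_array` from the same module handles the numpy-to-PNG encoding:

```python
import numpy as np

array = np.random.uniform(size=(32, 32, 3))  # illustrative image data
url = _image_url(array, fmt='png')
# url -> 'data:image/PNG;base64,...' suitable for embedding in an <img> tag
```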
def MakeZip(self, input_dir, output_file): logging.info("Generating zip template file at %s", output_file) zf = zipfile.ZipFile(output_file, "w") oldwd = os.getcwd() os.chdir(input_dir) for path in ["debian", "rpmbuild", "fleetspeak"]: for root, _, files in os.walk(path): for f in files: zf.write(os.path.join(root, f)) zf.close() os.chdir(oldwd)
Creates a ZIP archive of the files in the input directory. Args: input_dir: the name of the input directory. output_file: the name of the output ZIP archive without extension.
juraj-google-style
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None): countriesdata = cls.countriesdata(use_live=use_live) m49 = countriesdata['m49iso3'].get(iso3) if m49 is not None: return m49 if exception is not None: raise exception return None
Get M49 from ISO3 code Args: iso3 (str): ISO3 code for which to get M49 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[int]: M49 code
juraj-google-style
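A hedged usage sketch, assuming the classmethod is exposed on a `Country` class as in the HDX location helpers:

```python
m49 = Country.get_m49_from_iso3('AFG')      # -> 4 for Afghanistan, if the lookup data matches
missing = Country.get_m49_from_iso3('XYZ')  # -> None when no exception argument is supplied
```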
def _parse_phone(self, val):
    ret = {'type': None, 'value': None}
    try:
        ret['type'] = val[1]['type']
    except (IndexError, KeyError, ValueError, TypeError):
        pass
    ret['value'] = val[3].strip()
    try:
        self.vars['phone'].append(ret)
    except AttributeError:
        self.vars['phone'] = []
        self.vars['phone'].append(ret)
The function for parsing the vcard phone numbers. Args: val (:obj:`list`): The value to parse.
codesearchnet
def validate_checksum( filename, md5sum ): filename = match_filename( filename ) md5_hash = file_md5( filename=filename ) if md5_hash != md5sum: raise ValueError('md5 checksums are inconsistent: {}'.format( filename ))
Compares the md5 checksum of a file with an expected value. If the calculated and expected checksum values are not equal, ValueError is raised. If the filename `foo` is not found, will try to read a gzipped file named `foo.gz`. In this case, the checksum is calculated for the unzipped file. Args: filename (str): Path for the file to be checksummed. md5sum (str): The expected hex checksum. Returns: None
juraj-google-style
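A self-contained sketch, assuming `validate_checksum` and its `file_md5`/`match_filename` helpers are importable from the same module; the file name is hypothetical:

```python
import hashlib

with open('data.dat', 'wb') as f:
    f.write(b'example payload')

expected = hashlib.md5(b'example payload').hexdigest()
validate_checksum('data.dat', expected)  # silent on success, raises ValueError on mismatch
```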
def rst_content(self, prefix: str = "", suffix: str = "", heading_underline_char: str = "=", method: AutodocMethod = None) -> str: spacer = " " if method is None: method = self.method is_python = self.is_python if method == AutodocMethod.BEST: if is_python: method = AutodocMethod.AUTOMODULE else: method = AutodocMethod.CONTENTS elif method == AutodocMethod.AUTOMODULE: if not is_python: method = AutodocMethod.CONTENTS if method == AutodocMethod.AUTOMODULE: if self.source_rst_title_style_python: title = self.python_module_name else: title = self.source_filename_rel_project_root instruction = ".. automodule:: {modulename}\n :members:".format( modulename=self.python_module_name ) elif method == AutodocMethod.CONTENTS: title = self.source_filename_rel_project_root instruction = ( ".. literalinclude:: {filename}\n" "{spacer}:language: {language}".format( filename=self.source_filename_rel_rst_file, spacer=spacer, language=self.pygments_language ) ) else: raise ValueError("Bad method!") content = .format( filename=self.rst_filename_rel_project_root, AUTOGENERATED_COMMENT=AUTOGENERATED_COMMENT, prefix=prefix, underlined_title=rst_underline( title, underline_char=heading_underline_char), instruction=instruction, suffix=suffix, ).strip() + "\n" return content
Returns the text contents of an RST file that will automatically document our source file. Args: prefix: prefix, e.g. RST copyright comment suffix: suffix, after the part we're creating heading_underline_char: RST character to use to underline the heading method: optional method to override ``self.method``; see constructor Returns: the RST contents
juraj-google-style
def flatten(iterable): return itertools.chain.from_iterable(a if isinstance(a,Iterable) and not isinstance(a, str) else [a] for a in iterable)
This function provides a simple way to iterate over a "complex" iterable; for example, given the input [12, [23], (4, 3), "lkjasddf"], this will return an Iterable that yields 12, 23, 4, 3 and "lkjasddf".

Args:
    iterable (Iterable) - A complex iterable that will be flattened

Returns:
    (Iterable): An Iterable that flattens multiple iterables
juraj-google-style
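The docstring's own example, written out; note the result is lazy and must be consumed, e.g. with `list`:

```python
import itertools
from collections.abc import Iterable  # assumed to be the Iterable used by the function

flat = list(flatten([12, [23], (4, 3), "lkjasddf"]))
# flat -> [12, 23, 4, 3, 'lkjasddf']   (strings are kept whole, not split into characters)
```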
def wigner_data(q_result, meas_qubits, labels, shots=None): num = len(meas_qubits) dim = 2**num p = [0.5 + 0.5 * np.sqrt(3), 0.5 - 0.5 * np.sqrt(3)] parity = 1 for i in range(num): parity = np.kron(parity, p) w = [0] * len(labels) wpt = 0 counts = [marginal_counts(q_result.get_counts(circ), meas_qubits) for circ in labels] for entry in counts: x = [0] * dim for i in range(dim): if bin(i)[2:].zfill(num) in entry: x[i] = float(entry[bin(i)[2:].zfill(num)]) if shots is None: shots = np.sum(x) for i in range(dim): w[wpt] = w[wpt] + (x[i] / shots) * parity[i] wpt += 1 return w
Get the value of the Wigner function from measurement results. Args: q_result (Result): Results from execution of a state tomography circuits on a backend. meas_qubits (list[int]): a list of the qubit indexes measured. labels (list[str]): a list of names of the circuits shots (int): number of shots Returns: list: The values of the Wigner function at measured points in phase space
juraj-google-style
def eval_from_json(json): changes = poloniex.get_gains_losses(poloniex.parse_changes(json)) return RSI.eval_algorithm(changes['gains'], changes['losses'])
Evaluates RSI from JSON (typically Poloniex API response) Args: json: List of dates where each entry is a dict of raw market data. Returns: Float between 0 and 100, momentum indicator of a market measuring the speed and change of price movements.
juraj-google-style
def _ParseFileHeader(self, file_object): file_header_map = self._GetDataTypeMap( 'chrome_cache_data_block_file_header') try: file_header, _ = self._ReadStructureFromFileObject( file_object, 0, file_header_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError( 'Unable to parse data block file header with error: {0!s}'.format( exception)) if file_header.signature != self._FILE_SIGNATURE: raise errors.ParseError('Unsupported data block file signature') format_version = '{0:d}.{1:d}'.format( file_header.major_version, file_header.minor_version) if format_version not in ('2.0', '2.1'): raise errors.ParseError( 'Unsupported data block file format version: {0:s}'.format( format_version)) if file_header.block_size not in (256, 1024, 4096): raise errors.ParseError( 'Unsupported data block file block size: {0:d}'.format( file_header.block_size))
Parses the file header. Args: file_object (dfvfs.FileIO): a file-like object to parse. Raises: ParseError: if the file header cannot be read.
juraj-google-style
def print_dict(d, show_missing=True): for k, v in sorted(d.items()): if (not v) and show_missing: print('{} -'.format(k)) elif isinstance(v, list): print(k) for item in v: print(' {}'.format(item)) elif isinstance(v, dict): print(k) for kk, vv in sorted(v.items()): print(' {:<20} {}'.format(kk, vv))
Prints a shallow dict to console. Args: d: Dict to print. show_missing: Whether to show keys with empty values.
juraj-google-style
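A quick illustration of the three branches (empty value, list value, nested dict):

```python
print_dict({
    'empty': None,        # printed as 'empty -' because show_missing defaults to True
    'items': ['a', 'b'],  # list branch: key on one line, then indented items
    'nested': {'k': 'v'}, # dict branch: key on one line, then aligned key/value pairs
})
```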
def run_step(context):
    logger.debug('started')
    context.clear()
    logger.info(f'Context wiped. New context size: {len(context)}')
    logger.debug('done')
Wipe the entire context. Args: Context is a dictionary or dictionary-like. Does not require any specific keys in context.
codesearchnet
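A hedged usage sketch; in pypyr the `context` argument is a dict-like Context, so a plain dict works for illustration, and the module-level `logger` is assumed to be configured:

```python
context = {'key1': 'value1', 'key2': 'value2'}
run_step(context)
# context -> {} ; the info log reports the new size of 0
```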
def create(labels=None, **kw):
    if labels is not None:
        kw[u'labels'] = encoding.PyValueToMessage(MetricValue.LabelsValue, labels)
    return MetricValue(**kw)
Constructs a new metric value. This acts as an alternative to the MetricValue constructor that simplifies the specification of labels: rather than having to create a MetricValue.LabelsValue instance, it is enough to pass the labels as a plain dict of strings. Args: labels (dict[string, string]): labels to attach to the metric value **kw: any other keyword args valid in the MetricValue constructor Returns: :class:`MetricValue`: the created instance
codesearchnet
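A usage sketch; `labels` is the only argument handled specially and everything else is forwarded to the MetricValue constructor, so the extra keyword below is only an assumption about that message's fields:

```python
value = create(labels={'region': 'us-east1'}, int64Value=7)  # int64Value assumed to be a valid MetricValue field
```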
def service_account_email(self):
    if self._service_account_email is None:
        self._service_account_email = app_identity.get_service_account_name()
    return self._service_account_email
Get the email for the current service account. Returns: string, The email associated with the Google App Engine service account.
codesearchnet