Columns:
  code        string, lengths 20 to 4.93k characters
  docstring   string, lengths 33 to 1.27k characters
  source      string, 3 classes
def _magic_parser(stream, magic):
    in_doc, fields = 0, None
    for line in stream:
        line = line.strip()
        if line.startswith(magic):
            keys = line.split()
            fields = OrderedDict((k, []) for k in keys)
        if fields is not None:
            in_doc += 1
            if in_doc == 1:
                continue
            if not line:
                break
            tokens = list(map(float, line.split()[1:]))
            assert len(tokens) == len(keys)
            for l, v in zip(fields.values(), tokens):
                l.append(v)
    if fields:
        return OrderedDict([(k, np.array(v)) for k, v in fields.items()])
    else:
        return None
Parse the section with the SCF cycle. Returns: dict where the keys are the column names and the values are lists of numbers. Returns None if no section was found. .. warning:: The parser is very fragile and should be replaced by YAML.
codesearchnet
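A quick usage sketch for _magic_parser above. The section text is a made-up stand-in for an ABINIT-style SCF block (a header line starting with the magic string, data rows with a leading tag, and a blank line to terminate); OrderedDict and np are assumed to be imported as in the function's own module.

    lines = [
        'iter   Etot(hartree)   deltaE(h)',
        'ETOT  1   -10.123    0.000',
        'ETOT  2   -10.456   -0.333',
        '',
    ]
    fields = _magic_parser(iter(lines), magic='iter')
    # fields['iter'] -> array([1., 2.]); fields['Etot(hartree)'] -> array([-10.123, -10.456])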
def get_cell_shift(flow_model):
    assert flow_model.lower() in FlowModelConst.d8_deltas
    return FlowModelConst.d8_deltas.get(flow_model.lower())
Get flow direction induced cell shift dict. Args: flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
juraj-google-style
def plot_thermodynamic_properties(self, tmin, tmax, ntemp, ylim=None, **kwargs):
    temperatures = np.linspace(tmin, tmax, ntemp)
    mol = "" if self.structure else "-c"
    fig = self._plot_thermo(self.dos.cv, temperatures, ylabel="Thermodynamic properties",
                            ylim=ylim, label=r"$C_v$ (J/K/mol{})".format(mol), **kwargs)
    self._plot_thermo(self.dos.entropy, temperatures, ylim=ylim, ax=fig.axes[0],
                      label=r"$S$ (J/K/mol{})".format(mol), **kwargs)
    self._plot_thermo(self.dos.internal_energy, temperatures, ylim=ylim, ax=fig.axes[0],
                      factor=1e-3, label=r"$\Delta E$ (kJ/K/mol{})".format(mol), **kwargs)
    self._plot_thermo(self.dos.helmholtz_free_energy, temperatures, ylim=ylim, ax=fig.axes[0],
                      factor=1e-3, label=r"$\Delta F$ (kJ/K/mol{})".format(mol), **kwargs)
    fig.axes[0].legend(loc="best")
    return fig
Plots all the thermodynamic properties in a temperature range. Args: tmin: minimum temperature tmax: maximum temperature ntemp: number of steps ylim: tuple specifying the y-axis limits. kwargs: kwargs passed to the matplotlib function 'plot'. Returns: matplotlib figure
juraj-google-style
async def forget(request):
    auth_policy = request.get(POLICY_KEY)
    if auth_policy is None:
        raise RuntimeError('auth_middleware not installed')
    return await auth_policy.forget(request)
Called to forget the userid for a request Args: request: aiohttp Request object Raises: RuntimeError: Middleware is not installed
codesearchnet
def update(self, item):
    if item.matrix not in self.data:
        self.data[item.matrix] = []
    result = Select(self.data[item.matrix]).where(
        lambda entry: entry.stage == item.stage).build()
    if len(result) > 0:
        stage = result[0]
        stage.status = item.status
        stage.add(item.timestamp, item.information)
    else:
        stage = CollectorStage(stage=item.stage, status=item.status)
        stage.add(item.timestamp, item.information)
        self.data[item.matrix].append(stage)
Add a collector item. Args: item (CollectorUpdate): event data like stage, timestamp and status.
codesearchnet
def parse_elements(elements):
    if len(elements) != 5:
        raise ValueError('Invalid WPL waypoint data')
    latitude = parse_latitude(elements[0], elements[1])
    longitude = parse_longitude(elements[2], elements[3])
    name = elements[4]
    return Waypoint(latitude, longitude, name)
Parse waypoint data elements. Args: elements (list): Data values for fix Returns: nmea.Waypoint: Object representing data
codesearchnet
def from_stat_file(cls, statfile, timestep=1, is_leap_year=False): stat = STAT(statfile) def check_missing(opt_data, data_name): if opt_data == []: raise ValueError('Stat file contains no optical data.') for i, x in enumerate(opt_data): if x is None: raise ValueError( 'Missing optical depth data for {} at month {}'.format( data_name, i) ) check_missing(stat.monthly_tau_beam, 'monthly_tau_beam') check_missing(stat.monthly_tau_diffuse, 'monthly_tau_diffuse') return cls.from_ashrae_revised_clear_sky(stat.location, stat.monthly_tau_beam, stat.monthly_tau_diffuse, timestep, is_leap_year)
Create an ASHRAE Revised Clear Sky wea object from the monthly sky optical depths in a .stat file. Args: statfile: Full path to the .stat file. timestep: An optional integer to set the number of time steps per hour. Default is 1 for one value per hour. is_leap_year: A boolean to indicate if values are representing a leap year. Default is False.
juraj-google-style
def update_mongo_compound_variants(self, bulk): requests = [] for var_id in bulk: var_obj = bulk[var_id] if not var_obj.get('compounds'): continue operation = pymongo.UpdateOne( {'_id': var_obj['_id']}, { '$set': { 'compounds': var_obj['compounds'] } }) requests.append(operation) if not requests: return try: self.variant_collection.bulk_write(requests, ordered=False) except BulkWriteError as err: LOG.warning("Updating compounds failed") raise err
Update the compound information for a bulk of variants in the database Args: bulk(dict): {'_id': scout.models.Variant}
juraj-google-style
def apply_gradients(self, grads_and_vars, global_step=None, name=None): summed_grads_and_vars = [] for grad, var in grads_and_vars: if grad is None: summed_grads_and_vars.append((grad, var)) else: with ops.colocate_with(grad): summed_grads_and_vars.append((tpu_ops.cross_replica_sum(grad, self._group_assignment), var)) return self._opt.apply_gradients(summed_grads_and_vars, global_step, name)
Apply gradients to variables. Calls tpu_ops.cross_replica_sum() to sum gradient contributions across replicas, and then applies the real optimizer. Args: grads_and_vars: List of (gradient, variable) pairs as returned by compute_gradients(). global_step: Optional Variable to increment by one after the variables have been updated. name: Optional name for the returned operation. Default to the name passed to the Optimizer constructor. Returns: An `Operation` that applies the gradients. If `global_step` was not None, that operation also increments `global_step`. Raises: ValueError: If the grads_and_vars is malformed.
github-repos
def ParseOptions(cls, options, output_module): if not isinstance(output_module, mysql_4n6time.MySQL4n6TimeOutputModule): raise errors.BadConfigObject( 'Output module is not an instance of MySQL4n6TimeOutputModule') MySQL4n6TimeDatabaseArgumentsHelper.ParseOptions(options, output_module) shared_4n6time_output.Shared4n6TimeOutputArgumentsHelper.ParseOptions( options, output_module)
Parses and validates options. Args: options (argparse.Namespace): parser options. output_module (OutputModule): output module to configure. Raises: BadConfigObject: when the output module object is of the wrong type.
juraj-google-style
def get_point(self, *position):
    array = _ffi.new(self._arrayType, position)
    if self._useOctaves:
        return (self._noiseFunc(self._noise, array, self._octaves) + 1) * 0.5
    return (self._noiseFunc(self._noise, array) + 1) * 0.5
Return the noise value of a specific position. Example usage: value = noise.get_point(x, y, z) Args: position (Tuple[float, ...]): The point to sample at. Returns: float: The noise value at position. This will be a floating point in the 0.0-1.0 range.
juraj-google-style
def get_json_files(files, recursive=False):
    json_files = []
    if not files:
        return json_files
    for fn in files:
        if os.path.isdir(fn):
            children = list_json_files(fn, recursive)
            json_files.extend(children)
        elif is_json(fn):
            json_files.append(fn)
        else:
            continue
    if not json_files:
        raise NoJSONFileFoundError('No JSON files found!')
    return json_files
Return a list of files to validate from `files`. If a member of `files` is a directory, its children with a ``.json`` extension will be added to the return value. Args: files: A list of file paths and/or directory paths. recursive: If ``true``, this will descend into any subdirectories of input directories. Returns: A list of file paths to validate.
codesearchnet
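A hedged usage sketch for get_json_files above; the paths are hypothetical and list_json_files/is_json are assumed to come from the same module as the function.

    # Directories are expanded to their .json children; plain files must be JSON.
    to_validate = get_json_files(['bundle.json', 'reports/'], recursive=True)
    for path in to_validate:
        print(path)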
def generate_brome_config():
    config = {}
    for key in iter(default_config):
        for inner_key, value in iter(default_config[key].items()):
            if key not in config:
                config[key] = {}
            config[key][inner_key] = value['default']
    return config
Generate a brome config with default values. Returns: config (dict)
codesearchnet
def MultiDelete(self, urns, token=None): urns = [rdfvalue.RDFURN(urn) for urn in urns] if (token is None): token = data_store.default_token for urn in urns: if (urn.Path() == '/'): raise ValueError("Can't delete root URN. Please enter a valid URN") deletion_pool = DeletionPool(token=token) deletion_pool.MultiMarkForDeletion(urns) marked_root_urns = deletion_pool.root_urns_for_deletion marked_urns = deletion_pool.urns_for_deletion logging.debug(u'Found %d objects to remove when removing %s', len(marked_urns), urns) logging.debug(u'Removing %d root objects when removing %s: %s', len(marked_root_urns), urns, marked_root_urns) pool = data_store.DB.GetMutationPool() for root in marked_root_urns: self._DeleteChildFromIndex(root, mutation_pool=pool) for urn_to_delete in marked_urns: try: self.intermediate_cache.ExpireObject(urn_to_delete.Path()) except KeyError: pass pool.DeleteSubjects(marked_urns) pool.Flush() self.Flush() logging.debug('Removed %d objects', len(marked_urns))
Drop all the information about given objects. DANGEROUS! This recursively deletes all objects contained within the specified URN. Args: urns: Urns of objects to remove. token: The Security Token to use for opening this item. Raises: ValueError: If one of the urns is too short. This is a safety check to ensure the root is not removed.
codesearchnet
def get_ax3d_fig_plt(ax=None, **kwargs):
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import axes3d
    if ax is None:
        fig = plt.figure(**kwargs)
        ax = axes3d.Axes3D(fig)
    else:
        fig = plt.gcf()
    return ax, fig, plt
Helper function used in plot functions supporting an optional Axes3D argument. If ax is None, we build the `matplotlib` figure and create the Axes3D, else we return the current active figure. Args: kwargs: keyword arguments are passed to plt.figure if ax is None. Returns: ax: :class:`Axes3D` object figure: matplotlib figure plt: matplotlib pyplot module.
codesearchnet
def AddContext(self, context_string, description=None):
    if context_string not in self.context:
        if context_string not in self.valid_contexts:
            raise InvalidContextError(
                'Invalid context specified: %s' % context_string)
        self.context.append(context_string)
        self.context_descriptions[context_string] = description
        self.FlushCache()
Adds a context string to the global configuration. The context conveys information about the caller of the config system and allows the configuration to have specialized results for different callers. Note that the configuration file may specify conflicting options for different contexts. In this case, later specified contexts (i.e. the later AddContext() calls) will trump the earlier specified contexts. This allows specialized contexts to be specified on the command line which override normal operating options. Args: context_string: A string which describes the global program. description: A description as to when this context applies. Raises: InvalidContextError: An undefined context was specified.
codesearchnet
def _build(self, one_hot_input_sequence): input_shape = one_hot_input_sequence.get_shape() batch_size = input_shape[1] batch_embed_module = snt.BatchApply(self._embed_module) input_sequence = batch_embed_module(one_hot_input_sequence) input_sequence = tf.nn.relu(input_sequence) initial_state = self._core.initial_state(batch_size) if self._use_dynamic_rnn: (output_sequence, final_state) = tf.nn.dynamic_rnn(cell=self._core, inputs=input_sequence, time_major=True, initial_state=initial_state) else: rnn_input_sequence = tf.unstack(input_sequence) (output, final_state) = tf.contrib.rnn.static_rnn(cell=self._core, inputs=rnn_input_sequence, initial_state=initial_state) output_sequence = tf.stack(output) batch_output_module = snt.BatchApply(self._output_module) output_sequence_logits = batch_output_module(output_sequence) return (output_sequence_logits, final_state)
Builds the deep LSTM model sub-graph. Args: one_hot_input_sequence: A Tensor with the input sequence encoded as a one-hot representation. Its dimensions should be `[truncation_length, batch_size, output_size]`. Returns: Tuple of the Tensor of output logits for the batch, with dimensions `[truncation_length, batch_size, output_size]`, and the final state of the unrolled core.
codesearchnet
def get_battery_info(self) -> dict:
    output, _ = self._execute('-s', self.device_sn, 'shell', 'dumpsys', 'battery')
    battery_status = re.split('\n |: ', output[33:].strip())
    return dict(zip(battery_status[::2], battery_status[1::2]))
Show device battery information. Returns: A dict. For example: {'AC powered': 'false', 'Charge counter': '0', 'Max charging current': '0', 'Max charging voltage': '0', 'USB powered': 'false', 'Wireless powered': 'false', 'health': '2', 'level': '67', 'present': 'true', 'scale': '100', 'status': '3', 'technology': 'Li-poly', 'temperature': '310', 'voltage': '3965'}
codesearchnet
def dict_load(self, ns_dict):
    for prefix, uri in ns_dict.items():
        self.bind(prefix, uri, override=False, calc=False)
    self.__make_dicts__
Reads a dictionary of namespaces and binds them to the manager Args: ns_dict: dictionary with the key as the prefix and the value as the uri
juraj-google-style
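A minimal usage sketch for dict_load above; ns_mgr stands for a hypothetical namespace-manager instance exposing bind().

    ns_mgr.dict_load({
        'dc': 'http://purl.org/dc/elements/1.1/',
        'foaf': 'http://xmlns.com/foaf/0.1/',
    })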
def GetFormatterObject(cls, data_type):
    data_type = data_type.lower()
    if data_type not in cls._formatter_objects:
        formatter_object = None
        if data_type in cls._formatter_classes:
            formatter_class = cls._formatter_classes[data_type]
            formatter_object = formatter_class()
        if not formatter_object:
            logger.warning(
                'Using default formatter for data type: {0:s}'.format(data_type))
            formatter_object = default.DefaultFormatter()
        cls._formatter_objects[data_type] = formatter_object
    return cls._formatter_objects[data_type]
Retrieves the formatter object for a specific data type. Args: data_type (str): data type. Returns: EventFormatter: corresponding formatter or the default formatter if not available.
codesearchnet
def execute(self, inputs=None, output=None, load_targets=False): if self == output: if os.path.exists(self._dump_dirname): shutil.rmtree(self._dump_dirname) if os.path.exists(self._target_filename): os.remove(self._target_filename) os.makedirs(self._dump_dirname) if inputs is None: inputs = [] if not hasattr(self, 'result'): if self in inputs or (load_targets and self.target): logging.info('Loading\n%s' % util.indent(str(self))) self.load() else: for i in self.inputs: i.execute(inputs=inputs, output=output, load_targets=load_targets) args = merge_results(self.inputs) logging.info('Running\n%s' % util.indent(str(self))) self.result = self.run(*args.args, **args.kwargs) if self == output: logging.info('Dumping\n%s' % util.indent(str(self))) self.dump() util.touch(self._target_filename)
Run this step, recursively running or loading inputs. Used in bin/run_step.py which is run by drake. Args: inputs: collection of steps that should be loaded output: step that should be dumped after it is run load_targets (boolean): load all steps which are targets. This argument is not used by run_step.py because target does not get serialized. But it can be useful for running steps directly.
juraj-google-style
def resolve_import(self, item): name = item.name short_name = None if item.is_from and not item.is_star: if '.' in name.lstrip('.'): rindex = name.rfind('.') else: rindex = name.rfind('.') + 1 short_name = name[:rindex] if import_finder.is_builtin(name): filename = name + '.so' return Builtin(filename, name) filename, level = convert_to_path(name) if level: filename = os.path.normpath( os.path.join(self.current_directory, filename)) files = [(name, filename)] if short_name: short_filename = os.path.dirname(filename) files.append((short_name, short_filename)) for module_name, path in files: for fs in self.fs_path: f = self._find_file(fs, path) if not f or f == self.current_module.path: continue if item.is_relative(): package_name = self.current_module.package_name if package_name is None: raise ImportException(name) module_name = get_absolute_name(package_name, module_name) if isinstance(self.current_module, System): return System(f, module_name) return Local(f, module_name, fs) if item.source: prefix, ext = os.path.splitext(item.source) mod_name = name if short_name: mod = prefix.replace(os.path.sep, '.') mod = utils.strip_suffix(mod, '.__init__') if not mod.endswith(name) and mod.endswith(short_name): mod_name = short_name if ext == '.pyc': pyfile = prefix + '.py' if os.path.exists(pyfile): return System(pyfile, mod_name) elif not ext: pyfile = os.path.join(prefix, "__init__.py") if os.path.exists(pyfile): return System(pyfile, mod_name) return System(item.source, mod_name) raise ImportException(name)
Simulate how Python resolves imports. Returns the filename of the source file Python would load when processing a statement like 'import name' in the module we're currently under. Args: item: An instance of ImportItem Returns: A filename Raises: ImportException: If the module doesn't exist.
juraj-google-style
def can_process_matrix(entry, matrix_tags):
    if len(matrix_tags) == 0:
        return True
    count = 0
    if 'tags' in entry:
        for tag in matrix_tags:
            if tag in entry['tags']:
                count += 1
    return count > 0
Check whether the given matrix entry matches the given list of matrix tags. Args: entry (dict): matrix item (in yaml). matrix_tags (list): represents --matrix-tags defined by user on the command line. Returns: bool: True when the matrix entry can be processed.
juraj-google-style
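A usage sketch for can_process_matrix above; the entry dict is hypothetical and mirrors a matrix item from a YAML pipeline definition.

    entry = {'name': 'build', 'tags': ['linux', 'docker']}
    assert can_process_matrix(entry, [])                    # no filter: always processed
    assert can_process_matrix(entry, ['docker', 'osx'])     # at least one tag matches
    assert not can_process_matrix(entry, ['windows'])       # no matching tag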
def load_scout(adapter, config, ped=None, update=False):
    log.info('Check that the panels exists')
    if not check_panels(adapter, config.get('gene_panels', []),
                        config.get('default_gene_panels')):
        raise ConfigError('Some panel(s) does not exist in the database')
    case_obj = adapter.load_case(config, update=update)
    return case_obj
Load a new case from a Scout config. Args: adapter(MongoAdapter) config(dict): loading info ped(Iterable(str)): Pedigree information update(bool): If existing case should be updated
codesearchnet
def trace_on(graph=True, profiler=False, profiler_outdir=None): if ops.inside_function(): logging.warn('Cannot enable trace inside a tf.function.') return if not context.executing_eagerly(): logging.warn('Must enable trace in eager mode.') return global _current_trace_context with _current_trace_context_lock: if _current_trace_context: logging.warn('Trace already enabled') return if graph and (not profiler): context.context().enable_graph_collection() if profiler: if profiler_outdir is None: logging.warn("No `profiler_outdir` passed to trace_on(). Profiler won't be enabled.") else: context.context().enable_run_metadata() _profiler.start(profiler_outdir) _current_trace_context = _TraceContext(graph=graph, profiler=profiler)
Starts a trace to record computation graphs and profiling information. Must be invoked in eager mode. When enabled, TensorFlow runtime will collect information that can later be exported and consumed by TensorBoard. The trace is activated across the entire TensorFlow runtime and affects all threads of execution. To stop the trace and export the collected information, use `tf.summary.trace_export`. To stop the trace without exporting, use `tf.summary.trace_off`. Args: graph: If True, enables collection of executed graphs. It includes ones from tf.function invocation and ones from the legacy graph mode. The default is True. profiler: If True, enables the advanced profiler. Enabling profiler implicitly enables the graph collection. The profiler may incur a high memory overhead. The default is False. profiler_outdir: Output directory for profiler. It is required when profiler is enabled when trace was started. Otherwise, it is ignored.
github-repos
def getOrderedLinks(self, session):
    streamLinks = session.query(StreamLink).\
        filter(StreamLink.channelInputFile == self).\
        order_by(StreamLink.linkNumber).\
        all()
    return streamLinks
Retrieve the links in the order of the link number. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. Returns: list: A list of :class:`.StreamLink` objects.
juraj-google-style
def parsed_to_ast(parsed: Parsed, errors: Errors, component_type: str=''): ast = {} sorted_keys = sorted(parsed.keys()) for key in sorted_keys: if (parsed[key]['type'] == 'Nested'): nested_component_stack = ['subject', 'object'] if component_type: component_stack = [component_type] else: component_stack = ['subject', 'object'] for key in sorted_keys: if ((parsed[key]['type'] == 'Function') and (parsed[key]['function_level'] == 'top')): ast[component_stack.pop(0)] = parsed_function_to_ast(parsed, key) elif ((parsed[key]['type'] == 'Relation') and ('relation' not in ast)): ast['relation'] = {'name': parsed[key]['name'], 'type': 'Relation', 'span': key} elif (parsed[key]['type'] == 'Nested'): ast['nested'] = {} for nested_key in sorted_keys: if (nested_key <= key): continue if ((parsed[nested_key]['type'] == 'Function') and (parsed[nested_key]['function_level'] == 'top')): ast['nested'][nested_component_stack.pop(0)] = parsed_function_to_ast(parsed, nested_key) elif ((parsed[nested_key]['type'] == 'Relation') and ('relation' not in ast['nested'])): ast['nested']['relation'] = {'name': parsed[nested_key]['name'], 'type': 'Relation', 'span': parsed[nested_key]['span']} return (ast, errors) return (ast, errors)
Convert parsed data struct to AST dictionary Args: parsed: errors: component_type: Empty string or 'subject' or 'object' to indicate that we are parsing the subject or object field input
codesearchnet
def _make_pred_succ_maps(self, node):
    pred_map = {e[2]['wire']: e[0] for e in
                self._multi_graph.in_edges(nbunch=node, data=True)}
    succ_map = {e[2]['wire']: e[1] for e in
                self._multi_graph.out_edges(nbunch=node, data=True)}
    return pred_map, succ_map
Return predecessor and successor dictionaries. Args: node (DAGNode): reference to multi_graph node Returns: tuple(dict): tuple(predecessor_map, successor_map) These map from wire (Register, int) to predecessor (successor) nodes of n.
juraj-google-style
def find_elb_dns_zone_id(name='', env='dev', region='us-east-1'):
    LOG.info('Find %s ELB DNS Zone ID in %s [%s].', name, env, region)
    client = boto3.Session(profile_name=env).client('elb', region_name=region)
    elbs = client.describe_load_balancers(LoadBalancerNames=[name])
    return elbs['LoadBalancerDescriptions'][0]['CanonicalHostedZoneNameID']
Get an application's AWS elb dns zone id. Args: name (str): ELB name env (str): Environment/account of ELB region (str): AWS Region Returns: str: elb DNS zone ID
juraj-google-style
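A hedged call sketch for find_elb_dns_zone_id above; the ELB name is hypothetical and the call assumes a configured boto3 profile named 'dev' with permission to describe load balancers.

    zone_id = find_elb_dns_zone_id(name='myapp-internal', env='dev', region='us-east-1')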
def browse_stations_categories(self):
    response = self._call(mc_calls.BrowseStationCategories)
    station_categories = response.body.get('root', {}).get('subcategories', [])
    return station_categories
Get the categories from Browse Stations. Returns: list: Station categories that can contain subcategories.
codesearchnet
def get_install_value(self, value_name, wanted_type=None): try: item_value, item_type = self.__reg_query_value(self.__reg_uninstall_handle, value_name) except pywintypes.error as exc: if exc.winerror == winerror.ERROR_FILE_NOT_FOUND: return None raise if wanted_type and item_type not in self.__reg_types[wanted_type]: item_value = None return item_value
For the uninstall section of the registry return the name value. Args: value_name (str): Registry value name. wanted_type (str): The type of value wanted; if the type does not match, None is returned. Supported values are ``str``, ``int``, ``list``, ``bytes``. Returns: value: Value requested or None if not found.
juraj-google-style
def blocking_reader(reader, input, buffer_size=_DEFAULT_BUFFER_SIZE):
    ion_event = None
    while True:
        read_event = (yield ion_event)
        ion_event = reader.send(read_event)
        while ion_event is not None and ion_event.event_type.is_stream_signal:
            data = input.read(buffer_size)
            if len(data) == 0:
                if ion_event.event_type is IonEventType.INCOMPLETE:
                    ion_event = reader.send(NEXT_EVENT)
                    continue
                else:
                    yield ION_STREAM_END_EVENT
                    return
            ion_event = reader.send(read_data_event(data))
Provides an implementation of using the reader co-routine with a file-like object. Args: reader(Coroutine): A reader co-routine. input(BaseIO): The file-like object to read from. buffer_size(Optional[int]): The optional buffer size to use.
codesearchnet
def iter_non_intersecting(self, iterable, key=None, descending=False): return _ContainsVersionIterator(self, iterable, key, descending, mode=_ContainsVersionIterator.MODE_NON_INTERSECTING)
Like `iter_intersect_test`, but returns non-intersections only. Returns: An iterator that returns items from `iterable` that don't intersect.
codesearchnet
def download_uniprot_file(uniprot_id, filetype, outdir='', force_rerun=False):
    my_file = '{}.{}'.format(uniprot_id, filetype)
    url = 'http://www.uniprot.org/uniprot/{}'.format(my_file)
    outfile = op.join(outdir, my_file)
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
        urlretrieve(url, outfile)
    return outfile
Download a UniProt file for a UniProt ID/ACC. Args: uniprot_id: Valid UniProt ID filetype: txt, fasta, xml, rdf, or gff outdir: Directory to download the file force_rerun: If the file should be downloaded again even if it already exists Returns: str: Absolute path to file
juraj-google-style
def _peek(self, size=-1):
    with self._seek_lock:
        seek = self._seek
    with handle_os_exceptions():
        return self._read_range(seek, seek + size)
Return bytes from the stream without advancing the position. Args: size (int): Number of bytes to read. -1 to read the full stream. Returns: bytes: bytes read
juraj-google-style
def _ParseUpdateKeyValue(self, parser_mediator, registry_value, key_path): if not registry_value.DataIsString(): parser_mediator.ProduceExtractionWarning( 'unsupported UpdateKey value data type: {0:s}'.format( registry_value.data_type_string)) return date_time_string = registry_value.GetDataAsObject() if not date_time_string: parser_mediator.ProduceExtractionWarning('missing UpdateKey value data') return re_match = self._UPDATE_DATE_TIME_RE.match(date_time_string) if not re_match: parser_mediator.ProduceExtractionWarning( 'unsupported UpdateKey value data: {0!s}'.format(date_time_string)) return month, day_of_month, year, hours, minutes, seconds, part_of_day = ( re_match.groups()) try: year = int(year, 10) month = int(month, 10) day_of_month = int(day_of_month, 10) hours = int(hours, 10) minutes = int(minutes, 10) seconds = int(seconds, 10) except (TypeError, ValueError): parser_mediator.ProduceExtractionWarning( 'invalid UpdateKey date time value: {0!s}'.format(date_time_string)) return if part_of_day == 'PM': hours += 12 time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds) try: date_time = dfdatetime_time_elements.TimeElements( time_elements_tuple=time_elements_tuple) date_time.is_local_time = True except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid UpdateKey date time value: {0!s}'.format( time_elements_tuple)) return event_data = CCleanerUpdateEventData() event_data.key_path = key_path event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_UPDATE, time_zone=parser_mediator.timezone) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses the UpdateKey value. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_value (dfwinreg.WinRegistryValue): Windows Registry value. key_path (str): Windows Registry key path.
juraj-google-style
def gunzip_file(infile, outfile=None, outdir=None, delete_original=False, force_rerun_flag=False): if not outfile: outfile = infile.replace('.gz', '') if not outdir: outdir = '' else: outdir = op.dirname(infile) outfile = op.join(outdir, op.basename(outfile)) if force_rerun(flag=force_rerun_flag, outfile=outfile): gz = gzip.open(infile, "rb") decoded = gz.read() with open(outfile, "wb") as new_file: new_file.write(decoded) gz.close() log.debug('{}: file unzipped'.format(outfile)) else: log.debug('{}: file already unzipped'.format(outfile)) if delete_original: os.remove(infile) return outfile
Decompress a gzip file and optionally set output values. Args: infile: Path to .gz file outfile: Name of output file outdir: Path to output directory delete_original: If original .gz file should be deleted force_rerun_flag: If file should be decompressed if outfile already exists Returns: str: Path to decompressed file
juraj-google-style
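A usage sketch for gunzip_file above; the archive name is hypothetical. With the defaults, the decompressed file is written under the current working directory and the original .gz file is kept.

    unzipped = gunzip_file('uniprot.fasta.gz', delete_original=False)
    print(unzipped)  # uniprot.fasta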
def FileEntryExistsByPathSpec(self, path_spec): fsapfs_file_entry = None location = getattr(path_spec, 'location', None) identifier = getattr(path_spec, 'identifier', None) try: if identifier is not None: fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_identifier( identifier) elif location is not None: fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_path(location) except IOError as exception: raise errors.BackEndError(exception) return fsapfs_file_entry is not None
Determines if a file entry for a path specification exists. Args: path_spec (PathSpec): path specification. Returns: bool: True if the file entry exists. Raises: BackEndError: if the file entry cannot be opened.
juraj-google-style
def __init__(self, file_pattern, min_bundle_size, compression_type, strip_trailing_newlines, coder: coders.Coder, buffer_size=DEFAULT_READ_BUFFER_SIZE, validate=True, skip_header_lines=0, header_processor_fns=(None, None), delimiter=None, escapechar=None): super().__init__(file_pattern, min_bundle_size, compression_type=compression_type, validate=validate) self._strip_trailing_newlines = strip_trailing_newlines self._compression_type = compression_type self._coder = coder self._buffer_size = buffer_size if skip_header_lines < 0: raise ValueError('Cannot skip negative number of header lines: %d' % skip_header_lines) elif skip_header_lines > 10: _LOGGER.warning('Skipping %d header lines. Skipping large number of header lines might significantly slow down processing.') self._skip_header_lines = skip_header_lines self._header_matcher, self._header_processor = header_processor_fns if delimiter is not None: if not isinstance(delimiter, bytes) or len(delimiter) == 0: raise ValueError('Delimiter must be a non-empty bytes sequence.') if self._is_self_overlapping(delimiter): raise ValueError('Delimiter must not self-overlap.') self._delimiter = delimiter if escapechar is not None: if not (isinstance(escapechar, bytes) and len(escapechar) == 1): raise ValueError("escapechar must be bytes of size 1: '%s'" % escapechar) self._escapechar = escapechar
Initialize a _TextSource Args: header_processor_fns (tuple): a tuple of a `header_matcher` function and a `header_processor` function. The `header_matcher` should return `True` for all lines at the start of the file that are part of the file header and `False` otherwise. These header lines will not be yielded when reading records and instead passed into `header_processor` to be handled. If `skip_header_lines` and a `header_matcher` are both provided, the value of `skip_header_lines` lines will be skipped and the header will be processed from there. delimiter (bytes) Optional: delimiter to split records. Must not self-overlap, because self-overlapping delimiters cause ambiguous parsing. escapechar (bytes) Optional: a single byte to escape the records delimiter, can also escape itself. Raises: ValueError: if skip_lines is negative. Please refer to documentation in class `ReadFromText` for the rest of the arguments.
github-repos
def add_role(self, databaseName, roleName, collectionName=None): role = {"databaseName" : databaseName, "roleName" : roleName} if collectionName: role["collectionName"] = collectionName if collectionName and roleName not in [RoleSpecs.read, RoleSpecs.readWrite]: raise ErrRole("Permissions [%s] not available for a collection" % roleName) elif not collectionName and roleName not in [RoleSpecs.read, RoleSpecs.readWrite, RoleSpecs.dbAdmin] and databaseName != "admin": raise ErrRole("Permissions [%s] is only available for admin database" % roleName) if role not in self.roles: self.roles.append(role)
Add one role Args: databaseName (str): Database Name roleName (RoleSpecs): role Keyword Args: collectionName (str): Collection Raises: ErrRole: role not compatible with the databaseName and/or collectionName
juraj-google-style
def dict_diff(d1: Dict[Any, Any], d2: Dict[Any, Any],
              deleted_value: Any = None) -> Dict[Any, Any]:
    changes = {k: v for k, v in d2.items() if k not in d1 or d2[k] != d1[k]}
    for k in d1.keys():
        if k not in d2:
            changes[k] = deleted_value
    return changes
Returns a representation of the changes that need to be made to ``d1`` to create ``d2``. Args: d1: a dictionary d2: another dictionary deleted_value: value to use for deleted keys; see below Returns: dict: a dictionary of the format ``{k: v}`` where the ``k``/``v`` pairs are key/value pairs that are absent from ``d1`` and present in ``d2``, or present in both but with different values (in which case the ``d2`` value is shown). If a key ``k`` is present in ``d1`` but absent in ``d2``, the result dictionary has the entry ``{k: deleted_value}``.
codesearchnet
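A usage sketch for dict_diff above with made-up dictionaries; changed and added keys take the d2 value, deleted keys map to deleted_value (None by default).

    d1 = {'a': 1, 'b': 2, 'c': 3}
    d2 = {'a': 1, 'b': 20, 'd': 4}
    assert dict_diff(d1, d2) == {'b': 20, 'd': 4, 'c': None}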
def get_splits(self, n_splits=1): if (n_splits == 1): stratify = (self.target if self._stratify else None) return train_test_split(self.data, self.target, shuffle=self._shuffle, stratify=stratify) else: cv_class = (StratifiedKFold if self._stratify else KFold) cv = cv_class(n_splits=n_splits, shuffle=self._shuffle) splits = list() for (train, test) in cv.split(self.data, self.target): X_train = self._get_split(self.data, train) y_train = self._get_split(self.target, train) X_test = self._get_split(self.data, test) y_test = self._get_split(self.target, test) splits.append((X_train, X_test, y_train, y_test)) return splits
Return splits of this dataset ready for Cross Validation. If n_splits is 1, a tuple containing the X for train and test and the y for train and test is returned. Otherwise, if n_splits is bigger than 1, a list of such tuples is returned, one for each split. Args: n_splits (int): Number of times that the data needs to be splitted. Returns: tuple or list: if n_splits is 1, a tuple containing the X for train and test and the y for train and test is returned. Otherwise, if n_splits is bigger than 1, a list of such tuples is returned, one for each split.
codesearchnet
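A hedged usage sketch for get_splits above; ds stands for a hypothetical instance of this dataset class and model for any scikit-learn style estimator.

    # Single split: one train/test partition.
    X_train, X_test, y_train, y_test = ds.get_splits()

    # 5-fold cross validation: a list of five (X_train, X_test, y_train, y_test) tuples.
    for X_train, X_test, y_train, y_test in ds.get_splits(n_splits=5):
        model.fit(X_train, y_train)
        model.score(X_test, y_test)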
def delete(self, name, **kwargs): self.gitlab.http_delete(self.path, query_data={'name': name}, **kwargs)
Delete a Label on the server. Args: name: The name of the label **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabDeleteError: If the server cannot perform the request
codesearchnet
def searchPageFor(doc, pno, text, hit_max=16, quads=False): return doc[pno].searchFor(text, hit_max = hit_max, quads = quads)
Search for a string on a page. Args: doc: the document pno: page number text: string to be searched for hit_max: maximum hits quads: return quads instead of rectangles Returns: a list of rectangles or quads, each containing an occurrence.
juraj-google-style
def set_auth_traps_enabled(status=True): vname = 'EnableAuthenticationTraps' current_status = get_auth_traps_enabled() if bool(status) == current_status: _LOG.debug('%s already contains the provided value.', vname) return True vdata = int(status) __utils__['reg.set_value'](_HKEY, _SNMP_KEY, vname, vdata, 'REG_DWORD') new_status = get_auth_traps_enabled() if status == new_status: _LOG.debug('Setting %s configured successfully: %s', vname, vdata) return True _LOG.error('Unable to configure %s with value: %s', vname, vdata) return False
Manage the sending of authentication traps. Args: status (bool): True to enable traps. False to disable. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' win_snmp.set_auth_traps_enabled status='True'
juraj-google-style
def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int): if self.assistant_model.generation_config.num_assistant_tokens_schedule in {'heuristic', 'heuristic_transient'}: if num_matches == len(scores[0]) - 1: self.num_assistant_tokens += 2.0 else: self.num_assistant_tokens = max(1.0, self.num_assistant_tokens - 1.0) if is_sklearn_available() and self.assistant_model.generation_config.assistant_confidence_threshold and (type(self) is AssistedCandidateGenerator): self.matches.extend([1] * num_matches) if len(self.probs) > len(self.matches): self.matches.append(0) excess_length = len(self.probs) - len(self.matches) if excess_length > 0: del self.probs[-excess_length:] if len(self.probs) > 5 and {0, 1}.issubset(self.matches): fpr, tpr, thresholds = roc_curve(self.matches, self.probs) fnr = 1 - tpr costs = fpr + 3 * fnr optimal_threshold_index = np.argmin(costs) best_threshold = thresholds[optimal_threshold_index] self.assistant_model.generation_config.assistant_confidence_threshold = best_threshold
Updates the candidate generation strategy based on the outcomes. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search num_matches (`int`): The number of matches between the candidate sequences and the model predictions.
github-repos
def suggest(q='', results=15, buckets=None, limit=False, max_familiarity=None, min_familiarity=None, max_hotttnesss=None, min_hotttnesss=None): buckets = (buckets or []) kwargs = {} kwargs['q'] = q if (max_familiarity is not None): kwargs['max_familiarity'] = max_familiarity if (min_familiarity is not None): kwargs['min_familiarity'] = min_familiarity if (max_hotttnesss is not None): kwargs['max_hotttnesss'] = max_hotttnesss if (min_hotttnesss is not None): kwargs['min_hotttnesss'] = min_hotttnesss if results: kwargs['results'] = results if buckets: kwargs['bucket'] = buckets if limit: kwargs['limit'] = 'true' result = util.callm(('%s/%s' % ('artist', 'suggest')), kwargs) return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']]
Suggest artists based upon partial names. Args: Kwargs: q (str): The text to suggest artists from results (int): An integer number of results to return buckets (list): A list of strings specifying which buckets to retrieve limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets max_familiarity (float): A float specifying the max familiarity of artists to search for min_familiarity (float): A float specifying the min familiarity of artists to search for max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for min_hotttnesss (float): A float specifying the min hotttnesss of artists to search for Returns: A list of Artist objects Example: >>> results = artist.suggest(q='rad')
codesearchnet
def param_shapes(cls, sample_shape, name='DistributionParamShapes'):
    with ops.name_scope(name, values=[sample_shape]):
        return cls._param_shapes(sample_shape)
Shapes of parameters given the desired shape of a call to `sample()`. This is a class method that describes what key/value arguments are required to instantiate the given `Distribution` so that a particular shape is returned for that instance's call to `sample()`. Subclasses should override class method `_param_shapes`. Args: sample_shape: `Tensor` or python list/tuple. Desired shape of a call to `sample()`. name: name to prepend ops with. Returns: `dict` of parameter name to `Tensor` shapes.
github-repos
def accepts(self, tp, converter):
    tp = ParameterizedProperty._validate_type_param(tp)
    self.alternatives.append((tp, converter))
    return self
Declare that other types may be converted to this property type. Args: tp (Property) : A type that may be converted automatically to this property type. converter (callable) : A function accepting ``value`` to perform conversion of the value to this property type. Returns: self
codesearchnet
def add_team_member(self, account_id=None, email_address=None): return self._add_remove_team_member(self.TEAM_ADD_MEMBER_URL, email_address, account_id)
Add or invite a user to your Team Args: account_id (str): The id of the account of the user to invite to your team. email_address (str): The email address of the account to invite to your team. The account id prevails if both account_id and email_address are provided. Returns: A Team object
juraj-google-style
def set(self, section, option, value=None):
    try:
        section = self.__getitem__(section)
    except KeyError:
        raise NoSectionError(section) from None
    option = self.optionxform(option)
    if option in section:
        section[option].value = value
    else:
        section[option] = value
    return self
Set an option. Args: section (str): section name option (str): option name value (str): value, default None
juraj-google-style
def diffs_prof(step):
    diff, rad = diff_prof(step)
    return _scale_prof(step, diff, rad), rad
Scaled diffusion. This computation takes sphericity into account if necessary. Args: step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData instance. Returns: tuple of :class:`numpy.array`: the diffusion and the radial position at which it is evaluated.
juraj-google-style
def from_dict(cls, copula_dict):
    instance = cls(copula_dict['copula_type'])
    instance.theta = copula_dict['theta']
    instance.tau = copula_dict['tau']
    return instance
Create a new instance from the given parameters. Args: copula_dict: `dict` with the parameters to replicate the copula. Like the output of `Bivariate.to_dict` Returns: Bivariate: Instance of the copula defined on the parameters.
codesearchnet
def buckets_list(self, projection='noAcl', max_results=0, page_token=None, project_id=None): if max_results == 0: max_results = Api._MAX_RESULTS args = {'project': project_id if project_id else self._project_id, 'maxResults': max_results} if projection is not None: args['projection'] = projection if page_token is not None: args['pageToken'] = page_token url = Api._ENDPOINT + (Api._BUCKET_PATH % '') return google.datalab.utils.Http.request(url, args=args, credentials=self._credentials)
Issues a request to retrieve the list of buckets. Args: projection: the projection of the bucket information to retrieve. max_results: an optional maximum number of objects to retrieve. page_token: an optional token to continue the retrieval. project_id: the project whose buckets should be listed. Returns: A parsed list of bucket information dictionaries. Raises: Exception if there is an error performing the operation.
juraj-google-style
def _get_encoded_length(audio_length, kernel_sizes=None, strides=None, dilations=None, use_causal_conv=None): cur_length = audio_length if kernel_sizes is None or strides is None or dilations is None or (use_causal_conv is None): return cur_length for kernel_size, stride, dilation in zip(kernel_sizes, strides, dilations): effective_kernel_size = (kernel_size - 1) * dilation + 1 padding_total = kernel_size - stride padding_right = padding_total padding_left = padding_total - padding_right n_frames = (cur_length - effective_kernel_size + padding_total) / stride + 1 n_frames = math.ceil(n_frames) - 1 ideal_length = n_frames * stride + kernel_size - padding_total extra_padding = ideal_length - cur_length if use_causal_conv: padding_left = padding_total padding_right = extra_padding else: padding_left = padding_left padding_right = padding_right + extra_padding cur_length = cur_length + padding_left + padding_right cur_length = (cur_length - dilation * (kernel_size - 1) - 1) return cur_length
Compute the length of the encoded audio sequence. Args: audio_length (int): The length of the audio sequence. kernel_sizes (List[int]): The kernel sizes for the convolutional layers. strides (List[int]): The strides for the convolutional layers. use_causal_conv (bool): Whether to use causal convolutions.
github-repos
def _set_xla_sharding(self, xla_sharding): if self._variable_read and (not context.executing_eagerly()): logging.warning("This variable (%s) has already been read (ie. a ReadVariableOp has already been generated) and a new XlaShardingOp using this sharding will not be created unless it is read again. If that's not possible, please set the XLA sharding before reading the variable.", self.name) self._xla_sharding = xla_sharding
Annotates this `ResourceVariable` with `xla_sharding`. `xla_sharding` will be used to create an `XlaShardingOp` whenever a `ReadVariableOp` is created. Args: xla_sharding: The xla.OpSharding proto to annotate this ResourceVariable with.
github-repos
def reload_data(self):
    db.session.rollback()
    self.__data = {}
    try:
        for ns in db.ConfigNamespace.all():
            self.__data[ns.namespace_prefix] = {x.key: x.value for x in ns.config_items}
    except SQLAlchemyError as ex:
        if str(ex).find('1146') != -1:
            pass
Reloads the configuration from the database Returns: `None`
codesearchnet
def validate(self, size): msg = 'scale and array size must match, but were scale: {self.scale.n_bands}, array size: {size}' if (size != len(self.scale)): raise ValueError(msg.format(**locals()))
Ensure that the size of the dimension matches the number of bands in the scale Raises: ValueError: when the dimension size and number of bands don't match
codesearchnet
def _ip_unnumbered_type(self, **kwargs):
    method_name = ('interface_%s_ip_ip_config_unnumbered_ip_donor_'
                   'interface_type' % kwargs['int_type'])
    ip_unnumbered_type = getattr(self._interface, method_name)
    config = ip_unnumbered_type(**kwargs)
    if kwargs['delete']:
        tag = 'ip-donor-interface-type'
        config.find('.//*%s' % tag).set('operation', 'delete')
    return config
Return the `ip unnumbered` donor type XML. You should not use this method. You probably want `Interface.ip_unnumbered`. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet etc). delete (bool): Remove the configuration if ``True``. ip_donor_interface_type (str): The donor interface type (loopback) Returns: XML to be passed to the switch. Raises: None
codesearchnet
def _GetKeys(self, data, keys, depth=1): keys = set(keys) match = {} if (depth == 1): for key in keys: match[key] = data[key] else: for (_, parsed_key, parsed_value) in self._RecurseKey(data, depth=depth): if (parsed_key in keys): match[parsed_key] = parsed_value if (set(match.keys()) == keys): return match return match
Helper function to return keys nested in a bencode dict. By default this function will return the values for the named keys requested by a plugin in match{}. The default setting is to look a single layer down from the root (same as the check for plugin applicability). This level is suitable for most cases. For cases where there is variability in the name at the first level (e.g. it is the MAC addresses of a device, or a UUID) it is possible to override the depth limit and use _GetKeys to fetch from a deeper level. Args: data (dict[str, object]): bencode data values. keys (list[str]): keys that should be returned. depth (int): how many levels deep to check for a match. Returns: dict[str, object]: a dictionary with just the keys requested.
codesearchnet
def wait_for_prompt(self, timeout_s=None):
    with self._cond:
        if self._prompt:
            if timeout_s is None:
                self._cond.wait(3600 * 24 * 365)
            else:
                self._cond.wait(timeout_s)
        if self._response is None:
            raise PromptUnansweredError
        return self._response
Wait for the user to respond to the current prompt. Args: timeout_s: Seconds to wait before raising a PromptUnansweredError. Returns: A string response, or the empty string if text_input was False. Raises: PromptUnansweredError: Timed out waiting for the user to respond.
codesearchnet
def cho_solve(L, b):
    from scipy.linalg import cho_solve as sp_cho_solve
    L = asarray(L, float)
    b = asarray(b, float)
    if L.size == 0:
        if b.size != 0:
            raise ValueError('Dimension mismatch between L and b.')
        return empty(b.shape)
    return sp_cho_solve((L, True), b, check_finite=False)
r"""Solve for Cholesky decomposition. Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`, given the Cholesky factorization of :math:`\mathrm A`. Args: L (array_like): Lower triangular matrix. b (array_like): Right-hand side. Returns: :class:`numpy.ndarray`: The solution to the system :math:`\mathrm A \mathbf x = \mathbf b`. See Also -------- numpy.linalg.cholesky : Cholesky decomposition. scipy.linalg.cho_solve : Solve linear equations given Cholesky factorization.
codesearchnet
def create_exponential(num_finite_buckets, growth_factor, scale):
    if num_finite_buckets <= 0:
        raise ValueError(_BAD_NUM_FINITE_BUCKETS)
    if growth_factor <= 1.0:
        raise ValueError(_BAD_FLOAT_ARG % (u'growth factor', 1.0))
    if scale <= 0.0:
        raise ValueError(_BAD_FLOAT_ARG % (u'scale', 0.0))
    return sc_messages.Distribution(
        bucketCounts=[0] * (num_finite_buckets + 2),
        exponentialBuckets=sc_messages.ExponentialBuckets(
            numFiniteBuckets=num_finite_buckets,
            growthFactor=growth_factor,
            scale=scale))
Creates a new instance of distribution with exponential buckets Args: num_finite_buckets (int): initializes number of finite buckets growth_factor (float): initializes the growth factor scale (float): initializes the scale Return: :class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution` Raises: ValueError: if the args are invalid for creating an instance
juraj-google-style
def read_value(self):
    with ops.name_scope('Read'):
        value = self._read_variable_op()
    return array_ops.identity(value)
Constructs an op which reads the value of this variable. Should be used when there are multiple reads, or when it is desirable to read the value only after some condition is true. Returns: The value of the variable.
github-repos
def parse_indices(indices_string):
    indices_string = re.sub('\\s+', '', indices_string)
    if indices_string.startswith('[') and indices_string.endswith(']'):
        indices_string = indices_string[1:-1]
    return [int(element) for element in indices_string.split(',')]
Parse a string representing indices. For example, if the input is "[1, 2, 3]", the return value will be a list of indices: [1, 2, 3] Args: indices_string: (str) a string representing indices. Can optionally be surrounded by a pair of brackets. Returns: (list of int): Parsed indices.
github-repos
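A few worked cases for parse_indices above, following directly from the code: whitespace is stripped first and surrounding brackets are optional.

    assert parse_indices('[1, 2, 3]') == [1, 2, 3]
    assert parse_indices('4,5') == [4, 5]
    assert parse_indices(' [ 7 ] ') == [7]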
def experiment_pb(hparam_infos, metric_infos, user='', description='', time_created_secs=None):
    if time_created_secs is None:
        time_created_secs = time.time()
    experiment = api_pb2.Experiment(
        description=description,
        user=user,
        time_created_secs=time_created_secs,
        hparam_infos=hparam_infos,
        metric_infos=metric_infos)
    return _summary(
        metadata.EXPERIMENT_TAG,
        plugin_data_pb2.HParamsPluginData(experiment=experiment))
Creates a summary that defines a hyperparameter-tuning experiment. Args: hparam_infos: Array of api_pb2.HParamInfo messages. Describes the hyperparameters used in the experiment. metric_infos: Array of api_pb2.MetricInfo messages. Describes the metrics used in the experiment. See the documentation at the top of this file for how to populate this. user: String. An id for the user running the experiment description: String. A description for the experiment. May contain markdown. time_created_secs: float. The time the experiment is created in seconds since the UNIX epoch. If None uses the current time. Returns: A summary protobuffer containing the experiment definition.
codesearchnet
def _FormatServiceText(self, service): string_segments = [ service.name, '\tImage Path = {0:s}'.format(service.image_path), '\tService Type = {0:s}'.format(service.HumanReadableType()), '\tStart Type = {0:s}'.format(service.HumanReadableStartType()), '\tService Dll = {0:s}'.format(service.service_dll), '\tObject Name = {0:s}'.format(service.object_name), '\tSources:'] for source in service.sources: string_segments.append('\t\t{0:s}:{1:s}'.format(source[0], source[1])) return '\n'.join(string_segments)
Produces a human readable multi-line string representing the service. Args: service (WindowsService): service to format. Returns: str: human readable representation of a Windows Service.
juraj-google-style
class DPTNeck(nn.Module): def __init__(self, config): super().__init__() self.config = config if config.backbone_config is not None and config.backbone_config.model_type in ['swinv2']: self.reassemble_stage = None else: self.reassemble_stage = DPTReassembleStage(config) self.convs = nn.ModuleList() for channel in config.neck_hidden_sizes: self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False)) self.fusion_stage = DPTFeatureFusionStage(config) def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]: if not isinstance(hidden_states, (tuple, list)): raise TypeError('hidden_states should be a tuple or list of tensors') if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.') if self.reassemble_stage is not None: hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width) features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)] output = self.fusion_stage(features) return output
DPTNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as input and produces another list of tensors as output. For DPT, it includes 2 stages: * DPTReassembleStage * DPTFeatureFusionStage. Args: config (dict): config dict.
github-repos
def reverse_fstring(pattern: str, string: str) -> dict[str, str] | None:
    pattern = _pattern_cache(pattern)
    if (m := pattern.fullmatch(string)):
        return m.groupdict()
    else:
        return None
Reverse f-string. Example: ```python epy.reverse_fstring( '/home/{user}/projects/{project}', '/home/conchylicultor/projects/menhir' ) == { 'user': 'conchylicultor', 'project': 'menhir', } ``` Args: pattern: The f-string pattern (can only contained named group) string: The string to search Returns: The extracted info
github-repos
def minimal_selector(self, complete_selector): if (complete_selector not in self._selector_map): raise KeyError("No value with selector '{}'.".format(complete_selector)) selector_components = complete_selector.split('.') node = self._selector_tree start = None for (i, component) in enumerate(reversed(selector_components)): if (len(node) == 1): if (start is None): start = (- i) else: start = None node = node[component] if (len(node) > 1): return complete_selector return '.'.join(selector_components[start:])
Returns the minimal selector that uniquely matches `complete_selector`. Args: complete_selector: A complete selector stored in the map. Returns: A partial selector that unambiguously matches `complete_selector`. Raises: KeyError: If `complete_selector` is not in the map.
codesearchnet
def _subtoken_ids_to_tokens(self, subtokens):
    concatenated = ''.join(
        [self._subtoken_id_to_subtoken_string(s) for s in subtokens])
    split = concatenated.split('_')
    ret = []
    for t in split:
        if t:
            unescaped = _unescape_token(t + '_')
            if unescaped:
                ret.append(unescaped)
    return ret
Converts a list of subtoken ids to a list of tokens. Args: subtokens: a list of integers in the range [0, vocab_size) Returns: a list of strings.
codesearchnet
def exists(self, file_path, check_link=False): if check_link and self.islink(file_path): return True file_path = make_string_path(file_path) if file_path is None: raise TypeError if not file_path: return False if file_path == self.dev_null.name: return not self.is_windows_fs try: if self.is_filepath_ending_with_separator(file_path): return False file_path = self.resolve_path(file_path) except (IOError, OSError): return False if file_path == self.root.name: return True path_components = self._path_components(file_path) current_dir = self.root for component in path_components: current_dir = self._directory_content(current_dir, component)[1] if not current_dir: return False return True
Return true if a path points to an existing file system object. Args: file_path: The path to examine. Returns: (bool) True if the corresponding object exists. Raises: TypeError: if file_path is None.
juraj-google-style
def parse_config(data: dict) -> dict: return {'email': data.get('email'), 'family': data['family_id'], 'samples': [{'id': sample_id, 'type': analysis_type} for (sample_id, analysis_type) in data['analysis_type'].items()], 'config_path': data['config_file_analysis'], 'is_dryrun': (True if ('dry_run_all' in data) else False), 'log_path': data['log_file'], 'out_dir': data['outdata_dir'], 'priority': data['slurm_quality_of_service'], 'sampleinfo_path': data['sample_info_file']}
Parse MIP config file. Args: data (dict): raw YAML input from MIP analysis config file Returns: dict: parsed data
codesearchnet
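A worked example of the parser above; all paths and identifiers are made up for illustration:

```python
raw = {
    'email': 'user@example.com',
    'family_id': 'fam1',
    'analysis_type': {'sample_1': 'wgs'},
    'config_file_analysis': '/analysis/fam1/analysis_config.yaml',
    'dry_run_all': True,
    'log_file': '/analysis/fam1/mip.log',
    'outdata_dir': '/analysis/fam1/output',
    'slurm_quality_of_service': 'normal',
    'sample_info_file': '/analysis/fam1/sample_info.yaml',
}

parsed = parse_config(raw)
# parsed['samples'] == [{'id': 'sample_1', 'type': 'wgs'}]
# parsed['is_dryrun'] is True because the 'dry_run_all' key is present
# parsed['priority'] == 'normal'
```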
def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None): sess.run(tf.group(tf.local_variables_initializer(), tf.global_variables_initializer())) if (resume and (not (logdir or checkpoint))): raise ValueError('Need to specify logdir to resume a checkpoint.') if logdir: state = tf.train.get_checkpoint_state(logdir) if checkpoint: checkpoint = os.path.join(logdir, checkpoint) if ((not checkpoint) and state and state.model_checkpoint_path): checkpoint = state.model_checkpoint_path if (checkpoint and (resume is False)): message = 'Found unexpected checkpoint when starting a new run.' raise RuntimeError(message) if checkpoint: saver.restore(sess, checkpoint)
Initialize or restore variables from a checkpoint if available. Args: sess: Session to initialize variables in. saver: Saver to restore variables. logdir: Directory to search for checkpoints. checkpoint: Specify what checkpoint name to use; defaults to most recent. resume: Whether to expect recovering a checkpoint or starting a new run. Raises: ValueError: If resume expected but no log directory specified. RuntimeError: If no resume expected but a checkpoint was found.
codesearchnet
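A TF1-style usage sketch, assuming a graph with variables has already been built and that `tf` refers to the TensorFlow 1.x API; the log directory is illustrative:

```python
import tensorflow as tf

saver = tf.train.Saver()
with tf.Session() as sess:
    # Fresh run: raises RuntimeError if a checkpoint already exists in the directory.
    initialize_variables(sess, saver, logdir='/tmp/run1', resume=False)

    # Resuming instead restores the latest checkpoint found under the log directory:
    # initialize_variables(sess, saver, logdir='/tmp/run1', resume=True)
```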
def id_by_index(index, resources): if ((index < 0) or (index >= len(resources))): return '' try: return resources[index].header_signature except AttributeError: return resources[index].address
Helper method to fetch the id or address of a resource by its index Args: resources (list of objects): The resources to be paginated index (integer): The index of the target resource Returns: str: The address or header_signature of the resource, returns an empty string if not found
codesearchnet
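A self-contained example of the fallback behaviour, using throwaway namedtuples in place of real resources:

```python
from collections import namedtuple

Batch = namedtuple('Batch', 'header_signature')   # stand-in for a signed resource
State = namedtuple('State', 'address')            # stand-in for a state entry

id_by_index(0, [Batch('abc123')])    # -> 'abc123' (header_signature is used)
id_by_index(0, [State('000000aa')])  # -> '000000aa' (falls back to address)
id_by_index(5, [Batch('abc123')])    # -> '' (index out of range)
```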
def AddBatchJob(client): batch_job_service = client.GetService('BatchJobService', version='v201809') batch_job_operations = [{'operand': {}, 'operator': 'ADD'}] return batch_job_service.mutate(batch_job_operations)['value'][0]
Add a new BatchJob to upload operations to. Args: client: an instantiated AdWordsClient used to retrieve the BatchJob. Returns: The new BatchJob created by the request.
codesearchnet
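A usage sketch with the googleads client library; loading credentials from a googleads.yaml file, and the 'id' field on the returned object, are assumptions about the client configuration and response shape:

```python
from googleads import adwords

# Assumes a valid googleads.yaml with AdWords credentials in the default location.
client = adwords.AdWordsClient.LoadFromStorage()
batch_job = AddBatchJob(client)
# The returned BatchJob is expected to expose its ID under the 'id' field.
print('Created BatchJob with ID "%s".' % batch_job['id'])
```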
def create(self, key, value): data = None if key is not None: key = key.strip() self.tcex.log.debug(u'create variable {}'.format(key)) parsed_key = self.parse_variable(key.strip()) variable_type = parsed_key['type'] if variable_type in self.read_data_types: data = self.create_data_types[variable_type](key, value) else: data = self.create_raw(key, value) return data
Create method of CRUD operation for working with KeyValue DB.

This method will automatically determine the variable type and call the
appropriate method to write the data. If a non-standard type is provided,
the data will be written as RAW data.

Args:
    key (string): The variable to write to the DB.
    value (any): The data to write to the DB.

Returns:
    (string): Result string of DB write.
juraj-google-style
def get_info(self, userSpecifier, **kwargs): request = Request('GET', '/v3/users/{userSpecifier}') request.set_path_param('userSpecifier', userSpecifier) response = self.ctx.request(request) if (response.content_type is None): return response if (not response.content_type.startswith('application/json')): return response jbody = json.loads(response.raw_body) parsed_body = {} if (str(response.status) == '200'): if (jbody.get('userInfo') is not None): parsed_body['userInfo'] = self.ctx.user.UserInfo.from_dict(jbody['userInfo'], self.ctx) elif (str(response.status) == '401'): if (jbody.get('errorCode') is not None): parsed_body['errorCode'] = jbody.get('errorCode') if (jbody.get('errorMessage') is not None): parsed_body['errorMessage'] = jbody.get('errorMessage') elif (str(response.status) == '403'): if (jbody.get('errorCode') is not None): parsed_body['errorCode'] = jbody.get('errorCode') if (jbody.get('errorMessage') is not None): parsed_body['errorMessage'] = jbody.get('errorMessage') elif (str(response.status) == '405'): if (jbody.get('errorCode') is not None): parsed_body['errorCode'] = jbody.get('errorCode') if (jbody.get('errorMessage') is not None): parsed_body['errorMessage'] = jbody.get('errorMessage') else: parsed_body = jbody response.body = parsed_body return response
Fetch the user information for the specified user. This endpoint is intended
to be used by the user themselves to obtain their own information.

Args:
    userSpecifier: The User Specifier

Returns:
    v20.response.Response containing the results from submitting the request
codesearchnet
def get_repo_url(pypirc, repository): pypirc = os.path.abspath(os.path.expanduser(pypirc)) pypi_config = base.PyPIConfig(pypirc) repo_config = pypi_config.get_repo_config(repository) if repo_config: return repo_config.get_clean_url() else: return base.RepositoryURL(repository)
Fetch the RepositoryURL for a given repository, reading info from pypirc. Will try to find the repository in the .pypirc, including username/password. Args: pypirc (str): path to the .pypirc config file repository (str): URL or alias for the repository Returns: base.RepositoryURL for the repository
codesearchnet
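A small usage sketch; the repository alias and config path are illustrative, and `base` refers to the module already imported by the function above:

```python
# Resolve the 'pypi' alias from the user's config; falls back to treating the
# argument as a literal URL when no matching section is found.
repo_url = get_repo_url('~/.pypirc', 'pypi')

# An explicit URL that has no config section is wrapped as-is:
direct_url = get_repo_url('~/.pypirc', 'https://upload.example.org/simple/')
```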
def tf_step(self, x, iteration, conjugate, residual, squared_residual): x, next_iteration, conjugate, residual, squared_residual = super(ConjugateGradient, self).tf_step( x, iteration, conjugate, residual, squared_residual ) A_conjugate = self.fn_x(conjugate) if self.damping > 0.0: A_conjugate = [A_conj + self.damping * conj for A_conj, conj in zip(A_conjugate, conjugate)] conjugate_A_conjugate = tf.add_n( inputs=[tf.reduce_sum(input_tensor=(conj * A_conj)) for conj, A_conj in zip(conjugate, A_conjugate)] ) alpha = squared_residual / tf.maximum(x=conjugate_A_conjugate, y=util.epsilon) next_x = [t + alpha * conj for t, conj in zip(x, conjugate)] next_residual = [res - alpha * A_conj for res, A_conj in zip(residual, A_conjugate)] next_squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in next_residual]) beta = next_squared_residual / tf.maximum(x=squared_residual, y=util.epsilon) next_conjugate = [res + beta * conj for res, conj in zip(next_residual, conjugate)] return next_x, next_iteration, next_conjugate, next_residual, next_squared_residual
Iteration loop body of the conjugate gradient algorithm. Args: x: Current solution estimate $x_t$. iteration: Current iteration counter $t$. conjugate: Current conjugate $c_t$. residual: Current residual $r_t$. squared_residual: Current squared residual $r_t^2$. Returns: Updated arguments for next iteration.
juraj-google-style
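The TensorFlow step above follows the textbook conjugate-gradient update; a standalone NumPy sketch of the same iteration (not part of the original optimizer, with an arbitrarily chosen epsilon) may make the algebra easier to follow:

```python
import numpy as np

def conjugate_gradient_solve(A, b, iterations=10, damping=0.0, eps=1e-8):
    """Approximately solve A x = b for symmetric positive-definite A."""
    x = np.zeros_like(b, dtype=float)
    residual = b - A @ x
    conjugate = residual.copy()
    squared_residual = residual @ residual
    for _ in range(iterations):
        A_conjugate = A @ conjugate + damping * conjugate
        alpha = squared_residual / max(conjugate @ A_conjugate, eps)
        x = x + alpha * conjugate                      # next_x
        residual = residual - alpha * A_conjugate      # next_residual
        next_squared_residual = residual @ residual
        beta = next_squared_residual / max(squared_residual, eps)
        conjugate = residual + beta * conjugate        # next_conjugate
        squared_residual = next_squared_residual
    return x
```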
def Cancel(self, request, global_params=None): config = self.GetMethodConfig('Cancel') return self._RunMethod(config, request, global_params=global_params)
Cancels a build in progress. Args: request: (CancelBuildRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Build) The response message.
github-repos
def get_weights(self): return backend.batch_get_value(self.weights)
Returns the current value of the weights of the optimizer. Returns: A list of numpy arrays.
github-repos
def full_game_name(short_name): camel_game_name = misc_utils.snakecase_to_camelcase(short_name) full_name = camel_game_name + ATARI_GAME_MODE return full_name
CamelCase game name with mode suffix.

Args:
    short_name: snake_case name without mode, e.g. "crazy_climber"

Returns:
    full game name, e.g. "CrazyClimberNoFrameskip-v4"
juraj-google-style
def import_aliases(alias_source): alias_table = get_alias_table() if is_url(alias_source): alias_source = retrieve_file_from_url(alias_source) alias_table.read(alias_source) os.remove(alias_source) else: alias_table.read(alias_source) _commit_change(alias_table)
Import aliases from a file or a URL.

Args:
    alias_source: The source of the aliases. It can be a file path or a URL.
juraj-google-style
def parse_structure(self, store_in_memory=False): if (not self.structure_file): log.error('{}: no structure file, unable to parse'.format(self.id)) return None else: structure = StructureIO(self.structure_path, self.file_type) structure_chains = [x.id for x in structure.first_model.child_list] self.add_chain_ids(structure_chains) self.get_structure_seqs(structure.first_model) if (not self.mapped_chains): self.add_mapped_chain_ids(structure_chains) if store_in_memory: self.parsed = True self.structure = structure return structure
Read the 3D coordinates of a structure file and return it as a Biopython Structure object. Also create ChainProp objects in the chains attribute for each chain in the first model. Args: store_in_memory (bool): If the Biopython Structure object should be stored in the attribute ``structure``. Returns: Structure: Biopython Structure object
codesearchnet
def backend_monitor(backend): if not isinstance(backend, IBMQBackend): raise QiskitError('Input variable is not of type IBMQBackend.') config = backend.configuration().to_dict() status = backend.status().to_dict() config_dict = {**status, **config} if not config['simulator']: props = backend.properties().to_dict() print(backend.name()) print('='*len(backend.name())) print('Configuration') print('-'*13) offset = ' ' upper_list = ['n_qubits', 'operational', 'status_msg', 'pending_jobs', 'basis_gates', 'local', 'simulator'] lower_list = list(set(config_dict.keys()).difference(upper_list)) lower_list.remove('gates') for item in upper_list+lower_list: print(offset+item+':', config_dict[item]) if config['simulator']: return print() qubit_header = 'Qubits [Name / Freq / T1 / T2 / U1 err / U2 err / U3 err / Readout err]' print(qubit_header) print('-'*len(qubit_header)) sep = ' / ' for qub in range(len(props['qubits'])): name = 'Q%s' % qub qubit_data = props['qubits'][qub] gate_data = props['gates'][3*qub:3*qub+3] t1_info = qubit_data[0] t2_info = qubit_data[1] freq_info = qubit_data[2] readout_info = qubit_data[3] freq = str(round(freq_info['value'], 5))+' '+freq_info['unit'] T1 = str(round(t1_info['value'], 5))+' ' + t1_info['unit'] T2 = str(round(t2_info['value'], 5))+' ' + t2_info['unit'] U1 = str(round(gate_data[0]['parameters'][0]['value'], 5)) U2 = str(round(gate_data[1]['parameters'][0]['value'], 5)) U3 = str(round(gate_data[2]['parameters'][0]['value'], 5)) readout_error = str(round(readout_info['value'], 5)) qstr = sep.join([name, freq, T1, T2, U1, U2, U3, readout_error]) print(offset+qstr) print() multi_qubit_gates = props['gates'][3*config['n_qubits']:] multi_header = 'Multi-Qubit Gates [Name / Type / Gate Error]' print(multi_header) print('-'*len(multi_header)) for gate in multi_qubit_gates: name = gate['name'] ttype = gate['gate'] error = str(round(gate['parameters'][0]['value'], 5)) mstr = sep.join([name, ttype, error]) print(offset+mstr)
Monitor a single IBMQ backend.

Args:
    backend (IBMQBackend): Backend to monitor.

Raises:
    QiskitError: Input is not an IBMQ backend.
juraj-google-style
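A usage sketch; the account-loading calls and the backend name are assumptions that depend on the installed Qiskit version:

```python
from qiskit import IBMQ

IBMQ.load_account()                                 # assumes an IBM Q account token is stored locally
provider = IBMQ.get_provider()
backend = provider.get_backend('ibmq_5_yorktown')   # illustrative backend name

backend_monitor(backend)                            # prints configuration, qubit and gate data
```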
def has_open_file(self, file_object): return (file_object in [wrappers[0].get_object() for wrappers in self.open_files if wrappers])
Return True if the given file object is in the list of open files. Args: file_object: The FakeFile object to be checked. Returns: `True` if the file is open.
juraj-google-style
def recipe_dcm_to_sheets(config, auth_read, account, report_id, report_name, sheet, tab): dcm(config, {'auth': auth_read, 'report': {'account': account, 'report_id': report_id, 'name': report_name}, 'out': {'sheets': {'sheet': sheet, 'tab': tab, 'range': 'A1'}}})
Move existing CM report into a Sheet tab. Args: auth_read (authentication) - Credentials used for reading data. account (integer) - NA report_id (integer) - NA report_name (string) - NA sheet (string) - NA tab (string) - NA
github-repos
def extract(self, extractor: Extractor, extractable: Extractable = None, tokenizer: Tokenizer = None, joiner: str = " ", **options) -> List[Extraction]: if not extractable: extractable = self if not tokenizer: tokenizer = self.etk.default_tokenizer extracted_results = list() if extractor.input_type == InputType.TOKENS: if self.etk.error_policy == ErrorPolicy.PROCESS: if isinstance(extractable.value, list): self.etk.log( "Extractor needs tokens, tokenizer needs string to tokenize, got list, converting to string", "warning", self.doc_id, self.url) warnings.warn( "Extractor needs tokens, tokenizer needs string to tokenize, got list, converting to string") elif isinstance(extractable.value, dict): self.etk.log( "Extractor needs tokens, tokenizer needs string to tokenize, got dict, converting to string", "warning", self.doc_id, self.url) warnings.warn( "Extractor needs tokens, tokenizer needs string to tokenize, got dict, converting to string") tokens = extractable.get_tokens(tokenizer) if tokens: extracted_results = extractor.extract(tokens, **options) else: raise ExtractorValueError( "Extractor needs string, tokenizer needs string to tokenize, got " + str(type(extractable.value))) elif extractor.input_type == InputType.TEXT: if self.etk.error_policy == ErrorPolicy.PROCESS: if isinstance(extractable.value, list): self.etk.log("Extractor needs string, got extractable value as list, converting to string", "warning", self.doc_id, self.url) warnings.warn("Extractor needs string, got extractable value as list, converting to string") elif isinstance(extractable.value, dict): self.etk.log("Extractor needs string, got extractable value as dict, converting to string", "warning", self.doc_id, self.url) warnings.warn("Extractor needs string, got extractable value as dict, converting to string") text = extractable.get_string(joiner) if text: extracted_results = extractor.extract(text, **options) else: pass elif extractor.input_type == InputType.OBJECT: extracted_results = extractor.extract(extractable.value, **options) elif extractor.input_type == InputType.HTML: if bool(BeautifulSoup(extractable.value, "html.parser").find()): extracted_results = extractor.extract(extractable.value, **options) else: pass try: jsonPath = extractable.full_path except AttributeError: jsonPath = None for e in extracted_results: e.prov_id = self.provenance_id_index extraction_provenance_record: ExtractionProvenanceRecord = ExtractionProvenanceRecord( e.prov_id, jsonPath, e.provenance["extractor_name"], e.provenance["start_char"], e.provenance["end_char"], e.provenance["confidence"], self, extractable.prov_id) self._provenances[e.prov_id] = extraction_provenance_record self.provenance_id_index_incrementer() self.create_provenance(extraction_provenance_record) return extracted_results
Invoke the extractor on the given extractable, accumulating all the extractions in a list.

Args:
    extractor (Extractor): the extractor to invoke.
    extractable (Extractable): the extractable to run the extractor on; defaults to the document itself.
    tokenizer: user can pass a custom tokenizer if the extractor wants tokens
    joiner: user can pass a joiner string if the extractor wants text
    options: user can pass arguments as a dict to the extract() function of different extractors

Returns:
    List of Extraction, containing all the extractions.
juraj-google-style
def get_snippet(self, snippet_key = None): uri = '/'.join([ self.api_uri, self.snippets_suffix ]) if snippet_key: uri = '/'.join([ uri, snippet_key ]) code, data = self._req('get', uri) return code, data
Get all snippets, or one specific snippet by its key.

Args:
    snippet_key: snippet key (default: None, i.e. all snippets)

Returns:
    (status code, snippet dict or list thereof)
juraj-google-style
def setContext(self, context_str): if (len(self.m_context) == 0) and (len(context_str) >= 7): if context_str[0:7] != "request": ekm_log("Context: " + context_str) self.m_context = context_str
Set context string for serial command. Private setter. Args: context_str (str): Command specific string.
juraj-google-style
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, dtype, use_gpu): x1 = self._CreateNumpyTensor(tensor_in_sizes) x2 = self._CreateNumpyTensor(filter_in_sizes) with test_util.device(use_gpu): t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype) t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype) strides = [1] + strides + [1] dilations = [1] + dilations + [1] if isinstance(padding, (list, tuple)): padding = [(0, 0)] + padding + [(0, 0)] if data_format == 'NCHW': t1 = test_util.NHWCToNCHW(t1) strides = test_util.NHWCToNCHW(strides) dilations = test_util.NHWCToNCHW(dilations) if isinstance(padding, (list, tuple)): padding = test_util.NHWCToNCHW(padding) conv = nn_ops.conv2d(t1, t2, dilations=dilations, strides=strides, padding=padding, data_format=data_format) self.assertEqual(conv.dtype, dtype) if data_format == 'NCHW': conv = test_util.NCHWToNHWC(conv) return conv
Verifies the output values of the convolution function. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. dilations: Dilated rate: [col_dilation, row_dilation] strides: Stride: [col_stride, row_stride] padding: Padding type. data_format: Format of the data tensors. dtype: Data type for inputs and outputs. use_gpu: True if the operations should be run on GPU Returns: Symbolic tensor value that can be used to execute the computation
github-repos
def _ParseCredentialOptions(self, options): credentials = getattr(options, 'credentials', []) if (not isinstance(credentials, list)): raise errors.BadConfigOption('Unsupported credentials value.') for credential_string in credentials: (credential_type, _, credential_data) = credential_string.partition(':') if ((not credential_type) or (not credential_data)): raise errors.BadConfigOption('Badly formatted credential: {0:s}.'.format(credential_string)) if (credential_type not in self._SUPPORTED_CREDENTIAL_TYPES): raise errors.BadConfigOption('Unsupported credential type for: {0:s}.'.format(credential_string)) if (credential_type in self._BINARY_DATA_CREDENTIAL_TYPES): try: credential_data = credential_data.decode('hex') except TypeError: raise errors.BadConfigOption('Unsupported credential data for: {0:s}.'.format(credential_string)) self._credentials.append((credential_type, credential_data))
Parses the credential options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
codesearchnet
def filter_iqr(array, lower, upper): (upper, lower) = iqr(array, upper, lower) new = list(array) for x in new[:]: if ((x < lower) or (x > upper)): new.remove(x) return new
Return elements which falls within specified interquartile range. Arguments: array (list): Sequence of numbers. lower (float): Lower bound for IQR, in range 0 <= lower <= 1. upper (float): Upper bound for IQR, in range 0 <= upper <= 1. Returns: list: Copy of original list, with elements outside of IQR removed.
codesearchnet
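A hypothetical call to the function above, assuming the `iqr()` helper maps the fractional bounds to the corresponding quantile values of the sequence:

```python
data = [1, 2, 3, 4, 5, 100]

# Values falling outside the computed bounds -- here the extreme outlier 100 --
# are expected to be dropped; `data` itself is untouched because the function
# works on a copy of the list.
cleaned = filter_iqr(data, lower=0.25, upper=0.75)
```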
def has_progress(self, previous_perf: 'NexmarkPerf') -> bool: if self.runtime_sec != previous_perf.runtime_sec or self.event_count != previous_perf.event_count or self.result_count != previous_perf.result_count: return True return False
Args:
    previous_perf: a NexmarkPerf object to be compared to self

Returns:
    True if there is observed pipeline activity between self and the other
    NexmarkPerf values
github-repos
def _generate_date_with_wildcard_query(self, date_value): if date_value.endswith(ast.GenericValue.WILDCARD_TOKEN): try: date_value = _truncate_wildcard_from_date(date_value) except ValueError: return {} return self._generate_range_queries(self.KEYWORD_TO_ES_FIELDNAME['date'], {ES_RANGE_EQ_OPERATOR: date_value}) else: return {}
Helper for generating a date keyword query containing a wildcard. Returns: (dict): The date query containing the wildcard or an empty dict in case the date value is malformed. The policy followed here is quite conservative on what it accepts as valid input. Look into :meth:`inspire_query_parser.utils.visitor_utils._truncate_wildcard_from_date` for more information.
codesearchnet
def get_cpu_props(cls, family, arch='x86'): cpus = cls.get_cpus_by_arch(arch) try: return cpus.xpath('model[@name="{0}"]'.format(family))[0] except IndexError: raise LagoException('No such CPU family: {0}'.format(family))
Get CPU info XML Args: family(str): CPU family arch(str): CPU arch Returns: lxml.etree.Element: CPU xml Raises: :exc:`~LagoException`: If no such CPU family exists
codesearchnet
def by_issn(issn): old_url = aleph.ALEPH_URL aleph.ALEPH_URL = NTK_ALEPH_URL records = aleph.getISSNsXML(issn, base="STK02") aleph.ALEPH_URL = old_url for record in records: marc = MARCXMLRecord(record) additional_info = { "222": marc.get("222", None), "PER": marc.get("PER", None), "776": marc.get("776", None), "008": marc.get("008", None), "alt_end_date": "" } additional_info = { key: val for key, val in additional_info.iteritems() if val } alt_end_date = None alt_creation_date = None if additional_info["008"]: alt_creation_date = additional_info["008"][7:11] alt_end_date = additional_info["008"][11:15] if alt_end_date in ["9999", "****"]: alt_creation_date += "-" alt_end_date = None additional_info["alt_end_date"] = alt_end_date author = Author.parse_author(marc) model = Model( url=_first_or_none( marc.get("856u") ), conspect=_first_or_none( marc.get("072a") ), annotation_tags=_first_or_none( marc.get("520a") ), periodicity=_first_or_none( marc.get("310a") ), title_tags=_first_or_none( marc.get("222a") ), subtitle_tags=_first_or_none( marc.get("245b") ), place_tags=remove_hairs( _first_or_none(marc.get("260a")) or "" ), author_tags=author._asdict() if author else None, publisher_tags=remove_hairs( ( _first_or_none(marc.get("260b")) or _first_or_none(marc.get("264b")) or "", ), ", " ), creation_dates=_first_or_none( marc.get("260c", [alt_creation_date]) ), lang_tags=_first_or_none( marc.get("040b") ), keyword_tags=marc.get("650a07"), source_info=_first_or_none( marc.get("500a") ), original_xml=record, additional_info=additional_info, ) yield _add_source(model)
Query aleph for records with given `issn`. The lookup is directed to the NTK's Aleph. Args: issn (str): ISSN of the periodical. Returns: obj: :class:`Model` instances for each record.
juraj-google-style
def parse_args(argv=None): parent_parser = get_parent_parser() desc = "Data Version Control" parser = DvcParser( prog="dvc", description=desc, parents=[parent_parser], formatter_class=argparse.RawTextHelpFormatter, ) parser.add_argument( "-V", "--version", action=VersionAction, nargs=0, help="Show program's version.", ) subparsers = parser.add_subparsers( title="Available Commands", metavar="COMMAND", dest="cmd", help="Use dvc COMMAND --help for command-specific help.", ) fix_subparsers(subparsers) for cmd in COMMANDS: cmd.add_parser(subparsers, parent_parser) args = parser.parse_args(argv) return args
Parses CLI arguments. Args: argv: optional list of arguments to parse. sys.argv is used by default. Raises: dvc.exceptions.DvcParserError: raised for argument parsing errors.
juraj-google-style
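A minimal sketch of parsing a command line without running it; the attribute names below reflect the parser setup shown above, not the full dvc dispatch logic:

```python
args = parse_args(['status'])
# args.cmd == 'status' because the subparsers use dest="cmd"; any
# command-specific options are attached to `args` as attributes.
```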
def create(self, name): return Bucket(name, context=self._context).create(self._project_id)
Creates a new bucket. Args: name: a unique name for the new bucket. Returns: The newly created bucket. Raises: Exception if there was an error creating the bucket.
juraj-google-style