Dataset fields:
- code: string, length 20 to 4.93k characters
- docstring: string, length 33 to 1.27k characters
- source: string, 3 classes
def _sideral(date, longitude=0., model='mean', eop_correction=True, terms=106):
    t = date.change_scale('UT1').julian_century
    theta = 67310.54841 + (876600 * 3600 + 8640184.812866) * t + 0.093104 * t ** 2 \
        - 6.2e-6 * t ** 3
    theta /= 240.
    if model == 'apparent':
        theta += equinox(date, eop_correction, terms)
    theta += longitude
    theta %= 360.
    return theta
Get the sidereal time at a defined date Args: date (Date): longitude (float): Longitude of the observer (in degrees) East positive/West negative. model (str): 'mean' or 'apparent' for GMST and GAST respectively Return: float: Sidereal time in degrees GMST: Greenwich Mean Sidereal Time LST: Local Sidereal Time (Mean) GAST: Greenwich Apparent Sidereal Time
juraj-google-style
def add_plugin(self, f):
    if f.endswith('.py'):
        plugin_name = os.path.splitext(os.path.basename(f))[0]
        if plugin_name in sys.modules:
            try:
                handler = reload(sys.modules[plugin_name])
                print '\t- %s %sRELOAD%s' % (plugin_name, color.Yellow, color.Normal)
            except ImportError, error:
                print 'Failed to import plugin: %s (%s)' % (plugin_name, error)
                return
        else:
            try:
                handler = __import__(plugin_name, globals(), locals(), [], -1)
            except ImportError, error:
                print 'Failed to import plugin: %s (%s)' % (plugin_name, error)
                return
        plugin = self.validate(handler)
        print '\t- %s %sOK%s' % (plugin_name, color.Green, color.Normal)
        if plugin:
            plugin['name'] = plugin_name
            plugin['dependencies'] = plugin['class'].dependencies
            plugin['docstring'] = plugin['class'].__doc__
            plugin['mod_time'] = datetime.utcfromtimestamp(os.path.getmtime(f))
            try:
                plugin['sample_set_input'] = getattr(plugin['class'], 'sample_set_input')
            except AttributeError:
                plugin['sample_set_input'] = False
            self.plugin_callback(plugin)
Adding and verifying plugin. Args: f: the filepath for the plugin.
juraj-google-style
def get_domain_reports(self, domains): api_name = 'virustotal-domain-reports' (all_responses, domains) = self._bulk_cache_lookup(api_name, domains) responses = self._request_reports("domain", domains, 'domain/report') for domain, response in zip(domains, responses): if self._cache: self._cache.cache_value(api_name, domain, response) all_responses[domain] = response return all_responses
Retrieves the most recent VT info for a set of domains. Args: domains: list of string domains. Returns: A dict with the domain as key and the VT report as value.
juraj-google-style
def add_minute(self, minute): _moy = (self.moy + int(minute)) return self.__class__.from_moy(_moy)
Create a new DateTime after the minutes are added. Args: minute: An integer value for minutes.
codesearchnet
def state_view_for_block(block_wrapper, state_view_factory): state_root_hash = \ block_wrapper.state_root_hash \ if block_wrapper is not None else None return state_view_factory.create_view(state_root_hash)
Returns the state view for an arbitrary block. Args: block_wrapper (BlockWrapper): The block for which a state view is to be returned state_view_factory (StateViewFactory): The state view factory used to create the StateView object Returns: StateView object associated with the block
juraj-google-style
def mkdir(self, path): self.__validate_storage_path(path, projects_allowed=False) parent_metadata = self.get_parent(path) self.api_client.create_folder(path.split('/')[-1], parent_metadata['uuid'])
Create a folder in the storage service pointed by the given path. Args: path (str): The path of the folder to be created Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
juraj-google-style
def _execute(self, connection, query, fetch=True): with connection.cursor() as cursor: cursor.execute(query) if fetch: return cursor.fetchall() else: cursor.execute('COMMIT;')
Executes given query and returns result. Args: connection: connection to the postgres database that stores mpr data. query (str): sql query fetch (boolean, optional): if True, fetch query result and return it. If False, do not fetch. Returns: iterable with query result or None if fetch is False.
codesearchnet
def get_bucket_files(glob_pattern, base_dir, force=False, pattern_slice=slice(None)):
    if gcsfs is None:
        raise RuntimeError("Missing 'gcsfs' dependency for GCS download.")
    if not os.path.isdir(base_dir):
        raise OSError('Directory does not exist: {}'.format(base_dir))
    if isinstance(glob_pattern, str):
        glob_pattern = [glob_pattern]

    fs = gcsfs.GCSFileSystem(token='anon')
    filenames = []
    for gp in glob_pattern:
        if isinstance(gp, str):
            glob_results = list(fs.glob(gp))
        else:
            # concatenate the results of each sublist pattern
            glob_results = [fn for pat in gp for fn in fs.glob(pat)]
        for fn in glob_results[pattern_slice]:
            ondisk_fn = os.path.basename(fn)
            ondisk_pathname = os.path.join(base_dir, ondisk_fn)
            filenames.append(ondisk_pathname)
            if force and os.path.isfile(ondisk_pathname):
                os.remove(ondisk_pathname)
            elif os.path.isfile(ondisk_pathname):
                LOG.info('Found existing: {}'.format(ondisk_pathname))
                continue
            LOG.info('Downloading: {}'.format(ondisk_pathname))
            # fetch the remote object to the local path
            fs.get('gs://' + fn, ondisk_pathname)
    if not filenames:
        raise OSError('No files could be found or downloaded.')
    return filenames
Helper function to download files from Google Cloud Storage. Args: glob_pattern (str or list): Glob pattern string or series of patterns used to search for on Google Cloud Storage. The pattern should include the "gs://" protocol prefix. If a list of lists, then the results of each sublist pattern are concatenated and the result is treated as one pattern result. This is important for things like ``pattern_slice`` and complicated glob patterns not supported by GCP. base_dir (str): Root directory to place downloaded files on the local system. force (bool): Force re-download of data regardless of its existence on the local system. Warning: May delete non-demo files stored in download directory. pattern_slice (slice): Slice object to limit the number of files returned by each glob pattern.
codesearchnet
def OpenAndRead(relative_path='debugger-blacklist.yaml'): try: with open(os.path.join(sys.path[0], relative_path), 'r') as f: return Read(f) except IOError: return None
Attempts to find the yaml configuration file, then read it. Args: relative_path: Optional relative path override. Returns: A Config object if the open and read were successful, None if the file does not exist (which is not considered an error). Raises: Error (some subclass): As thrown by the called Read() function.
codesearchnet
def _PromptUserForVSSCurrentVolume(self): while True: self._output_writer.Write('Volume Shadow Snapshots (VSS) were selected also process current\nvolume? [yes, no]\n') process_current_volume = self._input_reader.Read() process_current_volume = process_current_volume.strip() process_current_volume = process_current_volume.lower() if ((not process_current_volume) or (process_current_volume in ('no', 'yes'))): break self._output_writer.Write('\nUnsupported option, please try again or abort with Ctrl^C.\n\n') self._output_writer.Write('\n') return ((not process_current_volume) or (process_current_volume == 'yes'))
Prompts the user if the current volume with VSS should be processed. Returns: bool: True if the current volume with VSS should be processed.
codesearchnet
def look_source(self, sourcepath, library_paths=None): if sourcepath not in self._CHILDREN_MAP: with io.open(sourcepath, 'r', encoding='utf-8') as fp: finded_paths = self.parse(fp.read()) children = self.resolve(sourcepath, finded_paths, library_paths=library_paths) self._CHILDREN_MAP[sourcepath] = children for p in children: self._PARENTS_MAP[p].add(sourcepath) for path in children: if path not in self._CHILDREN_MAP: self.look_source(path, library_paths=library_paths) return
Open a SCSS file (sourcepath) and find all involved file through imports. This will fill internal buffers ``_CHILDREN_MAP`` and ``_PARENTS_MAP``. Args: sourcepath (str): Source file path to start searching for imports. Keyword Arguments: library_paths (list): List of directory paths for libraries to resolve paths if resolving fails on the base source path. Default to None.
juraj-google-style
def get_cartesian(self):
    def create_cartesian(positions, row):
        xyz_frame = pd.DataFrame(columns=['atom', 'x', 'y', 'z'],
                                 index=self.index[:row], dtype='f8')
        xyz_frame['atom'] = self.loc[xyz_frame.index, 'atom']
        xyz_frame.loc[:, ['x', 'y', 'z']] = positions[:row]
        from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian
        cartesian = Cartesian(xyz_frame, metadata=self.metadata)
        return cartesian

    c_table = self.loc[:, ['b', 'a', 'd']]
    c_table = c_table.replace(constants.int_label)
    c_table = c_table.replace({k: v for v, k in enumerate(c_table.index)})
    c_table = c_table.values.astype('i8').T

    C = self.loc[:, ['bond', 'angle', 'dihedral']].values.T
    C[[1, 2], :] = np.radians(C[[1, 2], :])

    err, row, positions = transformation.get_X(C, c_table)
    positions = positions.T

    if err == ERR_CODE_InvalidReference:
        rename = dict(enumerate(self.index))
        i = rename[row]
        b, a, d = self.loc[i, ['b', 'a', 'd']]
        cartesian = create_cartesian(positions, row)
        raise InvalidReference(i=i, b=b, a=a, d=d,
                               already_built_cartesian=cartesian)
    elif err == ERR_CODE_OK:
        return create_cartesian(positions, row + 1)
Return the molecule in cartesian coordinates. Raises an :class:`~exceptions.InvalidReference` exception, if the reference of the i-th atom is undefined. Args: None Returns: Cartesian: Reindexed version of the zmatrix.
codesearchnet
def getfileversion(self): status, major_v, minor_v, release, info = _C.Hgetfileversion(self._id) _checkErr('getfileversion', status, "cannot get file version") return major_v, minor_v, release, info
Get file version info. Args: no argument Returns: 4-element tuple with the following components: -major version number (int) -minor version number (int) -complete library version number (int) -additional information (string) C library equivalent : Hgetlibversion
juraj-google-style
def delete_fork_relation(self, **kwargs): path = ('/projects/%s/fork' % self.get_id()) self.manager.gitlab.http_delete(path, **kwargs)
Delete a forked relation between existing projects. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabDeleteError: If the server failed to perform the request
codesearchnet
def ami_lookup(region='us-east-1', name='tomcat8'): if AMI_JSON_URL: ami_dict = _get_ami_dict(AMI_JSON_URL) ami_id = ami_dict[region][name] elif GITLAB_TOKEN: warn_user('Use AMI_JSON_URL feature instead.') ami_contents = _get_ami_file(region=region) ami_dict = json.loads(ami_contents) ami_id = ami_dict[name] else: ami_id = name LOG.info('Using AMI: %s', ami_id) return ami_id
Look up AMI ID. Use _name_ to find AMI ID. If no ami_base_url or gitlab_token is provided, _name_ is returned as the ami id. Args: region (str): AWS Region to find AMI ID. name (str): Simple AMI base name to lookup. Returns: str: AMI ID for _name_ in _region_.
codesearchnet
def get_msd_plot(self, plt=None, mode='specie'):
    from pymatgen.util.plotting import pretty_plot
    plt = pretty_plot(12, 8, plt=plt)
    if np.max(self.dt) > 100000:
        plot_dt = self.dt / 1000
        unit = 'ps'
    else:
        plot_dt = self.dt
        unit = 'fs'
    if mode == 'species':
        for sp in sorted(self.structure.composition.keys()):
            indices = [i for i, site in enumerate(self.structure)
                       if site.specie == sp]
            sd = np.average(self.sq_disp_ions[indices, :], axis=0)
            plt.plot(plot_dt, sd, label=sp.__str__())
        plt.legend(loc=2, prop={'size': 20})
    elif mode == 'sites':
        for i, site in enumerate(self.structure):
            sd = self.sq_disp_ions[i, :]
            plt.plot(plot_dt, sd,
                     label='%s - %d' % (site.specie.__str__(), i))
        plt.legend(loc=2, prop={'size': 20})
    elif mode == 'mscd':
        plt.plot(plot_dt, self.mscd, 'r')
        plt.legend(['Overall'], loc=2, prop={'size': 20})
    else:
        plt.plot(plot_dt, self.msd, 'k')
        plt.plot(plot_dt, self.msd_components[:, 0], 'r')
        plt.plot(plot_dt, self.msd_components[:, 1], 'g')
        plt.plot(plot_dt, self.msd_components[:, 2], 'b')
        plt.legend(['Overall', 'a', 'b', 'c'], loc=2, prop={'size': 20})
    plt.xlabel('Timestep ({})'.format(unit))
    if mode == 'mscd':
        plt.ylabel('MSCD ($\\AA^2$)')
    else:
        plt.ylabel('MSD ($\\AA^2$)')
    plt.tight_layout()
    return plt
Get the plot of the smoothed msd vs time graph. Useful for checking convergence. This can be written to an image file. Args: plt: A plot object. Defaults to None, which means one will be generated. mode (str): Determines type of msd plot. By "species", "sites", or direction (default). If mode = "mscd", the smoothed mscd vs. time will be plotted.
codesearchnet
def verify(self, obj): if obj not in self.options: raise ValidationError("Object is not in list of enumerated options", reason='not in list of enumerated options', object=obj, options=self.options) return obj
Verify that the object conforms to this verifier's schema. Args: obj (object): A python object to verify Raises: ValidationError: If there is a problem verifying the object, a ValidationError is thrown with at least the reason key set indicating the reason for the lack of validation.
juraj-google-style
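A minimal usage sketch for the enumerated-options verifier above. The `EnumVerifier` wrapper class and the stand-in `ValidationError` are assumptions for illustration; only the `verify` method comes from the entry.

class ValidationError(Exception):
    """Stand-in for the library's ValidationError (assumed interface)."""
    def __init__(self, msg, **kwargs):
        super().__init__(msg)
        self.params = kwargs

class EnumVerifier:
    def __init__(self, options):
        self.options = options

    def verify(self, obj):
        if obj not in self.options:
            raise ValidationError("Object is not in list of enumerated options",
                                  reason='not in list of enumerated options',
                                  object=obj, options=self.options)
        return obj

color = EnumVerifier(['red', 'green', 'blue'])
color.verify('green')   # returns 'green'
color.verify('purple')  # raises ValidationError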
def times(*combined): assert combined if len(combined) == 1: return combined[0] first = combined[0] rest_combined = times(*combined[1:]) combined_results = [] for a in first: for b in rest_combined: if set(a.keys()).intersection(set(b.keys())): raise ValueError('Keys need to not overlap: {} vs {}'.format(a.keys(), b.keys())) combined_results.append(OrderedDict(list(a.items()) + list(b.items()))) return combined_results
Generate a product of N sets of combinations. times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4]) Args: *combined: N lists of dictionaries that specify combinations. Returns: a list of dictionaries for each combination. Raises: ValueError: if some of the inputs have overlapping keys.
github-repos
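An illustrative call to the `times` helper above, using hand-written dictionary lists in place of `combine` output; the input values are made up for the example.

from collections import OrderedDict

first = [OrderedDict(a=1), OrderedDict(a=2)]
second = [OrderedDict(b=3), OrderedDict(b=4)]
print(times(first, second))
# [OrderedDict([('a', 1), ('b', 3)]), OrderedDict([('a', 1), ('b', 4)]),
#  OrderedDict([('a', 2), ('b', 3)]), OrderedDict([('a', 2), ('b', 4)])]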
def get_developer_package(path, format=None): from rez.developer_package import DeveloperPackage return DeveloperPackage.from_path(path, format=format)
Create a developer package. Args: path (str): Path to dir containing package definition file. format (str): Package definition file format, detected if None. Returns: `DeveloperPackage`.
juraj-google-style
def get_pending_enrollment_message(cls, pending_users, enrolled_in): pending_emails = [pending_user.user_email for pending_user in pending_users] return ( 'warning', _( "The following learners do not have an account on " "{platform_name}. They have not been enrolled in " "{enrolled_in}. When these learners create an account, they will " "be enrolled automatically: {pending_email_list}" ).format( platform_name=settings.PLATFORM_NAME, enrolled_in=enrolled_in, pending_email_list=', '.join(pending_emails), ) )
Create message for the pending users who have not yet been enrolled in a course or program. Args: pending_users: An iterable of PendingEnterpriseCustomerUsers who were successfully linked with a pending enrollment enrolled_in (str): A string identifier for the course or program the pending users were linked to Returns: tuple: A 2-tuple containing a message type and message text
juraj-google-style
def _group_value_by_device(per_replica_values): destinations = per_replica_values[0]._devices grouped = [[] for _ in range(len(destinations))] for per_replica_value in per_replica_values: for i, v in enumerate(per_replica_value.values): assert per_replica_value._devices == destinations grouped[i].append((v, None)) return grouped
Group values into sublists by their devices. This grouping is needed to call the all-reduce library because it expects a list of the following form: [[(grad0_gpu0, v0_gpu0), (grad1_gpu0, v1_gpu0), (grad2_gpu0, v2_gpu0) ...], [(grad0_gpu1, v0_gpu1), (grad1_gpu1, v1_gpu1), (grad2_gpu1, v2_gpu1) ...], [(grad0_gpu2, v0_gpu2), (grad1_gpu0, v1_gpu2), (grad2_gpu0, v2_gpu2) ...], ... ] Args: per_replica_values: a list of PerReplica objects. Returns: a list of lists, each sublist has components for its corresponding device of PerReplica objects, paired with a None.
github-repos
def LoadData(self, data, custom_properties=None): self.__data = [] self.AppendData(data, custom_properties)
Loads new rows to the data table, clearing existing rows. May also set the custom_properties for the added rows. The given custom properties dictionary specifies the dictionary that will be used for *all* given rows. Args: data: The rows that the table will contain. custom_properties: A dictionary of string to string to set as the custom properties for all rows.
juraj-google-style
def url_is(white_list): def func(url): prefixes = white_list.get('PREFIXES', ()) for prefix in prefixes: if url.startswith(prefix): return True constants = white_list.get('CONSTANTS', ()) for exact_url in constants: if (url == exact_url): return True return False return func
Function generator. Args: white_list (dict): dict with PREFIXES and CONSTANTS keys (list values). Returns: func: a function to check if a URL is...
codesearchnet
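A short sketch of how the generated checker from `url_is` behaves; the white-list contents are made up for the example.

allowed = url_is({'PREFIXES': ('/static/', '/media/'),
                  'CONSTANTS': ('/health',)})
allowed('/static/css/site.css')  # True (prefix match)
allowed('/health')               # True (exact match)
allowed('/admin/')               # False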
def _cached_by_domain(api_name): def wrapped(func): def decorated(self, domains): if (not self._cache): return func(self, domains) all_responses = self._cache.bulk_lookup(api_name, domains) domains = list((set(domains) - set(all_responses))) if domains: response = func(self, domains) if (not response): raise ResponseError('No response for uncached domains') for domain in response: self._cache.cache_value(api_name, domain, response[domain]) all_responses[domain] = response[domain] return all_responses return decorated return wrapped
A caching wrapper for functions that take a list of domains as parameters. Raises: ResponseError - if the response received from the endpoint is not valid.
codesearchnet
def process_new_issues(self, volumes, existing_issues): new_issues = {} for (issue_id, volume) in volumes.items(): state = EBSIssueState.DETECTED.value if (issue_id in existing_issues): issue = existing_issues[issue_id] data = {'state': state, 'notes': issue.notes, 'last_notice': issue.last_notice} if issue.update(data): new_issues.setdefault(issue.volume.account, []).append(issue) self.log.debug('Updated EBSVolumeAuditIssue {}'.format(issue_id)) else: properties = {'volume_id': volume.id, 'account_id': volume.account_id, 'location': volume.location, 'state': state, 'last_change': datetime.now(), 'last_notice': None, 'notes': []} issue = EBSVolumeAuditIssue.create(issue_id, properties=properties) new_issues.setdefault(issue.volume.account, []).append(issue) return new_issues
Takes a dict of existing volumes missing tags and a dict of existing issues, and finds any new or updated issues. Args: volumes (:obj:`dict` of `str`: `EBSVolume`): Dict of current volumes with issues existing_issues (:obj:`dict` of `str`: `EBSVolumeAuditIssue`): Current list of issues Returns: :obj:`dict` of `str`: `EBSVolumeAuditIssue`
codesearchnet
def convert_tokens_into_matrix(self, token_list): return np.array(self.vectorize(token_list)).astype(np.float32)
Create matrix of sentences. Args: token_list: The list of tokens. Returns: 2-D `np.ndarray` of sentences. Each row means one hot vectors of one sentence.
juraj-google-style
def flatten_(structure): if isinstance(structure, dict): if structure: structure = zip(*sorted(structure.items(), key=lambda x: x[0]))[1] else: structure = () if isinstance(structure, (tuple, list)): result = [] for element in structure: result += flatten_(element) return tuple(result) return (structure,)
Combine all leaves of a nested structure into a tuple. The nested structure can consist of any combination of tuples, lists, and dicts. Dictionary keys will be discarded but values will ordered by the sorting of the keys. Args: structure: Nested structure. Returns: Flat tuple.
juraj-google-style
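A quick sketch of the flattening behaviour of `flatten_` above. Note the `zip(...)[1]` indexing in the entry is Python 2 style, so this result assumes Python 2 semantics (Python 3 would need `list(zip(...))[1]`).

nested = {'b': (1, [2, 3]), 'a': {'x': 4}}
flatten_(nested)  # -> (4, 1, 2, 3): dict keys sorted, values flattened in order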
def validate_read(self, address): if not any(address.startswith(ns) for ns in self._read_list): raise AuthorizationException(address=address)
Raises an exception if the address is not allowed to be read in this context, based on txn inputs. Args: address (str): An address to be validated. Returns: None Raises: AuthorizationException
juraj-google-style
def process_answer(self, user, item, asked, answered, time, answer, response_time, guess, **kwargs): pass
This method is used during the answer streaming and is called after the predictive model for each answer. Args: user (int): identifier of the user answering the question asked (int): identifier of the asked item answered (int): identifier of the answered item or None if the user answered "I don't know" response_time (int) time the answer took in milliseconds time (datetime.datetime) time when the user answered the question guess (float): probability of correct response in case of random answer
codesearchnet
def print_serial_number_info(self, serial_number, print_to_screen=True):
    r = self.select_serial_number_row(serial_number)
    if r.empty:
        warnings.warn("missing serial number")
        return

    txt1 = 80 * "="
    txt1 += "\n"
    txt1 += f" serial number {serial_number}\n"
    txt1 += 80 * "-"
    txt1 += "\n"
    txt2 = ""
    for label, value in zip(r.columns, r.values[0]):
        if label in self.headers:
            txt1 += f"{label}: \t {value}\n"
        else:
            txt2 += f"({label}: \t {value})\n"

    if print_to_screen:
        print(txt1)
        print(80 * "-")
        print(txt2)
        print(80 * "=")
        return
    else:
        return txt1
Print information about the run. Args: serial_number: serial number. print_to_screen: runs the print statement if True, returns txt if not. Returns: txt if print_to_screen is False, else None.
juraj-google-style
def concat_pairs(tensor_tuple0: Tuple[torch.Tensor], tensor_tuple1: Tuple[torch.Tensor]) -> Tuple[torch.Tensor]: return tuple([torch.cat([tensor0, tensor1]) for tensor0, tensor1 in zip(tensor_tuple0, tensor_tuple1)])
Concatenate two tuples of tensors pairwise Args: tensor_tuple0 (`Tuple[torch.Tensor]`): Tuple of tensors. tensor_tuple1 (`Tuple[torch.Tensor]`): Tuple of tensors. Returns: (`Tuple[torch.Tensor]`): Tuple of concatenated tensors.
github-repos
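A short illustration of `concat_pairs` with made-up tensor shapes; the tensors are concatenated along the first dimension, pair by pair.

import torch

past_a = (torch.zeros(2, 4), torch.zeros(2, 4))
past_b = (torch.ones(3, 4), torch.ones(3, 4))
merged = concat_pairs(past_a, past_b)
print([t.shape for t in merged])  # [torch.Size([5, 4]), torch.Size([5, 4])]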
def __init__(self, inputs=None, outputs=None, assettype=AssetType.GoverningToken, assetname='', amount=Fixed8(0), precision=0, owner=None, admin=None): super(RegisterTransaction, self).__init__(inputs, outputs) self.Type = TransactionType.RegisterTransaction self.AssetType = assettype self.Name = assetname self.Amount = amount if inputs is not None: self.inputs = inputs else: self.inputs = [] if outputs is not None: self.outputs = outputs else: self.outputs = [] if owner is not None and type(owner) is not EllipticCurve.ECPoint: raise Exception("Invalid owner, must be ECPoint instance") self.Owner = owner self.Admin = admin self.Precision = precision
Create an instance. Args: inputs (list): outputs (list): assettype (neo.Core.AssetType): assetname (str): amount (Fixed8): precision (int): number of decimals the asset has. owner (EllipticCurve.ECPoint): admin (UInt160):
juraj-google-style
def trigger(self, target: str, trigger: str, parameters: Dict[(str, Any)]={}): pass
Calls the specified Trigger of another Area with the optionally given parameters. Args: target: The name of the target Area. trigger: The name of the Trigger. parameters: The parameters of the function call.
codesearchnet
def RegisterSourceType(cls, source_type_class): if (source_type_class.TYPE_INDICATOR in cls._source_type_classes): raise KeyError('Source type already set for type: {0:s}.'.format(source_type_class.TYPE_INDICATOR)) cls._source_type_classes[source_type_class.TYPE_INDICATOR] = source_type_class
Registers a source type. Source types are identified based on their type indicator. Args: source_type_class (type): source type. Raises: KeyError: if a source type is already set for the corresponding type indicator.
codesearchnet
def create_additional_charge(self, *, subscription_id, description, plan_value, plan_tax, plan_tax_return_base, currency): payload = {'description': description, 'additionalValues': [{'name': 'ITEM_VALUE', 'value': plan_value, 'currency': currency}, {'name': 'ITEM_TAX', 'value': plan_tax, 'currency': currency}, {'name': 'ITEM_TAX_RETURN_BASE', 'value': plan_tax_return_base, 'currency': currency}]} fmt = 'subscriptions/{}/recurringBillItems'.format(subscription_id) return self.client._post((self.url + fmt), json=payload, headers=self.get_headers())
Adds extra charges to the respective invoice for the current period. Args: subscription_id: Identification of the subscription description: plan_value: plan_tax: plan_tax_return_base: currency: Returns:
codesearchnet
def create_list(self, **kwargs): path = self._get_path('create_list') kwargs.update({'session_id': self.session_id}) payload = {'name': kwargs.pop('name', None), 'description': kwargs.pop('description', None)} if ('language' in kwargs): payload['language'] = kwargs['language'] response = self._POST(path, kwargs, payload) self._set_attrs_to_values(response) return response
Create a new list. A valid session id is required. Args: name: Name of the list. description: Description of the list. language: (optional) ISO 639-1 code. Returns: A dict respresentation of the JSON returned from the API.
codesearchnet
def rotation(self): rotation = self._libinput.libinput_event_tablet_tool_get_rotation(self._handle) changed = self._libinput.libinput_event_tablet_tool_rotation_has_changed(self._handle) return (rotation, changed)
The current Z rotation of the tool in degrees, clockwise from the tool's logical neutral position and whether it has changed in this event. For tools of type :attr:`~libinput.constant.TabletToolType.MOUSE` and :attr:`~libinput.constant.TabletToolType.LENS` the logical neutral position is pointing to the current logical north of the tablet. For tools of type :attr:`~libinput.constant.TabletToolType.BRUSH`, the logical neutral position is with the buttons pointing up. If this axis does not exist on the current tool, this property is (0, :obj:`False`). Returns: (float, bool): The current value of the axis and whether it has changed.
codesearchnet
def memcached_client(servers=config.memcached_uri, debug=config.debug_memcache): key = None try: (client, key) = scoped_instance_manager.acquire(servers, debug=debug) (yield client) finally: if key: scoped_instance_manager.release(key)
Get a shared memcached instance. This function shares the same memcached instance across nested invocations. This is done so that memcached connections can be kept to a minimum, but at the same time unnecessary extra reconnections are avoided. Typically an initial scope (using 'with' construct) is made around parts of code that hit the cache server many times - such as a resolve, or executing a context. On exit of the topmost scope, the memcached client is disconnected. Returns: `Client`: Memcached instance.
codesearchnet
def get_initial_state_args(value_and_gradients_function, initial_position, grad_tolerance, control_inputs=None): if control_inputs: with tf.control_dependencies(control_inputs): (f0, df0) = value_and_gradients_function(initial_position) else: (f0, df0) = value_and_gradients_function(initial_position) converged = (norm(df0, dims=1) < grad_tolerance) return dict(converged=converged, failed=tf.zeros_like(converged), num_iterations=tf.convert_to_tensor(value=0), num_objective_evaluations=tf.convert_to_tensor(value=1), position=initial_position, objective_value=f0, objective_gradient=df0)
Returns a dictionary to populate the initial state of the search procedure. Performs an initial convergence check and the first evaluation of the objective function. Args: value_and_gradients_function: A Python callable that accepts a tensor and returns a tuple of two tensors: the objective function value and its derivative. initial_position: The starting point of the search procedure. grad_tolerance: The gradient tolerance for the procedure. control_inputs: Optional ops used to assert the validity of inputs, these are added as control dependencies to execute before the objective function is evaluated for the first time. Returns: A dictionary with values for the following keys: converged: True if the convergence check finds that the initial position is already an argmin of the objective function. failed: Initialized to False. num_objective_evaluations: Initialized to 1. position: Initialized to the initial position. objective_value: Initialized to the value of the objective function at the initial position. objective_gradient: Initialized to the gradient of the objective function at the initial position.
codesearchnet
def label_count(self, label_list_ids=None): count = collections.defaultdict(int) for label_list in self.label_lists.values(): if ((label_list_ids is None) or (label_list.idx in label_list_ids)): for (label_value, label_count) in label_list.label_count().items(): count[label_value] += label_count return count
Return a dictionary containing the number of times, every label-value in this utterance is occurring. Args: label_list_ids (list): If not None, only labels from label-lists with an id contained in this list are considered. Returns: dict: A dictionary containing the number of occurrences with the label-value as key.
codesearchnet
def show(self, view: View, request: Request): return view.render('welcome', {'app': request.app().make('Application')})
Show the welcome page. Arguments: view {masonite.view.View} -- The Masonite view class. Application {config.application} -- The application config module. Returns: masonite.view.View -- The Masonite view class.
codesearchnet
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An PLBART sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
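A schematic sketch of how the prefix/suffix tokens wrap the input IDs in `build_inputs_with_special_tokens`; the concrete token values below are placeholders, not the real PLBart vocabulary.

# Hypothetical values just to show the concatenation order.
prefix_tokens = []            # no BOS prefix
suffix_tokens = [2, 50001]    # e.g. [eos, src_lang_code] (made-up IDs)

token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]

single = prefix_tokens + token_ids_0 + suffix_tokens
pair = prefix_tokens + token_ids_0 + token_ids_1 + suffix_tokens
print(single)  # [10, 11, 12, 2, 50001]
print(pair)    # [10, 11, 12, 20, 21, 2, 50001]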
def dynamic_import(modname, import_tuples, developing=True, ignore_froms=[], dump=False, ignore_startswith=[], ignore_endswith=[], ignore_list=[], check_not_imported=True, return_initstr=False, verbose=False): if verbose: print(('[UTIL_IMPORT] Running Dynamic Imports for modname=%r ' % modname)) try: module = sys.modules[modname] except: module = __import__(modname) imports = [tup[0] for tup in import_tuples] __excecute_imports(module, modname, imports, verbose=verbose) if developing: from_imports = __execute_fromimport_star(module, modname, import_tuples, ignore_list=ignore_list, ignore_startswith=ignore_startswith, ignore_endswith=ignore_endswith, check_not_imported=check_not_imported, verbose=verbose) else: from_imports = __execute_fromimport(module, modname, import_tuples, verbose=verbose) inject_execstr = _inject_execstr(modname, import_tuples) dump_requested = (((('--dump-%s-init' % modname) in sys.argv) or (('--print-%s-init' % modname) in sys.argv)) or dump) overwrite_requested = (('--update-%s-init' % modname) in sys.argv) if verbose: print(('[UTIL_IMPORT] Finished Dynamic Imports for modname=%r ' % modname)) if dump_requested: is_main_proc = (multiprocessing.current_process().name == 'MainProcess') if is_main_proc: from utool import util_str initstr = _initstr(modname, imports, from_imports, inject_execstr) print(util_str.indent(initstr)) if overwrite_requested: '\n SeeAlso:\n util_inject.inject_python_code\n util_str.replace_between_tags\n ' is_main_proc = (multiprocessing.current_process().name == 'MainProcess') if is_main_proc: from utool import util_str from os.path import join, exists initstr = _initstr(modname, imports, from_imports, inject_execstr, withheader=False) new_else = util_str.indent(initstr) init_fpath = join(module.__path__[0], '__init__.py') print(('attempting to update: %r' % init_fpath)) assert exists(init_fpath) new_lines = [] editing = False updated = False with open(init_fpath, 'r') as file_: lines = file_.readlines() for line in lines: if (not editing): new_lines.append(line) if line.strip().startswith(' new_lines.append((('\n' + new_else) + '\n editing = True updated = True if line.strip().startswith(' editing = False if updated: print(('writing updated file: %r' % init_fpath)) new_text = ''.join(new_lines) with open(init_fpath, 'w') as file_: file_.write(new_text) else: print(('no write hook for file: %r' % init_fpath)) if return_initstr: initstr = _initstr(modname, imports, from_imports, '', withheader=False) return (inject_execstr, initstr) else: return inject_execstr
MAIN ENTRY POINT Dynamically import listed util libraries and their attributes. Create reload_subs function. Using __import__ like this is typically not considered good style. However, it is better than import * and this will generate the good file text that can be used when the module is 'frozen'. Returns: str: init_inject_str - by default all imports are executed in this function and only the remaining code needed to be executed is returned to define the reload logic. str, str: init_inject_str, init_str - if return_initstr is True then also returns init_str defining the from imports. Ignore: ignore_startswith = [] ignore_endswith = [] check_not_imported = True verbose = True
codesearchnet
def GetLaunchedFlows(self, flow_type='outstanding'): result = None all_clients = set(self.ListAllClients()) finished_clients = set(self.ListFinishedClients()) outstanding_clients = (all_clients - finished_clients) if (flow_type == 'all'): result = all_clients elif (flow_type == 'finished'): result = finished_clients elif (flow_type == 'outstanding'): result = outstanding_clients flows = aff4.FACTORY.MultiListChildren([self.urn.Add(x.Basename()) for x in result]) return [x[0] for (_, x) in flows]
Returns the session IDs of all the flows we launched. Args: flow_type: The type of flows to fetch. Can be "all", "outstanding" or "finished". Returns: A list of flow URNs.
codesearchnet
def check_models_are_auto_configured(module: types.ModuleType, all_auto_models: List[str]) -> List[str]: defined_models = get_models(module) failures = [] for model_name, _ in defined_models: if model_name not in all_auto_models and (not ignore_unautoclassed(model_name)): failures.append(f'{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file `utils/check_repo.py`.') return failures
Check models defined in module are each in an auto class. Args: module (`types.ModuleType`): The module in which we get the models. all_auto_models (`List[str]`): The list of all models in an auto class (as obtained with `get_all_auto_configured_models()`). Returns: `List[str]`: The list of error messages corresponding to models not tested.
github-repos
def overlap(self, feature, stranded: bool=False): feature_strand = feature.strand strand = self.strand if (stranded and ((strand == '.') or ((strand == '+') and (feature_strand in ['-', '.'])) or ((strand == '-') and (feature_strand in ['+', '.'])))): return False iv_1 = set(range(feature.start, (feature.end + 1))) iv_2 = set(range(self.start, (self.end + 1))) if (len(iv_1.intersection(iv_2)) > 0): return True else: return False
Determine if a feature's position overlaps with the entry Args: feature (class): GFF3Entry object stranded (bool): allow features to overlap on different strands if True [default: False] Returns: bool: True if features overlap, else False
codesearchnet
def _add_mgmt_to_domains(self, conf, mgmts): for dom_name, dom_spec in conf['domains'].iteritems(): domain_mgmt = [ nic['net'] for nic in dom_spec['nics'] if nic['net'] in mgmts ].pop() dom_spec['mgmt_net'] = domain_mgmt
Add management network key('mgmt_net') to each domain. Note this assumes ``conf`` was validated. Args: conf(dict): spec mgmts(list): list of management networks names
juraj-google-style
def prepare(self, variables): initializedsteps = [] if variables is None: variables = dict() for step, params, _resources, _files in self.steps: new_params = _complete_parameters(params, variables) initializedsteps.append(step(new_params)) return initializedsteps
Initialize all steps in this recipe using their parameters. Args: variables (dict): A dictionary of global variable definitions that may be used to replace or augment the parameters given to each step. Returns: list of RecipeActionObject like instances: The list of instantiated steps that can be used to execute this recipe.
juraj-google-style
def _get_symmetry(self): d = spglib.get_symmetry(self._cell, symprec=self._symprec, angle_tolerance=self._angle_tol) trans = [] for t in d['translations']: trans.append([float(Fraction.from_float(c).limit_denominator(1000)) for c in t]) trans = np.array(trans) trans[(np.abs(trans) == 1)] = 0 return (d['rotations'], trans)
Get the symmetry operations associated with the structure. Returns: Symmetry operations as a tuple of two equal length sequences. (rotations, translations). "rotations" is the numpy integer array of the rotation matrices for scaled positions "translations" gives the numpy float64 array of the translation vectors in scaled positions.
codesearchnet
def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_ids, attention_mask, position_ids, **kwargs) return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)
Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration >>> model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-large") >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large") >>> text = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, max_length=1024, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) ```
github-repos
def for_input_type(self, input_type): return self
Returns a specialized implementation of self, if it exists. Otherwise, returns self. Args: input_type: the type of input elements.
github-repos
def _add_def_paths(self, prop_dict):
    for prop_key, prop_value in prop_dict.iteritems():
        # Rewrite bare $ref targets as paths under #/definitions/.
        if prop_key == '$ref' and not prop_value.startswith('#/'):
            prop_dict[prop_key] = '#/definitions/' + prop_value
        elif isinstance(prop_value, dict):
            self._add_def_paths(prop_value)
Recursive method to add relative paths for any $ref objects. Args: prop_dict: The property dict to alter. Side Effects: Alters prop_dict in-place.
juraj-google-style
def get_collection(self, id_or_uri, filter=''): if filter: filter = self.__make_query_filter(filter) filter = "?" + filter[1:] uri = "{uri}{filter}".format(uri=self.build_uri(id_or_uri), filter=filter) logger.debug('Get resource collection (uri = %s)' % uri) response = self._connection.get(uri) return self.__get_members(response)
Retrieves a collection of resources. Use this function when the 'start' and 'count' parameters are not allowed in the GET call. Otherwise, use get_all instead. Optional filtering criteria may be specified. Args: id_or_uri: Can be either the resource ID or the resource URI. filter (list or str): General filter/query string. Returns: Collection of the requested resource.
juraj-google-style
def _validate(self, value): _LOGGER.info('validate: Got type %s', type(value)) if ((value is not None) and (not isinstance(value, client.Flow))): raise TypeError('Property {0} must be convertible to a flow instance; received: {1}.'.format(self._name, value))
Validates a value as a proper Flow object. Args: value: A value to be set on the property. Raises: TypeError if the value is not an instance of Flow.
codesearchnet
def Delete(self, request, global_params=None): config = self.GetMethodConfig('Delete') return self._RunMethod(config, request, global_params=global_params)
Delete an association between a GCP project and a GitHub Enterprise server. Args: request: (CloudbuildProjectsGithubEnterpriseConfigsDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Operation) The response message.
github-repos
def ParseFileObject(self, parser_mediator, file_object): try: self._ParseFileHeader(file_object) except errors.ParseError as exception: raise errors.ParseError('Unable to parse index file header with error: {0!s}'.format(exception)) file_object.seek(112, os.SEEK_CUR) self._ParseIndexTable(file_object)
Parses a file-like object. Args: parser_mediator (ParserMediator): a parser mediator. file_object (dfvfs.FileIO): a file-like object to parse. Raises: ParseError: when the file cannot be parsed.
codesearchnet
def create_project(self, collab_id): return self._authenticated_request.to_endpoint('project/').with_json_body(self._prep_params(locals())).return_body().post()
Create a new project. Args: collab_id (int): The id of the collab the project should be created in. Returns: A dictionary of details of the created project:: { u'collab_id': 12998, u'created_by': u'303447', u'created_on': u'2017-03-21T14:06:32.293902Z', u'description': u'', u'entity_type': u'project', u'modified_by': u'303447', u'modified_on': u'2017-03-21T14:06:32.293967Z', u'name': u'12998', u'uuid': u'2516442e-1e26-4de1-8ed8-94523224cc40' } Raises: StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
codesearchnet
def _map_query_path_to_location_info(query_metadata_table): query_path_to_location_info = {} for (location, location_info) in query_metadata_table.registered_locations: if (not isinstance(location, Location)): continue if (location.query_path in query_path_to_location_info): equivalent_location_info = query_path_to_location_info[location.query_path] if (not _location_infos_equal(location_info, equivalent_location_info)): raise AssertionError(u'Differing LocationInfos at query_path {} between {} and {}. Expected parent_location.query_path, optional_scopes_depth, recursive_scopes_depth and types to be equal for LocationInfos sharing the same query path.'.format(location.query_path, location_info, equivalent_location_info)) query_path_to_location_info[location.query_path] = location_info return query_path_to_location_info
Create a map from each query path to a LocationInfo at that path. Args: query_metadata_table: QueryMetadataTable, object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). Returns: Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path.
codesearchnet
def __init__(self, filename, sample_filename, probability_threshold=0.9): self.samples = pd.read_csv(sample_filename, sep=" ", skiprows=2, names=["fid", "iid", "missing", "father", "mother", "sex", "plink_geno"], dtype=dict(fid=str, iid=str)) try: self.samples = self.samples.set_index("iid", verify_integrity=True) except ValueError: logging.info( "Setting the index as 'fid_iid' because the individual IDs " "are not unique." ) self.samples["fid_iid"] = [ "{fid}_{iid}".format(fid=fid, iid=iid) for fid, iid in zip(self.samples.fid, self.samples.iid) ] self.samples = self.samples.set_index( "fid_iid", verify_integrity=True, ) self._impute2_file = get_open_func(filename)(filename, "r") self.has_index = path.isfile(filename + ".idx") self._impute2_index = None self._index_has_location = False if self.has_index: self._impute2_index = get_index( filename, cols=[0, 1, 2], names=["chrom", "name", "pos"], sep=" ", ) try: self._impute2_index = self._impute2_index.set_index( "name", verify_integrity=True, ) self._has_duplicated = False except ValueError as e: self._has_duplicated = True duplicated = self._impute2_index.name.duplicated(keep=False) duplicated_markers = self._impute2_index.loc[ duplicated, "name" ] duplicated_marker_counts = duplicated_markers.value_counts() self._dup_markers = { m: [] for m in duplicated_marker_counts.index } logging.found_duplicates(duplicated_marker_counts.iteritems()) counter = Counter() for i, marker in duplicated_markers.iteritems(): counter[marker] += 1 new_name = "{}:dup{}".format(marker, counter[marker]) self._impute2_index.loc[i, "name"] = new_name self._dup_markers[marker].append(new_name) self._impute2_index = self._impute2_index.set_index( "name", verify_integrity=True, ) self._index_has_location = ( "chrom" in self._impute2_index.columns and "pos" in self._impute2_index.columns ) if self._index_has_location: self._impute2_index["multiallelic"] = False self._impute2_index.loc[ self._impute2_index.duplicated(["chrom", "pos"], keep=False), "multiallelic" ] = True self.prob_t = probability_threshold
IMPUTE2 file reader. Args: filename (str): The name of the IMPUTE2 file. sample_filename (str): The name of the SAMPLE file. probability_threshold (float): The probability threshold. Note ==== If the sample IDs are not unique, the index is changed to be the sample family ID and individual ID (i.e. fid_iid).
juraj-google-style
def __parse_tostr(self, text, **kwargs): n = self.options.get('nbest', 1) if (self._KW_BOUNDARY in kwargs): patt = kwargs.get(self._KW_BOUNDARY, '.') tokens = list(self.__split_pattern(text, patt)) text = ''.join([t[0] for t in tokens]) btext = self.__str2bytes(text) self.__mecab.mecab_lattice_set_sentence(self.lattice, btext) bpos = 0 self.__mecab.mecab_lattice_set_boundary_constraint(self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY) for (token, match) in tokens: bpos += 1 if match: mark = self.MECAB_INSIDE_TOKEN else: mark = self.MECAB_ANY_BOUNDARY for _ in range(1, len(self.__str2bytes(token))): self.__mecab.mecab_lattice_set_boundary_constraint(self.lattice, bpos, mark) bpos += 1 self.__mecab.mecab_lattice_set_boundary_constraint(self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY) elif (self._KW_FEATURE in kwargs): features = kwargs.get(self._KW_FEATURE, ()) fd = {morph: self.__str2bytes(feat) for (morph, feat) in features} tokens = self.__split_features(text, [e[0] for e in features]) text = ''.join([t[0] for t in tokens]) btext = self.__str2bytes(text) self.__mecab.mecab_lattice_set_sentence(self.lattice, btext) bpos = 0 for (chunk, match) in tokens: c = len(self.__str2bytes(chunk)) if (match == True): self.__mecab.mecab_lattice_set_feature_constraint(self.lattice, bpos, (bpos + c), fd[chunk]) bpos += c else: btext = self.__str2bytes(text) self.__mecab.mecab_lattice_set_sentence(self.lattice, btext) self.__mecab.mecab_parse_lattice(self.tagger, self.lattice) if (n > 1): res = self.__mecab.mecab_lattice_nbest_tostr(self.lattice, n) else: res = self.__mecab.mecab_lattice_tostr(self.lattice) if (res != self.__ffi.NULL): raw = self.__ffi.string(res) return self.__bytes2str(raw).strip() else: err = self.__mecab.mecab_lattice_strerror(self.lattice) logger.error(self.__bytes2str(self.__ffi.string(err))) raise MeCabError(self.__bytes2str(self.__ffi.string(err)))
Builds and returns the MeCab function for parsing Unicode text. Args: fn_name: MeCab function name that determines the function behavior, either 'mecab_sparse_tostr' or 'mecab_nbest_sparse_tostr'. Returns: A function definition, tailored to parsing Unicode text and returning the result as a string suitable for display on stdout, using either the default or N-best behavior.
codesearchnet
def set_agent(self, short_name, client_id): if (short_name not in self.services): raise ArgumentError('Unknown service name', short_name=short_name) self.agents[short_name] = client_id
Register a client id that handles commands for a service. Args: short_name (str): The name of the service to set an agent for. client_id (str): A globally unique id for the client that should receive commands for this service.
codesearchnet
def new_panel(store, institute_id, panel_name, display_name, csv_lines): institute_obj = store.institute(institute_id) if (institute_obj is None): flash('{}: institute not found'.format(institute_id)) return None panel_obj = store.gene_panel(panel_name) if panel_obj: flash('panel already exists: {} - {}'.format(panel_obj['panel_name'], panel_obj['display_name'])) return None log.debug('parse genes from CSV input') try: new_genes = parse_genes(csv_lines) except SyntaxError as error: flash(error.args[0], 'danger') return None log.debug('build new gene panel') panel_id = None try: panel_data = build_panel(dict(panel_name=panel_name, institute=institute_obj['_id'], version=1.0, date=dt.datetime.now(), display_name=display_name, genes=new_genes), store) panel_id = store.add_gene_panel(panel_data) except Exception as err: log.error('An error occurred while adding the gene panel {}'.format(err)) return panel_id
Create a new gene panel. Args: store(scout.adapter.MongoAdapter) institute_id(str) panel_name(str) display_name(str) csv_lines(iterable(str)): Stream with genes Returns: panel_id: the ID of the new panel document created or None
codesearchnet
def __init__(self, args=None, varargs=None, varkw=None, defaults=None, kwonlyargs=None, kwonlydefaults=None, annotations=None): self.args = args or [] self.varargs = varargs self.varkw = varkw self.defaults = defaults or () self.kwonlyargs = kwonlyargs or [] self.kwonlydefaults = kwonlydefaults or {} self.annotations = annotations or {}
Constructs a FullArgSpec with each provided attribute, or the default. Args: args: A list of the argument names accepted by the function. varargs: The name of the *varargs argument or None if there isn't one. varkw: The name of the **kwargs argument or None if there isn't one. defaults: A tuple of the defaults for the arguments that accept defaults. kwonlyargs: A list of argument names that must be passed with a keyword. kwonlydefaults: A dictionary of keyword only arguments and their defaults. annotations: A dictionary of arguments and their annotated types.
github-repos
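A hedged example of filling the spec above for a simple signature; it assumes `FullArgSpec` is used as a plain value container, as the constructor suggests.

# Spec for: def resize(image, *sizes, mode='bilinear', **options) -> None
spec = FullArgSpec(
    args=['image'],
    varargs='sizes',
    varkw='options',
    defaults=(),
    kwonlyargs=['mode'],
    kwonlydefaults={'mode': 'bilinear'},
    annotations={'return': None},
)
print(spec.args, spec.kwonlydefaults)  # ['image'] {'mode': 'bilinear'}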
def clock(self, interval, basis): cache_name = self._classify_clock(interval, basis) cache_data = self.clock_cache.get(cache_name) if cache_data is None: parent_stream, trigger = self.parent.clock(interval, basis) if trigger.use_count is False: raise SensorGraphSemanticError("Unsupported clock trigger in GatedClockScope", trigger=trigger) elif interval % trigger.reference != 0: raise SensorGraphSemanticError("Unsupported trigger ratio in GatedClockScope", trigger=trigger, interval=interval) ratio = interval stream = self.allocator.allocate_stream(DataStream.CounterType) latch_stream = self.allocator.attach_stream(self.latch_stream) self.sensor_graph.add_node(u'({} always && {} {}) => {} using copy_latest_a'.format(parent_stream, latch_stream, self.latch_trigger, stream)) self.clock_cache[cache_name] = (stream, ratio) else: stream, ratio = cache_data if interval % ratio != 0: raise SensorGraphSemanticError("Unsupported trigger ratio in GatedClockScope", ratio=ratio, interval=interval) count = interval clock_stream = self.allocator.attach_stream(stream) return clock_stream, InputTrigger(u'count', '>=', count)
Return a NodeInput tuple for triggering an event every interval. We request each distinct type of clock at most once and combine it with our latch stream each time it is requested. Args: interval (int): The interval (in seconds) at which this input should trigger.
juraj-google-style
def floor(cls, x: 'TensorFluent') -> 'TensorFluent': return cls._unary_op(x, tf.floor, tf.float32)
Returns a TensorFluent for the floor function. Args: x: The input fluent. Returns: A TensorFluent wrapping the floor function.
codesearchnet
def _get_grand_potential(self, composition): if self.use_hull_energy: grand_potential = self.pd_non_grand.get_hull_energy(composition) else: grand_potential = InterfacialReactivity._get_entry_energy( self.pd_non_grand, composition) grand_potential -= sum([composition[e] * mu for e, mu in self.pd.chempots.items()]) if self.norm: grand_potential /= sum([composition[el] for el in composition if el not in self.pd.chempots]) return grand_potential
Computes the grand potential Phi at a given composition and chemical potential(s). Args: composition (Composition): Composition object. Returns: Grand potential at a given composition at chemical potential(s).
juraj-google-style
def generate(arglist, git_tag_override=None): spec, head_symlink, _, dest_file = arglist data = json.load(open(spec)) git_version = None if not data['git']: git_version = b'unknown' else: old_branch = data['branch'] new_branch = parse_branch_ref(head_symlink) if new_branch != old_branch: raise RuntimeError("Run ./configure again, branch was '%s' but is now '%s'" % (old_branch, new_branch)) git_version = get_git_version(data['path'], git_tag_override) write_version_info(dest_file, git_version)
Generate version_info.cc as given `destination_file`. Args: arglist: should be a sequence that contains spec, head_symlink, ref_symlink, destination_file. `destination_file` is the filename where version_info.cc will be written `spec` is a filename where the file contains a JSON dictionary 'git' bool that is true if the source is in a git repo 'path' base path of the source code 'branch' the name of the ref specification of the current branch/tag `head_symlink` is a filename to HEAD that is cross-referenced against what is contained in the json branch designation. `ref_symlink` is unused in this script but passed, because the build system uses that file to detect when commits happen. git_tag_override: Override the value for the git tag. This is useful for releases where we want to build the release before the git tag is created. Raises: RuntimeError: If ./configure needs to be run, RuntimeError will be raised.
github-repos
def is_location(v) -> (bool, str):
    def convert2float(value):
        try:
            float_num = float(value)
            return float_num
        except ValueError:
            return False

    if not isinstance(v, str):
        return False, v
    split_lst = v.split(':')
    if len(split_lst) != 5:
        return False, v
    if convert2float(split_lst[3]):
        longitude = abs(convert2float(split_lst[3]))
        if longitude > 90:
            return False, v
    if convert2float(split_lst[4]):
        latitude = abs(convert2float(split_lst[4]))
        if latitude > 180:
            return False, v
    return True, v
Boolean function for checking if v is a location format Args: v: Returns: bool
codesearchnet
def save_link(self, path_info): assert path_info["scheme"] == "local" path = path_info["path"] if not os.path.exists(path): return mtime, _ = get_mtime_and_size(path) inode = get_inode(path) relpath = os.path.relpath(path, self.root_dir) cmd = ( "REPLACE INTO {}(path, inode, mtime) " 'VALUES ("{}", {}, "{}")'.format( self.LINK_STATE_TABLE, relpath, self._to_sqlite(inode), mtime ) ) self._execute(cmd)
Adds the specified path to the list of links created by dvc. This list is later used on `dvc checkout` to cleanup old links. Args: path_info (dict): path info to add to the list of links.
juraj-google-style
def __init__(self, label, names=(), path=None): self.label = label self.names = names self.path = path for name in names: setattr(self, name, self.__class__(name, path="{0}.{1}".format(path, label) if path else label))
Create a new enumeration. The parent enum creates an instance for each item. Args: label (str): enum name names (list): item labels path (list): qualified parent name, for :func:`repr` output
juraj-google-style
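A small usage sketch of the recursive enumeration constructor above, assuming the class is named `Enum` (the entry does not show the class name); each item becomes a child instance reachable as an attribute.

colors = Enum('colors', names=('RED', 'GREEN', 'BLUE'))
print(colors.RED.label)    # 'RED'
print(colors.RED.path)     # 'colors' (qualified parent name)
print(colors.GREEN.names)  # () -- items have no children of their own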
def output(self, filename): if filename == '': filename = 'contracts.dot' if not filename.endswith('.dot'): filename += ".dot" info = 'Inheritance Graph: ' + filename self.info(info) with open(filename, 'w', encoding='utf8') as f: f.write('digraph "" {\n') for c in self.contracts: f.write(self._summary(c)) f.write('}')
Output the inheritance graph to filename Args: filename (string)
juraj-google-style
def show(config, section, opt): if section not in config.keys(): raise ConfigError("section '{}' doesn't exist".format(section)) if opt not in config[section].keys(): raise ConfigError( "option '{}.{}' doesn't exist".format(section, opt) ) logger.info(config[section][opt])
Prints option value from the config. Args: config (configobj.ConfigObj): config to work on. section (str): section name. opt (str): option name.
juraj-google-style
def _generate_parser(name, path, required=False, notfoundmsg=None): output = ('def %s(dom):\n' % _get_parser_name(name)) dom = True parser_table = {'find': (lambda path: _find_template(path.params, path.index, required, notfoundmsg)), 'wfind': (lambda path: _wfind_template(dom, path.params, path.index, required, notfoundmsg)), 'match': (lambda path: _match_template(path.params, path.index, required, notfoundmsg)), 'left_neighbour_tag': (lambda path: _neigh_template(path.params, path.index, True, required, notfoundmsg)), 'right_neighbour_tag': (lambda path: _neigh_template(path.params, path.index, False, required, notfoundmsg))} if isinstance(path, path_patterns.PathCall): output += parser_table[path.call_type](path) elif isinstance(path, path_patterns.Chained): for path in path.chain: output += parser_table[path.call_type](path) dom = False else: raise UserWarning(('Unknown type of path parameters! (%s)' % str(path))) output += (IND + 'return el\n') output += '\n\n' return output
Generate parser named `name` for given `path`. Args: name (str): Basename for the parsing function (see :func:`_get_parser_name` for details). path (obj): :class:`.PathCall` or :class:`.Chained` instance. required (bool, default False): Use :func:`_required_idiom` to returned data. notfoundmsg (str, default None): Message which will be used for :func:`_required_idiom` if the item is not found. Returns: str: Python code for parsing `path`.
codesearchnet
def plot_iso(axis, step, var): (xmesh, ymesh, fld) = get_meshes_fld(step, var) if conf.field.shift: fld = np.roll(fld, conf.field.shift, axis=0) axis.contour(xmesh, ymesh, fld, linewidths=1)
Plot isocontours of scalar field. Args: axis (:class:`matplotlib.axes.Axes`): the axis handler of an existing matplotlib figure where the isocontours should be plotted. step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData instance. var (str): the scalar field name.
codesearchnet
def predict(self, text): pred = self.predict_proba(text) tags = self._get_tags(pred) return tags
Predict using the model. Args: text: string, the input text. Returns: tags: list, shape = (num_words,). The predicted tag for each word.
juraj-google-style
def quat2mat(quaternion): q = np.array(quaternion, dtype=np.float32, copy=True)[[3, 0, 1, 2]] n = np.dot(q, q) if n < EPS: return np.identity(3) q *= math.sqrt(2.0 / n) q = np.outer(q, q) return np.array( [ [1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0]], [q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0]], [q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2]], ] )
Converts given quaternion (x, y, z, w) to matrix. Args: quaternion: vec4 float angles Returns: 3x3 rotation matrix
juraj-google-style
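A small numeric check of quat2mat above, assuming numpy and the module's EPS constant are available; a 90-degree rotation about z is the quaternion (x, y, z, w) = (0, 0, sin 45°, cos 45°).

import math
q = [0.0, 0.0, math.sin(math.pi / 4), math.cos(math.pi / 4)]
R = quat2mat(q)
# R is approximately [[0, -1, 0], [1, 0, 0], [0, 0, 1]]
# and the identity quaternion maps to the identity matrix:
# quat2mat([0.0, 0.0, 0.0, 1.0]) -> 3x3 identity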
def AddEnumDescriptor(self, enum_desc): if not isinstance(enum_desc, descriptor.EnumDescriptor): raise TypeError('Expected instance of descriptor.EnumDescriptor.') self._enum_descriptors[enum_desc.full_name] = enum_desc self.AddFileDescriptor(enum_desc.file)
Adds an EnumDescriptor to the pool. This method also registers the FileDescriptor associated with the enum. Args: enum_desc: An EnumDescriptor.
juraj-google-style
def multi_flags_validator(flag_names, message='Flag validation failed', flag_values=FLAGS): def decorate(function): register_multi_flags_validator(flag_names, function, message=message, flag_values=flag_values) return function return decorate
A function decorator for defining a multi-flag validator. Registers the decorated function as a validator for flag_names, e.g. @gflags.multi_flags_validator(['foo', 'bar']) def _CheckFooBar(flags_dict): ... See register_multi_flags_validator() for the specification of checker function. Args: flag_names: [str], a list of the flag names to be checked. message: error text to be shown to the user if checker returns False. If checker raises ValidationError, message from the raised error will be shown. flag_values: An optional FlagValues instance to validate against. Returns: A function decorator that registers its function argument as a validator. Raises: AttributeError: If a flag is not registered as a valid flag name.
codesearchnet
def BalanceFor(self, assetId): for (key, fixed8) in self.Balances.items(): if (key == assetId): return fixed8 return Fixed8(0)
Get the balance for a given asset id. Args: assetId (UInt256): Returns: Fixed8: balance value.
codesearchnet
def _init_from_bool(self, z, x): if (z is None): raise QiskitError('z vector must not be None.') if (x is None): raise QiskitError('x vector must not be None.') if (len(z) != len(x)): raise QiskitError('length of z and x vectors must be the same. (z: {} vs x: {})'.format(len(z), len(x))) z = _make_np_bool(z) x = _make_np_bool(x) self._z = z self._x = x return self
Construct pauli from boolean array. Args: z (numpy.ndarray): boolean, z vector x (numpy.ndarray): boolean, x vector Returns: Pauli: self Raises: QiskitError: if z or x are None or the length of z and x are different.
codesearchnet
def remove_child(self, child): if child in self.children.values() and hasattr(child, 'identifier'): for k in self.children.keys(): if hasattr(self.children[k], 'identifier'): if self.children[k].identifier == child.identifier: if k in self._render_children_list: self._render_children_list.remove(k) self.children.pop(k) break
Removes a child instance from the Tag's children. Args: child (Tag): The child to be removed.
juraj-google-style
def pop_event(self, event_name, timeout=DEFAULT_TIMEOUT): if (not self.started): raise IllegalStateError('Dispatcher needs to be started before popping.') e_queue = self.get_event_q(event_name) if (not e_queue): raise TypeError('Failed to get an event queue for {}'.format(event_name)) try: if timeout: return e_queue.get(True, timeout) elif (timeout == 0): return e_queue.get(False) else: return e_queue.get(True) except queue.Empty: raise queue.Empty('Timeout after {}s waiting for event: {}'.format(timeout, event_name))
Pop an event from its queue. Return and remove the oldest entry of an event. Block until an event of specified name is available or times out if timeout is set. Args: event_name: Name of the event to be popped. timeout: Number of seconds to wait when event is not present. Never times out if None. Returns: The oldest entry of the specified event. None if timed out. Raises: IllegalStateError: Raised if pop is called before the dispatcher starts polling.
codesearchnet
def _CheckMacOSPaths(self, filename, artifact_definition, source, paths): result = True paths_with_private = [] paths_with_symbolic_link_to_private = [] for path in paths: path_lower = path.lower() path_segments = path_lower.split(source.separator) if (not path_segments): logging.warning('Empty path defined by artifact definition: {0:s} in file: {1:s}'.format(artifact_definition.name, filename)) result = False elif (len(path_segments) == 1): continue elif (path_segments[1] in self._MACOS_PRIVATE_SUB_PATHS): paths_with_symbolic_link_to_private.append(path) elif ((path_segments[1] == 'private') and (len(path_segments) >= 2)): if (path_segments[2] in self._MACOS_PRIVATE_SUB_PATHS): paths_with_private.append(path) else: logging.warning('Unsupported private path: {0:s} defined by artifact definition: {1:s} in file: {2:s}'.format(path, artifact_definition.name, filename)) result = False for private_path in paths_with_private: if (private_path[8:] not in paths_with_symbolic_link_to_private): logging.warning('Missing symbolic link: {0:s} for path: {1:s} defined by artifact definition: {2:s} in file: {3:s}'.format(private_path[8:], private_path, artifact_definition.name, filename)) result = False for path in paths_with_symbolic_link_to_private: private_path = '/private{0:s}'.format(path) if (private_path not in paths_with_private): logging.warning('Missing path: {0:s} for symbolic link: {1:s} defined by artifact definition: {2:s} in file: {3:s}'.format(private_path, path, artifact_definition.name, filename)) result = False return result
Checks if the paths are valid MacOS paths. Args: filename (str): name of the artifacts definition file. artifact_definition (ArtifactDefinition): artifact definition. source (SourceType): source definition. paths (list[str]): paths to validate. Returns: bool: True if the MacOS paths are valid.
codesearchnet
def mark_deprecated(replaced_by):
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kw):
            from peltak.core import shell
            if shell.is_tty:
                warnings.warn("This command has been deprecated. Please use "
                              "{new} instead.".format(new=replaced_by))
            return fn(*args, **kw)
        return wrapper
    return decorator
Mark command as deprecated. Args: replaced_by (str): The command that deprecated this command and should be used instead.
juraj-google-style
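A hedged sketch of how the decorator above is meant to be applied; the command names are hypothetical.

@mark_deprecated(replaced_by='peltak lint')
def old_lint():
    """Kept only for backwards compatibility."""
    print('running the old lint command')

old_lint()  # on a TTY this first emits the deprecation warning, then runs the body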
def index(self, connection, partition, columns):
    import hashlib
    # Template reconstructed from the .format() call below; the exact original SQL may differ.
    query_tmpl = 'CREATE INDEX IF NOT EXISTS "{index_name}" ON {table_name} ({columns});'
    if not isinstance(columns, (list, tuple)):
        columns = [columns]
    col_list = ','.join('"{}"'.format(col) for col in columns)
    col_hash = hashlib.md5(col_list.encode('utf-8')).hexdigest()
    try:
        table_name = partition.vid
    except AttributeError:
        table_name = partition
    query = query_tmpl.format(
        index_name='{}_{}_i'.format(table_name, col_hash),
        table_name=table_name,
        columns=col_list)
    logger.debug('Creating sqlite index: query: {}'.format(query))
    cursor = connection.cursor()
    cursor.execute(query)
Create an index on the columns. Args: connection (apsw.Connection): connection to sqlite database who stores mpr table or view. partition (orm.Partition): columns (list of str):
juraj-google-style
def watermark_image(image, wtrmrk_path, corner=2): padding = 2 wtrmrk_img = Image.open(wtrmrk_path) if ((wtrmrk_img.width > (image.width - (padding * 2))) or (wtrmrk_img.height > (image.height - (padding * 2)))): res = (int((image.width / 8.0)), int((image.height / 8.0))) resize_in_place(wtrmrk_img, res) pos = get_pos(corner, image.size, wtrmrk_img.size, padding) was_P = (image.mode == 'P') was_L = (image.mode == 'L') if (image.mode not in ['RGB', 'RGBA']): if (image.format in ['JPG', 'JPEG']): image = image.convert('RGB') else: image = image.convert('RGBA') image.paste(wtrmrk_img.convert('RGBA'), pos, wtrmrk_img.convert('RGBA')) if was_P: image = image.convert('P', palette=Image.ADAPTIVE, colors=256) elif was_L: image = image.convert('L') return image
Adds a watermark image to an instance of a PIL Image. If the provided watermark image (wtrmrk_path) is larger than the provided base image (image), then the watermark image will be automatically resized to roughly 1/8 the size of the base image. Args: image: An instance of a PIL Image. This is the base image. wtrmrk_path: Path to the watermark image to use. corner: An integer between 0 and 3 representing the corner where the watermark image should be placed on top of the base image. 0 is top left, 1 is top right, 2 is bottom right and 3 is bottom left. NOTE: Right now, this is permanently set to 2 (bottom right) but this can be changed in the future by either creating a new cmd-line flag or putting this in the config file. Returns: The watermarked image
codesearchnet
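A short usage sketch for watermark_image above; the file names are placeholders.

from PIL import Image

base = Image.open('photo.jpg')
result = watermark_image(base, 'logo.png', corner=2)  # bottom-right corner
result.save('photo_watermarked.jpg')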
def send_msg(self, address, args=[]): if (not address.startswith('/')): address = '/{}'.format(address) msg = osc_message_builder.OscMessageBuilder(address=address) for arg in args: msg.add_arg(arg) self.conn.send(msg.build()) return
Send multiple args into a single message to a given address. Args: address (str): OSC Address. args (list): Arguments to be parsed in VVVV.
codesearchnet
def identity_kernel_initializer(shape, dtype=tf.float32, partition_info=None):
    if (len(shape) != 4):
        raise ValueError('Convolution kernels must be rank 4.')
    (filter_height, filter_width, in_channels, out_channels) = shape
    if (filter_width != filter_height):
        raise ValueError('Identity initializer only works for square filters.')
    if ((filter_width % 2) != 1):
        raise ValueError('Identity initializer requires filters have odd height and width.')
    if (in_channels != out_channels):
        raise ValueError('in_channels must equal out_channels in order to construct per-channel identities.')
    middle_pixel = (filter_height // 2)
    is_middle_pixel = tf.logical_and(tf.equal(_range_along_dimension(0, shape), middle_pixel), tf.equal(_range_along_dimension(1, shape), middle_pixel))
    is_same_channel = tf.equal(_range_along_dimension(2, shape), _range_along_dimension(3, shape))
    return tf.cast(tf.logical_and(is_same_channel, is_middle_pixel), dtype=dtype)
An initializer for constructing identity convolution kernels. Constructs a convolution kernel such that applying it is the same as an identity operation on the input. Formally, the kernel has entry [i, j, in, out] = 1 if in equals out and i and j are the middle of the kernel and 0 otherwise. Args: shape: List of integers. Represents shape of result. dtype: data type for values in result. partition_info: Partition information for initializer functions. Ignored. Returns: Tensor of desired shape and dtype such that applying it as a convolution kernel results in the identity operation. Raises: ValueError: If shape does not define a valid kernel. If filter width and height differ. If filter width and height are not odd numbers. If number of input and output channels differ.
codesearchnet
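A sketch of the identity property the initializer above provides, assuming TensorFlow and the module's _range_along_dimension helper are importable.

import tensorflow as tf

kernel = identity_kernel_initializer([3, 3, 4, 4])      # 3x3 kernel, 4 channels in and out
x = tf.random.normal([1, 8, 8, 4])
y = tf.nn.conv2d(x, kernel, strides=1, padding='SAME')
# y equals x: only the centre tap of each matching channel is 1, everything else is 0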
def on_graph_def(self, graph_def, device_name, wall_time): if self._dump_dir: if self._grpc_path: self._write_graph_def(graph_def, device_name, wall_time) else: self._cached_graph_defs.append(graph_def) self._cached_graph_def_device_names.append(device_name) self._cached_graph_def_wall_times.append(wall_time) else: self._event_listener_servicer.partition_graph_defs.append(graph_def)
Implementation of the tensor value-carrying Event proto callback. Args: graph_def: A GraphDef object. device_name: Name of the device on which the graph was created. wall_time: An epoch timestamp (in microseconds) for the graph.
github-repos
class TFDebertaStableDropout(keras.layers.Layer): def __init__(self, drop_prob, **kwargs): super().__init__(**kwargs) self.drop_prob = drop_prob @tf.custom_gradient def xdropout(self, inputs): mask = tf.cast(1 - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), tf.bool) scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=self.compute_dtype) if self.drop_prob > 0: inputs = tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), inputs) * scale def grad(upstream): if self.drop_prob > 0: return tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), upstream) * scale else: return upstream return (inputs, grad) def call(self, inputs: tf.Tensor, training: tf.Tensor=False): if training: return self.xdropout(inputs) return inputs
Optimized dropout module for stabilizing the training Args: drop_prob (float): the dropout probability
github-repos
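A hedged usage sketch for the layer above; it assumes a TensorFlow version where tf.compat.v1.distributions is still available and the shape_list helper is importable.

import tensorflow as tf

drop = TFDebertaStableDropout(0.1)
h = tf.random.normal([2, 16, 768])
out_train = drop(h, training=True)   # roughly 10% of entries zeroed, the rest scaled by 1 / 0.9
out_infer = drop(h, training=False)  # identity at inference time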
def set_calibration(self, enabled, imus): if len(imus) == 0: imus = list(range(MAX_IMUS)) for i in imus: if i < 0 or i >= MAX_IMUS: logger.warn('Invalid IMU index {} in set_calibration'.format(i)) continue self.imus[i]._use_calibration = enabled
Set calibration state for attached IMUs. Args: enabled (bool): True to apply calibration to IMU data (if available). False to output uncalibrated data. imus (list): indicates which IMUs the calibration state should be set on. Empty list or [0, 1, 2, 3, 4] will apply to all IMUs, [0, 1] only to first 2 IMUs, etc.
juraj-google-style
def to_dataframe(self, start_row=0, max_rows=None, use_cache=True, dialect=None, billing_tier=None): return self.results(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier).to_dataframe(start_row=start_row, max_rows=max_rows)
Exports the query results to a Pandas dataframe. Args: start_row: the row of the table at which to start the export (default 0). max_rows: an upper limit on the number of rows to export (default None). use_cache: whether to use cached results or not (default True). dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. 'standard' : Use BigQuery's standard SQL (beta), which is compliant with the SQL 2011 standard. billing_tier: Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default. This can also be used to override your project-wide default billing tier on a per-query basis. Returns: A Pandas dataframe containing the table data.
codesearchnet
def processes(self, processes): if self._processes > 1: self._pool.close() self._pool.join() self._pool = multiprocessing.Pool(processes) else: self._pool = None self._logger.log('debug', 'Number of processes set to {}'.format( processes ))
Set the number of concurrent processes the ABC will utilize for fitness function evaluation; if <= 1, a single process is used Args: processes (int): number of concurrent processes
juraj-google-style
def zeros_like(array, dtype=None, keepmeta=True): if keepmeta: return xr.zeros_like(array, dtype) else: return dc.zeros(array.shape, dtype)
Create an array of zeros with the same shape and type as the input array. Args: array (xarray.DataArray): The shape and data-type of it define these same attributes of the output array. dtype (data-type, optional): If specified, this function overrides the data-type of the output array. keepmeta (bool, optional): Whether *coords, attrs, and name of the input array are kept in the output one. Default is True. Returns: array (decode.array): Decode array filled with zeros.
codesearchnet
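A minimal sketch for zeros_like above, assuming xarray and the decode package (dc) it wraps are installed.

import numpy as np
import xarray as xr

arr = xr.DataArray(np.ones((2, 3)), dims=('t', 'ch'), name='spectrum', attrs={'unit': 'K'})
z_meta = zeros_like(arr)                  # zeros, keeping coords, attrs and name of arr
z_bare = zeros_like(arr, keepmeta=False)  # plain decode array of zeros with the same shape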
def on_put(self, req, resp, handler=None, **kwargs): self.handle((handler or self.update), req, resp, **kwargs) resp.status = falcon.HTTP_ACCEPTED
Respond on PUT HTTP request assuming resource update flow. This request handler assumes that PUT requests are associated with resource update/modification. Thus default flow for such requests is: * Modify existing resource instance and prepare its representation by calling its update method handler. * Set response status code to ``202 Accepted``. Args: req (falcon.Request): request object instance. resp (falcon.Response): response object instance to be modified handler (method): update method handler to be called. Defaults to ``self.update``. **kwargs: additional keyword arguments retrieved from url template.
codesearchnet
def parent(self): family = self.repository.get_parent_package_family(self.resource) return (PackageFamily(family) if family else None)
Get the parent package family. Returns: `PackageFamily`.
codesearchnet
def ndtri(p, name='ndtri'): with ops.name_scope(name, values=[p]): p = ops.convert_to_tensor(p, name='p') if p.dtype.as_numpy_dtype not in [np.float32, np.float64]: raise TypeError('p.dtype=%s is not handled, see docstring for supported types.' % p.dtype) return _ndtri(p)
The inverse of the CDF of the Normal distribution function. Returns x such that the area under the pdf from minus infinity to x is equal to p. A piece-wise rational approximation is done for the function. This is a port of the implementation in netlib. Args: p: `Tensor` of type `float32`, `float64`. name: Python string. A name for the operation (default="ndtri"). Returns: x: `Tensor` with `dtype=p.dtype`. Raises: TypeError: if `p` is not floating-type.
github-repos
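A quick numeric check for ndtri above, assuming the surrounding module (with its internal _ndtri helper) is importable.

import tensorflow as tf

p = tf.constant([0.025, 0.5, 0.975], dtype=tf.float64)
x = ndtri(p)
# x is approximately [-1.95996, 0.0, 1.95996]: the 2.5th, 50th and 97.5th
# percentiles of the standard normal distribution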
def pre(fqdn, parent, stackdepth, *argl, **argd): global _atdepth_call, _cstack_call pcres = _pre_call(_atdepth_call, parent, fqdn, stackdepth+1, *argl, **argd) entry, _atdepth_call, reduced, bound, ekey = pcres _cstack_call.append(fqdn) return (entry, bound, ekey)
Adds logging for a call to the specified function that is being handled by an external module. Args: fqdn (str): fully-qualified domain name of the function being logged. parent: *object* that the function belongs to. stackdepth (int): maximum stack depth before entries are ignored. argl (list): positional arguments passed to the function call. argd (dict): keyword arguments passed to the function call.
juraj-google-style
def contains(self, value, equality_comparer=operator.eq): if self.closed(): raise ValueError("Attempt to call contains() on a " "closed Queryable.") if not is_callable(equality_comparer): raise TypeError("contains() parameter equality_comparer={0} is " "not callable".format(repr(equality_comparer))) if equality_comparer is operator.eq: return value in self._iterable for item in self: if equality_comparer(value, item): return True return False
Determines whether the sequence contains a particular value. Execution is immediate. Depending on the type of the sequence, all or none of the sequence may be consumed by this operation. Note: This method uses immediate execution. Args: value: The value to test for membership of the sequence Returns: True if value is in the sequence, otherwise False. Raises: ValueError: If the Queryable has been closed.
juraj-google-style
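A hedged sketch for contains above, assuming the asq-style Queryable class it belongs to can be constructed from a plain list.

q = Queryable(['Apple', 'Banana', 'Cherry'])
q.contains('Banana')                                        # True, default operator.eq
q.contains('banana', lambda a, b: a.lower() == b.lower())   # True, custom equality comparer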
def configs_for_reader(reader=None, ppp_config_dir=None): search_paths = ((ppp_config_dir,) if ppp_config_dir else tuple()) if (reader is not None): if (not isinstance(reader, (list, tuple))): reader = [reader] new_readers = [] for reader_name in reader: if (reader_name.endswith('.yaml') or (reader_name not in OLD_READER_NAMES)): new_readers.append(reader_name) continue new_name = OLD_READER_NAMES[reader_name] raise ValueError("Reader name '{}' has been deprecated, use '{}' instead.".format(reader_name, new_name)) reader = new_readers config_files = [(r if r.endswith('.yaml') else (r + '.yaml')) for r in reader] else: reader_configs = glob_config(os.path.join('readers', '*.yaml'), *search_paths) config_files = set(reader_configs) for config_file in config_files: config_basename = os.path.basename(config_file) reader_configs = config_search_paths(os.path.join('readers', config_basename), *search_paths) if (not reader_configs): raise ValueError('No reader(s) named: {}'.format(reader)) (yield reader_configs)
Generator of reader configuration files for one or more readers Args: reader (Optional[str]): Yield configs only for this reader ppp_config_dir (Optional[str]): Additional configuration directory to search for reader configuration files. Returns: Generator of lists of configuration files
codesearchnet