code: string (lengths 20 to 4.93k)
docstring: string (lengths 33 to 1.27k)
source: string (3 classes)
def exclude(self, **filters): exclude = {'-%s' % key: value for key, value in filters.items()} return self.filter(**exclude)
Applies query filters for excluding matching records from result set. Args: **filters: Query filters as keyword arguments. Returns: Self. Queryset object. Examples: >>> Person.objects.exclude(age=None) >>> Person.objects.filter(name__startswith='jo').exclude(age__lte=16)
juraj-google-style
def to_file(self, destination, format='csv', csv_delimiter=',', csv_header=True): f = codecs.open(destination, 'w', 'utf-8') fieldnames = [] for column in self.schema: fieldnames.append(column.name) if sys.version_info[0] == 2: csv_delimiter = csv_delimiter.encode('unicode_escape') writer = csv.DictWriter(f, fieldnames=fieldnames, delimiter=csv_delimiter) if csv_header: writer.writeheader() for row in self: writer.writerow(row) f.close()
Save the results to a local file in CSV format. Args: destination: path on the local filesystem for the saved results. format: the format to use for the exported data; currently only 'csv' is supported. csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ',' csv_header: for CSV exports, whether to include an initial header line. Default true. Raises: An Exception if the operation failed.
juraj-google-style
def _parse_args(): parser = argparse.ArgumentParser(description='preprocess_coco_minival: Preprocess COCO minival dataset') parser.add_argument('--images_folder', type=str, help='Full path of the validation images folder.', required=True) parser.add_argument('--instances_file', type=str, help='Full path of the input JSON file, like instances_val20xx.json.', required=True) parser.add_argument('--allowlist_file', type=str, help='File with COCO image ids to preprocess, one on each line.', required=False) parser.add_argument('--num_images', type=int, help='Number of allowlisted images to preprocess into the output folder.', required=False) parser.add_argument('--output_folder', type=str, help='Full path to output images & text proto files into.', required=True) return parser.parse_known_args(args=sys.argv[1:])[0]
Creates a parser that parses the command line arguments. Returns: A namespace parsed from command line arguments.
github-repos
def sample(self, num_rows=1): self.check_fit() res = {} means = np.zeros(self.covariance.shape[0]) size = (num_rows,) clean_cov = np.nan_to_num(self.covariance) samples = np.random.multivariate_normal(means, clean_cov, size=size) for (i, (label, distrib)) in enumerate(self.distribs.items()): cdf = stats.norm.cdf(samples[:, i]) res[label] = distrib.percent_point(cdf) return pd.DataFrame(data=res)
Creates synthetic values statistically similar to the original dataset. Args: num_rows: `int` amount of samples to generate. Returns: np.ndarray: Sampled data.
codesearchnet
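The copula sampling step above is easier to see in isolation. The sketch below is standalone and uses two illustrative marginals (an exponential and a normal) in place of the fitted `distrib.percent_point` calls; everything else mirrors the code: correlated normals, then the normal CDF, then each marginal's inverse CDF.

```python
import numpy as np
from scipy import stats

# Correlated standard normals drawn from a (cleaned) covariance matrix.
cov = np.array([[1.0, 0.6], [0.6, 1.0]])
samples = np.random.multivariate_normal(np.zeros(2), cov, size=5)

# Map each column to uniforms, then through an illustrative marginal's
# inverse CDF (the role played by distrib.percent_point above).
uniforms = stats.norm.cdf(samples)
col0 = stats.expon(scale=2.0).ppf(uniforms[:, 0])
col1 = stats.norm(loc=10.0, scale=3.0).ppf(uniforms[:, 1])
```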
def max(x, axis=None, keepdims=False, initial=None): if any_symbolic_tensors((x,)): return Max(axis=axis, keepdims=keepdims, initial=initial).symbolic_call(x) return backend.numpy.max(x, axis=axis, keepdims=keepdims, initial=initial)
Return the maximum of a tensor or maximum along an axis. Args: x: Input tensor. axis: Axis or axes along which to operate. By default, flattened input is used. keepdims: If this is set to `True`, the axes which are reduced are left in the result as dimensions with size one. Defaults to `False`. initial: The minimum value of an output element. Defaults to `None`. Returns: Maximum of `x`.
github-repos
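A quick NumPy sketch of the same `axis` / `keepdims` / `initial` semantics that the backend call delegates to (NumPy stands in for the backend here):

```python
import numpy as np

x = np.array([[1, 5], [3, 2]])
print(np.max(x))                          # 5, flattened input
print(np.max(x, axis=0))                  # [3 5]
print(np.max(x, axis=1, keepdims=True))   # [[5] [3]], reduced axis kept as size 1
print(np.max(x, axis=0, initial=4))       # [4 5], `initial` acts as a floor
```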
def fix_variable(self, v, value): adj = self.adj linear = self.linear if (value not in self.vartype.value): raise ValueError('expected value to be in {}, received {} instead'.format(self.vartype.value, value)) removed_interactions = [] for u in adj[v]: self.add_variable(u, (value * adj[v][u])) removed_interactions.append((u, v)) self.remove_interactions_from(removed_interactions) self.add_offset((value * linear[v])) self.remove_variable(v)
Fix the value of a variable and remove it from a binary quadratic model. Args: v (variable): Variable in the binary quadratic model to be fixed. value (int): Value assigned to the variable. Values must match the :class:`.Vartype` of the binary quadratic model. Examples: This example creates a binary quadratic model with one variable and fixes its value. >>> import dimod ... >>> bqm = dimod.BinaryQuadraticModel({'a': -.5, 'b': 0.}, {('a', 'b'): -1}, 0.0, dimod.SPIN) >>> bqm.fix_variable('a', -1) >>> bqm.offset 0.5 >>> bqm.linear['b'] 1.0 >>> 'a' in bqm False
codesearchnet
def session_manager(self): return self._session_manager
Return the SessionManager used by the Supervisor. Returns: A SessionManager object.
github-repos
def normalize(x, axis=-1, order=2): from keras.src import ops if isinstance(x, np.ndarray): norm = np.atleast_1d(np.linalg.norm(x, order, axis)) norm[norm == 0] = 1 axis = axis or -1 return x / np.expand_dims(norm, axis) return ops.nn.normalize(x, axis=axis, order=order)
Normalizes an array. If the input is a NumPy array, a NumPy array will be returned. If it's a backend tensor, a backend tensor will be returned. Args: x: Array to normalize. axis: axis along which to normalize. order: Normalization order (e.g. `order=2` for L2 norm). Returns: A normalized copy of the array.
github-repos
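The NumPy branch of `normalize` is simple to check by hand; this standalone sketch reproduces it, including the guard against zero-norm rows:

```python
import numpy as np

x = np.array([[3.0, 4.0], [0.0, 0.0]])
norm = np.atleast_1d(np.linalg.norm(x, 2, axis=-1))
norm[norm == 0] = 1                       # avoid dividing a zero row by zero
print(x / np.expand_dims(norm, -1))       # [[0.6 0.8] [0.  0. ]]
```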
def development_verify(): with open(DEVELOPMENT_TEMPLATE, 'r') as file_obj: template = file_obj.read() expected = template.format(revision=REVISION, rtd_version=RTD_VERSION) with open(DEVELOPMENT_FILE, 'r') as file_obj: contents = file_obj.read() if (contents != expected): err_msg = ('\n' + get_diff(contents, expected, 'DEVELOPMENT.rst.actual', 'DEVELOPMENT.rst.expected')) raise ValueError(err_msg) else: print('DEVELOPMENT.rst contents are as expected.')
Populate template and compare to ``DEVELOPMENT.rst`` Raises: ValueError: If the current ``DEVELOPMENT.rst`` doesn't agree with the expected value computed from the template.
codesearchnet
def Launch(self, request, global_params=None): config = self.GetMethodConfig('Launch') return self._RunMethod(config, request, global_params=global_params)
Launch a job with a FlexTemplate. Args: request: (DataflowProjectsLocationsFlexTemplatesLaunchRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (LaunchFlexTemplateResponse) The response message.
github-repos
def ParseOptions(cls, options, output_module): if not isinstance(output_module, dynamic.DynamicOutputModule): raise errors.BadConfigObject( 'Output module is not an instance of DynamicOutputModule') default_fields = ','.join(cls._DEFAULT_FIELDS) fields = cls._ParseStringOption( options, 'fields', default_value=default_fields) additional_fields = cls._ParseStringOption( options, 'additional_fields') if additional_fields: fields = '{0:s},{1:s}'.format(fields, additional_fields) output_module.SetFields([ field_name.strip() for field_name in fields.split(',')])
Parses and validates options. Args: options (argparse.Namespace): parser options. output_module (OutputModule): output module to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when the output filename was not provided.
juraj-google-style
async def getProvStack(self, iden: str): return self.cell.provstor.getProvStack(s_common.uhex(iden))
Return the provenance stack associated with the given iden. Args: iden (str): the iden from splice Note: the iden appears on each splice entry as the 'prov' property
juraj-google-style
def in_cache(self, objpath, metahash): try: self.path_in_cache(objpath, metahash) return True except CacheMiss: return False
Returns true if object is cached. Args: objpath: Filename relative to buildroot. metahash: hash object
juraj-google-style
def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
Rescale the image by the given factor. image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
github-repos
def list_knowledge_bases(project_id): import dialogflow_v2beta1 as dialogflow client = dialogflow.KnowledgeBasesClient() project_path = client.project_path(project_id) print('Knowledge Bases for: {}'.format(project_id)) for knowledge_base in client.list_knowledge_bases(project_path): print(' - Display Name: {}'.format(knowledge_base.display_name)) print(' - Knowledge ID: {}\n'.format(knowledge_base.name))
Lists the Knowledge bases belonging to a project. Args: project_id: The GCP project linked with the agent.
juraj-google-style
def price(self, valuation_date, market, model=None): del model, valuation_date reference_curve = market.reference_curve fwd_rate = reference_curve.get_forward_rate(self._accrual_start_date, self._accrual_end_date, self._daycount_fraction) return 100.0 * self._contract_notional * (1.0 - fwd_rate)
Returns the price of the contract on the valuation date. Args: valuation_date: A scalar `DateTensor` specifying the date on which valuation is being desired. market: A namedtuple of type `InterestRateMarket` which contains the necessary information for pricing the FRA instrument. model: Reserved for future use. Returns: A Rank 1 `Tensor` of real type containing the modeled price of each futures contract based on the input market data.
github-repos
def populate_readme(version, circleci_build, appveyor_build, coveralls_build, travis_build): with open(RELEASE_README_FILE, 'r') as file_obj: template = file_obj.read() contents = template.format(version=version, circleci_build=circleci_build, appveyor_build=appveyor_build, coveralls_build=coveralls_build, travis_build=travis_build) with open(README_FILE, 'w') as file_obj: file_obj.write(contents)
Populates ``README.rst`` with release-specific data. This is because ``README.rst`` is used on PyPI. Args: version (str): The current version. circleci_build (Union[str, int]): The CircleCI build ID corresponding to the release. appveyor_build (str): The AppVeyor build ID corresponding to the release. coveralls_build (Union[str, int]): The Coveralls.io build ID corresponding to the release. travis_build (int): The Travis CI build ID corresponding to the release.
codesearchnet
def dequantize_flow(dx, dy, max_val=0.02, denorm=True): assert (dx.shape == dy.shape) assert ((dx.ndim == 2) or ((dx.ndim == 3) and (dx.shape[(- 1)] == 1))) (dx, dy) = [dequantize(d, (- max_val), max_val, 255) for d in [dx, dy]] if denorm: dx *= dx.shape[1] dy *= dx.shape[0] flow = np.dstack((dx, dy)) return flow
Recover from quantized flow. Args: dx (ndarray): Quantized dx. dy (ndarray): Quantized dy. max_val (float): Maximum value used when quantizing. denorm (bool): Whether to multiply flow values with width/height. Returns: ndarray: Dequantized flow.
codesearchnet
def to_hgnc(self, hgnc_alias, build='37'): result = self.hgnc_genes(hgnc_symbol=hgnc_alias, build=build) if result: for gene in result: return gene['hgnc_symbol'] else: return None
Check if an hgnc symbol is an alias. Return the corresponding hgnc symbol; if it does not exist, return None. Args: hgnc_alias(str) Returns: hgnc_symbol(str)
codesearchnet
def get_reserved_vlan_range(self, id_or_uri): uri = (self._client.build_uri(id_or_uri) + '/reserved-vlan-range') return self._client.get(uri)
Gets the reserved vlan ID range for the fabric. Note: This method is only available on HPE Synergy. Args: id_or_uri: ID or URI of fabric. Returns: dict: vlan-pool
codesearchnet
def get_changes_since(self, timestamp: str) -> Dict[(str, List)]: rg = [] cg = [] ra = [] ca = [] layers = [] if (self.last_modified() > timestamp): if (self.row_graphs.last_modified() > timestamp): for name in self.row_graphs.keys(): if (self.row_graphs.last_modified(name) > timestamp): rg.append(name) if (self.col_graphs.last_modified() > timestamp): for name in self.col_graphs.keys(): if (self.col_graphs.last_modified(name) > timestamp): cg.append(name) if (self.ra.last_modified() > timestamp): for name in self.ra.keys(): if (self.ra.last_modified(name) > timestamp): ra.append(name) if (self.ca.last_modified() > timestamp): for name in self.ca.keys(): if (self.ca.last_modified(name) > timestamp): ca.append(name) if (self.layers.last_modified() > timestamp): for name in self.layers.keys(): if (self.layers.last_modified(name) > timestamp): layers.append(name) return {'row_graphs': rg, 'col_graphs': cg, 'row_attrs': ra, 'col_attrs': ca, 'layers': layers}
Get a summary of the parts of the file that changed since the given time Args: timestamp: ISO8601 timestamp Return: dict: Dictionary like ``{"row_graphs": rg, "col_graphs": cg, "row_attrs": ra, "col_attrs": ca, "layers": layers}`` listing the names of objects that were modified since the given time
codesearchnet
def __setitem__(self, key, value): if not isinstance(key, basestring): raise Exception("LRU cache can only be indexed by strings") if key in self._cache: entry = self._cache[key] elif len(self._cache) < self._cache_size: self._cache[key] = entry = {} else: entry = min(list(self._cache.values()), key=lambda x: x['last_used']) self._cache.pop(entry['key']) self._cache[key] = entry entry['value'] = value entry['key'] = key entry['last_used'] = datetime.datetime.now()
Put an item in the cache. Args: key: a string key for retrieving the item. value: the item to cache. Raises: Exception if the key is not a string.
juraj-google-style
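A standalone sketch of the eviction rule used above (assumed behaviour, with a plain dict instead of the class): when the cache is full and the key is new, the entry with the oldest `last_used` timestamp is dropped before inserting.

```python
import datetime

cache, cache_size = {}, 2

def put(key, value):
    if key not in cache and len(cache) >= cache_size:
        # Evict the least recently used entry, as in __setitem__ above.
        oldest = min(cache.values(), key=lambda e: e['last_used'])
        cache.pop(oldest['key'])
    cache[key] = {'key': key, 'value': value,
                  'last_used': datetime.datetime.now()}

put('a', 1); put('b', 2); put('c', 3)   # 'a' (least recently stored) is evicted
print(sorted(cache))                    # ['b', 'c']
```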
def from_file(cls, filename, constant_lattice=True, **kwargs): fname = os.path.basename(filename) if fnmatch(fname, "*XDATCAR*"): structures = Xdatcar(filename).structures elif fnmatch(fname, "vasprun*.xml*"): structures = Vasprun(filename).structures else: raise ValueError("Unsupported file") return cls.from_structures(structures, constant_lattice=constant_lattice, **kwargs)
Convenience constructor to obtain trajectory from XDATCAR or vasprun.xml file Args: filename (str): The filename to read from. constant_lattice (bool): Whether the lattice changes during the simulation, such as in an NPT MD simulation. True results in a trajectory with a constant lattice. Returns: (Trajectory)
juraj-google-style
def RunMetadata(self, run, tag): accumulator = self.GetAccumulator(run) return accumulator.RunMetadata(tag)
Get the session.run() metadata associated with a TensorFlow run and tag. Args: run: A string name of a TensorFlow run. tag: A string name of the tag associated with a particular session.run(). Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: The metadata in the form of `RunMetadata` protobuf data structure.
juraj-google-style
def _CreateDynamicDisplayAdSettings(media_service, opener): image = _CreateImage(media_service, opener, 'https: logo = {'type': 'IMAGE', 'mediaId': image['mediaId'], 'xsi_type': 'Image'} dynamic_settings = {'landscapeLogoImage': logo, 'pricePrefix': 'as low as', 'promoText': 'Free shipping!', 'xsi_type': 'DynamicSettings'} return dynamic_settings
Creates settings for dynamic display ad. Args: media_service: a SudsServiceProxy instance for AdWords's MediaService. opener: an OpenerDirector instance. Returns: The dynamic display ad settings.
codesearchnet
def add_graph_building_optimization_tests(cls: _TC) -> _TC: if flags.config().graph_building_optimization.value(): return cls for name, value in cls.__dict__.copy().items(): if callable(value) and (name.startswith(unittest.TestLoader.testMethodPrefix) or name.startswith('benchmark')): setattr(cls, name + 'WithGraphBuildingOptimization', enable_graph_building_optimization(value)) return cls
Adds methods with graph_building_optimization enabled to the test suite. Example: @test_util.add_graph_building_optimization_tests class FooTest(test.TestCase): def testBar(self): ... Generated class: class FooTest(test.TestCase): def testBar(self): ... def testBarWithGraphBuildingOptimization(self): // Enable graph_building_optimization testBar(self) // Disable graph_building_optimization Args: cls: class to decorate. Returns: cls with new test methods added.
github-repos
def percentile(self, percent): if (percent >= 100): percent = 100 target = (len(self) - (len(self) * (percent / 100))) for k in reversed(sorted(self._data.keys())): target -= self._data[k] if (target < 0): return k return 10
Return the value that is the Nth percentile in the histogram. Args: percent (Union[int, float]): The percentile being sought. The default consumer implementations consistently use ``99``. Returns: int: The value corresponding to the requested percentile.
codesearchnet
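The percentile walk is easiest to follow on a tiny bucketed histogram. The sketch below is standalone and assumes `self._data` maps bucket values to counts, as the code suggests:

```python
data = {1: 50, 2: 30, 5: 15, 10: 5}        # bucket value -> count, 100 samples total
percent = 99
total = sum(data.values())
target = total - total * (percent / 100)   # samples allowed above the cutoff
for value in sorted(data, reverse=True):
    target -= data[value]
    if target < 0:
        print(value)                       # 10: the 99th-percentile bucket
        break
```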
def merged(cls, *flatterms: 'FlatTerm') -> 'FlatTerm': return cls(cls._combined_wildcards_iter(sum(flatterms, cls.empty())))
Concatenate the given flatterms to a single flatterm. Args: *flatterms: The flatterms which are concatenated. Returns: The concatenated flatterms.
codesearchnet
def to_cache_timer(datetime_func): if (datetime_func is None): datetime_func = datetime.utcnow def _timer(): 'Return the timestamp since the epoch.' return (datetime_func() - datetime(1970, 1, 1)).total_seconds() return _timer
Converts a datetime_func to a timestamp_func. Args: datetime_func (callable[[], datetime]): a func that returns the current time Returns: time_func (callable[[], timestamp]): a func that returns the timestamp since the epoch
codesearchnet
def __init__(self, validate_args=False, name="normal"): self._graph_parents = [] self._name = name self._validate_args = validate_args super(NormalCDF, self).__init__( validate_args=validate_args, forward_min_event_ndims=0, name=name)
Instantiates the `NormalCDF` bijector. Args: validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object.
juraj-google-style
def restore_state(self, state): state_name = state.get('state_name') state_version = state.get('state_version') if state_name != self.STATE_NAME or state_version != self.STATE_VERSION: raise ArgumentError("Invalid emulated device state name or version", found=(state_name, state_version), expected=(self.STATE_NAME, self.STATE_VERSION)) def _background_restore(): super(ReferenceDevice, self).restore_state(state) self.reset_count = state.get('reset_count', 0) self.script = base64.b64decode(state.get('received_script')) self.synchronize_task(_background_restore)
Restore the current state of this emulated device. Note that restore_state happens synchronously in the emulation thread to avoid any race conditions with accessing data members and ensure a consistent atomic restoration process. This method will block while the background restore happens. Args: state (dict): A previously dumped state produced by dump_state.
juraj-google-style
def _verify_params(self): reserved_in_use = self._RESERVED_PARAMS.intersection(self.extra_params) if reserved_in_use: raise ValueError('Using a reserved parameter', reserved_in_use)
Verifies the parameters don't use any reserved parameter. Raises: ValueError: If a reserved parameter is used.
codesearchnet
def manage_all_configs(save_results, filename): all_configs = get_all_configs() print_all_configs(all_configs[0], all_configs[1], all_configs[2]) if save_results: save_to_file(all_configs[3], filename)
Manages configuration detection and retrieval based on user input. Args: save_results: Boolean indicating whether to save the results to a file. filename: String that is the name of the output JSON file.
github-repos
def energy_at_conditions(self, pH, V): return ((self.energy + ((self.npH * PREFAC) * pH)) + (self.nPhi * V))
Get free energy for a given pH and V Args: pH (float): pH at which to evaluate free energy V (float): voltage at which to evaluate free energy Returns: free energy at conditions
codesearchnet
def _get_split_key(client_keys, num_splits): if not client_keys or len(client_keys) < num_splits - 1: return client_keys num_keys_per_split = max(1.0, float(len(client_keys)) / (num_splits - 1)) split_client_keys = [] for i in range(1, num_splits): split_index = int(round(i * num_keys_per_split) - 1) split_client_keys.append(client_keys[split_index]) return split_client_keys
Given a list of keys and a number of splits find the keys to split on. Args: client_keys: the list of keys. num_splits: the number of splits. Returns: A list of keys to split on.
github-repos
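A standalone sketch of the split-key selection: roughly evenly spaced keys are picked out of the (already ordered) key list, one fewer than the number of splits.

```python
client_keys = list('abcdefghij')           # 10 ordered keys
num_splits = 4
per_split = max(1.0, len(client_keys) / (num_splits - 1))
split_keys = [client_keys[int(round(i * per_split) - 1)]
              for i in range(1, num_splits)]
print(split_keys)                          # ['c', 'g', 'j']
```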
def process_multientry(entry_list, prod_comp, coeff_threshold=0.0001): dummy_oh = [Composition('H'), Composition('O')] try: entry_comps = [e.composition for e in entry_list] rxn = Reaction((entry_comps + dummy_oh), [prod_comp]) coeffs = (- np.array([rxn.get_coeff(comp) for comp in entry_comps])) if (coeffs > coeff_threshold).all(): return MultiEntry(entry_list, weights=coeffs.tolist()) else: return None except ReactionError: return None
Static method for finding a multientry based on a list of entries and a product composition. Essentially checks to see if a valid aqueous reaction exists between the entries and the product composition and returns a MultiEntry with weights according to the coefficients if so. Args: entry_list ([Entry]): list of entries from which to create a MultiEntry prod_comp (Composition): composition constraint for setting weights of MultiEntry coeff_threshold (float): threshold of stoichiometric coefficients to filter, if weights are lower than this value, the entry is not returned
codesearchnet
def __init__(self, certificate=None, private_key=None): self.private_key = private_key self.certificate = certificate self._ClearServerCipherCache() self.encrypted_cipher_cache = utils.FastStore(max_size=50000)
Creates a communicator. Args: certificate: Our own certificate. private_key: Our own private key.
juraj-google-style
def case(self, case_id=None): if case_id: for case in self.case_objs: if case.case_id == case_id: return case else: if self.cases: return list(self.case_objs)[0] return Case(case_id='unknown')
Return a Case object. If no case_id is given, return the first case. Args: case_id (str): A case id Returns: A Case object
juraj-google-style
def add_permissions(self, grp_name, resource, permissions): self.service.add_permissions( grp_name, resource, permissions, self.url_prefix, self.auth, self.session, self.session_send_opts)
Add additional permissions for the group associated with the given resource. Args: grp_name (string): Name of group. resource (intern.resource.boss.BossResource): Identifies which data model object to operate on. permissions (list): List of permissions to add to the given resource. Raises: requests.HTTPError on failure.
juraj-google-style
def get_javascript_error(self, return_type='string'): if BROME_CONFIG['proxy_driver']['intercept_javascript_error']: js_errors = self._driver.execute_script( 'return window.jsErrors; window.jsErrors = [];' ) if not js_errors: js_errors = [] if return_type == 'list': if len(js_errors): return js_errors else: return [] else: if len(js_errors): return os.linesep.join(js_errors) else: return self.no_javascript_error_string else: if return_type == 'list': return [] else: return self.no_javascript_error_string
Return the gathered javascript error Args: return_type: 'string' | 'list'; default: 'string'
juraj-google-style
def sub(x1, x2, output_shape=None, name=None): output_shape = convert_to_shape(output_shape) if not isinstance(x2, Tensor): return ScalarAddOperation(x1, -x2).outputs[0] with tf.name_scope(name, default_name="sub"): x1, x2 = binary_arguments_to_tensors(x1, x2) return add(x1, negative(x2), output_shape=output_shape)
Binary subtraction with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
juraj-google-style
def pyside_load_ui(uifile, base_instance=None): form_class, base_class = load_ui_type(uifile) if not base_instance: typeName = form_class.__name__ finalType = type(typeName, (form_class, base_class), {}) base_instance = finalType() else: if not isinstance(base_instance, base_class): raise RuntimeError( 'The base_instance passed to loadUi does not inherit from' ' needed base type (%s)' % type(base_class)) typeName = type(base_instance).__name__ base_instance.__class__ = type(typeName, (form_class, type(base_instance)), {}) base_instance.setupUi(base_instance) return base_instance
Provide PyQt4.uic.loadUi functionality to PySide Args: uifile (str): Absolute path to .ui file base_instance (QWidget): The widget into which UI widgets are loaded Note: pysideuic is required for this to work with PySide. This seems to work correctly in Maya as well as outside of it as opposed to other implementations which involve overriding QUiLoader. Returns: QWidget: the base instance
juraj-google-style
def nhapDaiHan(self, cucSo, gioiTinh): for cung in self.thapNhiCung: khoangCach = khoangCachCung(cung.cungSo, self.cungMenh, gioiTinh) cung.daiHan(cucSo + khoangCach * 10) return self
Enter the đại hạn (major 10-year luck periods) for each house. Args: cucSo (TYPE): Description gioiTinh (TYPE): Description Returns: TYPE: Description
juraj-google-style
def output_csv(filehandle: TextIO, values: Iterable[str]) -> None: line = ",".join(values) filehandle.write(line + "\n")
Write a line of CSV. POOR; does not escape things properly. DEPRECATED. Args: filehandle: file to write to values: iterable of values to write as a single CSV row
juraj-google-style
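The docstring's warning is the interesting part: hand-joining with commas breaks on values that contain commas or quotes. A minimal sketch of the safer alternative, letting the `csv` module do the quoting:

```python
import csv
import io

buf = io.StringIO()
csv.writer(buf).writerow(['plain', 'has,comma', 'has "quotes"'])
print(buf.getvalue(), end='')   # plain,"has,comma","has ""quotes"""
```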
def BuildChecks(self, request): result = [] if request.HasField("start_time") or request.HasField("end_time"): def FilterTimestamp(file_stat, request=request): return file_stat.HasField("st_mtime") and ( file_stat.st_mtime < request.start_time or file_stat.st_mtime > request.end_time) result.append(FilterTimestamp) if request.HasField("min_file_size") or request.HasField("max_file_size"): def FilterSize(file_stat, request=request): return file_stat.HasField("st_size") and ( file_stat.st_size < request.min_file_size or file_stat.st_size > request.max_file_size) result.append(FilterSize) if request.HasField("perm_mode"): def FilterPerms(file_stat, request=request): return (file_stat.st_mode & request.perm_mask) != request.perm_mode result.append(FilterPerms) if request.HasField("uid"): def FilterUID(file_stat, request=request): return file_stat.st_uid != request.uid result.append(FilterUID) if request.HasField("gid"): def FilterGID(file_stat, request=request): return file_stat.st_gid != request.gid result.append(FilterGID) if request.HasField("path_regex"): regex = request.path_regex def FilterPath(file_stat, regex=regex): return not regex.Search(file_stat.pathspec.Basename()) result.append(FilterPath) if request.HasField("data_regex"): def FilterData(file_stat, **_): return not self.TestFileContent(file_stat) result.append(FilterData) return result
Parses request and returns a list of filter callables. Each callable will be called with the StatEntry and returns True if the entry should be suppressed. Args: request: A FindSpec that describes the search. Returns: a list of callables which return True if the file is to be suppressed.
juraj-google-style
def get_rows_fieldnames_from_raw_sql( session: Union[Session, Engine, Connection], sql: str) -> Tuple[Sequence[Sequence[Any]], Sequence[str]]: result = session.execute(sql) fieldnames = result.keys() rows = result.fetchall() return rows, fieldnames
Returns results and column names from a query. Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object sql: raw SQL to execute Returns: ``(rows, fieldnames)`` where ``rows`` is the usual set of results and ``fieldnames`` are the name of the result columns/fields.
juraj-google-style
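A hypothetical usage sketch with an in-memory SQLite engine (the table and column names are illustrative). Note that with SQLAlchemy 2.0 a raw string usually needs to be wrapped in `text()`, whereas the function above passes the string straight through in 1.x style:

```python
from sqlalchemy import create_engine, text

engine = create_engine('sqlite://')
with engine.connect() as conn:
    conn.execute(text('CREATE TABLE t (a INTEGER, b TEXT)'))
    conn.execute(text("INSERT INTO t VALUES (1, 'x'), (2, 'y')"))
    result = conn.execute(text('SELECT * FROM t'))
    fieldnames = list(result.keys())       # ['a', 'b']
    rows = result.fetchall()               # [(1, 'x'), (2, 'y')]
```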
def _get_computer_object(): with salt.utils.winapi.Com(): nt = win32com.client.Dispatch('AdsNameSpaces') return nt.GetObject('', 'WinNT:
A helper function to get the object for the local machine Returns: object: Returns the computer object for the local machine
codesearchnet
def hasValue(self) -> 'Builder': return self._to_builder(_evaluation.HasValueFunction(self.node.context, self.node, []))
The FHIRPath hasValue() function. Returns: An expression that evaluates to True if the parent has a single value that is a primitive.
github-repos
def are_equivalent_xml(a_xml, b_xml, ignore_timestamps=False): 'Normalizes then compares SystemMetadata XML docs for equivalency.\n ``a_xml`` and ``b_xml`` should be utf-8 encoded DataONE System Metadata XML\n documents.\n ' return are_equivalent_pyxb(d1_common.xml.deserialize(a_xml), d1_common.xml.deserialize(b_xml), ignore_timestamps)
Determine if two SystemMetadata XML docs are semantically equivalent. Normalize then compare SystemMetadata XML docs for equivalency. Args: a_xml, b_xml: bytes UTF-8 encoded SystemMetadata XML docs to compare ignore_timestamps: bool ``True``: Timestamps in the SystemMetadata are ignored so that objects that are compared register as equivalent if only their timestamps differ. Returns: bool: **True** if SystemMetadata XML docs are semantically equivalent. Notes: The SystemMetadata is normalized by removing any redundant information and ordering all sections where there are no semantics associated with the order. The normalized SystemMetadata is intended to be semantically equivalent to the un-normalized one.
codesearchnet
def sca_xsect(scatterer, h_pol=True): if (scatterer.psd_integrator is not None): return scatterer.psd_integrator.get_angular_integrated(scatterer.psd, scatterer.get_geometry(), 'sca_xsect') old_geom = scatterer.get_geometry() def d_xsect(thet, phi): (scatterer.phi, scatterer.thet) = ((phi * rad_to_deg), (thet * rad_to_deg)) Z = scatterer.get_Z() I = sca_intensity(scatterer, h_pol) return (I * np.sin(thet)) try: xsect = dblquad(d_xsect, 0.0, (2 * np.pi), (lambda x: 0.0), (lambda x: np.pi))[0] finally: scatterer.set_geometry(old_geom) return xsect
Scattering cross section for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The scattering cross section.
codesearchnet
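The `dblquad` call integrates over the sphere with the inner variable (`thet`) first. A standalone sanity check of that calling convention, integrating the solid-angle element sin(theta) to recover 4*pi:

```python
import numpy as np
from scipy.integrate import dblquad

val, _ = dblquad(lambda theta, phi: np.sin(theta),   # inner variable comes first
                 0.0, 2 * np.pi,                     # phi (outer) limits
                 lambda _: 0.0, lambda _: np.pi)     # theta (inner) limits
print(val, 4 * np.pi)                                # both ~12.566
```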
def _pack3(obj, fp, **options): global compatibility ext_handlers = options.get('ext_handlers') if (obj is None): _pack_nil(obj, fp, options) elif (ext_handlers and (obj.__class__ in ext_handlers)): _pack_ext(ext_handlers[obj.__class__](obj), fp, options) elif isinstance(obj, bool): _pack_boolean(obj, fp, options) elif isinstance(obj, int): _pack_integer(obj, fp, options) elif isinstance(obj, float): _pack_float(obj, fp, options) elif (compatibility and isinstance(obj, str)): _pack_oldspec_raw(obj.encode('utf-8'), fp, options) elif (compatibility and isinstance(obj, bytes)): _pack_oldspec_raw(obj, fp, options) elif isinstance(obj, str): _pack_string(obj, fp, options) elif isinstance(obj, bytes): _pack_binary(obj, fp, options) elif isinstance(obj, (list, tuple)): _pack_array(obj, fp, options) elif isinstance(obj, dict): _pack_map(obj, fp, options) elif isinstance(obj, datetime.datetime): _pack_ext_timestamp(obj, fp, options) elif isinstance(obj, Ext): _pack_ext(obj, fp, options) elif ext_handlers: t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None) if t: _pack_ext(ext_handlers[t](obj), fp, options) else: raise UnsupportedTypeException(('unsupported type: %s' % str(type(obj)))) else: raise UnsupportedTypeException(('unsupported type: %s' % str(type(obj))))
Serialize a Python object into MessagePack bytes. Args: obj: a Python object fp: a .write()-supporting file-like object Kwargs: ext_handlers (dict): dictionary of Ext handlers, mapping a custom type to a callable that packs an instance of the type into an Ext object force_float_precision (str): "single" to force packing floats as IEEE-754 single-precision floats, "double" to force packing floats as IEEE-754 double-precision floats. Returns: None. Raises: UnsupportedType(PackException): Object type not supported for packing. Example: >>> f = open('test.bin', 'wb') >>> umsgpack.pack({u"compact": True, u"schema": 0}, f) >>>
codesearchnet
def set(self, key, value, *, section=DataStoreDocumentSection.Data): key_notation = '.'.join([section, key]) try: self._delete_gridfs_data(self._data_from_dotnotation(key_notation, default=None)) except KeyError: logger.info('Adding new field {} to the data store'.format(key_notation)) result = self._collection.update_one({'_id': ObjectId(self._workflow_id)}, {'$set': {key_notation: self._encode_value(value)}, '$currentDate': {'lastModified': True}}) return (result.modified_count == 1)
Store a value under the specified key in the given section of the document. This method stores a value into the specified section of the workflow data store document. Any existing value is overridden. Before storing a value, any linked GridFS document under the specified key is deleted. Args: key (str): The key pointing to the value that should be stored/updated. It supports MongoDB's dot notation for nested fields. value: The value that should be stored/updated. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: bool: ``True`` if the value could be set/updated, otherwise ``False``.
codesearchnet
def open_window(self, private=False): handles_before = self.selenium.window_handles self.switch_to() with self.selenium.context(self.selenium.CONTEXT_CHROME): self.selenium.find_element(*self._file_menu_button_locator).click() if private: self.selenium.find_element(*self._file_menu_private_window_locator).click() else: self.selenium.find_element(*self._file_menu_new_window_button_locator).click() return self.wait.until(expected.new_browser_window_is_opened(self.selenium, handles_before), message='No new browser window opened')
Open a new browser window. Args: private (bool): Optional parameter to open a private browsing window. Defaults to False. Returns: :py:class:`BrowserWindow`: Opened window.
codesearchnet
def __init__(self, function_meta, functions_mapping=None, check_variables_set=None): self.functions_mapping = functions_mapping or {} self.check_variables_set = check_variables_set or set() self.cache_key = None self.__parse(function_meta)
init LazyFunction object with function_meta Args: function_meta (dict): function name, args and kwargs. { "func_name": "func", "args": [1, 2] "kwargs": {"a": 3, "b": 4} }
juraj-google-style
def read_table(fstream): pos = fstream.tell() line = fstream.readline().strip() fragments = line.split(",") fragments = [x for x in fragments if x is not None] partition = dict() if not len(fragments) >= 4: return None partition["table"] = fragments[0] partition["group"] = fragments[1] partition["set"] = fragments[2] partition["num_lines"] = fragments[3] struct = None if partition is not None and partition["table"] == "TABLE": num_lines = int(partition["num_lines"].strip()) struct = {} header = fetch_cols(fstream) struct.update({header[0]: header[1:]}) for _ in range(num_lines): cols = fetch_cols(fstream) struct.update({cols[0]: cols[1:]}) else: fstream.seek(pos) return struct
Read a likwid table info from the text stream. Args: fstream: Likwid's filestream. Returns (dict(str: str)): A dict containing likwid's table info as key/value pairs.
juraj-google-style
def run_numerical_categorical_analysis(args, schema_list): header = [column['name'] for column in schema_list] input_files = file_io.get_matching_files(args.input_file_pattern) for col_schema in schema_list: col_type = col_schema['type'].lower() if ((col_type != 'string') and (col_type != 'integer') and (col_type != 'float')): raise ValueError(('Schema contains an unsupported type %s.' % col_type)) def _init_numerical_results(): return {'min': float('inf'), 'max': float('-inf'), 'count': 0, 'sum': 0.0} numerical_results = collections.defaultdict(_init_numerical_results) categorical_results = collections.defaultdict(set) for input_file in input_files: with file_io.FileIO(input_file, 'r') as f: for line in f: parsed_line = dict(zip(header, line.strip().split(','))) for col_schema in schema_list: col_name = col_schema['name'] col_type = col_schema['type'] if (col_type.lower() == 'string'): categorical_results[col_name].update([parsed_line[col_name]]) else: if (not parsed_line[col_name].strip()): continue numerical_results[col_name]['min'] = min(numerical_results[col_name]['min'], float(parsed_line[col_name])) numerical_results[col_name]['max'] = max(numerical_results[col_name]['max'], float(parsed_line[col_name])) numerical_results[col_name]['count'] += 1 numerical_results[col_name]['sum'] += float(parsed_line[col_name]) for col_schema in schema_list: if (col_schema['type'].lower() != 'string'): col_name = col_schema['name'] mean = (numerical_results[col_name]['sum'] / numerical_results[col_name]['count']) del numerical_results[col_name]['sum'] del numerical_results[col_name]['count'] numerical_results[col_name]['mean'] = mean file_io.write_string_to_file(os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE), json.dumps(numerical_results, indent=2, separators=(',', ': '))) for (name, unique_labels) in six.iteritems(categorical_results): labels = '\n'.join(list(unique_labels)) file_io.write_string_to_file(os.path.join(args.output_dir, (CATEGORICAL_ANALYSIS_FILE % name)), labels)
Makes the numerical and categorical analysis files. Args: args: the command line args schema_list: python object of the schema json file. Raises: ValueError: if schema contains unknown column types.
codesearchnet
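The numerical half of the analysis is a streaming min/max/count/sum accumulator. A standalone sketch of that pattern, including the skip-empty-values behaviour:

```python
import collections

stats = collections.defaultdict(
    lambda: {'min': float('inf'), 'max': float('-inf'), 'count': 0, 'sum': 0.0})
for row in [{'age': '3'}, {'age': '7'}, {'age': ''}]:
    value = row['age'].strip()
    if not value:
        continue                           # missing values are skipped, as above
    s = stats['age']
    s['min'] = min(s['min'], float(value))
    s['max'] = max(s['max'], float(value))
    s['count'] += 1
    s['sum'] += float(value)
stats['age']['mean'] = stats['age']['sum'] / stats['age']['count']
print(stats['age'])   # {'min': 3.0, 'max': 7.0, 'count': 2, 'sum': 10.0, 'mean': 5.0}
```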
def gather_gpu_devices(): try: dev_info = _gather_gpu_devices_proc() if not dev_info: raise ValueError('No devices found') return dev_info except (IOError, ValueError, errors.OpError): pass try: return _gather_gpu_devices_cudart() except (OSError, ValueError, NotImplementedError, errors.OpError): return []
Gather gpu device info. Returns: A list of test_log_pb2.GPUInfo messages.
github-repos
def alloc_data(self, value): if isinstance(value, six.binary_type): return self._alloc_data(value) elif isinstance(value, six.text_type): return self._alloc_data(value.encode('utf-8') + b'\0') else: raise TypeError('No idea how to encode %s' % repr(value))
Allocate a piece of data that will be included in the shellcode body. Arguments: value(...): The value to add to the shellcode. Can be bytes or string type. Returns: ~pwnypack.types.Offset: The offset used to address the data.
juraj-google-style
def __init__(self, use_zeromq=True): super(PsortMultiProcessEngine, self).__init__() self._analysis_plugins = {} self._completed_analysis_processes = set() self._data_location = None self._event_filter_expression = None self._event_queues = {} self._event_tag_index = event_tag_index.EventTagIndex() self._events_status = processing_status.EventsStatus() self._export_event_heap = PsortEventHeap() self._export_event_timestamp = 0 self._guppy_memory_profiler = None self._knowledge_base = None self._memory_profiler = None self._merge_task = None self._number_of_consumed_event_tags = 0 self._number_of_consumed_events = 0 self._number_of_consumed_reports = 0 self._number_of_consumed_sources = 0 self._number_of_consumed_warnings = 0 self._number_of_produced_event_tags = 0 self._number_of_produced_events = 0 self._number_of_produced_reports = 0 self._number_of_produced_sources = 0 self._number_of_produced_warnings = 0 self._processing_configuration = None self._processing_profiler = None self._serializers_profiler = None self._status = definitions.STATUS_INDICATOR_IDLE self._status_update_callback = None self._use_zeromq = use_zeromq self._worker_memory_limit = definitions.DEFAULT_WORKER_MEMORY_LIMIT
Initializes an engine object. Args: use_zeromq (Optional[bool]): True if ZeroMQ should be used for queuing instead of Python's multiprocessing queue.
juraj-google-style
def get_attribute(json, attr): res = [json[entry][attr] for entry, _ in enumerate(json)] logger.debug('{0}s (from JSON):\n{1}'.format(attr, res)) return res
Gets the values of an attribute from JSON Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. attr: String of attribute in JSON file to collect. Returns: List of values of specified attribute from JSON
juraj-google-style
def console_map_ascii_code_to_font(asciiCode: int, fontCharX: int, fontCharY: int) -> None: lib.TCOD_console_map_ascii_code_to_font(_int(asciiCode), fontCharX, fontCharY)
Set a character code to new coordinates on the tile-set. `asciiCode` must be within the bounds created during the initialization of the loaded tile-set. For example, you can't use 255 here unless you have a 256 tile tile-set loaded. This applies to all functions in this group. Args: asciiCode (int): The character code to change. fontCharX (int): The X tile coordinate on the loaded tileset. 0 is the leftmost tile. fontCharY (int): The Y tile coordinate on the loaded tileset. 0 is the topmost tile.
codesearchnet
def noninteractive_changeset_update(self, fqn, template, old_parameters, parameters, stack_policy, tags, **kwargs): logger.debug('Using noninteractive changeset provider mode for %s.', fqn) (_changes, change_set_id) = create_change_set(self.cloudformation, fqn, template, parameters, tags, 'UPDATE', service_role=self.service_role, **kwargs) self.deal_with_changeset_stack_policy(fqn, stack_policy) self.cloudformation.execute_change_set(ChangeSetName=change_set_id)
Update a Cloudformation stack using a change set. This is required for stacks with a defined Transform (i.e. SAM), as the default update_stack API cannot be used with them. Args: fqn (str): The fully qualified name of the Cloudformation stack. template (:class:`stacker.providers.base.Template`): A Template object to use when updating the stack. old_parameters (list): A list of dictionaries that defines the parameter list on the existing Cloudformation stack. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack.
codesearchnet
def classification_signature_def(examples, classes, scores): if examples is None: raise ValueError('Classification `examples` cannot be None.') if not isinstance(examples, tensor_lib.Tensor): raise ValueError(f'Classification `examples` must be a string Tensor. Found `examples` of type {type(examples)}.') if classes is None and scores is None: raise ValueError('Classification `classes` and `scores` cannot both be None.') input_tensor_info = utils.build_tensor_info(examples) if input_tensor_info.dtype != types_pb2.DT_STRING: raise ValueError(f'Classification input tensors must be of type string. Found tensors of type {input_tensor_info.dtype}') signature_inputs = {signature_constants.CLASSIFY_INPUTS: input_tensor_info} signature_outputs = {} if classes is not None: classes_tensor_info = utils.build_tensor_info(classes) if classes_tensor_info.dtype != types_pb2.DT_STRING: raise ValueError(f'Classification classes must be of type string Tensor. Found tensors of type {classes_tensor_info.dtype}.`') signature_outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES] = classes_tensor_info if scores is not None: scores_tensor_info = utils.build_tensor_info(scores) if scores_tensor_info.dtype != types_pb2.DT_FLOAT: raise ValueError('Classification scores must be a float Tensor.') signature_outputs[signature_constants.CLASSIFY_OUTPUT_SCORES] = scores_tensor_info signature_def = build_signature_def(signature_inputs, signature_outputs, signature_constants.CLASSIFY_METHOD_NAME) return signature_def
Creates classification signature from given examples and predictions. This function produces signatures intended for use with the TensorFlow Serving Classify API (tensorflow_serving/apis/prediction_service.proto), and so constrains the input and output types to those allowed by TensorFlow Serving. Args: examples: A string `Tensor`, expected to accept serialized tf.Examples. classes: A string `Tensor`. Note that the ClassificationResponse message requires that class labels are strings, not integers or anything else. scores: a float `Tensor`. Returns: A classification-flavored signature_def. Raises: ValueError: If examples is `None`.
github-repos
def dumps(o, encoder=None): retval = "" if encoder is None: encoder = TomlEncoder(o.__class__) addtoretval, sections = encoder.dump_sections(o, "") retval += addtoretval outer_objs = [id(o)] while sections: section_ids = [id(section) for section in sections] for outer_obj in outer_objs: if outer_obj in section_ids: raise ValueError("Circular reference detected") outer_objs += section_ids newsections = encoder.get_empty_table() for section in sections: addtoretval, addtosections = encoder.dump_sections( sections[section], section) if addtoretval or (not addtoretval and not addtosections): if retval and retval[-2:] != "\n\n": retval += "\n" retval += "[" + section + "]\n" if addtoretval: retval += addtoretval for s in addtosections: newsections[section + "." + s] = addtosections[s] sections = newsections return retval
Stringifies input dict as toml Args: o: Object to dump into toml preserve: Boolean parameter. If true, preserve inline tables. Returns: String containing the toml corresponding to dict
juraj-google-style
def handle_incoming_message(self, msg): if msg.type == MessageType.START_JOB: job = msg.message['job'] self.schedule_job(job) elif msg.type == MessageType.CANCEL_JOB: job_id = msg.message['job_id'] self.cancel(job_id)
Start or cancel a job, based on the msg. If msg.type == MessageType.START_JOB, then start the job given by msg.job. If msg.type == MessageType.CANCEL_JOB, then try to cancel the job given by msg.job.job_id. Args: msg (barbequeue.messaging.classes.Message): Returns: None
juraj-google-style
def _check_parameter_range(s_min, s_max): if (s_min == DEFAULT_S_MIN): return (0.0, 1.0) if (s_max == DEFAULT_S_MAX): return (s_min, s_min) return (s_min, s_max)
r"""Performs a final check on a clipped parameter range. .. note:: This is a helper for :func:`clip_range`. If both values are unchanged from the "unset" default, this returns the whole interval :math:`\left[0.0, 1.0\right]`. If only one of the values is set to some parameter :math:`s`, this returns the "degenerate" interval :math:`\left[s, s\right]`. (We rely on the fact that ``s_min`` must be the only set value, based on how :func:`_update_parameters` works.) Otherwise, this simply returns ``[s_min, s_max]``. Args: s_min (float): Current start of clipped interval. If "unset", this value will be ``DEFAULT_S_MIN``. s_max (float): Current end of clipped interval. If "unset", this value will be ``DEFAULT_S_MAX``. Returns: Tuple[float, float]: The (possibly updated) start and end of the clipped parameter range.
codesearchnet
def add_handler(self, handler: Handler, group: int=0): if isinstance(handler, DisconnectHandler): self.disconnect_handler = handler.callback else: self.dispatcher.add_handler(handler, group) return (handler, group)
Use this method to register an update handler. You can register multiple handlers, but at most one handler within a group will be used for a single update. To handle the same update more than once, register your handler using a different group id (lower group id == higher priority). Args: handler (``Handler``): The handler to be registered. group (``int``, *optional*): The group identifier, defaults to 0. Returns: A tuple of (handler, group)
codesearchnet
def authentication_required(req, resp, resource, uri_kwargs): if 'user' not in req.context: args = ["Unauthorized", "This resource requires authentication"] if FALCON_VERSION >= (1, 0, 0): args.append(req.context.get('challenges', [])) raise HTTPUnauthorized(*args)
Ensure that user is authenticated otherwise return ``401 Unauthorized``. If request fails to authenticate this authorization hook will also include a list of ``WWW-Authenticate`` challenges. Args: req (falcon.Request): the request object. resp (falcon.Response): the response object. resource (object): the resource object. uri_kwargs (dict): keyword arguments from the URI template. .. versionadded:: 0.4.0
juraj-google-style
def oem_name(self, value): if value == self._defaults['ai.device.oemName'] and 'ai.device.oemName' in self._values: del self._values['ai.device.oemName'] else: self._values['ai.device.oemName'] = value
The oem_name property. Args: value (string): the property value.
juraj-google-style
def create_method_arguments(self, node, method, use_defaults=False): args = [] num_posargs = method.argcount(node) num_posargs_no_default = num_posargs - len(method.defaults) for i in range(num_posargs): default_idx = i - num_posargs_no_default if use_defaults and default_idx >= 0: arg = method.defaults[default_idx] else: arg = self.ctx.convert.create_new_unknown(node, force=not use_defaults) args.append(arg) kws = {} for key in method.signature.kwonly_params: if use_defaults and key in method.kw_defaults: kws[key] = method.kw_defaults[key] else: kws[key] = self.ctx.convert.create_new_unknown(node, force=not use_defaults) starargs = self.create_varargs(node) if method.has_varargs() else None starstarargs = self.create_kwargs(node) if method.has_kwargs() else None return (node, function.Args(posargs=tuple(args), namedargs=kws, starargs=starargs, starstarargs=starstarargs))
Create arguments for the given method. Creates Unknown objects as arguments for the given method. Note that we don't need to take parameter annotations into account as InterpreterFunction.call() will take care of that. Args: node: The current node. method: An abstract.InterpreterFunction. use_defaults: Whether to use parameter defaults for arguments. When True, unknown arguments are created with force=False, as it is fine to use Unsolvable rather than Unknown objects for type-checking defaults. Returns: A tuple of a node and a function.Args object.
github-repos
def _method_url(self, method_name): return '{base_url}/api/{api}/{method}'.format(base_url=self._base_url(), api=self.api_version, method=method_name)
Generate the URL for the requested method Args: method_name (str): Name of the method Returns: A string containing the URL of the method
codesearchnet
def search(nats_api, search_model, algo, dataset='cifar10', reporting_epoch=12, max_train_hours=20000.0): nats_api.reset_time() times, best_valids, best_tests = ([0.0], [0.0], [0.0]) valid_models = 0 time_spent = 0 start_time = time.time() last_report_time = start_time for model, feedback in pg.sample(search_model, algo): spec = model() validation_accuracy, _, _, _ = nats_api.simulate_train_eval(spec, dataset=dataset, hp=VALIDATION_SET_REPORTING_EPOCH) time_spent = nats_api.used_time more_info = nats_api.get_more_info(spec, dataset, hp=reporting_epoch) valid_models += 1 feedback(validation_accuracy) if validation_accuracy > best_valids[-1]: best_valids.append(validation_accuracy) best_tests.append(more_info['test-accuracy']) else: best_valids.append(best_valids[-1]) best_tests.append(best_tests[-1]) times.append(time_spent) time_spent_in_hours = time_spent / (60 * 60) if time_spent_in_hours > max_train_hours: break if feedback.id % 100 == 0: now = time.time() print(f'Tried {feedback.id} models, valid {valid_models}, time_spent {time_spent}, elapse since last report: {now - last_report_time} seconds.') last_report_time = now print(f'Total time elapse: {time.time() - start_time} seconds.') return (times[1:], best_valids[1:], best_tests[1:])
Define the search procedure. Args: nats_api: the NATS-Bench object. search_model: which is a `model` object annotated with `one_of`. algo: algorithm for search. dataset: the target dataset reporting_epoch: Use test set results for models trained for this many epochs. max_train_hours: max time budget to train the models, which is the sum of training time queried from NAS-Bench. Returns: A tuple of (total time spent at step i for all steps, best validation accuracy at step i for all steps, best test accuracy at step i for all steps)
github-repos
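The bookkeeping inside the loop keeps running "best so far" curves, which is what the function finally returns. A standalone sketch of just that part, with made-up (time, validation, test) triples:

```python
times, best_valids, best_tests = [0.0], [0.0], [0.0]
for t, valid, test in [(10, 0.70, 0.68), (25, 0.65, 0.66), (40, 0.72, 0.71)]:
    if valid > best_valids[-1]:
        best_valids.append(valid)
        best_tests.append(test)
    else:                                   # no improvement: carry the best forward
        best_valids.append(best_valids[-1])
        best_tests.append(best_tests[-1])
    times.append(t)
print(best_valids[1:], best_tests[1:])      # [0.7, 0.7, 0.72] [0.68, 0.68, 0.71]
```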
def hook(self, function, dependencies=None): if (not isinstance(dependencies, (Iterable, type(None), str))): raise TypeError('Invalid list of dependencies provided!') if (not hasattr(function, '__deps__')): function.__deps__ = dependencies if self.isloaded(function.__deps__): self.append(function) else: self._later.append(function) for ext in self._later: if self.isloaded(ext.__deps__): self._later.remove(ext) self.hook(ext)
Tries to load a hook Args: function (func): Function that will be called when the event is called Kwargs: dependencies (str): String or Iterable with modules whose hooks should be called before this one Raises: :class:TypeError Note that the dependencies are module-wide, that means that if `parent.foo` and `parent.bar` are both subscribed to `example` event and `child` enumerates `parent` as dependency, **both** `foo` and `bar` must be called in order for the dependency to get resolved.
codesearchnet
def _apply_threshold_to_predictions(self, result: AnomalyResult) -> AnomalyResult: predictions = [dataclasses.replace(p, label=self._threshold_fn.apply(p.score), threshold=self._threshold_fn.threshold) for p in result.predictions] return dataclasses.replace(result, predictions=predictions)
Updates the prediction labels in an AnomalyResult using the ThresholdFn. Args: result (AnomalyResult): The input `AnomalyResult` containing anomaly scores. Returns: AnomalyResult: A new `AnomalyResult` with updated prediction labels and threshold values.
github-repos
def serialize_streamnet(streamnet_file, output_reach_file): FileClass.copy_files(streamnet_file, output_reach_file) ds_reach = ogr_Open(output_reach_file, update=True) layer_reach = ds_reach.GetLayer(0) layer_def = layer_reach.GetLayerDefn() i_link = layer_def.GetFieldIndex(FLD_LINKNO) i_link_downslope = layer_def.GetFieldIndex(FLD_DSLINKNO) i_len = layer_def.GetFieldIndex(REACH_LENGTH) old_id_list = [] output_dic = {} ft = layer_reach.GetNextFeature() while ft is not None: link_id = ft.GetFieldAsInteger(i_link) reach_len = ft.GetFieldAsDouble(i_len) if link_id not in old_id_list: if reach_len < DELTA: downstream_id = ft.GetFieldAsInteger(i_link_downslope) output_dic[link_id] = downstream_id else: old_id_list.append(link_id) ft = layer_reach.GetNextFeature() old_id_list.sort() id_map = {} for i, old_id in enumerate(old_id_list): id_map[old_id] = i + 1 layer_reach.ResetReading() ft = layer_reach.GetNextFeature() while ft is not None: link_id = ft.GetFieldAsInteger(i_link) if link_id not in id_map: layer_reach.DeleteFeature(ft.GetFID()) ft = layer_reach.GetNextFeature() continue ds_id = ft.GetFieldAsInteger(i_link_downslope) ds_id = output_dic.get(ds_id, ds_id) ds_id = output_dic.get(ds_id, ds_id) ft.SetField(FLD_LINKNO, id_map[link_id]) if ds_id in id_map: ft.SetField(FLD_DSLINKNO, id_map[ds_id]) else: ft.SetField(FLD_DSLINKNO, -1) layer_reach.SetFeature(ft) ft = layer_reach.GetNextFeature() ds_reach.ExecuteSQL(str('REPACK reach')) layer_reach.SyncToDisk() ds_reach.Destroy() del ds_reach return id_map
Eliminate reach with zero length and return the reach ID map. Args: streamnet_file: original stream net ESRI shapefile output_reach_file: serialized stream net, ESRI shapefile Returns: id pairs {origin: newly assigned}
juraj-google-style
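The ID handling reduces to two steps: zero-length links forward to their downstream link, and the surviving links are renumbered consecutively from 1. A toy sketch of that mapping (made-up link IDs and lengths):

```python
reaches = {3: 0.0, 7: 12.5, 9: 4.2}        # link id -> reach length
forward = {3: 7}                            # zero-length link 3 forwards downstream
kept = sorted(k for k, length in reaches.items() if length > 1e-6)
id_map = {old: new for new, old in enumerate(kept, start=1)}
print(id_map)                               # {7: 1, 9: 2}
print(id_map[forward.get(3, 3)])            # link 3 resolves to new id 1
```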
def get_tensor_size(self, tensor_name, partial_layout=None, mesh_dimension_to_size=None): return (self.get_tensor_dtype(tensor_name).size * self.get_tensor_num_entries(tensor_name, partial_layout, mesh_dimension_to_size))
The size of a tensor in bytes. If partial_layout is specified, then mesh_dimension_to_size must also be. In this case, the size on a single device is returned. Args: tensor_name: a string, name of a tensor in the graph. partial_layout: an optional {string: string}, from MTF dimension name to mesh dimension name. mesh_dimension_to_size: an optional {string: int}, from mesh dimension name to size. Returns: an integer
codesearchnet
def get_template(template): from cloud_inquisitor.database import db tmpl = db.Template.find_one(template_name=template) if (not tmpl): raise InquisitorError('No such template found: {}'.format(template)) tmplenv = Environment(loader=BaseLoader, autoescape=True) tmplenv.filters['json_loads'] = json.loads tmplenv.filters['slack_quote_join'] = (lambda data: ', '.join(('`{}`'.format(x) for x in data))) return tmplenv.from_string(tmpl.template)
Return a Jinja2 template by name Args: template (str): Name of the template to return Returns: A Jinja2 Template object
codesearchnet
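A minimal sketch of rendering a template fetched this way, with the same two custom filters registered; the template string and context values are invented for illustration.

from jinja2 import Environment
import json

env = Environment(autoescape=True)
env.filters['json_loads'] = json.loads
env.filters['slack_quote_join'] = lambda data: ', '.join('`{}`'.format(x) for x in data)

tmpl = env.from_string('Affected accounts: {{ accounts | slack_quote_join }}')
print(tmpl.render(accounts=['prod', 'staging']))  # Affected accounts: `prod`, `staging`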
def call_next(self, *args, **kwargs) -> t.List[run.RunInfo]: all_results = [] for ext in self.next_extensions: LOG.debug(' %s ', ext) results = ext(*args, **kwargs) LOG.debug(' %s => %s', ext, results) if (results is None): LOG.warning('No result from: %s', ext) continue result_list = [] if isinstance(results, c.Iterable): result_list.extend(results) else: result_list.append(results) all_results.extend(result_list) return all_results
Call all child extensions with the given arguments. This calls all child extensions and collects the results for our own parent. Use this to control the execution of your nested extensions from your own extension. Returns: :obj:`list` of :obj:`RunInfo`: A list of collected results of our child extensions.
codesearchnet
def __init__(self, files=None, misspelling_file=None): if misspelling_file: self._misspelling_dict = defaultdict(list) with open(misspelling_file, 'r') as f: for line in f: bad_word, correction = line.strip().split(' ', 1) self._misspelling_dict[bad_word].append(correction) self._files = [] if files: self.add(files)
Initialises a Misspellings instance. Args: files: List of files to check. More can be added with add(). misspelling_file: Filename with a list of misspelled words and their alternatives. Raises: IOError: Raised if misspelling_file can't be found. ValueError: Raised if misspelling_file isn't correctly formatted.
juraj-google-style
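A hypothetical construction sketch; the file names and contents below are invented, but the misspelling file must follow the one-pair-per-line format the constructor parses.

# corrections.txt -- one "bad_word correction" pair per line, for example:
#   teh the
#   recieve receive
checker = Misspellings(files=['README.md'], misspelling_file='corrections.txt')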
def plot_loss_history(history, figsize=(15, 8)): plt.figure(figsize=figsize) plt.plot(history.history["loss"]) plt.plot(history.history["val_loss"]) plt.xlabel("Epoch") plt.ylabel("Loss") plt.legend(["Training", "Validation"]) plt.title("Loss over time") plt.show()
Plots the learning history for a Keras model, assuming the validation data was provided to the 'fit' function. Args: history: The return value from the 'fit' function. figsize: The size of the plot.
juraj-google-style
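Because the function only reads the `history` dict, a stand-in object is enough to demonstrate the call (sketch; a real Keras `History` comes from `model.fit(..., validation_data=...)`, and matplotlib is assumed to be imported as `plt`).

class FakeHistory:
    # Minimal stand-in for keras.callbacks.History.
    history = {"loss": [0.9, 0.5, 0.3], "val_loss": [1.0, 0.6, 0.4]}

plot_loss_history(FakeHistory(), figsize=(10, 5))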
def parseEquation(self, inp): inp = MathService._preprocess(inp) split = inp.split(' ') for i, w in enumerate(split): if w in self.__unaryOperators__: op = self.__unaryOperators__[w] eq1 = ' '.join(split[:i]) eq2 = ' '.join(split[i + 1:]) result = MathService._applyUnary(self.parseEquation(eq2), op) return self.parseEquation(eq1 + " " + str(result)) def extractNumbersAndSymbols(inp): numbers = [] symbols = [] next_number = "" for w in inp.split(' '): if w in self.__binaryOperators__: symbols.append(self.__binaryOperators__[w]) if next_number: numbers.append(next_number) next_number = "" else: if next_number: next_number += " " next_number += w if next_number: numbers.append(next_number) def convert(n): if n in self.__constants__: return self.__constants__[n] converter = NumberService() return converter.parse(n) numbers = [convert(n) for n in numbers] return numbers, symbols numbers, symbols = extractNumbersAndSymbols(inp) return MathService._calculate(numbers, symbols)
Solves the equation specified by the input string. Args: inp (str): An equation, specified in words, containing some combination of numbers, binary, and unary operations. Returns: The floating-point result of carrying out the computation.
juraj-google-style
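A hedged usage sketch; it assumes "times" is registered in __binaryOperators__ and that NumberService can parse the number words used here.

service = MathService()
result = service.parseEquation("seven times six")  # expected to evaluate to 42.0
print(result)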
def _fits_surface(self, width, height): assert(width > 0 and height > 0) if self.rot and (width > self.width or height > self.height): width, height = height, width if width > self.width or height > self.height: return False else: return True
Test whether the surface is big enough to place a rectangle. Arguments: width (int, float): Rectangle width height (int, float): Rectangle height Returns: boolean: True if it could be placed, False otherwise
juraj-google-style
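The placement check can be restated as a standalone sketch to make the rotation fallback explicit (the names and the free-function form are invented for illustration).

def fits_surface(surf_w, surf_h, rect_w, rect_h, allow_rotation=True):
    assert rect_w > 0 and rect_h > 0
    # Try the rotated orientation only when the upright one cannot fit.
    if allow_rotation and (rect_w > surf_w or rect_h > surf_h):
        rect_w, rect_h = rect_h, rect_w
    return rect_w <= surf_w and rect_h <= surf_h

print(fits_surface(10, 5, 4, 8))                        # True -- fits after rotating to 8x4
print(fits_surface(10, 5, 4, 8, allow_rotation=False))  # False -- too tall upright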
async def remove_participant(self, p: Participant): await self.connection('DELETE', 'tournaments/{}/participants/{}'.format(self._id, p._id)) if p in self.participants: self.participants.remove(p)
Remove a participant from the tournament. |methcoro| Args: p: the participant to remove Raises: APIException
juraj-google-style
def CompleteTask(self, task): with self._lock: if task.identifier not in self._tasks_merging: raise KeyError('Task {0:s} was not merging.'.format(task.identifier)) self.SampleTaskStatus(task, 'completed') del self._tasks_merging[task.identifier] logger.debug('Completed task {0:s}.'.format(task.identifier))
Completes a task. The task is complete and can be removed from the task manager. Args: task (Task): task. Raises: KeyError: if the task was not merging.
juraj-google-style
def set_max_steps_per_epoch(max_steps_per_epoch): global _MAX_STEPS_PER_EPOCH _MAX_STEPS_PER_EPOCH = max_steps_per_epoch
Limit the maximum number of steps for any call to fit/evaluate/predict. This will cap the number of steps for a single epoch of a call to `fit()`, `evaluate()`, or `predict()`. This is purely for debugging, and can also be set via the `KERAS_MAX_STEPS_PER_EPOCH` environment variable to quickly run a script without modifying its source. Args: max_steps_per_epoch: The integer limit on the number of steps per epoch, or `None`. If `None`, no limit is applied.
github-repos
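Both routes mentioned in the docstring would look roughly like this (sketch; nothing beyond the documented behavior is assumed):

set_max_steps_per_epoch(5)     # cap every fit()/evaluate()/predict() epoch at 5 steps
set_max_steps_per_epoch(None)  # lift the cap again

# Or from the shell, without modifying the script:
#   KERAS_MAX_STEPS_PER_EPOCH=5 python train.py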
def __init__(self, data=None, _KEY=None, _ATTRS=None): if self.__class__ is MapEntry: raise TypeError('MapEntry is an abstract class.') if data is None: return else: for key in data: setattr(self, key, data[key]) self.log = logging.getLogger(__name__)
This is an abstract class. Args: data: An optional dict of attribute, value pairs to populate with. Raises: TypeError: Bad argument, or attempt to instantiate abstract class.
github-repos
def load(self, source, as_defaults=False): if isinstance(source, six.string_types): source = os.path.expanduser(source) with open(source, encoding='utf-8') as f: self._rw.load_config_from_file(self._config, f, as_defaults=as_defaults) elif isinstance(source, (list, tuple)): for s in source: with open(s, encoding='utf-8') as f: self._rw.load_config_from_file(self._config, f, as_defaults=as_defaults) else: self._rw.load_config_from_file(self._config, source, as_defaults=as_defaults)
Load configuration values from the specified source. Args: source: a path to a configuration file, a list/tuple of such paths, or an open file-like object to read from. as_defaults (bool): if ``True``, contents of ``source`` will be treated as schema of configuration items.
juraj-google-style
def get_commit(profile, sha): resource = ('/commits/' + sha) data = api.get_request(profile, resource) return prepare(data)
Fetch a commit. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of the commit to fetch. Returns: A dict with data about the commit.
codesearchnet
def _set_bearer_user_vars(allowed_client_ids, scopes): (all_scopes, sufficient_scopes) = _process_scopes(scopes) try: authorized_scopes = oauth.get_authorized_scopes(sorted(all_scopes)) except oauth.Error: _logger.debug('Unable to get authorized scopes.', exc_info=True) return if (not _are_scopes_sufficient(authorized_scopes, sufficient_scopes)): _logger.warning('Authorized scopes did not satisfy scope requirements.') return client_id = oauth.get_client_id(authorized_scopes) if ((list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK) and (client_id not in allowed_client_ids)): _logger.warning('Client ID is not allowed: %s', client_id) return os.environ[_ENV_USE_OAUTH_SCOPE] = ' '.join(authorized_scopes) _logger.debug('get_current_user() will return user from matched oauth_user.')
Validate the oauth bearer token and set endpoints auth user variables. If the bearer token is valid, this sets ENDPOINTS_USE_OAUTH_SCOPE. This provides enough information that our endpoints.get_current_user() function can get the user. Args: allowed_client_ids: List of client IDs that are acceptable. scopes: List of acceptable scopes.
codesearchnet
def get_ax3d_fig_plt(ax=None, **kwargs): import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import axes3d if ax is None: fig = plt.figure(**kwargs) ax = axes3d.Axes3D(fig) else: fig = plt.gcf() return ax, fig, plt
Helper function used in plot functions supporting an optional Axes3D argument. If ax is None, we build the `matplotlib` figure and create the Axes3D, else we return the current active figure. Args: kwargs: keyword arguments are passed to plt.figure if ax is None. Returns: ax: :class:`Axes3D` object figure: matplotlib figure plt: matplotlib pyplot module.
juraj-google-style
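A short usage sketch (the plotted curve is arbitrary): pass no axis to get a fresh 3D figure, or pass an existing Axes3D to reuse it.

import numpy as np

ax, fig, plt = get_ax3d_fig_plt(figsize=(6, 6))  # figsize is forwarded to plt.figure
t = np.linspace(0, 4 * np.pi, 200)
ax.plot(np.cos(t), np.sin(t), t)  # a simple helix
plt.show()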
def AddTransaction(self, tx): if (BC.Default() is None): return False if (tx.Hash.ToBytes() in self.MemPool.keys()): return False if BC.Default().ContainsTransaction(tx.Hash): return False if (not tx.Verify(self.MemPool.values())): logger.error('Verifying tx result... failed') return False self.MemPool[tx.Hash.ToBytes()] = tx return True
Add a transaction to the memory pool. Args: tx (neo.Core.TX.Transaction): instance. Returns: bool: True if successfully added. False otherwise.
codesearchnet
def create_from_snapshot(self, data, timeout=(- 1)): uri = (self.URI + '/from-snapshot') return self._client.create(data, uri=uri, timeout=timeout)
Creates a new volume on the storage system from a snapshot of a volume. A volume template must also be specified when creating a volume from a snapshot. The global setting "StorageVolumeTemplateRequired" controls whether or not root volume templates can be used to provision volumes. The value of this setting defaults to "false". If the value is set to "true", then only templates with an "isRoot" value of "false" can be used to provision a volume. Args: data (dict): Object to create. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion. Returns: dict: Created data.
codesearchnet
def summarize_variables(var_list=None, tag=None): if (var_list is None): var_list = tf.trainable_variables() if (tag is None): tag = 'training_variables/' name_to_var = {v.name: v for v in var_list} for v_name in list(name_to_var): v = name_to_var[v_name] tf.summary.histogram((tag + v_name), v)
Summarize the variables. Args: var_list: a list of variables; defaults to trainable_variables. tag: name scope of the summary; defaults to training_variables/.
codesearchnet
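A hedged TF1-style usage sketch; it assumes graph-mode TensorFlow 1.x summaries and a summary writer configured elsewhere.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # the helper relies on graph-mode tf.summary calls

w = tf.get_variable("w", shape=[10, 10])
summarize_variables()                      # histograms for all trainable variables
summarize_variables([w], tag="weights/")   # or an explicit list with a custom prefix
merged = tf.summary.merge_all()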
def Run(self): for e in self.events: if e.Run() is False: return False return True
Execute this state transition. Returns: Whether or not all event functions returned True.
github-repos
def poll(): event_ptr = ffi.new('SDL_Event *') while lib.SDL_PollEvent(event_ptr): (yield Event._from_ptr(event_ptr)) event_ptr = ffi.new('SDL_Event *')
Polls for currently pending events. Returns: Iterable[Event]: Events from the event queue.
codesearchnet
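A hypothetical event-loop sketch; it assumes the wrapper also exposes the SDL constants and that each yielded Event carries a `type` attribute, neither of which is shown above.

running = True
while running:
    for event in poll():                  # drain everything queued this frame
        if event.type == lib.SDL_QUIT:    # attribute and constant names are assumptions
            running = False
    # ... update and render one frame here ...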
def add_bboxes_to_image(image, bboxes, color='red', width=1): def expanded_bbox(bbox, n): l = min(bbox[0][0], bbox[1][0]) r = max(bbox[0][0], bbox[1][0]) t = min(bbox[0][1], bbox[1][1]) b = max(bbox[0][1], bbox[1][1]) return ((l - n, t - n), (r + n, b + n)) from PIL import Image, ImageDraw draw = ImageDraw.Draw(image) for bbox in bboxes: for n in range(width): draw.rectangle(expanded_bbox(bbox, n), outline=color) return image
Draw rectangles on the image for the bounding boxes. Returns a PIL.Image. Arguments: image -- input image bboxes -- bounding boxes in the [((l, t), (r, b)), ...] format Keyword arguments: color -- color to draw the rectangles width -- line width of the rectangles Example: image = Image.open(filename) add_bboxes_to_image(image, bboxes[filename], width=2, color='#FF7700') image.show()
juraj-google-style
def __init__(self, url): self.url = url self.domain = urlparse(url).netloc self.index = None self.creation_ts = time.time() self.downloaded_ts = None self.processing_started_ts = None self.processing_ended_ts = None for key in worker_mapping().keys(): setattr(self, key, None)
Constructor. Args: url (str): URL to which this request is related.
juraj-google-style
def ws_db996(self, value=None): if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `ws_db996`'.format(value)) self._ws_db996 = value
Corresponds to IDD Field `ws_db996` Mean wind speed coincident with 99.6% dry-bulb temperature Args: value (float): value for IDD Field `ws_db996` Unit: m/s if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def _resolve_grad_inputs(cond_graph, grad_graph): new_inputs = [] for t in grad_graph.external_captures: if t.graph != grad_graph.outer_graph: assert t.graph == cond_graph for i, output in enumerate(t.graph.outputs): if output is t: t = t.graph._forward_cond.outputs[i] break else: for i, output in enumerate(t.graph.internal_captures): if output is t: t = t.graph.external_captures[i] break else: raise ValueError('Could not find external tensor capture {tensor} in captures or outputs'.format(tensor=t)) assert t.graph == cond_graph.outer_graph new_inputs.append(t) return new_inputs
Returns the tensors to pass as inputs to `grad_graph`. The `grad_graph` may have external references to 1. Its outer graph containing the input gradients. These references are kept as is. 2. Tensors in the forward pass graph. These tensors may not be "live" when the gradient is being computed. We replace such references by their corresponding tensor in `cond_graph.outer_graph`. In the case of nested control flow or functions, the gradient logic handling `grad_graph.outer_graph` will make sure the tensor from `cond_graph.outer_graph` is also correctly captured. Args: cond_graph: FuncGraph. The forward-pass function. grad_graph: FuncGraph. The gradients function. Returns: A list of inputs tensors to be passed to grad_graph.
github-repos
def fetch(self, addon_id, data={}, **kwargs): return super(Addon, self).fetch(addon_id, data, **kwargs)
Fetch addon for given Id. Args: addon_id : Id for which addon object has to be retrieved Returns: addon dict for given addon Id
codesearchnet