Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes: codesearchnet, github-repos, juraj-google-style). Each record below is a code snippet, its docstring, and its source label.
def get_next(self, protocol='http', format=False, policy='loop'):
    if not self.proxies[protocol]:
        return None
    if policy == 'loop':
        idx = self.idx[protocol]
        self.idx[protocol] = (idx + 1) % len(self.proxies[protocol])
    elif policy == 'random':
        idx = random.randint(0, self.proxy_num(protocol) - 1)
    else:
        self.logger.error('Unsupported get_next policy: {}'.format(policy))
        exit()
    proxy = self.proxies[protocol][self.addr_list[protocol][idx]]
    if proxy.weight < random.random():
        return self.get_next(protocol, format, policy)
    if format:
        return proxy.format()
    else:
        return proxy
Get the next proxy Args: protocol (str): 'http' or 'https'. (default 'http') format (bool): Whether to format the proxy. (default False) policy (str): Either 'loop' or 'random', indicating the policy of getting the next proxy. If set to 'loop', will return proxies in turn, otherwise will return a proxy randomly. Returns: Proxy or dict: If format is true, then return the formatted proxy which is compatible with requests.Session parameters, otherwise a Proxy object.
codesearchnet
def det(x): if any_symbolic_tensors((x,)): return Det().symbolic_call(x) return _det(x)
Computes the determinant of a square tensor. Args: x: Input tensor of shape `(..., M, M)`. Returns: A tensor of shape `(...,)` representing the determinant of `x`.
github-repos
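As a quick shape check for the batched determinant above, here is a standalone NumPy sketch (using numpy.linalg.det rather than the Keras op) showing how the trailing (M, M) dimensions collapse:

import numpy as np

# A batch of shape (4, 2, 3, 3) yields determinants of shape (4, 2).
x = np.random.rand(4, 2, 3, 3)
print(np.linalg.det(x).shape)  # (4, 2)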
def main(argv=None): if (argv is None): argv = sys.argv[1:] args = _get_parser().parse_args(argv) mand(args.module_seq)
Execute each module in the same interpreter. Args: argv: Each item of argv will be treated as a separate module with potential arguments; each item may be a string or a sequence of strings. If a given argument is a string, treat it as shell arguments and split accordingly. If the given argument is a tuple or list, assume that the given arguments are already parsed. The first item of each argument should be a module or module path.
codesearchnet
def fit(self, trX, trY, batch_size=64, n_epochs=1, len_filter=LenFilter(), snapshot_freq=1, path=None):
    if len_filter is not None:
        trX, trY = len_filter.filter(trX, trY)
    trY = standardize_targets(trY, cost=self.cost)
    n = 0.0
    t = time()
    costs = []
    for e in range(n_epochs):
        epoch_costs = []
        for xmb, ymb in self.iterator.iterXY(trX, trY):
            c = self._train(xmb, ymb)
            epoch_costs.append(c)
            n += len(ymb)
            if self.verbose >= 2:
                n_per_sec = n / (time() - t)
                n_left = len(trY) - n % len(trY)
                time_left = n_left / n_per_sec
                sys.stdout.write('\rEpoch %d Seen %d samples Avg cost %0.4f Time left %d seconds' % (
                    e, n, np.mean(epoch_costs[-250:]), time_left))
                sys.stdout.flush()
        costs.extend(epoch_costs)
        status = 'Epoch %d Seen %d samples Avg cost %0.4f Time elapsed %d seconds' % (
            e, n, np.mean(epoch_costs[-250:]), time() - t)
        if self.verbose >= 2:
            sys.stdout.write('\r' + status)
            sys.stdout.flush()
            sys.stdout.write('\n')
        elif self.verbose == 1:
            print(status)
        if path and e % snapshot_freq == 0:
            save(self, '{0}.{1}'.format(path, e))
    return costs
Train model on given training examples and return the list of costs after each minibatch is processed. Args: trX (list) -- Inputs trY (list) -- Outputs batch_size (int, optional) -- number of examples in a minibatch (default 64) n_epochs (int, optional) -- number of epochs to train for (default 1) len_filter (object, optional) -- object to filter training example by length (default LenFilter()) snapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1) path (str, optional) -- prefix of path where model snapshots are saved. If None, no snapshots are saved (default None) Returns: list -- costs of model after processing each minibatch
codesearchnet
def setup_test_logger(log_path, prefix=None, alias='latest', console_level=logging.INFO): utils.create_dir(log_path) _setup_test_logger(log_path, console_level, prefix) logging.debug('Test output folder: "%s"', log_path) if alias: create_latest_log_alias(log_path, alias=alias)
Customizes the root logger for a test run. In addition to configuring the Mobly logging handlers, this also sets two attributes on the `logging` module for the output directories: root_output_path: path to the directory for the entire test run. log_path: same as `root_output_path` outside of a test class run. In the context of a test class run, this is the output directory for files specific to a test class. Args: log_path: string, the location of the report file. prefix: optional string, a prefix for each log line in terminal. alias: optional string, The name of the alias to use for the latest log directory. If a falsy value is provided, then the alias directory will not be created, which is useful to save storage space when the storage system (e.g. ZIP files) does not properly support shortcut/symlinks. console_level: optional logging level, log level threshold used for log messages printed to the console. Logs with a level less severe than console_level will not be printed to the console.
github-repos
def get_files(self, commit, paths, recursive=False): filtered_file_infos = [] for path in paths: fi = self.inspect_file(commit, path) if fi.file_type == proto.FILE: filtered_file_infos.append(fi) else: filtered_file_infos += self.list_file(commit, path, recursive=recursive) filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE] return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
Returns the contents of a list of files at a specific Commit as a dictionary of file paths to data. Params: * commit: A tuple, string, or Commit object representing the commit. * paths: A list of paths to retrieve. * recursive: If True, will go into each directory in the list recursively.
juraj-google-style
def move_to(self, folder): if isinstance(folder, Folder): self.move_to(folder.id) else: self._move_to(folder)
Moves the email to the folder specified by the folder parameter. Args: folder: A string containing the folder ID the message should be moved to, or a Folder instance
codesearchnet
def _split_generators(self, dl_manager): path = dl_manager.download_and_extract(_DOWNLOAD_URL) return [ tfds.core.SplitGenerator( name=tfds.Split.TEST, num_shards=1, gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)}) ]
Return the test split of Cifar10. Args: dl_manager: download manager object. Returns: test split.
juraj-google-style
def _parse_volumes(volume_values: dict) -> str: for v_values in volume_values: for v_key, v_value in v_values.items(): if v_key == 'source': if v_value == '.': source = os.path.dirname( os.path.abspath(__file__)) else: source = v_value if v_key == 'target': target = v_value volume_spec = [source + ':' + target] return volume_spec
Parse volumes key. Args: volume_values (dict): volume configuration values Returns: string, volume specification with mount source and container path
juraj-google-style
def run(self): if not self._test_run_infos: raise Error('No tests to execute.') self._test_run_metadata.set_start_point() utils.create_dir(self._test_run_metadata.root_output_path) summary_writer = records.TestSummaryWriter(self._test_run_metadata.summary_file_path) def sigterm_handler(*args): logging.warning('Test received a SIGTERM. Aborting all tests.') raise signals.TestAbortAll('Test received a SIGTERM.') signal.signal(signal.SIGTERM, sigterm_handler) try: for test_run_info in self._test_run_infos: test_config = test_run_info.config.copy() test_config.log_path = self._test_run_metadata.root_output_path test_config.summary_writer = summary_writer test_config.test_class_name_suffix = test_run_info.test_class_name_suffix try: self._run_test_class(config=test_config, test_class=test_run_info.test_class, tests=test_run_info.tests) except signals.TestAbortAll as e: logging.warning('Abort all subsequent test classes. Reason: %s', e) raise finally: summary_writer.dump(self.results.summary_dict(), records.TestSummaryEntryType.SUMMARY) self._test_run_metadata.set_end_point() summary_lines = [f'Summary for test run {self._test_run_metadata.run_id}:', f'Total time elapsed {self._test_run_metadata.time_elapsed_sec}s', f'Artifacts are saved in "{self._test_run_metadata.root_output_path}"', f'Test summary saved in "{self._test_run_metadata.summary_file_path}"', f'Test results: {self.results.summary_str()}'] logging.info('\n'.join(summary_lines))
Executes tests. This will instantiate controller and test classes, execute tests, and print a summary. This method should usually be called within the runner's `mobly_logger` context. If you must use this method outside of the context, you should make sure `self._test_run_metadata.generate_test_run_log_path` is called before each invocation of `run`. Raises: Error: if no tests have previously been added to this runner using add_test_class(...).
github-repos
def from_json(cls, data):
    required_keys = ('hum_type', 'hum_value')
    optional_keys = {'barometric_pressure': 101325, 'schedule': '', 'wet_bulb_range': ''}
    for key in required_keys:
        assert key in data, 'Required key "{}" is missing!'.format(key)
    for key, val in optional_keys.items():
        if key not in data:
            data[key] = val
    return cls(data['hum_type'], data['hum_value'], data['barometric_pressure'],
               data['schedule'], data['wet_bulb_range'])
Create a Humidity Condition from a dictionary. Args: data = { "hum_type": string, "hum_value": float, "barometric_pressure": float, "schedule": string, "wet_bulb_range": string}
juraj-google-style
def non_fluent_size(self) -> Sequence[Sequence[int]]: fluents = self.domain.non_fluents ordering = self.domain.non_fluent_ordering return self._fluent_size(fluents, ordering)
The size of each non-fluent in canonical order. Returns: Sequence[Sequence[int]]: A tuple of tuple of integers representing the shape and size of each non-fluent.
codesearchnet
def Process(self, parser_mediator, plist_name, top_level, **kwargs): super(MacUserPlugin, self).Process(parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)
Check if it is a valid MacOS system account plist file name. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. plist_name (str): name of the plist. top_level (dict[str, object]): plist top-level key.
codesearchnet
def __init__(self, uri=None, method=None, headers=None): self.headers = headers or {} self._body_parts = [] if method is not None: self.method = method if isinstance(uri, (str, unicode)): uri = Uri.parse_uri(uri) self.uri = uri or Uri() self.headers['MIME-version'] = '1.0' self.headers['Connection'] = 'close'
Construct an HTTP request. Args: uri: The full path or partial path as a Uri object or a string. method: The HTTP method for the request, examples include 'GET', 'POST', etc. headers: dict of strings The HTTP headers to include in the request.
juraj-google-style
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A DeBERTa sequence has the following format: - single sequence: [CLS] X [SEP] - pair of sequences: [CLS] A [SEP] B [SEP] Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
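A minimal illustration of the two layouts above, using made-up special-token ids (101 for [CLS] and 102 for [SEP] are assumptions for this sketch, not necessarily DeBERTa's real vocabulary values):

# Hypothetical ids: cls_token_id=101, sep_token_id=102.
cls_token_id, sep_token_id = 101, 102
token_ids_0 = [7, 8, 9]
token_ids_1 = [10, 11]
single = [cls_token_id] + token_ids_0 + [sep_token_id]
pair = [cls_token_id] + token_ids_0 + [sep_token_id] + token_ids_1 + [sep_token_id]
print(single)  # [101, 7, 8, 9, 102]              -> [CLS] X [SEP]
print(pair)    # [101, 7, 8, 9, 102, 10, 11, 102] -> [CLS] A [SEP] B [SEP]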
def update_vmss(access_token, subscription_id, resource_group, vmss_name, body): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API]) return do_put(endpoint, body, access_token)
Update a VMSS with a new JSON body. E.g. do a GET, change something, call this. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. body (dict): JSON body of the VM scale set. Returns: HTTP response.
juraj-google-style
def from_spec(cls, spec, name=None): return cls(spec.shape, spec.dtype, name or spec.name)
Returns a `TensorSpec` with the same shape and dtype as `spec`. >>> spec = tf.TensorSpec(shape=[8, 3], dtype=tf.int32, name="OriginalName") >>> tf.TensorSpec.from_spec(spec, "NewName") TensorSpec(shape=(8, 3), dtype=tf.int32, name='NewName') Args: spec: The `TypeSpec` used to create the new `TensorSpec`. name: The name for the new `TensorSpec`. Defaults to `spec.name`.
github-repos
def fit(self, col): dates = self.safe_datetime_cast(col) self.default_val = dates.groupby(dates).count().index[0].timestamp() * 1e9
Prepare the transformer to convert data. Args: col(pandas.DataFrame): Data to transform. Returns: None
juraj-google-style
def filepattern(self, data_dir, mode, shard=None):
    path = os.path.join(data_dir, self.dataset_filename())
    shard_str = ('-%05d' % shard) if shard is not None else ''
    if mode == DatasetSplit.TRAIN:
        suffix = 'train'
    elif mode in [DatasetSplit.EVAL, tf.estimator.ModeKeys.PREDICT]:
        suffix = 'dev'
    else:
        assert mode == DatasetSplit.TEST
        suffix = 'test'
    return '%s-%s%s*' % (path, suffix, shard_str)
Get filepattern for data files for mode. Matches mode to a suffix. * DatasetSplit.TRAIN: train * DatasetSplit.EVAL: dev * DatasetSplit.TEST: test * tf.estimator.ModeKeys.PREDICT: dev Args: data_dir: str, data directory. mode: DatasetSplit shard: int, if provided, will only read data from the specified shard. Returns: filepattern str
codesearchnet
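A small sketch of the pattern string produced above, assuming a hypothetical dataset filename 'translate_ende' and shard 3 of the TRAIN split:

import os

# Hypothetical inputs: data_dir='/data', dataset filename 'translate_ende', TRAIN split, shard 3.
path = os.path.join('/data', 'translate_ende')
shard_str = '-%05d' % 3
print('%s-%s%s*' % (path, 'train', shard_str))  # /data/translate_ende-train-00003*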
def is_unitary( matrix: np.ndarray, *, rtol: float = 1e-5, atol: float = 1e-8) -> bool: return (matrix.shape[0] == matrix.shape[1] and np.allclose(matrix.dot(np.conj(matrix.T)), np.eye(matrix.shape[0]), rtol=rtol, atol=atol))
Determines if a matrix is approximately unitary. A matrix is unitary if it's square and its adjoint is its inverse. Args: matrix: The matrix to check. rtol: The per-matrix-entry relative tolerance on equality. atol: The per-matrix-entry absolute tolerance on equality. Returns: Whether the matrix is unitary within the given tolerance.
juraj-google-style
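The check boils down to comparing the product of the matrix with its adjoint against the identity; a self-contained NumPy sketch on the Hadamard matrix:

import numpy as np

# The Hadamard matrix is unitary: H.H^dagger equals the identity within tolerance.
H = np.array([[1.0, 1.0], [1.0, -1.0]]) / np.sqrt(2)
print(np.allclose(H.dot(np.conj(H.T)), np.eye(2), rtol=1e-5, atol=1e-8))  # True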
def set_column_sizes(self, values): self.style['grid-template-columns'] = ' '.join(map((lambda value: (str(value) if str(value).endswith('%') else (str(value) + '%'))), values))
Sets the size value for each column Args: values (iterable of int or str): values are treated as percentage.
codesearchnet
def get_all_ad_units(inventory_service):
    statement = ad_manager.StatementBuilder(version='v201811').OrderBy('id', ascending=True)
    keep_iterating = True
    total_results = 0
    found_ad_units = []
    while keep_iterating:
        page = inventory_service.getAdUnitsByStatement(statement.ToStatement())
        if 'results' in page and len(page['results']):
            total_results = page['totalResultSetSize']
            found_ad_units.extend(page['results'])
            statement.offset += statement.limit
        keep_iterating = statement.offset < total_results
    return found_ad_units
Download all ad units. Args: inventory_service: An instance of the InventoryService. Returns: A list containing all ad units.
codesearchnet
def write_image(self, stream, image_format='svg', **kwargs): plt = self.get_plot(**kwargs) f = plt.gcf() f.set_size_inches((12, 10)) plt.savefig(stream, format=image_format)
Writes the phase diagram to an image in a stream. Args: stream: stream to write to. Can be a file stream or a StringIO stream. image_format: format for image. Can be any of matplotlib supported formats. Defaults to svg for best results for vector graphics. \\*\\*kwargs: Pass through to get_plot function.
codesearchnet
def serialize(obj): LOGGER.debug('serialize(%s)', obj) if isinstance(obj, datetime.date): return simplejson.dumps(obj, default=encoders.as_date) elif hasattr(obj, '__dict__'): return simplejson.dumps(obj, default=encoders.as_object) return simplejson.dumps(obj)
Serialize the given object into JSON. Args: obj: the object to be serialized. Returns: (str): JSON representation of the given object.
juraj-google-style
def add_plot_boundary(ax, padding=0.125): nodes = np.asfortranarray(np.vstack([line.get_xydata() for line in ax.lines]).T) (left, right, bottom, top) = _helpers.bbox(nodes) center_x = (0.5 * (right + left)) delta_x = (right - left) center_y = (0.5 * (top + bottom)) delta_y = (top - bottom) multiplier = ((1.0 + padding) * 0.5) ax.set_xlim((center_x - (multiplier * delta_x)), (center_x + (multiplier * delta_x))) ax.set_ylim((center_y - (multiplier * delta_y)), (center_y + (multiplier * delta_y)))
Add a buffer of empty space around a plot boundary. .. note:: This only uses ``line`` data from the axis. It **could** use ``patch`` data, but doesn't at this time. Args: ax (matplotlib.artist.Artist): A matplotlib axis. padding (Optional[float]): Amount (as a fraction of width and height) of padding to add around data. Defaults to ``0.125``.
codesearchnet
def slice(self, start, size): return SeriesWeld( grizzly_impl.slice( self.expr, start, size, self.weld_type ), self.weld_type, self.df, self.column_name )
Return a slice of the series. Args: start (int): Index at which the slice starts. size (int): Number of elements to include in the slice. Returns: SeriesWeld: The sliced series.
juraj-google-style
def stop(self, consumer): stopped_workflows = [] for request in [r for r in consumer.controller.state.active_requests]: job = AsyncResult(request.id) workflow_id = job.result['workflow_id'] if workflow_id not in stopped_workflows: client = Client( SignalConnection(**consumer.app.user_options['config'].signal, auto_connect=True), request_key=workflow_id) client.send(Request(action='stop_workflow')) stopped_workflows.append(workflow_id)
This function is called when the worker received a request to terminate. Upon the termination of the worker, the workflows for all running jobs are stopped gracefully. Args: consumer (Consumer): Reference to the consumer object that handles messages from the broker.
juraj-google-style
def HashIt(self):
    while True:
        interval = self._GetNextInterval()
        if interval is None:
            break
        self.file.seek(interval.start, os.SEEK_SET)
        block = self.file.read(interval.end - interval.start)
        if len(block) != interval.end - interval.start:
            raise RuntimeError('Short read on file.')
        self._HashBlock(block, interval.start, interval.end)
        self._AdjustIntervals(interval.start, interval.end)
    results = []
    for finger in self.fingers:
        res = {}
        leftover = finger.CurrentRange()
        if leftover:
            if (len(finger.ranges) > 1 or leftover.start != self.filelength or
                    leftover.end != self.filelength):
                raise RuntimeError('Non-empty range remains.')
        res.update(finger.metadata)
        for hasher in finger.hashers:
            res[hasher.name] = hasher.digest()
        results.append(res)
    self.fingers = []
    return sorted(results, key=lambda r: r['name'])
Finalizing function for the Fingerprint class. This method applies all the different hash functions over the previously specified different ranges of the input file, and computes the resulting hashes. After calling HashIt, the state of the object is reset to its initial state, with no fingers defined. Returns: An array of dicts, with each dict containing name of fingerprint type, names of hashes and values, and additional, type-dependent key / value pairs, such as an array of SignedData tuples for the PE/COFF fingerprint type. Raises: RuntimeError: when internal inconsistencies occur.
codesearchnet
def __init__(self, option_strings, dest, help, metavar, flag_instance): del dest self._flag_instance = flag_instance flag_names = [self._flag_instance.name] if self._flag_instance.short_name: flag_names.append(self._flag_instance.short_name) self._flag_names = frozenset(flag_names) super(_BooleanFlagAction, self).__init__( option_strings=option_strings, dest=argparse.SUPPRESS, nargs=0, help=help, metavar=metavar)
Initializes _BooleanFlagAction. Args: option_strings: See argparse.Action. dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS. help: See argparse.Action. metavar: See argparse.Action. flag_instance: absl.flags.Flag, the absl flag instance.
juraj-google-style
def get_all_doctest_files() -> List[str]:
    # Reconstructed: collect both Python and Markdown files, as described in the docstring.
    py_files = [str(x.relative_to(PATH_TO_REPO)) for x in PATH_TO_REPO.glob('**/*.py')]
    md_files = [str(x.relative_to(PATH_TO_REPO)) for x in PATH_TO_REPO.glob('**/*.md')]
    test_files_to_run = py_files + md_files
    test_files_to_run = ['/'.join(Path(x).parts) for x in test_files_to_run]
    test_files_to_run = [x for x in test_files_to_run if 'models/deprecated' not in x]
    test_files_to_run = [x for x in test_files_to_run if x.startswith(('src/', 'docs/source/en/'))]
    test_files_to_run = [x for x in test_files_to_run if not x.endswith(('__init__.py',))]
    with open('utils/not_doctested.txt') as fp:
        not_doctested = {x.split(' ')[0] for x in fp.read().strip().split('\n')}
    test_files_to_run = [x for x in test_files_to_run if x not in not_doctested]
    return sorted(test_files_to_run)
Return the complete list of python and Markdown files on which we run doctest. At this moment, we restrict this to only take files from `src/` or `docs/source/en/` that are not in `utils/not_doctested.txt`. Returns: `List[str]`: The complete list of Python and Markdown files on which we run doctest.
github-repos
def get_master_port(real_launcher=False): master_port_base = os.environ.get('DS_TEST_PORT', DEFAULT_MASTER_PORT) if not real_launcher: master_port_base = str(int(master_port_base) + 1) return master_port_base
When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed) the issue is that once the port is tied it can't be used anywhere else outside of this process, since torch.dist doesn't free the port until the process exits. Therefore for the sake of being able to run both emulated launcher and normal launcher tests we need 2 distinct ports. This function will give the right port in the right context. For real launcher it'll give the base port, for emulated launcher it'll give the base port + 1. In both cases a string is returned. Args: `real_launcher`: whether a real launcher is going to be used, or the emulated one
github-repos
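A minimal sketch of the port selection above, assuming DS_TEST_PORT is unset and using a placeholder value of '10999' for DEFAULT_MASTER_PORT:

import os

DEFAULT_MASTER_PORT = '10999'  # placeholder value for this sketch
base = os.environ.get('DS_TEST_PORT', DEFAULT_MASTER_PORT)
print(base)                # what a real launcher would get
print(str(int(base) + 1))  # what the emulated launcher would get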
def mark_deprecated(replaced_by): def decorator(fn): @wraps(fn) def wrapper(*args, **kw): from peltak.core import shell if shell.is_tty: warnings.warn('This command is has been deprecated. Please use {new} instead.'.format(new=replaced_by)) return fn(*args, **kw) return wrapper return decorator
Mark command as deprecated. Args: replaced_by (str): The command that deprecated this command and should be used instead.
codesearchnet
def GetZipInfoByPathSpec(self, path_spec):
    location = getattr(path_spec, 'location', None)
    if location is None:
        raise errors.PathSpecError('Path specification missing location.')
    if not location.startswith(self.LOCATION_ROOT):
        raise errors.PathSpecError('Invalid location in path specification.')
    if len(location) > 1:
        return self._zip_file.getinfo(location[1:])
    return None
Retrieves the ZIP info for a path specification. Args: path_spec (PathSpec): a path specification. Returns: zipfile.ZipInfo: a ZIP info object or None if not available. Raises: PathSpecError: if the path specification is incorrect.
codesearchnet
def screenshot(path=None):
    if not _rootinitialized:
        raise TDLError('Initialize first with tdl.init')
    if isinstance(path, str):
        _lib.TCOD_sys_save_screenshot(_encodeString(path))
    elif path is None:
        filelist = _os.listdir('.')
        n = 1
        filename = 'screenshot%.3i.png' % n
        while filename in filelist:
            n += 1
            filename = 'screenshot%.3i.png' % n
        _lib.TCOD_sys_save_screenshot(_encodeString(filename))
    else:
        # path is a file-like object: write the screenshot through a temporary file
        tmpname = _os.tempnam()
        _lib.TCOD_sys_save_screenshot(_encodeString(tmpname))
        with open(tmpname, 'rb') as tmpfile:
            path.write(tmpfile.read())
        _os.remove(tmpname)
Capture the screen and save it as a png file. If path is None then the image will be placed in the current folder with the names: ``screenshot001.png, screenshot002.png, ...`` Args: path (Optional[Text]): The file path to save the screenshot.
juraj-google-style
def get_body(name): body = Pck()[name] body.propagate = lambda date: get_orbit(name, date) return body
Retrieve the Body structure of a JPL .bsp file object Args: name (str) Return: :py:class:`~beyond.constants.Body`
juraj-google-style
def parse_verilog(text): lex = VerilogLexer name = None kind = None saved_type = None mode = 'input' ptype = 'wire' metacomments = [] parameters = [] param_items = [] generics = [] ports = collections.OrderedDict() sections = [] port_param_index = 0 last_item = None array_range_start_pos = 0 objects = [] for pos, action, groups in lex.run(text): if action == 'metacomment': if last_item is None: metacomments.append(groups[0]) else: last_item.desc = groups[0] if action == 'section_meta': sections.append((port_param_index, groups[0])) elif action == 'module': kind = 'module' name = groups[0] generics = [] ports = collections.OrderedDict() param_items = [] sections = [] port_param_index = 0 elif action == 'parameter_start': net_type, vec_range = groups new_ptype = '' if net_type is not None: new_ptype += net_type if vec_range is not None: new_ptype += ' ' + vec_range ptype = new_ptype elif action == 'param_item': generics.append(VerilogParameter(groups[0], 'in', ptype)) elif action == 'module_port_start': new_mode, net_type, signed, vec_range = groups new_ptype = '' if net_type is not None: new_ptype += net_type if signed is not None: new_ptype += ' ' + signed if vec_range is not None: new_ptype += ' ' + vec_range for i in param_items: ports[i] = VerilogParameter(i, mode, ptype) param_items = [] if len(ports) > 0: last_item = next(reversed(ports)) mode = new_mode ptype = new_ptype elif action == 'port_param': ident = groups[0] param_items.append(ident) port_param_index += 1 elif action == 'end_module': for i in param_items: ports[i] = VerilogParameter(i, mode, ptype) vobj = VerilogModule(name, ports.values(), generics, dict(sections), metacomments) objects.append(vobj) last_item = None metacomments = [] return objects
Parse a text buffer of Verilog code Args: text (str): Source code to parse Returns: List of parsed objects.
juraj-google-style
def recall(truth, recommend, k=None):
    if len(truth) == 0:
        if len(recommend) == 0:
            return 1.
        return 0.
    if k is None:
        k = len(recommend)
    return count_true_positive(truth, recommend[:k]) / float(truth.size)
Recall@k. Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. k (int): Top-k items in `recommend` will be recommended. Returns: float: Recall@k.
juraj-google-style
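A worked Recall@k example, assuming count_true_positive counts how many truth items appear in the top-k recommendations:

import numpy as np

# 2 of the 3 truth items appear in the top-3 recommendations, so Recall@3 = 2/3.
truth = np.array([1, 2, 3])
recommend = np.array([2, 9, 3, 1, 7])
k = 3
hits = np.intersect1d(truth, recommend[:k]).size
print(hits / float(truth.size))  # 0.666...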
def from_response(self, response_data): return HSAccessTokenAuth( response_data['access_token'], response_data['token_type'], response_data['refresh_token'], response_data['expires_in'], response_data.get('state') )
Builds a new HSAccessTokenAuth straight from response data Args: response_data (dict): Response data to use Returns: An HSAccessTokenAuth object
juraj-google-style
def LoadFromString(cls, yaml_doc): return cls(**googleads.common.LoadFromString( yaml_doc, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES, cls._OPTIONAL_INIT_VALUES))
Creates an AdWordsClient with information stored in a yaml string. Args: yaml_doc: The yaml string containing the cached AdWords data. Returns: An AdWordsClient initialized with the values cached in the string. Raises: A GoogleAdsValueError if the given yaml string does not contain the information necessary to instantiate a client object - either a required key was missing or an OAuth2 key was missing.
juraj-google-style
def compose_r(self, r: Rotation) -> Rotation: r1 = self.get_rot_mats() r2 = r.get_rot_mats() new_rot_mats = rot_matmul(r1, r2) return Rotation(rot_mats=new_rot_mats, quats=None)
Compose the rotation matrices of the current Rotation object with those of another. Args: r: An update rotation object Returns: An updated rotation object
github-repos
def update_thread(cls, session, conversation, thread): data = thread.to_api() data['reload'] = True return cls( '/conversations/%s/threads/%d.json' % ( conversation.id, thread.id, ), data=data, request_type=RequestPaginator.PUT, singleton=True, session=session, )
Update a thread. Args: session (requests.sessions.Session): Authenticated session. conversation (helpscout.models.Conversation): The conversation that the thread belongs to. thread (helpscout.models.Thread): The thread to be updated. Returns: helpscout.models.Conversation: Conversation including freshly updated thread.
juraj-google-style
def _prune_hit(hit, model): hit_id = hit['_id'] hit_index = hit['_index'] if model.objects.in_search_queryset(hit_id, index=hit_index): logger.debug("%s with id=%s exists in the '%s' index queryset.", model, hit_id, hit_index) return None else: logger.debug("%s with id=%s does not exist in the '%s' index queryset and will be pruned.", model, hit_id, hit_index) return model(pk=hit_id)
Check whether a document should be pruned. This method uses the SearchDocumentManagerMixin.in_search_queryset method to determine whether a 'hit' (search document) should be pruned from an index, and if so it returns the hit as a Django object(id=hit_id). Args: hit: dict object that represents a document as returned from the scan_index function. (Contains object id and index.) model: the Django model (not object) from which the document was derived. Used to get the correct model manager and bulk action. Returns: an object of type model, with id=hit_id. NB this is not the object itself, which by definition may not exist in the underlying database, but a temporary object with the document id - which is enough to create a 'delete' action.
codesearchnet
def get_value_or_block_until_ready(self, side_input, task: TransformExecutor, block_until: Timestamp) -> Any: with self._lock: view = self._views[side_input] if view.watermark and view.watermark.output_watermark >= block_until: view.value = self._pvalue_to_value(side_input, view.elements) return view.value else: view.blocked_tasks.append((task, block_until)) task.blocked = True
Returns the value of a view whose task is unblocked or blocks its task. It gets the value of a view whose watermark has been updated and surpasses a given value. Args: side_input: ``_UnpickledSideInput`` value. task: ``TransformExecutor`` task waiting on a side input. block_until: Timestamp after which the task gets unblocked. Returns: The ``SideInputMap`` value of a view when the tasks it blocks are unblocked. Otherwise, None.
github-repos
def call(self, y_true, y_pred): if tensor_util.is_tf_type(y_pred) and tensor_util.is_tf_type(y_true): y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true) ag_fn = autograph.tf_convert(self.fn, ag_ctx.control_status_ctx()) return ag_fn(y_true, y_pred, **self._fn_kwargs)
Invokes the `LossFunctionWrapper` instance. Args: y_true: Ground truth values. y_pred: The predicted values. Returns: Loss values per sample.
github-repos
def on_value_event(self, event): if self._dump_dir: self._write_value_event(event) else: value = event.summary.value[0] tensor_value = debug_data.load_tensor_from_event(event) self._event_listener_servicer.debug_tensor_values[value.node_name].append(tensor_value) items = event.summary.value[0].node_name.split(':') node_name = items[0] output_slot = int(items[1]) debug_op = items[2] if (node_name, output_slot, debug_op) in self._event_listener_servicer.breakpoints: return debug_service_pb2.EventReply()
Implementation of the tensor value-carrying Event proto callback. Writes the Event proto to the file system for testing. The path written to follows the same pattern as the file:// debug URLs of tfdbg, i.e., the name scope of the op becomes the directory structure under the dump root directory. Args: event: The Event proto carrying a tensor value. Returns: If the debug node belongs to the set of currently activated breakpoints, a `EventReply` proto will be returned.
github-repos
def embed_image_html(image):
    if image is None:
        return None
    elif isinstance(image, PIL.Image.Image):
        pass
    elif isinstance(image, np.ndarray):
        image = PIL.Image.fromarray(image)
    else:
        raise ValueError('image must be a PIL.Image or a np.ndarray')
    fmt = image.format
    if not fmt:
        fmt = 'jpeg'
    else:
        fmt = fmt.lower()
    string_buf = StringIO()
    image.save(string_buf, format=fmt)
    data = string_buf.getvalue().encode('base64').replace('\n', '')
    return 'data:image/%s;base64,%s' % (fmt, data)
Returns an image embedded in HTML base64 format (Based on Caffe's web_demo) Arguments: image -- a PIL.Image or np.ndarray
juraj-google-style
def _jit_get_rotation_matrix(axis, angle):
    axis = _jit_normalize(axis)
    a = m.cos(angle / 2)
    b, c, d = axis * m.sin(angle / 2)
    rot_matrix = np.empty((3, 3))
    rot_matrix[0, 0] = a**2 + b**2 - c**2 - d**2
    rot_matrix[0, 1] = 2. * (b * c - a * d)
    rot_matrix[0, 2] = 2. * (b * d + a * c)
    rot_matrix[1, 0] = 2. * (b * c + a * d)
    rot_matrix[1, 1] = a**2 + c**2 - b**2 - d**2
    rot_matrix[1, 2] = 2. * (c * d - a * b)
    rot_matrix[2, 0] = 2. * (b * d - a * c)
    rot_matrix[2, 1] = 2. * (c * d + a * b)
    rot_matrix[2, 2] = a**2 + d**2 - b**2 - c**2
    return rot_matrix
Returns the rotation matrix. This function returns a matrix for the counterclockwise rotation around the given axis. The Input angle is in radians. Args: axis (vector): angle (float): Returns: Rotation matrix (np.array):
juraj-google-style
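A standalone sketch of the same quaternion construction, rotating (1, 0, 0) by 90 degrees counterclockwise about the z axis (the axis is already unit length, so normalization is a no-op here):

import math as m
import numpy as np

axis = np.array([0.0, 0.0, 1.0])
angle = m.pi / 2
a = m.cos(angle / 2)
b, c, d = axis * m.sin(angle / 2)
rot = np.array([
    [a**2 + b**2 - c**2 - d**2, 2 * (b * c - a * d),       2 * (b * d + a * c)],
    [2 * (b * c + a * d),       a**2 + c**2 - b**2 - d**2, 2 * (c * d - a * b)],
    [2 * (b * d - a * c),       2 * (c * d + a * b),       a**2 + d**2 - b**2 - c**2],
])
print(np.round(rot.dot(np.array([1.0, 0.0, 0.0])), 6))  # approximately [0, 1, 0]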
def pauli_single(cls, num_qubits, index, pauli_label): tmp = Pauli.from_label(pauli_label) z = np.zeros(num_qubits, dtype=np.bool) x = np.zeros(num_qubits, dtype=np.bool) z[index] = tmp.z[0] x[index] = tmp.x[0] return cls(z, x)
Generate single qubit pauli at index with pauli_label with length num_qubits. Args: num_qubits (int): the length of pauli index (int): the qubit index to insert the single qubit pauli_label (str): pauli Returns: Pauli: single qubit pauli
codesearchnet
def convert_to_rgb(image: ImageInput) -> ImageInput: requires_backends(convert_to_rgb, ['vision']) if not isinstance(image, PIL.Image.Image): return image if image.mode == 'RGB': return image image = image.convert('RGB') return image
Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image as is. Args: image (Image): The image to convert.
github-repos
def add_item(self, item):
    if not (isinstance(item.name, basestring) and isinstance(item.description, basestring)):
        raise TypeError('Name and description should be strings, are of type {} and {}'.format(
            type(item.name), type(item.description)))
    if not isinstance(item.flag_type, FlagType):
        raise TypeError('Flag type should be of type FlagType, is of {}'.format(type(item.flag_type)))
    if item.name not in self._flags:
        if item.default is not None:
            if item.default is not False:
                item.description = item.description + ' (default: %(default)s)'
            self._flags[item.name] = item
        else:
            self._flags[item.name] = item
Add single command line flag Arguments: name (:obj:`str`): Name of flag used in command line flag_type (:py:class:`snap_plugin.v1.plugin.FlagType`): Indication if flag should store value or is simple bool flag description (:obj:`str`): Flag description used in command line default (:obj:`object`, optional): Optional default value for flag Raises: TypeError: Provided wrong arguments or arguments of wrong types, method will raise TypeError
codesearchnet
def _ConvertHeaderToId(header): if not (header.startswith('<') or header.endswith('>')): raise exceptions.BatchError( 'Invalid value for Content-ID: %s' % header) if '+' not in header: raise exceptions.BatchError( 'Invalid value for Content-ID: %s' % header) _, request_id = header[1:-1].rsplit('+', 1) return urllib_parse.unquote(request_id)
Convert a Content-ID header value to an id. Presumes the Content-ID header conforms to the format that _ConvertIdToHeader() returns. Args: header: A string indicating the Content-ID header value. Returns: The extracted id value. Raises: BatchError if the header is not in the expected format.
juraj-google-style
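A small sketch of the parsing above on a hypothetical Content-ID value, using the standard-library unquote:

from urllib.parse import unquote

# Hypothetical header '<batch-1+request%2D42>': strip the angle brackets, split on the last '+', unquote.
header = '<batch-1+request%2D42>'
_, request_id = header[1:-1].rsplit('+', 1)
print(unquote(request_id))  # request-42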
def ScanForVolumeSystem(self, source_path_spec): if source_path_spec.type_indicator == definitions.TYPE_INDICATOR_VSHADOW: return None if source_path_spec.IsVolumeSystemRoot(): return source_path_spec if source_path_spec.type_indicator == ( definitions.TYPE_INDICATOR_APFS_CONTAINER): return None try: type_indicators = analyzer.Analyzer.GetVolumeSystemTypeIndicators( source_path_spec, resolver_context=self._resolver_context) except (IOError, RuntimeError) as exception: raise errors.BackEndError(( 'Unable to process source path specification with error: ' '{0!s}').format(exception)) if not type_indicators: return None if len(type_indicators) > 1: raise errors.BackEndError( 'Unsupported source found more than one volume system types.') if (type_indicators[0] == definitions.TYPE_INDICATOR_TSK_PARTITION and source_path_spec.type_indicator in [ definitions.TYPE_INDICATOR_TSK_PARTITION]): return None if type_indicators[0] in definitions.VOLUME_SYSTEM_TYPE_INDICATORS: return path_spec_factory.Factory.NewPathSpec( type_indicators[0], location='/', parent=source_path_spec) return path_spec_factory.Factory.NewPathSpec( type_indicators[0], parent=source_path_spec)
Scans the path specification for a supported volume system format. Args: source_path_spec (PathSpec): source path specification. Returns: PathSpec: volume system path specification or None if no supported volume system type was found. Raises: BackEndError: if the source cannot be scanned or more than one volume system type is found.
juraj-google-style
def _Open(self, hostname, port): try: self._xmlrpc_server = SimpleXMLRPCServer.SimpleXMLRPCServer((hostname, port), logRequests=False, allow_none=True) except SocketServer.socket.error as exception: logger.warning('Unable to bind a RPC server on {0:s}:{1:d} with error: {2!s}'.format(hostname, port, exception)) return False self._xmlrpc_server.register_function(self._callback, self._RPC_FUNCTION_NAME) return True
Opens the RPC communication channel for clients. Args: hostname (str): hostname or IP address to connect to for requests. port (int): port to connect to for requests. Returns: bool: True if the communication channel was successfully opened.
codesearchnet
def make_persister(self, to_persist): if (not self.meta_data): raise Exception("Root not set. Can't create persister.") def persister(c, broker): if (c in to_persist): self.dehydrate(c, broker) return persister
Returns a function that hydrates components as they are evaluated. The function should be registered as an observer on a Broker just before execution. Args: to_persist (set): Set of components to persist. Skip everything else.
codesearchnet
def search(self, search_string):
    updates = Updates()
    found = updates.updates
    if isinstance(search_string, six.string_types):
        search_string = [search_string]
    if isinstance(search_string, six.integer_types):
        search_string = [six.text_type(search_string)]
    for update in self._updates:
        for find in search_string:
            if find == update.Identity.UpdateID:
                found.Add(update)
                continue
            if find in [('KB' + item) for item in update.KBArticleIDs]:
                found.Add(update)
                continue
            if find in [item for item in update.KBArticleIDs]:
                found.Add(update)
                continue
            if find in update.Title:
                found.Add(update)
                continue
    return updates
Search for either a single update or a specific list of updates. GUIDs are searched first, then KB numbers, and finally Titles. Args: search_string (str, list): The search string to use to find the update. This can be the GUID or KB of the update (preferred). It can also be the full Title of the update or any part of the Title. A partial Title search is less specific and can return multiple results. Returns: Updates: An instance of Updates with the results of the search Code Example: .. code-block:: python import salt.utils.win_update wua = salt.utils.win_update.WindowsUpdateAgent() # search for a single update and show its details updates = wua.search('KB3194343') updates.list() # search for a list of updates and show their details updates = wua.search(['KB3195432', '12345678-abcd-1234-abcd-1234567890ab']) updates.list()
codesearchnet
def _update_install_json(self, install_json): updated = False install_json.setdefault('features', []) for feature in self.features: if feature not in install_json.get('features'): install_json['features'].append(feature) updated = True self.package_data['updates'].append( {'action': 'Updated Feature:', 'output': feature} ) return install_json, updated
Write install.json file. Args: install_json (dict): The contents of the install.json file. Returns: dict, bool: The contents of the install.json file and boolean value that is True if an update was made.
juraj-google-style
def prompt_for_password(url, user=None, default_user=None): if (user is None): default_user = (default_user or getpass.getuser()) while (user is None): user = compat.console_input('Enter username for {} [{}]: '.format(url, default_user)) if ((user.strip() == '') and default_user): user = default_user if user: pw = getpass.getpass('Enter password for {}@{} (Ctrl+C to abort): '.format(user, url)) if (pw or (pw == '')): return (user, pw) return None
Prompt for username and password. If a user name is passed, only prompt for a password. Args: url (str): hostname user (str, optional): Pass a valid name to skip prompting for a user name default_user (str, optional): Pass a valid name that is used as default when prompting for a user name Raises: KeyboardInterrupt if user hits Ctrl-C Returns: (username, password) or None
codesearchnet
def healthy_services(self, role=None): try: query = self.rr.table(self.table) if role: query = query.get_all(role, index='role') query = query.filter((lambda svc: (r.now().sub(svc['last_heartbeat']) < svc['ttl']))).order_by('load') result = query.run() return result except r.ReqlNonExistenceError: return []
Look up healthy services in the registry. A service is considered healthy if its 'last_heartbeat' was less than 'ttl' seconds ago Args: role (str, optional): role name Returns: If `role` is supplied, returns list of healthy services for the given role, otherwise returns list of all healthy services. May return an empty list.
codesearchnet
def get_select_expressions(self) -> Tuple[column_expression_builder.ColumnExpressionBuilder, ...]: return self._fields
Returns the fields used in the view and their corresponding expressions. Returns: An immutable dictionary of selected field names and the expression used to populate them.
github-repos
def _serialize_signature_def_map(signature_def_map: _SignatureDefMap) -> dict[str, bytes]: signature_def_map_serialized = {} for key, signature_def in signature_def_map.items(): signature_def_map_serialized[key] = signature_def.SerializeToString() return signature_def_map_serialized
Serializes SignatureDef values in `signature_def_map`. Args: signature_def_map: Signature key -> SignatureDef mapping. Returns: Signature def map where the values (`SignatureDef`) are serialized.
github-repos
def __add__(self, other): if not all(np.equal(self.frequencies, other.frequencies)): raise ValueError("Frequencies of both DOS are not compatible!") densities = self.densities + other.densities return PhononDos(self.frequencies, densities)
Adds two DOS together. Checks that frequency scales are the same. Otherwise, a ValueError is thrown. Args: other: Another DOS object. Returns: Sum of the two DOSs.
juraj-google-style
def format(self, s, pretty=None, expand=None): if (pretty is None): pretty = self.format_pretty if (expand is None): expand = self.format_expand formatter = ObjectStringFormatter(self, pretty=pretty, expand=expand) return formatter.format(s)
Format a string. Args: s (str): String to format, eg "hello {name}" pretty (bool): If True, references to non-string attributes such as lists are converted to basic form, with characters such as brackets and parenthesis removed. If None, defaults to the object's 'format_pretty' attribute. expand (`StringFormatType`): Expansion mode. If None, will default to the object's 'format_expand' attribute. Returns: The formatting string.
codesearchnet
def get_es_ids(obj, def_obj): try: path = "" for base in [def_obj.__class__] + list(def_obj.__class__.__bases__): if hasattr(base, 'es_defs') and base.es_defs: path = "%s/%s/" % (base.es_defs['kds_esIndex'][0], base.es_defs['kds_esDocType'][0]) continue except KeyError: path = "" if def_obj.subject.type == 'uri': obj['uri'] = def_obj.subject.clean_uri obj['id'] = path + make_es_id(obj['uri']) elif def_obj.subject.type == 'bnode': obj['id'] = path + def_obj.bnode_id() else: obj['id'] = path + make_es_id(str(obj['value'])) return obj
Returns the object updated with the 'id' and 'uri' fields for the elasticsearch document Args: obj: data object to update def_obj: the class instance that has definition values
juraj-google-style
def filter_devices(ads, func): results = [] for ad in ads: if func(ad): results.append(ad) return results
Finds the AndroidDevice instances from a list that match certain conditions. Args: ads: A list of AndroidDevice instances. func: A function that takes an AndroidDevice object and returns True if the device satisfies the filter condition. Returns: A list of AndroidDevice instances that satisfy the filter condition.
juraj-google-style
def _clip(params, ids, max_norm): def _rank(x): rank = ops.convert_to_tensor(x).get_shape().ndims if rank: return (rank, True) else: return (array_ops.rank(x), False) if max_norm is None: return params ids_rank, ids_static = _rank(ids) params_rank, params_static = _rank(params) return clip_ops.clip_by_norm(params, max_norm, axes=list(range(ids_rank, params_rank)) if ids_static and params_static else math_ops.range(ids_rank, params_rank))
Helper function for _embedding_lookup_and_transform. This function optionally clips embeddings to an l2-norm of max_norm. Args: params: A `Tensor` of embeddings retrieved by `gather`. ids: The `ids` argument that was passed to `gather`. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value. Returns: A `Tensor` with the same type as `params`.
github-repos
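The clipping itself is plain per-row l2-norm scaling; a NumPy sketch of the behaviour described above:

import numpy as np

# Rows with l2-norm above max_norm are rescaled onto the max_norm sphere; others pass through unchanged.
embeddings = np.array([[3.0, 4.0], [0.3, 0.4]])   # norms 5.0 and 0.5
max_norm = 1.0
norms = np.linalg.norm(embeddings, axis=1, keepdims=True)
clipped = np.where(norms > max_norm, embeddings * (max_norm / norms), embeddings)
print(clipped)  # first row scaled to [0.6, 0.8], second row unchanged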
def _VerifyValues(self, image, ksizes, strides, rates, padding, patches): ksizes = [1] + ksizes + [1] strides = [1] + strides + [1] rates = [1] + rates + [1] with self.session(): image_placeholder = array_ops.placeholder(dtypes.float32) with self.test_scope(): out_tensor = array_ops.extract_image_patches(image_placeholder, ksizes=ksizes, strides=strides, rates=rates, padding=padding, name='im2col') feed_dict = {image_placeholder: image} self.assertAllClose(patches, out_tensor.eval(feed_dict=feed_dict))
Tests input-output pairs for the ExtractImagePatches op. Args: image: Input tensor with shape: [batch, in_rows, in_cols, depth]. ksizes: Patch size specified as: [ksize_rows, ksize_cols]. strides: Output strides, specified as [stride_rows, stride_cols]. rates: Atrous rates, specified as [rate_rows, rate_cols]. padding: Padding type. patches: Expected output.
github-repos
def get_failed_enrollment_message(cls, users, enrolled_in): failed_emails = [user.email for user in users] return ('error', _('The following learners could not be enrolled in {enrolled_in}: {user_list}').format(enrolled_in=enrolled_in, user_list=', '.join(failed_emails)))
Create message for the users who were not able to be enrolled in a course or program. Args: users: An iterable of users who were not successfully enrolled enrolled_in (str): A string identifier for the course or program with which enrollment was attempted Returns: tuple: A 2-tuple containing a message type and message text
codesearchnet
def state_updates(self): warnings.warn('`Model.state_updates` will be removed in a future version. This property should not be used in TensorFlow 2.0, as `updates` are applied automatically.') state_updates = [] for layer in self.layers: if getattr(layer, 'stateful', False): if hasattr(layer, 'updates'): state_updates += layer.updates return state_updates
Deprecated, do NOT use! Returns the `updates` from all layers that are stateful. This is useful for separating training updates and state updates, e.g. when we need to update a layer's internal state during prediction. Returns: A list of update ops.
github-repos
def decode_conjure_enum_type(cls, obj, conjure_type): if not (isinstance(obj, str) or str(type(obj)) == "<type 'unicode'>"): raise Exception( 'Expected to find str type but found {} instead'.format( type(obj))) if obj in conjure_type.__members__: return conjure_type[obj] else: return conjure_type["UNKNOWN"]
Decodes json into a conjure enum type. Args: obj: the json object to decode conjure_type: a class object which is the enum type we're decoding into. Returns: An instance of enum of type conjure_type.
juraj-google-style
def init_c_overturn(step): rbot, rtop = misc.get_rbounds(step) xieut = step.sdat.par['tracersin']['fe_eut'] k_fe = step.sdat.par['tracersin']['k_fe'] xi0l = step.sdat.par['tracersin']['fe_cont'] xi0s = k_fe * xi0l xired = xi0l / xieut rsup = (rtop**3 - xired**(1 / (1 - k_fe)) * (rtop**3 - rbot**3))**(1 / 3) def initprof(rpos): if rpos < rsup: return xi0s * ((rtop**3 - rbot**3) / (rtop**3 - rpos**3))**(1 - k_fe) return xieut rad = np.linspace(rbot, rtop, 500) initprof = np.vectorize(initprof) return initprof(rad), rad
Initial concentration. This compute the resulting composition profile if fractional crystallization of a SMO is assumed. Args: step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData instance. Returns: tuple of :class:`numpy.array`: the composition and the radial position at which it is evaluated.
juraj-google-style
def fit(self, vecs, iter=20, seed=123):
    assert vecs.dtype == np.float32
    assert vecs.ndim == 2
    N, D = vecs.shape
    assert self.Ks < N, 'the number of training vector should be more than Ks'
    assert D % self.M == 0, 'input dimension must be dividable by M'
    self.Ds = int(D / self.M)
    np.random.seed(seed)
    if self.verbose:
        print('iter: {}, seed: {}'.format(iter, seed))
    self.codewords = np.zeros((self.M, self.Ks, self.Ds), dtype=np.float32)
    for m in range(self.M):
        if self.verbose:
            print('Training the subspace: {} / {}'.format(m, self.M))
        vecs_sub = vecs[:, m * self.Ds:(m + 1) * self.Ds]  # fixed: slice the m-th subspace
        self.codewords[m], _ = kmeans2(vecs_sub, self.Ks, iter=iter, minit='points')
    return self
Given training vectors, run k-means for each sub-space and create codewords for each sub-space. This function should be run once first of all. Args: vecs (np.ndarray): Training vectors with shape=(N, D) and dtype=np.float32. iter (int): The number of iteration for k-means seed (int): The seed for random process Returns: object: self
codesearchnet
def isCaCert(self, name): crtpath = self._getPathJoin('cas', ('%s.crt' % name)) return os.path.isfile(crtpath)
Checks if a CA certificate exists. Args: name (str): The name of the CA keypair. Examples: Check if the CA certificate for "myca" exists: exists = cdir.isCaCert('myca') Returns: bool: True if the certificate is present, False otherwise.
codesearchnet
def plot_neuron(ax, nrn, neurite_type=NeuriteType.all, plane='xy', soma_outline=True, diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA): plot_soma(ax, nrn.soma, plane=plane, soma_outline=soma_outline, linewidth=linewidth, color=color, alpha=alpha) for neurite in iter_neurites(nrn, filt=tree_type_checker(neurite_type)): plot_tree(ax, neurite, plane=plane, diameter_scale=diameter_scale, linewidth=linewidth, color=color, alpha=alpha) ax.set_title(nrn.name) ax.set_xlabel(plane[0]) ax.set_ylabel(plane[1])
Plots a 2D figure of the neuron, that contains a soma and the neurites Args: ax(matplotlib axes): on what to plot neurite_type(NeuriteType): an optional filter on the neurite type nrn(neuron): neuron to be plotted soma_outline(bool): should the soma be drawn as an outline plane(str): Any pair of 'xyz' diameter_scale(float): Scale factor multiplied with segment diameters before plotting linewidth(float): all segments are plotted with this width, but only if diameter_scale=None color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values
codesearchnet
def union_with_variable(self, variable: str, replacement: VariableReplacement) -> 'Substitution': new_subst = Substitution(self) new_subst.try_add_variable(variable, replacement) return new_subst
Try to create a new substitution with the given variable added. See :meth:`try_add_variable` for a version of this method that modifies the substitution in place. Args: variable_name: The name of the variable to add. replacement: The substitution for the variable. Returns: The new substitution with the variable_name added or merged. Raises: ValueError: if the variable cannot be merged because it conflicts with the existing substitution for the variable.
codesearchnet
def log_transition(self, transition, from_state, instance, *args, **kwargs): logger = logging.getLogger('xworkflows.transitions') try: instance_repr = u(repr(instance), 'ignore') except (UnicodeEncodeError, UnicodeDecodeError): instance_repr = u("<bad repr>") logger.info( u("%s performed transition %s.%s (%s -> %s)"), instance_repr, self.__class__.__name__, transition.name, from_state.name, transition.target.name)
Log a transition. Args: transition (Transition): the name of the performed transition from_state (State): the source state instance (object): the modified object Kwargs: Any passed when calling the transition
juraj-google-style
def compute_bleu_summaries(hook_args):
    decode_hparams = hook_args.decode_hparams
    if not (decode_hparams.decode_reference and decode_hparams.decode_to_file):
        return None
    values = []
    bleu = 100 * bleu_hook.bleu_wrapper(
        decode_hparams.decode_reference, decode_hparams.decode_to_file)
    values.append(tf.Summary.Value(tag="BLEU", simple_value=bleu))
    tf.logging.info("%s: BLEU = %6.2f" % (decode_hparams.decode_to_file, bleu))
    if hook_args.hparams.mlperf_mode:
        current_step = decode_hparams.mlperf_decode_step
        mlperf_log.transformer_print(
            key=mlperf_log.EVAL_TARGET, value=decode_hparams.mlperf_threshold)
        mlperf_log.transformer_print(
            key=mlperf_log.EVAL_ACCURACY,
            value={
                "epoch": max(current_step, 0),  # fixed: missing comma in the original call
                "value": bleu
            })
        mlperf_log.transformer_print(key=mlperf_log.EVAL_STOP)
        if bleu >= decode_hparams.mlperf_threshold:
            decode_hparams.set_hparam("mlperf_success", True)
    return values
Compute BLEU core summaries using the decoder output. Args: hook_args: DecodeHookArgs namedtuple Returns: A list of tf.Summary values if hook_args.hparams contains the reference file and the translated file.
juraj-google-style
def generate_md5_key(list_of_arguments): for arg in list_of_arguments: if not isinstance(arg, string_types): raise SyntaxError("Error in generate_md5_key: " "Argument: {0} is a {1}".format(arg, type(arg))) hash = hashlib.md5() hash.update(' '.join(list_of_arguments).encode('utf-8')) return hash.hexdigest()
Generate an md5-key from a list of arguments. Args: list_of_arguments: A list of strings Returns: A md5-key object generated from the list of strings.
juraj-google-style
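A minimal sketch reproducing the key derivation above with hashlib, using hypothetical argument strings:

import hashlib

# Join the arguments with spaces, encode as UTF-8, and take the MD5 hex digest.
args = ['chr1', '12345', 'A', 'T']
digest = hashlib.md5(' '.join(args).encode('utf-8')).hexdigest()
print(digest)  # 32-character hex key, stable for the same argument list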
def get_kinds(start=None, end=None): q = Kind.query() if start is not None and start != '': q = q.filter(Kind.key >= Kind.key_for_kind(start)) if end is not None: if end == '': return [] q = q.filter(Kind.key < Kind.key_for_kind(end)) return [x.kind_name for x in q]
Return all kinds in the specified range, for the current namespace. Args: start: only return kinds >= start if start is not None. end: only return kinds < end if end is not None. Returns: A list of kind names between the (optional) start and end values.
juraj-google-style
def get_pdf_from_html(html: str, header_html: str=None, footer_html: str=None, wkhtmltopdf_filename: str=_WKHTMLTOPDF_FILENAME, wkhtmltopdf_options: Dict[(str, Any)]=None, file_encoding: str='utf-8', debug_options: bool=False, debug_content: bool=False, debug_wkhtmltopdf_args: bool=True, fix_pdfkit_encoding_bug: bool=None, processor: str=_DEFAULT_PROCESSOR) -> bytes: result = make_pdf_from_html(on_disk=False, html=html, header_html=header_html, footer_html=footer_html, wkhtmltopdf_filename=wkhtmltopdf_filename, wkhtmltopdf_options=wkhtmltopdf_options, file_encoding=file_encoding, debug_options=debug_options, debug_content=debug_content, debug_wkhtmltopdf_args=debug_wkhtmltopdf_args, fix_pdfkit_encoding_bug=fix_pdfkit_encoding_bug, processor=processor) return result
Takes HTML and returns a PDF. See the arguments to :func:`make_pdf_from_html` (except ``on_disk``). Returns: the PDF binary as a ``bytes`` object
codesearchnet
def checkpoint(self, tasks=None): with self.checkpoint_lock: checkpoint_queue = None if tasks: checkpoint_queue = tasks else: checkpoint_queue = self.tasks checkpoint_dir = '{0}/checkpoint'.format(self.run_dir) checkpoint_dfk = (checkpoint_dir + '/dfk.pkl') checkpoint_tasks = (checkpoint_dir + '/tasks.pkl') if (not os.path.exists(checkpoint_dir)): try: os.makedirs(checkpoint_dir) except FileExistsError: pass with open(checkpoint_dfk, 'wb') as f: state = {'rundir': self.run_dir, 'task_count': self.task_count} pickle.dump(state, f) count = 0 with open(checkpoint_tasks, 'ab') as f: for task_id in checkpoint_queue: if ((not self.tasks[task_id]['checkpoint']) and self.tasks[task_id]['app_fu'].done() and (self.tasks[task_id]['app_fu'].exception() is None)): hashsum = self.tasks[task_id]['hashsum'] if (not hashsum): continue t = {'hash': hashsum, 'exception': None, 'result': None} try: r = self.memoizer.hash_lookup(hashsum).result() except Exception as e: t['exception'] = e else: t['result'] = r pickle.dump(t, f) count += 1 self.tasks[task_id]['checkpoint'] = True logger.debug('Task {} checkpointed'.format(task_id)) self.checkpointed_tasks += count if (count == 0): if (self.checkpointed_tasks == 0): logger.warn('No tasks checkpointed so far in this run. Please ensure caching is enabled') else: logger.debug('No tasks checkpointed in this pass.') else: logger.info('Done checkpointing {} tasks'.format(count)) return checkpoint_dir
Checkpoint the dfk incrementally to a checkpoint file. When called, every task that has completed but has not yet been checkpointed is checkpointed to a file. Kwargs: - tasks (List of task ids) : List of task ids to checkpoint. Default=None; if set to None, we iterate over all tasks held by the DFK. .. note:: Checkpointing only works if memoization is enabled Returns: Checkpoint dir if checkpoints were written successfully. By default the checkpoints are written to the RUNDIR of the current run under RUNDIR/checkpoint/{tasks.pkl, dfk.pkl}
codesearchnet
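A hedged sketch of how this method is typically driven: with app caching enabled and at least one app completed, calling checkpoint() on a loaded Parsl DataFlowKernel persists those results and returns the directory. How the dfk object is obtained and configured is assumed here, not taken from the source.

# dfk is assumed to be a Parsl DataFlowKernel with caching enabled and
# some completed apps; checkpoint() then writes dfk.pkl and tasks.pkl.
checkpoint_dir = dfk.checkpoint()
print("checkpoint written to", checkpoint_dir)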
async def checked_run(*cmd): logging.info('Running: %s', expand_cmd_str(cmd)) with logged_timer('{} finished'.format(get_cmd_name(cmd))): p = await asyncio.create_subprocess_exec( *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT) chunks = [] while True: chunk = await p.stdout.read(16 * 1024) if not chunk: break chunks.append(chunk) await p.wait() stdout = b''.join(chunks).decode()[:-1] if p.returncode: raise RuntimeError('Return code {} from process: {}\n{}'.format( p.returncode, expand_cmd_str(cmd), stdout)) return stdout
Run the given subprocess command in a coroutine. Args: *cmd: the command to run and its arguments. Returns: The output that the command wrote to stdout. Raises: RuntimeError: if the command returns a non-zero result.
juraj-google-style
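A usage sketch, assuming checked_run and its logging helpers are importable from the surrounding module and a POSIX environment where echo exists.

import asyncio

async def main():
    # Runs the command, collects stdout in memory, raises on non-zero exit.
    out = await checked_run("echo", "hello from checked_run")
    print(out)

asyncio.run(main())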
def make_movie(structures, output_filename='movie.mp4', zoom=1.0, fps=20, bitrate='10000k', quality=1, **kwargs): vis = StructureVis(**kwargs) vis.show_help = False vis.redraw() vis.zoom(zoom) sigfig = int((math.floor(math.log10(len(structures))) + 1)) filename = (('image{0:0' + str(sigfig)) + 'd}.png') for (i, s) in enumerate(structures): vis.set_structure(s) vis.write_image(filename.format(i), 3) filename = (('image%0' + str(sigfig)) + 'd.png') args = ['ffmpeg', '-y', '-i', filename, '-q:v', str(quality), '-r', str(fps), '-b:v', str(bitrate), output_filename] subprocess.Popen(args)
Generate a movie from a sequence of structures using vtk and ffmpeg. Args: structures ([Structure]): sequence of structures output_filename (str): filename for structure output. defaults to movie.mp4 zoom (float): A zoom to be applied to the visualizer. Defaults to 1.0. fps (int): Frames per second for the movie. Defaults to 20. bitrate (str): Video bitrate. Defaults to "10000k" (fairly high quality). quality (int): A quality scale. Defaults to 1. \\*\\*kwargs: Any kwargs supported by StructureVis to modify the images generated.
codesearchnet
def resolve(self, context, provider): resolve_variables(self.variables, context, provider) self.blueprint.resolve_variables(self.variables)
Resolve the Stack variables. This resolves the Stack variables and then prepares the Blueprint for rendering by passing the resolved variables to the Blueprint. Args: context (:class:`stacker.context.Context`): stacker context provider (:class:`stacker.provider.base.BaseProvider`): subclass of the base provider
juraj-google-style
def _PrintParsersCounter(self, parsers_counter, session_identifier=None): if not parsers_counter: return title = 'Events generated per parser' if session_identifier: title = '{0:s}: {1:s}'.format(title, session_identifier) table_view = views.ViewsFactory.GetTableView( self._views_format_type, column_names=['Parser (plugin) name', 'Number of events'], title=title) for key, value in sorted(parsers_counter.items()): if key == 'total': continue table_view.AddRow([key, value]) table_view.AddRow(['Total', parsers_counter['total']]) table_view.Write(self._output_writer)
Prints the parsers counter. Args: parsers_counter (collections.Counter): number of events per parser or parser plugin. session_identifier (Optional[str]): session identifier.
juraj-google-style
def _ReSearch(self): self.Search(*self._last_search_params)
Performs self.Search again with the previously used parameters. Returns: self.Search result.
github-repos
def with_rank_at_most(self, rank): if self.ndims is not None and self.ndims > rank: raise ValueError("Shape %s must have rank at most %d" % (self, rank)) else: return self
Returns a shape based on `self` with at most the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with at most the given rank. Raises: ValueError: If `self` does not represent a shape with at most the given `rank`.
juraj-google-style
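A quick usage sketch with tf.TensorShape, which exposes this method; the check only validates the rank and otherwise returns the shape unchanged. The exact error wording may differ between TensorFlow versions.

import tensorflow as tf

s = tf.TensorShape([2, 3])
print(s.with_rank_at_most(4))   # (2, 3) -- rank 2 <= 4, shape passes through
try:
    s.with_rank_at_most(1)      # rank 2 > 1, so this raises
except ValueError as e:
    print(e)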
def _init_step(pos, prev_step, func, psi_1, psi_2, quad_step): phi_0 = pos.f derphi_0 = pos.df step = func(psi_1 * prev_step) can_take = step.f > phi_0 result = _StepGuessResult(step=step, func_evals=1, can_take=can_take, may_terminate=tf.zeros_like(can_take)) if quad_step: q_koef = step.f - phi_0 - step.x * derphi_0 quad_step_success = tf.logical_and(step.f <= phi_0, q_koef > 0.0) def update_result_1(): new_x = tf.compat.v1.where(quad_step_success, -0.5 * tf.math.divide_no_nan(derphi_0 * step.x ** 2, q_koef), result.step.x) return _StepGuessResult(step=func(new_x), func_evals=result.func_evals + 1, can_take=tf.math.logical_or(result.can_take, quad_step_success), may_terminate=tf.math.logical_or(result.may_terminate, quad_step_success)) result = tf.cond(tf.math.reduce_any(quad_step_success), update_result_1, lambda: result) def update_result_2(): new_x = tf.compat.v1.where(can_take, result.step.x, psi_2 * prev_step) return _StepGuessResult(step=func(new_x), func_evals=result.func_evals + 1, can_take=tf.ones_like(can_take), may_terminate=result.may_terminate) result = tf.cond(tf.math.reduce_all(result.can_take), lambda: result, update_result_2) return result
Finds initial step size for line search at given point. Corresponds to I1-I2 in [HZ2006]. Args: pos: ValueAndGradient for current point. prev_step: Step size at previous iteration. func: Callable taking real `Tensor` and returning ValueAndGradient, describes scalar function for line search. psi_1: Real scalar `Tensor`. Factor to multiply previous step to get right point for quadratic interpolation. psi_2: Real scalar `Tensor`. Factor to multiply previous step if quadratic interpolation failed. quad_step: Boolean. Whether to try quadratic interpolation. Returns: _StepGuessResult namedtuple containing initial guess and additional data.
github-repos
def _type_digest(self, config: bool) -> Dict[(str, Any)]: res = {'base': self.yang_type()} if (self.name is not None): res['derived'] = self.name return res
Return receiver's type digest. Args: config: Specifies whether the type is on a configuration node.
codesearchnet
def tomof(self, maxline=MAX_MOF_LINE): mof = [] mof.append(_qualifiers_tomof(self.qualifiers, MOF_INDENT, maxline)) mof.append(u'class ') mof.append(self.classname) mof.append(u' ') if (self.superclass is not None): mof.append(u': ') mof.append(self.superclass) mof.append(u' ') mof.append(u'{\n') for p in self.properties.itervalues(): mof.append(u'\n') mof.append(p.tomof(False, MOF_INDENT, maxline)) for m in self.methods.itervalues(): mof.append(u'\n') mof.append(m.tomof(MOF_INDENT, maxline)) mof.append(u'\n};\n') return u''.join(mof)
Return a MOF string with the declaration of this CIM class. The returned MOF string conforms to the ``classDeclaration`` ABNF rule defined in :term:`DSP0004`. The order of properties, methods, parameters, and qualifiers is preserved. The :attr:`~pywbem.CIMClass.path` attribute of this object will not be included in the returned MOF string. Consistent with that, class path information is not included in the returned MOF string. Returns: :term:`unicode string`: MOF string.
codesearchnet
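A hedged sketch with pywbem: build a tiny class and print its MOF declaration. The class and property names are invented, and the properties-as-list constructor form assumes a reasonably recent pywbem release.

import pywbem

cls = pywbem.CIMClass(
    "CIM_Foo",
    properties=[pywbem.CIMProperty("Name", None, type="string")],
)
print(cls.tomof())   # MOF declaration: class CIM_Foo { string Name; };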
def LearnToExecute(batch_size, max_length=1, max_nesting=1, token_by_char=True, mode=Mode.TRAIN_COMBINE, loss_threshold=0.1, min_tries=DEFAULT_MIN_CURRICULUM_EVAL_TRIES, task_type=TaskType.ALG_CTRL): if (mode == Mode.TRAIN_COMBINE): curriculum = CombineCurriculum(max_length, max_nesting, loss_threshold, min_tries=min_tries) elif (mode == Mode.TRAIN_MIX): curriculum = MixCurriculum(max_length, max_nesting, loss_threshold, min_tries=min_tries) elif (mode == Mode.TRAIN_NAIVE): curriculum = NaiveCurriculum(max_length, max_nesting, loss_threshold, min_tries=min_tries) elif (mode == Mode.TEST): curriculum = BaselineCurriculum(max_length, max_nesting, loss_threshold, min_tries=0) else: raise ValueError('Invalid mode.') lte = LearnToExecuteState(batch_size, max_length, max_nesting, curriculum, token_by_char, task_type=task_type) types_ = (tf.float32, tf.float32, tf.float32, tf.int64, tf.int64) shapes_ = (tf.TensorShape([lte.num_steps, batch_size, lte.vocab_size]), tf.TensorShape([lte.num_steps_out, batch_size, lte.vocab_size]), tf.TensorShape([lte.num_steps_out, batch_size, lte.vocab_size]), tf.TensorShape([batch_size]), tf.TensorShape([batch_size])) dataset = tf.data.Dataset.from_generator(lte.make_batch, types_, shapes_) dataset.state = lte return dataset
Factory method for LearnToExecute Dataset module. Args: batch_size: (int). The number of elements in a mini-batch. max_length: (int). Maximum character length. max_nesting: (int). Maximum level of statement nesting. token_by_char: (bool). Tokenize by character or words? mode: One of the Mode values (TRAIN_COMBINE, TRAIN_MIX, TRAIN_NAIVE, TEST) selecting the curriculum. loss_threshold: (float) curriculum threshold for error below which the task difficulty is increased. min_tries: (int) minimum update tries for curriculum difficulty level. task_type: (string) defines the task by allowable ops (see TASK_TYPE_OPS). Returns: tf.data.Dataset for LearnToExecute sample generator with the LearnToExecuteState monkey patched into the `state` attribute. Raises: ValueError: in case of bad `mode`.
codesearchnet
def __validate_path_parameters(self, field, path_parameters): for param in path_parameters: segment_list = param.split('.') if (segment_list[0] != field.name): raise TypeError(("Subfield %r can't come from field %r." % (param, field.name))) self.__validate_simple_subfield(field.name, field, segment_list[1:])
Verifies that all path parameters correspond to an existing subfield. Args: field: An instance of a subclass of messages.Field. Should be the root level property name in each path parameter in path_parameters. For example, if the field is called 'foo', then each path parameter should begin with 'foo.'. path_parameters: A list of Strings representing URI parameter variables. Raises: TypeError: If one of the path parameters does not start with field.name.
codesearchnet
def execute(self, command, timeout=1): self.send(command) return self.read_untill(timeout)
Execute an rcon command on the server and fetch the result. Args: command --- executed command timeout --- read timeout Returns: bytes response
juraj-google-style
def token_network_register(self, registry_address: PaymentNetworkID, token_address: TokenAddress, channel_participant_deposit_limit: TokenAmount, token_network_deposit_limit: TokenAmount, retry_timeout: NetworkTimeout=DEFAULT_RETRY_TIMEOUT) -> TokenNetworkAddress: if (not is_binary_address(registry_address)): raise InvalidAddress('registry_address must be a valid address in binary') if (not is_binary_address(token_address)): raise InvalidAddress('token_address must be a valid address in binary') if (token_address in self.get_tokens_list(registry_address)): raise AlreadyRegisteredTokenAddress('Token already registered') contracts_version = self.raiden.contract_manager.contracts_version registry = self.raiden.chain.token_network_registry(registry_address) try: if (contracts_version == DEVELOPMENT_CONTRACT_VERSION): return registry.add_token_with_limits(token_address=token_address, channel_participant_deposit_limit=channel_participant_deposit_limit, token_network_deposit_limit=token_network_deposit_limit) else: return registry.add_token_without_limits(token_address=token_address) except RaidenRecoverableError as e: if ('Token already registered' in str(e)): raise AlreadyRegisteredTokenAddress('Token already registered') raise finally: next_block = (self.raiden.get_block_number() + 1) waiting.wait_for_block(self.raiden, next_block, retry_timeout)
Register the `token_address` in the blockchain. If the address is already registered but the event has not been processed, this function will block until the next block to make sure the event is processed. Raises: InvalidAddress: If the registry_address or token_address is not a valid address. AlreadyRegisteredTokenAddress: If the token is already registered. TransactionThrew: If the register transaction failed; this may happen because the account does not have enough balance to pay for the gas, or because this register call raced with another transaction and lost.
codesearchnet
def get_output_batch_type(self, input_element_type) -> typing.Optional[typing.Union[TypeConstraint, type]]: output_batch_type = None if self._process_defined and self._process_yields_batches: output_batch_type = self._get_element_type_from_return_annotation(self.process, input_element_type) if self._process_batch_defined and (not self._process_batch_yields_elements): process_batch_type = self._get_element_type_from_return_annotation(self.process_batch, self._get_input_batch_type_normalized(input_element_type)) if output_batch_type is not None and (not process_batch_type == output_batch_type): raise TypeError(f'DoFn {self!r} yields batches from both process and process_batch, but they produce different types:\n process: {output_batch_type}\n process_batch: {process_batch_type!r}') output_batch_type = process_batch_type return output_batch_type
Determine the batch type produced by this DoFn's ``process_batch`` implementation and/or its ``process`` implementation with ``@yields_batch``. The default implementation of this method observes the return type annotations on ``process_batch`` and/or ``process``. A Batched DoFn may override this method if a dynamic approach is required. Args: input_element_type: The **element type** of the input PCollection this DoFn is being applied to. Returns: ``None`` if this DoFn will never yield batches, else a Beam typehint or a native Python typehint.
github-repos
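As a hedged illustration of the return-annotation mechanism this method inspects, here is a minimal batched DoFn in the style of the Beam documentation; the class name and the factor of 2 are invented.

from typing import Iterator

import numpy as np
import apache_beam as beam

class MultiplyByTwo(beam.DoFn):
    # The np.ndarray annotations are what get_output_batch_type reads to
    # infer that this DoFn consumes and produces numpy batches.
    def process_batch(self, batch: np.ndarray) -> Iterator[np.ndarray]:
        yield batch * 2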
def load_file_to_str(path): with open(path, 'rt') as f: string = f.read().replace(linesep, '') if not string: raise LoadError('%s file is empty!' % path) return string
Load file into a string removing newlines Args: path (str): Path to file Returns: str: String contents of file
juraj-google-style
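A usage sketch: write a one-line file and read it back, assuming load_file_to_str is in scope and the platform's os.linesep is '\n' so the trailing newline is stripped.

with open("example.txt", "w") as f:
    f.write("hello world\n")

print(load_file_to_str("example.txt"))   # "hello world"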
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: raise NotImplementedError
Save only the vocabulary of the tokenizer (vocabulary + added tokens). This method won't save the configuration and special token mappings of the tokenizer. Use [`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer. Args: save_directory (`str`): The directory in which to save the vocabulary. filename_prefix (`str`, *optional*): An optional prefix to add to the named of the saved files. Returns: `Tuple(str)`: Paths to the files saved.
github-repos
def _transform_variable_to_expression(expression, node, context): variable_name = expression.variable_name if (not variable_name.startswith(u'$')): raise AssertionError(u'Unexpectedly received variable name {} that is not prefixed with "$"'.format(variable_name)) return bindparam(variable_name[1:])
Transform a Variable compiler expression into its SQLAlchemy expression representation. Args: expression: expression, Variable compiler expression. node: SqlNode, the SqlNode the expression applies to. context: CompilationContext, global compilation state and metadata. Returns: Expression, SQLAlchemy expression.
codesearchnet
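A standalone sketch of the SQLAlchemy piece used above, turning a "$"-prefixed runtime variable name into a bound parameter; the variable name is illustrative.

from sqlalchemy import bindparam

variable_name = "$min_price"
param = bindparam(variable_name[1:])   # strip the "$" -> bound parameter
print(param)                           # renders as :min_price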
def from_array(array): try: raw_data = blosc.pack_array(array) except Exception as e: raise ValueError("Could not compress data from array. {}".format(e)) return raw_data
Export a numpy array to a blosc array. Arguments: array: The numpy array to compress to blosc array Returns: Bytes/String. A blosc compressed array
juraj-google-style
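A round-trip usage sketch, assuming the python-blosc package is installed; blosc.unpack_array is its documented inverse of pack_array.

import numpy as np
import blosc

arr = np.arange(10, dtype=np.uint8)
packed = from_array(arr)                # compressed bytes via blosc.pack_array
restored = blosc.unpack_array(packed)   # inverse of pack_array
assert (arr == restored).all()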
def dprintx(passeditem, special=False): if DEBUGALL: if special: from pprint import pprint pprint(passeditem) else: print("%s%s%s" % (C_TI, passeditem, C_NORM))
Print text if DEBUGALL is set, optionally with pretty-printing. Args: passeditem (str): item to print special (bool): determines if item prints with PrettyPrint or regular print.
juraj-google-style
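A usage sketch, assuming this runs in the same module that defines the DEBUGALL flag and the C_TI / C_NORM color constants used by dprintx.

DEBUGALL = True                                            # module-level debug switch
dprintx("plain debug text")                                # colorized regular print
dprintx({"zone": "us-east-1", "nodes": 3}, special=True)   # pretty-printed path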
def verified(context, collaborator, test, outpath=None): written_files = 0 collaborator = collaborator or 'cust000' LOG.info('Exporting verified variants for cust {}'.format(collaborator)) adapter = context.obj['adapter'] verified_vars = adapter.verified(institute_id=collaborator) LOG.info('FOUND {} verified variants for institute {}'.format(len(verified_vars), collaborator)) if not verified_vars: LOG.warning('There are no verified variants for institute {} in database!'.format(collaborator)) return None document_lines = export_verified_variants(verified_vars) today = datetime.datetime.now().strftime('%Y-%m-%d') document_name = '.'.join(['verified_variants', collaborator, today]) + '.xlsx' if test and document_lines: written_files +=1 LOG.info('Success. Verified variants file contains {} lines'.format(len(document_lines))) return written_files if not outpath: outpath = str(os.getcwd()) workbook = Workbook(os.path.join(outpath,document_name)) Report_Sheet = workbook.add_worksheet() row = 0 for col,field in enumerate(VERIFIED_VARIANTS_HEADER): Report_Sheet.write(row,col,field) for row, line in enumerate(document_lines,1): for col, field in enumerate(line): Report_Sheet.write(row,col,field) workbook.close() if os.path.exists(os.path.join(outpath,document_name)): LOG.info('Success. Verified variants file of {} lines was written to disk'. format(len(document_lines))) written_files += 1 return written_files
Export variants which have been verified for an institute and write them to an excel file. Args: collaborator(str): institute id test(bool): True if the function is called for testing purposes outpath(str): path to output file Returns: written_files(int): number of written or simulated files
juraj-google-style