Dataset columns: code (string, lengths 20–4.93k), docstring (string, lengths 33–1.27k), source (string, 3 classes).
def AddEventData(self, event_data): self._RaiseIfNotWritable() event_data = self._PrepareAttributeContainer(event_data) identifier = event_data.GetIdentifier() lookup_key = identifier.CopyToString() self._event_data[lookup_key] = event_data
Adds event data. Args: event_data (EventData): event data. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
juraj-google-style
def getUrlMeta(self, url): return self.conn('GET', SkypeConnection.API_URL, params={'url': url}, auth=SkypeConnection.Auth.Authorize).json()
Retrieve various metadata associated with a URL, as seen by Skype. Args: url (str): address to ping for info Returns: dict: metadata for the website queried
codesearchnet
def _apply_merge_op_and_or_mask(self, op_fn, inputs): output = None output_mask = None for x in inputs: mask = backend.get_keras_mask(x) if mask is not None: mask = ops.broadcast_to(ops.expand_dims(mask, -1), ops.shape(x)) if output is None: output = x output_mask = mask continue if mask is not None: x = ops.where(mask, x, output) if output_mask is not None: output = ops.where(output_mask, output, x) if mask is not None and output_mask is not None: output_mask = ops.logical_or(output_mask, mask) else: output_mask = None output = op_fn(output, x) if output_mask is not None: output_mask = ops.any(output_mask, axis=-1, keepdims=False) backend.set_keras_mask(output, output_mask) return output
Merge a set of inputs by applying `op_fn` and ORing the masks. We use this for `Minimum` and `Maximum` as it handles the fact that there is no identity element. If applicable, the mask obtained by ORing all masks is set on the output. Args: op_fn: binary operation to apply to tensor pair. inputs: array of tensors to apply operation on.
github-repos
def get_dos(self, partial_dos=False, npts_mu=10000, T=None): spin = self.data.spin if isinstance(self.data.spin,int) else 1 energies, densities, vvdos, cdos = BL.BTPDOS(self.eband, self.vvband, npts=npts_mu) if T is not None: densities = BL.smoothen_DOS(energies, densities, T) tdos = Dos(self.efermi / units.eV, energies / units.eV, {Spin(spin): densities}) if partial_dos: tdos = self.get_partial_doses(tdos=tdos, npts_mu=npts_mu, T=T) return tdos
Return a Dos object interpolating bands. Args: partial_dos: if True, projections will be interpolated as well and partial doses will be returned. Projections must be available in the loader. npts_mu: number of energy points of the Dos T: parameter used to smooth the Dos
juraj-google-style
def get_pattern_link_topattern(self, patternnumber): _checkPatternNumber(patternnumber) address = _calculateRegisterAddress('linkpattern', patternnumber) return self.read_register(address)
Get the 'linked pattern' value for a given pattern. Args: patternnumber (integer): From 0-7 Returns: The 'linked pattern' value (int).
juraj-google-style
def cache_file(symbol, func, has_date, root, date_type='date'): cur_mod = sys.modules[func.__module__] data_tz = getattr(cur_mod, 'DATA_TZ') if hasattr(cur_mod, 'DATA_TZ') else 'UTC' cur_dt = utils.cur_time(typ=date_type, tz=data_tz, trading=False) if has_date: if hasattr(cur_mod, 'FILE_WITH_DATE'): file_fmt = getattr(cur_mod, 'FILE_WITH_DATE') else: file_fmt = '{root}/{typ}/{symbol}/{cur_dt}.parq' else: if hasattr(cur_mod, 'FILE_NO_DATE'): file_fmt = getattr(cur_mod, 'FILE_NO_DATE') else: file_fmt = '{root}/{typ}/{symbol}.parq' return data_file( file_fmt=file_fmt, root=root, cur_dt=cur_dt, typ=func.__name__, symbol=symbol )
Data file path. Args: symbol: symbol func: function used to categorize data has_date: whether the data file contains a date root: root path date_type: parameter passed to utils.cur_time, [date, time, time_path, ...] Returns: str: data file path
juraj-google-style
def request_and_check(self, url, method='get', expected_content_type=None, **kwargs): assert (method in ['get', 'post']) result = self.driver.request(method, url, **kwargs) if (result.status_code != requests.codes.ok): raise RuntimeError(('Error requesting %r, status = %d' % (url, result.status_code))) if (expected_content_type is not None): content_type = result.headers.get('content-type', '') if (not re.match(expected_content_type, content_type)): raise RuntimeError(('Error requesting %r, content type %r does not match %r' % (url, content_type, expected_content_type))) return result
Performs a request, and checks that the status is OK, and that the content-type matches expectations. Args: url: URL to request method: either 'get' or 'post' expected_content_type: prefix to match response content-type against **kwargs: passed to the request method directly. Raises: RuntimeError if status_code does not match.
codesearchnet
def initialize_logger(debug): level = (logging.DEBUG if debug else logging.INFO) logger = logging.getLogger('cucco') logger.setLevel(level) formatter = logging.Formatter('%(asctime)s %(levelname).1s %(message)s') console_handler = logging.StreamHandler() console_handler.setLevel(level) console_handler.setFormatter(formatter) logger.addHandler(console_handler) return logger
Set up logger to be used by the library. Args: debug: Whether to use debug level or not. Returns: A logger ready to be used.
codesearchnet
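The entry above is nearly self-contained; a short usage sketch follows (the 'cucco' logger name and the one-letter level format come from the entry, the demo messages are illustrative only).

```python
import logging

def initialize_logger(debug):
    # Same wiring as the entry: level switches on the debug flag,
    # one stream handler with a one-letter level code in the format.
    level = logging.DEBUG if debug else logging.INFO
    logger = logging.getLogger('cucco')
    logger.setLevel(level)
    formatter = logging.Formatter('%(asctime)s %(levelname).1s %(message)s')
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger

log = initialize_logger(debug=True)
log.debug('shown because debug=True')
log.info('shown at INFO and above')
```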
def process_configs(file_lookup, app_config_format, pipeline_config): app_configs = collections.defaultdict(dict) for env in ENVS: file_json = app_config_format.format(env=env) try: env_config = file_lookup.json(filename=file_json) app_configs[env] = apply_region_configs(env_config) except FileNotFoundError: LOG.critical('Application configuration not available for %s.', env) continue try: app_configs['pipeline'] = file_lookup.json(filename=pipeline_config) except FileNotFoundError: LOG.warning('Unable to process pipeline.json. Using defaults.') app_configs['pipeline'] = {'env': ['stage', 'prod']} LOG.debug('Application configs:\n%s', app_configs) return app_configs
Processes the configs from lookup sources. Args: file_lookup (FileLookup): Source to look for file/config app_config_format (str): The format for application config files. pipeline_config (str): Name/path of the pipeline config Returns: dict: Retrieved application config
codesearchnet
def lit(literal: Sequence[Input], *literals: Sequence[Sequence[Input]]) -> Parser: if (len(literals) > 0): return AlternativeParser(options.handle_literal(literal), *map(options.handle_literal, literals)) else: return options.handle_literal(literal)
Match a literal sequence. In the ``TextParsers`` context, this matches the literal string provided. In the ``GeneralParsers`` context, this matches a sequence of input. If multiple literals are provided, they are treated as alternatives. e.g. ``lit('+', '-')`` is the same as ``lit('+') | lit('-')``. Args: literal: A literal to match *literals: Alternative literals to match Returns: A ``LiteralParser`` in the ``GeneralContext``, a ``LiteralStringParser`` in the ``TextParsers`` context, and an ``AlternativeParser`` if multiple arguments are provided.
codesearchnet
def list(name, default=None, allow_none=False, fallback=None, separator=','): value = read(name, default, allow_none, fallback=fallback) if isinstance(value, builtins.list): return value elif isinstance(value, builtins.str): return _str_to_list(value, separator) elif ((value is None) and allow_none): return None else: return [builtins.str(value)]
Get a list of strings or the default. The individual list elements are whitespace-stripped. Args: name: The environment variable name default: The default value to use if no environment variable is found allow_none: If the return value can be `None` (i.e. optional) separator: The list item separator character or pattern
codesearchnet
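A self-contained sketch of the separator-splitting behaviour described above; `_read_env_list` is a hypothetical stand-in for the library's `read`/`_str_to_list` pair, not its real API.

```python
import os

def _read_env_list(name, default=None, separator=','):
    # Hypothetical stand-in: split an environment variable into a
    # whitespace-stripped list, returning `default` when the variable is unset.
    raw = os.environ.get(name)
    if raw is None:
        return default
    return [item.strip() for item in raw.split(separator)]

os.environ['HOSTS'] = ' a.example.com , b.example.com '
assert _read_env_list('HOSTS') == ['a.example.com', 'b.example.com']
assert _read_env_list('MISSING', default=[]) == []
```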
def validate_and_copy_one_submission(self, submission_path): if os.path.exists(self.download_dir): shutil.rmtree(self.download_dir) os.makedirs(self.download_dir) if os.path.exists(self.validate_dir): shutil.rmtree(self.validate_dir) os.makedirs(self.validate_dir) logging.info('Processing submission: %s', submission_path) local_path = self.copy_submission_locally(submission_path) metadata = self.base_validator.validate_submission(local_path) if (not metadata): logging.error('Submission "%s" is INVALID', submission_path) self.stats.add_failure() return submission_type = metadata['type'] container_name = metadata['container_gpu'] logging.info('Submission "%s" is VALID', submission_path) self.list_of_containers.add(container_name) self.stats.add_success(submission_type) if self.do_copy: submission_id = '{0:04}'.format(self.cur_submission_idx) self.cur_submission_idx += 1 self.copy_submission_to_destination(submission_path, TYPE_TO_DIR[submission_type], submission_id) self.id_to_path_mapping[submission_id] = submission_path
Validates one submission and copies it to target directory. Args: submission_path: path in Google Cloud Storage of the submission file
codesearchnet
def gunzip_file(gz_path, new_path): if tf.gfile.Exists(new_path): tf.logging.info(('File %s already exists, skipping unpacking' % new_path)) return tf.logging.info(('Unpacking %s to %s' % (gz_path, new_path))) mode = (stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IROTH) os.chmod(os.path.dirname(new_path), mode) with gzip.open(gz_path, 'rb') as gz_file: with tf.gfile.GFile(new_path, mode='wb') as new_file: for line in gz_file: new_file.write(line)
Unzips from gz_path into new_path. Args: gz_path: path to the zipped file. new_path: path to where the file will be unzipped.
codesearchnet
def assert_equal_graph_def_v1(actual: graph_pb2.GraphDef, expected: graph_pb2.GraphDef, checkpoint_v2: bool=False, hash_table_shared_name: bool=False) -> None: assert_equal_graph_def(actual, expected, checkpoint_v2, hash_table_shared_name)
Asserts that two `GraphDef`s are (mostly) the same. Compares two `GraphDef` protos for equality, ignoring versions and ordering of nodes, attrs, and control inputs. Node names are used to match up nodes between the graphs, so the naming of nodes must be consistent. Args: actual: The `GraphDef` we have. expected: The `GraphDef` we expected. checkpoint_v2: boolean determining whether to ignore randomized attribute values that appear in V2 checkpoints. hash_table_shared_name: boolean determining whether to ignore randomized shared_names that appear in HashTableV2 op defs. Raises: AssertionError: If the `GraphDef`s do not match. TypeError: If either argument is not a `GraphDef`.
github-repos
def __init__(self, timestamp=None): super(OLEAutomationDate, self).__init__() self._precision = definitions.PRECISION_1_MICROSECOND self._timestamp = timestamp
Initializes an OLE Automation date. Args: timestamp (Optional[float]): OLE Automation date.
juraj-google-style
def coerce_to_pendulum(x: PotentialDatetimeType, assume_local: bool = False) -> Optional[DateTime]: if not x: return None if isinstance(x, DateTime): return x tz = get_tz_local() if assume_local else get_tz_utc() if isinstance(x, datetime.datetime): return pendulum.instance(x, tz=tz) elif isinstance(x, datetime.date): midnight = DateTime.min.time() dt = DateTime.combine(x, midnight) return pendulum.instance(dt, tz=tz) elif isinstance(x, str): return pendulum.parse(x, tz=tz) else: raise ValueError("Don't know how to convert to DateTime: " "{!r}".format(x))
Converts something to a :class:`pendulum.DateTime`. Args: x: something that may be coercible to a datetime assume_local: if ``True``, assume local timezone; if ``False``, assume UTC Returns: a :class:`pendulum.DateTime`, or ``None``. Raises: pendulum.parsing.exceptions.ParserError: if a string fails to parse ValueError: if no conversion possible
juraj-google-style
def _RemoveAllFlagAppearances(self, name): flag_dict = self.FlagDict() if name not in flag_dict: raise exceptions.UnrecognizedFlagError(name) flag = flag_dict[name] names_to_remove = {name} names_to_remove.add(flag.name) if flag.short_name: names_to_remove.add(flag.short_name) for n in names_to_remove: self.__delattr__(n)
Removes flag with name for all appearances. A flag can be registered with its long name and an optional short name. This method removes both of them. This is different than __delattr__. Args: name: Either flag's long name or short name. Raises: UnrecognizedFlagError: When flag name is not found.
juraj-google-style
def get_link_or_none(pattern_name, request, view_kwargs=None): from is_core.patterns import reverse_pattern pattern = reverse_pattern(pattern_name) assert (pattern is not None), 'Invalid pattern name {}'.format(pattern_name) if pattern.has_permission('get', request, view_kwargs=view_kwargs): return pattern.get_url_string(request, view_kwargs=view_kwargs) else: return None
Helper that generates a URL from a pattern name and kwargs and checks whether the current request has permission to open the URL. If not, None is returned. Args: pattern_name (str): slug which is used for view registration to pattern request (django.http.request.HttpRequest): Django request object view_kwargs (dict): dict of kwargs necessary for the URL generator Returns: str or None: the URL string if the request has permission, otherwise None
codesearchnet
def _get_jwt_for_audience(self, audience): token, expiry = self._cache.get(audience, (None, None)) if token is None or expiry < _helpers.utcnow(): token, expiry = self._make_jwt_for_audience(audience) self._cache[audience] = token, expiry return token
Get a JWT For a given audience. If there is already an existing, non-expired token in the cache for the audience, that token is used. Otherwise, a new token will be created. Args: audience (str): The intended audience. Returns: bytes: The encoded JWT.
juraj-google-style
def outputs(self) -> Mapping[str, Mapping[int, str]]: common_outputs = self._tasks_to_common_outputs[self.task] return copy.deepcopy(common_outputs)
Mapping containing the axis definition of the output tensors to provide to the model Returns: For each output: its name associated to the axes symbolic name and the axis position within the tensor
github-repos
def _get(self, obj): if (not hasattr(obj, '_property_values')): raise RuntimeError(("Cannot get a property value '%s' from a %s instance before HasProps.__init__" % (self.name, obj.__class__.__name__))) if (self.name not in obj._property_values): return self._get_default(obj) else: return obj._property_values[self.name]
Internal implementation of instance attribute access for the ``BasicPropertyDescriptor`` getter. If the value has been explicitly set by a user, return that value. Otherwise, return the default. Args: obj (HasProps) : the instance to get a value of this property for Returns: object Raises: RuntimeError If the |HasProps| instance has not yet been initialized, or if this descriptor is on a class that is not a |HasProps|.
codesearchnet
def add_error(self, error): self._count += 1 self._record.add_error('expect@%s+%s' % (time.time(), self._count), error)
Record an error from expect APIs. This method generates a position stamp for the expect. The stamp is composed of a timestamp and the number of errors recorded so far. Args: error: Exception or signals.ExceptionRecord, the error to add.
github-repos
def _assertOpOutputMatchesExpected(self, op, inp, expected, equality_test=None, rtol=0.001, atol=1e-05): with self.session() as session: with self.test_scope(): pinp = array_ops.placeholder(dtypes.as_dtype(inp.dtype), inp.shape, name='a') output = op(pinp) result = session.run(output, {pinp: inp}) if equality_test is None: self.assertEqual(output.dtype, expected.dtype) self.assertAllCloseAccordingToType(expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03) else: equality_test(result, expected, rtol=rtol, atol=atol)
Verifies that 'op' produces 'expected' when fed input 'inp' . Args: op: operator to test inp: numpy input array to use as input to 'op'. expected: numpy array representing the expected output of 'op'. equality_test: either None, or a function that tests two numpy arrays for equality. If None, self.assertAllClose is used. rtol: relative tolerance for equality test. atol: absolute tolerance for equality test.
github-repos
def animation(frame_function: types.FrameFunction) -> types.Animation: animation_ = core.Animation(frame_function) @functools.wraps(frame_function) def wrapper(*args, **kwargs): return animation_(*args, **kwargs) return wrapper
Turn a FrameFunction into an Animation. Args: frame_function: A function that returns a FrameGenerator. Returns: an Animation decorator function.
codesearchnet
def GetAnalysisStatusUpdateCallback(self): if (self._mode == self.MODE_LINEAR): return self._PrintAnalysisStatusUpdateLinear if (self._mode == self.MODE_WINDOW): return self._PrintAnalysisStatusUpdateWindow return None
Retrieves the analysis status update callback function. Returns: function: status update callback function or None if not available.
codesearchnet
def get(self): return self._quantile_tracker.get()
Calculates and returns the median (q = 0.5). Returns: float: The median of the values in the window setting specified in the internal quantile tracker. Returns NaN if the window is empty.
github-repos
def integer_key_convert(dictin, dropfailedkeys=False): return key_value_convert(dictin, keyfn=int, dropfailedkeys=dropfailedkeys)
Convert keys of dictionary to integers Args: dictin (DictUpperBound): Input dictionary dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with keys converted to integers
juraj-google-style
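A minimal sketch of what the delegated `key_value_convert(..., keyfn=int)` call does; the failure handling (re-raising when `dropfailedkeys` is False) is an assumption, not the library's documented behaviour.

```python
def integer_key_convert(dictin, dropfailedkeys=False):
    # Convert each key with int(); optionally drop entries whose key
    # cannot be converted (re-raising otherwise is assumed here).
    out = {}
    for key, value in dictin.items():
        try:
            out[int(key)] = value
        except (TypeError, ValueError):
            if dropfailedkeys:
                continue
            raise
    return out

assert integer_key_convert({'1': 'a', '2': 'b'}) == {1: 'a', 2: 'b'}
assert integer_key_convert({'1': 'a', 'x': 'b'}, dropfailedkeys=True) == {1: 'a'}
```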
def decrypt(self, message): message = json.loads(message) unencrypted_msg = [] for line in message: enc_line = binascii.a2b_base64(line) unencrypted_line = rsa.decrypt(enc_line, self.private_key) unencrypted_msg.append(unencrypted_line) unencrypted_msg = "".join(unencrypted_msg) return unencrypted_msg
Decrypts a string using our own private key object. Args: message (string): The string of the message to decrypt. Returns: The unencrypted string.
juraj-google-style
def get_repo_data(saltenv='base'): repo_details = _get_repo_details(saltenv) if (repo_details.winrepo_age == (- 1)): log.debug('No winrepo.p cache file. Refresh pkg db now.') refresh_db(saltenv=saltenv) if ('winrepo.data' in __context__): log.trace('get_repo_data returning results from __context__') return __context__['winrepo.data'] else: log.trace('get_repo_data called reading from disk') try: serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(repo_details.winrepo_file, 'rb') as repofile: try: repodata = salt.utils.data.decode((serial.loads(repofile.read()) or {})) __context__['winrepo.data'] = repodata return repodata except Exception as exc: log.exception(exc) return {} except IOError as exc: log.error('Not able to read repo file') log.exception(exc) return {}
Returns the existing package metadata db. Will create it, if it does not exist, however will not refresh it. Args: saltenv (str): Salt environment. Default ``base`` Returns: dict: A dict containing contents of metadata db. CLI Example: .. code-block:: bash salt '*' pkg.get_repo_data
codesearchnet
def evaluate_repeatedly(self, accuracy, num_steps, feed_vars=(), feed_data=None, summary_tag=None, evaluation_times=(- 1)): current_checkpoint = None try: for i in itertools.count(0): with self.session() as sess: current_checkpoint = self.load_new_checkpoint_when_available(sess, current_checkpoint) self._run_init_test_vars_op() accuracy_result = self.evaluate_model(accuracy, num_steps, summary_tag=summary_tag, print_every=0, feed_vars=feed_vars, feed_data=feed_data) if (not summary_tag): print(('[%d] %s' % (sess.run(bookkeeper.global_step()), accuracy_result))) if ((i + 1) == evaluation_times): return accuracy_result finally: print('Shutting down') sys.stdout.flush() self.stop_queues()
Runs the evaluation in a loop for `evaluation_times`. On each iteration, `evaluate_model` is called with the supplied arguments. This manages the queue threads itself. Args: accuracy: The metric that is being evaluated. num_steps: The number of steps to run in the evaluator. feed_vars: A list or tuple of the variables that will be fed. feed_data: A generator that produces tuples of the same length as feed_vars. summary_tag: If provided, the final result of each evaluation will be published to this tag. evaluation_times: Run this loop for this many times or forever if it is `-1`. Returns: The final evaluation result from `evaluate_model` if `evaluation_times` ever ends.
codesearchnet
def filter_aliases(alias_table): for alias in alias_table.sections(): if alias_table.has_option(alias, 'command'): yield (alias.split()[0], remove_pos_arg_placeholders(alias_table.get(alias, 'command')))
Filter aliases that do not have a command field in the configuration file. Args: alias_table: The alias table. Yield: A tuple with [0] being the first word of the alias and [1] being the command that the alias points to.
juraj-google-style
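A runnable sketch of the filtering behaviour, using `configparser` in place of the CLI's alias table object and a no-op stand-in for `remove_pos_arg_placeholders` (both are assumptions for illustration).

```python
import configparser

def remove_pos_arg_placeholders(command):
    # Stand-in only: the real helper strips positional-argument placeholders.
    return command

def filter_aliases(alias_table):
    # Yield (first word of alias, target command) for sections that define 'command'.
    for alias in alias_table.sections():
        if alias_table.has_option(alias, 'command'):
            yield alias.split()[0], remove_pos_arg_placeholders(alias_table.get(alias, 'command'))

table = configparser.ConfigParser()
table['rg {{ group }}'] = {'command': 'group show --name {{ group }}'}
table['broken'] = {}  # no 'command' field, so it is filtered out
print(list(filter_aliases(table)))  # [('rg', 'group show --name {{ group }}')]
```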
def reload(self, napps=None): client = NAppsClient(self._config) client.reload_napps(napps)
Reload a NApp or all NApps. Args: napps (list): NApp list to be reloaded. Raises: requests.HTTPError: When there's a server error.
juraj-google-style
def prepare_to_run_task(context, claim_task): current_task_info = {} context.claim_task = claim_task current_task_info['taskId'] = get_task_id(claim_task) current_task_info['runId'] = get_run_id(claim_task) log.info("Going to run taskId {taskId} runId {runId}!".format( **current_task_info )) context.write_json( os.path.join(context.config['work_dir'], 'current_task_info.json'), current_task_info, "Writing current task info to {path}..." ) return current_task_info
Given a `claim_task` json dict, prepare the `context` and `work_dir`. Set `context.claim_task`, and write a `work_dir/current_task_info.json` Args: context (scriptworker.context.Context): the scriptworker context. claim_task (dict): the claim_task dict. Returns: dict: the contents of `current_task_info.json`
juraj-google-style
def __init__(self, channel): self.LeaseGrant = channel.unary_unary( '/etcdserverpb.Lease/LeaseGrant', request_serializer=rpc__pb2.LeaseGrantRequest.SerializeToString, response_deserializer=rpc__pb2.LeaseGrantResponse.FromString, ) self.LeaseRevoke = channel.unary_unary( '/etcdserverpb.Lease/LeaseRevoke', request_serializer=rpc__pb2.LeaseRevokeRequest.SerializeToString, response_deserializer=rpc__pb2.LeaseRevokeResponse.FromString, ) self.LeaseKeepAlive = channel.stream_stream( '/etcdserverpb.Lease/LeaseKeepAlive', request_serializer=rpc__pb2.LeaseKeepAliveRequest.SerializeToString, response_deserializer=rpc__pb2.LeaseKeepAliveResponse.FromString, ) self.LeaseTimeToLive = channel.unary_unary( '/etcdserverpb.Lease/LeaseTimeToLive', request_serializer=rpc__pb2.LeaseTimeToLiveRequest.SerializeToString, response_deserializer=rpc__pb2.LeaseTimeToLiveResponse.FromString, ) self.LeaseLeases = channel.unary_unary( '/etcdserverpb.Lease/LeaseLeases', request_serializer=rpc__pb2.LeaseLeasesRequest.SerializeToString, response_deserializer=rpc__pb2.LeaseLeasesResponse.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def apply_to_tensor(self, tensor, assign_tuple_sharding=False, use_sharding_op=False, unspecified_dims=None): if unspecified_dims: assert use_sharding_op and (not assign_tuple_sharding) proto = self._proto if isinstance(tensor, resource_variable_ops.BaseResourceVariable) and context.xla_sharding_for_resource_variables_enabled(): if assign_tuple_sharding: proto = self._create_tuple_proto(num_outputs=1) tensor._set_xla_sharding(proto) return tensor if use_sharding_op: if assign_tuple_sharding: proto = self._create_tuple_proto(num_outputs=1) tensor = tf2xla.sharding(tensor, sharding=proto.SerializeToString()) else: tensor = tf2xla.sharding(tensor, sharding=proto.SerializeToString(), unspecified_dims=unspecified_dims or []) elif assign_tuple_sharding or len(tensor.op.outputs) > 1: proto = self._get_or_create_tuple_proto(tensor.op) tuple_shardings = list(proto.tuple_shardings) tuple_shardings[tensor.value_index] = self._proto proto = xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.TUPLE, tuple_shardings=tuple_shardings) tensor.op._set_attr('_XlaSharding', attr_value_pb2.AttrValue(s=proto.SerializeToString())) return tensor
Applies this Sharding attribute to `tensor`. Args: tensor: A tf.Tensor to split. assign_tuple_sharding: If the sharding type should be a tuple. use_sharding_op: Whether to create a sharding op on `tensor`. unspecified_dims: An optional list of dimensions unspecified. Returns: The tensor with Sharding attribute.
github-repos
def choose_branch(exclude=None): if (exclude is None): master = conf.get('git.master_branch', 'master') develop = conf.get('git.devel_branch', 'develop') exclude = {master, develop} branches = list((set(git.branches()) - exclude)) for (i, branch_name) in enumerate(branches): shell.cprint('<90>[{}] <33>{}'.format((i + 1), branch_name)) choice = 0 while ((choice < 1) or (choice > len(branches))): prompt = 'Pick a base branch from the above [1-{}]'.format(len(branches)) choice = click.prompt(prompt, value_proc=int) if (not (1 <= choice <= len(branches))): fmt = 'Invalid choice {}, you must pick a number between {} and {}' log.err(fmt.format(choice, 1, len(branches))) return branches[(choice - 1)]
Show the user a menu to pick a branch from the existing ones. Args: exclude (list[str]): List of branch names to exclude from the menu. By default it will exclude master and develop branches. To show all branches pass an empty array here. Returns: str: The name of the branch chosen by the user. If the user inputs an invalid choice, he will be asked again (and again) until he picks a a valid branch.
codesearchnet
def decode(self, ids, strip_extraneous=False): if strip_extraneous: ids = strip_ids(ids, list(range((self._num_reserved_ids or 0)))) return unicode_to_native(tokenizer.decode(self._subtoken_ids_to_tokens(ids)))
Converts a sequence of subtoken ids to a native string. Args: ids: a list of integers in the range [0, vocab_size) strip_extraneous: bool, whether to strip off extraneous tokens (EOS and PAD). Returns: a native string
codesearchnet
def read_html_file(data_dir, fileroot, encoding=None): fname = os.path.join( data_dir, RAW_HTML_DIRNAME, fileroot + RAW_HTML_EXT) encodings = (encoding,) if encoding else ('utf-8', 'iso-8859-1') for encoding in encodings: try: with io.open(fname, mode='rt', encoding=encoding) as f: raw_html = f.read() break except (UnicodeDecodeError, UnicodeError): raw_html = None return ftfy.fix_encoding(raw_html).strip()
Read the HTML file corresponding to identifier ``fileroot`` in the raw HTML directory below the root ``data_dir``. Args: data_dir (str) fileroot (str) encoding (str) Returns: str
juraj-google-style
def write_pattern(lines_per_file, no_data=False, return_filenames=False): temp_dir = tempfile.mkdtemp() all_data = [] file_name = None start_index = 0 for i in range(len(lines_per_file)): file_name, data = write_data(lines_per_file[i], no_data=no_data, directory=temp_dir, prefix='mytemp') if return_filenames: all_data.extend(zip([file_name] * len(data), data)) else: all_data.extend(data) start_index += lines_per_file[i] assert file_name return (file_name[:file_name.rfind(os.path.sep)] + os.path.sep + 'mytemp*', all_data)
Writes a pattern of temporary files. Args: lines_per_file (List[int]): The number of lines to write per file. no_data (bool): If :data:`True`, empty lines will be written, otherwise each line will contain a concatenation of b'line' and the line number. return_filenames (bool): If True, returned list will contain (filename, data) pairs. Returns: Tuple[str, List[Union[str, (str, str)]]]: A tuple of the filename pattern and a list of the utf-8 decoded written data or (filename, data) pairs.
github-repos
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0): local_buffer = utils.BytearrayStream() if self._maximum_items: self._maximum_items.write(local_buffer, kmip_version=kmip_version) if self._offset_items: self._offset_items.write(local_buffer, kmip_version=kmip_version) if self._storage_status_mask: self._storage_status_mask.write( local_buffer, kmip_version=kmip_version ) if self._object_group_member: self._object_group_member.write( local_buffer, kmip_version=kmip_version ) if kmip_version < enums.KMIPVersion.KMIP_2_0: if self._attributes: for attribute in self.attributes: attribute.write( local_buffer, kmip_version=kmip_version ) else: if self._attributes: template_attribute = objects.TemplateAttribute( attributes=self.attributes ) attributes = objects.convert_template_attribute_to_attributes( template_attribute ) attributes.write(local_buffer, kmip_version=kmip_version) else: raise exceptions.InvalidField( "The Locate request payload is missing the attributes " "list." ) self.length = local_buffer.length() super(LocateRequestPayload, self).write( output_buffer, kmip_version=kmip_version ) output_buffer.write(local_buffer.buffer)
Write the data encoding the Locate request payload to a buffer. Args: output_buffer (stream): A data buffer in which to encode object data, supporting a write method. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def line_count(fn): with open(fn) as f: for (i, l) in enumerate(f): pass return (i + 1)
Get the line count of a file. Args: fn (str): Path to file Returns: Number of lines in file (int)
codesearchnet
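A quick usage sketch of the line-count helper on a temporary file; the `i = -1` guard added here covers the empty-file case, where the original enumerate-based version would fail.

```python
import tempfile

def line_count(fn):
    # Enumerate lines and return last index + 1; i = -1 guards the empty-file case.
    with open(fn) as f:
        i = -1
        for i, _ in enumerate(f):
            pass
    return i + 1

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('first\nsecond\nthird\n')
print(line_count(tmp.name))  # 3
```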
async def update_read_timestamp(self, read_timestamp=None): if (read_timestamp is None): read_timestamp = (self.events[(- 1)].timestamp if self.events else datetime.datetime.now(datetime.timezone.utc)) if (read_timestamp > self.latest_read_timestamp): logger.info('Setting {} latest_read_timestamp from {} to {}'.format(self.id_, self.latest_read_timestamp, read_timestamp)) state = self._conversation.self_conversation_state state.self_read_state.latest_read_timestamp = parsers.to_timestamp(read_timestamp) try: (await self._client.update_watermark(hangouts_pb2.UpdateWatermarkRequest(request_header=self._client.get_request_header(), conversation_id=hangouts_pb2.ConversationId(id=self.id_), last_read_timestamp=parsers.to_timestamp(read_timestamp)))) except exceptions.NetworkError as e: logger.warning('Failed to update read timestamp: {}'.format(e)) raise
Update the timestamp of the latest event which has been read. This method will avoid making an API request if it will have no effect. Args: read_timestamp (datetime.datetime): (optional) Timestamp to set. Defaults to the timestamp of the newest event. Raises: .NetworkError: If the timestamp cannot be updated.
codesearchnet
def check_origin(self, origin): from ..util import check_whitelist parsed_origin = urlparse(origin) origin_host = parsed_origin.netloc.lower() allowed_hosts = self.application.websocket_origins if settings.allowed_ws_origin(): allowed_hosts = set(settings.allowed_ws_origin()) allowed = check_whitelist(origin_host, allowed_hosts) if allowed: return True else: log.error("Refusing websocket connection from Origin '%s'; \ use --allow-websocket-origin=%s or set BOKEH_ALLOW_WS_ORIGIN=%s to permit this; currently we allow origins %r", origin, origin_host, origin_host, allowed_hosts) return False
Implement a check_origin policy for Tornado to call. The supplied origin will be compared to the Bokeh server whitelist. If the origin is not allowed, an error will be logged and ``False`` will be returned. Args: origin (str) : The URL of the connection origin Returns: bool, True if the connection is allowed, False otherwise
juraj-google-style
def _check_initialized(self): baddies = self._find_uninitialized() if baddies: raise datastore_errors.BadValueError(('Entity has uninitialized properties: %s' % ', '.join(baddies)))
Internal helper to check for uninitialized properties. Raises: BadValueError if it finds any.
codesearchnet
def get_average_voltage(self, min_voltage=None, max_voltage=None): pairs_in_range = self._select_in_voltage_range(min_voltage, max_voltage) if (len(pairs_in_range) == 0): return 0 total_cap_in_range = sum([p.mAh for p in pairs_in_range]) total_edens_in_range = sum([(p.mAh * p.voltage) for p in pairs_in_range]) return (total_edens_in_range / total_cap_in_range)
Average voltage for path satisfying between a min and max voltage. Args: min_voltage (float): The minimum allowable voltage for a given step. max_voltage (float): The maximum allowable voltage allowable for a given step. Returns: Average voltage in V across the insertion path (a subset of the path can be chosen by the optional arguments)
codesearchnet
def _merge_heads(self, x: torch.Tensor) -> torch.Tensor: batch_size_and_num_heads, seq_length, _ = x.shape batch_size = batch_size_and_num_heads x = x.view(batch_size, self.num_heads, seq_length, self.head_dim) x = x.permute(0, 2, 1, 3) return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim)
Merge heads together over the last dimension Args: x (`torch.tensor`): [batch_size * num_heads, seq_length, head_dim] Returns: torch.tensor: [batch_size, seq_length, num_heads * head_dim]
github-repos
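A NumPy illustration of the reshape/permute round-trip in `_merge_heads`, checking only the shape bookkeeping ([batch*heads, seq, head_dim] -> [batch, seq, heads*head_dim]); NumPy is swapped in for torch so the snippet runs anywhere, and the batch size is passed explicitly rather than inferred.

```python
import numpy as np

def merge_heads(x, batch_size, num_heads, head_dim):
    # [batch*heads, seq, head_dim] -> [batch, heads, seq, head_dim]
    # -> [batch, seq, heads, head_dim] -> [batch, seq, heads*head_dim]
    seq_length = x.shape[1]
    x = x.reshape(batch_size, num_heads, seq_length, head_dim)
    x = x.transpose(0, 2, 1, 3)
    return x.reshape(batch_size, seq_length, num_heads * head_dim)

x = np.zeros((2 * 4, 7, 16))  # batch=2, heads=4, seq=7, head_dim=16
print(merge_heads(x, batch_size=2, num_heads=4, head_dim=16).shape)  # (2, 7, 64)
```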
def _prepare_replacement(self, replaced, key): repl = self.replacements[key] new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos) if isinstance(new_nodes, gast.AST): new_nodes = [new_nodes] return new_nodes
Prepares a replacement AST that's safe to swap in for a node. Args: replaced: ast.AST, the node being replaced key: Hashable, the key of the replacement AST Returns: ast.AST, the replacement AST
github-repos
def plot_spectra_overlapped(ss, title=None, setup=_default_setup): plt.figure() draw_spectra_overlapped(ss, title, setup) plt.show()
Plots one or more spectra in the same plot. Args: ss: list of Spectrum objects title=None: window title setup: PlotSpectrumSetup object
codesearchnet
def attention_bias_proximal(length): r = tf.to_float(tf.range(length)) diff = tf.expand_dims(r, 0) - tf.expand_dims(r, 1) return tf.expand_dims(tf.expand_dims(-tf.log1p(tf.abs(diff)), 0), 0)
Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. Returns: a Tensor with shape [1, 1, length, length]
juraj-google-style
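A NumPy re-derivation of the same -log(1 + |i - j|) proximity bias, handy for checking the [1, 1, length, length] shape without TensorFlow.

```python
import numpy as np

def attention_bias_proximal_np(length):
    # Zero on the diagonal, increasingly negative as positions move apart.
    r = np.arange(length, dtype=np.float32)
    diff = r[None, :] - r[:, None]
    return (-np.log1p(np.abs(diff)))[None, None, :, :]

bias = attention_bias_proximal_np(4)
print(bias.shape)         # (1, 1, 4, 4)
print(bias[0, 0, 0, 1])   # -0.6931472, i.e. -log(2) for adjacent positions
```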
def get_likelihood(self, uni_matrix): uni_dim = uni_matrix.shape[1] num_edge = len(self.edges) values = np.zeros([1, num_edge]) new_uni_matrix = np.empty([uni_dim, uni_dim]) for i in range(num_edge): edge = self.edges[i] (value, left_u, right_u) = edge.get_likelihood(uni_matrix) new_uni_matrix[(edge.L, edge.R)] = left_u new_uni_matrix[(edge.R, edge.L)] = right_u values[(0, i)] = np.log(value) return (np.sum(values), new_uni_matrix)
Compute likelihood of the tree given an U matrix. Args: uni_matrix(numpy.array): univariate matrix to evaluate likelihood on. Returns: tuple[float, numpy.array]: likelihood of the current tree, next level conditional univariate matrix
codesearchnet
class AriaGroupedExpertsMLP(nn.Module): def __init__(self, config: AriaTextConfig) -> None: super().__init__() self.config = config self.fc1 = AriaGroupedExpertsGemm(config.hidden_size, config.intermediate_size * 2, config.moe_num_experts) self.fc2 = AriaGroupedExpertsGemm(config.intermediate_size, config.hidden_size, config.moe_num_experts) def forward(self, permuted_tokens, tokens_per_expert): fc1_output = self.fc1(permuted_tokens, tokens_per_expert) projection, gate = torch.chunk(fc1_output, 2, dim=-1) fc1_output = nn.functional.silu(projection) * gate fc2_output = self.fc2(fc1_output, tokens_per_expert) return fc2_output
Grouped MLP module for Mixture of Experts. Args: config (`AriaTextConfig`): Configuration object for the model.
github-repos
def _step(time, output_ta_t, *states): current_input = tuple((ta.read(time) for ta in input_ta)) current_input = tf.nest.pack_sequence_as(inputs, current_input) output, new_states = step_function(current_input, tuple(states) + tuple(constants)) flat_state = tf.nest.flatten(states) flat_new_state = tf.nest.flatten(new_states) for state, new_state in zip(flat_state, flat_new_state): if isinstance(new_state, tf.Tensor): new_state.set_shape(state.shape) flat_output = tf.nest.flatten(output) ta_index_to_write = time if return_all_outputs else 0 output_ta_t = tuple((ta.write(ta_index_to_write, out) for ta, out in zip(output_ta_t, flat_output))) new_states = tf.nest.pack_sequence_as(initial_states, flat_new_state) return (time + 1, output_ta_t) + tuple(new_states)
RNN step function. Args: time: Current timestep value. output_ta_t: TensorArray. *states: List of states. Returns: Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
github-repos
def get_instances(serials): results = [] for s in serials: results.append(AndroidDevice(s)) return results
Create AndroidDevice instances from a list of serials. Args: serials: A list of android device serials. Returns: A list of AndroidDevice objects.
juraj-google-style
def invert_apply(self, pts: torch.Tensor) -> torch.Tensor: pts = pts - self._trans return self._rots.invert_apply(pts)
Applies the inverse of the transformation to a coordinate tensor. Args: pts: A [*, 3] coordinate tensor Returns: The transformed points.
github-repos
def time_to_jump(self): k_tot = (rate_prefactor * np.sum(self.p)) return ((- (1.0 / k_tot)) * math.log(random.random()))
The timestep until the next jump. Args: None Returns: (Float): The timestep until the next jump.
codesearchnet
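A standalone sketch of the same exponential waiting-time draw; the rate prefactor and rate list are illustrative values, since the object's attributes are not shown in the entry.

```python
import math
import random

def time_to_jump(rates, rate_prefactor=1.0e13):
    # Kinetic Monte Carlo residence time: t = -(1 / k_tot) * ln(u), u ~ U(0, 1).
    k_tot = rate_prefactor * sum(rates)
    return -(1.0 / k_tot) * math.log(random.random())

random.seed(0)
print(time_to_jump([0.2, 0.5, 0.3]))  # a small positive float on the order of 1/k_tot
```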
def get_type_name_in_language(cls, type_name, sub_type, language): if (language in cls.type_methods_cache): m = cls.type_methods_cache[language] if (not m): return type_name return m(type_name) (found, method) = load_language_plugins(language, 'get_type_name') if found: cls.type_methods_cache[language] = method if method: return method(type_name, sub_type) else: return type_name module = importlib.import_module(('.lang.%s' % language), package='monolithe.generators') if (not hasattr(module, 'get_type_name')): cls.type_methods_cache[language] = None return type_name method = getattr(module, 'get_type_name') cls.type_methods_cache[language] = method return method(type_name, sub_type)
Get the type for the given language Args: type_name (str): the type to convert language (str): the language to use Returns: a type name in the given language Example: get_type_name_in_language("Varchar", "python") >>> str
codesearchnet
def get_handler(progname, fmt=None, datefmt=None, project_id=None, credentials=None, debug_thread_worker=False, **_): builder = CloudLoggingHandlerBuilder(progname, fmt=fmt, datefmt=datefmt, project_id=project_id, credentials=credentials, debug_thread_worker=debug_thread_worker) return builder.get_handler()
Helper function to create a Stackdriver handler. See `ulogger.stackdriver.CloudLoggingHandlerBuilder` for arguments and supported keyword arguments. Returns: (obj): Instance of `google.cloud.logging.handlers. CloudLoggingHandler`
codesearchnet
def create_temp_parfile(self): output_dir = os.path.dirname(self.output_filename) return tempfile.NamedTemporaryFile(dir=output_dir, delete=False)
Create the first part of a parfile. Returns: A file-like object with a 'name' attribute
github-repos
def dot_distance(t1, t2, name=None): with tf.name_scope(name, 'dot_distance', [t1, t2]) as scope: return -dot_product(t1, t2, name=scope)
dot "distance" between t1 and t2. Args: t1: A tensor. t2: A tensor that is the same size as t1. name: Optional name for this op. Returns: The dot distance between t1 and t2.
juraj-google-style
def diff_prof(step): rbot, rtop = misc.get_rbounds(step) rad = step.rprof['r'].values + rbot tprof = step.rprof['Tmean'].values diff = (tprof[:-1] - tprof[1:]) / (rad[1:] - rad[:-1]) diff = np.insert(diff, 0, (1 - tprof[0]) / (rad[0] - rbot)) diff = np.append(diff, tprof[-1] / (rtop - rad[-1])) return diff, np.append(rad, rtop)
Diffusion. Args: step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData instance. Returns: tuple of :class:`numpy.array`: the diffusion and the radial position at which it is evaluated.
juraj-google-style
def add_vcf_info(keyword, variant_line=None, variant_dict=None, annotation=None): logger = logging.getLogger(__name__) if annotation: new_info = '{0}={1}'.format(keyword, annotation) else: new_info = keyword logger.debug("Adding new variant information {0}".format(new_info)) fixed_variant = None if variant_line: logger.debug("Adding information to a variant line") splitted_variant = variant_line.rstrip('\n').split('\t') logger.debug("Adding information to splitted variant line") old_info = splitted_variant[7] if old_info == '.': splitted_variant[7] = new_info else: splitted_variant[7] = "{0};{1}".format(splitted_variant[7], new_info) fixed_variant = '\t'.join(splitted_variant) elif variant_dict: logger.debug("Adding information to a variant dict") old_info = variant_dict['INFO'] if old_info == '.': variant_dict['INFO'] = new_info else: variant_dict['INFO'] = "{0};{1}".format(old_info, new_info) fixed_variant = variant_dict return fixed_variant
Add information to the info field of a vcf variant line. Arguments: variant_line (str): A vcf formatted variant line keyword (str): The info field key annotation (str): If the annotation is a key, value pair this is the string that represents the value Returns: fixed_variant : str if variant line, or dict if variant_dict
juraj-google-style
def run(self, host: str = '0.0.0.0', port: int = 8080): self._loop.run_until_complete(self._configure_plugins()) web.run_app(self._app, host=host, port=port)
Start sirbot Configure sirbot and start the aiohttp.web.Application Args: host (str): host port (int): port
juraj-google-style
def do_phonefy(self, query, **kwargs): results = [] test = self.check_phonefy(query, kwargs) if test: r = { "type": "i3visio.phone", "value": self.platformName + " - " + query, "attributes": [] } try: aux = { "type": "i3visio.uri", "value": self.createURL(query, mode="phonefy"), "attributes": [] } r["attributes"].append(aux) except: pass aux = { "type": "i3visio.platform", "value": self.platformName, "attributes": [] } r["attributes"].append(aux) r["attributes"] += self.process_phonefy(test) results.append(r) return results
Verifying a phonefy query in this platform. This might be redefined in any class inheriting from Platform. Args: ----- query: The element to be searched. Return: ------- A list of elements to be appended.
juraj-google-style
def report_clean(rows): print('DCM REPORT CLEAN') first = True last = False for row in rows: if row and row[0] == 'Report Fields': break for row in rows: if 'No data returned by the reporting service.' in row: break if not row or row[0] == 'Grand Total:': break if first: try: date_column = row.index('Date') row[date_column] = 'Report_Day' except ValueError: pass row = [column_header_sanitize(cell) for cell in row] row = ['' if cell.strip() in ('(not set)', '-') else cell for cell in row] yield row first = False
Helper to fix DCM report issues for BigQuery and ensure schema compliance. Memory efficiently cleans each row by fixing: * Strips header and footer to preserve only data rows. * Changes 'Date' to 'Report_Day' to avoid using reserved name in BigQuery. * Blanks out '-' and '(not set)' placeholder cells. * Changes data format to match Data Studio if datastudio=True. Usage example: ``` filename, report = report_file(...) rows = report_to_rows(report) rows = report_clean(rows) ``` Args: * rows: (iterator) Rows to clean. Returns: * Iterator of cleaned rows.
github-repos
def _make_fake_dataset_fn(initial_delay_us, remainder_delay_us): def fake_dataset_fn(unused): del unused def make_dataset(time_us, num_elements): dataset = dataset_ops.Dataset.range(num_elements) if time_us > 0: dataset = dataset.apply(testing.sleep(time_us)) return dataset if not initial_delay_us: return make_dataset(remainder_delay_us, 100) return make_dataset(initial_delay_us, 0).concatenate(make_dataset(remainder_delay_us, 100)) return fake_dataset_fn
Returns a dataset that emulates a remote storage data source. Returns a dataset factory which creates a dataset with 100 elements that emulates the performance characteristic of a file-based dataset stored in a remote storage. In particular, the first element will take an order of magnitude longer to produce than the remaining elements (100ms vs. 1ms). Args: initial_delay_us: How long to wait before producing the first element. remainder_delay_us: How long to wait before producing subsequent elements.
github-repos
def __init__(self, model: PreTrainedModel, max_batch_size: int=1, max_cache_len: int=4096): super().__init__() if not hasattr(model.config, 'use_cache') or model.config.use_cache is False: raise ValueError('The model must have caching enabled to be performant.') if hasattr(model.config, 'layer_types') and getattr(model.config, 'sliding_window', None) is not None: self.model = TorchExportableModuleWithHybridCache(model, max_batch_size, max_cache_len) else: logging.info('Using `StaticCache` for export as `layer_types` is not specified or `sliding_window` is `null` in the config.') self.model = TorchExportableModuleWithStaticCache(model)
Initializes the exportable module with `HybridCache`. Args: model (`PreTrainedModel`): The pretrained model to wrap. max_batch_size (int): Maximum batch size for the cache. max_cache_len (int): Maximum sequence length for the cache. Raises: ValueError: If the model is configured with a unsupported cache implementation.
github-repos
def highlight_html(code: str) -> str: theme = resource_utils.resource_import('static/highlight.css', module='etils.ecolab') html_str = '\n {theme}\n <script src=" html_str = epy.dedent(html_str) html_str = html_str.format(theme=theme, code=html.escape(code)) return html_str
Add Python syntax highlighting to a Python code string. Usage: Example: ```python @dataclasses.dataclass class A: x: int def _repr_html_(self) -> str: from etils import ecolab # Lazy-import ecolab return ecolab.highlight_html(repr(self)) ``` Args: code: The string to wrap Returns: The HTML string representation
github-repos
def nPr(n, r): f = math.factorial return int(f(n) / f(n-r))
Calculates nPr. Args: n (int): total number of items. r (int): items to permute Returns: nPr.
juraj-google-style
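A quick cross-check of the permutation helper against the standard library (`math.perm`, available on Python 3.8+), using integer division to avoid the float round-trip.

```python
import math

def nPr(n, r):
    # nPr = n! / (n - r)!
    f = math.factorial
    return f(n) // f(n - r)

assert nPr(5, 2) == 20
assert nPr(5, 2) == math.perm(5, 2)  # math.perm requires Python 3.8+
```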
def unpause(self, container): url = self._url('/containers/{0}/unpause', container) res = self._post(url) self._raise_for_status(res)
Unpause all processes within a container. Args: container (str): The container to unpause
juraj-google-style
def attributes(self, main_type, sub_type, unique_id, owner=None, params=None): params = params or {} if owner: params['owner'] = owner if not sub_type: url = '/v2/{}/{}/attributes'.format(main_type, unique_id) else: url = '/v2/{}/{}/{}/attributes'.format(main_type, sub_type, unique_id) for a in self._iterate(url, params, 'attribute'): yield a
Args: owner: main_type: sub_type: unique_id: params: Return:
juraj-google-style
def forward(self, input_points: Optional[Tuple[torch.Tensor, torch.Tensor]], input_labels: Optional[torch.Tensor], input_boxes: Optional[torch.Tensor], input_masks: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: sparse_embeddings = None batch_size = 1 target_device = self.shared_embedding.positional_embedding.device if input_points is not None: batch_size, point_batch_size = input_points.shape[:2] if input_labels is None: raise ValueError('If points are provided, labels must also be provided.') point_embeddings = self._embed_points(input_points, input_labels, pad=input_boxes is None) sparse_embeddings = point_embeddings if input_boxes is not None: batch_size = input_boxes.shape[0] box_embeddings = self._embed_boxes(input_boxes) if sparse_embeddings is None: sparse_embeddings = box_embeddings else: sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2) if input_masks is not None: dense_embeddings = self.mask_embed(input_masks) else: dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1]) if sparse_embeddings is None: sparse_embeddings = torch.zeros((batch_size, 1, 1, self.hidden_size), device=target_device) return (sparse_embeddings, dense_embeddings)
Embeds different types of prompts, returning both sparse and dense embeddings. Args: points (`torch.Tensor`, *optional*): point coordinates and labels to embed. boxes (`torch.Tensor`, *optional*): boxes to embed masks (`torch.Tensor`, *optional*): masks to embed
github-repos
def name_based_save(mesh: layout_lib.Mesh, checkpoint_prefix: Union[str, tensor_lib.Tensor], name_tensor_dict: Dict[str, Union[tensor_lib.Tensor, tf_variables.Variable]]): if not context.executing_eagerly(): raise ValueError('name based save must run eagerly.') ordered_name_tensor_dict = name_tensor_dict if not isinstance(name_tensor_dict, collections.OrderedDict): ordered_name_tensor_dict = collections.OrderedDict(name_tensor_dict) checkpoint_prefix = api.pack([checkpoint_prefix] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=0)) tensor_names = api.pack([list(ordered_name_tensor_dict.keys())] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=1)) sharded_save(mesh, file_prefix=checkpoint_prefix, tensor_names=tensor_names, shape_and_slices=[''] * len(ordered_name_tensor_dict), tensors=list(ordered_name_tensor_dict.values()))
Saves name based Tensor into a Checkpoint. The function prepares the input dictionary to the format of a `sharded_save`, so that it can take advantage of DTensor SPMD based distributed save. Same as restore, the function only supports saving on the single mesh. Args: mesh: The single mesh that all Tensors would be restored to. checkpoint_prefix : The prefix of checkpoint to be restored. name_tensor_dict: A ordered dictionary of tensor_names to a DTensor. The DTensor shape/dtype must match the tensors being saved/restored for now.
github-repos
def generate_identified_filename(filename: Path, identifier: str) -> Path: return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)
Append a string-identifier at the end (before the extension, if any) to the provided filepath Args: filename: pathlib.Path The actual path object we would like to add an identifier suffix identifier: The suffix to add Returns: String with concatenated identifier at the end of the filename
github-repos
def _normalize_feature_columns(feature_columns): if isinstance(feature_columns, fc_types.FeatureColumn): feature_columns = [feature_columns] if isinstance(feature_columns, collections_abc.Iterator): feature_columns = list(feature_columns) if isinstance(feature_columns, dict): raise ValueError('Expected feature_columns to be iterable, found dict.') for column in feature_columns: if not isinstance(column, fc_types.FeatureColumn): raise ValueError('Items of feature_columns must be a FeatureColumn. Given (type {}): {}.'.format(type(column), column)) if not feature_columns: raise ValueError('feature_columns must not be empty.') name_to_column = {} for column in feature_columns: if column.name in name_to_column: raise ValueError('Duplicate feature column name found for columns: {} and {}. This usually means that these columns refer to same base feature. Either one must be discarded or a duplicated but renamed item must be inserted in features dict.'.format(column, name_to_column[column.name])) name_to_column[column.name] = column return sorted(feature_columns, key=lambda x: x.name)
Normalizes the `feature_columns` input. This method converts the `feature_columns` to list type as best as it can. In addition, verifies the type and other parts of feature_columns, required by downstream library. Args: feature_columns: The raw feature columns, usually passed by users. Returns: The normalized feature column list. Raises: ValueError: for any invalid inputs, such as empty, duplicated names, etc.
github-repos
def add_variant(self, variant): LOG.debug("Upserting variant: {0}".format(variant.get('_id'))) update = self._get_update(variant) message = self.db.variant.update_one( {'_id': variant['_id']}, update, upsert=True ) if message.modified_count == 1: LOG.debug("Variant %s was updated", variant.get('_id')) else: LOG.debug("Variant was added to database for first time") return
Add a variant to the variant collection If the variant exists we update the count else we insert a new variant object. Args: variant (dict): A variant dictionary
juraj-google-style
def take_at_most_n_seconds(time_s, func, *args, **kwargs): thread = threading.Thread(target=func, args=args, kwargs=kwargs) thread.start() thread.join(time_s) if thread.is_alive(): return False return True
A function that returns whether a function call took less than time_s. NOTE: The function call is not killed and will run indefinitely if hung. Args: time_s: Maximum amount of time to take. func: Function to call. *args: Arguments to call the function with. **kwargs: Keyword arguments to call the function with. Returns: True if the function finished in less than time_s seconds.
codesearchnet
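A usage sketch of the timeout check, with `time.sleep` standing in for the work under test; as the docstring warns, a call that overruns is not killed and keeps running in its thread.

```python
import threading
import time

def take_at_most_n_seconds(time_s, func, *args, **kwargs):
    # Returns True only if `func` finishes before the join times out.
    thread = threading.Thread(target=func, args=args, kwargs=kwargs)
    thread.start()
    thread.join(time_s)
    return not thread.is_alive()

print(take_at_most_n_seconds(0.5, time.sleep, 0.1))  # True: finished in time
print(take_at_most_n_seconds(0.1, time.sleep, 0.5))  # False: still running after the timeout
```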
def split(self, desired_bundle_size: int, start_position: Optional[Any]=None, stop_position: Optional[Any]=None) -> Iterator[SourceBundle]: raise NotImplementedError
Splits the source into a set of bundles. Bundles should be approximately of size ``desired_bundle_size`` bytes. Args: desired_bundle_size: the desired size (in bytes) of the bundles returned. start_position: if specified the given position must be used as the starting position of the first bundle. stop_position: if specified the given position must be used as the ending position of the last bundle. Returns: an iterator of objects of type 'SourceBundle' that gives information about the generated bundles.
github-repos
def verify_mfa(self, mfa_token): response = self.resource.verify_mfa({'mfa_token': mfa_token}) return (response['valid'] == True or response['valid'] == 'true')
Verify an SMS or TOTP MFA token for this user. Args: mfa_token (str): An alphanumeric code from either a User's TOTP application or sent to them via SMS. Returns: True if the mfa_token is valid, False otherwise.
juraj-google-style
def compile(self, ops): def _compile(): code = [] for op in ops: if isinstance(op, SyscallInvoke): code.extend(self.syscall(op)) elif isinstance(op, LoadRegister): code.extend(self.reg_load(op.register, op.value)) elif isinstance(op, str): code.extend(op.split('\n')) else: raise ValueError('No idea how to assemble "%s"' % repr(op)) return ['\t%s' % line for line in code] _compile() return '\n'.join(self.finalize(self.data_finalizer(_compile(), self.data))) + '\n'
Translate a list of operations into its assembler source. Arguments: ops(list): A list of shellcode operations. Returns: str: The assembler source code that implements the shellcode.
juraj-google-style
def multiplier_with_docstring(num, rate=2): return num * rate
Multiplies num by rate. Args: num (int): the num you want to multiply rate (int): the rate for multiplication Returns: Multiplication of num by rate
github-repos
def forecast(stl, fc_func, steps=10, seasonal=False, **fc_func_kwargs): forecast_array = np.array([]) trend_array = stl.trend for step in range(steps): pred = fc_func(np.append(trend_array, forecast_array), **fc_func_kwargs) forecast_array = np.append(forecast_array, pred) col_name = fc_func.__name__ observed_timedelta = (stl.observed.index[(- 1)] - stl.observed.index[(- 2)]) forecast_idx_start = (stl.observed.index[(- 1)] + observed_timedelta) forecast_idx = pd.date_range(start=forecast_idx_start, periods=steps, freq=pd.tseries.frequencies.to_offset(observed_timedelta)) if seasonal: seasonal_ix = 0 max_correlation = (- np.inf) detrended_array = np.asanyarray((stl.observed - stl.trend)).squeeze() for (i, x) in enumerate(stl.period_averages): if (i == 0): detrended_slice = detrended_array[(- len(stl.period_averages)):] else: detrended_slice = detrended_array[(- (len(stl.period_averages) + i)):(- i)] this_correlation = np.correlate(detrended_slice, stl.period_averages)[0] if (this_correlation > max_correlation): max_correlation = this_correlation seasonal_ix = i rolled_period_averages = np.roll(stl.period_averages, (- seasonal_ix)) tiled_averages = np.tile(rolled_period_averages, ((steps // len(stl.period_averages)) + 1))[:steps] forecast_array += tiled_averages col_name += '+seasonal' forecast_frame = pd.DataFrame(data=forecast_array, index=forecast_idx) forecast_frame.columns = [col_name] return forecast_frame
Forecast the given decomposition ``stl`` forward by ``steps`` steps using the forecasting function ``fc_func``, optionally including the calculated seasonality. This is an additive model, Y[t] = T[t] + S[t] + e[t] Args: stl (a modified statsmodels.tsa.seasonal.DecomposeResult): STL decomposition of observed time series created using the ``stldecompose.decompose()`` method. fc_func (function): Function which takes an array of observations and returns a single valued forecast for the next point. steps (int, optional): Number of forward steps to include in the forecast seasonal (bool, optional): Include seasonal component in forecast fc_func_kwargs: keyword arguments All remaining arguments are passed to the forecasting function ``fc_func`` Returns: forecast_frame (pd.Dataframe): A ``pandas.Dataframe`` containing forecast values and a DatetimeIndex matching the observed index.
codesearchnet
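A hedged usage sketch: the `stldecompose` import paths and the `drift` forecast function are assumptions about the package layout, and the series is synthetic.

import numpy as np
import pandas as pd
from stldecompose import decompose              # assumed import path
from stldecompose.forecast_funcs import drift   # assumed forecast function

observed = pd.Series(np.random.randn(48).cumsum(),
                     index=pd.date_range('2020-01-01', periods=48, freq='M'))
stl = decompose(observed, period=12)
fcast = forecast(stl, fc_func=drift, steps=6, seasonal=True)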
def get_unverified_claims(token):
    try:
        claims = jws.get_unverified_claims(token)
    except Exception:
        raise JWTError('Error decoding token claims.')

    try:
        claims = json.loads(claims.decode('utf-8'))
    except ValueError as e:
        raise JWTError('Invalid claims string: %s' % e)

    if not isinstance(claims, Mapping):
        raise JWTError('Invalid claims string: must be a json object')

    return claims
Returns the decoded claims without verification of any kind. Args: token (str): A signed JWT to decode the headers from. Returns: dict: The dict representation of the token claims. Raises: JWTError: If there is an exception decoding the token.
juraj-google-style
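A small sketch of calling this through python-jose's public API; the token value would be a real JWT string supplied by the caller.

from jose import jwt

def peek_claims(token: str) -> dict:
    # No signature verification happens here -- never use these values for
    # authorization decisions.
    return jwt.get_unverified_claims(token)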
def star(self, input_string):
    if input_string != self.epsilon and input_string != self.empty:
        return "(" + input_string + ")*"
    else:
        return ""
Kleene star operation Args: input_string (str): The string to which the Kleene star will be applied Returns: str: The result of applying the Kleene star operation to the input string
juraj-google-style
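A brief illustration, assuming an object that exposes this method together with `epsilon` and `empty` markers:

def star_example(regex_obj):
    # For an ordinary string the result is wrapped and starred; for the
    # epsilon/empty markers an empty string comes back.
    return [regex_obj.star('ab'), regex_obj.star(regex_obj.epsilon)]
    # expected: ['(ab)*', '']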
def play_human(env):
    try:
        play(env, fps=env.metadata['video.frames_per_second'])
    except KeyboardInterrupt:
        pass
    env.close()
Play the environment using keyboard as a human. Args: env (gym.Env): the initialized gym environment to play Returns: None
codesearchnet
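A hedged sketch of invoking this on a gym environment; the environment id is illustrative, and it assumes the env exposes the 'video.frames_per_second' metadata key and keyboard-friendly controls.

import gym

def demo():
    env = gym.make('CartPole-v1')   # placeholder env id
    play_human(env)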
def index_of_coincidence(*texts):
    if not texts:
        raise ValueError('texts must not be empty')
    return statistics.mean(
        _calculate_index_of_coincidence(frequency_analyze(text), len(text))
        for text in texts
    )
Calculate the index of coincidence for one or more ``texts``. The results are averaged over multiple texts to return the delta index of coincidence. Examples: >>> index_of_coincidence("aabbc") 0.2 >>> index_of_coincidence("aabbc", "abbcc") 0.2 Args: *texts (variable length argument list): The texts to analyze Returns: Decimal value of the index of coincidence Raises: ValueError: If texts is empty ValueError: If any text is less than 2 characters long
codesearchnet
def run_shell_command(state, host, command, get_pty=False, timeout=None,
                      print_output=False, **command_kwargs):
    command = make_command(command, **command_kwargs)
    logger.debug('--> Running command on localhost: {0}'.format(command))

    if print_output:
        print('{0}>>> {1}'.format(host.print_prefix, command))

    process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)

    # Read stdout/stderr concurrently so neither buffer can block the process
    stdout_reader = gevent.spawn(
        read_buffer, process.stdout,
        print_output=print_output,
        print_func=lambda line: '{0}{1}'.format(host.print_prefix, line),
    )
    stderr_reader = gevent.spawn(
        read_buffer, process.stderr,
        print_output=print_output,
        print_func=lambda line: '{0}{1}'.format(host.print_prefix, click.style(line, 'red')),
    )

    greenlets = gevent.wait((stdout_reader, stderr_reader), timeout=timeout)

    # If both readers did not complete, the timeout was hit
    if len(greenlets) != 2:
        stdout_reader.kill()
        stderr_reader.kill()
        raise timeout_error()

    stdout = stdout_reader.get()
    stderr = stderr_reader.get()

    logger.debug('--> Waiting for exit status...')
    process.wait()
    process.stdout.close()

    logger.debug('--> Command exit status: {0}'.format(process.returncode))
    return (process.returncode == 0), stdout, stderr
Execute a command on the local machine. Args: state (``pyinfra.api.State`` obj): state object for this command hostname (string): hostname of the target command (string): actual command to execute sudo (boolean): whether to wrap the command with sudo sudo_user (string): user to sudo to get_pty (boolean): whether to get a PTY before executing the command env (dict): envrionment variables to set timeout (int): timeout for this command to complete before erroring Returns: tuple: (exit_code, stdout, stderr) stdout and stderr are both lists of strings from each buffer.
codesearchnet
def get_events_for_blocks(self, blocks, subscriptions):
    events = []
    for blkw in blocks:
        events.extend(self.get_events_for_block(blkw, subscriptions))
    return events
Get a list of events associated with all the blocks. Args: blocks (list of BlockWrapper): The blocks to search for events that match each subscription. subscriptions (list of EventSubscriptions): EventFilter and event type to filter events. Returns (list of Events): The Events associated with each block id. Raises: KeyError: A receipt is missing from the receipt store.
codesearchnet
def FormatSOAPDateTime(value):
    value_date = value['date']
    return '%s-%s-%s %s:%s:%s (%s)' % (
        value_date['year'], value_date['month'], value_date['day'],
        value['hour'], value['minute'], value['second'], value['timeZoneId'])
Format a SOAP DateTime object for printing. Args: value: The DateTime object to format. Returns: A string representing the value.
juraj-google-style
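The shape of the SOAP DateTime dict the formatter above expects, with made-up values:

value = {
    'date': {'year': 2019, 'month': 3, 'day': 14},
    'hour': 9, 'minute': 26, 'second': 53,
    'timeZoneId': 'America/New_York',
}
# FormatSOAPDateTime(value) -> '2019-3-14 9:26:53 (America/New_York)'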
def to_valid_state_vector(state_rep: Union[int, np.ndarray],
                          num_qubits: int,
                          dtype: Type[np.number] = np.complex64) -> np.ndarray:
    if isinstance(state_rep, np.ndarray):
        if len(state_rep) != 2 ** num_qubits:
            raise ValueError(
                'initial state was of size {} but expected state for {} qubits'
                .format(len(state_rep), num_qubits))
        state = state_rep
    elif isinstance(state_rep, int):
        if state_rep < 0:
            raise ValueError('initial_state must be positive')
        elif state_rep >= 2 ** num_qubits:
            raise ValueError(
                'initial state was {} but expected state for {} qubits'
                .format(state_rep, num_qubits))
        else:
            state = np.zeros(2 ** num_qubits, dtype=dtype)
            state[state_rep] = 1.0
    else:
        raise TypeError('initial_state was not of type int or ndarray')
    validate_normalized_state(state, num_qubits, dtype)
    return state
Verifies the state_rep is valid and converts it to ndarray form. This method is used to support passing in an integer representing a computational basis state or a full wave function as a representation of a state. Args: state_rep: If an int, the state returned is the state corresponding to a computational basis state. If a numpy array, this is the full wave function. Both of these are validated for the given number of qubits, and the state must be properly normalized and of the appropriate dtype. num_qubits: The number of qubits for the state. The state_rep must be valid for this number of qubits. dtype: The numpy dtype of the state, will be used when creating the state for a computational basis state, or validated against if state_rep is a numpy array. Returns: A numpy ndarray corresponding to the state on the given number of qubits. Raises: ValueError: if the state is not valid.
codesearchnet
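Two equivalent ways to describe the basis state with index 1 on two qubits, using the function defined above:

import numpy as np

state_from_int = to_valid_state_vector(1, num_qubits=2)
state_from_array = to_valid_state_vector(
    np.array([0, 1, 0, 0], dtype=np.complex64), num_qubits=2)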
def DEFINE_multi_float(name, default, help, lower_bound=None, upper_bound=None,
                       flag_values=FLAGS, **args):
    parser = FloatParser(lower_bound, upper_bound)
    serializer = ArgumentSerializer()
    DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
Registers a flag whose value can be a list of arbitrary floats. Use the flag on the command line multiple times to place multiple float values into the list. The 'default' may be a single float (which will be converted into a single-element list) or a list of floats. Args: name: A string, the flag name. default: The default value of the flag. help: A help string. lower_bound: float, min values of the flag. upper_bound: float, max values of the flag. flag_values: FlagValues object with which the flag will be registered. **args: Dictionary with extra keyword args that are passed to the Flag __init__.
codesearchnet
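A sketch of how such a flag is typically defined and read; the flag name is illustrative and the values are only available after flag parsing.

DEFINE_multi_float('learning_rates', [0.1], 'Learning rates to sweep over',
                   lower_bound=0.0)
# Command line: --learning_rates=0.1 --learning_rates=0.01
# After parsing, FLAGS.learning_rates == [0.1, 0.01]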
def _parse_mode(self, config):
    value = re.search(r'switchport mode (\w+)', config, re.M)
    return dict(mode=value.group(1))
Scans the specified config and parses the switchport mode value Args: config (str): The interface configuration block to scan Returns: dict: A Python dict object with the value of switchport mode. The dict returned is intended to be merged into the resource dict
juraj-google-style
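The regular expression above matches interface configuration blocks like the following (the interface name is a made-up example):

config = """
interface Ethernet1
   switchport mode trunk
"""
# _parse_mode(config) -> {'mode': 'trunk'}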
def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:
    path = self._insert_vars(path, data)
    path = self.BASE_URL + path
    data = self.cache.check(path)
    if data:
        return data
    self._try_refresh_access_token()
    r = self.session.get(path)
    self.cache.set(r)
    return r.json()
Queries the ESI by an endpoint URL. This method is not marked "private" as it _can_ be used by consuming code, but it's probably easier to call the `get_op` method instead. Args: path: raw ESI URL path data: data to insert into the URL Returns: ESI data
juraj-google-style
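A hedged usage sketch: the endpoint path, the placeholder syntax, and the `client` object are assumptions about how the surrounding ESI wrapper is used.

def fetch_character(client, character_id: int):
    # `client` stands in for whatever object exposes get_path()
    return client.get_path('/characters/{character_id}/',
                           {'character_id': character_id})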
def RecursiveDownload(dir_obj, target_dir, max_depth=10, depth=1,
                      overwrite=False, max_threads=10):
    if not isinstance(dir_obj, aff4.AFF4Volume):
        return
    thread_pool = threadpool.ThreadPool.Factory('Downloader', max_threads)
    thread_pool.Start()
    for sub_file_entry in dir_obj.OpenChildren():
        path_elements = [target_dir]
        sub_target_dir = u'/'.join(path_elements)
        try:
            if isinstance(sub_file_entry, aff4.AFF4Stream):
                args = (sub_file_entry.urn, sub_target_dir, sub_file_entry.token, overwrite)
                thread_pool.AddTask(target=CopyAFF4ToLocal, args=args, name='Downloader')
            elif 'Container' in sub_file_entry.behaviours:
                if depth >= max_depth:
                    continue
                try:
                    os.makedirs(sub_target_dir)
                except OSError:
                    pass
                RecursiveDownload(sub_file_entry, sub_target_dir,
                                  overwrite=overwrite, depth=depth + 1)
        except IOError:
            logging.exception('Unable to download %s', sub_file_entry.urn)
        finally:
            sub_file_entry.Close()
    if depth <= 1:
        thread_pool.Stop(join_timeout=THREADPOOL_JOIN_TIMEOUT)
Recursively downloads a file entry to the target path. Args: dir_obj: An aff4 object that contains children. target_dir: Full path of the directory to write to. max_depth: Depth to download to. 1 means just the directory itself. depth: Current depth of recursion. overwrite: Should we overwrite files that exist. max_threads: Use this many threads to do the downloads.
codesearchnet
def load_template(path_or_buffer):
    from itertools import groupby
    from operator import itemgetter

    path_or_buffer = _stringify_path(path_or_buffer)
    if is_file_like(path_or_buffer):
        templates = json.load(path_or_buffer)
    else:
        with open(path_or_buffer, 'r') as f:
            templates = json.load(f)

    options = []
    grouper = itemgetter('page', 'extraction_method')
    for key, grp in groupby(sorted(templates, key=grouper), grouper):
        tmp_options = [_convert_template_option(e) for e in grp]
        if len(tmp_options) == 1:
            options.append(tmp_options[0])
            continue
        # Merge multiple regions on the same page into a single option
        option = tmp_options[0]
        areas = [e.get('area') for e in tmp_options]
        option['area'] = areas
        option['multiple_tables'] = True
        options.append(option)
    return options
Build tabula-py options from a template file Args: path_or_buffer: File path or file-like object of a Tabula app template Returns: list of dict: tabula-py options, one per page/extraction-method group
codesearchnet
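A hedged sketch of pairing the loaded options with tabula-py's read_pdf; the file names are placeholders.

import tabula

options = load_template('template.tabula-template.json')
tables = [tabula.read_pdf('report.pdf', **option) for option in options]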
def add_metric(self, value, aggregation=None, name=None):
    if aggregation is not None and aggregation != 'mean':
        raise ValueError(
            'We currently support only `mean` sample-wise metric aggregation. '
            'You provided aggregation=`%s`' % aggregation)

    from_metric_obj = hasattr(value, '_metric_obj')
    is_symbolic = tf_utils.is_symbolic_tensor(value)
    in_call_context = base_layer_utils.call_context().in_call

    if name is None and not from_metric_obj:
        raise ValueError(
            "Please provide a name for your metric like "
            "`self.add_metric(tf.reduce_sum(inputs), name='mean_activation', "
            "aggregation='mean')`")
    elif from_metric_obj:
        name = value._metric_obj.name

    if in_call_context:
        self._symbolic_add_metric(value, aggregation, name)
    else:
        if not is_symbolic:
            raise ValueError(
                'Expected a symbolic Tensor for the metric value, received: ' + str(value))
        if not getattr(self, '_is_graph_network', False):
            with backend.get_graph().as_default():
                self._symbolic_add_metric(value, aggregation, name)
            return
        if from_metric_obj:
            raise ValueError(
                'Using the result of calling a `Metric` object when calling '
                '`add_metric` on a Functional Model is not supported. Please pass '
                'the Tensor to monitor directly.')
        self._graph_network_add_metric(value, aggregation, name)
Adds metric tensor to the layer. Args: value: Metric tensor. aggregation: Sample-wise metric reduction function. If `aggregation=None`, it indicates that the metric tensor provided has been aggregated already. eg, `bin_acc = BinaryAccuracy(name='acc')` followed by `model.add_metric(bin_acc(y_true, y_pred))`. If aggregation='mean', the given metric tensor will be sample-wise reduced using `mean` function. eg, `model.add_metric(tf.reduce_sum(outputs), name='output_mean', aggregation='mean')`. name: String metric name. Raises: ValueError: If `aggregation` is anything other than None or `mean`.
github-repos
def cast(x, dtype):
    dtype = backend.standardize_dtype(dtype)
    if any_symbolic_tensors((x,)):
        return Cast(dtype=dtype)(x)
    return backend.core.cast(x, dtype)
Cast a tensor to the desired dtype. Args: x: A tensor or variable. dtype: The target type. Returns: A tensor of the specified `dtype`. Example: >>> x = keras.ops.arange(4) >>> x = keras.ops.cast(x, dtype="float16")
github-repos
def get_pipeline_options(project: str,
                         job_name: str,
                         mode: str,
                         num_workers: int = cfg.NUM_WORKERS,
                         streaming: bool = True) -> PipelineOptions:
    job_name = f"{job_name}-{datetime.now().strftime('%Y%m%d%H%M%S')}"
    # Staging bucket URL (truncated in the source)
    staging_bucket = f'gs:'
    dataflow_options = {
        'runner': 'DirectRunner' if mode == 'local' else 'DataflowRunner',
        'job_name': job_name,
        'project': project,
        'region': 'us-central1',
        'staging_location': f'{staging_bucket}/dflow-staging',
        'temp_location': f'{staging_bucket}/dflow-temp',
        'setup_file': './setup.py',
        'streaming': streaming,
    }
    if num_workers:
        dataflow_options.update({'num_workers': num_workers})
    return PipelineOptions(flags=[], **dataflow_options)
Function to retrieve the pipeline options. Args: project: GCP project to run on job_name: Name of the Dataflow job mode: Indicator to run local, cloud or template num_workers: Number of workers for running the job in parallel streaming: Whether to run a streaming pipeline Returns: Dataflow pipeline options
github-repos
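A hedged sketch of wiring these options into a Beam pipeline; the project and job names are placeholders and `cfg.NUM_WORKERS` is assumed to be importable in the caller's environment.

import apache_beam as beam

options = get_pipeline_options('my-gcp-project', 'wordcount', mode='local',
                               num_workers=2, streaming=False)
with beam.Pipeline(options=options) as pipeline:
    _ = pipeline | beam.Create(['a', 'b']) | beam.Map(print)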
def _send_trace(self, chunk=None):
    self._trace_sm_running = True
    if chunk is None:
        chunk = self._next_tracing_chunk(20)
    if chunk is None or len(chunk) == 0:
        self._trace_sm_running = False
        return
    try:
        self._send_notification(TracingChar.value_handle, chunk)
        self._defer(self._send_trace)
    except bable_interface.BaBLEException as err:
        if err.packet.status == 'Rejected':
            # Back off briefly and retry the same chunk
            time.sleep(0.05)
            self._defer(self._send_trace, [chunk])
        else:
            self._audit('ErrorStreamingTrace')
            self._logger.exception('Error while tracing data')
Stream tracing data to the ble client in 20 byte chunks Args: chunk (bytearray): A chunk that should be sent instead of requesting a new chunk from the pending reports.
codesearchnet
def check_video_availability(request, video_id):
    api = Api()
    api.authenticate()
    availability = api.check_upload_status(video_id)
    if availability is not True:
        data = {'success': False}
    else:
        data = {'success': True}
    return HttpResponse(json.dumps(data), content_type='application/json')
Checks the availability of the video. Newly uploaded videos are in the processing stage, and others might be rejected. Returns: json response
codesearchnet
def __init__(self, counter_factory, state_sampler, declaring_step, input_index):
    super().__init__(counter_factory, state_sampler)
    self.declaring_step = declaring_step
    self.input_index = input_index
    self.update_current_step()
Create a side input read counter. Args: counter_factory: A counters.CounterFactory to create byte counters. state_sampler: A statesampler.StateSampler to transition into read states. declaring_step: A string with the step name of the step that directly receives the side input initially. input_index: The index of the side input in the list of inputs of the declaring step. The side input is uniquely identified by (declaring_step, input_index); where declaring_step is the step that receives the PCollectionView as a side input, and input_index is the index of the PCollectionView within the list of inputs.
github-repos