def related_domains(self, domains): api_name = 'opendns-related_domains' fmt_url_path = u'links/name/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
Get a list of domain names that have been observed being requested around the same time (up to 60 seconds before or after) as the given domain name. Args: domains: an enumerable of domain name strings Returns: An enumerable of [domain name, scores]
juraj-google-style
def _parse_line(self, instrumentation_block, line): if instrumentation_block.state == _InstrumentationBlockStates.METHOD: return self._parse_method_block_line(instrumentation_block, line) elif instrumentation_block.state == _InstrumentationBlockStates.RESULT: return self._parse_result_block_line(instrumentation_block, line) else: return self._parse_unknown_block_line(instrumentation_block, line)
Parses an arbitrary line from the instrumentation output based upon the current parser state. Args: instrumentation_block: _InstrumentationBlock, an instrumentation block with any of the possible parser states. line: string, the raw instrumentation output line to parse appropriately. Returns: The next instrumentation block to continue parsing with.
github-repos
def add_get(self, path, controller, template, raw=False): if raw: fn = controller else: fn = self._prepare_controller(controller, template) self.app.router.add_get(path, fn)
Set up a route of type GET Args: path (str): URL to listen to controller (coroutine): the coroutine to handle the request template (str): the template to render the response, or None if it is a JSON response raw (bool): indicates whether post-processing (jinja, json, etc.) is needed
juraj-google-style
def empty_like(x, init=None): x = ops.convert_to_tensor(x) return gen_array_ops.empty(array_ops.shape(x), x.dtype, init=init)
Returns a non-initialized tensor with the same shape and dtype as x. Args: x: A Tensor. init: Initialize the returned tensor with the default value of x.dtype(), if True. Otherwise, do not initialize. Defaults to None. Returns: A tensor y, whose dtype and shape are the same as those of x. y is guaranteed not to be an alias of x. Upon return, y may contain arbitrary data.
github-repos
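The contract of `empty_like` above mirrors NumPy's `np.empty_like`; a minimal sketch of the same semantics using NumPy (an illustration of the behavior, not the internal `gen_array_ops` call):

```python
import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)
y = np.empty_like(x)  # same shape and dtype; contents are arbitrary until written
assert y.shape == x.shape and y.dtype == x.dtype
```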
def valid(self, value, level=[]): self.validation_failures = [] if value is None and self._optional: return True if not isinstance(value, dict): self.validation_failures.append(('.'.join(level), str(value))) return False bRet = True for k in self._nodes: lLevel = level[:] lLevel.append(k) if k not in value: if not self._nodes[k]._optional: self.validation_failures.append(('.'.join(lLevel), 'missing')) bRet = False continue if not self._nodes[k].valid(value[k], lLevel): self.validation_failures.extend(self._nodes[k].validation_failures) bRet = False continue if k in self._requires: for f in self._requires[k]: if f not in value or value[f] in ('0000-00-00','',None): self.validation_failures.append(('.'.join(lLevel), 'requires \'%s\' to also be set' % str(f))) bRet = False return bRet
Valid Checks if a value is valid based on the instance's values Arguments: value {mixed} -- The value to validate Returns: bool
juraj-google-style
def chmod_r(root: str, permission: int) -> None: os.chmod(root, permission) for (dirpath, dirnames, filenames) in os.walk(root): for d in dirnames: os.chmod(os.path.join(dirpath, d), permission) for f in filenames: os.chmod(os.path.join(dirpath, f), permission)
Recursive ``chmod``. Args: root: directory to walk down permission: e.g. ``stat.S_IWUSR``
codesearchnet
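A minimal usage sketch for `chmod_r`, assuming a `./build` directory exists (the path is hypothetical):

```python
import stat

# Grant the owner read/write/execute on ./build and everything beneath it.
# chmod_r applies the mode to the root, then walks and chmods every
# subdirectory and file.
chmod_r('./build', stat.S_IRWXU)
```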
def pre_run_cell(self, cellno, code): self.cellid = cellno import ast if findloop(ast.parse(code)): from acorn.logging.decoration import set_streamlining set_streamlining(True) from time import time self.pre = { "m": "loop", "a": None, "s": time(), "r": None, "c": code, }
Executes before the user-entered code in `ipython` is run. This intercepts loops and other problematic code that would produce lots of database entries and streamlines it to produce only a single entry. Args: cellno (int): the cell number that is about to be executed. code (str): python source code that is about to be executed.
juraj-google-style
def get_ini(self, incl_unset=False): configp = configparser.ConfigParser(allow_no_value=True) configp.read_dict(self._config) with StringIO() as config_ini: if self._parser: self._parser.set_defaults( **self.get_section(self.root_section) ) argparse_ini = argparse_to_ini( parser=self._parser, incl_unset=incl_unset ) return argparse_ini else: configp.write(config_ini) return config_ini.getvalue()
Return the config dictionary in INI format Args: incl_unset (bool): include variables with no defaults. Returns: str: string of the config file in INI format
juraj-google-style
def _display(port=None, height=None, print_message=False, display_handle=None): if (height is None): height = 800 if (port is None): infos = manager.get_all() if (not infos): raise ValueError("Can't display TensorBoard: no known instances running.") else: info = max(manager.get_all(), key=(lambda x: x.start_time)) port = info.port else: infos = [i for i in manager.get_all() if (i.port == port)] info = (max(infos, key=(lambda x: x.start_time)) if infos else None) if print_message: if (info is not None): message = 'Selecting TensorBoard with {data_source} (started {delta} ago; port {port}, pid {pid}).'.format(data_source=manager.data_source_from_info(info), delta=_time_delta_from_info(info), port=info.port, pid=info.pid) print(message) else: pass fn = {_CONTEXT_COLAB: _display_colab, _CONTEXT_IPYTHON: _display_ipython, _CONTEXT_NONE: _display_cli}[_get_context()] return fn(port=port, height=height, display_handle=display_handle)
Internal version of `display`. Args: port: As with `display`. height: As with `display`. print_message: True to print which TensorBoard instance was selected for display (if applicable), or False otherwise. display_handle: If not None, an IPython display handle into which to render TensorBoard.
codesearchnet
def set_flowcontrol(self, name, direction, value=None, default=False, disable=False): if (value is not None): if (value not in ['on', 'off']): raise ValueError('invalid flowcontrol value') if (direction not in ['send', 'receive']): raise ValueError('invalid direction specified') commands = [('interface %s' % name)] commands.append(self.command_builder(('flowcontrol %s' % direction), value=value, default=default, disable=disable)) return self.configure(commands)
Configures the interface flowcontrol value Args: name (string): The interface identifier. It must be a full interface name (i.e. Ethernet, not Et) direction (string): one of either 'send' or 'receive' value (boolean): True if the interface should enable flow control packet handling, otherwise False default (boolean): Specifies to default the interface flow control send or receive value disable (boolean): Specifies to disable the interface flow control send or receive value Returns: True if the operation succeeds otherwise False is returned
codesearchnet
def process(self, batch, device=None): padded = self.pad(batch) tensor = self.numericalize(padded, device=device) return tensor
Process a list of examples to create a torch.Tensor. Pad, numericalize, and postprocess a batch and create a tensor. Args: batch (list(object)): A list of objects from a batch of examples. Returns: torch.autograd.Variable: Processed object given the input and custom postprocessing Pipeline.
juraj-google-style
def coupling_efficiency(mode_solver, fibre_mfd, fibre_offset_x=0, fibre_offset_y=0, n_eff_fibre=1.441): etas = [] gaus = _make_gaussian(mode_solver._structure.xc, mode_solver._structure.yc, fibre_mfd, fibre_offset_x, fibre_offset_y) for (mode, n_eff) in zip(mode_solver.modes, mode_solver.n_effs): o = abs(_overlap(mode, gaus)) t = abs(transmission(n_eff, n_eff_fibre)) eta = (o * t) etas.append(eta) return etas
Finds the coupling efficiency between a solved fundamental mode and a fibre of given MFD. Args: mode_solver (_ModeSolver): Mode solver that has found a fundamental mode. fibre_mfd (float): The mode-field diameter (MFD) of the fibre. fibre_offset_x (float): Offset the fibre from the centre position of the window in x. Default is 0 (no offset). fibre_offset_y (float): Offset the fibre from the centre position of the window in y. Default is 0 (no offset). n_eff_fibre (float): The effective index of the fibre mode. Default is 1.441. Returns: list(float): The power coupling efficiency for each solved mode.
codesearchnet
def needle_statistics(infile): alignments = list(AlignIO.parse(infile, "emboss")) alignment_properties = defaultdict(dict) with open(infile) as f: line = f.readline() for i in range(len(alignments)): while line.rstrip() != "#=======================================": line = f.readline() if not line: raise StopIteration while line[0] == "#": parts = line[1:].split(":", 1) key = parts[0].lower().strip() if key == '1': a_id = parts[1].strip() if key == '2': b_id = parts[1].strip() if key == 'identity': ident_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split() ident_num = int(ident_parse[0].split('/')[0]) ident_percent = float(ident_parse[1]) alignment_properties[a_id + '_' + b_id]['identity'] = ident_num alignment_properties[a_id + '_' + b_id]['percent_identity'] = ident_percent if key == 'similarity': sim_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split() sim_num = int(sim_parse[0].split('/')[0]) sim_percent = float(sim_parse[1]) alignment_properties[a_id + '_' + b_id]['similarity'] = sim_num alignment_properties[a_id + '_' + b_id]['percent_similarity'] = sim_percent if key == 'gaps': gap_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split() gap_num = int(gap_parse[0].split('/')[0]) gap_percent = float(gap_parse[1]) alignment_properties[a_id + '_' + b_id]['gaps'] = gap_num alignment_properties[a_id + '_' + b_id]['percent_gaps'] = gap_percent if key == 'score': score = float(parts[1].strip()) alignment_properties[a_id + '_' + b_id]['score'] = score line = f.readline() return alignment_properties
Reads in a needle alignment file and spits out statistics of the alignment. Args: infile (str): Alignment file name Returns: dict: alignment_properties - a dictionary telling you the number of gaps, identity, etc.
juraj-google-style
def assign_nested_vars(variables, tensors, indices=None): if isinstance(variables, (tuple, list)): return tf.group(*[assign_nested_vars(variable, tensor) for (variable, tensor) in zip(variables, tensors)]) if (indices is None): return variables.assign(tensors) else: return tf.scatter_update(variables, indices, tensors)
Assign tensors to matching nested tuple of variables. Args: variables: Nested tuple or list of variables to update. tensors: Nested tuple or list of tensors to assign. indices: Batch indices to assign to; default to all. Returns: Operation.
codesearchnet
def abs_path_from_base(base_path, rel_path): return os.path.abspath( os.path.join( os.path.dirname(sys._getframe(1).f_code.co_filename), base_path, rel_path ) )
Join a base and a relative path and return an absolute path to the resulting location. Args: base_path: str Relative or absolute path to prepend to ``rel_path``. rel_path: str Path relative to the location of the module file from which this function is called. Returns: str : Absolute path to the location specified by ``rel_path``.
juraj-google-style
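A hedged usage sketch for `abs_path_from_base`; the layout below is hypothetical. Because the function anchors at the calling module's directory (via `sys._getframe(1)`), the result is stable regardless of the process working directory:

```python
# Called from a module at /project/pkg/mod.py:
data_path = abs_path_from_base('..', 'data/config.yaml')
# -> '/project/data/config.yaml' under the assumed layout
```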
async def search_participant(self, name, force_update=False): if force_update or self.participants is None: await self.get_participants() if self.participants is not None: for p in self.participants: if p.name == name: return p return None
Search a participant by (display) name |methcoro| Args: name: display name of the participant force_update (default=False): True to force an update to the Challonge API Returns: Participant: None if not found Raises: APIException
juraj-google-style
def get_credentials(self): with self.AUTHENTICATION_LOCK: log.info('Starting authentication for %s', self.target) store = oauth2client.file.Storage(self.credentials_path) credentials = store.get() if ((not credentials) or credentials.invalid): log.info('No valid login. Starting OAUTH flow.') flow = oauth2client.client.flow_from_clientsecrets(self.client_secret_path, self.SCOPES) flow.user_agent = self.APPLICATION_NAME flags = oauth2client.tools.argparser.parse_args([]) credentials = oauth2client.tools.run_flow(flow, store, flags) log.info('Storing credentials to %r', self.credentials_path) return credentials
Gets valid user credentials from storage. If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials. Returns: Credentials, the obtained credential.
codesearchnet
def _get_example_from_basic_type(type): if (type == 'integer'): return [42, 24] elif (type == 'number'): return [5.5, 5.5] elif (type == 'string'): return ['string', 'string2'] elif (type == 'datetime'): return ['2015-08-28T09:02:57.481Z', '2015-08-28T09:02:57.481Z'] elif (type == 'boolean'): return [False, True] elif (type == 'null'): return ['null', 'null']
Get example from the given type. Args: type: the type you want an example of. Returns: An array with two example values of the given type.
codesearchnet
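A quick sketch of `_get_example_from_basic_type`; note that an unrecognized type falls through every branch and returns None implicitly:

```python
assert _get_example_from_basic_type('integer') == [42, 24]
assert _get_example_from_basic_type('boolean') == [False, True]
print(_get_example_from_basic_type('object'))  # None: no branch matches
```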
def drop_if(df, fun): def _filter_f(col): try: return fun(df[col]) except: return False cols = list(filter(_filter_f, df.columns)) return df.drop(cols, axis=1)
Drops columns where fun(ction) is true Args: df: a pandas DataFrame fun: a function that will be applied to columns Returns: DataFrame without the dropped columns
juraj-google-style
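A minimal sketch for `drop_if`: drop columns that are entirely null. `fun` receives each column as a pandas Series:

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [None, None], 'c': ['x', 'y']})
cleaned = drop_if(df, lambda col: col.isnull().all())
print(list(cleaned.columns))  # ['a', 'c'] -- 'b' was all-null and dropped
```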
def as_functor(func: Callable, ignore_extra_args: bool=False) -> Functor: return functor_class(func)(ignore_extra_args=ignore_extra_args)
Make a functor object from a regular python function. NOTE(daiyip): This method is designed to create an on-the-go functor object, usually for lambdas. To create a reusable functor class, please use the `functor_class` method. Args: func: A regular python function. ignore_extra_args: If True, extra arguments that are not acceptable by `func` will be ignored. Returns: Functor object from the input function.
github-repos
def push_file(self, local_source, remote_dir): remote_dest = ((remote_dir + '/') + os.path.basename(local_source)) try: self.makedirs(remote_dir, exist_ok=True) except IOError as e: logger.exception('Pushing {0} to {1} failed'.format(local_source, remote_dir)) if (e.errno == 2): raise BadScriptPath(e, self.hostname) elif (e.errno == 13): raise BadPermsScriptPath(e, self.hostname) else: logger.exception('File push failed due to SFTP client failure') raise FileCopyException(e, self.hostname) try: self.sftp_client.put(local_source, remote_dest, confirm=True) self.sftp_client.chmod(remote_dest, 511) except Exception as e: logger.exception('File push from local source {} to remote destination {} failed'.format(local_source, remote_dest)) raise FileCopyException(e, self.hostname) return remote_dest
Transport a local file to a directory on a remote machine Args: - local_source (string): Path - remote_dir (string): Remote path Returns: - str: Path to copied file on remote machine Raises: - BadScriptPath : if script path on the remote side is bad - BadPermsScriptPath : You do not have perms to make the channel script dir - FileCopyException : FileCopy failed.
codesearchnet
def decorate_set_on_listener(prototype): def add_annotation(method): method._event_info = {} method._event_info['name'] = method.__name__ method._event_info['prototype'] = prototype return method return add_annotation
Private decorator for use in the editor. Allows the Editor to create listener methods. Args: prototype (str): The list of parameters for the listener method (e.g. "(self, new_value)")
juraj-google-style
def _ParseRecords(self, parser_mediator, evtx_file): for record_index in range(evtx_file.number_of_records): if parser_mediator.abort: break try: evtx_record = evtx_file.get_record(record_index) self._ParseRecord(parser_mediator, record_index, evtx_record) except IOError as exception: parser_mediator.ProduceExtractionWarning( 'unable to parse event record: {0:d} with error: {1!s}'.format( record_index, exception)) for record_index in range(evtx_file.number_of_recovered_records): if parser_mediator.abort: break try: evtx_record = evtx_file.get_recovered_record(record_index) self._ParseRecord( parser_mediator, record_index, evtx_record, recovered=True) except IOError as exception: parser_mediator.ProduceExtractionWarning(( 'unable to parse recovered event record: {0:d} with error: ' '{1!s}').format(record_index, exception))
Parses Windows XML EventLog (EVTX) records. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. evtx_file (pyevt.file): Windows XML EventLog (EVTX) file.
juraj-google-style
def linear(self, x): with tf.name_scope("presoftmax_linear"): batch_size = tf.shape(x)[0] length = tf.shape(x)[1] x = tf.reshape(x, [-1, self.hidden_size]) logits = tf.matmul(x, self.shared_weights, transpose_b=True) return tf.reshape(logits, [batch_size, length, self.vocab_size])
Computes logits by running x through a linear layer. Args: x: A float32 tensor with shape [batch_size, length, hidden_size] Returns: float32 tensor with shape [batch_size, length, vocab_size].
juraj-google-style
def get_environ(cls, prefix): return ((key[(len(prefix) + 1):], value) for (key, value) in os.environ.items() if key.startswith(('%s_' % prefix)))
Retrieves environment variables from a namespace. Args: prefix (str): The prefix, without a trailing underscore. Returns: generator: Pairs of environment variable keys (with the prefix stripped) and values.
codesearchnet
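A usage sketch for `get_environ`, assuming the method is exposed as a classmethod on a config class (here called `Config`, a hypothetical name). The prefix plus underscore is stripped from each key:

```python
import os

os.environ['MYAPP_HOST'] = 'localhost'
os.environ['MYAPP_PORT'] = '8080'
print(dict(Config.get_environ('MYAPP')))
# {'HOST': 'localhost', 'PORT': '8080'}
```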
def swipe(self, x1, y1, x2, y2, duration=0.5): scale = self.scale x1, y1, x2, y2 = x1/scale, y1/scale, x2/scale, y2/scale self.session.swipe(x1, y1, x2, y2, duration)
Simulate swipe operation Args: x1, y1(int): from position x2, y2(int): to position duration(float): swipe duration, unit seconds
juraj-google-style
def compare(expr, value, regex_expr=False): if (expr == value): return True negate = False if isinstance(expr, str): negate = expr.startswith(NEGATE) expr = (strip_negate(expr) if negate else expr) try: test(expr, value, regex_expr=regex_expr) except Exception as err: if negate: return True else: raise err return True
Compares a string or regular expression against a given value. Arguments: expr (str|regex): string or regular expression value to compare. value (str): value to compare against. regex_expr (bool, optional): enables string based regex matching. Raises: AssertionError: in case of assertion error. Returns: bool
codesearchnet
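A minimal sketch for `compare`. The first call passes via plain equality, which short-circuits; the second assumes the helper `test` performs regex matching when `regex_expr=True`, as the docstring indicates:

```python
assert compare('hello', 'hello')  # exact match short-circuits

# Assumed behavior of test() under regex_expr=True:
assert compare('hello.*', 'hello world', regex_expr=True)
```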
def firmware_version(self): buf = (ctypes.c_char * self.MAX_BUF_SIZE)() self._dll.JLINKARM_GetFirmwareString(buf, self.MAX_BUF_SIZE) return ctypes.string_at(buf).decode()
Returns a firmware identification string of the connected J-Link. It consists of the following: - Product Name (e.g. J-Link) - The string: compiled - Compile date and time. - Optional additional information. Args: self (JLink): the ``JLink`` instance Returns: Firmware identification string.
juraj-google-style
def poll(self, channel_id=None, json=None, **kwargs): path = '/event-service/v1/channels/{}/poll'.format(channel_id) r = self._httpclient.request(method='POST', url=self.url, json=json, path=path, **kwargs) return r
Read one or more events from a channel. Reads events (log records) from the identified channel. Events are read in chronological order. Args: channel_id (str): The channel ID. json (dict): Payload/request body. **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters. Returns: requests.Response: Requests Response() object. Examples: Refer to ``event_poll.py`` example.
codesearchnet
def _GetTimeValue(self, name): timestamp = getattr(self._tsk_file.info.meta, name, None) if self._file_system_type in self._TSK_HAS_NANO_FS_TYPES: name_fragment = '{0:s}_nano'.format(name) fraction_of_second = getattr( self._tsk_file.info.meta, name_fragment, None) else: fraction_of_second = None return TSKTime(timestamp=timestamp, fraction_of_second=fraction_of_second)
Retrieves a date and time value. Args: name (str): name of the date and time value, for example "atime" or "mtime". Returns: dfdatetime.DateTimeValues: date and time value or None if not available.
juraj-google-style
def get_commands_in_namespace(namespace=None, level=1): from ..command import Command commands = {} if namespace is None: frame = inspect.stack()[level][0] namespace = frame.f_globals elif inspect.ismodule(namespace): namespace = vars(namespace) for name in namespace: obj = namespace[name] if isinstance(obj, Command): commands[name] = obj return OrderedDict((name, commands[name]) for name in sorted(commands))
Get commands in namespace. Args: namespace (dict|module): Typically a module. If not passed, the globals from the call site will be used. level (int): If not called from the global scope, set this appropriately to account for the call stack. Returns: OrderedDict: The commands found in the namespace, ordered by name. Can be used to create ``__all__`` lists:: __all__ = list(get_commands_in_namespace())
juraj-google-style
def get_current(self, cycle=None, dataset_number=None, full=True): dataset_number = self._validate_dataset_number(dataset_number) if dataset_number is None: self._report_empty_dataset() return cycle_index_header = self.headers_normal.cycle_index_txt current_header = self.headers_normal.current_txt test = self.datasets[dataset_number].dfdata if cycle: self.logger.debug(f"getting current for cycle {cycle}") c = test[(test[cycle_index_header] == cycle)] if not self.is_empty(c): v = c[current_header] return v else: if not full: self.logger.debug( "getting a list of current-curves for all cycles" ) v = [] no_cycles = np.amax(test[cycle_index_header]) for j in range(1, no_cycles + 1): txt = "Cycle %i: " % j self.logger.debug(txt) c = test[(test[cycle_index_header] == j)] v.append(c[current_header]) else: self.logger.debug("getting all current-curves ") v = test[current_header] return v
Returns current (in mA). Args: cycle: cycle number (all cycles if None) dataset_number: first dataset if None full: valid only for cycle=None (i.e. all cycles); returns the full pandas.Series if True, else a list of pandas.Series Returns: pandas.Series (or list of pandas.Series if cycle=None and full=False)
juraj-google-style
def enumeration(*values, **kwargs): if (not (values and all(((isinstance(value, string_types) and value) for value in values)))): raise ValueError(('expected a non-empty sequence of strings, got %s' % values)) if (len(values) != len(set(values))): raise ValueError(('enumeration items must be unique, got %s' % values)) attrs = {value: value for value in values} attrs.update({'_values': list(values), '_default': values[0], '_case_sensitive': kwargs.get('case_sensitive', True), '_quote': kwargs.get('quote', False)}) return type(str('Enumeration'), (Enumeration,), attrs)()
Create an |Enumeration| object from a sequence of values. Call ``enumeration`` with a sequence of (unique) strings to create an Enumeration object: .. code-block:: python #: Specify the horizontal alignment for rendering text TextAlign = enumeration("left", "right", "center") Args: values (str) : string enumeration values, passed as positional arguments The order of arguments is the order of the enumeration, and the first element will be considered the default value when used to create |Enum| properties. Keyword Args: case_sensitive (bool, optional) : Whether validation should consider case or not (default: True) quote (bool, optional): Whether values should be quoted in the string representations (default: False) Raises: ValueError if values empty, if any value is not a string or not unique Returns: Enumeration
codesearchnet
def username(self, value): self._username = value self._connectionXML.set('username', value)
Set the connection's username property. Args: value: New username value. String. Returns: Nothing.
codesearchnet
def build_kalman_mean_step(get_transition_matrix_for_timestep, get_transition_noise_for_timestep, get_observation_matrix_for_timestep, get_observation_noise_for_timestep): def mean_step(previous_means, t): 'Single step of prior mean recursion.' (previous_latent_mean, _) = previous_means latent_mean = _propagate_mean(previous_latent_mean, get_transition_matrix_for_timestep((t - 1)), get_transition_noise_for_timestep((t - 1))) observation_mean = _propagate_mean(latent_mean, get_observation_matrix_for_timestep(t), get_observation_noise_for_timestep(t)) return (latent_mean, observation_mean) return mean_step
Build a callable that performs one step of Kalman mean recursion. Args: get_transition_matrix_for_timestep: callable taking a timestep as an integer `Tensor` argument, and returning a `LinearOperator` of shape `[latent_size, latent_size]`. get_transition_noise_for_timestep: callable taking a timestep as an integer `Tensor` argument, and returning a `MultivariateNormalLinearOperator` of event shape `[latent_size]`. get_observation_matrix_for_timestep: callable taking a timestep as an integer `Tensor` argument, and returning a `LinearOperator` of shape `[observation_size, observation_size]`. get_observation_noise_for_timestep: callable taking a timestep as an integer `Tensor` argument, and returning a `MultivariateNormalLinearOperator` of event shape `[observation_size]`. Returns: kalman_mean_step: a callable that computes latent state and observation means at time `t`, given latent mean at time `t-1`.
codesearchnet
def inspect_plugin(self, name): url = self._url('/plugins/{0}/json', name) return self._result(self._get(url), True)
Retrieve plugin metadata. Args: name (string): The name of the plugin. The ``:latest`` tag is optional, and is the default if omitted. Returns: A dict containing plugin info
codesearchnet
def searchsorted(sorted_sequence, values, side='left'): if any_symbolic_tensors((sorted_sequence, values)): return SearchSorted(side=side).symbolic_call(sorted_sequence, values) sorted_sequence = backend.convert_to_tensor(sorted_sequence) values = backend.convert_to_tensor(values) return backend.numpy.searchsorted(sorted_sequence, values, side=side)
Perform a binary search, returning indices for insertion of `values` into `sorted_sequence` that maintain the sorting order. Args: sorted_sequence: 1-D input tensor, sorted along the innermost dimension. values: N-D tensor of query insertion values. side: 'left' or 'right', specifying the direction in which to insert for the equality case (tie-breaker). Returns: Tensor of insertion indices of same shape as `values`.
github-repos
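The `side` semantics of `searchsorted` match `np.searchsorted`; a quick illustration with NumPy:

```python
import numpy as np

seq = np.array([1, 3, 5, 7])
vals = np.array([4, 5])
print(np.searchsorted(seq, vals, side='left'))   # [2 2]: insert before equals
print(np.searchsorted(seq, vals, side='right'))  # [2 3]: insert after equals
```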
def Benchmark(tf_bench, builder_fn, use_xla_jit, device, separate_compiled_gradients=False): with ops.Graph().as_default(): name = None targets = [] with ops.device(device): fetches = [] jit_scope = jit.experimental_jit_scope with jit_scope(compile_ops=use_xla_jit, separate_compiled_gradients=separate_compiled_gradients): name, fetches = builder_fn() for fetch in fetches: targets.append(array_ops.identity(fetch).op) config = config_pb2.ConfigProto(allow_soft_placement=True) with session.Session(config=config) as sess: sess.run(variables.global_variables_initializer()) xla = 'xla_' if use_xla_jit else '' tf_bench.run_op_benchmark(sess, targets, name='%s_%s%s' % (name, xla, device))
Build a graph and run benchmarks against it, with or without XLA. Args: tf_bench: An instance of tf.test.Benchmark, used to run the benchmark. builder_fn: A function that builds a graph when invoked, and returns (name, fetches), where name is the name of the test, and fetches is a list of tensors to fetch as output. use_xla_jit: If true compile with the XLA JIT, otherwise use regular TF. device: The tensorflow device to run on, e.g. "cpu", "gpu". separate_compiled_gradients: If true put each gradient subgraph into a separate compilation scope. This gives fine-grained control over which portions of the graph will be compiled as a single unit. Compiling gradients separately may yield better performance for some graphs. The scope is named based on the scope of the forward computation as well as the name of the gradients. As a result, the gradients will be compiled in a scope that is separate from both the forward computation, and from other gradients.
github-repos
def activate(self, uid=None): if (uid is not None): if (not isinstance(uid, six.string_types)): raise TypeError('uid must be a string') result = self.proxy.activate(uid) status = result.result_status.value if (status == enums.ResultStatus.SUCCESS): return else: reason = result.result_reason.value message = result.result_message.value raise exceptions.KmipOperationFailure(status, reason, message)
Activate a managed object stored by a KMIP appliance. Args: uid (string): The unique ID of the managed object to activate. Optional, defaults to None. Returns: None Raises: ClientConnectionNotOpen: if the client connection is unusable KmipOperationFailure: if the operation result is a failure TypeError: if the input argument is invalid
codesearchnet
def _find_classes(self, dir): if sys.version_info >= (3, 5): classes = [d.name for d in os.scandir(dir) if d.is_dir()] else: classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] classes.sort() class_to_idx = {classes[i]: i for i in range(len(classes))} return classes, class_to_idx
Finds the class folders in a dataset. Args: dir (string): Root directory path. Returns: tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary. Ensures: No class is a subdirectory of another.
juraj-google-style
def migrate(belstr: str) -> str: bo.ast = bel.lang.partialparse.get_ast_obj(belstr, '2.0.0') return migrate_ast(bo.ast).to_string()
Migrate BEL 1 to 2.0.0 Args: belstr: BEL 1 statement string Returns: str: BEL 2.0.0 statement string
codesearchnet
def register_domain(self, domain=0, tokenizer=None, trie=None): self.domains[domain] = IntentDeterminationEngine(tokenizer=tokenizer, trie=trie)
Register a domain with the intent engine. Args: tokenizer(tokenizer): The tokenizer you wish to use. trie(Trie): the Trie() you wish to use. domain(str): a string representing the domain you wish to add
codesearchnet
def _get_func_name(): return tf_inspect.stack()[1][3]
Get the name of current function. Returns: String that is the name of current function.
github-repos
def solve(self): hierarchy = type_match.get_all_subclasses([self.ast, self.builtins]) factory_protocols = type_match.TypeMatch(hierarchy) factory_partial = type_match.TypeMatch(hierarchy) solver_protocols = factory_protocols.solver solver_partial = factory_partial.solver unknown_classes = set() partial_classes = set() complete_classes = set() for cls in self.ast.classes: if is_unknown(cls): solver_protocols.register_variable(cls.name) solver_partial.register_variable(cls.name) unknown_classes.add(cls) elif is_partial(cls): partial_classes.add(cls) else: complete_classes.add(cls) protocol_classes_and_aliases = set(self.protocols.classes) for alias in self.protocols.aliases: if not isinstance(alias.type, pytd.AnythingType) and alias.name != 'protocols.Protocol': protocol_classes_and_aliases.add(alias.type.cls) for protocol in protocol_classes_and_aliases: for unknown in unknown_classes: self.match_unknown_against_protocol(factory_protocols, solver_protocols, unknown, protocol) for complete in complete_classes.union(self.builtins.classes): for partial in partial_classes: if escape.unpack_partial(partial.name) == complete.name: self.match_partial_against_complete(factory_partial, solver_partial, partial, complete) partial_functions = set() complete_functions = set() for f in self.ast.functions: if is_partial(f): partial_functions.add(f) else: complete_functions.add(f) for partial in partial_functions: for complete in complete_functions.union(self.builtins.functions): if escape.unpack_partial(partial.name) == complete.name: self.match_call_record(factory_partial, solver_partial, partial, complete) log.info('=========== Equations to solve =============\n%s', solver_protocols) log.info('=========== Equations to solve (end) =======') solved_protocols = solver_protocols.solve() log.info('=========== Call trace equations to solve =============\n%s', solver_partial) log.info('=========== Call trace equations to solve (end) =======') solved_partial = solver_partial.solve() merged_solution = {} for unknown in itertools.chain(solved_protocols, solved_partial): if unknown in solved_protocols and unknown in solved_partial: merged_solution[unknown] = solved_protocols[unknown].union(solved_partial[unknown]) merged_solution[unknown].discard('?') elif unknown in solved_protocols: merged_solution[unknown] = solved_protocols[unknown] else: merged_solution[unknown] = solved_partial[unknown] return merged_solution
Solve the equations generated from the pytd. Returns: A dictionary (str->str), mapping unknown class names to known class names. Raises: AssertionError: If we detect an internal error.
github-repos
def reply_code_tuple(code: int) -> Tuple[(int, int, int)]: return (code // 100, (code // 10) % 10, code % 10)
Return the reply code as a tuple. Args: code: The reply code. Returns: Each item in the tuple is one digit of the code.
codesearchnet
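A quick sanity check of the digit decomposition in `reply_code_tuple` (the return expression above is a reconstruction of the truncated source; floor division and modulo are the only decomposition consistent with the docstring):

```python
assert reply_code_tuple(550) == (5, 5, 0)
assert reply_code_tuple(200) == (2, 0, 0)
# The leading digit alone is often enough to classify an FTP/SMTP reply:
# 2xx success, 4xx transient failure, 5xx permanent failure.
```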
def process(self): client = self._get_client_by_hostname(self.host) self._await_flow(client, self.flow_id) collected_flow_data = self._download_files(client, self.flow_id) if collected_flow_data: print('{0:s}: Downloaded: {1:s}'.format(self.flow_id, collected_flow_data)) fqdn = client.data.os_info.fqdn.lower() self.state.output.append((fqdn, collected_flow_data))
Collect the results. Raises: DFTimewolfError: if no files specified
codesearchnet
def TerminateFlow(client_id, flow_id, reason=None, flow_state=rdf_flow_objects.Flow.FlowState.ERROR): to_terminate = [data_store.REL_DB.ReadFlowObject(client_id, flow_id)] while to_terminate: next_to_terminate = [] for rdf_flow in to_terminate: _TerminateFlow(rdf_flow, reason=reason, flow_state=flow_state) next_to_terminate.extend(data_store.REL_DB.ReadChildFlowObjects(rdf_flow.client_id, rdf_flow.flow_id)) to_terminate = next_to_terminate
Terminates a flow and all of its children. Args: client_id: Client ID of a flow to terminate. flow_id: Flow ID of a flow to terminate. reason: String with a termination reason. flow_state: Flow state to be assigned to a flow after termination. Defaults to FlowState.ERROR.
codesearchnet
def fasta_verifier(entries, ambiguous=False): if ambiguous: regex = '^>.+{0}[ACGTURYKMSWBDHVNX]+{0}$'.format(os.linesep) else: regex = '^>.+{0}[ACGTU]+{0}$'.format(os.linesep) delimiter = '{0}'.format(os.linesep) for entry in entries: try: entry_verifier([entry.write()], regex, delimiter) except FormatError as error: if (error.part == 0): msg = 'Unknown Header Error with {0}'.format(entry.id) raise FormatError(message=msg) elif ((error.part == 1) and ambiguous): msg = '{0} contains a base not in [ACGTURYKMSWBDHVNX]'.format(entry.id) raise FormatError(message=msg) elif ((error.part == 1) and (not ambiguous)): msg = '{0} contains a base not in [ACGTU]'.format(entry.id) raise FormatError(message=msg) else: msg = '{0}: Unknown Error: Likely a Bug'.format(entry.id) raise FormatError(message=msg)
Raises error if invalid FASTA format detected Args: entries (list): A list of FastaEntry instances ambiguous (bool): Permit ambiguous bases, i.e. permit non-ACGTU bases Raises: FormatError: Error when FASTA format incorrect with descriptive message Example: >>> from bio_utils.iterators import fasta_iter >>> import os >>> entries = r'>entry1{0}AAGGATTCG{0}' \ ... r'>entry{0}AGGTCCCCCG{0}' \ ... r'>entry3{0}GCCTAGC{0}'.format(os.linesep) >>> fasta_entries = fasta_iter(iter(entries.split(os.linesep))) >>> fasta_verifier(fasta_entries)
codesearchnet
def __init__(self, column_names=None, title=None): super(BaseTableView, self).__init__() self._columns = column_names or [] self._number_of_columns = len(self._columns) self._rows = [] self._title = title
Initializes a table view. Args: column_names (Optional[list[str]]): column names. title (Optional[str]): title.
juraj-google-style
def _get_tensor_details(self, tensor_index, subgraph_index): tensor_index = int(tensor_index) subgraph_index = int(subgraph_index) tensor_name = self._interpreter.TensorName(tensor_index, subgraph_index) tensor_size = self._interpreter.TensorSize(tensor_index, subgraph_index) tensor_size_signature = self._interpreter.TensorSizeSignature(tensor_index, subgraph_index) tensor_type = self._interpreter.TensorType(tensor_index, subgraph_index) tensor_quantization = self._interpreter.TensorQuantization(tensor_index, subgraph_index) tensor_quantization_params = self._interpreter.TensorQuantizationParameters(tensor_index, subgraph_index) tensor_sparsity_params = self._interpreter.TensorSparsityParameters(tensor_index, subgraph_index) if not tensor_type: raise ValueError('Could not get tensor details') details = {'name': tensor_name, 'index': tensor_index, 'shape': tensor_size, 'shape_signature': tensor_size_signature, 'dtype': tensor_type, 'quantization': tensor_quantization, 'quantization_parameters': {'scales': tensor_quantization_params[0], 'zero_points': tensor_quantization_params[1], 'quantized_dimension': tensor_quantization_params[2]}, 'sparsity_parameters': tensor_sparsity_params} return details
Gets tensor details. Args: tensor_index: Tensor index of tensor to query. subgraph_index: Index of the subgraph. Returns: A dictionary containing the following fields of the tensor: 'name': The tensor name. 'index': The tensor index in the subgraph. 'shape': The shape of the tensor. 'quantization': Deprecated, use 'quantization_parameters'. This field only works for per-tensor quantization, whereas 'quantization_parameters' work in all cases. 'quantization_parameters': The parameters used to quantize the tensor: 'scales': List of scales (one if per-tensor quantization) 'zero_points': List of zero_points (one if per-tensor quantization) 'quantized_dimension': Specifies the dimension of per-axis quantization, in the case of multiple scales/zero_points. Raises: ValueError: If tensor_index is invalid.
github-repos
def ParseFileObject(self, parser_mediator, file_object): regf_file = pyregf.file() try: regf_file.open_file_object(file_object) except IOError: return root_key = regf_file.get_root_key() if root_key is None: regf_file.close() return root_file_key = root_key.get_sub_key_by_path(self._AMCACHE_ROOT_FILE_KEY) if root_file_key is None: regf_file.close() return for volume_key in root_file_key.sub_keys: for am_entry in volume_key.sub_keys: self._ProcessAMCacheFileKey(am_entry, parser_mediator) root_program_key = root_key.get_sub_key_by_path( self._AMCACHE_ROOT_PROGRAM_KEY) if root_program_key is None: regf_file.close() return for am_entry in root_program_key.sub_keys: self._ProcessAMCacheProgramKey(am_entry, parser_mediator) regf_file.close()
Parses an Amcache.hve file for events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object.
juraj-google-style
def calculate_entropy(self, entropy_string): total = 0 for char in entropy_string: if char.isalpha(): prob = self.frequency[char.lower()] total += - math.log(prob) / math.log(2) logging.debug("Entropy score: {0}".format(total)) return total
Calculates the entropy of a string based on known frequency of English letters. Args: entropy_string: A str representing the string to calculate. Returns: A float with the total entropy of the string (higher is better).
juraj-google-style
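A standalone sketch of the same scoring as `calculate_entropy`, with an assumed, deliberately partial letter-frequency table (the class version reads the full table from `self.frequency`):

```python
import math

frequency = {'e': 0.127, 't': 0.091, 'a': 0.082}  # assumed partial table

def score(text):
    total = 0.0
    for char in text:
        if char.isalpha() and char.lower() in frequency:
            total += -math.log(frequency[char.lower()], 2)  # -log2(p)
    return total

print(score('eat'))  # rarer letters contribute more bits, so they score higher
```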
def list_merge(list_a, list_b): result = [] for item in list_a: if (not (item in result)): result.append(item) for item in list_b: if (not (item in result)): result.append(item) return result
Merge two lists without duplicating items Args: list_a: list list_b: list Returns: New list with deduplicated items from list_a and list_b
codesearchnet
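A quick sketch for `list_merge`; order is preserved, with items from `list_a` first and then the unseen items of `list_b`:

```python
print(list_merge([1, 2, 3], [3, 4, 2]))  # [1, 2, 3, 4]
```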
def get(self, key=None, indices=None, name=None): if key is None: return self._popitem(indices=indices, name=name) else: return self._pop(key, indices=indices, name=name)
If the key is provided, the associated (key, value) is returned from the staging area. If the key is not in the staging area, this method will block until the associated (key, value) is inserted. If no key is provided and the staging area is ordered, the (key, value) with the smallest key will be returned. Otherwise, a random (key, value) will be returned. If the staging area is empty when this operation executes, it will block until there is an element to dequeue. Args: key: Key associated with the required data (Optional) indices: Partial list of tensors to retrieve (optional). A list of integer or string indices. String indices are only valid if the Staging Area has names associated with it. name: A name for the operation (optional) Returns: The created op
github-repos
def is_adb_available(): ret, out, err = utils.run_command('which adb', shell=True) clean_out = out.decode('utf-8').strip() if clean_out: return True return False
Checks if adb is available as a command line tool. Returns: True if adb binary is available in console, False otherwise.
github-repos
def search(self, scope, search, **kwargs): data = {'scope': scope, 'search': search} path = '/projects/%s/search' % self.get_id() return self.manager.gitlab.http_list(path, query_data=data, **kwargs)
Search the project resources matching the provided string. Args: scope (str): Scope of the search search (str): Search string **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabSearchError: If the server failed to perform the request Returns: GitlabList: A list of dicts describing the resources found.
juraj-google-style
def discretize(self, contact_id=0, accuracy=0.004, dt=0.001): if not self.event_points: return [] events = [] action_dt = accuracy / self.speed dt = dt or action_dt ep0 = self.event_points[0] for _ in range(int(ep0[0] / dt)): events.append(['s', dt]) events.append(['d', ep0[1], contact_id]) for i, ep in enumerate(self.event_points[1:]): prev_ts = self.event_points[i][0] curr_ts = ep[0] p0 = self.event_points[i][1] p1 = ep[1] if p0 == p1: for _ in range(int((curr_ts - prev_ts) / dt)): events.append(['s', dt]) else: dpoints = track_sampling([p0, p1], accuracy) for p in dpoints: events.append(['m', p, contact_id]) for _ in range(int(action_dt / dt)): events.append(['s', dt]) events.append(['u', contact_id]) return events
Sample this motion track into discretized motion events. Args: contact_id: contact point id accuracy: motion minimum difference in space dt: sample time difference
juraj-google-style
def pop(self, key, default=None): return self._dictionary.pop(key.lower(), default)
Remove the key and return the associated value or default if not found Args: key (str): The key to remove default (obj): The value to return if key is not present
juraj-google-style
def copy_update(pb_message, **kwds): result = pb_message.__class__() result.CopyFrom(pb_message) for k, v in kwds.items(): setattr(result, k, v) return result
Returns a copy of the PB object, with some fields updated. Args: pb_message: The protobuf message to copy. **kwds: Field names and values to set on the copy. Returns: A copy of ``pb_message`` with the given fields updated.
juraj-google-style
def call_plugins(self, step): for plugin in self.plugins: try: getattr(plugin, step)() except AttributeError: self.logger.debug("{} doesn't exist on plugin {}".format(step, plugin)) except TypeError: self.logger.debug('{} on plugin {} is not callable'.format(step, plugin))
For each plugins, check if a "step" method exist on it, and call it Args: step (str): The method to search and call on each plugin
codesearchnet
def is44(msg): if allzeros(msg): return False d = hex2bin(data(msg)) if wrongstatus(d, 5, 6, 23): return False if wrongstatus(d, 35, 36, 46): return False if wrongstatus(d, 47, 48, 49): return False if wrongstatus(d, 50, 51, 56): return False if bin2int(d[0:4]) > 4: return False vw = wind44(msg) if vw is not None and vw[0] > 250: return False temp, temp2 = temp44(msg) if min(temp, temp2) > 60 or max(temp, temp2) < -80: return False return True
Check if a message is likely to be BDS code 4,4. Meteorological routine air report Args: msg (String): 28-character hexadecimal message string Returns: bool: True or False
juraj-google-style
def __init__( self, name, aliases=None, description=None, false_value=0, urls=None): super(BooleanDefinition, self).__init__( name, aliases=aliases, description=description, urls=urls) self.false_value = false_value self.true_value = None
Initializes a boolean data type definition. Args: name (str): name. aliases (Optional[list[str]]): aliases. description (Optional[str]): description. false_value (Optional[int]): value that represents false. urls (Optional[list[str]]): URLs.
juraj-google-style
def _free_array(self, handle: int): with self._lock: if self._arrays[handle] is not None: self._arrays[handle] = None self._count -= 1
Frees the memory for the array with the given handle. Args: handle: The handle of the array whose memory should be freed. This handle must come from the _create_array method.
juraj-google-style
def _make_model(self, data, key=None): if data['deleted'] and not self.adapter.want_deleted: raise ObjectDoesNotExist('Deleted object returned') model = self._model_class(self._current_context, _pass_perm_checks=self._pass_perm_checks) model.setattr('key', ub_to_str(key) if key else ub_to_str(data.get('key'))) model = model.set_data(data, from_db=True) model._initial_data = model.clean_value() return model
Creates a model instance with the given data. Args: data: Model data returned from DB. key: Object key Returns: pyoko.Model object.
juraj-google-style
def delete(self, resource, timeout=(- 1)): self._client.delete(resource=resource, timeout=timeout)
Delete all the labels for a resource. Args: resource (dict): Object to delete. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion.
codesearchnet
def write(self, source=None, **kwargs): if (not source): source = self.msg return self._writer.write(source=source, **kwargs)
Wrapper to call the writer's write method if present. Args: source(pandasdmx.model.Message, iterable): stuff to be written. If a :class:`pandasdmx.model.Message` is given, the writer itself must determine what to write unless specified in the keyword arguments. If an iterable is given, the writer should write each item. Keyword arguments may specify what to do with the output depending on the writer's API. Defaults to self.msg. Returns: type: anything the writer returns.
codesearchnet
def _get_object_from_python_path(python_path): python_path = python_path.split('.') module_path = python_path[:-1] object_class = python_path[-1] if isinstance(module_path, list): module_path = '.'.join(module_path) module = import_module(module_path) schema = getattr(module, object_class) if isclass(schema): schema = schema() return schema
Method that will fetch a Marshmallow schema from a path to it. Args: python_path (str): The string path to the Marshmallow schema. Returns: marshmallow.Schema: The schema matching the provided path. Raises: TypeError: This is raised if the specified object isn't a Marshmallow schema.
juraj-google-style
def load_json(json_filespec): json_fh = open(json_filespec) config_dict = json.load(json_fh) json_fh.close() return config_dict
Loads JSON from a config file Args: json_filespec: path/to/file.json Returns: a dict made from the JSON read, if successful Raises: IOError if the file could not be opened ValueError if the JSON could not be read successfully RuntimeError if something else went wrong
juraj-google-style
def _SendItem(self, zmq_socket, item, block=True): try: logger.debug('{0:s} sending item'.format(self.name)) if block: zmq_socket.send_pyobj(item) else: zmq_socket.send_pyobj(item, zmq.DONTWAIT) logger.debug('{0:s} sent item'.format(self.name)) return True except zmq.error.Again: logger.debug('{0:s} could not send an item'.format(self.name)) except zmq.error.ZMQError as exception: if exception.errno == errno.EINTR: logger.error( 'ZMQ syscall interrupted in {0:s}.'.format( self.name)) return False
Attempts to send an item to a ZeroMQ socket. Args: zmq_socket (zmq.Socket): used to the send the item. item (object): sent on the queue. Will be pickled prior to sending. block (Optional[bool]): whether the push should be performed in blocking or non-blocking mode. Returns: bool: whether the item was sent successfully.
juraj-google-style
def set_shutter_level(self, level=0.0): data = {"channelIndex": 1, "deviceId": self.id, "shutterLevel": level} return self._restCall("device/control/setShutterLevel", body=json.dumps(data))
sets the shutter level Args: level(float): the new level of the shutter. 0.0 = open, 1.0 = closed Returns: the result of the _restCall
juraj-google-style
def pkcs12_key_as_pem(private_key_bytes, private_key_password): private_key_password = _helpers._to_bytes(private_key_password) pkcs12 = crypto.load_pkcs12(private_key_bytes, private_key_password) return crypto.dump_privatekey(crypto.FILETYPE_PEM, pkcs12.get_privatekey())
Convert the contents of a PKCS#12 key to PEM using pyOpenSSL. Args: private_key_bytes: Bytes. PKCS#12 key in DER format. private_key_password: String. Password for PKCS#12 key. Returns: String. PEM contents of ``private_key_bytes``.
juraj-google-style
def set_all_pattern_variables(self, patternnumber, \ sp0, ti0, sp1, ti1, sp2, ti2, sp3, ti3, sp4, ti4, sp5, ti5, sp6, ti6, sp7, ti7, \ actual_step, additional_cycles, link_pattern): _checkPatternNumber(patternnumber) self.set_pattern_step_setpoint(patternnumber, 0, sp0) self.set_pattern_step_setpoint(patternnumber, 1, sp1) self.set_pattern_step_setpoint(patternnumber, 2, sp2) self.set_pattern_step_setpoint(patternnumber, 3, sp3) self.set_pattern_step_setpoint(patternnumber, 4, sp4) self.set_pattern_step_setpoint(patternnumber, 5, sp5) self.set_pattern_step_setpoint(patternnumber, 6, sp6) self.set_pattern_step_setpoint(patternnumber, 7, sp7) self.set_pattern_step_time( patternnumber, 0, ti0) self.set_pattern_step_time( patternnumber, 1, ti1) self.set_pattern_step_time( patternnumber, 2, ti2) self.set_pattern_step_time( patternnumber, 3, ti3) self.set_pattern_step_time( patternnumber, 4, ti4) self.set_pattern_step_time( patternnumber, 5, ti5) self.set_pattern_step_time( patternnumber, 6, ti6) self.set_pattern_step_time( patternnumber, 7, ti7) self.set_pattern_additional_cycles(patternnumber, additional_cycles) self.set_pattern_link_topattern( patternnumber, link_pattern) self.set_pattern_actual_step( patternnumber, actual_step)
Set all variables for a given pattern at one time. Args: * patternnumber (integer): 0-7 * sp[*n*] (float): setpoint value for step *n* * ti[*n*] (integer??): step time for step *n*, 0-900 * actual_step (int): ? * additional_cycles(int): ? * link_pattern(int): ?
juraj-google-style
def get_orbit(name, date): if name not in [x.name for x in Bsp().top.list]: raise UnknownBodyError(name) for a, b in Bsp().top.steps(name): if b.name not in _propagator_cache: propagator = type( "%sBspPropagator" % b.name, (GenericBspPropagator,), {'src': a, 'dst': b} ) center = Pck()[b.full_name.title()] propagator.propagate(date).as_frame(b.name, center=center) _propagator_cache[b.name] = propagator if Bsp().top not in _propagator_cache: _propagator_cache[Bsp().top.name] = EarthPropagator() return _propagator_cache[name].propagate(date)
Retrieve the orbit of a solar system object Args: name (str): The name of the body desired. For exact nomenclature, see :py:func:`available_planets` date (Date): Date at which the state vector will be extracted Return: Orbit: Orbit of the desired object, in the reference frame in which it is declared in the .bsp file
juraj-google-style
def es_indexers(cls, base_class=None, role='rdf_class', **kwargs): def _prop_filter(prop, value, **kwargs): try: use_prop = len(set(value.owl_inverseOf) - parent_props) > 0 except AttributeError: use_prop = True if prop in nested_props and use_prop: return True return False if not base_class: base_class = cls rtn_list = [] if kwargs.get("depth"): kwargs['depth'] += 1 initial = False else: initial = True kwargs['depth'] = 1 kwargs['class'] = cls.__name__ kwargs['class_obj'] = cls if kwargs.get('class_obj'): parent_props = set(cls.properties) else: parent_props = set() if role == 'rdf_class': for value in cls.properties.values(): rtn_list += value.es_indexers(base_class, **kwargs) elif role == 'es_Nested': if cls == base_class: nested_props = LABEL_FIELDS else: nested_props = cls.es_defs.get('kds_esNestedProps', list(cls.properties.keys())) used_props = [value for prop, value in cls.properties.items() \ if _prop_filter(prop, value, **kwargs)] for value in cls.properties.values(): rtn_list += value.es_indexers(base_class, **kwargs) if cls.es_defs.get('kds_esIndex',[None])[0]: rtn_list += [cls] return list(set(rtn_list))
Returns the es mapping for the class args: ----- base_class: The root class being indexed role: the role states how the class should be mapped depending upon whether it is used as a subject or an object. options are es_Nested or rdf_class
juraj-google-style
def get_image_features(self, pixel_values: torch.FloatTensor): image_outputs = self.vision_tower(pixel_values) selected_image_feature = image_outputs.last_hidden_state image_features = self.multi_modal_projector(selected_image_feature) image_features = image_features / self.config.text_config.hidden_size ** 0.5 return image_features
Obtains image last hidden states from the vision tower and apply multimodal projection. Args: pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`) The tensors corresponding to the input images. Returns: image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
github-repos
def compress_summary(summary, epsilon): if summary.shape[1] * epsilon < 1: return summary percents = epsilon + np.arange(0.0, 1.0, epsilon) cum_weights = summary[1].cumsum() cum_weight_percents = cum_weights / cum_weights[-1] new_bins = np.interp(percents, cum_weight_percents, summary[0]) cum_weights = np.interp(percents, cum_weight_percents, cum_weights) new_weights = cum_weights - np.concatenate((np.array([0]), cum_weights[:-1])) summary = np.stack((new_bins, new_weights)) return summary.astype('float32')
Compress a summary to within `epsilon` accuracy. The compression step is needed to keep the summary sizes small after merging, and also used to return the final target boundaries. It finds the new bins based on interpolating cumulative weight percentages from the large summary. Taking the difference of the cumulative weights from the previous bin's cumulative weight will give the new weight for that bin. Args: summary: 2D `np.ndarray` summary to be compressed. epsilon: A `'float32'` that determines the approximate desired precision. Returns: A 2D `np.ndarray` that is a compressed summary. First column is the interpolated partition values, the second is the weights (counts).
github-repos
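A minimal sketch for `compress_summary`. A summary is a 2 x N array: row 0 holds bin values, row 1 holds weights. With `epsilon=0.1`, a uniform 100-bin summary compresses to one bin per 10% of cumulative weight:

```python
import numpy as np

bins = np.linspace(0.0, 1.0, 100)
weights = np.ones(100)
summary = np.stack((bins, weights))

compressed = compress_summary(summary, epsilon=0.1)
print(compressed.shape)  # (2, 10)
```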
def __move(self, current_pos): if self.__move_range is not None: next_pos = np.random.randint(current_pos - self.__move_range, current_pos + self.__move_range) if next_pos < 0: next_pos = 0 elif next_pos >= self.var_arr.shape[0] - 1: next_pos = self.var_arr.shape[0] - 1 return next_pos else: next_pos = np.random.randint(self.var_arr.shape[0] - 1) return next_pos
Move in the feature map. Args: current_pos: The current position. Returns: The next position.
juraj-google-style
def search(self, query, results=10, suggestion=False): self._check_query(query, "Query must be specified") search_params = { "list": "search", "srprop": "", "srlimit": results, "srsearch": query, } if suggestion: search_params["srinfo"] = "suggestion" raw_results = self.wiki_request(search_params) self._check_error_response(raw_results, query) search_results = [d["title"] for d in raw_results["query"]["search"]] if suggestion: sug = None if raw_results["query"].get("searchinfo"): sug = raw_results["query"]["searchinfo"]["suggestion"] return search_results, sug return search_results
Search for similar titles Args: query (str): Page title results (int): Number of pages to return suggestion (bool): Use suggestion Returns: tuple or list: tuple (list results, suggestion) if \ suggestion is **True**; list of results \ otherwise
juraj-google-style
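A hedged usage sketch for `search`, assuming `wiki` is an instance of the client class that defines this method; the result titles are illustrative only:

```python
results = wiki.search('chess', results=3)
print(results)  # e.g. ['Chess', 'Chess960', 'Chess boxing']

# With suggestion=True, a (results, suggestion) tuple is returned instead.
results, suggestion = wiki.search('chesss', results=3, suggestion=True)
```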
def from_dict(cls, cls_dict, fallback_xsi_type=None): if (not cls_dict): return None if isinstance(cls_dict, six.string_types): if (not getattr(cls, '_convert_strings', False)): return cls_dict try: typekey = cls.dictkey(cls_dict) except TypeError: typekey = fallback_xsi_type klass = cls.entity_class(typekey) return klass.from_dict(cls_dict)
Parse the dictionary and return an Entity instance. This will attempt to extract type information from the input dictionary and pass it to entity_class to resolve the correct class for the type. Args: cls_dict: A dictionary representation of an Entity object. fallback_xsi_type: An xsi_type to use for string input, which doesn't have properties Returns: An Entity instance.
codesearchnet
def find_elements_by_class(self, class_, update=False) -> Elements: return self.find_elements(by=By.CLASS, value=class_, update=update)
Finds multiple elements by class. Args: class_: The class of the elements to be found. update: If the interface has changed, this option should be True. Returns: A list with elements if any was found. An empty list if not. Raises: NoSuchElementException - If the element wasn't found. Usage: elements = driver.find_elements_by_class('foo')
codesearchnet
def to_element(self, include_namespaces=False):
    elt_attrib = {}
    if include_namespaces:
        elt_attrib.update({
            'xmlns': "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/",
            'xmlns:dc': "http://purl.org/dc/elements/1.1/",
            'xmlns:upnp': "urn:schemas-upnp-org:metadata-1-0/upnp/",
        })
    elt_attrib.update({
        'parentID': self.parent_id,
        'restricted': 'true' if self.restricted else 'false',
        'id': self.item_id
    })
    elt = XML.Element(self.tag, elt_attrib)
    XML.SubElement(elt, 'dc:title').text = self.title
    for resource in self.resources:
        elt.append(resource.to_element())
    for key, value in self._translation.items():
        if hasattr(self, key):
            tag = "%s:%s" % value if value[0] else "%s" % value[1]
            XML.SubElement(elt, tag).text = ("%s" % getattr(self, key))
    XML.SubElement(elt, 'upnp:class').text = self.item_class
    desc_attrib = {'id': 'cdudn', 'nameSpace': 'urn:schemas-rinconnetworks-com:metadata-1-0/'}
    desc_elt = XML.SubElement(elt, 'desc', desc_attrib)
    desc_elt.text = self.desc
    return elt
Return an ElementTree Element representing this instance. Args: include_namespaces (bool, optional): If True, include xml namespace attributes on the root element Return: ~xml.etree.ElementTree.Element: an Element.
juraj-google-style
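A short sketch of serializing such an item to XML; `item` is a hypothetical instance of a class in this DIDL hierarchy:

from xml.etree import ElementTree as ET

elt = item.to_element(include_namespaces=True)  # item: hypothetical DIDL object
print(ET.tostring(elt, encoding='unicode'))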
def run_tpm(tpm, time_scale): sbs_tpm = convert.state_by_node2state_by_state(tpm) if sparse(tpm): tpm = sparse_time(sbs_tpm, time_scale) else: tpm = dense_time(sbs_tpm, time_scale) return convert.state_by_state2state_by_node(tpm)
Iterate a TPM by the specified number of time steps. Args: tpm (np.ndarray): A state-by-node tpm. time_scale (int): The number of steps to run the tpm. Returns: np.ndarray
codesearchnet
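A minimal sketch assuming the module's helpers (convert, sparse, sparse_time, dense_time) are in scope; the TPM values are illustrative:

import numpy as np

# A deterministic 2-node state-by-node TPM; rows index network states.
tpm = np.array([[0.0, 0.0],
                [1.0, 0.0],
                [0.0, 1.0],
                [1.0, 1.0]])

tpm_after_two_steps = run_tpm(tpm, time_scale=2)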
def _add_arg(self, key, value, mask=False): if self.lang == 'python': self._add_arg_python(key, value, mask) elif self.lang == 'java': self._add_arg_java(key, value, mask)
Add CLI Arg for the correct language. Args: key (string): The CLI Args key (e.g., --name). value (string): The CLI Args value (e.g., bob). mask (boolean, default:False): Indicates whether the value should be masked.
juraj-google-style
def _resize_image(image, height, width): return tf.image.resize_images( image, [height, width], method=tf.image.ResizeMethod.BILINEAR, align_corners=False)
Simple wrapper around tf.resize_images. This is primarily to make sure we use the same `ResizeMethod` and other details each time. Args: image: A 3-D image `Tensor`. height: The target height for the resized image. width: The target width for the resized image. Returns: resized_image: A 3-D tensor containing the resized image. The first two dimensions have the shape [height, width].
juraj-google-style
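A usage sketch under TF 1.x, where tf.image.resize_images is available:

import tensorflow as tf

image = tf.zeros([224, 224, 3])  # placeholder 3-D image tensor
resized = _resize_image(image, height=128, width=96)  # -> shape [128, 96, 3]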
def onchange(self, new_value): self.disable_refresh() self.set_value(new_value) self.enable_refresh() return (new_value, )
Called when the user changes the TextInput content. With single_line=True it fires when focus is lost or the Enter key is pressed. With single_line=False it fires at each key release. Args: new_value (str): the new string content of the TextInput.
juraj-google-style
def clear_extra_selections(self, key): for decoration in self.extra_selections_dict.get(key, []): self.decorations.remove(decoration) self.extra_selections_dict[key] = []
Remove decorations added through set_extra_selections. Args: key (str): name of the extra selections group.
juraj-google-style
def get(self, key): path = self.object_path(key) return self._read_object(path)
Return the object named by key or None if it does not exist. Args: key: Key naming the object to retrieve Returns: object or None
codesearchnet
def expand(self, url): url = self.clean_url(url) expand_url = f'{self.api_url}v1/expand' payload = { 'domain': getattr(self, 'domain', 'adf.ly'), 'advert_type': getattr(self, 'type', 'int'), 'group_id': getattr(self, 'group_id', None), 'key': self.api_key, 'user_id': self.user_id, 'url': url, } response = self._post(expand_url, data=payload) if not response.ok: raise BadAPIResponseException(response.content) try: data = response.json() except json.decoder.JSONDecodeError: raise BadAPIResponseException('API response could not be decoded') if data.get('errors'): errors = ','.join(i['msg'] for i in data['errors']) raise ShorteningErrorException(errors) if not data.get('data'): raise BadAPIResponseException(response.content) return data['data'][0]['url']
Expand implementation for Adf.ly Args: url: the URL you want to expand Returns: A string containing the expanded URL Raises: BadAPIResponseException: If the data is malformed or we got a bad status code on the API response ShorteningErrorException: If the API returns an error as response
juraj-google-style
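A hedged usage sketch; the shortener class and credentials are assumptions — the method only needs api_key and user_id attributes on self:

shortener = Shortener(api_key="YOUR_KEY", user_id="YOUR_ID")  # hypothetical class
long_url = shortener.expand("http://adf.ly/XXXXXX")  # placeholder short URL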
def _log_unnorm_prob(self, x, name=None): with tf.name_scope((name or 'log_unnorm_prob_lkj')): x = tf.convert_to_tensor(value=x, name='x') if self.input_output_cholesky: logdet = (2.0 * tf.reduce_sum(input_tensor=tf.math.log(tf.linalg.diag_part(x)), axis=[(- 1)])) else: (_, logdet) = tf.linalg.slogdet(x) answer = ((self.concentration - 1.0) * logdet) return answer
Returns the unnormalized log density of an LKJ distribution. Args: x: `float` or `double` `Tensor` of correlation matrices. The shape of `x` must be `B + [D, D]`, where `B` broadcasts with the shape of `concentration`. name: Python `str` name prefixed to Ops created by this function. Returns: log_p: A Tensor of the unnormalized log density of each matrix element of `x`, with respect to an LKJ distribution with parameter the corresponding element of `concentration`.
codesearchnet
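In math terms, the unnormalized log density computed above is, for concentration \eta,

\log \tilde{p}(X \mid \eta) = (\eta - 1)\,\log\det X

and when the input is a Cholesky factor L with X = L L^T, \log\det X = 2 \sum_i \log L_{ii}, which is exactly what the input_output_cholesky branch computes from the diagonal of x.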
def truncate(string, maxchar): if maxchar < 4: raise TruncateError("Maxchar must be > 3") if len(string) <= maxchar: return string else: return string[:maxchar - 3] + "..."
Truncate a string to a maximum number of characters. If the string is longer than maxchar, then remove excess characters and append an ellipsis. Arguments: string (str): String to truncate. maxchar (int): Maximum length of string in characters. Must be >= 4. Returns: str: Of length <= maxchar. Raises: TruncateError: If maxchar is less than 4.
juraj-google-style
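A quick usage sketch:

print(truncate("hello world", 8))   # 'hello...'
print(truncate("hello", 8))         # 'hello' (unchanged, already short enough)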
def _ConditionalFormatMessages(self, event_values): string_pieces = [] for (map_index, attribute_name) in enumerate(self._format_string_pieces_map): if ((not attribute_name) or (attribute_name in event_values)): if attribute_name: attribute = event_values.get(attribute_name, None) if ((not isinstance(attribute, (bool, float))) and (not isinstance(attribute, py2to3.INTEGER_TYPES)) and (not attribute)): continue string_pieces.append(self.FORMAT_STRING_PIECES[map_index]) format_string = self.FORMAT_STRING_SEPARATOR.join(string_pieces) string_pieces = [] for (map_index, attribute_name) in enumerate(self._format_string_short_pieces_map): if ((not attribute_name) or event_values.get(attribute_name, None)): string_pieces.append(self.FORMAT_STRING_SHORT_PIECES[map_index]) short_format_string = self.FORMAT_STRING_SEPARATOR.join(string_pieces) return self._FormatMessages(format_string, short_format_string, event_values)
Determines the conditional formatted message strings. Args: event_values (dict[str, object]): event values. Returns: tuple(str, str): formatted message string and short message string.
codesearchnet
def add_group_coordinator(self, group, response): log.debug('Updating coordinator for %s: %s', group, response) error_type = Errors.for_code(response.error_code) if (error_type is not Errors.NoError): log.error('GroupCoordinatorResponse error: %s', error_type) self._groups[group] = (- 1) return False node_id = response.coordinator_id coordinator = BrokerMetadata(response.coordinator_id, response.host, response.port, None) if (node_id not in self._brokers): self._brokers[node_id] = coordinator else: node = self._brokers[node_id] if ((coordinator.host != node.host) or (coordinator.port != node.port)): log.error('GroupCoordinator metadata conflicts with existing broker metadata. Coordinator: %s, Broker: %s', coordinator, node) self._groups[group] = node_id return False log.info('Group coordinator for %s is %s', group, coordinator) self._groups[group] = node_id return True
Update with metadata for a group coordinator Arguments: group (str): name of group from GroupCoordinatorRequest response (GroupCoordinatorResponse): broker response Returns: bool: True if metadata is updated, False on error
codesearchnet
def request(session, url, rule_payload, **kwargs): if isinstance(rule_payload, dict): rule_payload = json.dumps(rule_payload) logger.debug("sending request") result = session.post(url, data=rule_payload, **kwargs) return result
Executes a request with the given payload and arguments. Args: session (requests.Session): the valid session object url (str): Valid API endpoint rule_payload (str or dict): rule package for the POST. If you pass a dictionary, it will be converted into JSON. Returns: requests.Response: the response from the POST request.
juraj-google-style
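A minimal sketch; the endpoint URL is a placeholder, and a dict payload is serialized to JSON before the POST:

import requests

session = requests.Session()
resp = request(session, "https://api.example.com/rules", {"query": "snow"})
print(resp.status_code)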
def _ParseOrMerge(self, lines, message): tokenizer = Tokenizer(lines) while not tokenizer.AtEnd(): self._MergeField(tokenizer, message)
Converts a text representation of a protocol message into a message. Args: lines: Lines of a message's text representation. message: A protocol buffer message to merge into. Raises: ParseError: On text parsing problems.
juraj-google-style
def _sample_actions(self, state: Sequence[tf.Tensor]) -> Tuple[(Sequence[tf.Tensor], tf.Tensor, tf.Tensor)]: default = self.compiler.compile_default_action(self.batch_size) bound_constraints = self.compiler.compile_action_bound_constraints(state) action = self._sample_action(bound_constraints, default) (n, action, checking) = self._check_preconditions(state, action, bound_constraints, default) return (action, n, checking)
Returns sampled action fluents and tensors related to the sampling. Args: state (Sequence[tf.Tensor]): A list of state fluents. Returns: Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]: A tuple with action fluents, an integer tensor for the number of samples, and a boolean tensor for checking all action preconditions.
codesearchnet
def retry_loop(retries, delay_in_seconds, conditions, function):
    if (not isinstance(retries, Integral)):
        raise TypeError(retries)
    if (delay_in_seconds < 0):
        raise TypeError(delay_in_seconds)
    attempts = 0
    value = None
    err = None
    while (attempts <= retries):
        try:
            value = function()
            for condition in conditions:
                if condition.on_value(value):
                    break
            else:
                return value
        except Exception as exc:
            err = exc
            for condition in conditions:
                if condition.on_exception(exc):
                    break
            else:
                raise
        attempts += 1
        sleep(delay_in_seconds)
    else:
        if err:
            raise err
        else:
            raise ValueError('Max retries ({}) reached and the return value is still {}.'.format(attempts, value))
    return value
Actually performs the retry loop used by the retry decorator and handler functions. Failures for retrying are defined by the RetryConditions passed in. If the maximum number of retries has been reached then it raises the most recent error or a ValueError on the most recent result value. Args: retries (Integral): Maximum number of times to retry. delay_in_seconds (Integral): Number of seconds to wait between retries. conditions (list): A list of retry conditions the can trigger a retry on a return value or exception. function (function): The function to wrap. Returns: value: The return value from function
codesearchnet
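A minimal sketch of a retry condition; the condition class here is hypothetical and retries on any exception while accepting any return value:

class RetryOnAnyError:
    def on_value(self, value):
        return False   # any return value is acceptable

    def on_exception(self, exc):
        return True    # every exception triggers a retry

value = retry_loop(retries=3, delay_in_seconds=1,
                   conditions=[RetryOnAnyError()],
                   function=lambda: flaky_call())  # flaky_call is hypothetical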
def start_naive_bayes(automated_run, session, path): module = functions.import_string_code_as_module(automated_run.source) random_state = 8 if not hasattr(module, 'random_state') else module.random_state assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators base_estimator = automated_run.base_learner_origin.return_estimator() base_estimator.set_params(**module.default_params) default_params = functions.make_serializable(base_estimator.get_params()) non_searchable_params = dict((key, val) for key, val in iteritems(default_params) if key not in module.pbounds) existing_base_learners = [] for base_learner in automated_run.base_learner_origin.base_learners: if not base_learner.job_status == 'finished': continue in_search_space = True for key, val in iteritems(non_searchable_params): if base_learner.hyperparameters[key] != val: in_search_space = False break if in_search_space: existing_base_learners.append(base_learner) target = [] initialization_dict = dict((key, list()) for key in module.pbounds.keys()) for base_learner in existing_base_learners: all_numerical = True for key in module.pbounds.keys(): if not isinstance(base_learner.hyperparameters[key], numbers.Number): all_numerical = False break if not all_numerical: continue for key in module.pbounds.keys(): initialization_dict[key].append(base_learner.hyperparameters[key]) target.append(base_learner.individual_score[module.metric_to_optimize]) initialization_dict['target'] = target if not module.invert_metric \ else list(map(lambda x: -x, target)) print('{} existing in initialization dictionary'. format(len(initialization_dict['target']))) func_to_optimize = return_func_to_optimize( path, session, automated_run.base_learner_origin, module.default_params, module.metric_to_optimize, module.invert_metric, set(module.integers) ) bo = BayesianOptimization(func_to_optimize, module.pbounds) bo.initialize(initialization_dict) np.random.seed(random_state) bo.maximize(**module.maximize_config)
Starts a naive Bayes automated run. Args: automated_run (xcessiv.models.AutomatedRun): Automated run object session: Valid SQLAlchemy session path (str, unicode): Path to project folder
juraj-google-style
def setup_logging(args=None): logging_level = logging.WARNING if ((args is not None) and args.verbose): logging_level = logging.INFO config = {'level': logging_level, 'format': 'jtlocalize:%(message)s'} if ((args is not None) and (args.log_path != '')): config['filename'] = args.log_path logging.basicConfig(**config)
Setup logging module. Args: args (optional): The arguments returned by the argparse module.
codesearchnet
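The args object is expected to carry verbose and log_path attributes, e.g. from argparse:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--verbose", action="store_true")
parser.add_argument("--log_path", default="")
setup_logging(parser.parse_args())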
def gen_cartesian_product(*args): if (not args): return [] elif (len(args) == 1): return args[0] product_list = [] for product_item_tuple in itertools.product(*args): product_item_dict = {} for item in product_item_tuple: product_item_dict.update(item) product_list.append(product_item_dict) return product_list
generate cartesian product for lists Args: args (list of list): lists to be generated with cartesian product Returns: list: cartesian product in list Examples: >>> arg1 = [{"a": 1}, {"a": 2}] >>> arg2 = [{"x": 111, "y": 112}, {"x": 121, "y": 122}] >>> args = [arg1, arg2] >>> gen_cartesian_product(*args) >>> # same as below >>> gen_cartesian_product(arg1, arg2) [ {'a': 1, 'x': 111, 'y': 112}, {'a': 1, 'x': 121, 'y': 122}, {'a': 2, 'x': 111, 'y': 112}, {'a': 2, 'x': 121, 'y': 122} ]
codesearchnet
def get_generic_type(val: '_base.BaseValue') -> '_classes.ParameterizedClass | None': is_class = isinstance(val, _abstract.Class) if is_class: cls = val elif isinstance(val.cls, _abstract.Class): cls = val.cls else: return None for parent_cls in cls.mro: if isinstance(parent_cls, _abstract.ParameterizedClass): base_cls = parent_cls.base_cls else: base_cls = parent_cls if isinstance(base_cls, _abstract.Class) and base_cls.template: ctx = base_cls.ctx params = {item.name: item for item in base_cls.template} generic_cls = _abstract.ParameterizedClass(base_cls, params, ctx) if is_class: return _abstract.ParameterizedClass(ctx.convert.type_type, {T: generic_cls}, ctx) else: return generic_cls return None
Gets the generic type of an abstract value. Args: val: The abstract value. Returns: The type of the value, with concrete type parameters replaced by TypeVars. For example, the generic type of `[0]` is `List[T]`.
github-repos