Columns: code (string, 20 to 4.93k characters), docstring (string, 33 to 1.27k characters), source (string, 3 classes).
def write_message(self, message, timeout): with self._writer_lock: self._transport.write(message.header, timeout.remaining_ms) if timeout.has_expired(): _LOG.warning('Timed out between AdbMessage header and data, sending data anyway with 10ms timeout') timeout = timeouts.PolledTimeout.from_millis(10) self._transport.write(message.data, timeout.remaining_ms)
Send the given message over this transport. Args: message: The AdbMessage to send. timeout: Use this timeout for the entire write operation, it should be an instance of timeouts.PolledTimeout.
codesearchnet
def transpose(self, name=None): if name is None: name = self.module_name + "_transpose" if self._data_format == DATA_FORMAT_NHWC: stride = self._stride[1:-1] else: stride = self._stride[2:] return Conv2D(output_channels=lambda: self.input_channels, kernel_shape=self._kernel_shape, stride=stride, padding=self._padding, use_bias=self._use_bias, initializers=self._initializers, partitioners=self._partitioners, regularizers=self._regularizers, data_format=self._data_format, custom_getter=self._custom_getter, name=name)
Returns matching `Conv2D` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv2D` module.
juraj-google-style
def visit_Call(self, node): if (self.depth == 0): return node if (self.ignore_exceptions is None): ignore_exceptions = ast.Name('None', ast.Load()) else: ignore_exceptions = ast.List(self.ignore_exceptions, ast.Load()) catch_exception_type = (self.catch_exception if self.catch_exception else 'None') catch_exception = ast.Name(catch_exception_type, ast.Load()) depth = ast.Num(((self.depth - 1) if (self.depth > 0) else (- 1))) debug_node_name = ast.Name('debug', ast.Load()) call_extra_parameters = ([] if IS_PYTHON_3 else [None, None]) node.func = ast.Call(debug_node_name, [node.func, ignore_exceptions, catch_exception, depth], [], *call_extra_parameters) return node
Propagate 'debug' wrapper into inner function calls if needed. Args: node (ast.AST): node statement to surround.
codesearchnet
def sum(x, axis=None, keepdims=False): return math_ops.reduce_sum(x, axis, keepdims)
Sum of the values in a tensor, alongside the specified axis. Args: x: A tensor or variable. axis: An integer, the axis to sum over. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with sum of `x`.
github-repos
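A minimal usage sketch for the reduction wrapper above, assuming it is the Keras backend `sum` exposed as `tf.keras.backend.sum` (the exact import path is an assumption):

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
# Reduce over the last axis: shape (2, 2) -> (2,)
row_sums = tf.keras.backend.sum(x, axis=1)
# Keep the reduced axis with length 1: shape (2, 2) -> (2, 1)
row_sums_kept = tf.keras.backend.sum(x, axis=1, keepdims=True)
print(row_sums.numpy())       # [3. 7.]
print(row_sums_kept.numpy())  # [[3.] [7.]]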
def _encrypt_asymmetric(self, encryption_algorithm, encryption_key, plain_text, padding_method, hashing_algorithm=None): if (encryption_algorithm == enums.CryptographicAlgorithm.RSA): if (padding_method == enums.PaddingMethod.OAEP): hash_algorithm = self._encryption_hash_algorithms.get(hashing_algorithm) if (hash_algorithm is None): raise exceptions.InvalidField("The hashing algorithm '{0}' is not supported for asymmetric encryption.".format(hashing_algorithm)) padding_method = asymmetric_padding.OAEP(mgf=asymmetric_padding.MGF1(algorithm=hash_algorithm()), algorithm=hash_algorithm(), label=None) elif (padding_method == enums.PaddingMethod.PKCS1v15): padding_method = asymmetric_padding.PKCS1v15() else: raise exceptions.InvalidField("The padding method '{0}' is not supported for asymmetric encryption.".format(padding_method)) backend = default_backend() try: public_key = backend.load_der_public_key(encryption_key) except Exception: try: public_key = backend.load_pem_public_key(encryption_key) except Exception: raise exceptions.CryptographicFailure('The public key bytes could not be loaded.') cipher_text = public_key.encrypt(plain_text, padding_method) return {'cipher_text': cipher_text} else: raise exceptions.InvalidField("The cryptographic algorithm '{0}' is not supported for asymmetric encryption.".format(encryption_algorithm))
Encrypt data using asymmetric encryption. Args: encryption_algorithm (CryptographicAlgorithm): An enumeration specifying the asymmetric encryption algorithm to use for encryption. Required. encryption_key (bytes): The bytes of the public key to use for encryption. Required. plain_text (bytes): The bytes to be encrypted. Required. padding_method (PaddingMethod): An enumeration specifying the padding method to use with the asymmetric encryption algorithm. Required. hashing_algorithm (HashingAlgorithm): An enumeration specifying the hashing algorithm to use with the encryption padding method. Required, if the padding method is OAEP. Optional otherwise, defaults to None. Returns: dict: A dictionary containing the encrypted data, with at least the following key/value field: * cipher_text - the bytes of the encrypted data Raises: InvalidField: Raised when the algorithm is unsupported or the length is incompatible with the algorithm. CryptographicFailure: Raised when the key generation process fails.
codesearchnet
def _find_scalar_and_max_depth(pylist): if isinstance(pylist, (list, tuple)) or np.ndim(pylist) != 0: scalar_depth = None max_depth = 1 for child in pylist: child_scalar_depth, child_max_depth = _find_scalar_and_max_depth(child) if child_scalar_depth is not None: if scalar_depth is not None and scalar_depth != child_scalar_depth + 1: raise ValueError('all scalar values must have the same nesting depth') scalar_depth = child_scalar_depth + 1 max_depth = max(max_depth, child_max_depth + 1) return (scalar_depth, max_depth) return (0, 0)
Finds nesting depth of scalar values in pylist. Args: pylist: A nested python `list` or `tuple`. Returns: A tuple `(scalar_depth, max_depth)`. `scalar_depth` is the nesting depth of scalar values in `pylist`, or `None` if `pylist` contains no scalars. `max_depth` is the maximum depth of `pylist` (including empty lists). Raises: ValueError: If pylist has inconsistent nesting depths for scalars.
github-repos
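To make the depth bookkeeping above concrete, here is a standalone sketch of the same recursion in pure Python (an illustration, not the library's internal helper):

def scalar_and_max_depth(pylist):
    # Returns (scalar_depth, max_depth); scalar_depth is None if there are no scalars.
    if isinstance(pylist, (list, tuple)):
        scalar_depth, max_depth = None, 1
        for child in pylist:
            child_scalar, child_max = scalar_and_max_depth(child)
            if child_scalar is not None:
                if scalar_depth is not None and scalar_depth != child_scalar + 1:
                    raise ValueError("all scalar values must have the same nesting depth")
                scalar_depth = child_scalar + 1
            max_depth = max(max_depth, child_max + 1)
        return scalar_depth, max_depth
    return 0, 0

print(scalar_and_max_depth([[1, 2], [3]]))  # (2, 2)
print(scalar_and_max_depth([[], []]))       # (None, 2)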
def _build(self, inputs): shape_inputs = inputs.get_shape().as_list() rank = len(shape_inputs) full_multiples = ([1] * rank) for (dim, multiple) in zip(self._dims, self._multiples): full_multiples[dim] = multiple return tf.tile(inputs, multiples=full_multiples)
Connects the `TileByDim` module into the graph. Args: inputs: `Tensor` to tile. Returns: The tiled tensor.
codesearchnet
def vq_gating(x, num_experts, k, bneck, hparams=None, name='vq_gating'):
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    if hparams.use_scales:
      scales = tf.get_variable('scales', [num_experts], tf.float32,
                               initializer=tf.ones_initializer())
      scales = tf.nn.softmax(scales)
      hparams.scales = scales
    input_size = x.get_shape().as_list()[-1]
    batch_size = common_layers.shape_list(x)[0]
    if k > 1:
      # Project and reshape so each example can be routed to k experts.
      x = tf.layers.dense(x, input_size * k)
      x = tf.reshape(x, [batch_size * k, input_size])
    inputs = tf.expand_dims(x, axis=1)
    inputs = tf.expand_dims(inputs, axis=1)
    # Configure the bottleneck to emit one of num_experts discrete codes.
    hparams.z_size = int(math.log(num_experts, 2))
    hparams.hidden_size = input_size
    hparams.top_k = k
    d = bneck.discrete_bottleneck(inputs)
    centroids = None
    exp_discrete = d['discrete']
    embed_lookup = d['embed']
    extra_loss = d['loss']
    if hparams.residual_centroids:
      centroids = embed_lookup(exp_discrete)
    top_k_indices = tf.squeeze(exp_discrete, axis=1)
    tf.summary.histogram('discrete_counts', top_k_indices)
    if k > 1:
      top_k_indices = tf.reshape(top_k_indices, [batch_size, k])
    # Every selected expert receives a gate value of 1.
    top_k_gates = tf.ones([batch_size, k])
    gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices,
                                          num_experts)
    count_per_expert = tf.reduce_sum(gates, axis=0)
    if hparams.use_scales:
      scale_loss = tf.reduce_mean(tf.to_float(count_per_expert) * scales)
      extra_loss += scale_loss
    if common_layers.should_generate_summaries():
      tf.summary.histogram('vq_loss', extra_loss)
      if hparams.use_scales:
        # scale_loss only exists when use_scales is enabled.
        tf.summary.histogram('scale_loss', scale_loss)
    return (gates, extra_loss, centroids)
VQ gating. Args: x: input Tensor with shape [batch_size, input_size] num_experts: an integer k: an integer - number of experts per example bneck: a bottleneck object hparams: optional hparams name: an optional string Returns: gates: a Tensor with shape [batch_size, num_experts] extra_loss: a scalar auxiliary loss from the bottleneck centroids: residual centroids if hparams.residual_centroids is set, otherwise None
codesearchnet
def __init__(self, state_transition: Callable, current_state: Optional[State]) -> None: if not callable(state_transition): raise ValueError('state_transition must be a callable') self.state_transition = state_transition self.current_state = current_state
Initialize the state manager. Args: state_transition: function that can apply a StateChange message. current_state: current application state.
juraj-google-style
def conforms(self, instance, format): try: self.check(instance, format) except FormatError: return False else: return True
Check whether the instance conforms to the given format. Arguments: instance (*any primitive type*, i.e. str, number, bool): The instance to check format (str): The format that instance should conform to Returns: bool: whether it conformed
juraj-google-style
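If this `conforms` belongs to jsonschema's FormatChecker (an assumption based on the method names), a quick check looks like:

from jsonschema import FormatChecker

checker = FormatChecker()
# "email" is one of the built-in formats; unknown formats are treated as conforming.
print(checker.conforms("alice@example.com", "email"))  # True
print(checker.conforms("not an email", "email"))       # False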
def print_result_for_plain_cgi_script_from_tuple( contenttype_headers_content: WSGI_TUPLE_TYPE, status: str = '200 OK') -> None: contenttype, headers, content = contenttype_headers_content print_result_for_plain_cgi_script(contenttype, headers, content, status)
Writes HTTP result to stdout. Args: contenttype_headers_content: the tuple ``(contenttype, extraheaders, data)`` status: HTTP status message (default ``"200 OK"``)
juraj-google-style
def create_issues_report(self, timeout=-1): uri = "{}/issues/".format(self.data["uri"]) return self._helper.create_report(uri, timeout)
Creates an unexpected zoning report for a SAN. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: list: A list of FCIssueResponse dict.
juraj-google-style
def add_callback(self, name, func): if name == 'on_scan': events = ['device_seen'] def callback(_conn_string, _conn_id, _name, event): func(self.id, event, event.get('validity_period', 60)) elif name == 'on_report': events = ['report', 'broadcast'] def callback(_conn_string, conn_id, _name, event): func(conn_id, event) elif name == 'on_trace': events = ['trace'] def callback(_conn_string, conn_id, _name, event): func(conn_id, event) elif name == 'on_disconnect': events = ['disconnection'] def callback(_conn_string, conn_id, _name, _event): func(self.id, conn_id) else: raise ArgumentError("Unknown callback type {}".format(name)) self._adapter.register_monitor([None], events, callback)
Add a callback when device events happen. Args: name (str): one of 'on_scan', 'on_report', 'on_trace' or 'on_disconnect' func (callable): the function that should be called
juraj-google-style
def get_os(detailed=False): try: os_type = platform.system() if os_type == 'Linux': os_detail = platform.uname() distribution = platform.linux_distribution() HOME = os.environ['HOME'] username = os.getenv('USER') elif os_type == 'Windows': username = os.getenv('username') HOME = 'C:\\Users\\' + username elif os_type == 'Java': logger.warning('Unsupported OS. No information') except OSError as e: raise e except Exception as e: logger.exception( '%s: problem determining local os environment %s' % (inspect.stack()[0][3], str(e)) ) if detailed and os_type == 'Linux': return { 'os_type': os_type, 'os_detail': os_detail, 'linux_distribution': distribution, 'HOME': HOME } elif detailed and os_type == 'Windows': return { 'os_type': os_type, 'platform': platform, 'HOME': HOME } elif not detailed: return {'os_type': os_type}
Retrieve local operating system environment characteristics. Args: detailed (bool): if True, also return platform details, the Linux distribution (where applicable) and the home directory. Returns: dict: key/value pairs describing the local os environment
juraj-google-style
def render_template(template, out_dir='.', context=None): template_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'templates', template) files = [] empty_dirs = [] for (dirpath, _, filenames) in os.walk(template_directory): if (len(filenames) == 0): empty_dirs.append(os.path.relpath(dirpath, template_directory)) else: files.extend([os.path.join(dirpath, filepath) for filepath in filenames]) for source_file in files: with open(source_file, 'r') as file: template = Template(file.read()) template_rendered = template.render(**(context or {})) source_relpath = os.path.relpath(source_file, template_directory) filename = os.path.join(out_dir, source_relpath) filename_rendered = Template(filename).render(**context) source_dir = os.path.dirname(filename_rendered) if (not os.path.exists(source_dir)): os.makedirs(source_dir) with open(filename_rendered, 'w') as target_file: target_file.write(template_rendered) for dirpath in empty_dirs: try: dirname = os.path.join(out_dir, dirpath) dirname_rendered = Template(dirname).render(**context) if (not os.path.exists(dirname_rendered)): os.makedirs(dirname_rendered) except OSError as exc: if ((exc.errno == errno.EEXIST) and os.path.isdir(dirpath)): pass else: raise
Renders the template designated by the argument into the given output directory using the given context. Args: template (string): the source template to use (relative to ./templates) out_dir (string): the name of the output directory context (dict): the template rendering context
codesearchnet
def _SkipFieldMessage(tokenizer): if tokenizer.TryConsume('<'): delimiter = '>' else: tokenizer.Consume('{') delimiter = '}' while ((not tokenizer.LookingAt('>')) and (not tokenizer.LookingAt('}'))): _SkipField(tokenizer) tokenizer.Consume(delimiter)
Skips over a field message. Args: tokenizer: A tokenizer to parse the field name and values.
codesearchnet
def _get_left_right_blocks(x):
  (_, x_num_outer_h_blocks, x_num_outer_w_blocks, x_memory_flange_h,
   x_memory_flange_w, depth) = common_layers.shape_list(x)
  x_left_right_blocks = tf.slice(x, [0, 1, 0, 0, 0, 0],
                                 [-1, x_num_outer_h_blocks - 2, -1, -1, -1, -1])
  # The reshape below pairs consecutive outer h blocks, so the block count is
  # halved (the `// 2` is inferred from that reshape; the original expression
  # was truncated).
  num_blocks_h = (x_num_outer_h_blocks - 2) // 2
  x_left_right_blocks = tf.reshape(
      x_left_right_blocks,
      [-1, num_blocks_h, 2, x_num_outer_w_blocks, x_memory_flange_h,
       x_memory_flange_w, depth])
  x_left_right_blocks = tf.transpose(x_left_right_blocks, [0, 1, 3, 2, 4, 5, 6])
  x_left_right_blocks = tf.reshape(
      x_left_right_blocks,
      [-1, num_blocks_h, x_num_outer_w_blocks, 2 * x_memory_flange_h,
       x_memory_flange_w, depth])
  x_left_blocks, x_right_blocks = _split_along_width(x_left_right_blocks)
  return (x_left_blocks, x_right_blocks)
Helper function. Assumes that memory_flange is half of query sizes. This function splits the tensor of width 'n' into two halves, where the first half gets the width indices 0, 2, 4.. and the second half gets the width indices 3, 5, ... We also fuse two blocks along the h dimension. Args: x: a 6-d tensor. Returns: x_left_blocks, x_right_blocks: Two 6-d tensors
codesearchnet
def get_colorscale(cmap, levels=None, cmin=None, cmax=None): ncolors = levels if isinstance(levels, int) else None if isinstance(levels, list): ncolors = len(levels) - 1 if isinstance(cmap, list) and len(cmap) != ncolors: raise ValueError('The number of colors in the colormap ' 'must match the intervals defined in the ' 'color_levels, expected %d colors found %d.' % (ncolors, len(cmap))) try: palette = process_cmap(cmap, ncolors) except Exception as e: colorscale = colors.PLOTLY_SCALES.get(cmap) if colorscale is None: raise e return colorscale if isinstance(levels, int): colorscale = [] scale = np.linspace(0, 1, levels+1) for i in range(levels+1): if i == 0: colorscale.append((scale[0], palette[i])) elif i == levels: colorscale.append((scale[-1], palette[-1])) else: colorscale.append((scale[i], palette[i-1])) colorscale.append((scale[i], palette[i])) return colorscale elif isinstance(levels, list): palette, (cmin, cmax) = color_intervals( palette, levels, clip=(cmin, cmax)) return colors.make_colorscale(palette)
Converts a cmap spec to a plotly colorscale Args: cmap: A recognized colormap by name or list of colors levels: A list or integer declaring the color-levels cmin: The lower bound of the color range cmax: The upper bound of the color range Returns: A valid plotly colorscale
juraj-google-style
def WriteTo(self, values): try: return self._struct.pack(*values) except (TypeError, struct.error) as exception: raise IOError('Unable to write stream with error: {0!s}'.format( exception))
Writes values to a byte stream. Args: values (tuple[object, ...]): values to copy to the byte stream. Returns: bytes: byte stream. Raises: IOError: if the byte stream cannot be written. OSError: if the byte stream cannot be written.
juraj-google-style
def insert_feasible_configurations(cur, feasible_configurations, encoded_data=None): if (encoded_data is None): encoded_data = {} if ('num_variables' not in encoded_data): encoded_data['num_variables'] = len(next(iter(feasible_configurations))) if ('num_feasible_configurations' not in encoded_data): encoded_data['num_feasible_configurations'] = len(feasible_configurations) if (('feasible_configurations' not in encoded_data) or ('energies' not in encoded_data)): encoded = {_serialize_config(config): en for (config, en) in feasible_configurations.items()} (configs, energies) = zip(*sorted(encoded.items())) encoded_data['feasible_configurations'] = json.dumps(configs, separators=(',', ':')) encoded_data['energies'] = json.dumps(energies, separators=(',', ':')) insert = '\n INSERT OR IGNORE INTO feasible_configurations(\n num_variables,\n num_feasible_configurations,\n feasible_configurations,\n energies)\n VALUES (\n :num_variables,\n :num_feasible_configurations,\n :feasible_configurations,\n :energies);\n ' cur.execute(insert, encoded_data)
Insert a group of feasible configurations into the cache. Args: cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement. feasible_configurations (dict[tuple[int]): The set of feasible configurations. Each key should be a tuple of variable assignments. The values are the relative energies. encoded_data (dict, optional): If a dictionary is provided, it will be populated with the serialized data. This is useful for preventing encoding the same information many times. Examples: >>> feasible_configurations = {(-1, -1): 0.0, (+1, +1): 0.0} >>> with pmc.cache_connect(':memory:') as cur: ... pmc.insert_feasible_configurations(cur, feasible_configurations)
codesearchnet
def auto_flexdock(self, binding_residues, radius, ligand_path=None, force_rerun=False): log.debug('\n{}: running DOCK6...\n\tBinding residues: {}\n\tBinding residues radius: {}\n\tLigand to dock: {}\n'.format(self.id, binding_residues, radius, op.basename(ligand_path))) self.dockprep(force_rerun=force_rerun) self.protein_only_and_noH(force_rerun=force_rerun) self.dms_maker(force_rerun=force_rerun) self.sphgen(force_rerun=force_rerun) self.binding_site_mol2(residues=binding_residues, force_rerun=force_rerun) self.sphere_selector_using_residues(radius=radius, force_rerun=force_rerun) self.showbox(force_rerun=force_rerun) self.grid(force_rerun=force_rerun) if ligand_path: self.do_dock6_flexible(ligand_path=ligand_path, force_rerun=force_rerun)
Run DOCK6 on a PDB file, given its binding residues and a radius around them. Provide a path to a ligand to dock it to the protein; if no ligand is provided, only the DOCK6 preparation steps are run on the structure file. Args: binding_residues (str): Comma separated string of residues (eg: '144,170,199') radius (int, float): Radius around binding residues to dock to ligand_path (str): Path to ligand (mol2 format) to dock to protein force_rerun (bool): If method should be rerun even if output files exist
codesearchnet
def execute_async_script(self, script, *args): return self._execute(Command.EXECUTE_ASYNC_SCRIPT, {'script': script, 'args': list(args)})
Execute JavaScript Asynchronously in current context. Support: Web(WebView) Args: script: The JavaScript to execute. *args: Arguments for your JavaScript. Returns: Returns the return value of the function.
codesearchnet
def find_rule(condition): final_condition = re.sub('{{.*}}', '42', condition) ast_tokens = Condition.get_tokens(final_condition) ast_compressed_tokens = Condition.compress_tokens(ast_tokens) name = 'undefined' function = lambda tokens: False if len(ast_compressed_tokens) > 0: for rule in Condition.RULES: if Condition.match_tokens(ast_compressed_tokens, rule['types']): name = rule['name'] function = rule['evaluate'] break return name, ast_tokens, function
Find rule for given condition. Args: condition (str): Python condition as string. Returns: str, list, function: found rule name, list of AST tokens for condition and verification function.
juraj-google-style
def _step(time, output_ta_t, *states): current_input = tuple((ta[time] for ta in input_ta)) current_input = tree.pack_sequence_as(inputs, current_input) output, new_states = step_function(current_input, tuple(states) + tuple(constants)) flat_new_state = tree.flatten(new_states) flat_output = tree.flatten(output) ta_index_to_write = time if return_all_outputs else 0 for ta, out in zip(output_ta_t, flat_output): ta[ta_index_to_write] = out new_states = tree.pack_sequence_as(initial_states, flat_new_state) return (time + 1, output_ta_t) + tuple(new_states)
RNN step function. Args: time: Current timestep value. output_ta_t: TensorArray. *states: List of states. Returns: Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
github-repos
def json(self, attribs=None, recurse=True, ignorelist=False): jsonnode = {} jsonnode['type'] = self.XMLTAG if self.id: jsonnode['id'] = self.id if self.set: jsonnode['set'] = self.set if self.cls: jsonnode['class'] = self.cls if self.annotator: jsonnode['annotator'] = self.annotator if self.annotatortype: if (self.annotatortype == AnnotatorType.AUTO): jsonnode['annotatortype'] = 'auto' elif (self.annotatortype == AnnotatorType.MANUAL): jsonnode['annotatortype'] = 'manual' if (self.confidence is not None): jsonnode['confidence'] = self.confidence if self.n: jsonnode['n'] = self.n if self.auth: jsonnode['auth'] = self.auth if self.datetime: jsonnode['datetime'] = self.datetime.strftime('%Y-%m-%dT%H:%M:%S') if recurse: jsonnode['children'] = [] if self.TEXTCONTAINER: jsonnode['text'] = self.text() if self.PHONCONTAINER: jsonnode['phon'] = self.phon() for child in self: if (self.TEXTCONTAINER and isstring(child)): jsonnode['children'].append(child) elif (not self.PHONCONTAINER): ignore = False if ignorelist: for e in ignorelist: if isinstance(child, e): ignore = True break if (not ignore): jsonnode['children'].append(child.json(attribs, recurse, ignorelist)) if attribs: for attrib in attribs: jsonnode[attrib] = attribs return jsonnode
Serialises the FoLiA element and all its contents to a Python dictionary suitable for serialisation to JSON. Example:: import json json.dumps(word.json()) Returns: dict
codesearchnet
def finalize(): if config_is_locked(): raise RuntimeError('Finalize called twice (config already locked).') bindings = {} for hook in _FINALIZE_HOOKS: new_bindings = hook(_CONFIG) if (new_bindings is not None): for (key, value) in six.iteritems(new_bindings): pbk = ParsedBindingKey(key) if (pbk in bindings): err_str = 'Received conflicting updates when running {}.' raise ValueError(err_str.format(hook)) bindings[pbk] = value for (pbk, value) in six.iteritems(bindings): bind_parameter(pbk, value) _set_config_is_locked(True)
A function that should be called after parsing all Gin config files. Calling this function allows registered "finalize hooks" to inspect (and potentially modify) the Gin config, to provide additional functionality. Hooks should not modify the configuration object they receive directly; instead, they should return a dictionary mapping Gin binding keys to (new or updated) values. This way, all hooks see the config as originally parsed. Raises: RuntimeError: If the config is already locked. ValueError: If two or more hooks attempt to modify or introduce bindings for the same key. Since it is difficult to control the order in which hooks are registered, allowing this could yield unpredictable behavior.
codesearchnet
def __init__(self, api_key, db_path='/tmp/gsb_v4.db', discard_fair_use_policy=False, platforms=None, timeout=10): self.api_client = SafeBrowsingApiClient(api_key, discard_fair_use_policy=discard_fair_use_policy) self.storage = SqliteStorage(db_path, timeout=timeout) self.platforms = platforms
Constructor. Args: api_key: string, a key for API authentication. db_path: string, path to SQLite DB file to store cached data. discard_fair_use_policy: boolean, disable request frequency throttling (only for testing). platforms: list, threat lists to look up, default includes all platforms. timeout: seconds to wait for Sqlite DB to become unlocked from concurrent WRITE transaction.
juraj-google-style
def __init__(self, spec, meta_graph, trainable, checkpoint_path, name): self._spec = spec self._meta_graph = meta_graph self._trainable = trainable self._checkpoint_path = checkpoint_path register_ops_if_needed({ op.name for op in self._meta_graph.meta_info_def.stripped_op_list.op}) with tf.init_scope(): self._init_state(name)
Private constructor. Args: spec: _ModuleSpec instance. meta_graph: MetaGraphDef to use trainable: whether module is trainable. checkpoint_path: None or a string to the variables checkpoints. name: variable and scope name where to instantiate the Module. Must be an unused name scope.
juraj-google-style
def _get_suffix(path): suffix = os.path.basename(path).split(".")[-1] if "/" in suffix: raise UserWarning("Filename can't contain '/' in suffix (%s)!" % path) return suffix
Return suffix from `path`. ``/home/xex/somefile.txt`` --> ``txt``. Args: path (str): Full file path. Returns: str: Suffix. Raises: UserWarning: When ``/`` is detected in suffix.
juraj-google-style
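The suffix extraction is just a basename split; a standalone copy of the logic above to show the behaviour:

import os

def get_suffix(path):
    suffix = os.path.basename(path).split(".")[-1]
    if "/" in suffix:
        raise UserWarning("Filename can't contain '/' in suffix (%s)!" % path)
    return suffix

print(get_suffix("/home/xex/somefile.txt"))  # txt
print(get_suffix("archive.tar.gz"))          # gz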
def sheets_tab_rename(config, auth, sheet_url_or_name, old_sheet_tab, new_sheet_tab): sheet_id, tab_id = sheets_tab_id(config, auth, sheet_url_or_name, old_sheet_tab) if tab_id is not None: sheets_batch_update(config, auth, sheet_url_or_name, {'requests': [{'updateSheetProperties': {'properties': {'sheetId': tab_id, 'title': new_sheet_tab}, 'fields': 'title'}}]})
Rename a tab in a sheet. Args: config - see starthinker/util/configuration.py auth - user or service url_or_name - one of: URL, document title, or id old_sheet_tab - current name of the tab to rename new_sheet_tab - new name to give the tab No Return
github-repos
def prepare_request(url: Union[(str, methods)], data: Optional[MutableMapping], headers: Optional[MutableMapping], global_headers: MutableMapping, token: str, as_json: Optional[bool]=None) -> Tuple[(str, Union[(str, MutableMapping)], MutableMapping)]: if isinstance(url, methods): as_json = (as_json or url.value[3]) real_url = url.value[0] else: real_url = url as_json = False if (not headers): headers = {**global_headers} else: headers = {**global_headers, **headers} payload: Optional[Union[(str, MutableMapping)]] = None if (real_url.startswith(HOOK_URL) or (real_url.startswith(ROOT_URL) and as_json)): (payload, headers) = _prepare_json_request(data, token, headers) elif (real_url.startswith(ROOT_URL) and (not as_json)): payload = _prepare_form_encoded_request(data, token) else: real_url = (ROOT_URL + real_url) payload = _prepare_form_encoded_request(data, token) return (real_url, payload, headers)
Prepare outgoing request Create url, headers, add token to the body and if needed json encode it Args: url: :class:`slack.methods` item or string of url data: Outgoing data headers: Custom headers global_headers: Global headers token: Slack API token as_json: Post JSON to the slack API Returns: :py:class:`tuple` (url, body, headers)
codesearchnet
def find_element(self, name, type=ElementType.ANY):
    for e in self.e_list:
        # ``type.value`` is assumed to hold the element-type string used by the
        # API; comparing against the enum member itself would never match.
        if type.value and e['elementType'] != type.value:
            continue
        if e["name"] == name:
            uri = self.uri
            uri.eid = e["id"]
            return uri
Find an element in the document with the given name - could be a PartStudio, Assembly or blob. Args: name (str): the name of the element. type (ElementType): optionally restrict the search to a single element type. Returns: onshapepy.uri of the element, or None if no element matches.
juraj-google-style
def __init__(self, action_type=None, ethertype=None): super().__init__(action_type, length=8) self.ethertype = ethertype
Create a ActionPush with the optional parameters below. Args: action_type (:class:`ActionType`): indicates which tag will be pushed (VLAN, MPLS, PBB). ethertype (int): indicates the Ethertype of the new tag.
juraj-google-style
def show_confidence_band(self, values):
    if not isinstance(values, list):
        raise TypeError("show_confidence_band must be a list of strings")
    self.options["show_confidence_band"] = values
Show confidence band? See metricsgraphics documentation Args: values (list): strings Raises: TypeError: show_confidence_band must be a list of strings.
juraj-google-style
def include(self, scheduled_operation: ScheduledOperation): collisions = self.query(time=scheduled_operation.time, duration=scheduled_operation.duration, qubits=scheduled_operation.operation.qubits) if collisions: raise ValueError('Operation {} has collisions: {}'.format( scheduled_operation.operation, collisions)) self.scheduled_operations.add(scheduled_operation) self._max_duration = max(self._max_duration, scheduled_operation.duration)
Adds a scheduled operation to the schedule. Args: scheduled_operation: The operation to add. Raises: ValueError: The operation collided with something already in the schedule.
juraj-google-style
def get_qemu_info(path, backing_chain=False, fail_on_error=True): cmd = ['qemu-img', 'info', '--output=json', path] if backing_chain: cmd.insert((- 1), '--backing-chain') result = run_command_with_validation(cmd, fail_on_error, msg='Failed to get info for {}'.format(path)) return json.loads(result.out)
Get info on a given qemu disk Args: path(str): Path to the required disk backing_chain(bool): if true, also include info about the image predecessors. Returns: object: if backing_chain == True then a list of dicts else a dict
codesearchnet
def _add_imports_to_env(self, raw_api): for namespace, desc in raw_api: for item in desc: if isinstance(item, AstImport): if namespace.name == item.target: raise InvalidSpec('Cannot import current namespace.', item.lineno, item.path) if item.target not in self.api.namespaces: raise InvalidSpec( 'Namespace %s is not defined in any spec.' % quote(item.target), item.lineno, item.path) env = self._get_or_create_env(namespace.name) imported_env = self._get_or_create_env(item.target) if namespace.name in imported_env: raise InvalidSpec( 'Circular import of namespaces %s and %s ' 'detected.' % (quote(namespace.name), quote(item.target)), item.lineno, item.path) env[item.target] = imported_env
Scans raw parser output for import declarations. Checks if the imports are valid, and then creates a reference to the namespace in the environment. Args: raw_api (Tuple[Namespace, List[stone.stone.parser._Element]]): Namespace paired with raw parser output.
juraj-google-style
def set_status(self, status, msg): if len(msg) > 2000: msg = msg[:2000] msg += "\n... snip ...\n" if self.status == self.S_LOCKED or status == self.S_LOCKED: err_msg = ( "Locked files must be explicitly unlocked before calling set_status but\n" "task.status = %s, input status = %s" % (self.status, status)) raise RuntimeError(err_msg) status = Status.as_status(status) changed = True if hasattr(self, "_status"): changed = (status != self._status) self._status = status if status == self.S_RUN: if self.datetimes.start is None: self.datetimes.start = datetime.datetime.now() if changed: if status == self.S_SUB: self.datetimes.submission = datetime.datetime.now() self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s " % ( self.mpi_procs, self.omp_threads, self.mem_per_proc.to("Gb"), msg)) elif status == self.S_OK: self.history.info("Task completed %s", msg) elif status == self.S_ABICRITICAL: self.history.info("Status set to S_ABI_CRITICAL due to: %s", msg) else: self.history.info("Status changed to %s. msg: %s", status, msg) if status == self.S_DONE: self._on_done() if status == self.S_OK: if not self.finalized: self._on_ok() if self.gc is not None and self.gc.policy == "task": self.clean_output_files() if self.status == self.S_OK: self.send_signal(self.S_OK) return status
Set and return the status of the task. Args: status: Status object or string representation of the status msg: string with human-readable message used in the case of errors.
juraj-google-style
def query(self, minhash, k): if (k <= 0): raise ValueError('k must be positive') if (len(minhash) < (self.k * self.l)): raise ValueError('The num_perm of MinHash out of range') results = set() r = self.k while (r > 0): for key in self._query(minhash, r, self.l): results.add(key) if (len(results) >= k): return list(results) r -= 1 return list(results)
Return the approximate top-k keys that have the highest Jaccard similarities to the query set. Args: minhash (datasketch.MinHash): The MinHash of the query set. k (int): The maximum number of keys to return. Returns: `list` of at most k keys.
codesearchnet
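A hedged usage sketch, assuming the method above is datasketch's MinHashLSHForest.query (the class name and the surrounding calls are assumptions):

from datasketch import MinHash, MinHashLSHForest

def minhash_of(words):
    m = MinHash(num_perm=128)
    for w in words:
        m.update(w.encode("utf8"))
    return m

forest = MinHashLSHForest(num_perm=128)
forest.add("doc1", minhash_of(["cat", "dog", "fish"]))
forest.add("doc2", minhash_of(["cat", "dog", "bird"]))
forest.index()  # required before querying

print(forest.query(minhash_of(["cat", "dog"]), 1))  # e.g. ['doc1']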
def get_facets(self): return dict([(facet.attrib['path'], [term.text for term in facet.findall('term')]) for facet in self._content.findall('facet')])
Get facets from the response. Returns: A dict where requested facet paths are keys and a list of corresponding terms are values.
codesearchnet
def constcase(text, acronyms=None): words, _case, _sep = case_parse.parse_case(text, acronyms) return '_'.join([w.upper() for w in words])
Return text in CONST_CASE style (aka SCREAMING_SNAKE_CASE). Args: text: input string to convert case acronyms: a list of acronyms to detect >>> constcase("hello world") 'HELLO_WORLD' >>> constcase("helloHTMLWorld", ["HTML"]) 'HELLO_HTML_WORLD'
juraj-google-style
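The conversion itself is word splitting followed by upper-casing and underscore joining; a minimal self-contained sketch with a naive splitter (the real word splitting is delegated to the case parser above):

import re

def const_case(text):
    # Naive splitter: handles spaces, dashes, underscores and simple camelCase/acronyms.
    cleaned = text.replace("_", " ").replace("-", " ")
    words = re.findall(r"[A-Z]+(?![a-z])|[A-Z]?[a-z]+|\d+", cleaned)
    return "_".join(w.upper() for w in words)

print(const_case("hello world"))     # HELLO_WORLD
print(const_case("helloHTMLWorld"))  # HELLO_HTML_WORLD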
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False, drop_first=False, dtype=None): if sparse: raise NotImplementedError('SparseDataFrame is not implemented. To contribute to Modin, please visit github.com/modin-project/modin.') if (not isinstance(data, DataFrame)): ErrorMessage.default_to_pandas('`get_dummies` on non-DataFrame') return DataFrame(pandas.get_dummies(data, prefix=prefix, prefix_sep=prefix_sep, dummy_na=dummy_na, columns=columns, sparse=sparse, drop_first=drop_first, dtype=dtype)) else: new_manager = data._query_compiler.get_dummies(columns, prefix=prefix, prefix_sep=prefix_sep, dummy_na=dummy_na, drop_first=drop_first, dtype=dtype) return DataFrame(query_compiler=new_manager)
Convert categorical variable into indicator variables. Args: data (array-like, Series, or DataFrame): data to encode. prefix (string, [string]): Prefix to apply to each encoded column label. prefix_sep (string, [string]): Separator between prefix and value. dummy_na (bool): Add a column to indicate NaNs. columns: Which columns to encode. sparse (bool): Not Implemented: If True, returns SparseDataFrame. drop_first (bool): Whether to remove the first level of encoded data. dtype: The dtype for the get_dummies call. Returns: DataFrame or one-hot encoded data.
codesearchnet
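The Modin wrapper forwards to the same semantics as pandas.get_dummies; a small pandas illustration of the call it mirrors:

import pandas as pd

df = pd.DataFrame({"color": ["red", "blue", "red"], "size": [1, 2, 3]})
encoded = pd.get_dummies(df, columns=["color"], prefix="color")
print(encoded.columns.tolist())  # ['size', 'color_blue', 'color_red']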
def _bind_length_handlers(tids, user_handler, lns): for tid in tids: for ln in lns: type_octet = _gen_type_octet(tid, ln) ion_type = _TID_VALUE_TYPE_TABLE[tid] if ((ln == 1) and (ion_type is IonType.STRUCT)): handler = partial(_ordered_struct_start_handler, partial(user_handler, ion_type)) elif (ln < _LENGTH_FIELD_FOLLOWS): handler = partial(user_handler, ion_type, ln) else: handler = partial(_var_uint_field_handler, partial(user_handler, ion_type)) _HANDLER_DISPATCH_TABLE[type_octet] = handler
Binds a set of handlers with the given factory. Args: tids (Sequence[int]): The Type IDs to bind to. user_handler (Callable): A function that takes as its parameters :class:`IonType`, ``length``, and the ``ctx`` context returning a co-routine. lns (Sequence[int]): The low-nibble lengths to bind to.
codesearchnet
def create_file(self, filename): self.response.write('Creating file %s\n' % filename) write_retry_params = gcs.RetryParams(backoff_factor=1.1) gcs_file = gcs.open(filename, 'w', content_type='text/plain', options={'x-goog-meta-foo': 'foo', 'x-goog-meta-bar': 'bar'}, retry_params=write_retry_params) gcs_file.write('abcde\n') gcs_file.write('f'*1024*4 + '\n') gcs_file.close() self.tmp_filenames_to_clean_up.append(filename)
Create a file. The retry_params specified in the open call will override the default retry params for this particular file handle. Args: filename: filename.
juraj-google-style
def get(self, save_path, dataset=None): if dataset is None: selected_dataset = self._present_options() else: selected_dataset = dataset save_path_full = join(save_path, selected_dataset.split('.')[0]) if isdir(save_path_full): warn("\n'{0}' already exists. Voiding Download.".format( save_path_full)) else: self._print('Downloading Data...') url = "{0}/{1}".format(self.url, selected_dataset) self._download_data(url, save_path=save_path) return abspath(save_path_full)
Download a dataset. Args: save_path : str A directory to save the data to. dataset : str, optional A specific dataset to download. Note: this must include the file extension. If None, options will be presented for you to choose from. Returns: save_path_full : str The absolute path to the downloaded data.
juraj-google-style
def Matches(self, file_entry): location = getattr(file_entry.path_spec, 'location', None) if not location: return None if '.' not in location: return False _, _, extension = location.rpartition('.') return extension.lower() in self._extensions
Compares the file entry against the filter. Args: file_entry (dfvfs.FileEntry): file entry to compare. Returns: bool: True if the file entry matches the filter, False if not or None if the filter does not apply.
juraj-google-style
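The heart of the filter is a case-insensitive extension comparison; a standalone sketch of that check (independent of dfvfs):

def matches_extension(location, extensions):
    # Mirrors the filter: no location -> undecided (None), no dot -> no match.
    if not location:
        return None
    if "." not in location:
        return False
    _, _, extension = location.rpartition(".")
    return extension.lower() in extensions

print(matches_extension("/tmp/report.PDF", {"pdf", "txt"}))  # True
print(matches_extension("/tmp/Makefile", {"pdf", "txt"}))    # False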
def load_graph(path: str, squeeze: bool=False) -> Tuple[Union[EventSetNode, Dict[str, EventSetNode]], Union[EventSetNode, Dict[str, EventSetNode]]]: g = _load_graph(path=path) inputs = g.named_inputs outputs = g.named_outputs assert inputs is not None assert outputs is not None if squeeze and len(inputs) == 1: inputs = list(inputs.values())[0] if squeeze and len(outputs) == 1: outputs = list(outputs.values())[0] return (inputs, outputs)
Loads a Temporian graph from a file. See [`tp.save()`][temporian.save] and [`tp.save_graph()`][temporian.save_graph] for usage examples. Args: path: File path to load from. squeeze: If true, and if the input/output contains a single EventSetNode, returns an EventSetNode (instead of a dictionary of EventSetNodes). Returns: Input and output EventSetNodes.
github-repos
def create_cells(headers, schema_fields, values=None, row_number=None): fillvalue = '_fillvalue' is_header_row = (values is None) cells = [] iterator = zip_longest(headers, schema_fields, (values or []), fillvalue=fillvalue) for (column_number, (header, field, value)) in enumerate(iterator, start=1): if (header == fillvalue): header = None elif is_header_row: value = header if (field == fillvalue): field = None if (value == fillvalue): value = None elif (value is None): value = '' cell = create_cell(header, value, field, column_number, row_number) cells.append(cell) return cells
Create list of cells from headers, fields and values. Args: headers (List[str]): The headers values. schema_fields (List[tableschema.field.Field]): The tableschema fields. values (List[Any], optional): The cells values. If not specified, the created cells will have the same values as their corresponding headers. This is useful for specifying headers cells. If the list has any `None` values, as is the case on empty cells, the resulting Cell will have an empty string value. If the `values` list has a different length than the `headers`, the resulting Cell will have value `None`. row_number (int, optional): The row number. Returns: List[dict]: List of cells.
codesearchnet
def __init__(self, channel): self.receive = channel.unary_stream( '/predix.eventhub.Subscriber/receive', request_serializer=EventHub__pb2.SubscriptionRequest.SerializeToString, response_deserializer=EventHub__pb2.Message.FromString, ) self.receiveWithAcks = channel.stream_stream( '/predix.eventhub.Subscriber/receiveWithAcks', request_serializer=EventHub__pb2.SubscriptionResponse.SerializeToString, response_deserializer=EventHub__pb2.Message.FromString, ) self.subscribe = channel.stream_stream( '/predix.eventhub.Subscriber/subscribe', request_serializer=EventHub__pb2.SubscriptionAcks.SerializeToString, response_deserializer=EventHub__pb2.SubscriptionMessage.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def probe_characteristics(self, conn_id, handle, services): self._command_task.async_command(['_probe_characteristics', handle, services], self._probe_characteristics_finished, {'connection_id': conn_id, 'handle': handle, 'services': services})
Probe a device for all characteristics defined in its GATT table This routine must be called after probe_services and passed the services dictionary produced by that method. Args: handle (int): a handle to the connection on the BLED112 dongle conn_id (int): a unique identifier for this connection on the DeviceManager that owns this adapter. services (dict): A dictionary of GATT services produced by probe_services()
codesearchnet
def new(self, name, *args, **kwargs): if (name in self._instance_map): raise ValueError('Instance {0} is already initialized'.format(name)) instance = self._class_map[name](*args, **kwargs) self._instance_map[name] = instance return instance
Create an instance. Args: name (str): The name of the class args: The arguments to pass to the class. kwargs: The keyword arguments to pass to the class. Returns: instance
codesearchnet
def get(self, key, default=None): if isinstance(key, six.text_type): return self.__mapping.get(key, None) if (not isinstance(key, int)): raise TypeError('Key must be int or Unicode sequence.') if (key == 0): return SYMBOL_ZERO_TOKEN index = (key - 1) if ((index < 0) or (key > len(self))): return default return self.__symbols[index]
Returns a token by text or local ID, with a default. A given text image may be associated with more than one symbol ID. This will return the first definition. Note: User defined symbol IDs are always one-based. Symbol zero is a special symbol that always has no text. Args: key (unicode | int): The key to lookup. default(Optional[SymbolToken]): The default to return if the key is not found Returns: SymbolToken: The token associated with the key or the default if it doesn't exist.
codesearchnet
def __init__(self, vocab, shift): self.shift = shift alphabet = vocab shifted_alphabet = deque(alphabet) shifted_alphabet.rotate(shift) self.encrypt = dict(zip(alphabet, list(shifted_alphabet))) self.decrypt = dict(zip(list(shifted_alphabet), alphabet))
Initialize shift layer. Args: vocab: (list of String) the vocabulary shift: (Integer) the amount of shift apply to the alphabet. Positive number implies shift to the right, negative number implies shift to the left.
juraj-google-style
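A minimal standalone sketch of the rotated-alphabet lookup the constructor builds:

from collections import deque

alphabet = list("abcdefghijklmnopqrstuvwxyz")
shift = 3
rotated = deque(alphabet)
rotated.rotate(shift)  # positive shift rotates to the right

encrypt = dict(zip(alphabet, rotated))
decrypt = dict(zip(rotated, alphabet))

print("".join(encrypt[c] for c in "abc"))  # xyz
print("".join(decrypt[c] for c in "xyz"))  # abc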
def expand(self, words): return words | beam.combiners.Count.PerElement() | beam.FlatMap(extract_prefixes) | beam.combiners.Top.LargestPerKey(self._count)
Compute the most common words for each possible prefixes. Args: words: a PCollection of strings Returns: A PCollection of most common words with each prefix, in the form (prefix, [(count, word), (count, word), ...])
github-repos
def __init__(self, batch_env): super(PyFuncBatchEnv, self).__init__(batch_env.observation_space, batch_env.action_space) self._batch_env = batch_env with tf.variable_scope("env_temporary"): self._observ = tf.Variable( tf.zeros((self._batch_env.batch_size,) + self.observ_shape, self.observ_dtype), name="observ", trainable=False)
Batch of environments inside the TensorFlow graph. Args: batch_env: Batch environment.
juraj-google-style
def masks_to_boxes(masks: torch.Tensor) -> torch.Tensor: if masks.numel() == 0: return torch.zeros((0, 4), device=masks.device) h, w = masks.shape[-2:] y = torch.arange(0, h, dtype=torch.float32, device=masks.device) x = torch.arange(0, w, dtype=torch.float32, device=masks.device) y, x = torch.meshgrid(y, x, indexing='ij') x_mask = masks * torch.unsqueeze(x, 0) x_max = x_mask.view(x_mask.shape[0], -1).max(-1)[0] x_min = torch.where(masks, x.unsqueeze(0), torch.tensor(100000000.0, device=masks.device)).view(masks.shape[0], -1).min(-1)[0] y_mask = masks * torch.unsqueeze(y, 0) y_max = y_mask.view(y_mask.shape[0], -1).max(-1)[0] y_min = torch.where(masks, y.unsqueeze(0), torch.tensor(100000000.0, device=masks.device)).view(masks.shape[0], -1).min(-1)[0] return torch.stack([x_min, y_min, x_max, y_max], 1)
Compute the bounding boxes around the provided panoptic segmentation masks. Args: masks: masks in format `[number_masks, height, width]`, where `number_masks` is the number of masks Returns: boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
github-repos
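A toy check of the xyxy convention on a single 4x5 mask, computed directly from the nonzero indices (plain torch, independent of the helper above):

import torch

mask = torch.zeros(1, 4, 5, dtype=torch.bool)
mask[0, 1:3, 2:5] = True  # rows 1-2, cols 2-4

ys, xs = torch.nonzero(mask[0], as_tuple=True)
box = [xs.min().item(), ys.min().item(), xs.max().item(), ys.max().item()]
print(box)  # [2, 1, 4, 2] -> x_min, y_min, x_max, y_max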
def subscribe(self, exchange_name: str, routing: str, exchange_type: ExchangeType_='topic', on_message: EVENT_CALLBACK_=None) -> EventSubscription: sub = EventSubscription(exchange_name, routing, exchange_type, on_message=on_message) if (self._pending is not None): self._pending.put_nowait(sub) else: self._pending_pre_async.append(sub) LOGGER.info(f'Deferred event bus subscription: [{sub}]') self._lazy_listen() return sub
Adds a new event subscription to the listener. Actual queue declaration to the remote message server is done when connected. If the listener is not currently connected, it defers declaration. All existing subscriptions are redeclared on the remote if `EventListener` loses and recreates the connection. Args: exchange_name (str): Name of the AMQP exchange. Messages are always published to a specific exchange. routing (str): Filter messages passing through the exchange. A routing key is a '.'-separated string, and accepts '#' and '*' wildcards. exchange_type (ExchangeType_, optional): If the exchange does not yet exist, it will be created with this type. Default is `topic`, acceptable values are `topic`, `fanout`, or `direct`. on_message (EVENT_CALLBACK_, optional): The function to be called when a new message is received. If `on_message` is none, it will default to logging the message. Returns: EventSubscription: The newly created subscription. This value can safely be discarded: EventListener keeps its own reference.
codesearchnet
def to_control_flow_context_def(self, context_def, export_scope=None): raise NotImplementedError('Abstract method')
Serializes this into `context_def`. Args: context_def: a `ControlFlowContextDef` protocol buffer. export_scope: Optional `string`. Name scope to remove.
github-repos
def _compute_nfp_uniform(l, u, cum_counts, sizes): if l > u: raise ValueError("l must be less or equal to u") if l == 0: n = cum_counts[u] else: n = cum_counts[u]-cum_counts[l-1] return n * float(sizes[u] - sizes[l]) / float(2*sizes[u])
Computes the expected number of false positives caused by using u to approximate set sizes in the interval [l, u], assuming uniform distribution of set sizes within the interval. Args: l: the lower bound on set sizes. u: the upper bound on set sizes. cum_counts: the complete cumulative distribution of set sizes. sizes: the complete domain of set sizes. Returns: float: the expected number of false positives.
juraj-google-style
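A worked numeric check of the formula (the counts and sizes below are made up for illustration):

def compute_nfp_uniform(l, u, cum_counts, sizes):
    if l > u:
        raise ValueError("l must be less or equal to u")
    n = cum_counts[u] if l == 0 else cum_counts[u] - cum_counts[l - 1]
    return n * float(sizes[u] - sizes[l]) / float(2 * sizes[u])

sizes = [10, 20, 30, 40]      # domain of set sizes
cum_counts = [5, 12, 20, 26]  # cumulative number of sets up to each size
# 21 sets in the interval, scaled by (40 - 20) / (2 * 40):
print(compute_nfp_uniform(1, 3, cum_counts, sizes))  # 5.25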
def _extract_options(config, options, *args):
    extract = {}
    for key in args:
        # Skip keys that are absent from the config file; the lookup below
        # would otherwise raise a KeyError.
        if key not in config:
            continue
        extract[key] = config[key]
        # Command-line options take precedence over the config file.
        option = getattr(options, key, None)
        if option is not None:
            extract[key] = option
    return extract
Extract options values from a configparser, optparse pair. Options given on command line take precedence over options read in the configuration file. Args: config (dict): option values read from a config file through configparser options (optparse.Options): optparse 'options' object containing options values from the command line *args (str tuple): name of the options to extract
codesearchnet
def _get_params(mcs, bases, namespace): params = [ (name, namespace.pop(name)) for name, attribute in list(namespace.items()) if isinstance(attribute, BaseParam) ] for base in reversed(bases): if hasattr(base, mcs._params_storage_key): params = list( getattr(base, mcs._params_storage_key).items() ) + params return OrderedDict(params)
Create params dictionary to be used in resource class namespace. Pop all parameter objects from the attributes dict (namespace) and store them under the _params_storage_key attribute. Also collect all params from base classes in an order that ensures params can be overridden. Args: bases: all base classes of created resource class namespace (dict): namespace as dictionary of attributes
juraj-google-style
def value_of(self, value: Union[(sympy.Basic, float, str)]) -> Union[(sympy.Basic, float)]: if isinstance(value, str): return self.param_dict.get(value, sympy.Symbol(value)) if isinstance(value, sympy.Basic): if (sys.version_info.major < 3): d = {k.encode(): v for (k, v) in self.param_dict.items()} v = value.subs(d) else: v = value.subs(self.param_dict) return (v if v.free_symbols else float(v)) return value
Attempt to resolve a Symbol or name or float to its assigned value. If unable to resolve a sympy.Symbol, returns it unchanged. If unable to resolve a name, returns a sympy.Symbol with that name. Args: value: The sympy.Symbol or name or float to try to resolve into just a float. Returns: The value of the parameter as resolved by this resolver.
codesearchnet
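The resolver's fallback is plain sympy substitution; a small sketch of that core step, without the resolver class itself:

import sympy

param_dict = {"t": 0.25}
t = sympy.Symbol("t")
expr = 2 * t + 1

resolved = expr.subs({sympy.Symbol(k): v for k, v in param_dict.items()})
print(float(resolved))  # 1.5
print(expr.subs({}))    # unresolved symbols are left as-is: 2*t + 1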
def are_you_sure(msg=''): print(msg) from utool import util_arg from utool import util_str override = util_arg.get_argflag(('--yes', '--y', '-y')) if override: print('accepting based on command line flag') return True valid_ans = ['yes', 'y'] valid_prompt = util_str.conj_phrase(valid_ans, 'or') ans = input(('Are you sure?\n Enter %s to accept\n' % valid_prompt)) return (ans.lower() in valid_ans)
r""" Prompts user to accept or checks command line for -y Args: msg (str): Returns: bool: accept or not
codesearchnet
def FVDEVolumeOpen(fvde_volume, path_spec, file_object, key_chain): encrypted_root_plist = key_chain.GetCredential( path_spec, 'encrypted_root_plist') if encrypted_root_plist: fvde_volume.read_encrypted_root_plist(encrypted_root_plist) password = key_chain.GetCredential(path_spec, 'password') if password: fvde_volume.set_password(password) recovery_password = key_chain.GetCredential(path_spec, 'recovery_password') if recovery_password: fvde_volume.set_recovery_password(recovery_password) fvde_volume.open_file_object(file_object)
Opens the FVDE volume using the path specification. Args: fvde_volume (pyfvde.volume): FVDE volume. path_spec (PathSpec): path specification. file_object (FileIO): file-like object. key_chain (KeyChain): key chain.
juraj-google-style
class RandomNormal(Initializer): def __init__(self, mean=0.0, stddev=0.05, seed=None): self.mean = mean self.stddev = stddev self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=None, **kwargs): _validate_kwargs(self.__class__.__name__, kwargs) dtype = _assert_float_dtype(_get_dtype(dtype)) if _PARTITION_SHAPE in kwargs: shape = kwargs[_PARTITION_SHAPE] return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype) def get_config(self): return {'mean': self.mean, 'stddev': self.stddev, 'seed': self.seed}
Initializer that generates tensors with a normal distribution. Also available via the shortcut function `tf.keras.initializers.random_normal`. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.) >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype.
github-repos
def init_config(self, config): self.config.update(config) self.config.setdefault('LDAP_PORT', 389) self.config.setdefault('LDAP_HOST', None) self.config.setdefault('LDAP_USE_SSL', False) self.config.setdefault('LDAP_READONLY', True) self.config.setdefault('LDAP_CHECK_NAMES', True) self.config.setdefault('LDAP_BIND_DIRECT_CREDENTIALS', False) self.config.setdefault('LDAP_BIND_DIRECT_PREFIX', '') self.config.setdefault('LDAP_BIND_DIRECT_SUFFIX', '') self.config.setdefault('LDAP_BIND_DIRECT_GET_USER_INFO', True) self.config.setdefault('LDAP_ALWAYS_SEARCH_BIND', False) self.config.setdefault('LDAP_BASE_DN', '') self.config.setdefault('LDAP_BIND_USER_DN', None) self.config.setdefault('LDAP_BIND_USER_PASSWORD', None) self.config.setdefault('LDAP_SEARCH_FOR_GROUPS', True) self.config.setdefault('LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND', False) self.config.setdefault('LDAP_USER_DN', '') self.config.setdefault('LDAP_GROUP_DN', '') self.config.setdefault('LDAP_BIND_AUTHENTICATION_TYPE', 'SIMPLE') self.config.setdefault('LDAP_USER_SEARCH_SCOPE', 'LEVEL') self.config.setdefault('LDAP_USER_OBJECT_FILTER', '(objectclass=person)') self.config.setdefault('LDAP_USER_LOGIN_ATTR', 'uid') self.config.setdefault('LDAP_USER_RDN_ATTR', 'uid') self.config.setdefault('LDAP_GET_USER_ATTRIBUTES', ldap3.ALL_ATTRIBUTES) self.config.setdefault('LDAP_GROUP_SEARCH_SCOPE', 'LEVEL') self.config.setdefault('LDAP_GROUP_OBJECT_FILTER', '(objectclass=group)') self.config.setdefault('LDAP_GROUP_MEMBERS_ATTR', 'uniqueMember') self.config.setdefault('LDAP_GET_GROUP_ATTRIBUTES', ldap3.ALL_ATTRIBUTES) self.config.setdefault('LDAP_ADD_SERVER', True) if self.config['LDAP_ADD_SERVER']: self.add_server(hostname=self.config['LDAP_HOST'], port=self.config['LDAP_PORT'], use_ssl=self.config['LDAP_USE_SSL'])
Configures this extension with a given configuration dictionary. This allows use of this extension without a flask app. Args: config (dict): A dictionary with configuration keys
codesearchnet
def strip_graph_default_valued_attrs(meta_graph_def): op_name_to_function = {} for function_def in meta_graph_def.graph_def.library.function: op_name_to_function[function_def.signature.name] = function_def def _strip_node_default_valued_attrs(node_def): if node_def.op in op_name_to_function: return op_def = op_def_registry.get(node_def.op) if op_def is None: return attrs_to_strip = set() for attr_name, attr_value in node_def.attr.items(): if _is_default_attr_value(op_def, attr_name, attr_value): attrs_to_strip.add(attr_name) for attr in attrs_to_strip: del node_def.attr[attr] for node_def in meta_graph_def.graph_def.node: _strip_node_default_valued_attrs(node_def) for function_def in meta_graph_def.graph_def.library.function: for function_node_def in function_def.node_def: _strip_node_default_valued_attrs(function_node_def) meta_graph_def.meta_info_def.stripped_default_attrs = True
Strips default valued attributes for node defs in given MetaGraphDef. This method also sets `meta_info_def.stripped_default_attrs` in the given `MetaGraphDef` proto to True. Args: meta_graph_def: `MetaGraphDef` protocol buffer Returns: None.
github-repos
def select(self, cols, mode='list'): if isinstance(cols, stringtypes): cols = _split_cols(cols) if not cols: cols = [f.name for f in self.fields] return select_rows(cols, self, mode=mode)
Select columns from each row in the table. See :func:`select_rows` for a description of how to use the *mode* parameter. Args: cols: an iterable of Field (column) names mode: how to return the data
juraj-google-style
def scale(self, scalar, ignored_terms=None): if (ignored_terms is None): ignored_terms = set() else: ignored_terms = {asfrozenset(term) for term in ignored_terms} for term in self: if (term not in ignored_terms): self[term] *= scalar
Multiply the polynomial by the given scalar. Args: scalar (number): Value to multiply the polynomial by. ignored_terms (iterable, optional): Biases associated with these terms are not scaled.
codesearchnet
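A dict-based sketch of the in-place scaling, assuming terms are frozensets of variables mapped to biases (mirroring the method above):

poly = {frozenset(): 1.0, frozenset({"a"}): 2.0, frozenset({"a", "b"}): -1.0}
ignored = {frozenset({"a", "b"})}

for term in poly:
    if term not in ignored:
        poly[term] *= 0.5

# The ignored term keeps its bias:
print(poly[frozenset({"a", "b"})])  # -1.0
print(poly[frozenset({"a"})])       # 1.0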
def WaitUntilDone(self, timeout=None): f = utils.Poll(generator=self.Get, condition=(lambda f: (f.data.state != f.data.RUNNING)), timeout=timeout) if (f.data.state != f.data.TERMINATED): raise errors.FlowFailedError(('Flow %s (%s) failed: %s' % (self.flow_id, self.client_id, f.data.context.current_state))) return f
Wait until the flow completes. Args: timeout: timeout in seconds. None means default timeout (1 hour). 0 means no timeout (wait forever). Returns: Fresh flow object. Raises: PollTimeoutError: if timeout is reached. FlowFailedError: if the flow is not successful.
codesearchnet
def predict_proba(self, a, b, device=None): device = SETTINGS.get_default(device=device) if (self.model is None): print('Model has to be trained before doing any predictions') raise ValueError if (len(np.array(a).shape) == 1): a = np.array(a).reshape(((- 1), 1)) b = np.array(b).reshape(((- 1), 1)) m = np.hstack((a, b)) m = scale(m) m = m.astype('float32') m = th.from_numpy(m).t().unsqueeze(0) if th.cuda.is_available(): m = m.cuda() return ((self.model(m).data.cpu().numpy() - 0.5) * 2)
Infer causal directions using the trained NCC pairwise model. Args: a (numpy.ndarray): Variable 1 b (numpy.ndarray): Variable 2 device (str): Device to run the algorithm on (defaults to ``cdt.SETTINGS.default_device``) Returns: float: Causation score (Value : 1 if a->b and -1 if b->a)
codesearchnet
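A usage sketch under stated assumptions: the NCC class name comes from the docstring, but the fit() call and its arguments are hypothetical, and the model must be trained before predict_proba() is called:

import numpy as np

a = np.random.normal(size=500)
b = a ** 2 + 0.1 * np.random.normal(size=500)  # b is (noisily) generated from a

model = NCC()
model.fit(train_pairs, train_labels)  # hypothetical signature; training data not shown
score = model.predict_proba(a, b)     # > 0 suggests a -> b, < 0 suggests b -> a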
def assistant_from_yaml(cls, source, y, superassistant, fully_loaded=True, role=settings.DEFAULT_ASSISTANT_ROLE): name = os.path.splitext(os.path.basename(source))[0] yaml_checker.check(source, y) assistant = yaml_assistant.YamlAssistant(name, y, source, superassistant, fully_loaded=fully_loaded, role=role) return assistant
Constructs an instance of YamlAssistant from the given structure y, loaded from source file source. Args: source: path to assistant source file y: loaded yaml structure superassistant: superassistant of this assistant fully_loaded: whether the assistant should be considered fully loaded role: role of the assistant (defaults to settings.DEFAULT_ASSISTANT_ROLE) Returns: YamlAssistant instance constructed from y with source file source Raises: YamlError: if the assistant is malformed
juraj-google-style
def __init__(self, hash_queue, hash_analysis_queue, **kwargs): super(ViperAnalyzer, self).__init__( hash_queue, hash_analysis_queue, **kwargs) self._checked_for_old_python_version = False self._host = None self._port = None self._protocol = None self._url = None
Initializes a Viper hash analyzer. Args: hash_queue (Queue.queue): contains hashes to be analyzed. hash_analysis_queue (Queue.queue): queue that the analyzer will append HashAnalysis objects to.
juraj-google-style
def from_service_account_info(cls, info, **kwargs): signer = _service_account_info.from_dict(info, require=['client_email', 'token_uri']) return cls._from_signer_and_info(signer, info, **kwargs)
Creates a Credentials instance from parsed service account info. Args: info (Mapping[str, str]): The service account info in Google format. kwargs: Additional arguments to pass to the constructor. Returns: google.auth.service_account.Credentials: The constructed credentials. Raises: ValueError: If the info is not in the expected format.
codesearchnet
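This mirrors the documented google-auth usage; the info values are placeholders and a real PEM private key is required for the credentials to actually sign tokens:

from google.oauth2 import service_account

info = {
    'type': 'service_account',
    'client_email': 'my-sa@my-project.iam.gserviceaccount.com',   # placeholder
    'token_uri': 'https://oauth2.googleapis.com/token',
    'private_key': '-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n',  # real key needed
    'private_key_id': 'abc123',                                   # placeholder
}
credentials = service_account.Credentials.from_service_account_info(
    info, scopes=['https://www.googleapis.com/auth/cloud-platform'])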
def get_case_groups(adapter, total_cases, institute_id=None, slice_query=None): cases = [{'status': 'all', 'count': total_cases, 'percent': 1}] pipeline = [] group = {'$group': {'_id': '$status', 'count': {'$sum': 1}}} subquery = {} if (institute_id and slice_query): subquery = adapter.cases(owner=institute_id, name_query=slice_query, yield_query=True) elif institute_id: subquery = adapter.cases(owner=institute_id, yield_query=True) elif slice_query: subquery = adapter.cases(name_query=slice_query, yield_query=True) query = ({'$match': subquery} if subquery else {}) if query: pipeline.append(query) pipeline.append(group) res = adapter.case_collection.aggregate(pipeline) for status_group in res: cases.append({'status': status_group['_id'], 'count': status_group['count'], 'percent': (status_group['count'] / total_cases)}) return cases
Return the information about case groups Args: adapter(adapter.MongoAdapter) total_cases(int): Total number of cases institute_id(str): Institute to restrict the cases to slice_query(str): Query to filter cases to obtain statistics for. Returns: cases(list(dict)): list of status groups with case count and percentage
codesearchnet
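A short sketch of how the aggregation could be consumed; `adapter` is assumed to be a connected MongoAdapter and the institute id is a placeholder:

groups = get_case_groups(adapter, total_cases=120, institute_id='cust000')
for group in groups:
    print(group['status'], group['count'], '{:.0%}'.format(group['percent']))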
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: eos = [self.eos_token_id] if token_ids_1 is None: return len(token_ids_0 + eos) * [0] return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros.
github-repos
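A usage sketch with the standard transformers API; the checkpoint name and input strings are placeholders:

from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained('t5-small')
ids_a = tokenizer.encode('translate English to German: hello', add_special_tokens=False)
ids_b = tokenizer.encode('hallo', add_special_tokens=False)
token_type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
# T5 ignores token type ids, so this is all zeros with length len(ids_a) + len(ids_b) + 2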
def check_plugin(self, plugin): vcf_section = self[plugin] try: vcf_field = vcf_section['field'] if (not (vcf_field in self.vcf_columns)): raise ValidateError('field has to be in {0}\nWrong field name in plugin: {1}'.format(self.vcf_columns, plugin)) if (vcf_field == 'INFO'): try: info_key = vcf_section['info_key'] if (info_key == 'CSQ'): try: csq_key = vcf_section['csq_key'] except KeyError: raise ValidateError("CSQ entrys has to refer to an csq field.\nRefer with keyword 'csq_key'\ncsq_key is missing in section: {0}".format(plugin)) except KeyError: raise ValidateError("INFO entrys has to refer to an INFO field.\nRefer with keyword 'info_key'\ninfo_key is missing in section: {0}".format(plugin)) except KeyError: raise ValidateError("Vcf entrys have to refer to a field in the VCF with keyword 'field'.\nMissing keyword 'field' in plugin: {0}".format(plugin)) try: data_type = vcf_section['data_type'] if (not (data_type in self.data_types)): raise ValidateError('data_type has to be in {0}\nWrong data_type in plugin: {1}'.format(self.data_types, plugin)) except KeyError: raise ValidateError("Vcf entrys have to refer to a data type in the VCF with keyword 'data_type'.\nMissing data_type in plugin: {0}".format(plugin)) separators = vcf_section.get('separators', None) if separators: if (len(separators) == 1): self[plugin]['separators'] = list(separators) elif (data_type != 'flag'): raise ValidateError('If data_type != flag the separators have to be definedMissing separators in plugin: {0}'.format(plugin)) record_rule = vcf_section.get('record_rule', None) if record_rule: if (not (record_rule in ['min', 'max'])): raise ValidateError('Record rules have to be in {0}\nWrong record_rule in plugin: {1}'.format(['min', 'max'], plugin)) else: self.logger.info("Setting record rule to default: 'max'") return True
Check if the section is in the proper vcf format. Args: plugin (str): Name of the plugin section to check Returns: bool: True if it is in the proper format
codesearchnet
def _enumerate_cores(bounds: List[int], ring_bounds: List[int], ring_sizes: List[int], host_bounds: List[int], host_sizes: List[int]) -> List[List[int]]: if not bounds: return [[]] partials = _enumerate_cores(bounds[:-1], ring_bounds[:-1], ring_sizes[:-1], host_bounds[:-1], host_sizes[:-1]) results = [] for ring_i in range(0, bounds[-1], ring_bounds[-1]): for ring_j in range(0, len(partials), ring_sizes[-1]): for host_i in range(ring_i, ring_i + ring_bounds[-1], host_bounds[-1]): for host_j in range(ring_j, ring_j + ring_sizes[-1], host_sizes[-1]): for i in range(host_i, host_i + host_bounds[-1]): for j in range(host_j, host_j + host_sizes[-1]): results.append(partials[j] + [i]) return results
Enumerates cores within `bounds` from fastest to slowest varying axes. Args: bounds: Upper bounds of axes, from fastest to slowest varying. ring_bounds: Upper bounds of ring size per axis in the same axis order. ring_sizes: Number of consecutive cores in the ring built so far, cumulatively. host_bounds: Number of axis values per host in the same axis order. host_sizes: Number of consecutive cores on one host, cumulatively. Returns: Cores represented as a list of 4 integers in the same axis order.
github-repos
def new_partition(self, table, **kwargs): from . import Partition if isinstance(table, string_types): table = self.table(table) if 'sequence_id' in kwargs: sequence_id = kwargs['sequence_id'] del kwargs['sequence_id'] else: sequence_id = self._database.next_sequence_id(Dataset, self.vid, Partition) p = Partition( t_vid=table.vid, table_name=table.name, sequence_id=sequence_id, dataset=self, d_vid=self.vid, **kwargs ) p.update_id() return p
Creates new partition and returns it. Args: table (orm.Table): Returns: orm.Partition
juraj-google-style
def import_settings(self, filename): if not os.path.isfile(filename): self._logger.log( 'error', 'File: {} not found, continuing with default settings'.format( filename ) ) else: with open(filename, 'r') as jsonFile: data = json.load(jsonFile) self._value_ranges = data['valueRanges'] self._best_values = data['best_values'] self._best_values = [] for index, value in enumerate(data['best_values']): if self._value_ranges[index] == 'int': self._best_values.append(int(value)) else: self._best_values.append(float(value)) self.minimize = data['minimize'] self.num_employers = data['num_employers'] self._best_score = float(data['best_score']) self.limit = data['limit']
Import settings from a JSON file Args: filename (string): name of the file to import from
juraj-google-style
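The expected JSON layout can be read off the code above; a minimal sketch where the file name, the values, and the `optimizer` object name are assumptions:

import json

settings = {
    'valueRanges': ['int', 'float'],
    'best_values': ['3', '0.25'],   # cast to int/float according to valueRanges on import
    'minimize': True,
    'num_employers': 50,
    'best_score': '0.012',
    'limit': 20,
}
with open('abc_settings.json', 'w') as f:
    json.dump(settings, f)

optimizer.import_settings('abc_settings.json')  # `optimizer` is the containing object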
def get_members(self, **query_params): members = self.get_members_json(self.base_uri, query_params=query_params) members_list = [] for member_json in members: members_list.append(self.create_member(member_json)) return members_list
Get all members attached to this organisation. Returns a list of Member objects Returns: list(Member): The members attached to this organisation
codesearchnet
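A brief sketch; `organisation` is assumed to be an already-constructed Organisation bound to an API client:

for member in organisation.get_members():
    print(member)
# Any keyword arguments are forwarded as query parameters to the members endpoint.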
def __init__(self, code, component_trace): super().__init__(code) self.trace = component_trace
Constructs a FireExit exception. Args: code: (int) Exit code for the Fire CLI. component_trace: (FireTrace) The trace for the Fire command.
github-repos
def conditionally_create_kms_key(role_name, service_type): if service_type not in KMS_SERVICE_TYPES: print_if_verbose("not eligible for kms; service_type: {} is not valid for kms".format(service_type)) return key_alias = role_name.replace('.', '_') try: kms_key = CLIENTS["kms"].describe_key(KeyId='alias/{}'.format(key_alias)) except ClientError as error: if error.response['Error']['Code'] == 'NotFoundException': kms_key = None else: fail("Exception describing KMS key: {} {}".format(role_name, error)) if service_type == "aws_fixture": kms_key_policy = + CONTEXT.account_id + else: formatted_principal = '"AWS": "arn:aws:iam::{}:role/{}"'.format(CONTEXT.account_id, role_name) kms_key_policy = + CONTEXT.account_id + + formatted_principal + + CONTEXT.account_id + + CONTEXT.account_id + if not kms_key: print("Create KMS key: {}".format(key_alias)) if CONTEXT.commit: create_key_failures = 0 while create_key_failures <= 5: try: new_kms_key = CLIENTS["kms"].create_key( Policy=kms_key_policy, Description='Master Key for {}'.format(role_name) ) break except ClientError as error: if error.response['Error']['Code'] == 'MalformedPolicyDocumentException': if create_key_failures == 5: fail("Exception creating kms key: {} {}".format(role_name, error)) else: create_key_failures += 1 time.sleep(5) else: fail("Exception creating kms key: {} {}".format(role_name, error)) try: CLIENTS["kms"].create_alias( AliasName='alias/{}'.format(key_alias), TargetKeyId=new_kms_key['KeyMetadata']['KeyId'] ) except ClientError as error: fail("Exception creating alias for kms key: {} {}".format(role_name, error)) else: print_if_verbose("KMS key already exists: {}".format(key_alias))
Create KMS Master Key for encryption/decryption of sensitive values in cf templates and latebind configs Args: role_name: name of the role that kms key is being created for; it will be given decrypt privileges. service_type: service registry service type: 'aws_ec2', 'aws_fixture', 'aws_lambda', or 'http_service'
juraj-google-style
def annotated(func, name=None): if hasattr(func, 'metadata'): if name is not None: func.metadata = AnnotatedMetadata(func, name) return func func.metadata = AnnotatedMetadata(func, name) func.finalizer = False func.takes_cmdline = False func.decorated = False func.context = False return func
Mark a function as callable from the command line. This function is meant to be called as decorator. This function also initializes metadata about the function's arguments that is built up by the param decorator. Args: func (callable): The function that we wish to mark as callable from the command line. name (str): Optional string that will override the function's built-in name.
juraj-google-style
def create_cloudtrail(self, region): ct = self.session.client('cloudtrail', region_name=region) self.create_sns_topic(region) ct.create_trail(Name=self.trail_name, S3BucketName=self.bucket_name, S3KeyPrefix=self.account.account_name, IsMultiRegionTrail=True, IncludeGlobalServiceEvents=True, SnsTopicName=self.topic_name) self.subscribe_sns_topic_to_sqs(region) auditlog(event='cloudtrail.create_cloudtrail', actor=self.ns, data={'account': self.account.account_name, 'region': region}) self.log.info('Created CloudTrail for {} in {} ({})'.format(self.account, region, self.bucket_name))
Creates a new CloudTrail Trail Args: region (str): Name of the AWS region Returns: `None`
codesearchnet
def update_compliance_all(self, information, timeout=-1): uri = self.URI + "/compliance" result = self._helper.update(information, uri, timeout=timeout) return result
Returns SAS Logical Interconnects to a consistent state. The current SAS Logical Interconnect state is compared to the associated SAS Logical Interconnect group. Args: information: Can be either the resource ID or URI. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: SAS Logical Interconnect.
juraj-google-style
def _grad_fn(ys, xs, args, func_graph): grad_ys = args[3:] grad_outs = gradients_util._GradientsHelper(ys, xs, grad_ys=grad_ys, src_graph=func_graph, unconnected_gradients='zero') assert all((g is not None for g in grad_outs)) counter = args[0] maximum_iterations = args[1] total_iters = args[2] return [counter + 1, maximum_iterations, total_iters] + grad_outs
Computes the gradient of `func_graph` in the current graph. This function builds the gradient graph of the corresponding forward-pass `func_graph` by differentiating `func_graph`'s outputs w.r.t. its inputs. Args: ys: A `Tensor` or list of tensors to be differentiated. xs: A `Tensor` or list of tensors to be used for differentiation. args: The input arguments. args[0] - Loop counter args[1] - maximum_iterations args[2] - Total number of iterations args[3:] - Incoming gradients for `ys`. func_graph: function.FuncGraph. The corresponding forward-pass function. Returns: The output gradient Tensors.
github-repos
def _parse_networks(service_list: dict) -> list: networks = [] for n_values in service_list['networks'].values(): for (n_key, n_value) in n_values.items(): if ('name' in n_key): networks.append(n_value) return networks
Parse network key. Args: service_list (dict): Service configurations Returns: list, List of networks
codesearchnet
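A self-contained sketch showing the shape of input this parser expects, derived directly from the code above; the network names are placeholders:

service_config = {
    'networks': {
        'frontend': {'name': 'frontend_overlay', 'driver': 'overlay'},
        'backend': {'name': 'backend_overlay'},
    }
}
print(_parse_networks(service_config))  # ['frontend_overlay', 'backend_overlay']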
def timeseries_from_mat(filename, varname=None, fs=1.0): import scipy.io as sio if varname is None: mat_dict = sio.loadmat(filename) if len(mat_dict) > 1: raise ValueError('Must specify varname: file contains ' 'more than one variable. ') else: mat_dict = sio.loadmat(filename, variable_names=(varname,)) array = mat_dict.popitem()[1] return Timeseries(array, fs=fs)
load a multi-channel Timeseries from a MATLAB .mat file Args: filename (str): .mat file to load varname (str): variable name. only needed if there is more than one variable saved in the .mat file fs (scalar): sample rate of timeseries in Hz. (constant timestep assumed) Returns: Timeseries
juraj-google-style
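A usage sketch; the file name and variable name are placeholders:

ts = timeseries_from_mat('eeg_recording.mat', varname='eeg', fs=256.0)
print(ts.shape)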
def create_view(self, state_root_hash=None): if (state_root_hash is None): state_root_hash = INIT_ROOT_KEY merkle_db = MerkleDatabase(self._database, merkle_root=state_root_hash) return StateView(merkle_db)
Creates a StateView for the given state root hash. Args: state_root_hash (str): The state root hash of the state view to return. If None, returns the state view for the initial (empty) state root. Returns: StateView: state view locked to the given root hash.
codesearchnet
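A hedged sketch; `factory` stands for the object exposing create_view() and `state_root_hash` for a Merkle root taken from a block header:

view = factory.create_view(state_root_hash)
genesis_view = factory.create_view()  # falls back to the initial (empty) root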
def module_name_from_path(folder_name, verbose=False): folder_name = folder_name.split('.pyc')[0] folder_name = folder_name.split('.py')[0] folder_name = os.path.normpath(folder_name) path = (folder_name + '/') package = get_python_package(path) module = [] if verbose: print(('folder_name', folder_name)) while True: path = os.path.dirname(path) module.append(os.path.basename(path)) if (os.path.basename(path) == package): path = os.path.dirname(path) break if (os.path.dirname(path) == path): (path, module) = (None, None) break if verbose: print(('path', path, os.path.dirname(path))) if verbose: print(('module', module)) if verbose: print(('module', module)) module.reverse() module = '.'.join(module) return (module, path)
Takes in a path to a folder or file and returns the module name and the path to the module. The module is identified by the path being in os.path, e.g. if /Users/PycharmProjects/ is in os.path, then folder_name = '/Users/PycharmProjects/pylabcontrol/pylabcontrol/scripts/script_dummy.pyc' returns '/Users/PycharmProjects/' as the path and pylabcontrol.scripts.script_dummy as the module Args: folder_name: path to a file of the form '/Users/PycharmProjects/pylabcontrol/pylabcontrol/scripts/script_dummy.pyc' Returns: module: a string of the form, e.g. pylabcontrol.scripts.script_dummy ... path: a string with the path to the module, e.g. /Users/PycharmProjects/
codesearchnet
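The docstring's own example, written out as a call (the result assumes the package layout described there):

module, path = module_name_from_path(
    '/Users/PycharmProjects/pylabcontrol/pylabcontrol/scripts/script_dummy.pyc')
# module == 'pylabcontrol.scripts.script_dummy'
# path   == '/Users/PycharmProjects/'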
def patch_traces(self, traces, project_id=None): if project_id is None: project_id = self.project self.trace_api.patch_traces(project_id=project_id, traces=traces)
Sends new traces to Stackdriver Trace or updates existing traces. Args: traces (dict): Required. The traces to be patched in the API call. project_id (Optional[str]): ID of the Cloud project where the trace data is stored.
juraj-google-style
def mkdirs(self, path): pass
Recursively create directories for the provided path. Args: path: string path of the directory structure that should be created Raises: IOError: if leaf directory already exists.
github-repos
def dbmax_stddev(self, value=None): if (value is not None): try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float for field `dbmax_stddev`'.format(value)) self._dbmax_stddev = value
Corresponds to IDD Field `dbmax_stddev` Standard deviation of extreme annual maximum dry-bulb temperature Args: value (float): value for IDD Field `dbmax_stddev` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def get_unparsed_moves_from_last_n_games(games, games_nr, n, moves=(2 ** 21), shuffle=True, column_family=TFEXAMPLE, column='example', values_only=True): (ct_r, ct_nr) = (9, 1) ct_total = (ct_r + ct_nr) fr_r = (ct_r / ct_total) fr_nr = (ct_nr / ct_total) resign = games.moves_from_last_n_games(math.ceil((n * fr_r)), math.ceil((moves * fr_r)), shuffle, column_family, column) no_resign = games_nr.moves_from_last_n_games(math.floor((n * fr_nr)), math.floor((moves * fr_nr)), shuffle, column_family, column) selection = np.array((([0] * ct_r) + ([1] * ct_nr)), dtype=np.int64) choice = tf.data.Dataset.from_tensor_slices(selection).repeat().take(moves) ds = tf.contrib.data.choose_from_datasets([resign, no_resign], choice) if shuffle: ds = ds.shuffle((len(selection) * 2)) if values_only: ds = ds.map((lambda row_name, s: s)) return ds
Get a dataset of serialized TFExamples from the last N games. Args: games, games_nr: GameQueues of the regular selfplay and calibration (aka 'no resign') games to sample from. n: an integer indicating how many past games should be sourced. moves: an integer indicating how many moves should be sampled from those N games. column_family: name of the column family containing move examples. column: name of the column containing move examples. shuffle: if True, shuffle the selected move examples. values_only: if True, return only column values, no row keys. Returns: A dataset containing no more than `moves` examples, sampled randomly from the last `n` games in the table.
codesearchnet
def write_merged_bioassembly(inpath, outdir, outname, force_rerun=False):
    outpath = op.join(outdir, (outname + '.pdb'))
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):
        s = StructProp('Model merging', structure_path=inpath, file_type='pdb')
        ss = s.parse_structure()
        # Collapse all models into chains of the first model, then write the result
        merge_all_models_into_first_model(ss.structure)
        outpath = ss.write_pdb(custom_name=outname, out_dir=outdir, force_rerun=force_rerun)
    return outpath
Utility to take as input a bioassembly file and merge all its models into multiple chains in a single model. Args: inpath (str): Path to input PDB file with multiple models that represent an oligomeric form of a structure. outdir (str): Path to output directory outname (str): New filename of structure file force_rerun (bool): If a new PDB should be written if the file exists Returns: str: Path to newly written PDB file.
codesearchnet
def get(self, param=None, must=[APIKEY]): param = {} if param is None else param r = self.verify_param(param, must) if not r.is_succ(): return r handle = CommonResultHandler(lambda rsp: {VERSION_V1:rsp.get(USER), VERSION_V2:rsp}[self.version()]) return self.path('get.json').post(param, handle, r)
Query account information Parameter: Type: Required: Description: Example: apikey String Yes Unique user identifier 9b11127a9701975c734b8aee81ee3526 Args: param: (Optional) Results: Result
juraj-google-style
def _compute_dtype(self): return self._dtype_policy.compute_dtype
The layer's compute dtype. Unless mixed-precision is used, this is the same as `Layer.dtype`. If self._autocast is True, the layer will cast floating-point inputs to this. Returns: The layer's compute dtype.
github-repos
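A sketch of the distinction with the public Keras mixed-precision API:

import tensorflow as tf

tf.keras.mixed_precision.set_global_policy('mixed_float16')
layer = tf.keras.layers.Dense(4)
print(layer.dtype)          # 'float32' -- the variable dtype
print(layer.compute_dtype)  # 'float16' -- floating-point inputs are cast to this
tf.keras.mixed_precision.set_global_policy('float32')  # restore the default policy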
def get_nanopub_urls(ns_root_url: str=None, start_dt: str=None) -> dict: if (not ns_root_url): ns_root_url = config['bel_api']['servers']['nanopubstore'] url = f'{ns_root_url}/nanopubs/timed' if (not start_dt): start_dt = get_nanopubstore_start_dt(ns_root_url) params = {'startTime': start_dt, 'published': True} r = bel.utils.get_url(url, params=params, cache=False) if (r and (r.status_code == 200)): data = r.json() new_start_dt = data['queryTime'] update_nanopubstore_start_dt(ns_root_url, new_start_dt) nanopub_urls = {'modified': [], 'deleted': []} for nid in data['deleteddata']: nanopub_urls['deleted'].append(f'{ns_root_url}/nanopubs/{nid}') for nid in data['data']: nanopub_urls['modified'].append(f'{ns_root_url}/nanopubs/{nid}') return nanopub_urls else: log.error(f'Bad request to Nanopubstore', url=url, status=r.status_code, type='api_request') return {}
Get modified and deleted nanopub urls, limited by the last datetime retrieved (start_dt). Modified includes new and updated nanopubs. Args: ns_root_url (str): Root URL of the nanopubstore (defaults to the configured server) start_dt (str): Only return nanopubs changed after this datetime Returns: dict: {'modified': [], 'deleted': []}
codesearchnet
def _reduce_sum_grad(op, grad): if op.get_attr('reduction') != b'sum': raise LookupError('No gradient defined for NcclAllReduce except for reduction="sum".') _check_device(grad, expected=op.device) with ops.device(op.device): result = gen_nccl_ops.nccl_broadcast(input=grad, shape=grad.shape) return [result] * len(op.inputs)
The gradients for input `Operation` of `reduce_sum`. Args: op: The `sum send` `Operation` that we are differentiating. grad: Gradient with respect to the output of the `reduce_sum` op. Returns: The gradient with respect to the input of `reduce_sum` op. Raises: LookupError: If the reduction attribute of op is not `sum`.
github-repos