Dataset columns: code (string, lengths 20–4.93k), docstring (string, lengths 33–1.27k), source (string, 3 classes).
def run_metadata(name, data, step=None):
    summary_metadata = summary_pb2.SummaryMetadata()
    summary_metadata.plugin_data.plugin_name = 'graph_run_metadata'
    summary_metadata.plugin_data.content = b'1'
    with summary_scope(name, 'graph_run_metadata_summary', [data, step]) as (tag, _):
        with ops.device('cpu:0'):
            tensor = constant_op.constant(data.SerializeToString(), dtype=dtypes.string)
        return write(tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
Writes entire RunMetadata summary. A RunMetadata can contain DeviceStats, partition graphs, and function graphs. Please refer to the proto for definition of each field. Args: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A RunMetadata proto to write. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None.
github-repos
def publish(self, topic, dct):
    get_logger().info('Publishing message {} on routing key {}...'.format(dct, topic))
    self._channel.basic_publish(exchange=self.exchange, routing_key=topic, body=json.dumps(dct))
Send a dict with internal routing key to the exchange. Args: topic: topic to publish the message to dct: dict object to send
codesearchnet
def from_dict(cls, d, fmt=None):
    if fmt == "abivars":
        from pymatgen.io.abinit.abiobjects import structure_from_abivars
        return structure_from_abivars(cls=cls, **d)

    lattice = Lattice.from_dict(d["lattice"])
    sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
    charge = d.get("charge", None)
    return cls.from_sites(sites, charge=charge)
Reconstitute a Structure object from a dict representation of Structure created using as_dict(). Args: d (dict): Dict representation of structure. Returns: Structure object
juraj-google-style
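For illustration, a minimal round-trip sketch of the dict serialization described above, assuming a recent pymatgen is installed; the lattice constant and species are arbitrary:

from pymatgen.core import Lattice, Structure

# Build a simple CsCl structure, serialize it to a dict, and reconstitute it.
structure = Structure(Lattice.cubic(4.2), ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
d = structure.as_dict()
restored = Structure.from_dict(d)
print(restored.formula)  # 'Cs1 Cl1'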
def yaml_to_ordered_dict(stream, loader=yaml.SafeLoader): class OrderedUniqueLoader(loader): NO_DUPE_SIBLINGS = ["stacks", "class_path"] NO_DUPE_CHILDREN = ["stacks"] def _error_mapping_on_dupe(self, node, node_name): if isinstance(node, MappingNode): mapping = {} for n in node.value: a = n[0] b = mapping.get(a.value, None) if b: msg = "{} mapping cannot have duplicate keys {} {}" raise ConstructorError( msg.format(node_name, b.start_mark, a.start_mark) ) mapping[a.value] = a def _validate_mapping(self, node, deep=False): if not isinstance(node, MappingNode): raise ConstructorError( None, None, "expected a mapping node, but found %s" % node.id, node.start_mark) mapping = OrderedDict() for key_node, value_node in node.value: key = self.construct_object(key_node, deep=deep) try: hash(key) except TypeError as exc: raise ConstructorError( "while constructing a mapping", node.start_mark, "found unhashable key (%s)" % exc, key_node.start_mark ) if key in mapping and key in self.NO_DUPE_SIBLINGS: msg = "{} key cannot have duplicate siblings {} {}" raise ConstructorError( msg.format(key, node.start_mark, key_node.start_mark) ) if key in self.NO_DUPE_CHILDREN: self._error_mapping_on_dupe(value_node, key_node.value) value = self.construct_object(value_node, deep=deep) mapping[key] = value return mapping def construct_mapping(self, node, deep=False): if isinstance(node, MappingNode): self.flatten_mapping(node) return self._validate_mapping(node, deep=deep) def construct_yaml_map(self, node): data = OrderedDict() yield data value = self.construct_mapping(node) data.update(value) OrderedUniqueLoader.add_constructor( u'tag:yaml.org,2002:map', OrderedUniqueLoader.construct_yaml_map, ) return yaml.load(stream, OrderedUniqueLoader)
Provides yaml.load alternative with preserved dictionary order. Args: stream (string): YAML string to load. loader (:class:`yaml.loader`): PyYAML loader class. Defaults to safe load. Returns: OrderedDict: Parsed YAML.
juraj-google-style
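A minimal usage sketch of the helper above; the YAML content and class paths are made up purely for illustration:

yaml_config = """
stacks:
  vpc:
    class_path: blueprints.VPC
  bastion:
    class_path: blueprints.Bastion
"""
config = yaml_to_ordered_dict(yaml_config)
# Keys come back in document order because construct_yaml_map builds an OrderedDict.
print(list(config["stacks"].keys()))  # ['vpc', 'bastion']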
def user_exists(self, username):
    response = self._get(self.rest_url + "/user", params={"username": username})
    if not response.ok:
        return None
    return True
Determines if the user exists. Args: username: The user name. Returns: bool: True if the user exists in the Crowd application.
juraj-google-style
def ParseFileObject(self, parser_mediator, file_object):
    if file_object.read(1) != b'{':
        raise errors.UnableToParseFile((
            '[{0:s}] {1:s} is not a valid Preference file, '
            'missing opening brace.').format(
                self.NAME, parser_mediator.GetDisplayName()))

    file_object.seek(0, os.SEEK_SET)
    file_content = file_object.read()
    file_content = codecs.decode(file_content, self._ENCODING)

    try:
        json_dict = json.loads(file_content)
    except ValueError as exception:
        raise errors.UnableToParseFile((
            '[{0:s}] Unable to parse file {1:s} as JSON: {2!s}').format(
                self.NAME, parser_mediator.GetDisplayName(), exception))
    except IOError as exception:
        raise errors.UnableToParseFile((
            '[{0:s}] Unable to open file {1:s} for parsing as'
            'JSON: {2!s}').format(
                self.NAME, parser_mediator.GetDisplayName(), exception))

    if not set(self.REQUIRED_KEYS).issubset(set(json_dict.keys())):
        raise errors.UnableToParseFile('File does not contain Preference data.')

    extensions_setting_dict = json_dict.get('extensions')
    if not extensions_setting_dict:
        raise errors.UnableToParseFile(
            '[{0:s}] {1:s} is not a valid Preference file, '
            'does not contain extensions value.'.format(
                self.NAME, parser_mediator.GetDisplayName()))

    extensions_dict = extensions_setting_dict.get('settings')
    if not extensions_dict:
        raise errors.UnableToParseFile(
            '[{0:s}] {1:s} is not a valid Preference file, '
            'does not contain extensions settings value.'.format(
                self.NAME, parser_mediator.GetDisplayName()))

    extensions_autoupdate_dict = extensions_setting_dict.get('autoupdate')
    if extensions_autoupdate_dict:
        autoupdate_lastcheck_timestamp = extensions_autoupdate_dict.get(
            'last_check', None)
        if autoupdate_lastcheck_timestamp:
            autoupdate_lastcheck = int(autoupdate_lastcheck_timestamp, 10)
            event_data = ChromeExtensionsAutoupdaterEventData()
            event_data.message = 'Chrome extensions autoupdater last run'
            date_time = dfdatetime_webkit_time.WebKitTime(
                timestamp=autoupdate_lastcheck)
            event = time_events.DateTimeValuesEvent(
                date_time, definitions.TIME_DESCRIPTION_ADDED)
            parser_mediator.ProduceEventWithEventData(event, event_data)

        autoupdate_nextcheck_timestamp = extensions_autoupdate_dict.get(
            'next_check', None)
        if autoupdate_nextcheck_timestamp:
            autoupdate_nextcheck = int(autoupdate_nextcheck_timestamp, 10)
            event_data = ChromeExtensionsAutoupdaterEventData()
            event_data.message = 'Chrome extensions autoupdater next run'
            date_time = dfdatetime_webkit_time.WebKitTime(
                timestamp=autoupdate_nextcheck)
            event = time_events.DateTimeValuesEvent(
                date_time, definitions.TIME_DESCRIPTION_ADDED)
            parser_mediator.ProduceEventWithEventData(event, event_data)

    browser_dict = json_dict.get('browser', None)
    if browser_dict and 'last_clear_browsing_data_time' in browser_dict:
        last_clear_history_timestamp = browser_dict.get(
            'last_clear_browsing_data_time', None)
        if last_clear_history_timestamp:
            last_clear_history = int(last_clear_history_timestamp, 10)
            event_data = ChromeExtensionsAutoupdaterEventData()
            event_data.message = 'Chrome history was cleared by user'
            date_time = dfdatetime_webkit_time.WebKitTime(
                timestamp=last_clear_history)
            event = time_events.DateTimeValuesEvent(
                date_time, definitions.TIME_DESCRIPTION_DELETED)
            parser_mediator.ProduceEventWithEventData(event, event_data)

    self._ExtractExtensionInstallEvents(extensions_dict, parser_mediator)

    profile_dict = json_dict.get('profile', None)
    if profile_dict:
        content_settings_dict = profile_dict.get('content_settings', None)
        if content_settings_dict:
            exceptions_dict = content_settings_dict.get('exceptions', None)
            if exceptions_dict:
                self._ExtractContentSettingsExceptions(
                    exceptions_dict, parser_mediator)
Parses a Chrome preferences file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
juraj-google-style
def __init__(self, scope, parent, explicit=True):
    CodeStatement.__init__(self, scope, parent)
    self.body = []
    self.explicit = explicit
Constructor for code blocks. Args: scope (CodeEntity): The program scope where this object belongs. parent (CodeEntity): This object's parent in the program tree. Kwargs: explicit (bool): Whether the block is explicit in the code.
juraj-google-style
def clone(self, name=None): if name is None: name = self.module_name + "_clone" return type(self)(output_channels=self.output_channels, kernel_shape=self._kernel_shape, stride=self._stride, rate=self._rate, padding=self._padding, use_bias=self._use_bias, initializers=self._initializers, partitioners=self._partitioners, regularizers=self._regularizers, mask=self._mask, data_format=self._data_format, custom_getter=self._custom_getter, name=name)
Returns a cloned `_ConvND` module. Args: name: Optional string assigning name of cloned module. The default name is constructed by appending "_clone" to `self.module_name`. Returns: A copy of the current class.
juraj-google-style
def tokenize(self, s, pattern=None, active=None):
    if pattern is None:
        if self.tokenize_pattern is None:
            pattern = r'[ \t]+'
        else:
            pattern = self.tokenize_pattern
    if active is None:
        active = self.active
    return self.group.tokenize(s, pattern=pattern, active=active)
Rewrite and tokenize the input string *s*. Args: s (str): the input string to process pattern (str, optional): the regular expression pattern on which to split tokens; defaults to `[ \t]+` active (optional): a collection of external module names that may be applied if called Returns: a :class:`~delphin.tokens.YyTokenLattice` containing the tokens and their characterization information
juraj-google-style
def _process_parameters_section(func_documentation, sig, func, class_name, model_name_lowercase, parent_class, indent_level): docstring = set_min_indent('Args:\n', indent_level + 4) undocumented_parameters = [] documented_params = {} documented_kwargs = {} if func_documentation is not None: documented_params, func_documentation = parse_docstring(func_documentation) if model_name_lowercase is not None: documented_params = format_args_docstring(documented_params, model_name_lowercase) param_docstring, missing_args = _process_regular_parameters(sig, func, class_name, documented_params, indent_level, undocumented_parameters) docstring += param_docstring kwargs_docstring = _process_kwargs_parameters(sig, func, parent_class, model_name_lowercase, documented_kwargs, indent_level, undocumented_parameters) docstring += kwargs_docstring if len(undocumented_parameters) > 0: print('\n'.join(undocumented_parameters)) return docstring
Process the parameters section of the docstring. Args: func_documentation (`str`): Existing function documentation (manually specified in the docstring) sig (`inspect.Signature`): Function signature func (`function`): Function the parameters belong to class_name (`str`): Name of the class the function belongs to model_name_lowercase (`str`): Lowercase model name parent_class (`class`): Parent class of the function (if any) indent_level (`int`): Indentation level
github-repos
def retrieve_token(self, token):
    headers = self.client._get_private_headers()
    endpoint = '/tokens/{}'.format(token)
    return self.client._get(self.client.URL_BASE + endpoint, headers=headers)
Retrieve Token details for a specific Token. Args: token: The identifier of the token. Returns: The API response containing the token details.
juraj-google-style
def get_link_flags():
    is_mac = _platform.system() == 'Darwin'
    ver = _VERSION.split('.')[0]
    flags = []
    if not _MONOLITHIC_BUILD:
        flags.append('-L%s' % get_lib())
        if is_mac:
            flags.append('-ltensorflow_framework.%s' % ver)
        else:
            flags.append('-l:libtensorflow_framework.so.%s' % ver)
    return flags
Returns the linker flags for linking with TensorFlow. The returned list of arguments can be passed to the linker for linking against TensorFlow. The result is platform dependent. For example, on a typical Linux system with Python 3.7 the following command prints `['-L/usr/local/lib/python3.7/dist-packages/tensorflow', '-l:libtensorflow_framework.so.2']` >>> print(tf.sysconfig.get_link_flags()) Returns: A list of strings for the linker flags.
github-repos
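As a hedged usage sketch (assuming TensorFlow is installed), these flags are typically combined with the compile flags when building a custom op:

import tensorflow as tf

# Flags for compiling and linking a custom op against the installed TensorFlow.
compile_flags = " ".join(tf.sysconfig.get_compile_flags())
link_flags = " ".join(tf.sysconfig.get_link_flags())
print(compile_flags)
print(link_flags)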
def __parse_cmd_args(args, sudo, shell): if (isinstance(args, tuple) and (len(args) == 1) and isinstance(args[0], tuple)): args = args[0] if shell: if isinstance(args, six.string_types): pass elif (isinstance(args, (list, tuple)) and (len(args) > 1)): args = ' '.join(args) elif (isinstance(args, (list, tuple)) and (len(args) == 1)): if isinstance(args[0], (tuple, list)): args = ' '.join(args) elif isinstance(args[0], six.string_types): args = args[0] elif isinstance(args, six.string_types): args = shlex.split(args, posix=(not WIN32)) elif isinstance(args, (list, tuple)): if (len(args) > 1): args = tuple(args) elif (len(args) == 1): if isinstance(args[0], (tuple, list)): args = tuple(args[0]) elif isinstance(args[0], six.string_types): args = shlex.split(args[0], posix=(not WIN32)) if (sudo is True): if (not WIN32): if shell: args = ('sudo ' + args) else: args = (tuple(['sudo']) + tuple(args)) else: pass if WIN32: if ((len(args) == 1) and isinstance(args[0], six.string_types)): args = shlex.split(args[0], posix=(not WIN32)) return args
When shell is True, Popen will only accept strings. No tuples Shell really should not be true. Returns: args suitable for subprocess.Popen I'm not quite sure what those are yet. Plain old string seem to work well? But I remember needing shlex at some point. CommandLine: python -m utool.util_cplat --test-__parse_cmd_args Example: >>> # DISABLE_DOCTEST >>> from utool.util_cplat import * # NOQA >>> # build test data >>> args = 'echo "hello world"' >>> sudo = False >>> shell = False >>> # execute function >>> args = __parse_cmd_args(args, sudo, shell) >>> # verify results >>> result = str(args) >>> print(result)
codesearchnet
def is_valid_assignment(self, mtf_dimension_name, mesh_dimension_name):
    return ((mtf_dimension_name in self._splittable_mtf_dimension_names) and
            ((self._mtf_dimension_name_to_size_gcd[mtf_dimension_name] %
              self._mesh_dimension_name_to_size[mesh_dimension_name]) == 0))
Whether this MTF dimension may be assigned to this mesh dimension. Args: mtf_dimension_name: string, the name of a Mesh TensorFlow dimension. mesh_dimension_name: string, the name of a mesh dimension. Returns: A boolean indicating whether the assignment is valid.
codesearchnet
def parse(file_contents, file_name):
    env = Environment()
    result = ""
    try:
        env.parse(file_contents)
    except Exception:
        _, exc_value, _ = sys.exc_info()
        result += "ERROR: Jinja2 Template File: {0}".format(file_name)
        result += repr(exc_value) + '\n'
    return result
Tries to parse the contents of a file assumed to be a Jinja2 template. Args: file_contents (str): File contents of a Jinja2 template file. file_name (str): Name of the file, used in the error message. Returns: str: An empty string if parsing succeeds, otherwise an error message describing why the template could not be parsed.
juraj-google-style
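A small sketch of calling the helper above with one valid and one broken template; the file names are illustrative:

ok = parse("Hello {{ name }}", "greeting.j2")
bad = parse("{% if x %}missing endif", "broken.j2")
print(repr(ok))  # '' - parsed cleanly
print(bad)       # "ERROR: Jinja2 Template File: broken.j2" followed by the exception repr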
def with_values(self, new_values): new_values = _convert_to_ragged_tensor_values(new_values) new_values.shape.with_rank_at_least(1) self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1]) if isinstance(new_values, RaggedTensor) and self._row_partition.dtype != new_values.row_splits.dtype: if not ragged_config.auto_cast_partition_dtype(): raise ValueError('self and new_values have mismatched row_splits dtypes; use RaggedTensor.with_row_splits_dtype() to convert them to compatible dtypes.') new_values = new_values.with_row_splits_dtype(dtypes.int64) return self.with_row_splits_dtype(dtypes.int64).with_values(new_values) return RaggedTensor(values=new_values, row_partition=self._row_partition, internal=True)
Returns a copy of `self` with `values` replaced by `new_value`. Preserves cached row-partitioning tensors such as `self.cached_nrows` and `self.cached_value_rowids` if they have values. Args: new_values: Potentially ragged tensor to use as the `values` for the returned `RaggedTensor`. Must have `rank > 0`, and must have the same number of rows as `self.values`. Returns: A `RaggedTensor`. `result.rank = 1 + new_values.rank`. `result.ragged_rank = 1 + new_values.ragged_rank`
github-repos
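A brief illustration of `with_values` (assuming TensorFlow 2.x), replacing the flat values while keeping the row partitioning:

import tensorflow as tf

rt = tf.ragged.constant([[1, 2], [3]])
doubled = rt.with_values(rt.values * 2)
print(doubled)  # <tf.RaggedTensor [[2, 4], [6]]>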
def MakePmfFromItems(t, name=''):
    pmf = Pmf(dict(t), name)
    pmf.Normalize()
    return pmf
Makes a PMF from a sequence of value-probability pairs Args: t: sequence of value-probability pairs name: string name for this PMF Returns: Pmf object
juraj-google-style
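For example, assuming the Pmf class from the same module (ThinkBayes) is in scope, the helper above can be used like this:

pmf = MakePmfFromItems([('heads', 0.5), ('tails', 0.5)], name='coin')
print(pmf.Prob('heads'))  # 0.5 after normalization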
def locked_put(self, credentials):
    keyring.set_password(self._service_name, self._user_name, credentials.to_json())
Write Credentials to the keyring. Args: credentials: Credentials, the credentials to store.
juraj-google-style
def write(gctoo, out_fname, data_null='NaN', metadata_null='-666', filler_null='-666', data_float_format='%.4f'): if (not out_fname.endswith('.gct')): out_fname += '.gct' f = open(out_fname, 'w') dims = [str(gctoo.data_df.shape[0]), str(gctoo.data_df.shape[1]), str(gctoo.row_metadata_df.shape[1]), str(gctoo.col_metadata_df.shape[1])] write_version_and_dims(VERSION, dims, f) write_top_half(f, gctoo.row_metadata_df, gctoo.col_metadata_df, metadata_null, filler_null) write_bottom_half(f, gctoo.row_metadata_df, gctoo.data_df, data_null, data_float_format, metadata_null) f.close() logger.info('GCT has been written to {}'.format(out_fname))
Write a gctoo object to a gct file. Args: gctoo (gctoo object) out_fname (string): filename for output gct file data_null (string): how to represent missing values in the data (default = "NaN") metadata_null (string): how to represent missing values in the metadata (default = "-666") filler_null (string): what value to fill the top-left filler block with (default = "-666") data_float_format (string): how many decimal points to keep in representing data (default = 4 digits; None will keep all digits) Returns: None
codesearchnet
def get_app_hostname(): if ((not is_running_on_app_engine()) or is_running_on_localhost()): return None app_id = app_identity.get_application_id() prefix = get_hostname_prefix() suffix = 'appspot.com' if (':' in app_id): tokens = app_id.split(':') api_name = tokens[1] if (tokens[0] == 'google.com'): suffix = 'googleplex.com' else: api_name = app_id return '{0}{1}.{2}'.format(prefix, api_name, suffix)
Return hostname of a running Endpoints service. Returns hostname of a running Endpoints API. It can be 1) "localhost:PORT" if running on development server, or 2) "app_id.appspot.com" if running on external app engine prod, or 3) "app_id.googleplex.com" if running as a Google first-party Endpoints API, or 4) None if not running on App Engine (e.g. Tornado Endpoints API). Returns: A string representing the hostname of the service.
codesearchnet
def info(name): try: handle_scm = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_CONNECT) except pywintypes.error as exc: raise CommandExecutionError('Failed to connect to the SCM: {0}'.format(exc.strerror)) try: handle_svc = win32service.OpenService(handle_scm, name, (((win32service.SERVICE_ENUMERATE_DEPENDENTS | win32service.SERVICE_INTERROGATE) | win32service.SERVICE_QUERY_CONFIG) | win32service.SERVICE_QUERY_STATUS)) except pywintypes.error as exc: raise CommandExecutionError('Failed To Open {0}: {1}'.format(name, exc.strerror)) try: config_info = win32service.QueryServiceConfig(handle_svc) status_info = win32service.QueryServiceStatusEx(handle_svc) try: description = win32service.QueryServiceConfig2(handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION) except pywintypes.error: description = 'Failed to get description' delayed_start = win32service.QueryServiceConfig2(handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO) finally: win32service.CloseServiceHandle(handle_scm) win32service.CloseServiceHandle(handle_svc) ret = dict() try: sid = win32security.LookupAccountName('', 'NT Service\\{0}'.format(name))[0] ret['sid'] = win32security.ConvertSidToStringSid(sid) except pywintypes.error: ret['sid'] = 'Failed to get SID' ret['BinaryPath'] = config_info[3] ret['LoadOrderGroup'] = config_info[4] ret['TagID'] = config_info[5] ret['Dependencies'] = config_info[6] ret['ServiceAccount'] = config_info[7] ret['DisplayName'] = config_info[8] ret['Description'] = description ret['Status_ServiceCode'] = status_info['ServiceSpecificExitCode'] ret['Status_CheckPoint'] = status_info['CheckPoint'] ret['Status_WaitHint'] = status_info['WaitHint'] ret['StartTypeDelayed'] = delayed_start flags = list() for bit in SERVICE_TYPE: if isinstance(bit, int): if (config_info[0] & bit): flags.append(SERVICE_TYPE[bit]) ret['ServiceType'] = (flags if flags else config_info[0]) flags = list() for bit in SERVICE_CONTROLS: if (status_info['ControlsAccepted'] & bit): flags.append(SERVICE_CONTROLS[bit]) ret['ControlsAccepted'] = (flags if flags else status_info['ControlsAccepted']) try: ret['Status_ExitCode'] = SERVICE_ERRORS[status_info['Win32ExitCode']] except KeyError: ret['Status_ExitCode'] = status_info['Win32ExitCode'] try: ret['StartType'] = SERVICE_START_TYPE[config_info[1]] except KeyError: ret['StartType'] = config_info[1] try: ret['ErrorControl'] = SERVICE_ERROR_CONTROL[config_info[2]] except KeyError: ret['ErrorControl'] = config_info[2] try: ret['Status'] = SERVICE_STATE[status_info['CurrentState']] except KeyError: ret['Status'] = status_info['CurrentState'] return ret
Get information about a service on the system Args: name (str): The name of the service. This is not the display name. Use ``get_service_name`` to find the service name. Returns: dict: A dictionary containing information about the service. CLI Example: .. code-block:: bash salt '*' service.info spooler
codesearchnet
def stop_instance(self):
    stop_url = self._get_url('stop_path')
    res = self.rest_client.session.put(stop_url, json={})
    _handle_http_errors(res)
    return res.json()
Stop the instance for this Streaming Analytics service. Returns: dict: JSON response for the instance stop operation.
codesearchnet
def sync_executors(self):
    if self._context_handle:
        pywrap_tfe.TFE_ContextSyncExecutors(self._context_handle)
    else:
        raise ValueError('Context is not initialized.')
Sync both local executors and the ones on remote workers. In async execution mode, local function calls can return before the corresponding remote op/function execution requests are completed. Calling this method creates a synchronization barrier for remote executors. It only returns when all remote pending nodes are finished, potentially with errors if any remote executors are in error state. Raises: ValueError: if context is not initialized.
github-repos
def back_propagation(self, delta_arr): re_encoder_delta_arr, delta_hidden_arr, re_encoder_grads_list = self.__retrospective_encoder.hidden_back_propagate( delta_arr[:, -1] ) re_encoder_grads_list.insert(0, None) re_encoder_grads_list.insert(0, None) observed_arr, encoded_arr, decoded_arr, re_encoded_arr = self.__inferenced_tuple delta_arr = self.__encoder_decoder_controller.computable_loss.compute_delta( decoded_arr, observed_arr ) delta_arr[:, -1] += re_encoder_delta_arr[:, -1] decoder_grads_list, encoder_delta_arr, encoder_grads_list = self.__encoder_decoder_controller.back_propagation( delta_arr ) return re_encoder_grads_list, decoder_grads_list, encoder_delta_arr, encoder_grads_list
Back propagation. Args: delta_arr: Delta. Returns: Tuple data. - retrospective encoder's `list` of gradations, - decoder's `list` of gradations, - encoder's `np.ndarray` of Delta, - encoder's `list` of gradations.
juraj-google-style
def recombine(self, parents: List[pg.DNA], global_state: pg.geno.AttributeDict, step: int) -> List[pg.DNA]:
Generate a list of child DNA based on the list of parents given. User should override this method with optional keyword arguments 'global_state' and 'step'. The parents DNA contains a metadata field 'generation', which is the generation of the parent DNA. If the Recombinator does not assign this field for the new child DNA, the child DNA will have the maximum generation from the parents plus 1. Args: parents: Parent trials. global_state: An `AttributeDict` object as the global state container, which is readable/writable during the operation. step: Number of examples historically proposed, which can be used for determining a cross over schedule. Returns: A list of generated child DNA.
github-repos
def console_get_width(con: tcod.console.Console) -> int:
    return int(lib.TCOD_console_get_width(_console(con)))
Return the width of a console. Args: con (Console): Any Console instance. Returns: int: The width of a Console. .. deprecated:: 2.0 Use `Console.width` instead.
codesearchnet
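Since the function above is deprecated, a minimal sketch of the recommended replacement (assuming python-tcod is installed):

import tcod

console = tcod.console.Console(80, 50)
# Preferred modern access, equivalent to console_get_width(console).
print(console.width)   # 80
print(console.height)  # 50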
def get(account_id, account_type_id=None):
    if type(account_id) == str:
        args = {'account_name': account_id}
    else:
        args = {'account_id': account_id}

    if account_type_id:
        args['account_type_id'] = account_type_id

    return db.Account.find_one(**args)
Return account by ID and type Args: account_id (`int`, `str`): Unique Account identifier account_type_id (str): Type of account to get Returns: :obj:`Account`: Returns an Account object if found, else None
juraj-google-style
def __init__(self, scaffold=None, master='', config=None, checkpoint_dir=None, checkpoint_filename_with_path=None): self._checkpoint_dir = checkpoint_dir self._checkpoint_filename_with_path = checkpoint_filename_with_path self._scaffold = scaffold or Scaffold() self._session_manager = None self._master = master self._config = config
Initializes a chief session creator. Args: scaffold: A `Scaffold` used for gathering or building supportive ops. If not specified a default one is created. It's used to finalize the graph. master: `String` representation of the TensorFlow master to use. config: `ConfigProto` proto used to configure the session. checkpoint_dir: A string. Optional path to a directory where to restore variables. checkpoint_filename_with_path: Full file name path to the checkpoint file.
github-repos
def can_user_access_build(param_name): build_id = (request.args.get(param_name, type=int) or request.form.get(param_name, type=int) or request.json[param_name]) if (not build_id): logging.debug('Build ID in param_name=%r was missing', param_name) abort(400) ops = operations.UserOps(current_user.get_id()) (build, user_is_owner) = ops.owns_build(build_id) if (not build): logging.debug('Could not find build_id=%r', build_id) abort(404) if (current_user.is_authenticated() and (not user_is_owner)): ops.evict() claim_invitations(current_user) (build, user_is_owner) = ops.owns_build(build_id) if (not user_is_owner): if (current_user.is_authenticated() and current_user.superuser): pass elif (request.method != 'GET'): logging.debug('No way to log in user via modifying request') abort(403) elif build.public: pass elif current_user.is_authenticated(): logging.debug('User does not have access to this build') abort(flask.Response('You cannot access this build', 403)) else: logging.debug('Redirecting user to login to get build access') abort(login.unauthorized()) elif (not login_fresh()): logging.debug('User login is old; forcing refresh') abort(login.needs_refresh()) return build
Determines if the current user can access the build ID in the request. Args: param_name: Parameter name to use for getting the build ID from the request. Will fetch from GET or POST requests. Returns: The build the user has access to.
codesearchnet
def __init__(self, filenames, index=0, buffer_size=None, _account_id=None, delimiter=None): self._filenames = filenames self._index = index self._buffer_size = buffer_size self._account_id = _account_id self._delimiter = delimiter self._bucket = None self._bucket_iter = None self._fail_on_missing_input = None
Initialize a GoogleCloudStorageInputReader instance. Args: filenames: A list of Google Cloud Storage filenames of the form '/bucket/objectname'. index: Index of the next filename to read. buffer_size: The size of the read buffer, None to use default. _account_id: Internal use only. See cloudstorage documentation. delimiter: Delimiter used as path separator. See class doc for details.
juraj-google-style
def _StopStyleSelectionMethod(self, doc): if (not self.show_stop_hierarchy): return (lambda stop: (None, None)) self._CreateStyle(doc, 'stop_entrance', {'IconStyle': {'color': 'ff0000ff'}}) self._CreateStyle(doc, 'entrance_connection', {'LineStyle': {'color': 'ff0000ff', 'width': '2'}}) self._CreateStyle(doc, 'stop_platform', {'IconStyle': {'color': 'ffff0000'}}) self._CreateStyle(doc, 'platform_connection', {'LineStyle': {'color': 'ffff0000', 'width': '2'}}) self._CreateStyle(doc, 'stop_standalone', {'IconStyle': {'color': 'ff00ff00'}}) def StyleSelectionMethod(stop): if (stop.location_type == transitfeed.Stop.LOCATION_TYPE_STATION): return ('stop_station', None) elif (stop.location_type == googletransit.Stop.LOCATION_TYPE_ENTRANCE): return ('stop_entrance', 'entrance_connection') elif stop.parent_station: return ('stop_platform', 'platform_connection') return ('stop_standalone', None) return StyleSelectionMethod
Create a method to determine which style to apply to a stop placemark. Args: doc: the KML document. Returns: A function that should accept a Stop argument and return a tuple of (stop placemark style id, pathway placemark style id). Either style id can be None, indicating no style should be set. Given a Stop, we need to determine what KML style to apply to the stops' placemark. In the most basic case, no styling is applied. However, if show_stop_hierarchy is enabled, we style each type of stop differently depending on if the stop is a station, platform, entrance, etc. This method returns a function that is used to pick which style id should be associated with a stop placemark, or None if no style should be applied. It also optionally returns a style id to associate with any line-string connections associated with a stop (eg. to show the pathway between an entrance and a station).
codesearchnet
def _ReadCompressedData(self, read_size):
    self._uncompressed_data = self._zip_ext_file.read(read_size)
    self._uncompressed_data_size = len(self._uncompressed_data)
Reads compressed data from the file-like object. Args: read_size (int): number of bytes of compressed data to read.
juraj-google-style
def to_json(value: Any, **kwargs) -> Any: if isinstance(value, (type(None), bool, int, float, str)): v = value elif isinstance(value, JSONConvertible): v = value.to_json(**kwargs) elif isinstance(value, tuple): v = [JSONConvertible.TUPLE_MARKER] + to_json(list(value), **kwargs) elif isinstance(value, list): v = [to_json(item, **kwargs) for item in value] elif isinstance(value, dict): v = {k: to_json(v, **kwargs) for k, v in value.items()} elif isinstance(value, (type, typing.GenericAlias)): v = _type_to_json(value) elif inspect.isbuiltin(value): v = _builtin_function_to_json(value) elif inspect.isfunction(value): v = _function_to_json(value) elif inspect.ismethod(value): v = _method_to_json(value) elif isinstance(value, typing._Final): v = _annotation_to_json(value) elif value is ...: v = {JSONConvertible.TYPE_NAME_KEY: 'type', 'name': 'builtins.Ellipsis'} else: v, converted = (None, False) if JSONConvertible.TYPE_CONVERTER is not None: converter = JSONConvertible.TYPE_CONVERTER(type(value)) if converter: v = to_json(converter(value)) converted = True if not converted: v = _OpaqueObject(value).to_json(**kwargs) return v
Serializes a (maybe) JSONConvertible value into a plain Python object. Args: value: value to serialize. Applicable value types are: * Builtin python types: None, bool, int, float, string; * JSONConvertible types; * List types; * Tuple types; * Dict types. **kwargs: Keyword arguments to pass to value.to_json if value is JSONConvertible. Returns: JSON value.
github-repos
def to_python(self): return (self.selector, COMPARISON_MAP.get(self.comparison, self.comparison), self.argument)
Deconstruct the ``Constraint`` instance to a tuple. Returns: tuple: The deconstructed ``Constraint``.
codesearchnet
def _parse_username(self, config): (username, priv, role, nopass, fmt, secret, sshkey) = config resource = dict() resource['privilege'] = priv resource['role'] = role resource['nopassword'] = nopass == 'nopassword' resource['format'] = fmt resource['secret'] = secret resource['sshkey'] = sshkey return {username: resource}
Scans the config block and returns the username as a dict Args: config (str): The config block to parse Returns: dict: A resource dict that is intended to be merged into the user resource
juraj-google-style
def AddBackpropAccumulatedValue(self, history_value, value, dead_branch=False): history_ctxt = history_value.op._get_control_flow_context() cond_ctxt = None value_ctxt = value.op._get_control_flow_context() while value_ctxt and value_ctxt != history_ctxt: if isinstance(value_ctxt, control_flow_ops.CondContext): cond_ctxt = value_ctxt break value_ctxt = value_ctxt.outer_context with ops.control_dependencies(None): self.grad_context.Enter() if cond_ctxt: grad_state = self pred = None while pred is None and grad_state: pred = grad_state.history_map.get(cond_ctxt.pred.name) grad_state = grad_state.outer_grad_state if pred is None: pred = cond_ctxt.pred branch = 1 - cond_ctxt.branch if dead_branch else cond_ctxt.branch history_value = control_flow_ops._SwitchRefOrTensor(history_value, pred)[branch] pop = gen_data_flow_ops.stack_pop_v2(history_value, value.dtype.base_dtype) pop.set_shape(value.get_shape()) self.grad_context.Exit() parallel_iterations = self.grad_context.parallel_iterations if parallel_iterations > 1: self.grad_sync._add_control_input(pop.op) return pop
Add the getter for an accumulated value in the grad context. This is added to the backprop loop. Called in the grad context to get the value of an accumulated value. The stack pop op must be guarded by the pred of the controlling cond. Args: history_value: The history (a stack) of a value. value: The value that is pushed onto the stack. dead_branch: True iff the tensor is on a dead branch of a cond. Returns: The current value (the top of the stack).
github-repos
def create(self, resource): return self.service.create(resource, self.url_prefix, self.auth, self.session, self.session_send_opts)
Create the given resource. Args: resource (intern.resource.boss.BossResource): Create a data model object with attributes matching those of the resource. Returns: (intern.resource.boss.BossResource): Returns resource of type requested on success. Raises: requests.HTTPError on failure.
codesearchnet
def cancel(self, subscription_id, data={}, **kwargs):
    url = '{}/{}/cancel'.format(self.base_url, subscription_id)
    return self.post_url(url, data, **kwargs)
Cancel subscription given by subscription_id Args: subscription_id : Id for which subscription has to be cancelled Returns: Subscription Dict for given subscription id
codesearchnet
def VerifyServerControlResponse(self, http_object):
    if http_object.code != 200:
        return False
    try:
        (http_object.messages, http_object.source, http_object.nonce) = \
            self.communicator.DecryptMessage(http_object.data)
        return True
    except communicator.DecodingError as e:
        logging.info('Protobuf decode error: %s.', e)
        return False
Verify the server response to a 'control' endpoint POST message. We consider the message correct if and only if we can decrypt it properly. Note that in practice we can not use the HTTP status to figure out if the request worked because captive proxies have a habit of lying and returning a HTTP success code even when there is no connectivity. Args: http_object: The HTTPObject returned from the HTTP transaction. Returns: True if the http_object is correct. False if it is not valid. Side Effect: Fill in the decoded_data attribute in the http_object.
codesearchnet
def greedy_decode(logits_fn,
                  initial_ids,
                  temperature=0.0,
                  initial_states=None,
                  eos_id=EOS_ID,
                  forced_ids=None,
                  use_tpu=True):
    length_dim = initial_ids.shape.dims[-1]
    mesh = initial_ids.mesh
    num_steps = mtf.constant(mesh, length_dim.size, dtype=tf.int32)

    def cond_fn(step_num, prev_ids, *unused_states):
        """Should we run another loop iteration."""
        overflow = mtf.equal(step_num, num_steps)
        has_eos = mtf.reduce_any(mtf.equal(prev_ids, eos_id), reduced_dim=length_dim)
        all_has_eos = mtf.reduce_all(has_eos)
        return mtf.logical_not(mtf.logical_or(overflow, all_has_eos))

    def body_fn(step_num, ids, *states):
        """Body function for greedy decoding.

        Args:
          step_num: a mtf.Tensor
          ids: a mtf.Tensor
          *states: additional mtf.Tensors
        Returns:
          new_step_num, new_ids, *new_states
        """
        logits, new_states = logits_fn(step_num, ids, states)
        vocab_dim = logits.shape.dims[-1]
        new_ids = mtf.sample_with_temperature(logits, vocab_dim, temperature)
        if forced_ids is not None:
            forced = mtf.gather(forced_ids, step_num, length_dim)
            new_ids = forced + (new_ids * mtf.to_int32(mtf.equal(forced, 0)))
        ids += new_ids * mtf.one_hot(step_num, length_dim, dtype=tf.int32)
        new_step_num = step_num + 1
        return [new_step_num, ids] + new_states

    initial_step_num = mtf.constant(mesh, 0, dtype=tf.int32)
    while_loop_inputs = [initial_step_num, initial_ids] + initial_states
    final_step_num, mtf_samples = mtf.while_loop(
        cond_fn, body_fn, while_loop_inputs,
        num_loop_vars=None if use_tpu else 2)[:2]
    mtf_samples = mtf.Print(mtf_samples, [final_step_num], 'output_length')
    return mtf_samples
Greedy decoding. Args: logits_fn: Interface to the model, to provide logits. Shoud take: step_num - mtf Scalar ids - mtf Tensor with shape [..., length] states - list of mtf.Tensor Should return: logits - [batch, vocab_size] new_states - list of mtf.Tensor initial_ids: mtf.Tensor with shape [..., length], containing zeros. temperature: a float between 0.0 (argmax) and 1.0 (random) initial_states: list of mtf.Tensor eos_id: ID for end of sentence. forced_ids: optional mtf.Tensor with shape [..., length] use_tpu: a boolean Returns: Tensor with shape [..., length]
codesearchnet
def createEmails(nicks=None, nicksFile=None):
    candidate_emails = set()
    if nicks != None:
        for n in nicks:
            for e in email_providers.domains:
                candidate_emails.add("{}@{}".format(n, e))
    elif nicksFile != None:
        with open(nicksFile, "r") as iF:
            nicks = iF.read().splitlines()
        for n in nicks:
            for e in email_providers.domains:
                candidate_emails.add("{}@{}".format(n, e))
    return candidate_emails
Generates the candidate email addresses to be checked for a set of aliases. Args: ----- nicks: List of aliases. nicksFile: The filepath to the aliases file. Returns: -------- set: set of emails to be checked.
juraj-google-style
def share(self, name, item):
    try:
        if isinstance(item, s_telepath.Aware):
            item.onTeleShare(self, name)
        self.shared[name] = item
    except Exception:
        logger.exception(f'onTeleShare() error for: {name}')
Share an object via the telepath protocol. Args: name (str): Name of the shared object item (object): The object to share over telepath.
juraj-google-style
def encoder(self, inputs, n_layers=3): latent_dims = self.hparams.z_dim shape_as_list = inputs.shape.as_list() if len(shape_as_list) != 5: raise ValueError("Expected inputs to be a 5-D, got %d" % len(shape_as_list)) if inputs.dtype != tf.float32: raise ValueError("Expected dtype tf.float32, got %s" % inputs.dtype) batch_size, _ = shape_as_list[:2] inputs = tf.reshape(inputs, [-1] + list(inputs.shape)[2:]) n_filters = 64 rectified = None padding = [[0, 0], [1, 1], [1, 1], [0, 0]] for i in range(n_layers): with tf.variable_scope("layer_%d" % (i + 1)): n_filters *= 2**i if i: padded = tf.pad(rectified, padding) else: padded = tf.pad(inputs, padding) convolved = tf.layers.conv2d(padded, filters=n_filters, kernel_size=4, strides=2, padding="VALID") normalized = tf.contrib.layers.instance_norm(convolved) rectified = tf.nn.leaky_relu(normalized, alpha=0.2) pooled = tf.nn.avg_pool( rectified, [1] + rectified.shape[1:3].as_list() + [1], strides=[1, 1, 1, 1], padding="VALID") squeezed = tf.squeeze(pooled, [1, 2]) with tf.variable_scope("z_mu"): z_mu = tf.layers.dense(squeezed, latent_dims) with tf.variable_scope("z_log_sigma_sq"): z_log_var = tf.layers.dense(squeezed, latent_dims) z_log_var = tf.clip_by_value(z_log_var, -10, 10) z_mu = tf.reshape(z_mu, (batch_size, -1, latent_dims)) z_log_var = tf.reshape( z_log_var, (batch_size, -1, latent_dims)) return z_mu, z_log_var
Convnet that encodes inputs into mean and std of a gaussian. Args: inputs: 5-D Tensor, shape (batch_size, num_frames, width, height, channels) n_layers: Number of layers. Returns: z_mu: Mean of the latent gaussians. z_log_var: log(var) of the latent gaussians. Raises: ValueError: If inputs is not a 5-D tensor or not float32.
juraj-google-style
def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
    if not pdb_file_type:
        pdb_file_type = self.pdb_file_type

    counter = 0
    for g in tqdm(self.genes):
        pdbs = g.protein.pdb_downloader_and_metadata(outdir=outdir,
                                                     pdb_file_type=pdb_file_type,
                                                     force_rerun=force_rerun)
        if pdbs:
            counter += len(pdbs)

    log.info('Updated PDB metadata dataframe. See the "df_pdb_metadata" attribute for a summary dataframe.')
    log.info('Saved {} structures total'.format(counter))
Download ALL mapped experimental structures to each protein's structures directory. Args: outdir (str): Path to output directory, if GEM-PRO directories were not set or other output directory is desired pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired force_rerun (bool): If files should be re-downloaded if they already exist
juraj-google-style
def StartsWith(self, value):
    self._awql = self._CreateSingleValueCondition(value, 'STARTS_WITH')
    return self._query_builder
Sets the type of the WHERE clause as "starts with". Args: value: The value to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
juraj-google-style
def write_float(self, value, little_endian=True):
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sf' % endian, value)
Pack the value as a float and write 4 bytes to the stream. Args: value (number): the value to write to the stream. little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
juraj-google-style
def get_params(img, output_size):
    w, h, *_ = img.shape
    th, tw = output_size
    if w == tw and h == th:
        return 0, 0, h, w

    i = random.randint(0, h - th)
    j = random.randint(0, w - tw)
    return i, j, th, tw
Get parameters for ``crop`` for a random crop. Args: img (PIL Image): Image to be cropped. output_size (tuple): Expected output size of the crop. Returns: tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
juraj-google-style
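A quick sketch of drawing random crop parameters with the function above, using a square NumPy array as the image:

import numpy as np

img = np.zeros((256, 256, 3), dtype=np.uint8)
i, j, th, tw = get_params(img, (64, 64))
crop = img[i:i + th, j:j + tw]
print(crop.shape)  # (64, 64, 3)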
def _GetInstanceConfig(self):
    try:
        instance_data = self.metadata_dict['instance']['attributes']
    except KeyError:
        instance_data = {}
        self.logger.warning('Instance attributes were not found.')

    try:
        project_data = self.metadata_dict['project']['attributes']
    except KeyError:
        project_data = {}
        self.logger.warning('Project attributes were not found.')

    return (instance_data.get('google-instance-configs')
            or project_data.get('google-instance-configs'))
Get the instance configuration specified in metadata. Returns: string, the instance configuration data.
codesearchnet
def compute_output_shape(self, input_shape): if context.executing_eagerly(): self._maybe_build(input_shape) with func_graph.FuncGraph(str(self.name) + '_scratch_graph').as_default(): input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False) def _make_placeholder_like(shape): ph = backend.placeholder(shape=shape, dtype=self.dtype) ph._keras_mask = None return ph inputs = nest.map_structure(_make_placeholder_like, input_shape) try: outputs = self(inputs, training=False) except TypeError as e: raise NotImplementedError("We could not automatically infer the static shape of the layer's output. Please implement the `compute_output_shape` method on your layer (%s)." % self.__class__.__name__) from e return nest.map_structure(lambda t: t.shape, outputs) raise NotImplementedError('Please run in eager mode or implement the `compute_output_shape` method on your layer (%s).' % self.__class__.__name__)
Computes the output shape of the layer. If the layer has not been built, this method will call `build` on the layer. This assumes that the layer will later be used with inputs that match the input shape provided here. Args: input_shape: Shape tuple (tuple of integers) or list of shape tuples (one per output tensor of the layer). Shape tuples can include None for free dimensions, instead of an integer. Returns: An input shape tuple.
github-repos
def check_managed_pipeline(name='', app_name=''):
    *pipeline_name_prefix, bracket_region = name.split()
    region = bracket_region.strip('[]')

    not_managed_message = '"{0}" is not managed.'.format(name)

    if 'onetime' in region:
        LOG.info('"%s" is a onetime, marked for cleaning.', name)
        return region

    if not all([bracket_region.startswith('['), bracket_region.endswith(']')]):
        LOG.debug('"%s" does not end with "[region]".', name)
        raise ValueError(not_managed_message)

    # Compare with equality, not identity, for integers.
    if len(pipeline_name_prefix) != 1:
        LOG.debug('"%s" does not only have one word before [region].', name)
        raise ValueError(not_managed_message)

    if app_name not in pipeline_name_prefix:
        LOG.debug('"%s" does not use "%s" before [region].', name, app_name)
        raise ValueError(not_managed_message)

    return region
Check a Pipeline name is a managed format **app_name [region]**. Args: name (str): Name of Pipeline to check. app_name (str): Name of Application to find in Pipeline name. Returns: str: Region name from managed Pipeline name. Raises: ValueError: Pipeline is not managed.
juraj-google-style
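A short sketch of how the check above behaves; the pipeline names are illustrative:

region = check_managed_pipeline(name='myapp [us-east-1]', app_name='myapp')
print(region)  # 'us-east-1'

try:
    check_managed_pipeline(name='something else entirely', app_name='myapp')
except ValueError as error:
    print(error)  # '"something else entirely" is not managed.'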
def _create_inbound_stream(self, config=None): if config is None: raise ValueError('No stream config to create stream from.') name = self._get_stream_name(config) stream_handlers = self._get_stream_handlers(config, name) stream_input = config.get('input', None) if stream_input is None: raise(cfg.AitConfigMissing('inbound stream {}\'s input'.format(name))) if type(stream_input[0]) is int: return PortInputStream(name, stream_input, stream_handlers, zmq_args={'zmq_context': self.broker.context, 'zmq_proxy_xsub_url': self.broker.XSUB_URL, 'zmq_proxy_xpub_url': self.broker.XPUB_URL}) else: return ZMQStream(name, stream_input, stream_handlers, zmq_args={'zmq_context': self.broker.context, 'zmq_proxy_xsub_url': self.broker.XSUB_URL, 'zmq_proxy_xpub_url': self.broker.XPUB_URL})
Creates an inbound stream from its config. Params: config: stream configuration as read by ait.config Returns: stream: a Stream Raises: ValueError: if any of the required config values are missing
juraj-google-style
def list_media_services_rg(access_token, subscription_id, rgname):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', rgname,
                        '/providers/microsoft.media/mediaservices?api-version=', MEDIA_API])
    return do_get(endpoint, access_token)
List the media services in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. JSON body.
codesearchnet
def create_media_assetfile(access_token, parent_asset_id, name, is_primary='false', is_encrypted='false', encryption_scheme='None', encryptionkey_id='None'): path = '/Files' endpoint = ''.join([ams_rest_endpoint, path]) if (encryption_scheme == 'StorageEncryption'): body = (((((((((((((('{ \t\t\t"IsEncrypted": "' + is_encrypted) + '", \t\t\t"EncryptionScheme": "') + encryption_scheme) + '", \t\t\t"EncryptionVersion": "') + '1.0') + '", \t\t\t"EncryptionKeyId": "') + encryptionkey_id) + '", \t\t\t"IsPrimary": "') + is_primary) + '", \t\t\t"MimeType": "video/mp4", \t\t\t"Name": "') + name) + '", \t\t\t"ParentAssetId": "') + parent_asset_id) + '" \t\t}') else: body = (((((('{ \t\t\t"IsPrimary": "' + is_primary) + '", \t\t\t"MimeType": "video/mp4", \t\t\t"Name": "') + name) + '", \t\t\t"ParentAssetId": "') + parent_asset_id) + '" \t\t}') return do_ams_post(endpoint, path, body, access_token)
Create Media Service Asset File. Args: access_token (str): A valid Azure authentication token. parent_asset_id (str): Media Service Parent Asset ID. name (str): Media Service Asset Name. is_primary (str): Media Service Primary Flag. is_encrypted (str): Media Service Encryption Flag. encryption_scheme (str): Media Service Encryption Scheme. encryptionkey_id (str): Media Service Encryption Key ID. Returns: HTTP response. JSON body.
codesearchnet
def update_dict_recursive(editable_dict: dict, editing_dict: dict) -> None:
    for k, v in editing_dict.items():
        # collections.Mapping was removed in Python 3.10; use the abc module.
        if isinstance(v, collections.abc.Mapping):
            update_dict_recursive(editable_dict.get(k, {}), v)
        else:
            editable_dict[k] = v
Updates a dict recursively. You need to use this function to update a dictionary if the depth of editing_dict is more than 1. Args: editable_dict: dictionary that will be edited editing_dict: dictionary that contains the edits Returns: None
juraj-google-style
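For example, the recursive update above merges nested keys instead of overwriting the whole sub-dict:

settings = {'db': {'host': 'localhost', 'port': 5432}}
update_dict_recursive(settings, {'db': {'port': 6432}, 'debug': True})
print(settings)  # {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}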
def panel(self, panel_id):
    if not isinstance(panel_id, ObjectId):
        panel_id = ObjectId(panel_id)
    panel_obj = self.panel_collection.find_one({'_id': panel_id})
    return panel_obj
Fetch a gene panel by '_id'. Args: panel_id (str, ObjectId): str or ObjectId of document ObjectId Returns: dict: panel object or `None` if panel not found
codesearchnet
def BuildFindSpecs(self, artifact_filter_names, environment_variables=None): find_specs = [] for name in artifact_filter_names: definition = self._artifacts_registry.GetDefinitionByName(name) if not definition: logger.debug('undefined artifact definition: {0:s}'.format(name)) continue logger.debug('building find spec from artifact definition: {0:s}'.format( name)) artifact_find_specs = self._BuildFindSpecsFromArtifact( definition, environment_variables) find_specs.extend(artifact_find_specs) for find_spec in find_specs: if isinstance(find_spec, file_system_searcher.FindSpec): self.file_system_find_specs.append(find_spec) elif isinstance(find_spec, registry_searcher.FindSpec): self.registry_find_specs.append(find_spec) else: logger.warning('Unsupported find specification type: {0:s}'.format( type(find_spec)))
Builds find specifications from artifact definitions. Args: artifact_filter_names (list[str]): names of artifact definitions that are used for filtering file system and Windows Registry key paths. environment_variables (Optional[list[EnvironmentVariableArtifact]]): environment variables.
juraj-google-style
def phase_uniquizer(all_phases): measurement_name_maker = UniqueNameMaker(itertools.chain.from_iterable((phase.measurements.keys() for phase in all_phases if phase.measurements))) attachment_names = list(itertools.chain.from_iterable((phase.attachments.keys() for phase in all_phases))) attachment_names.extend(itertools.chain.from_iterable(([('multidim_' + name) for (name, meas) in phase.measurements.items() if (meas.dimensions is not None)] for phase in all_phases if phase.measurements))) attachment_name_maker = UniqueNameMaker(attachment_names) for phase in all_phases: for (name, _) in sorted(phase.measurements.items()): old_name = name name = measurement_name_maker.make_unique(name) phase.measurements[old_name].name = name phase.measurements[name] = phase.measurements.pop(old_name) for (name, _) in sorted(phase.attachments.items()): old_name = name name = attachment_name_maker.make_unique(name) phase.attachments[old_name].name = name phase.attachments[name] = phase.attachments.pop(old_name) return all_phases
Makes the names of phase measurement and attachments unique. This function will make the names of measurements and attachments unique. It modifies the input all_phases. Args: all_phases: the phases to make unique Returns: the phases now modified.
codesearchnet
def install_js(): target_jsdir = join(SERVER, 'static', 'js') target_cssdir = join(SERVER, 'static', 'css') target_tslibdir = join(SERVER, 'static', 'lib') STATIC_ASSETS = [join(JS, 'bokeh.js'), join(JS, 'bokeh.min.js'), join(CSS, 'bokeh.css'), join(CSS, 'bokeh.min.css')] if (not all((exists(a) for a in STATIC_ASSETS))): print(BOKEHJS_INSTALL_FAIL) sys.exit(1) if exists(target_jsdir): shutil.rmtree(target_jsdir) shutil.copytree(JS, target_jsdir) if exists(target_cssdir): shutil.rmtree(target_cssdir) shutil.copytree(CSS, target_cssdir) if exists(target_tslibdir): shutil.rmtree(target_tslibdir) if exists(TSLIB): lib = {'lib.es5.d.ts', 'lib.dom.d.ts', 'lib.es2015.core.d.ts', 'lib.es2015.promise.d.ts', 'lib.es2015.symbol.d.ts', 'lib.es2015.iterable.d.ts'} shutil.copytree(TSLIB, target_tslibdir, ignore=(lambda _, files: [f for f in files if (f not in lib)]))
Copy built BokehJS files into the Python source tree. Returns: None
codesearchnet
def __init__(self, **kwargs):
    self.unitname = kwargs.get('unitname', self.unitname)
    self.unitmultiplier = kwargs.get('unitmultiplier', self.unitmultiplier)
Distance unit parameter. Args: - **unitname**: A pycrs.elements.units.UnitName instance with the name given by each supported format. - **unitmultiplier**: A pycrs.elements.units.UnitMultiplier instance.
juraj-google-style
def set_lock_state(self, code, device_label, state):
    response = None
    try:
        response = requests.put(
            urls.set_lockstate(self._giid, device_label, state),
            headers={
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Content-Type': 'application/json',
                'Cookie': 'vid={}'.format(self._vid)},
            data=json.dumps({"code": str(code)}))
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
Lock or unlock Args: code (str): Lock code device_label (str): device label of lock state (str): 'lock' or 'unlock'
juraj-google-style
def print_layer_summary(layer):
    try:
        output_shape = layer.output_shape
    except AttributeError:
        output_shape = 'multiple'
    except RuntimeError:
        output_shape = '?'
    name = layer.name
    cls_name = layer.__class__.__name__
    if not layer.built and (not getattr(layer, '_is_graph_network', False)):
        params = '0 (unused)'
    else:
        params = layer.count_params()
    fields = [name + ' (' + cls_name + ')', output_shape, params]
    print_row(fields, positions)
Prints a summary for a single layer. Args: layer: target layer.
github-repos
def init_cache(self, batch_size, max_length, encoder_outputs): decoder_input_ids = jnp.ones((batch_size, max_length), dtype='i4') decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, **kwargs) init_variables = self.module.init(jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward) return unfreeze(init_variables['cache'])
Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
github-repos
def all_events_filter(
        self,
        from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
        to_block: BlockSpecification = 'latest',
) -> StatelessFilter:
    return self.events_filter(None, from_block, to_block)
Install a new filter for all the events emitted by the current token network contract Args: from_block: Create filter starting from this block number (default: 0). to_block: Create filter stopping at this block number (default: 'latest'). Return: The filter instance.
juraj-google-style
def _get_operation_input_field_values(self, metadata, file_input):
    input_args = metadata['request']['ephemeralPipeline']['inputParameters']
    vals_dict = metadata['request']['pipelineArgs']['inputs']

    names = [
        arg['name'] for arg in input_args if ('localCopy' in arg) == file_input
    ]
    return {name: vals_dict[name] for name in names if name in vals_dict}
Returns a dictionary of envs or file inputs for an operation. Args: metadata: operation metadata field file_input: True to return a dict of file inputs, False to return envs. Returns: A dictionary of input field name value pairs
juraj-google-style
def _PrintAnalysisStatusHeader(self, processing_status): self._output_writer.Write('Storage file\t\t: {0:s}\n'.format(self._storage_file_path)) self._PrintProcessingTime(processing_status) if (processing_status and processing_status.events_status): self._PrintEventsStatus(processing_status.events_status) self._output_writer.Write('\n')
Prints the analysis status header. Args: processing_status (ProcessingStatus): processing status.
codesearchnet
def accuracy(y_true: [list, np.ndarray], y_predicted: [list, np.ndarray]) -> float: examples_len = len(y_true) correct = sum([y1 == y2 for y1, y2 in zip(y_true, y_predicted)]) return correct / examples_len if examples_len else 0
Calculate accuracy in terms of absolute coincidence (exact match)

    Args:
        y_true: array of true values
        y_predicted: array of predicted values

    Returns:
        fraction of samples whose predicted value exactly matches the true value
juraj-google-style
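A minimal usage sketch for the accuracy() function above; the labels are illustrative:

y_true = [1, 0, 1, 1, 0]
y_predicted = [1, 0, 0, 1, 0]
print(accuracy(y_true, y_predicted))  # 4 of 5 labels match exactly -> 0.8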
def diff(self) -> List[str]: return set(self.to_track.keys()) - self._seen
Returns the set difference between the keys in the tracked state dict and the ones we have
        seen so far. This is an effective way to check whether all the keys have been updated.

        Returns:
            List[str]: List of keys not yet updated
github-repos
def draw_point(self, x, y): check_int_err(lib.SDL_RenderDrawPoint(self._ptr, x, y))
Draw a point on the current rendering target. Args: x (int): The x coordinate of the point. y (int): The y coordinate of the point. Raises: SDLError: If an error is encountered.
codesearchnet
def calculate_subscription_lifecycle(subscription_id): subscription = Subscription.objects.select_related("messageset", "schedule").get( id=subscription_id ) behind = subscription.messages_behind() if behind == 0: return current_messageset = subscription.messageset current_sequence_number = subscription.next_sequence_number end_subscription = Subscription.fast_forward_lifecycle(subscription, save=False)[-1] BehindSubscription.objects.create( subscription=subscription, messages_behind=behind, current_messageset=current_messageset, current_sequence_number=current_sequence_number, expected_messageset=end_subscription.messageset, expected_sequence_number=end_subscription.next_sequence_number, )
Calculates the expected lifecycle position of the subscription identified by
    subscription_id, and creates a BehindSubscription entry for it if it is behind.

    Args:
        subscription_id (str): ID of subscription to calculate lifecycle for
juraj-google-style
async def leave_conversation(self, conv_id): logger.info('Leaving conversation: {}'.format(conv_id)) await self._conv_dict[conv_id].leave() del self._conv_dict[conv_id]
Leave a conversation. Args: conv_id (str): ID of conversation to leave.
juraj-google-style
def _MeanAggregator(inputs, segments): result = [] for inputs_i, segments_i in zip(array_ops.split(inputs, inputs.shape[0]), array_ops.split(segments, segments.shape[0])): means_i = math_ops.unsorted_segment_mean(inputs_i, segments_i, num_segments=math_ops.reduce_max(segments_i) + 1) result.append(array_ops.reshape(array_ops.gather(means_i, segments_i), [-1])) return array_ops_stack.stack(result, axis=0)
Replaces each segment with its mean along the last axis. Specifically, each value in the `inputs` tensor gets replaced by the mean value computed from the values that belong to the same segment. Args: inputs: A 2-tensor. Aggregation is done over dimension 1. segments: A 2-tensor, same shape as `input`. Returns: The result, same shape and type as `inputs`.
github-repos
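An illustrative sketch of the same per-segment mean idea using public TensorFlow ops (the snippet above relies on TF-internal module aliases such as array_ops and math_ops):

import tensorflow as tf

inputs = tf.constant([[1., 2., 3., 4.]])
segments = tf.constant([[0, 0, 1, 1]])
# mean per segment, then scatter the means back to every position of that segment
means = tf.math.unsorted_segment_mean(inputs[0], segments[0], num_segments=2)
print(tf.gather(means, segments[0]))  # [1.5, 1.5, 3.5, 3.5], same shape as the row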
def __process_instr(self, instr, avoid, next_addr, initial_state, execution_state, trace_current):
        if instr.mnemonic == ReilMnemonic.JCC:
            not_taken_addr = next_addr

            address, index = split_address(instr.address)

            logger.debug("[+] Processing branch: {:#08x}:{:02x} : {}".format(address, index, instr))

            if isinstance(instr.operands[0], ReilRegisterOperand):
                next_ip = self.__process_branch_cond(instr, avoid, initial_state, execution_state, trace_current, not_taken_addr)
            else:
                next_ip = self.__process_branch_uncond(instr, trace_current, not_taken_addr)
        else:
            trace_current += [(instr, None)]

            self.__cpu.execute(instr)

            next_ip = next_addr

        return next_ip
Process a REIL instruction. Args: instr (ReilInstruction): Instruction to process. avoid (list): List of addresses to avoid while executing the code. next_addr (int): Address of the following instruction. initial_state (State): Initial execution state. execution_state (Queue): Queue of execution states. trace_current (list): Current trace. Returns: int: Returns the next address to execute.
juraj-google-style
def _op_in_graph_mode(tensor): if context.executing_eagerly(): return tensor return tensor.op
Returns the tensor's op in graph mode, or the tensor in eager mode. This is useful because sometimes an op is needed in graph mode instead of a tensor. In eager mode, there are no ops. Args: tensor: A tensor. Returns: The tensor's op in graph mode. The tensor in eager mode.
github-repos
def download(url, output_file=None, open_file=True, allow_overwrite=False): filename = url.split('/')[-1] if output_file is None: cache = os.path.join(get_data_home(), filename) else: cache = output_file if os.path.exists(cache) and not allow_overwrite: logger.info("> {} already exists.".format(cache)) logger.info("> If you have any issue when using this file, ") logger.info("> manually remove the file and try download again.") else: r = request.urlopen(url) try: if six.PY2: content_length = int(r.info().dict['content-length']) elif six.PY3: content_length = int(r.info()['Content-Length']) except: content_length = 0 unit = 1000000 content = b'' with tqdm(total=content_length, desc=filename, unit='B', unit_scale=True, unit_divisor=1024) as t: while True: data = r.read(unit) l = len(data) t.update(l) if l == 0: break content += data with open(cache, 'wb') as f: f.write(content) if not open_file: return return open(cache, 'rb')
Download a file from URL. Args: url (str): URL. output_file (str, optional): If given, the downloaded file is written to the given path. open_file (bool): If True, it returns an opened file stream of the downloaded file. allow_overwrite (bool): If True, it overwrites an existing file. Returns: Returns file object if open_file is True, otherwise None.
juraj-google-style
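A hedged usage sketch, assuming download() and its module-level helpers (logger, get_data_home, tqdm) are importable from the surrounding package; the URL and output path are placeholders:

# fetch a small file, overwrite any stale copy, and read it back
f = download('https://example.com/data.csv', output_file='/tmp/data.csv',
             allow_overwrite=True)
if f is not None:
    print(len(f.read()), 'bytes downloaded')
    f.close()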
def on_click(self, handler): self.on_event(ButtonClick, handler) self.on_event(MenuItemClick, handler)
Set up a handler for button or menu item clicks. Args: handler (func) : handler function to call when button is activated. Returns: None
codesearchnet
def delete_metadata(self, resource, keys): self.metadata_service.set_auth(self._token_metadata) self.metadata_service.delete(resource, keys)
Deletes the given key-value pairs associated with the given resource. Will attempt to delete all key-value pairs even if some fail. Args: resource (intern.resource.boss.BossResource) keys (list) Raises: HTTPErrorList on failure.
juraj-google-style
def resample(self, size, interpolation=gdalconst.GRA_NearestNeighbour): factors = (size[0] / float(self.RasterXSize), size[1] / float(self.RasterYSize)) affine = AffineTransform(*tuple(self.affine)) affine.scale = (affine.scale[0] / factors[0], affine.scale[1] / factors[1]) dest = self.new(size, affine) gdal.ReprojectImage(self.ds, dest.ds, None, None, interpolation) return dest
Returns a new instance resampled to provided size. Arguments: size -- tuple of x,y image dimensions
juraj-google-style
def subset_gctoo(gctoo, row_bool=None, col_bool=None, rid=None, cid=None, ridx=None, cidx=None, exclude_rid=None, exclude_cid=None): assert (sum([(rid is not None), (row_bool is not None), (ridx is not None)]) <= 1), 'Only one of rid, row_bool, and ridx can be provided.' assert (sum([(cid is not None), (col_bool is not None), (cidx is not None)]) <= 1), 'Only one of cid, col_bool, and cidx can be provided.' rows_to_keep = get_rows_to_keep(gctoo, rid, row_bool, ridx, exclude_rid) cols_to_keep = get_cols_to_keep(gctoo, cid, col_bool, cidx, exclude_cid) rows_to_keep_bools = gctoo.data_df.index.isin(rows_to_keep) cols_to_keep_bools = gctoo.data_df.columns.isin(cols_to_keep) out_gctoo = GCToo.GCToo(src=gctoo.src, version=gctoo.version, data_df=gctoo.data_df.loc[(rows_to_keep_bools, cols_to_keep_bools)], row_metadata_df=gctoo.row_metadata_df.loc[(rows_to_keep_bools, :)], col_metadata_df=gctoo.col_metadata_df.loc[(cols_to_keep_bools, :)]) assert (out_gctoo.data_df.size > 0), 'Subsetting yielded an empty gct!' logger.info(('Initial GCToo with {} rows and {} columns subsetted down to ' + '{} rows and {} columns.').format(gctoo.data_df.shape[0], gctoo.data_df.shape[1], out_gctoo.data_df.shape[0], out_gctoo.data_df.shape[1])) return out_gctoo
Extract a subset of data from a GCToo object in a variety of ways. The order of rows and columns will be preserved. Args: gctoo (GCToo object) row_bool (list of bools): length must equal gctoo.data_df.shape[0] col_bool (list of bools): length must equal gctoo.data_df.shape[1] rid (list of strings): rids to include cid (list of strings): cids to include ridx (list of integers): row integer ids to include cidx (list of integers): col integer ids to include exclude_rid (list of strings): rids to exclude exclude_cid (list of strings): cids to exclude Returns: out_gctoo (GCToo object): gctoo after subsetting
codesearchnet
def reset_port_protection(self, id_or_uri, timeout=(- 1)): uri = (self._client.build_uri(id_or_uri) + '/resetportprotection') return self._client.update_with_zero_body(uri, timeout)
Triggers a reset of port protection. Cause port protection to be reset on all the interconnects of the logical interconnect that matches ID. Args: id_or_uri: Can be either the interconnect id or the interconnect uri. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: The interconnect.
codesearchnet
def add_gene_info(self, variant_obj, gene_panels=None): gene_panels = (gene_panels or []) variant_obj['has_refseq'] = False extra_info = {} for panel_obj in gene_panels: for gene_info in panel_obj['genes']: hgnc_id = gene_info['hgnc_id'] if (hgnc_id not in extra_info): extra_info[hgnc_id] = [] extra_info[hgnc_id].append(gene_info) for variant_gene in variant_obj.get('genes', []): hgnc_id = variant_gene['hgnc_id'] hgnc_gene = self.hgnc_gene(hgnc_id) if (not hgnc_gene): continue transcripts_dict = {} for transcript in hgnc_gene.get('transcripts', []): tx_id = transcript['ensembl_transcript_id'] transcripts_dict[tx_id] = transcript hgnc_gene['transcripts_dict'] = transcripts_dict if hgnc_gene.get('incomplete_penetrance'): variant_gene['omim_penetrance'] = True panel_info = extra_info.get(hgnc_id, []) disease_associated = set() disease_associated_no_version = set() manual_penetrance = False mosaicism = False manual_inheritance = set() for gene_info in panel_info: for tx in gene_info.get('disease_associated_transcripts', []): stripped = re.sub('\\.[0-9]', '', tx) disease_associated_no_version.add(stripped) disease_associated.add(tx) if gene_info.get('reduced_penetrance'): manual_penetrance = True if gene_info.get('mosaicism'): mosaicism = True manual_inheritance.update(gene_info.get('inheritance_models', [])) variant_gene['disease_associated_transcripts'] = list(disease_associated) variant_gene['manual_penetrance'] = manual_penetrance variant_gene['mosaicism'] = mosaicism variant_gene['manual_inheritance'] = list(manual_inheritance) for transcript in variant_gene.get('transcripts', []): tx_id = transcript['transcript_id'] if (not (tx_id in transcripts_dict)): continue hgnc_transcript = transcripts_dict[tx_id] if hgnc_transcript.get('is_primary'): transcript['is_primary'] = True if (not hgnc_transcript.get('refseq_id')): continue refseq_id = hgnc_transcript['refseq_id'] transcript['refseq_id'] = refseq_id variant_obj['has_refseq'] = True if (refseq_id in disease_associated_no_version): transcript['is_disease_associated'] = True transcript['refseq_identifiers'] = hgnc_transcript.get('refseq_identifiers', []) variant_gene['common'] = hgnc_gene variant_gene['disease_terms'] = self.disease_terms(hgnc_id) return variant_obj
Add extra information about genes from gene panels Args: variant_obj(dict): A variant from the database gene_panels(list(dict)): List of panels from database
codesearchnet
def calculate_3D_elastic_energy(self, film, match, elasticity_tensor=None, include_strain=False): if (elasticity_tensor is None): return 9999 struc = SlabGenerator(self.film, match['film_miller'], 20, 15, primitive=False).get_slab().oriented_unit_cell film_matrix = list(match['film_sl_vecs']) film_matrix.append(np.cross(film_matrix[0], film_matrix[1])) substrate_matrix = list(match['sub_sl_vecs']) temp_sub = np.cross(substrate_matrix[0], substrate_matrix[1]) temp_sub = ((temp_sub * fast_norm(film_matrix[2])) / fast_norm(temp_sub)) substrate_matrix.append(temp_sub) transform_matrix = np.transpose(np.linalg.solve(film_matrix, substrate_matrix)) dfm = Deformation(transform_matrix) strain = dfm.green_lagrange_strain.convert_to_ieee(struc, initial_fit=False) energy_density = elasticity_tensor.energy_density(strain) if include_strain: return (((film.volume * energy_density) / len(film.sites)), strain.von_mises_strain) else: return ((film.volume * energy_density) / len(film.sites))
Calculates the multi-plane elastic energy. Returns 9999 if no elastic
        tensor was given on init.

        Args:
            film(Structure): conventional standard structure for the film
            match(dictionary) : match dictionary from substrate analyzer
            elasticity_tensor(ElasticTensor): elasticity tensor for the film
            include_strain(bool): include strain in the output or not; changes
                return from just the energy to a tuple with the energy and strain
                in voigt notation
codesearchnet
def _fetch_certs(request, certs_url): response = request(certs_url, method='GET') if (response.status != http_client.OK): raise exceptions.TransportError('Could not fetch certificates at {}'.format(certs_url)) return json.loads(response.data.decode('utf-8'))
Fetches certificates.

    Google-style certificate endpoints return JSON in the format of
    ``{'key id': 'x509 certificate'}``.

    Args:
        request (google.auth.transport.Request): The object used to make
            HTTP requests.
        certs_url (str): The certificate endpoint URL.

    Returns:
        Mapping[str, str]: A mapping of public key ID to x.509 certificate
            data.
codesearchnet
def get_collectors(self, limit=1000, offset=0): options = {'limit': limit, 'offset': offset} request = requests.get(self.url, params=options, auth=self.auth) try: results = request.json()['collectors'] except KeyError: results = request.json() except json.decoder.JSONDecodeError: results = [] return results
Returns a dict of collectors. Args: limit (int): number of collectors to return offset (int): the offset of where the list of collectors should begin from
codesearchnet
def draw_layer(ax, layer): ax.set_aspect('equal', 'datalim') ax.plot(*layer) ax.axis('off')
Draws a layer on the given matplotlib axis.

    Args:
        ax (axis): the matplotlib axis to draw on
        layer (layer): the layer to plot
codesearchnet
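A small usage sketch with matplotlib; here a "layer" is assumed to be any sequence of x/y arrays accepted by ax.plot():

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
layer = ([0, 1, 1, 0, 0], [0, 0, 1, 1, 0])  # outline of a unit square
draw_layer(ax, layer)
plt.show()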
def _print_results(file, status): file_color = c.Fore.GREEN status_color = c.Fore.RED if (status == 'Success'): status_color = c.Fore.GREEN elif (status == 'Skipped'): status_color = c.Fore.YELLOW print('{}{!s:<13}{}{!s:<35}{}{!s:<8}{}{}'.format(c.Fore.CYAN, 'Downloading:', file_color, file, c.Fore.CYAN, 'Status:', status_color, status))
Print the download results. Args: file (str): The filename. status (str): The file download status.
codesearchnet
def __init__(self, additional_note='', kwargs_dict=None):
    self._additional_note = additional_note
    if kwargs_dict:
        bullets = []
        for key in sorted(kwargs_dict.keys()):
            value = kwargs_dict[key]
            if any((x.isspace() for x in key)):
                raise ValueError('Parameter name "%s" contains whitespace.' % key)
            value = value.lstrip()
            if '\n' in value:
                raise ValueError('Parameter description for "%s" contains newlines.' % key)
            bullets.append('* `%s`: %s' % (key, value))
        self._additional_note += ('\n\n##### <b>`kwargs`</b>:\n\n' +
                                  '\n'.join(bullets))
Initializes the AppendDocstring object. Args: additional_note: Python string added as additional docstring to public version of function. kwargs_dict: Python string/string dictionary representing specific kwargs expanded from the **kwargs input. Raises: ValueError: if kwargs_dict.key contains whitespace. ValueError: if kwargs_dict.value contains newlines.
github-repos
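A hypothetical sketch of constructing the helper above; the note and kwargs text are illustrative, and the private attribute is read only to show the effect:

note = AppendDocstring(
    additional_note='Only valid for non-negative inputs.',
    kwargs_dict={'validate_args': 'Python bool, whether to check inputs.'})
print(note._additional_note)  # additional note followed by one bullet per kwarg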
def bidiagonalize_real_matrix_pair_with_symmetric_products(mat1: np.ndarray, mat2: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08, check_preconditions: bool=True) -> Tuple[np.ndarray, np.ndarray]:
    if check_preconditions:
        if np.any(np.imag(mat1) != 0):
            raise ValueError('mat1 must be real.')
        if np.any(np.imag(mat2) != 0):
            raise ValueError('mat2 must be real.')
        if not predicates.is_hermitian(mat1.dot(mat2.T), rtol=rtol, atol=atol):
            raise ValueError('mat1 @ mat2.T must be symmetric.')
        if not predicates.is_hermitian(mat1.T.dot(mat2), rtol=rtol, atol=atol):
            raise ValueError('mat1.T @ mat2 must be symmetric.')
    (base_left, base_diag, base_right) = _svd_handling_empty(np.real(mat1))
    base_diag = np.diag(base_diag)
    dim = base_diag.shape[0]
    rank = dim
    while rank > 0 and tolerance.all_near_zero(base_diag[rank - 1, rank - 1], atol=atol):
        rank -= 1
    base_diag = base_diag[:rank, :rank]
    semi_corrected = base_left.T.dot(np.real(mat2)).dot(base_right.T)
    overlap = semi_corrected[:rank, :rank]
    overlap_adjust = diagonalize_real_symmetric_and_sorted_diagonal_matrices(overlap, base_diag, rtol=rtol, atol=atol, check_preconditions=check_preconditions)
    extra = semi_corrected[rank:, rank:]
    (extra_left_adjust, _, extra_right_adjust) = _svd_handling_empty(extra)
    left_adjust = combinators.block_diag(overlap_adjust, extra_left_adjust)
    right_adjust = combinators.block_diag(overlap_adjust.T, extra_right_adjust)
    left = left_adjust.T.dot(base_left.T)
    right = base_right.T.dot(right_adjust.T)
    return (left, right)
Finds orthogonal matrices that diagonalize both mat1 and mat2. Requires mat1 and mat2 to be real. Requires mat1.T @ mat2 to be symmetric. Requires mat1 @ mat2.T to be symmetric. Args: mat1: One of the real matrices. mat2: The other real matrix. rtol: Relative numeric error threshold. atol: Absolute numeric error threshold. check_preconditions: If set, verifies that the inputs are real, and that mat1.T @ mat2 and mat1 @ mat2.T are both symmetric. Defaults to set. Returns: A tuple (L, R) of two orthogonal matrices, such that both L @ mat1 @ R and L @ mat2 @ R are diagonal matrices. Raises: ValueError: Matrices don't meet preconditions (e.g. not real).
codesearchnet
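A brief usage sketch, assuming the function is importable from cirq.linalg as in the upstream project; diagonal inputs trivially satisfy the symmetry preconditions:

import numpy as np
from cirq.linalg import bidiagonalize_real_matrix_pair_with_symmetric_products

a = np.diag([1.0, 2.0])
b = np.diag([3.0, 4.0])
left, right = bidiagonalize_real_matrix_pair_with_symmetric_products(a, b)
print(np.round(left @ a @ right, 6))  # diagonal
print(np.round(left @ b @ right, 6))  # diagonal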
def get_config(): profiles = {} curr = None cmd = ['netsh', 'advfirewall', 'show', 'allprofiles'] ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True) if (ret['retcode'] != 0): raise CommandExecutionError(ret['stdout']) for line in ret['stdout'].splitlines(): if (not curr): tmp = re.search('(.*) Profile Settings:', line) if tmp: curr = tmp.group(1) elif line.startswith('State'): profiles[curr] = (line.split()[1] == 'ON') curr = None return profiles
Get the status of all the firewall profiles Returns: dict: A dictionary of all profiles on the system Raises: CommandExecutionError: If the command fails CLI Example: .. code-block:: bash salt '*' firewall.get_config
codesearchnet
def _GetBetweenQEqualsAndAmpersand(self, url): (_, _, url) = url.partition('?') (_, _, url) = url.partition('q=') if (not url): return '' (url, _, _) = url.partition('&') return url
Retrieves the substring between the substrings 'q=' and '&'. Args: url (str): URL. Returns: str: search query, the value between 'q=' and '&' or None if no query was found.
codesearchnet
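The method above lives on a parser class; a standalone sketch of the same partition logic on a sample URL:

url = 'https://www.google.com/search?q=forensic+timeline&oq=abc'
_, _, rest = url.partition('?')
_, _, rest = rest.partition('q=')
query, _, _ = rest.partition('&')
print(query)  # 'forensic+timeline'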
def recursive_copy(src_dir, dest_dir): file_io.recursive_create_dir(dest_dir) for file_name in file_io.list_directory(src_dir): old_path = os.path.join(src_dir, file_name) new_path = os.path.join(dest_dir, file_name) if file_io.is_directory(old_path): recursive_copy(old_path, new_path) else: file_io.copy(old_path, new_path, overwrite=True)
Copy the contents of src_dir into the folder dest_dir. Args: src_dir: gsc or local path. dest_dir: gcs or local path.
juraj-google-style
def polyFitIgnoringOutliers( x, y, deg=2, niter=3, nstd=2, return_outliers=False): if return_outliers: a = all_outliers = np.zeros_like(y, dtype=bool) for i in range(niter): poly = np.polyfit(x, y, deg) p = np.poly1d(poly) if i == niter - 1: break y_fit = p(x) dy = y - y_fit std = (dy**2).mean()**0.5 inliers = abs(dy) < nstd * std if return_outliers: a[~inliers] = True if inliers.sum() > deg + 1: x = x[inliers] y = y[inliers] if return_outliers: a = a[inliers] else: break if return_outliers: return p, all_outliers return p
Returns:
        (np.poly1d): callable function of the polynomial fit excluding all outliers

    Args:
        deg (int): degree of polynomial fit
        niter (int): number of fit iterations; outliers are successively removed
            between iterations
        nstd (float): exclude outliers if their deviation is > [nstd] * standard deviation
        return_outliers (bool): also return outlier positions as 2nd arg
juraj-google-style
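A usage sketch: fit a quadratic through noisy synthetic data with a few injected outliers.

import numpy as np

x = np.linspace(0, 10, 50)
y = 2 * x**2 - 3 * x + 1 + np.random.normal(0, 5, x.size)
y[::10] += 200  # corrupt every 10th point

p = polyFitIgnoringOutliers(x, y, deg=2, niter=3, nstd=2)
print(p(5.0))  # evaluate the robust fit near the outlier-free trend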
def registerAccount(self, person, vendorSpecific=None): response = self.registerAccountResponse(person, vendorSpecific) return self._read_boolean_response(response)
See Also: registerAccountResponse() Args: person: vendorSpecific: Returns:
juraj-google-style
def get_or_create(session, model, **kwargs): instance = session.query(model).filter_by(**kwargs).first() if instance: return instance, False else: instance = model(**kwargs) if 'dataset' in kwargs: instance.update_sequence_id(session, kwargs['dataset']) session.add(instance) session.commit() return instance, True
Get or create sqlalchemy instance. Args: session (Sqlalchemy session): model (sqlalchemy model): kwargs (dict): kwargs to lookup or create instance. Returns: Tuple: first element is found or created instance, second is boolean - True if instance created, False if instance found.
juraj-google-style
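A hedged usage sketch with a minimal SQLAlchemy model (names are illustrative; assumes SQLAlchemy 1.4+ for declarative_base in sqlalchemy.orm):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Tag(Base):
    __tablename__ = 'tags'
    id = Column(Integer, primary_key=True)
    name = Column(String, unique=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    tag, created = get_or_create(session, Tag, name='alpha')  # created is True
    tag, created = get_or_create(session, Tag, name='alpha')  # found again, created is False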
def absl_to_cpp(level): if not isinstance(level, int): raise TypeError('Expect an int level, found {}'.format(type(level))) if level >= 0: return 0 else: return -level
Converts an absl log level to a cpp log level. Args: level: int, an absl.logging level. Raises: TypeError: Raised when level is not an integer. Returns: The corresponding integer level for use in Abseil C++.
juraj-google-style
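A quick sketch of the mapping the code implements: any non-negative absl level collapses to C++ level 0, while negative levels map to their magnitude.

print(absl_to_cpp(0))   # 0
print(absl_to_cpp(2))   # 0
print(absl_to_cpp(-1))  # 1
print(absl_to_cpp(-2))  # 2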
def report_server_init_errors(address=None, port=None, **kwargs): try: (yield) except EnvironmentError as e: if (e.errno == errno.EADDRINUSE): log.critical('Cannot start Bokeh server, port %s is already in use', port) elif (e.errno == errno.EADDRNOTAVAIL): log.critical("Cannot start Bokeh server, address '%s' not available", address) else: codename = errno.errorcode[e.errno] log.critical('Cannot start Bokeh server [%s]: %r', codename, e) sys.exit(1)
A context manager to help print more informative error messages when a ``Server`` cannot be started due to a network problem. Args: address (str) : network address that the server will be listening on port (int) : network address that the server will be listening on Example: .. code-block:: python with report_server_init_errors(**server_kwargs): server = Server(applications, **server_kwargs) If there are any errors (e.g. port or address in already in use) then a critical error will be logged and the process will terminate with a call to ``sys.exit(1)``
codesearchnet
def get(self, url, params=None, **kwargs): return self.call_api( "GET", url, params=params, **kwargs )
Call the API with a GET request. Args: url (str): Resource location relative to the base URL. params (dict or None): Query-string parameters. Returns: ResultParser or ErrorParser.
juraj-google-style
def optimize_boolean_expression_comparisons(ir_blocks): operator_inverses = {u'=': u'!=', u'!=': u'='} def visitor_fn(expression): 'Expression visitor function that performs the above rewriting.' if (not isinstance(expression, BinaryComposition)): return expression left_is_binary_composition = isinstance(expression.left, BinaryComposition) right_is_binary_composition = isinstance(expression.right, BinaryComposition) if ((not left_is_binary_composition) and (not right_is_binary_composition)): return expression identity_literal = None inverse_literal = None if (expression.operator == u'='): identity_literal = TrueLiteral inverse_literal = FalseLiteral elif (expression.operator == u'!='): identity_literal = FalseLiteral inverse_literal = TrueLiteral else: return expression expression_to_rewrite = None if ((expression.left == identity_literal) and right_is_binary_composition): return expression.right elif ((expression.right == identity_literal) and left_is_binary_composition): return expression.left elif ((expression.left == inverse_literal) and right_is_binary_composition): expression_to_rewrite = expression.right elif ((expression.right == inverse_literal) and left_is_binary_composition): expression_to_rewrite = expression.left if (expression_to_rewrite is None): return expression elif (expression_to_rewrite.operator not in operator_inverses): return expression else: return BinaryComposition(operator_inverses[expression_to_rewrite.operator], expression_to_rewrite.left, expression_to_rewrite.right) new_ir_blocks = [] for block in ir_blocks: new_block = block.visit_and_update_expressions(visitor_fn) new_ir_blocks.append(new_block) return new_ir_blocks
Optimize comparisons of a boolean binary comparison expression against a boolean literal. Rewriting example: BinaryComposition( '=', BinaryComposition('!=', something, NullLiteral) False) The above is rewritten into: BinaryComposition('=', something, NullLiteral) Args: ir_blocks: list of basic block objects Returns: a new list of basic block objects, with the optimization applied
codesearchnet
def cudnn_stacked_bi_gru(units, n_hidden, seq_lengths=None, n_stacks=2, keep_prob=1.0, concat_stacked_outputs=False, trainable_initial_states=False, name='cudnn_stacked_bi_gru', reuse=False): if (seq_lengths is None): seq_lengths = (tf.ones([tf.shape(units)[0]], dtype=tf.int32) * tf.shape(units)[1]) outputs = [units] with tf.variable_scope(name, reuse=reuse): for n in range(n_stacks): if (n == 0): inputs = outputs[(- 1)] else: inputs = variational_dropout(outputs[(- 1)], keep_prob=keep_prob) ((h_fw, h_bw), _) = cudnn_bi_gru(inputs, n_hidden, seq_lengths, n_layers=1, trainable_initial_states=trainable_initial_states, name='{}_cudnn_bi_gru'.format(n), reuse=reuse) outputs.append(tf.concat([h_fw, h_bw], axis=2)) if concat_stacked_outputs: return tf.concat(outputs[1:], axis=2) return outputs[(- 1)]
Fast CuDNN Stacked Bi-GRU implementation Args: units: tf.Tensor with dimensions [B x T x F], where B - batch size T - number of tokens F - features n_hidden: dimensionality of hidden state seq_lengths: number of tokens in each sample in the batch n_stacks: number of stacked Bi-GRU keep_prob: dropout keep_prob between Bi-GRUs (intra-layer dropout) concat_stacked_outputs: return last Bi-GRU output or concat outputs from every Bi-GRU, trainable_initial_states: whether to create a special trainable variable to initialize the hidden states of the network or use just zeros name: name of the variable scope to use reuse: whether to reuse already initialized variable Returns: h - all hidden states along T dimension, tf.Tensor with dimensionality [B x T x ((n_hidden * 2) * n_stacks)]
codesearchnet
def easeInOutExpo(n): _checkRange(n) if n == 0: return 0 elif n == 1: return 1 else: n = n * 2 if n < 1: return 0.5 * 2**(10 * (n - 1)) else: n -= 1 return 0.5 * (-1 * (2 ** (-10 * n)) + 2)
An exponential tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
juraj-google-style
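A usage sketch, assuming easeInOutExpo and its _checkRange helper are in scope (e.g. from the pytweening module): sample the tween at a few time steps to see the accelerate/decelerate shape.

for t in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(t, round(easeInOutExpo(t), 4))
# progress grows slowly near 0.0, fastest around 0.5, then levels off toward 1.0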
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_attention_mask = (attention_mask[:, None, None, :] == attention_mask[:, None, :, None])[:, :, -sequence_length:, :].to(dtype) padding_mask = causal_mask[:, :, :, :mask_length] + padding_attention_mask padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
                Batch size.
github-repos
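A small usage sketch, assuming the helper above is in scope; it expands a 2D padding mask into the 4D causal mask used during decoding:

import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])  # one sequence of length 4, last position padded
cache_position = torch.arange(4)
mask = _prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask, sequence_length=4, target_length=4,
    dtype=torch.float32, cache_position=cache_position, batch_size=1)
print(mask.shape)  # torch.Size([1, 1, 4, 4])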