code: string, lengths 20-4.93k
docstring: string, lengths 33-1.27k
source: string, 3 distinct values
def CreateMock(self, class_to_mock): new_mock = MockObject(class_to_mock) self._mock_objects.append(new_mock) return new_mock
Create a new mock object. Args: class_to_mock: the class to be mocked. Returns: MockObject that can be used as the class_to_mock would be.
codesearchnet
def get_SZ(self, psd, geometry): if ((self._S_table is None) or (self._Z_table is None)): raise AttributeError('Initialize or load the scattering table first.') if ((not isinstance(psd, PSD)) or (self._previous_psd != psd)): self._S_dict = {} self._Z_dict = {} psd_w = psd(self._psd_D) for geom in self.geometries: self._S_dict[geom] = trapz((self._S_table[geom] * psd_w), self._psd_D) self._Z_dict[geom] = trapz((self._Z_table[geom] * psd_w), self._psd_D) self._previous_psd = psd return (self._S_dict[geometry], self._Z_dict[geometry])
Compute the scattering matrices for the given PSD and geometries. Returns: The new amplitude (S) and phase (Z) matrices.
codesearchnet
def update(self, attributes=None): resource_type = self._resource_type() resource_path = self._resource_path() session = self._session singleton = self.is_singleton() id = (None if singleton else self.id) url = session._build_url(resource_path, id) attributes = build_request_body(resource_type, self.id, attributes=attributes) process = self._mk_one(session, singleton=singleton) return session.patch(url, CB.json(200, process), json=attributes)
Update this resource. Not all aspects of a resource can be updated. If the server rejects the update, an error will be thrown. Keyword Arguments: attributes(dict): Attributes that are to be updated Returns: Resource: A new instance of this type of resource with the updated attributes. On error, an exception is thrown.
codesearchnet
def chgroups(name, groups, append=True): if six.PY2: name = _to_unicode(name) if isinstance(groups, string_types): groups = groups.split(',') groups = [x.strip(' *') for x in groups] if six.PY2: groups = [_to_unicode(x) for x in groups] ugrps = set(list_groups(name)) if (ugrps == set(groups)): return True name = _cmd_quote(name) if (not append): for group in ugrps: group = _cmd_quote(group).lstrip("'").rstrip("'") if (group not in groups): cmd = 'net localgroup "{0}" {1} /delete'.format(group, name) __salt__['cmd.run_all'](cmd, python_shell=True) for group in groups: if (group in ugrps): continue group = _cmd_quote(group).lstrip("'").rstrip("'") cmd = 'net localgroup "{0}" {1} /add'.format(group, name) out = __salt__['cmd.run_all'](cmd, python_shell=True) if (out['retcode'] != 0): log.error(out['stdout']) return False agrps = set(list_groups(name)) return (len((ugrps - agrps)) == 0)
Change the groups this user belongs to, add append=False to make the user a member of only the specified groups Args: name (str): The user name for which to change groups groups (str, list): A single group or a list of groups to assign to the user. For multiple groups this can be a comma delimited string or a list. append (bool, optional): True adds the passed groups to the user's current groups. False sets the user's groups to the passed groups only. Default is True. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' user.chgroups jsnuffy Administrators,Users True
codesearchnet
def parse(self, buf: memoryview, params: Params) \ -> Tuple[Command, memoryview]: try: tag, buf = Tag.parse(buf, params) except NotParseable as exc: return InvalidCommand(params, exc), buf[0:0] else: params = params.copy(tag=tag.value) cmd_parts: List[bytes] = [] while True: try: _, buf = Space.parse(buf, params) atom, buf = Atom.parse(buf, params) cmd_parts.append(atom.value.upper()) except NotParseable as exc: return InvalidCommand(params, exc), buf[0:0] command = b' '.join(cmd_parts) cmd_type = self.commands.get(command) if not cmd_type: return InvalidCommand(params, None, command), buf[0:0] elif not cmd_type.compound: break params = params.copy(command_name=command) try: return cmd_type.parse(buf, params) except NotParseable as exc: return InvalidCommand(params, exc, command, cmd_type), buf[0:0]
Parse the given bytes into a command. The basic syntax is a tag string, a command name, possibly some arguments, and then an endline. If the command has a complete structure but cannot be parsed, an :class:`InvalidCommand` is returned. Args: buf: The bytes to parse. params: The parsing parameters.
juraj-google-style
def within(self, other: 'Interval', inclusive: bool=True) -> bool: if (not other): return False if inclusive: return ((self.start >= other.start) and (self.end <= other.end)) else: return ((self.start > other.start) and (self.end < other.end))
Is this interval contained within the other? Args: other: the :class:`Interval` to check inclusive: use inclusive rather than exclusive range checks?
codesearchnet
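A minimal sketch of how within() behaves, using a stripped-down stand-in Interval that keeps only numeric start/end (the real class has more fields):

class Interval:                                   # stand-in with just start/end
    def __init__(self, start, end):
        self.start, self.end = start, end
    def within(self, other, inclusive=True):
        if not other:
            return False
        if inclusive:
            return self.start >= other.start and self.end <= other.end
        return self.start > other.start and self.end < other.end

inner, outer = Interval(2, 5), Interval(2, 8)
print(inner.within(outer))                        # True: shared endpoint allowed
print(inner.within(outer, inclusive=False))       # False: strict check rejects the shared start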
def worker_task(work_item, config): global _workspace _ensure_workspace(config) result = worker(work_item.module_path, config.python_version, work_item.operator_name, work_item.occurrence, config.test_command, config.timeout) return (work_item.job_id, result)
The celery task which performs a single mutation and runs a test suite. This runs `cosmic-ray worker` in a subprocess and returns the results, passing `config` to it via stdin. Args: work_item: A dict describing a WorkItem. config: The configuration to use for the test execution. Returns: An updated WorkItem
codesearchnet
def __call__(self, name, value): super(IterableTypeChecker, self).__call__(name, value) if isinstance(self.item_type, type): if not all(isinstance(o, self.item_type) for o in value): raise ValueError("All elements of %s must be %s" % (name, self.item_type)) if isinstance(self.min_length, int): if len(value) < self.min_length: raise ValueError("%s must be longer than %s (or equal)" % (name, self.min_length)) if isinstance(self.max_length, int): if len(value) > self.max_length: raise ValueError("%s must be shorter than %s (or equal)" % (name, self.max_length)) if len(value) == 0 and not self.empty: raise ValueError("%s must not be empty" % name)
Call method. Args: name (str): the value's name. value (iterable): the value to check. Raises: ValueError: if value is not type iter_type. ValueError: if any item in value is not type item_type. ValueError: if value's length is less than min_length. ValueError: if value's length is more than max_length. ValueError: if value's length is 0 and emptiness is not allowed.
juraj-google-style
def trigger_if_changed(self, obj, old): new_value = self.__get__(obj, obj.__class__) if (not self.property.matches(old, new_value)): self._trigger(obj, old, new_value)
Send a change event notification if the property is set to a value that is not equal to ``old``. Args: obj (HasProps) : The object the property is being set on. old (obj) : The previous value of the property to compare against Returns: None
codesearchnet
def test_fail(self, e=None): self._test_end(TestResultEnums.TEST_RESULT_FAIL, e)
To mark the test as failed in this record. Only test_fail does an instance check because we want 'assert xxx' to also fail the test the same way assert_true does. Args: e: An exception object. It can be an instance of AssertionError or mobly.base_test.TestFailure.
github-repos
async def _retreive_websocket_info(self): if (self._web_client is None): self._web_client = WebClient(token=self.token, base_url=self.base_url, ssl=self.ssl, proxy=self.proxy, run_async=True, loop=self._event_loop, session=self._session) self._logger.debug('Retrieving websocket info.') if (self.connect_method in ['rtm.start', 'rtm_start']): resp = (await self._web_client.rtm_start()) else: resp = (await self._web_client.rtm_connect()) url = resp.get('url') if (url is None): msg = 'Unable to retreive RTM URL from Slack.' raise client_err.SlackApiError(message=msg, response=resp) return (url, resp.data)
Retrieves the WebSocket info from Slack. Returns: A tuple of websocket information. e.g. ( "wss://...", { "self": {"id": "U01234ABC","name": "robotoverlord"}, "team": { "domain": "exampledomain", "id": "T123450FP", "name": "ExampleName" } } ) Raises: SlackApiError: Unable to retrieve the RTM URL from Slack.
codesearchnet
def file_config(filename=None): logger.debug('On entry into file_config(), filename = {}'.format(filename)) if filename is None: filename = CONFIG_DEFAULT_PATH logger.debug('file_config() will try to open `{}`'.format(filename)) with open(filename) as f: try: config = json.load(f) except ValueError as err: raise exceptions.ConfigurationError( 'Failed to parse the JSON configuration from `{}`, {}'.format(filename, err) ) logger.info('Configuration loaded from `{}`'.format(filename)) return config
Returns the config values found in a configuration file. Args: filename (str): the JSON file with the configuration values. If ``None``, CONFIG_DEFAULT_PATH will be used. Returns: dict: The config values in the specified config file (or the file at CONFIG_DEFAULT_PATH, if filename == None)
juraj-google-style
def __init__(self, header_handler, packer, version): self._header_handler = header_handler self._packer = packer self._version = version self._method_proxies = {}
Initializes a SOAP service. Args: header_handler: A googleads.common.HeaderHandler instance used to set SOAP and HTTP headers. packer: A googleads.common.SoapPacker instance used to transform entities. version: the version of the current API, e.g. 'v201811'
juraj-google-style
def get_property(self, name): for prop in self.resource.properties: if prop.name == name: return prop raise AttributeError(name)
Return a named property for a resource, if available. Will raise an `AttributeError` if the property does not exist Args: name (str): Name of the property to return Returns: `ResourceProperty`
juraj-google-style
def populate_request_data(self, request_args): request_args['auth'] = HTTPBasicAuth(self._username, self._password) return request_args
Add the authentication info to the supplied dictionary. We use the `requests.HTTPBasicAuth` class as the `auth` param. Args: `request_args`: The arguments that will be passed to the request. Returns: The updated arguments for the request.
codesearchnet
def has_relationship(self, left_id, left_type, right_id, right_type, rel_type='Related To'): data = self.get_object(left_id, left_type) if (not data): raise CRITsOperationalError('Crits Object not found with id {}and type {}'.format(left_id, left_type)) if ('relationships' not in data): return False for relationship in data['relationships']: if (relationship['relationship'] != rel_type): continue if (relationship['value'] != right_id): continue if (relationship['type'] != right_type): continue return True return False
Checks if the two objects are related Args: left_id: The CRITs ID of the first indicator left_type: The CRITs TLO type of the first indicator right_id: The CRITs ID of the second indicator right_type: The CRITs TLO type of the second indicator rel_type: The relationships type ("Related To", etc) Returns: True or False if the relationship exists or not.
codesearchnet
def _dilated_conv_layer(self, output_channels, dilation_rate, apply_relu, name): layer_components = [conv.Conv2D(output_channels, [3, 3], initializers=self._initializers, regularizers=self._regularizers, rate=dilation_rate, name=('dilated_conv_' + name))] if apply_relu: layer_components.append((lambda net: tf.nn.relu(net, name=('relu_' + name)))) return sequential.Sequential(layer_components, name=name)
Create a dilated convolution layer. Args: output_channels: int. Number of output channels for each pixel. dilation_rate: int. Represents how many pixels each stride offset will move. A value of 1 indicates a standard convolution. apply_relu: bool. If True, a ReLU non-linearity is added. name: string. Name for layer. Returns: a sonnet Module for a dilated convolution.
codesearchnet
def is_valid_assignment(self, mtf_dimension_name, mesh_dimension_name): return ((mtf_dimension_name in self._splittable_mtf_dimension_names) and (self._mtf_dimension_name_to_size_gcd[mtf_dimension_name] % self._mesh_dimension_name_to_size[mesh_dimension_name] == 0))
Whether this MTF dimension may be assigned to this mesh dimension. Args: mtf_dimension_name: string, the name of a Mesh TensorFlow dimension. mesh_dimension_name: string, the name of a mesh dimension. Returns: A boolean indicating whether the assignment is valid.
juraj-google-style
def load_file_to_base64_str(f_path): path = abs_path(f_path) with io.open(path, 'rb') as f: f_bytes = f.read() base64_str = base64.b64encode(f_bytes).decode('utf-8') return base64_str
Loads the content of a file into a base64 string. Args: f_path: full path to the file including the file name. Returns: A base64 string representing the content of the file in utf-8 encoding.
codesearchnet
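A self-contained round-trip sketch of the base64 helper above; the project's abs_path helper is replaced with os.path.abspath, which is assumed to be all it does here:

import base64, io, os, tempfile

def load_file_to_base64_str(f_path):              # re-stated with os.path.abspath
    with io.open(os.path.abspath(f_path), 'rb') as f:
        return base64.b64encode(f.read()).decode('utf-8')

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello world')
encoded = load_file_to_base64_str(tmp.name)
assert base64.b64decode(encoded) == b'hello world'   # decoding recovers the original bytes
os.unlink(tmp.name)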
def get_paths(self, id_or_uri, path_id_or_uri=''): if path_id_or_uri: uri = self._client.build_uri(path_id_or_uri) if ('/paths' not in uri): uri = (((self._client.build_uri(id_or_uri) + '/paths') + '/') + path_id_or_uri) else: uri = (self._client.build_uri(id_or_uri) + '/paths') return self._client.get(uri)
Gets all paths or a specific attachment path for the specified volume attachment. Args: id_or_uri: Can be either the volume attachment id or the volume attachment uri. path_id_or_uri: Can be either the path id or the path uri. Returns: dict: Paths.
codesearchnet
def thaw_parameter(self, name): i = self.get_parameter_names(include_frozen=True).index(name) self.unfrozen_mask[i] = True
Thaw a parameter by name Args: name: The name of the parameter
codesearchnet
def remove_cert_binding(name, site, hostheader='', ipaddress='*', port=443): name = six.text_type(name).upper() binding_info = _get_binding_info(hostheader, ipaddress, port) ps_cmd = ['$Site = Get-ChildItem', '-Path', "'IIS:\\Sites'", '|', 'Where-Object', " {{ $_.Name -Eq '{0}' }};".format(site), '$Binding = $Site.Bindings.Collection', '| Where-Object { $_.bindingInformation', "-Eq '{0}' }};".format(binding_info), '$Binding.RemoveSslCertificate()'] current_cert_bindings = list_cert_bindings(site) if (binding_info not in current_cert_bindings): log.warning('Binding not found: %s', binding_info) return True if (name != current_cert_bindings[binding_info]['certificatehash']): log.debug('Certificate binding already absent: %s', name) return True cmd_ret = _srvmgr(ps_cmd) if (cmd_ret['retcode'] != 0): msg = 'Unable to remove certificate binding: {0}\nError: {1}'.format(name, cmd_ret['stderr']) raise CommandExecutionError(msg) new_cert_bindings = list_cert_bindings(site) if (binding_info not in new_cert_bindings): log.warning('Binding not found: %s', binding_info) return True if (name != new_cert_bindings[binding_info]['certificatehash']): log.debug('Certificate binding removed successfully: %s', name) return True log.error('Unable to remove certificate binding: %s', name) return False
Remove a certificate from an IIS Web Binding. .. versionadded:: 2016.11.0 .. note:: This function only removes the certificate from the web binding. It does not remove the web binding itself. Args: name (str): The thumbprint of the certificate. site (str): The IIS site name. hostheader (str): The host header of the binding. ipaddress (str): The IP address of the binding. port (int): The TCP port of the binding. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' win_iis.remove_cert_binding name='AAA000' site='site0' hostheader='example.com' ipaddress='*' port='443'
codesearchnet
def delay(self, n, start_time): if (n > self.max_retries or (n > self.min_retries and time.time() - start_time > self.max_retry_period)): return -1 return min( math.pow(self.backoff_factor, n-1) * self.initial_delay, self.max_delay)
Calculate delay before the next retry. Args: n: the number of current attempt. The first attempt should be 1. start_time: the time when retry started in unix time. Returns: Number of seconds to wait before next retry. -1 if retry should give up.
juraj-google-style
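A standalone sketch of the backoff formula used by delay(): the wait grows as backoff_factor**(n-1) * initial_delay and is capped at max_delay (the parameter values below are hypothetical):

import math

initial_delay, backoff_factor, max_delay = 1.0, 2.0, 30.0
for n in range(1, 8):                             # n is the attempt number, starting at 1
    wait = min(math.pow(backoff_factor, n - 1) * initial_delay, max_delay)
    print(n, wait)                                # 1, 2, 4, 8, 16, then capped at 30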
def RegisterImplementation(source): global _source_implementations if 'name' not in source.__dict__: raise RuntimeError("'name' not defined in Source %r" % (source,)) _source_implementations[source.name] = source
Register a Source implementation with the factory method. Sources being registered are expected to have a name attribute, unique to themselves. Child modules are expected to call this method in the file-level scope. Args: source: A class type that is a subclass of Source Returns: Nothing Raises: RuntimeError: no 'name' entry in this source.
github-repos
def write(self, array_dict: Dict[str, np.ndarray]) -> None: self._writer.write(_make_example(array_dict))
Writes a dictionary of arrays to the file. Args: array_dict: A record to write. Should be a dictionary with string keys and numpy array values.
github-repos
def update_panel(adapter, panel_name, panel_version, new_version=None, new_date=None): panel_obj = adapter.gene_panel(panel_name, panel_version) if not panel_obj: raise IntegrityError("Panel %s version %s does not exist" % (panel_name, panel_version)) updated_panel = adapter.update_panel(panel_obj, new_version, new_date) panel_id = updated_panel['_id'] update = {'$set': {}} if new_version: update['$set']['panels.$.version'] = updated_panel['version'] if new_date: update['$set']['panels.$.updated_at'] = updated_panel['date'] LOG.info('Updating affected cases with {0}'.format(update)) query = {'panels': { '$elemMatch': {'panel_name': panel_name}}} adapter.case_collection.update_many(query, update) return updated_panel
Update a gene panel in the database. We need to update the actual gene panel and then all cases that refer to the panel. Args: adapter(scout.adapter.MongoAdapter) panel_name(str): Unique name for a gene panel panel_version(float) new_version(float) new_date(datetime.datetime) Returns: updated_panel(scout.models.GenePanel): The updated gene panel object
juraj-google-style
def insert_query_m(data, table, conn, columns=None, db_type='mysql'): if (len(data) > 10000): _chunk_query(data, 10000, columns, conn, table, db_type) else: if (db_type == 'sqlite'): type_sign = '?' else: type_sign = '%s' type_com = (type_sign + ', ') type = (type_com * (len(data[0]) - 1)) type = (type + type_sign) if columns: stmt = (((((('INSERT INTO ' + table) + '( ') + columns) + ') VALUES (') + type) + ')') else: stmt = (((('INSERT INTO ' + table) + ' VALUES (') + type) + ')') cursor = conn.cursor() cursor.executemany(stmt, data) conn.commit()
Insert python list of tuples into SQL table Args: data (list): List of tuples table (str): Name of database table conn (connection object): database connection object columns (str): String of column names to use if not assigned then all columns are presumed to be used [Optional] db_type (str): If "sqlite" or "mysql"
codesearchnet
def run_scan_command(self, server_info: ServerConnectivityInfo, scan_command: PluginScanCommand) -> PluginScanResult: plugin_class = self._plugins_repository.get_plugin_class_for_command(scan_command) plugin = plugin_class() return plugin.process_task(server_info, scan_command)
Run a single scan command against a server; will block until the scan command has been completed. Args: server_info: The server's connectivity information. The test_connectivity_to_server() method must have been called first to ensure that the server is online and accessible. scan_command: The scan command to run against this server. Returns: The result of the scan command, which will be an instance of the scan command's corresponding PluginScanResult subclass.
codesearchnet
def elevation(self, value=0.0): if (value is not None): try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float for field `elevation`'.format(value)) if (value < (- 1000.0)): raise ValueError('value need to be greater or equal -1000.0 for field `elevation`') if (value >= 9999.9): raise ValueError('value need to be smaller 9999.9 for field `elevation`') self._elevation = value
Corresponds to IDD Field `elevation` Args: value (float): value for IDD Field `elevation` Unit: m Default value: 0.0 value >= -1000.0 value < 9999.9 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def _PrintWarningCounters(self, storage_counters): warnings_by_pathspec = storage_counters.get('warnings_by_path_spec', {}) warnings_by_parser_chain = storage_counters.get('warnings_by_parser_chain', {}) if (not warnings_by_parser_chain): self._output_writer.Write('No warnings stored.\n\n') return table_view = views.ViewsFactory.GetTableView(self._views_format_type, title='Warnings generated per parser', column_names=['Parser (plugin) name', 'Number of warnings']) for (parser_chain, count) in warnings_by_parser_chain.items(): parser_chain = (parser_chain or '<No parser>') table_view.AddRow([parser_chain, '{0:d}'.format(count)]) table_view.Write(self._output_writer) table_view = views.ViewsFactory.GetTableView(self._views_format_type, title='Pathspecs with most warnings', column_names=['Number of warnings', 'Pathspec']) top_pathspecs = warnings_by_pathspec.most_common(10) for (pathspec, count) in top_pathspecs: for (path_index, line) in enumerate(pathspec.split('\n')): if (not line): continue if (path_index == 0): table_view.AddRow(['{0:d}'.format(count), line]) else: table_view.AddRow(['', line]) table_view.Write(self._output_writer)
Prints a summary of the warnings. Args: storage_counters (dict): storage counters.
codesearchnet
def stop(self): if (self._status is TaskStatus.STOPPED): return if (self._status is not TaskStatus.STARTED): raise RuntimeError(('Cannot stop %s in state %s' % (self, self._status))) self._stop() STARTED_TASKS.remove(self) self._status = TaskStatus.STOPPED
Stop a task immediately. Raises: RuntimeError: If the task hasn't been started or has already been stopped.
codesearchnet
def run(self, args): jlink = self.create_jlink(args) mcu = args.name[0].lower() if pylink.unlock(jlink, mcu): print('Successfully unlocked device!') else: print('Failed to unlock device!')
Unlocks the target device. Args: self (UnlockCommand): the ``UnlockCommand`` instance args (Namespace): the arguments passed on the command-line Returns: ``None``
juraj-google-style
def write_buffers(self, conn, locked=True): if (conn is None): raise ValueError('Cannot write_buffers to connection None') sent = 0 for (header, payload) in self._buffers: (yield conn.write_message(header, locked=locked)) (yield conn.write_message(payload, binary=True, locked=locked)) sent += (len(header) + len(payload)) raise gen.Return(sent)
Write any buffer headers and payloads to the given connection. Args: conn (object) : May be any object with a ``write_message`` method. Typically, a Tornado ``WSHandler`` or ``WebSocketClientConnection`` locked (bool) : Returns: int : number of bytes sent
codesearchnet
def get_transition_chempots(self, element): if element not in self.elements: raise ValueError("get_transition_chempots can only be called with " "elements in the phase diagram.") critical_chempots = [] for facet in self.facets: chempots = self._get_facet_chempots(facet) critical_chempots.append(chempots[element]) clean_pots = [] for c in sorted(critical_chempots): if len(clean_pots) == 0: clean_pots.append(c) else: if abs(c - clean_pots[-1]) > PhaseDiagram.numerical_tol: clean_pots.append(c) clean_pots.reverse() return tuple(clean_pots)
Get the critical chemical potentials for an element in the Phase Diagram. Args: element: An element. Has to be in the PD in the first place. Returns: A sorted sequence of critical chemical potentials, from less negative to more negative.
juraj-google-style
def NewCheckpointReader(filepattern): try: return CheckpointReader(compat.as_bytes(filepattern)) except RuntimeError as e: error_translator(e)
A function that returns a CheckPointReader. Args: filepattern: The filename. Returns: A CheckpointReader object.
github-repos
def assert_scalar_v2(tensor, message=None, name=None): assert_scalar(tensor=tensor, message=message, name=name)
Asserts that the given `tensor` is a scalar. This function raises `ValueError` unless it can be certain that the given `tensor` is a scalar. `ValueError` is also raised if the shape of `tensor` is unknown. This is always checked statically, so this method returns nothing. Args: tensor: A `Tensor`. message: A string to prefix to the default message. name: A name for this operation. Defaults to "assert_scalar" Raises: ValueError: If the tensor is not scalar (rank 0), or if its shape is unknown.
github-repos
def IsNamedTuple(component): if not isinstance(component, tuple): return False has_fields = bool(getattr(component, '_fields', None)) return has_fields
Return true if the component is a namedtuple. Unfortunately, Python offers no native way to check for a namedtuple type. Instead, we need to use a simple hack which should suffice for our case. namedtuples are internally implemented as tuples, therefore we need to: 1. Check if the component is an instance of tuple. 2. Check if the component has a _fields attribute which regular tuples do not have. Args: component: The component to analyze. Returns: True if the component is a namedtuple or False otherwise.
github-repos
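A quick demonstration of the namedtuple check above; the function is re-stated in compressed form so the snippet runs on its own:

import collections

def IsNamedTuple(component):                      # same logic as the record above
    return isinstance(component, tuple) and bool(getattr(component, '_fields', None))

Point = collections.namedtuple('Point', ['x', 'y'])
print(IsNamedTuple(Point(1, 2)))                  # True: tuple subclass with _fields
print(IsNamedTuple((1, 2)))                       # False: plain tuple has no _fields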
def alias_tensors(*args): def alias_if_tensor(a): return array_ops.identity(a) if isinstance(a, tensor.Tensor) else a if len(args) > 1: return (alias_if_tensor(a) for a in args) elif len(args) == 1: return alias_if_tensor(args[0]) raise ValueError('at least one argument required')
Wraps any Tensor arguments with an identity op. Any other argument, including Variables, is returned unchanged. Args: *args: Any arguments. Must contain at least one element. Returns: Same as *args, with Tensor instances replaced as described. Raises: ValueError: If args doesn't meet the requirements.
github-repos
def apply_region_configs(env_config): new_config = env_config.copy() for region in env_config.get('regions', REGIONS): if isinstance(env_config.get('regions'), dict): region_specific_config = env_config['regions'][region] new_config[region] = dict(DeepChainMap(region_specific_config, env_config)) else: new_config[region] = env_config.copy() LOG.debug('Region Specific Config:\n%s', new_config) return new_config
Override default env configs with region specific configs and nest all values under a region Args: env_config (dict): The environment specific config. Return: dict: Newly updated dictionary with region overrides applied.
codesearchnet
def imag(x): if any_symbolic_tensors((x,)): return Imag().symbolic_call(x) return backend.numpy.imag(x)
Return the imaginary part of the complex argument. Args: x: Input tensor. Returns: The imaginary component of the complex argument.
github-repos
def _batch_prepare_for_model(self, batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]], batch_shape_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]], batch_pronunciation_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding: batch_outputs = {} for i, (first_ids, second_ids) in enumerate(batch_ids_pairs): first_shape_ids, second_shape_ids = batch_shape_ids_pairs[i] first_pronunciation_ids, second_pronunciation_ids = batch_pronunciation_ids_pairs[i] outputs = self.prepare_for_model(first_ids, first_shape_ids, first_pronunciation_ids, pair_ids=second_ids, pair_shape_ids=second_shape_ids, pair_pronunciation_ids=second_pronunciation_ids, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs batch_shape_ids_pairs: list of tokenized input shape ids or input shape ids pairs batch_pronunciation_ids_pairs: list of tokenized input pronunciation ids or input pronunciation ids pairs
github-repos
def chop(array, epsilon=1e-10): ret = np.array(array) if np.isrealobj(ret): ret[(abs(ret) < epsilon)] = 0.0 else: ret.real[(abs(ret.real) < epsilon)] = 0.0 ret.imag[(abs(ret.imag) < epsilon)] = 0.0 return ret
Truncate small values of a complex array. Args: array (array_like): array in which to truncate small values. epsilon (float): threshold. Returns: np.array: A new operator with small values set to zero.
codesearchnet
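A numeric sketch of chop() truncating small real and imaginary parts (the threshold is the default 1e-10 from the record above):

import numpy as np

def chop(array, epsilon=1e-10):                   # same logic as the record above
    ret = np.array(array)
    if np.isrealobj(ret):
        ret[abs(ret) < epsilon] = 0.0
    else:
        ret.real[abs(ret.real) < epsilon] = 0.0
        ret.imag[abs(ret.imag) < epsilon] = 0.0
    return ret

print(chop(np.array([1.0 + 1e-12j, 1e-15 + 2.0j])))   # -> [1.+0.j  0.+2.j]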
def _read_mptcp_remove(self, bits, size): adid = [] for _ in range(size): adid.append(self._read_unpack(1)) data = dict(subtype='REMOVE_ADDR', removeaddr=dict(addrid=(adid or None))) return data
Read Remove Address option. Positional arguments: * bits - str, 4-bit data * size - int, length of option Returns: * dict -- extracted Remove Address (REMOVE_ADDR) option Structure of REMOVE_ADDR [RFC 6824]: 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +---------------+---------------+-------+-------+---------------+ | Kind | Length = 3+n |Subtype|(resvd)| Address ID | ... +---------------+---------------+-------+-------+---------------+ (followed by n-1 Address IDs, if required) Octets Bits Name Description 0 0 tcp.opt.kind Kind (30) 1 8 tcp.opt.length Length 2 16 tcp.opt.mp.subtype Subtype (4) 2 20 - Reserved (must be zero) 3 24 tcp.opt.mp.removeaddr.addrid Address ID (optional list)
codesearchnet
def __init__(self, output, output_name, loss_fn, loss_weight=None, training_target=None, output_loss_metric=None, sample_weight=None, sample_weight_mode=None): self._output = output self._output_name = output_name self._loss_fn = loss_fn self._loss_weight = loss_weight self._training_target = training_target self._output_loss_metric = output_loss_metric self._sample_weight = sample_weight self._sample_weight_mode = sample_weight_mode
Initialize the _TrainingEndpoint. Note that the output and output_name should be stable as long as the model structure doesn't change. The training_target is supposed to be mutable since the information is provided via `compile()`. Args: output: the output tensor of the model. output_name: the unique name of the output tensor. loss_fn: the loss function for the output tensor. loss_weight: float, the weights for the loss. training_target: the _TrainingTarget for the model. output_loss_metric: the metric object for the loss function. sample_weight: the weights for how a sample is weighted during metric and loss calculation. Could be None. sample_weight_mode: string, 'temporal', 'samplewise' or None. The mode for how the sample_weight is populated.
github-repos
def begin_stream(self, command: Command) -> Reply: yield from self._control_stream.write_command(command) reply = yield from self._control_stream.read_reply() self.raise_if_not_match( 'Begin stream', ( ReplyCodes.file_status_okay_about_to_open_data_connection, ReplyCodes.data_connection_already_open_transfer_starting, ), reply ) return reply
Start sending content on the data stream. Args: command: A command that tells the server to send data over the data connection. Coroutine. Returns: The begin reply.
juraj-google-style
def __init__(self, topology: Topology, core_assignment: np.ndarray): if not isinstance(topology, Topology): raise ValueError('topology must be a Topology object, got {}'.format(type(topology))) core_assignment = numpy_compat.np_asarray(core_assignment, dtype=np.int32) self._topology = topology if core_assignment.ndim != 3: raise ValueError(f'core_assignment must be a rank 3 numpy array, got shape {core_assignment.shape}') self._num_replicas = core_assignment.shape[0] self._num_cores_per_replica = core_assignment.shape[1] if core_assignment.shape[-1] != topology.mesh_rank: raise ValueError(f'core_assignment.shape[-1] must have size equal to topology rank ({topology.mesh_rank}), got core_assignment.shape={core_assignment.shape}') self._core_assignment = core_assignment self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(self._core_assignment, topology)
Constructs a `DeviceAssignment` object. Args: topology: A `Topology` object that describes the physical TPU topology. core_assignment: A logical to physical core mapping, represented as a rank 3 numpy array. See the description of the `core_assignment` property for more details. Raises: ValueError: If `topology` is not `Topology` object. ValueError: If `core_assignment` is not a rank 3 numpy array.
github-repos
def glob(*args): if len(args) == 1 and isinstance(args[0], list): args = args[0] matches = [] for pattern in args: for item in glob2.glob(pattern): if not os.path.isdir(item): matches.append(item) return matches
Returns list of paths matching one or more wildcard patterns. Args: include_dirs: Include directories in the output
juraj-google-style
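An illustrative call of the glob() wrapper above; it needs the glob2 package, and the patterns and directory layout here are hypothetical:

# Either varargs or a single list of patterns works; directories are filtered out of the result.
py_files = glob('src/**/*.py', 'tools/*.py')
same_result = glob(['src/**/*.py', 'tools/*.py'])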
def merge(profile, branch, merge_into): data = merges.merge(profile, branch, merge_into) return data
Merge a branch into another branch. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. branch The name of the branch to merge. merge_into The name of the branch you want to merge into. Returns: A dict with data about the merge.
juraj-google-style
def __init__(self, xid=None, body_type=None, flags=0, body=b''): super().__init__(xid) self.body_type = body_type self.flags = flags self.body = body
Create a StatsRequest with the optional parameters below. Args: xid (int): xid to be used on the message header. body_type (StatsType): One of the OFPST_* constants. flags (int): OFPSF_REQ_* flags (none yet defined). body (BinaryData): Body of the request.
juraj-google-style
def orthologize(ast, bo, species_id: str): if not species_id: bo.validation_messages.append( ("WARNING", "No species id was provided for orthologization") ) return ast if isinstance(ast, NSArg): if ast.orthologs: if ast.orthologs.get(species_id, None): orthologized_nsarg_val = ast.orthologs[species_id]["decanonical"] ns, value = orthologized_nsarg_val.split(":") ast.change_nsvalue(ns, value) ast.canonical = ast.orthologs[species_id]["canonical"] ast.decanonical = ast.orthologs[species_id]["decanonical"] ast.orthologized = True bo.ast.species.add( (species_id, ast.orthologs[species_id]["species_label"]) ) else: bo.ast.species.add((ast.species_id, ast.species_label)) bo.validation_messages.append( ("WARNING", f"No ortholog found for {ast.namespace}:{ast.value}") ) elif ast.species_id: bo.ast.species.add((ast.species_id, ast.species_label)) if hasattr(ast, "args"): for arg in ast.args: orthologize(arg, bo, species_id) return ast
Recursively orthologize BEL Entities in BEL AST using API endpoint NOTE: - will take first ortholog returned in BEL.bio API result (which may return more than one ortholog) Args: ast (BEL): BEL AST endpoint (str): endpoint url with a placeholder for the term_id Returns: BEL: BEL AST
juraj-google-style
def transpose(self, name=None): if name is None: name = self.module_name + "_transpose" if self._data_format == DATA_FORMAT_NWC: stride = self._stride[1:-1] else: stride = self._stride[2:] return Conv1D(output_channels=lambda: self.input_channels, kernel_shape=self.kernel_shape, stride=stride, padding=self.padding, use_bias=self._use_bias, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, data_format=self._data_format, custom_getter=self._custom_getter, name=name)
Returns matching `Conv1D` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv1D` module.
juraj-google-style
def clean_model_doc_toc(model_doc: List[dict]) -> List[dict]: counts = defaultdict(int) for doc in model_doc: counts[doc['local']] += 1 duplicates = [key for key, value in counts.items() if value > 1] new_doc = [] for duplicate_key in duplicates: titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key}) if len(titles) > 1: raise ValueError(f'{duplicate_key} is present several times in the documentation table of content at `docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the others.') new_doc.append({'local': duplicate_key, 'title': titles[0]}) new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1]) return sorted(new_doc, key=lambda s: s['title'].lower())
Cleans a section of the table of content of the model documentation (one specific modality) by removing duplicates and sorting models alphabetically. Args: model_doc (`List[dict]`): The list of dictionaries extracted from the `_toctree.yml` file for this specific modality. Returns: `List[dict]`: List of dictionaries like the input, but cleaned up and sorted.
github-repos
def __init__(self, filepath): self._filepath = filepath self._subword_text_encoder = text_encoder.SubwordTextEncoder(filepath)
Create a T2tVocabulary. Args: filepath: a string
juraj-google-style
def _make_static_axis_non_negative_list(axis, ndims): axis = distribution_util.make_non_negative_axis(axis, ndims) axis_const = tf.get_static_value(axis) if axis_const is None: raise ValueError( 'Expected argument `axis` to be statically available. Found: %s' % axis) axis = axis_const + np.zeros([1], dtype=axis_const.dtype) return list(int(dim) for dim in axis)
Convert possibly negatively indexed axis to non-negative list of ints. Args: axis: Integer Tensor. ndims: Number of dimensions into which axis indexes. Returns: A list of non-negative Python integers. Raises: ValueError: If `axis` is not statically defined.
juraj-google-style
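The TensorFlow helper above resolves negative axis indices against ndims; conceptually that normalization is just modular arithmetic, as this plain-Python sketch shows (values are made up):

ndims = 4
axis = [-1, 0, -2]
non_negative = [int(a) % ndims for a in axis]     # -> [3, 0, 2]
print(non_negative)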
def tokenize_sentence(input_dict): text, uid = (input_dict['text'], input_dict['id']) tokens = Tokenizer([text], padding=True, truncation=True, return_tensors='pt') tokens = {key: torch.squeeze(val) for key, val in tokens.items()} return ((text, uid), tokens)
It takes a dictionary with a text and an id, tokenizes the text, and returns a tuple of the text and id and the tokenized text Args: input_dict: a dictionary with the text and id of the sentence Returns: A tuple of the text and id, and a dictionary of the tokens.
github-repos
def dtime(sdat, tstart=None, tend=None): tseries = sdat.tseries_between(tstart, tend) time = tseries['t'].values return time[1:] - time[:-1], time[:-1]
Time increment dt. Compute dt as a function of time. Args: sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance. tstart (float): time at which the computation should start. Use the beginning of the time series data if set to None. tend (float): time at which the computation should end. Use the end of the time series data if set to None. Returns: tuple of :class:`numpy.array`: dt and time arrays.
juraj-google-style
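A plain-numpy sketch of the dt computation performed by dtime(): increments between consecutive samples, paired with the time at which each increment starts (the series values are made up):

import numpy as np

time = np.array([0.0, 1.0, 2.5, 4.0])
dt = time[1:] - time[:-1]                         # -> [1.0, 1.5, 1.5]
t_at_start = time[:-1]                            # -> [0.0, 1.0, 2.5]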
def play(self, **kwargs): path = '%s/%s/play' % (self.manager.path, self.get_id()) self.manager.gitlab.http_post(path)
Trigger a job explicitly. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabJobPlayError: If the job could not be triggered
juraj-google-style
def GetPredefinedFile(stubs_subdir, module, extension='.pytd', as_package=False): parts = module.split('.') if as_package: parts.append('__init__') mod_path = path_utils.join(*parts) + extension path = path_utils.join('stubs', stubs_subdir, mod_path) return (path, pytype_source_utils.load_text_file(path))
Get the contents of a predefined PyTD, typically with a file name *.pytd. Arguments: stubs_subdir: the directory, typically "builtins" or "stdlib" module: module name (e.g., "sys" or "__builtins__") extension: either ".pytd" or ".py" as_package: try the module as a directory with an __init__ file Returns: The contents of the file Raises: IOError: if file not found
github-repos
def add_whitespace_before(char, input_file, output_file): line_count = get_line_count(input_file) input_file = open(input_file, 'r') output_file = open(output_file, 'r+') for line in range(line_count): string = input_file.readline() if re.search(r'[a-zA-Z0-9]' + char, string) != None: string = re.sub(char, ' ' + char, string) output_file.write(string) input_file.close()
Adds a space before a character if there isn't one already. Args: char: string, character that needs a space before it. input_file: string, path to file to parse. output_file: string, path to destination file. Returns: None.
juraj-google-style
def retrieve_from_web(generate_csv=False): url = 'https: source = urllib.request.urlopen(url) matches = [] while True: line = source.readline() if '</html>' in line: break else: gpu = re.search('<a href=.*>([\\w\\S\\s\\d\\[\\]\\,]+[^*])</a>(<a href=.*)?.*', line) capability = re.search('([\\d]+).([\\d]+)(/)?([\\d]+)?(.)?([\\d]+)?.*</td>.*', line) if gpu: matches.append(gpu.group(1)) elif capability: if capability.group(3): capability_str = capability.group(4) + '.' + capability.group(6) else: capability_str = capability.group(1) + '.' + capability.group(2) matches.append(capability_str) return create_gpu_capa_map(matches, generate_csv)
Retrieves list of all CUDA compute capability from NVIDIA webpage. Args: generate_csv: Boolean for generating an output file containing the results. Returns: OrderedDict that is a list of all CUDA compute capability listed on the NVIDIA page. Order goes from top to bottom of the webpage content (.html).
github-repos
def num_memory_zones(self): count = self._dll.JLINK_GetMemZones(0, 0) if (count < 0): raise errors.JLinkException(count) return count
Returns the number of memory zones supported by the target. Args: self (JLink): the ``JLink`` instance Returns: An integer count of the number of memory zones supported by the target. Raises: JLinkException: on error.
codesearchnet
def gaussian_deriv(duration: int, amp: complex, sigma: float, name: str = None) -> SamplePulse: center = duration/2 return _sampled_gaussian_deriv_pulse(duration, amp, center, sigma, name=name)
Generates unnormalized gaussian derivative `SamplePulse`. Applies `left` sampling strategy to generate discrete pulse from continuous function. Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude at `center`. sigma: Width (standard deviation) of pulse. name: Name of pulse.
juraj-google-style
def install(self, package: str, option: str='-r') -> None: if (not os.path.isfile(package)): raise FileNotFoundError(f'{package!r} does not exist.') for i in option: if (i not in '-lrtsdg'): raise ValueError(f'There is no option named: {option!r}.') self._execute('-s', self.device_sn, 'install', option, package)
Push package to the device and install it. Args: option: -l: forward lock application -r: replace existing application -t: allow test packages -s: install application on sdcard -d: allow version code downgrade (debuggable packages only) -g: grant all runtime permissions
codesearchnet
def get_slab_regions(slab, blength=3.5): fcoords, indices, all_indices = [], [], [] for site in slab: neighbors = slab.get_neighbors(site, blength, include_index=True, include_image=True) for nn in neighbors: if nn[0].frac_coords[2] < 0: fcoords.append(nn[0].frac_coords[2]) indices.append(nn[-2]) if nn[-2] not in all_indices: all_indices.append(nn[-2]) if fcoords: while fcoords: last_fcoords = copy.copy(fcoords) last_indices = copy.copy(indices) site = slab[indices[fcoords.index(min(fcoords))]] neighbors = slab.get_neighbors(site, blength, include_index=True, include_image=True) fcoords, indices = [], [] for nn in neighbors: if 1 > nn[0].frac_coords[2] > 0 and \ nn[0].frac_coords[2] < site.frac_coords[2]: fcoords.append(nn[0].frac_coords[2]) indices.append(nn[-2]) if nn[-2] not in all_indices: all_indices.append(nn[-2]) upper_fcoords = [] for site in slab: if all([nn[-1] not in all_indices for nn in slab.get_neighbors(site, blength, include_index=True)]): upper_fcoords.append(site.frac_coords[2]) coords = copy.copy(last_fcoords) if not fcoords else copy.copy(fcoords) min_top = slab[last_indices[coords.index(min(coords))]].frac_coords[2] ranges = [[0, max(upper_fcoords)], [min_top, 1]] else: sorted_sites = sorted(slab, key=lambda site: site.frac_coords[2]) ranges = [[sorted_sites[0].frac_coords[2], sorted_sites[-1].frac_coords[2]]] return ranges
Function to get the ranges of the slab regions. Useful for discerning where the slab ends and vacuum begins if the slab is not fully within the cell Args: slab (Structure): Structure object modelling the surface blength (float, Ang): The bondlength between atoms. You generally want this value to be larger than the actual bondlengths in order to find atoms that are part of the slab
juraj-google-style
def total_stored(self, wanted, slots=None): if (slots is None): slots = self.window.slots wanted = make_slot_check(wanted) return sum((slot.amount for slot in slots if wanted(slot)))
Calculates the total number of items of that type in the current window or given slot range. Args: wanted: function(Slot) or Slot or itemID or (itemID, metadata)
codesearchnet
class EnsembleAnomalyDetector(AnomalyDetector): def __init__(self, sub_detectors: Optional[list[AnomalyDetector]]=None, aggregation_strategy: Optional[AggregationFn]=None, **kwargs): if 'model_id' not in kwargs or kwargs['model_id'] is None: kwargs['model_id'] = getattr(self, 'spec_type', lambda: 'custom')() super().__init__(**kwargs) self._aggregation_strategy = aggregation_strategy self._sub_detectors = sub_detectors def learn_one(self, x: beam.Row) -> None: raise NotImplementedError def score_one(self, x: beam.Row) -> float: raise NotImplementedError
An abstract base class for an ensemble of anomaly (sub-)detectors. Args: sub_detectors: A list of `AnomalyDetector` used in this ensemble model. aggregation_strategy: An optional `AggregationFn` to apply to the predictions from all sub-detectors and yield an aggregated result. model_id: Inherited from `AnomalyDetector`. features: Inherited from `AnomalyDetector`. target: Inherited from `AnomalyDetector`. threshold_criterion: Inherited from `AnomalyDetector`.
github-repos
def make_initializer(self, dataset, name=None): with ops.name_scope(name, 'make_initializer') as name: dataset_output_types = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), dataset.element_spec) dataset_output_shapes = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), dataset.element_spec) dataset_output_classes = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), dataset.element_spec) nest.assert_same_structure(self.output_types, dataset_output_types) nest.assert_same_structure(self.output_shapes, dataset_output_shapes) for iterator_class, dataset_class in zip(nest.flatten(self.output_classes), nest.flatten(dataset_output_classes)): if iterator_class is not dataset_class: raise TypeError(f'Expected output classes {self.output_classes!r} but got dataset with output classes {dataset_output_classes!r}.') for iterator_dtype, dataset_dtype in zip(nest.flatten(self.output_types), nest.flatten(dataset_output_types)): if iterator_dtype != dataset_dtype: raise TypeError(f'Expected output types {self.output_types!r} but got dataset with output types {dataset_output_types!r}.') for iterator_shape, dataset_shape in zip(nest.flatten(self.output_shapes), nest.flatten(dataset_output_shapes)): if not iterator_shape.is_compatible_with(dataset_shape): raise TypeError(f'Expected output shapes compatible with {self.output_shapes!r} but got dataset with output shapes {dataset_output_shapes!r}.') with ops.colocate_with(self._iterator_resource): return gen_dataset_ops.make_iterator(dataset._variant_tensor, self._iterator_resource, name=name)
Returns a `tf.Operation` that initializes this iterator on `dataset`. Args: dataset: A `Dataset` whose `element_spec` is compatible with this iterator. name: (Optional.) A name for the created operation. Returns: A `tf.Operation` that can be run to initialize this iterator on the given `dataset`. Raises: TypeError: If `dataset` and this iterator do not have a compatible `element_spec`.
github-repos
def train_auto_encoder(self, generative_model, a_logs_list): error_arr = generative_model.update() if error_arr.ndim > 1: error_arr = error_arr.mean() a_logs_list.append(error_arr) self.__logger.debug("The reconstruction error (mean): " + str(error_arr)) return generative_model, a_logs_list
Train the generative model as the Auto-Encoder. Args: generative_model: Generator which draws samples from the `fake` distribution. a_logs_list: `list` of the reconstruction errors. Returns: The tuple data. The shape is... - Generator which draws samples from the `fake` distribution. - `list` of the reconstruction errors.
juraj-google-style
def access_vlan(self, inter_type, inter, vlan_id): config = ET.Element('config') interface = ET.SubElement(config, 'interface', xmlns='urn:brocade.com:mgmt:brocade-interface') int_type = ET.SubElement(interface, inter_type) name = ET.SubElement(int_type, 'name') name.text = inter switchport = ET.SubElement(int_type, 'switchport') access = ET.SubElement(switchport, 'access') accessvlan = ET.SubElement(access, 'accessvlan') accessvlan.text = vlan_id try: self._callback(config) return True except Exception as error: logging.error(error) return False
Add a L2 Interface to a specific VLAN. Args: inter_type: The type of interface you want to configure. Ex. tengigabitethernet, gigabitethernet, fortygigabitethernet. inter: The ID for the interface you want to configure. Ex. 1/0/1 vlan_id: ID for the VLAN interface being modified. Value of 2-4096. Returns: True if command completes successfully or False if not. Raises: None
codesearchnet
def get_user_display_name(self, userid): user_info = self.slack_client.api_call('users.info', user=userid) if user_info.get('ok'): user = user_info.get('user') if user.get('profile'): return user.get('profile').get('display_name') else: return user.get('name') else: return userid
Given a Slack userid, grabs user display_name from api. Args: userid (string): the user id of the user being queried Returns: dict: a dictionary of the api response
juraj-google-style
def _GenerateZipInfo(self, arcname=None, compress_type=None, st=None): if st is None: st = os.stat_result((0o100644, 0, 0, 0, 0, 0, 0, 0, 0, 0)) mtime = time.localtime(st.st_mtime or time.time()) date_time = mtime[0:6] if arcname is None: raise ValueError("An arcname must be provided.") zinfo = zipfile.ZipInfo(arcname, date_time) zinfo.external_attr = (st[0] & 0xFFFF) << 16 if compress_type is None: zinfo.compress_type = self._compression else: zinfo.compress_type = compress_type zinfo.file_size = 0 zinfo.compress_size = 0 zinfo.flag_bits = 0x08 zinfo.CRC = 0x08074b50 zinfo.extra = struct.pack( "<HHIIHH", 0x5855, 12, 0, 0, 0, 0) return zinfo
Generate ZipInfo instance for the given name, compression and stat. Args: arcname: The name in the archive this should take. compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED) st: An optional stat object to be used for setting headers. Returns: ZipInfo instance. Raises: ValueError: If arcname is not provided.
juraj-google-style
def get_conditional_uni(cls, left_parent, right_parent): (left, right, _) = cls._identify_eds_ing(left_parent, right_parent) left_u = (left_parent.U[0] if (left_parent.L == left) else left_parent.U[1]) right_u = (right_parent.U[0] if (right_parent.L == right) else right_parent.U[1]) return (left_u, right_u)
Identify pair univariate value from parents. Args: left_parent(Edge): left parent right_parent(Edge): right parent Returns: tuple[np.ndarray, np.ndarray]: left and right parents univariate.
codesearchnet
def minimum(x, y): return math_ops.minimum(x, y)
Element-wise minimum of two tensors. Args: x: Tensor or variable. y: Tensor or variable. Returns: A tensor.
github-repos
def delete(name, **kwargs): if info(name): comp_obj = _get_computer_object() try: comp_obj.Delete('group', name) log.info('Successfully removed group %s', name) except pywintypes.com_error as exc: msg = 'Failed to remove group {0}. {1}'.format( name, win32api.FormatMessage(exc.excepinfo[5])) log.error(msg) return False else: log.warning('The group %s does not exists.', name) return False return True
Remove the named group Args: name (str): The name of the group to remove Returns: bool: ``True`` if successful, otherwise ``False`` CLI Example: .. code-block:: bash salt '*' group.delete foo
juraj-google-style
def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1): if self.recurrent_dropout == 0: return None init_kwargs = dict(inputs=inputs, training=training, count=count) return self._recurrent_dropout_mask_cache.setdefault(kwargs=init_kwargs)
Get the recurrent dropout mask for an RNN cell. It will create a mask based on context if there isn't any existing cached mask. If a new mask is generated, it will update the cache in the cell. Args: inputs: The input tensor whose shape will be used to generate the dropout mask. training: Boolean tensor, whether it's in training mode; dropout will be ignored in non-training mode. count: Int, how many dropout masks will be generated. It is useful for cells that have internal weights fused together. Returns: List of mask tensors, generated or cached masks based on context.
github-repos
def _find_config(self): for search_path in self.config_paths: for ext in self._fmt_to_ext.get(self.config_format): path = os.path.abspath(os.path.join(search_path, (self.config_name + ext))) if os.path.isfile(path): self.config_file = path return raise BisonError('No file named {} found in search paths {}'.format(self.config_name, self.config_paths))
Searches through the configured `config_paths` for the `config_name` file. If there are no `config_paths` defined, this will raise an error, so the caller should take care to check the value of `config_paths` first. Returns: str: The fully qualified path to the configuration that was found. Raises: Exception: No paths are defined in `config_paths` or no file with the `config_name` was found in any of the specified `config_paths`.
codesearchnet
def version(self): version = int(self._dll.JLINKARM_GetDLLVersion()) major = (version / 10000) minor = ((version / 100) % 100) rev = (version % 100) rev = ('' if (rev == 0) else chr(((rev + ord('a')) - 1))) return ('%d.%02d%s' % (major, minor, rev))
Returns the device's version. The device's version is returned as a string of the format: M.mr where ``M`` is major number, ``m`` is minor number, and ``r`` is revision character. Args: self (JLink): the ``JLink`` instance Returns: Device version string.
codesearchnet
def __init__(self, api_key: str, config: interfaces.Config=interfaces.Config()) -> None: self._config = config p_topic_generator = topic_generator.TopicGenerator(api_key=api_key, config=config) p_topic_researcher = topic_researcher.TopicResearcher(api_key=api_key, config=config) p_topic_verbalizer = topic_verbalizer.TopicVerbalizer(config=config) p_genai_model = genai_model.GenaiModel(api_key=api_key, model_name=self._config.research_synthesizer_model_name) p_preamble = preamble.Preamble(content=[ProcessorPart(prompts.SYNTHESIS_PREAMBLE), ProcessorPart('Research text: ')]) p_suffix = preamble.Suffix(content=[ProcessorPart('Your synthesized research: ')]) self._pipeline = p_topic_generator + p_topic_researcher + p_topic_verbalizer + p_preamble + p_suffix + p_genai_model
Initializes the Research Agent. Args: api_key: The API key to use for the GenAI API. config: The configuration for the Research Agent.
github-repos
def delete_submission(self, submission_id): LOG.info("Deleting clinvar submission %s", submission_id) submission_obj = self.clinvar_submission_collection.find_one({ '_id' : ObjectId(submission_id)}) submission_variants = submission_obj.get('variant_data') submission_casedata = submission_obj.get('case_data') submission_objects = [] if submission_variants and submission_casedata: submission_objects = submission_variants + submission_casedata elif submission_variants: submission_objects = submission_variants elif submission_casedata: submission_objects = submission_casedata result = self.clinvar_collection.delete_many({'_id': { "$in": submission_objects} }) deleted_objects = result.deleted_count result = self.clinvar_submission_collection.delete_one({'_id': ObjectId(submission_id)}) deleted_submissions = result.deleted_count return deleted_objects,deleted_submissions
Deletes a Clinvar submission object, along with all associated clinvar objects (variants and casedata) Args: submission_id(str): the ID of the submission to be deleted Returns: deleted_objects(int): the number of associated objects removed (variants and/or casedata) deleted_submissions(int): 1 if it's deleted, 0 if something went wrong
juraj-google-style
def append_column(table, col_name, default_value=None): table[0].append(col_name.strip()) for row in table[1:]: row.append(default_value)
Appends a column to the raw table data in place, without any integrity checks.

Args:
    table: The table (a list of rows, with the header row first) to modify.
    col_name: Name of the new column; surrounding whitespace is stripped.
    default_value: The value which will be assigned, not copied, into each row.
juraj-google-style
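A quick usage sketch for `append_column` above; the table contents are invented, and the final comment shows the in-place result.

```python
table = [
    ["name", "age"],          # header row
    ["alice", 31],
    ["bob", 28],
]

append_column(table, " city ", default_value="unknown")

# The header gets the stripped column name and every data row gets the same
# default_value object appended (assigned, not copied).
print(table)
# [['name', 'age', 'city'], ['alice', 31, 'unknown'], ['bob', 28, 'unknown']]
```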
def resolve_for(self, node): self.node = node self.actual_text = normalize_text( node.visible_text if self.query_type == "visible" else node.all_text) self.count = len(re.findall(self.search_regexp, self.actual_text)) return self.count
Resolves this query relative to the given node. Args: node (node.Base): The node to be evaluated. Returns: int: The number of matches found.
juraj-google-style
def add_evaluation_step(result_tensor, ground_truth_tensor): with tf.name_scope('accuracy'): with tf.name_scope('correct_prediction'): prediction = tf.argmax(result_tensor, 1) correct_prediction = tf.equal(prediction, ground_truth_tensor) with tf.name_scope('accuracy'): evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) tf.summary.scalar('accuracy', evaluation_step) return evaluation_step, prediction
Inserts the operations we need to evaluate the accuracy of our results. Args: result_tensor: The new final node that produces results. ground_truth_tensor: The node we feed ground truth data into. Returns: Tuple of (evaluation step, prediction).
juraj-google-style
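A hedged end-to-end sketch of wiring `add_evaluation_step` into a graph. It assumes the function above is importable from a module written against the TF1-style graph-mode API (here accessed via `tf.compat.v1`); the logits and labels are fake and chosen so the accuracy comes out as 1.0.

```python
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

logits = tf.placeholder(tf.float32, [None, 3], name='logits')
labels = tf.placeholder(tf.int64, [None], name='labels')

# add_evaluation_step is the function defined in the entry above.
evaluation_step, prediction = add_evaluation_step(logits, labels)

with tf.Session() as sess:
    acc, pred = sess.run(
        [evaluation_step, prediction],
        feed_dict={logits: np.eye(3, dtype=np.float32), labels: [0, 1, 2]})
    print(acc, pred)  # 1.0 [0 1 2]
```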
def cast_to_str(obj): if isinstance(obj, str): return obj if isinstance(obj, Seq): return str(obj) if isinstance(obj, SeqRecord): return str(obj.seq) else: raise ValueError('Must provide a string, Seq, or SeqRecord object.')
Return a string representation of a str, Seq, or SeqRecord.

Args:
    obj (str, Seq, SeqRecord): String, Biopython Seq, or SeqRecord object.

Returns:
    str: String representation of the sequence.
codesearchnet
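A short usage sketch for `cast_to_str` above, assuming Biopython is installed and the function's module already imports `Seq` and `SeqRecord`; the sequence string and record id are invented.

```python
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

print(cast_to_str("ATGC"))                             # plain strings pass through
print(cast_to_str(Seq("ATGC")))                        # Seq -> 'ATGC'
print(cast_to_str(SeqRecord(Seq("ATGC"), id="demo")))  # SeqRecord -> 'ATGC'

try:
    cast_to_str(1234)
except ValueError as err:
    print(err)  # Must provide a string, Seq, or SeqRecord object.
```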
async def apply(self, sender: str, recipient: str, mailbox: str, append_msg: AppendMessage) \ -> Tuple[Optional[str], AppendMessage]: ...
Run the filter and return the mailbox where it should be appended, or None to discard, and the message to be appended, which is usually the same as ``append_msg``.

Args:
    sender: The envelope sender of the message.
    recipient: The envelope recipient of the message.
    mailbox: The intended mailbox to append the message to.
    append_msg: The message to be appended.

Raises:
    :exc:`~pymap.exceptions.AppendFailure`
juraj-google-style
def laplacian_pyramid_image(shape, n_levels=4, sd=None):
    batch_dims = shape[:-3]
    w, h, ch = shape[-3:]
    pyramid = 0
    for n in range(n_levels):
        k = 2 ** n
        # Each level adds a lower-resolution tensor, upsampled back to the full size.
        pyramid += lowres_tensor(shape, batch_dims + (w // k, h // k, ch), sd=sd)
    return pyramid
Simple Laplacian pyramid parameterization of an image.

For more flexibility, use a sum of lowres_tensor()s.

Args:
    shape: shape of resulting image, [batch, width, height, channels].
    n_levels: number of levels of the Laplacian pyramid.
    sd: standard deviation of param initialization.

Returns:
    tensor with shape from first argument.
juraj-google-style
def returns(desc=None, printer=None, data=True): if (data is False): raise ArgumentError('Specifying non data return type in returns is no longer supported') def _returns(func): annotated(func) func.custom_returnvalue(printer, desc) return func return _returns
Specify how the return value of this function should be handled. Args: desc (str): A deprecated description of the return value printer (callable): A callable function that can format this return value data (bool): A deprecated parameter for specifying that this function returns data.
codesearchnet
def experimental_distribute_dataset(self, dataset, options=None): return super(OneDeviceStrategy, self).experimental_distribute_dataset(dataset, options)
Distributes a tf.data.Dataset instance provided via dataset.

In this case, there is only one device, so this is only a thin wrapper around the input dataset. It will, however, prefetch the input data to the specified device. The returned distributed dataset can be iterated over similar to how regular datasets can.

NOTE: Currently, the user cannot add any more transformations to a distributed dataset.

Example:
```
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
dataset = tf.data.Dataset.range(10).batch(2)
dist_dataset = strategy.experimental_distribute_dataset(dataset)
for x in dist_dataset:
  print(x)  # [0, 1], [2, 3],...
```

Args:
  dataset: `tf.data.Dataset` to be prefetched to device.
  options: `tf.distribute.InputOptions` used to control options on how this dataset is distributed.

Returns:
  A "distributed `Dataset`" that the caller can iterate over.
github-repos
def _to_df(self, result, handle_annotations=None): annotations = result._data if handle_annotations == 'first': annotations = [annotations[0]] face_results = [] for i, annotation in enumerate(annotations): data_dict = {} for field, val in annotation.items(): if 'Confidence' in field: data_dict['face_' + field] = val elif 'oundingPoly' in field: for j, vertex in enumerate(val['vertices']): for dim in ['x', 'y']: name = '%s_vertex%d_%s' % (field, j+1, dim) val = vertex[dim] if dim in vertex else np.nan data_dict[name] = val elif field == 'landmarks': for lm in val: name = 'landmark_' + lm['type'] + '_%s' lm_pos = {name % k: v for (k, v) in lm['position'].items()} data_dict.update(lm_pos) else: data_dict[field] = val face_results.append(data_dict) return pd.DataFrame(face_results)
Converts a Google API Face JSON response into a Pandas Dataframe. Args: result (ExtractorResult): Result object from which to parse out a Dataframe. handle_annotations (str): How returned face annotations should be handled in cases where there are multiple faces. 'first' indicates to only use the first face JSON object, all other values will default to including every face.
juraj-google-style
def std_dev(self, value): if value == self._defaults['stdDev'] and 'stdDev' in self._values: del self._values['stdDev'] else: self._values['stdDev'] = value
The std_dev property.

Args:
    value (float): The property value.
juraj-google-style
def docx_process_simple_text(text: str, width: int) -> str: if width: return '\n'.join(textwrap.wrap(text, width=width)) else: return text
Word-wraps text. Args: text: text to process width: width to word-wrap to (or 0 to skip word wrapping) Returns: wrapped text
codesearchnet
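A tiny self-contained illustration of the two branches above, using only the standard library; the sample text and width are made up.

```python
import textwrap

def docx_process_simple_text(text: str, width: int) -> str:
    if width:
        return '\n'.join(textwrap.wrap(text, width=width))
    return text

sample = "Word wrapping keeps report columns readable in fixed-width output."
print(docx_process_simple_text(sample, 20))  # wrapped at 20 characters per line
print(docx_process_simple_text(sample, 0))   # width 0 skips wrapping entirely
```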
def _find_address_values_in_chain(self, base_contexts, addresses_to_find): contexts_in_chain = deque() contexts_in_chain.extend(base_contexts) reads = list(addresses_to_find) address_values = [] context_ids_already_searched = [] context_ids_already_searched.extend(base_contexts) while reads: try: current_c_id = contexts_in_chain.popleft() except IndexError: break current_context = self._contexts[current_c_id] deleted_addresses = current_context.get_if_deleted(reads) for address in deleted_addresses: if address is not None: address_values.append((address, None)) reads = list(set(reads) - set(deleted_addresses)) values = current_context.get_if_set(reads) addresses_not_found = [] for address, value in zip(reads, values): if value is not None: address_values.append((address, value)) else: addresses_not_found.append(address) reads = addresses_not_found addresses_in_inputs = [address for address in reads if address in current_context] values = current_context.get_if_not_set(addresses_in_inputs) address_values.extend(list(zip(addresses_in_inputs, values))) for add in addresses_in_inputs: reads.remove(add) for c_id in current_context.base_contexts: if c_id not in context_ids_already_searched: contexts_in_chain.append(c_id) context_ids_already_searched.append(c_id) return address_values, reads
Breadth first search through the chain of contexts searching for the bytes values at the addresses in addresses_to_find. Args: base_contexts (list of str): The context ids to start with. addresses_to_find (list of str): Addresses to find values in the chain of contexts. Returns: tuple of found address_values and still not found addresses
juraj-google-style
def scale(self, width: int, height: int) -> None: lib.TCOD_image_scale(self.image_c, width, height) self.width, self.height = width, height
Scale this Image to the new width and height. Args: width (int): The new width of the Image after scaling. height (int): The new height of the Image after scaling.
juraj-google-style
def _update_context_field_binary_composition(present_locations, expression): if not any((isinstance(expression.left, ContextField), isinstance(expression.right, ContextField))): raise AssertionError(u'Received a BinaryComposition {} without any ContextField ' u'operands. This should never happen.'.format(expression)) if isinstance(expression.left, ContextField): context_field = expression.left location_name, _ = context_field.location.get_location_name() if location_name not in present_locations: return TrueLiteral if isinstance(expression.right, ContextField): context_field = expression.right location_name, _ = context_field.location.get_location_name() if location_name not in present_locations: return TrueLiteral return expression
Lower BinaryCompositions involving non-existent ContextFields to True. Args: present_locations: set of all locations in the current MatchQuery that have not been pruned expression: BinaryComposition with at least one ContextField operand Returns: TrueLiteral iff either ContextField operand is not in `present_locations`, and the original expression otherwise
juraj-google-style
def _distro_release_info(self): if self.distro_release_file: distro_info = self._parse_distro_release_file(self.distro_release_file) basename = os.path.basename(self.distro_release_file) match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) if match: distro_info['id'] = match.group(1) return distro_info else: try: basenames = os.listdir(_UNIXCONFDIR) basenames.sort() except OSError: basenames = ['SuSE-release', 'arch-release', 'base-release', 'centos-release', 'fedora-release', 'gentoo-release', 'mageia-release', 'mandrake-release', 'mandriva-release', 'mandrivalinux-release', 'manjaro-release', 'oracle-release', 'redhat-release', 'sl-release', 'slackware-version'] for basename in basenames: if (basename in _DISTRO_RELEASE_IGNORE_BASENAMES): continue match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) if match: filepath = os.path.join(_UNIXCONFDIR, basename) distro_info = self._parse_distro_release_file(filepath) if ('name' in distro_info): self.distro_release_file = filepath distro_info['id'] = match.group(1) return distro_info return {}
Get the information items from the specified distro release file. Returns: A dictionary containing all information items.
codesearchnet
def _check_property(self, rest=None, require_indexed=True): if require_indexed and not self._indexed: raise InvalidPropertyError('Property is unindexed %s' % self._name) if rest: raise InvalidPropertyError('Referencing subproperty %s.%s ' 'but %s is not a structured property' % (self._name, rest, self._name))
Internal helper to check this property for specific requirements. Called by Model._check_properties(). Args: rest: Optional subproperty to check, of the form 'name1.name2...nameN'. Raises: InvalidPropertyError if this property does not meet the given requirements or if a subproperty is specified. (StructuredProperty overrides this method to handle subproperties.)
juraj-google-style
def get_organization(self, **kwargs): resp = self._get(self._u(self._ORGANIZATION_ENDPOINT_SUFFIX), **kwargs) resp.raise_for_status() return resp.json()
Get the organization to which the user belongs.

Returns:
    Dictionary of the response.
codesearchnet
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): super(EncryptResponsePayload, self).read(input_stream, kmip_version=kmip_version) local_stream = utils.BytearrayStream(input_stream.read(self.length)) if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream): self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER) self._unique_identifier.read(local_stream, kmip_version=kmip_version) else: raise ValueError('invalid payload missing the unique identifier attribute') if self.is_tag_next(enums.Tags.DATA, local_stream): self._data = primitives.ByteString(tag=enums.Tags.DATA) self._data.read(local_stream, kmip_version=kmip_version) else: raise ValueError('invalid payload missing the data attribute') if self.is_tag_next(enums.Tags.IV_COUNTER_NONCE, local_stream): self._iv_counter_nonce = primitives.ByteString(tag=enums.Tags.IV_COUNTER_NONCE) self._iv_counter_nonce.read(local_stream, kmip_version=kmip_version) self.is_oversized(local_stream)
Read the data encoding the Encrypt response payload and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the unique_identifier or data attributes are missing from the encoded payload.
codesearchnet
def SmartSet(self, obj, attr_name, new_attr): _, obj = tf_decorator.unwrap(obj) if tf_inspect.ismodule(obj) or (not tf_inspect.isclass(obj) and attr_name in obj.__dict__): orig_obj = obj orig_attr = getattr(obj, attr_name) else: if not tf_inspect.isclass(obj): mro = list(tf_inspect.getmro(obj.__class__)) else: mro = list(tf_inspect.getmro(obj)) mro.reverse() orig_attr = None found_attr = False for cls in mro: try: orig_obj = cls orig_attr = getattr(obj, attr_name) found_attr = True except AttributeError: continue if not found_attr: raise AttributeError('Attribute not found.') old_attribute = obj.__dict__.get(attr_name) if old_attribute is not None and isinstance(old_attribute, staticmethod): orig_attr = staticmethod(orig_attr) self.stubs.append((orig_obj, attr_name, orig_attr)) setattr(orig_obj, attr_name, new_attr)
Replace obj.attr_name with new_attr. This method is smart and works at the module, class, and instance level while preserving proper inheritance. It will not stub out C types however unless that has been explicitly allowed by the type. This method supports the case where attr_name is a staticmethod or a classmethod of obj. Notes: - If obj is an instance, then it is its class that will actually be stubbed. Note that the method Set() does not do that: if obj is an instance, it (and not its class) will be stubbed. - The stubbing is using the builtin getattr and setattr. So, the __get__ and __set__ will be called when stubbing (TODO: A better idea would probably be to manipulate obj.__dict__ instead of getattr() and setattr()). Args: obj: The object whose attributes we want to modify. attr_name: The name of the attribute to modify. new_attr: The new value for the attribute. Raises: AttributeError: If the attribute cannot be found.
github-repos
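A hedged usage sketch: `SmartSet` belongs to TensorFlow's stubbing helper, exposed as `tf.compat.v1.test.StubOutForTesting`, and the toy class below is invented purely to show class-level stubbing and cleanup.

```python
import tensorflow as tf

class Greeter:
    def hello(self):
        return "hello"

stubber = tf.compat.v1.test.StubOutForTesting()

# Class-level stub: SmartSet walks the MRO and patches the class that defines the attribute.
stubber.SmartSet(Greeter, "hello", lambda self: "stubbed")
print(Greeter().hello())   # "stubbed" -- every instance sees the stub

stubber.SmartUnsetAll()    # put the original method back
print(Greeter().hello())   # "hello"
```

Per the notes in the docstring above, passing an instance instead of the class still patches the class, so the stub is visible to all instances until it is unset.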
def rvs(self, size=1): return np.random.multivariate_normal(self.mean, self.cov, size)
Convenience method to sample from this distribution. Args: size (int or tuple): Shape of return value. Each element is drawn independently from this distribution.
juraj-google-style
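Since the surrounding distribution class isn't shown, here is a minimal sketch of the NumPy call that `rvs` wraps; the mean and covariance values are invented.

```python
import numpy as np

mean = np.zeros(2)
cov = np.array([[1.0, 0.3],
                [0.3, 2.0]])

# Equivalent to dist.rvs(size=5) for a distribution with this mean and cov.
samples = np.random.multivariate_normal(mean, cov, 5)
print(samples.shape)  # (5, 2): five independent draws from the 2-D Gaussian
```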
def GetArtifactKnowledgeBase(client_obj, allow_uninitialized=False): client_schema = client_obj.Schema kb = client_obj.Get(client_schema.KNOWLEDGE_BASE) if (not allow_uninitialized): if (not kb): raise artifact_utils.KnowledgeBaseUninitializedError(('KnowledgeBase empty for %s.' % client_obj.urn)) if (not kb.os): raise artifact_utils.KnowledgeBaseAttributesMissingError(('KnowledgeBase missing OS for %s. Knowledgebase content: %s' % (client_obj.urn, kb))) if (not kb): kb = client_schema.KNOWLEDGE_BASE() SetCoreGRRKnowledgeBaseValues(kb, client_obj) if (kb.os == 'Windows'): if ((not kb.environ_allusersappdata) and kb.environ_allusersprofile): if (kb.os_major_version >= 6): kb.environ_allusersappdata = u'c:\\programdata' kb.environ_allusersprofile = u'c:\\programdata' else: kb.environ_allusersappdata = u'c:\\documents and settings\\All Users\\Application Data' kb.environ_allusersprofile = u'c:\\documents and settings\\All Users' return kb
This generates an artifact knowledge base from a GRR client.

Args:
    client_obj: A GRRClient object which is opened for reading.
    allow_uninitialized: If True we accept an uninitialized knowledge_base.

Returns:
    A KnowledgeBase semantic value.

Raises:
    ArtifactProcessingError: If called when the knowledge base has not been initialized.
    KnowledgeBaseUninitializedError: If we failed to initialize the knowledge base.

This is needed so that the artifact library has a standardized interface to the data that is actually stored in the GRRClient object in the GRR datastore. We expect that the client KNOWLEDGE_BASE is already filled out through the KnowledgeBaseInitialization flow, but attempt to make some intelligent guesses if things failed.
codesearchnet