code: string, lengths 20 to 4.93k
docstring: string, lengths 33 to 1.27k
source: string, 3 classes
def FromFile(cls, in_path): with open(in_path, "rb") as infile: in_data = json.load(infile) if not all(key in in_data for key in ('trace', 'selectors')): raise ArgumentError("Invalid trace file format", keys=in_data.keys(), expected=('trace', 'selectors')) selectors = [DataStreamSelector.FromString(x) for x in in_data['selectors']] readings = [IOTileReading(x['time'], DataStream.FromString(x['stream']).encode(), x['value'], reading_id=x['reading_id']) for x in in_data['trace']] return SimulationTrace(readings, selectors=selectors)
Load a previously saved ascii representation of this simulation trace. Args: in_path (str): The path of the input file that we should load. Returns: SimulationTrace: The loaded trace object.
juraj-google-style
def get_replicas(self, service_id: str) -> int: replicas = [] if (not self._manager): raise RuntimeError('Only the Swarm manager node can retrieve replication level of the service') service_tasks = self._client.services.get(service_id).tasks() for task in service_tasks: if (task['Status']['State'] == 'running'): replicas.append(task) return len(replicas)
Get the replication level of a service. Args: service_id (str): docker swarm service id Returns: int, replication level (number of running tasks) of the service
codesearchnet
def traverse_postorder(self, leaves=True, internal=True): s1 = deque() s2 = deque() s1.append(self) while (len(s1) != 0): n = s1.pop() s2.append(n) s1.extend(n.children) while (len(s2) != 0): n = s2.pop() if ((leaves and n.is_leaf()) or (internal and (not n.is_leaf()))): (yield n)
Perform a postorder traversal starting at this ``Node`` object Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
codesearchnet
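For readers skimming the rows, here is a minimal, self-contained sketch of the same two-stack postorder idea shown in the traverse_postorder row above; the Node class below is a hypothetical stand-in, not the library's own, and exists only to make the visit order visible.

from collections import deque

class Node:
    def __init__(self, label, children=None):
        self.label = label
        self.children = children or []

    def is_leaf(self):
        return not self.children

def postorder(root):
    s1, s2 = deque([root]), deque()
    while s1:                       # first pass: collect nodes, parents before children
        n = s1.pop()
        s2.append(n)
        s1.extend(n.children)
    while s2:                       # second pass: pop in reverse, so children come first
        yield s2.pop()

root = Node('a', [Node('b', [Node('d'), Node('e')]), Node('c')])
print([n.label for n in postorder(root)])  # ['d', 'e', 'b', 'c', 'a']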
def AddOption(self, descriptor, constant=False): if self.initialized: raise AlreadyInitializedError(('Config was already initialized when defining %s' % descriptor.name)) descriptor.section = descriptor.name.split('.')[0] if (descriptor.name in self.type_infos): logging.warning('Config Option %s multiply defined!', descriptor.name) self.type_infos.Append(descriptor) if constant: self.constants.add(descriptor.name) self.defaults[descriptor.name] = descriptor.GetDefault() self.FlushCache()
Registers an option with the configuration system. Args: descriptor: A TypeInfoObject instance describing the option. constant: If this is set, the option is treated as a constant - it can be read at any time (before parsing the configuration) and it's an error to try to override it in a config file. Raises: RuntimeError: The descriptor's name must contain a . to denote the section name, otherwise we raise. AlreadyInitializedError: If the config has already been read it's too late to define new options.
codesearchnet
def get_all_aminames(i_info): for i in i_info: try: i_info[i]['aminame'] = EC2R.Image(i_info[i]['ami']).name except AttributeError: i_info[i]['aminame'] = 'Unknown' return i_info
Get Image_Name for each instance in i_info. Args: i_info (dict): information on instances and details. Returns: i_info (dict): i_info is returned with the aminame added for each instance.
codesearchnet
def _convert_to_tensors_or_sparse_tensors(a, b): a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name='a') if a.dtype.base_dtype not in _VALID_DTYPES: raise TypeError(f"'a' has invalid dtype `{a.dtype}` not in supported dtypes: `{_VALID_DTYPES}`.") b = sparse_tensor.convert_to_tensor_or_sparse_tensor(b, name='b') if b.dtype.base_dtype != a.dtype.base_dtype: raise TypeError("Types don't match, %s vs %s." % (a.dtype, b.dtype)) if isinstance(a, sparse_tensor.SparseTensor) and (not isinstance(b, sparse_tensor.SparseTensor)): return (b, a, True) return (a, b, False)
Convert to tensor types, and flip order if necessary. Args: a: `Tensor` or `SparseTensor` of the same type as `b`. b: `Tensor` or `SparseTensor` of the same type as `a`. Returns: Tuple of `(a, b, flipped)`, where `a` and `b` have been converted to `Tensor` or `SparseTensor`, and `flipped` indicates whether the order has been flipped to make it dense,sparse instead of sparse,dense (since the set ops do not support the latter).
github-repos
def verify_oauth2_token(id_token, request, audience=None): return verify_token( id_token, request, audience=audience, certs_url=_GOOGLE_OAUTH2_CERTS_URL)
Verifies an ID Token issued by Google's OAuth 2.0 authorization server. Args: id_token (Union[str, bytes]): The encoded token. request (google.auth.transport.Request): The object used to make HTTP requests. audience (str): The audience that this token is intended for. This is typically your application's OAuth 2.0 client ID. If None then the audience is not verified. Returns: Mapping[str, Any]: The decoded token.
juraj-google-style
def sign(check_request): if not isinstance(check_request, sc_messages.CheckRequest): raise ValueError(u'Invalid request') op = check_request.operation if op is None or op.operationName is None or op.consumerId is None: logging.error(u'Bad %s: not initialized => not signed', check_request) raise ValueError(u'check request must be initialized with an operation') md5 = hashlib.md5() md5.update(op.operationName.encode('utf-8')) md5.update(b'\x00') md5.update(op.consumerId.encode('utf-8')) if op.labels: signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels)) for value_set in op.metricValueSets: md5.update(b'\x00') md5.update(value_set.metricName.encode('utf-8')) for mv in value_set.metricValues: metric_value.update_hash(md5, mv) md5.update(b'\x00') if op.quotaProperties: md5.update(repr(op.quotaProperties).encode('utf-8')) md5.update(b'\x00') return md5.digest()
Obtains a signature for the operation in a `CheckRequest`. Args: check_request (:class:`endpoints_management.gen.servicecontrol_v1_messages.CheckRequest`): a check request whose operation is initialized Returns: string: a secure hash generated from the request's operation
juraj-google-style
def _ListDir(dirpath, pathtype): pathspec = rdf_paths.PathSpec(path=dirpath, pathtype=pathtype) childpaths = [] try: file_obj = vfs.VFSOpen(pathspec) for path in file_obj.ListNames(): if pathtype != rdf_paths.PathSpec.PathType.REGISTRY or path: childpaths.append(path) except IOError: pass return childpaths
Returns children of a given directory. This function is intended to be used by the `PathComponent` subclasses to get initial list of potential children that then need to be filtered according to the rules of a specific component. Args: dirpath: A path to the directory. pathtype: The pathtype to use. Raises: ValueError: in case of unsupported path types.
juraj-google-style
def execute_add(args, root_dir=None): command = ' '.join(args['command']) instruction = { 'command': command, 'path': os.getcwd() } print_command_factory('add')(instruction, root_dir)
Add a new command to the daemon queue. Args: args['command'] (list(str)): The actual program call. Something like ['ls', '-a'] or ['ls -al'] root_dir (string): The path to the root directory the daemon is running in.
juraj-google-style
def _get_stringlist_from_commastring(self, field): strings = self.data.get(field) if strings: return strings.split(',') else: return list()
Return list of strings from comma separated list Args: field (str): Field containing comma separated list Returns: List[str]: List of strings
juraj-google-style
def _get_class_frame_source(class_name): for frame_info in inspect.stack(): try: with open(frame_info[1]) as fp: src = "".join(fp.readlines()[frame_info[2] - 1 :]) except IOError: continue if re.search(r"\bclass\b\s+\b{}\b".format(class_name), src): reader = six.StringIO(src).readline tokens = tokenize.generate_tokens(reader) source_tokens = [] indent_level = 0 base_indent_level = 0 has_base_level = False for token, value, _, _, _ in tokens: source_tokens.append((token, value)) if token == tokenize.INDENT: indent_level += 1 elif token == tokenize.DEDENT: indent_level -= 1 if has_base_level and indent_level <= base_indent_level: return ( tokenize.untokenize(source_tokens), frame_info[0].f_globals, frame_info[0].f_locals, ) elif not has_base_level: has_base_level = True base_indent_level = indent_level raise TypeError( 'Unable to retrieve source for class "{}"'.format(class_name) )
Return the source code for a class by checking the frame stack. This is necessary because it is not possible to get the source of a class being created by a metaclass directly. Args: class_name: The class to look for on the stack. Returns: The source code for the requested class if the class was found and the source was accessible.
juraj-google-style
def _lookup_dependency(self, name, cached_dependencies=None): if cached_dependencies: return cached_dependencies.get(name) return self._self_unconditional_dependency_names.get(name)
Look up a dependency by name. May be overridden to include conditional dependencies. Args: name: The local name of the dependency. cached_dependencies: Optional dict containing all computed dependencies returned by `self._trackable_children()`. Returns: A `Trackable` object, or `None` if no dependency by this name was found.
github-repos
def _add_consequences(self, variant_obj, raw_variant_line): consequences = [] for consequence in SO_TERMS: if consequence in raw_variant_line: consequences.append(consequence) variant_obj.consequences = consequences
Add the consequences found for a variant Args: variant_obj (puzzle.models.Variant) raw_variant_line (str): A raw vcf variant line
juraj-google-style
async def _on_report_notification(self, event): conn_string = event.get('connection_string') report = self._report_parser.deserialize_report(event.get('serialized_report')) self.notify_event(conn_string, 'report', report)
Callback function called when a report event is received. Args: event (dict): The report_event
codesearchnet
def __init__(self, channel): self.ComputeThreatListDiff = channel.unary_unary( "/google.cloud.webrisk.v1beta1.WebRiskServiceV1Beta1/ComputeThreatListDiff", request_serializer=google_dot_cloud_dot_webrisk__v1beta1_dot_proto_dot_webrisk__pb2.ComputeThreatListDiffRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_webrisk__v1beta1_dot_proto_dot_webrisk__pb2.ComputeThreatListDiffResponse.FromString, ) self.SearchUris = channel.unary_unary( "/google.cloud.webrisk.v1beta1.WebRiskServiceV1Beta1/SearchUris", request_serializer=google_dot_cloud_dot_webrisk__v1beta1_dot_proto_dot_webrisk__pb2.SearchUrisRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_webrisk__v1beta1_dot_proto_dot_webrisk__pb2.SearchUrisResponse.FromString, ) self.SearchHashes = channel.unary_unary( "/google.cloud.webrisk.v1beta1.WebRiskServiceV1Beta1/SearchHashes", request_serializer=google_dot_cloud_dot_webrisk__v1beta1_dot_proto_dot_webrisk__pb2.SearchHashesRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_webrisk__v1beta1_dot_proto_dot_webrisk__pb2.SearchHashesResponse.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def _load_third_party_packages(self): modules = collections.defaultdict(set) stubs = set() for third_party_file in self._store.list_files('stubs'): parts = third_party_file.split(path_utils.sep) filename = parts[-1] if filename == 'METADATA.toml' or parts[1] == '@tests': continue if filename.endswith('.pyi'): stubs.add(parts[0]) name, _ = path_utils.splitext(parts[1]) modules[parts[0]].add(name) packages = collections.defaultdict(set) for package, names in modules.items(): for name in names: if package in stubs: packages[name].add(package) return packages
Loads package and Python version information for typeshed/stubs/. stubs/ contains type information for third-party packages. Each top-level directory corresponds to one PyPI package and contains one or more modules, plus a metadata file (METADATA.toml). The top-level directory may contain a @tests subdirectory for typeshed testing. Returns: A mapping from module name to a set of package names.
github-repos
def predict_array(self, arr): precompute = self.precompute self.precompute = False pred = super().predict_array(arr) self.precompute = precompute return pred
This override is necessary because otherwise the learner method accesses the wrong model when it is called with precompute set to True. Args: arr: a numpy array to be used as input to the model for prediction purposes Returns: a numpy array containing the predictions from the model
codesearchnet
def get_ordered_names(self, features): idxs = np.where( np.in1d(self.data.columns.values, np.array(features)))[0] return list(self.data.columns[idxs].values)
Given a list of features, returns features in order that they appear in database. Args: features (list): A list or 1D numpy array of named features to return. Returns: A list of features in order they appear in database.
juraj-google-style
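The point of the docstring above (results come back in database column order, not in the order requested) is easy to demonstrate with plain NumPy; this is a standalone sketch, not a call into the library.

import numpy as np

columns = np.array(['age', 'height', 'weight', 'bmi'])   # stand-in for self.data.columns.values
features = ['bmi', 'age']
idxs = np.where(np.in1d(columns, np.array(features)))[0]
print(list(columns[idxs]))  # ['age', 'bmi'] -- column order wins, not request order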
def get_covalent_bonds(self, tol=0.2): bonds = [] for site1, site2 in itertools.combinations(self._sites, 2): if CovalentBond.is_bonded(site1, site2, tol): bonds.append(CovalentBond(site1, site2)) return bonds
Determines the covalent bonds in a molecule. Args: tol (float): The tol to determine bonds in a structure. See CovalentBond.is_bonded. Returns: List of bonds
juraj-google-style
def initialize_means(data, clusters, k): init_w = np.zeros((data.shape[0], k)) if sparse.issparse(data): for i in range(k): if (data[:, (clusters == i)].shape[1] == 0): point = np.random.randint(0, data.shape[1]) init_w[:, i] = data[:, point].toarray().flatten() else: init_w[:, i] = (np.array(data[:, (clusters == i)].mean(1)).flatten() + eps) else: for i in range(k): if (data[:, (clusters == i)].shape[1] == 0): point = np.random.randint(0, data.shape[1]) init_w[:, i] = data[:, point].flatten() else: init_w[:, i] = (data[:, (clusters == i)].mean(1) + eps) return init_w
Initializes the M matrix given the data and a set of cluster labels. Cluster centers are set to the mean of each cluster. Args: data (array): genes x cells clusters (array): 1d array of ints (0...k-1) k (int): number of clusters Returns: array: genes x k matrix of initial cluster means
codesearchnet
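A hedged, dense-only NumPy sketch of the same initialization rule (each of the k centres is the mean of its assigned columns, with a random column as a fallback for empty clusters); eps is an assumed small constant here, since the snippet above relies on one defined elsewhere in its module.

import numpy as np

eps = 1e-10  # assumed small constant; the original module defines its own

def init_means_dense(data, clusters, k):
    # data: genes x cells, clusters: 1-D array of labels in 0..k-1
    init_w = np.zeros((data.shape[0], k))
    for i in range(k):
        members = data[:, clusters == i]
        if members.shape[1] == 0:          # empty cluster: fall back to a random cell
            init_w[:, i] = data[:, np.random.randint(data.shape[1])]
        else:
            init_w[:, i] = members.mean(1) + eps
    return init_w

data = np.random.rand(5, 20)
clusters = np.random.randint(0, 3, size=20)
print(init_means_dense(data, clusters, 3).shape)  # (5, 3)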
def write_unitth(suites, out_dir): if (not os.path.isdir(out_dir)): os.mkdir(out_dir) for (classname, cases) in suites.items(): doc_xml = minidom.Document() suite_xml = doc_xml.createElement('testsuite') suite_xml.setAttribute('name', classname) suite_xml.setAttribute('tests', str(len(cases))) suite_xml.setAttribute('errors', str(sum((('error' in case) for case in cases)))) suite_xml.setAttribute('failures', str(sum((('failure' in case) for case in cases)))) suite_xml.setAttribute('skipped', str(sum((('skipped' in case) for case in cases)))) suite_xml.setAttribute('time', '{:.3f}'.format(sum((case['time'] for case in cases)))) doc_xml.appendChild(suite_xml) for case in cases: case_xml = doc_xml.createElement('testcase') case_xml.setAttribute('classname', classname) case_xml.setAttribute('name', case['name']) case_xml.setAttribute('time', '{:.3f}'.format(case['time'])) suite_xml.appendChild(case_xml) if ('skipped' in case): skipped_xml = doc_xml.createElement('skipped') skipped_xml.setAttribute('type', case['skipped']['type']) skipped_xml.setAttribute('message', case['skipped']['message']) case_xml.appendChild(skipped_xml) skipped_text_xml = doc_xml.createCDATASection(case['skipped']['text']) skipped_xml.appendChild(skipped_text_xml) if ('failure' in case): failure_xml = doc_xml.createElement('failure') failure_xml.setAttribute('type', case['failure']['type']) failure_xml.setAttribute('message', case['failure']['message']) case_xml.appendChild(failure_xml) failure_text_xml = doc_xml.createCDATASection(case['failure']['text']) failure_xml.appendChild(failure_text_xml) if ('error' in case): error_xml = doc_xml.createElement('error') error_xml.setAttribute('type', case['error']['type']) error_xml.setAttribute('message', case['error']['message']) case_xml.appendChild(error_xml) error_text_xml = doc_xml.createCDATASection(case['error']['text']) error_xml.appendChild(error_text_xml) with open(os.path.join(out_dir, '{}.xml'.format(classname)), 'w') as output: doc_xml.writexml(output, encoding='utf-8', addindent='', newl='') doc_xml.unlink()
Write UnitTH-style test reports Args: suites (:obj:`dict`): dictionary of test suites out_dir (:obj:`str`): path to save UnitTH-style test reports
codesearchnet
def cache_memlimit(self, memlimit): self._fetch_cmd(b'cache_memlimit', [str(int(memlimit))], False) return True
The memcached "cache_memlimit" command. Args: memlimit: int, the number of megabytes to set as the new cache memory limit. Returns: If no exception is raised, always returns True.
juraj-google-style
def find_overlaps(self, index=False): return self.__find_incongruities(op=operator.gt, index=index)
Find overlaps in a striplog. Args: index (bool): If True, returns indices of intervals with overlaps after them. Returns: Striplog: A striplog of all the overlaps as intervals.
juraj-google-style
def same_intersection(intersection1, intersection2, wiggle=(0.5 ** 40)): if (intersection1.index_first != intersection2.index_first): return False if (intersection1.index_second != intersection2.index_second): return False return np.allclose([intersection1.s, intersection1.t], [intersection2.s, intersection2.t], atol=0.0, rtol=wiggle)
Check if two intersections are close to machine precision. .. note:: This is a helper used only by :func:`verify_duplicates`, which in turn is only used by :func:`generic_intersect`. Args: intersection1 (.Intersection): The first intersection. intersection2 (.Intersection): The second intersection. wiggle (Optional[float]): The amount of relative error allowed in parameter values. Returns: bool: Indicates if the two intersections are the same to machine precision.
codesearchnet
def publish(msg='checkpoint: publish package'): test = check() if test.succeeded: sdist = local('python setup.py sdist') if sdist.succeeded: build = local('python setup.py build && python setup.py bdist_egg') if build.succeeded: upload = local('twine upload dist/*') if upload.succeeded: tag()
Deploy the app to PYPI. Args: msg (str, optional): Description
codesearchnet
def get_object(cls, api_token, id): load_balancer = cls(token=api_token, id=id) load_balancer.load() return load_balancer
Class method that will return a LoadBalancer object by its ID. Args: api_token (str): DigitalOcean API token id (str): Load Balancer ID
codesearchnet
def country(self, value=None): if value is not None: try: value = str(value) except ValueError: raise ValueError('value {} need to be of type str ' 'for field `country`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `country`') self._country = value
Corresponds to IDD Field `country` Args: value (str): value for IDD Field `country` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def pretty_printer(cls, primitive_handler_: primitive_handler.PrimitiveHandler, indent_size: int) -> 'JsonPrinter': return cls(primitive_handler_, _PrettyJsonTextGenerator(indent_size), _FhirJsonFormat.PURE)
Returns a printer for FHIR JSON with spaces and newlines. Args: primitive_handler_: Responsible for returning PrimitiveWrappers. indent_size: The size of space indentation for lexical scoping.
github-repos
def _set_mtu_to_nics(self, conf): for dom_name, dom_spec in conf.get('domains', {}).items(): for idx, nic in enumerate(dom_spec.get('nics', [])): net = self._get_net(conf, dom_name, nic) mtu = net.get('mtu', 1500) if mtu != 1500: nic['mtu'] = mtu
For all the nics of all the domains in the conf that have MTU set, save the MTU on the NIC definition. Args: conf (dict): Configuration spec to extract the domains from Returns: None
juraj-google-style
def fullStats(a, b): stats = [['bias', 'Bias', bias(a, b)], ['stderr', 'Standard Deviation Error', stderr(a, b)], ['mae', 'Mean Absolute Error', mae(a, b)], ['rmse', 'Root Mean Square Error', rmse(a, b)], ['nmse', 'Normalized Mean Square Error', nmse(a, b)], ['mfbe', 'Mean Fractionalized bias Error', mfbe(a, b)], ['fa2', 'Factor of Two', fa(a, b, 2)], ['foex', 'Factor of Exceedance', foex(a, b)], ['correlation', 'Correlation R', correlation(a, b)], ['determination', 'Coefficient of Determination r2', determination(a, b)], ['gmb', 'Geometric Mean Bias', gmb(a, b)], ['gmv', 'Geometric Mean Variance', gmv(a, b)], ['fmt', 'Figure of Merit in Time', fmt(a, b)]] rec = np.rec.fromrecords(stats, names=('stat', 'description', 'result')) df = pd.DataFrame.from_records(rec, index='stat') return df
Performs several stats on a against b; typically a is the predictions array and b the observations array. Returns: A DataFrame of stat name, stat description, result
codesearchnet
def __init__( self, resolver_context, file_system, path_spec, is_root=False, is_virtual=False): bde_volume = file_system.GetBDEVolume() if bde_volume is None: raise errors.BackEndError('Missing BDE volume.') super(BDEFileEntry, self).__init__( resolver_context, file_system, path_spec, is_root=is_root, is_virtual=is_virtual) self._bde_volume = bde_volume self.entry_type = definitions.FILE_ENTRY_TYPE_FILE
Initializes the file entry object. Args: resolver_context (Context): resolver context. file_system (FileSystem): file system. path_spec (PathSpec): path specification. is_root (Optional[bool]): True if the file entry is the root file entry of the corresponding file system. is_virtual (Optional[bool]): True if the file entry is a virtual file entry. Raises: BackEndError: when the BDE volume is missing.
juraj-google-style
def register_for_auto_class(cls, auto_class='AutoModel'): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f'{auto_class} is not a valid auto class.') cls._auto_class = auto_class
Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class. Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoModel"`): The auto class to register this new model with.
github-repos
def asset(self, asset_id, asset_type, action='GET'): if not self.can_update(): self._tcex.handle_error(910, [self.type]) if asset_type == 'PHONE': return self.tc_requests.adversary_phone_asset( self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action ) if asset_type == 'HANDLER': return self.tc_requests.adversary_handle_asset( self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action ) if asset_type == 'URL': return self.tc_requests.adversary_url_asset( self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action ) self._tcex.handle_error( 925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type] ) return None
Gets the asset with the provided id Args: asset_id: The id of the asset to be retrieved asset_type: (str) Either PHONE, HANDLER, or URL action: Returns:
juraj-google-style
def cancel( self, accountID, orderSpecifier, **kwargs ): request = Request( 'PUT', '/v3/accounts/{accountID}/orders/{orderSpecifier}/cancel' ) request.set_path_param( 'accountID', accountID ) request.set_path_param( 'orderSpecifier', orderSpecifier ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} if str(response.status) == "200": if jbody.get('orderCancelTransaction') is not None: parsed_body['orderCancelTransaction'] = \ self.ctx.transaction.OrderCancelTransaction.from_dict( jbody['orderCancelTransaction'], self.ctx ) if jbody.get('relatedTransactionIDs') is not None: parsed_body['relatedTransactionIDs'] = \ jbody.get('relatedTransactionIDs') if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "404": if jbody.get('orderCancelRejectTransaction') is not None: parsed_body['orderCancelRejectTransaction'] = \ self.ctx.transaction.OrderCancelRejectTransaction.from_dict( jbody['orderCancelRejectTransaction'], self.ctx ) if jbody.get('relatedTransactionIDs') is not None: parsed_body['relatedTransactionIDs'] = \ jbody.get('relatedTransactionIDs') if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') else: parsed_body = jbody response.body = parsed_body return response
Cancel a pending Order in an Account Args: accountID: Account Identifier orderSpecifier: The Order Specifier Returns: v20.response.Response containing the results from submitting the request
juraj-google-style
def read(alias_name, allow_none=False): warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2) return core.read('{0}_PORT'.format(alias_name), default=None, allow_none=allow_none)
Get the raw docker link value. Get the raw environment variable for the docker link. Args: alias_name: The environment variable name allow_none: If the return value can be `None` (i.e. optional)
juraj-google-style
def cancelPnLSingle(self, account: str, modelCode: str, conId: int): key = (account, modelCode, conId) reqId = self.wrapper.pnlSingleKey2ReqId.pop(key, None) if reqId: self.client.cancelPnLSingle(reqId) self.wrapper.pnlSingles.pop(reqId, None) else: self._logger.error(f'cancelPnLSingle: No subscription for account {account}, modelCode {modelCode}, conId {conId}')
Cancel PnLSingle subscription for the given account, modelCode and conId. Args: account: Cancel for this account name. modelCode: Cancel for this account model. conId: Cancel for this contract ID.
codesearchnet
def default_datastore_policy(key): flag = None if (key is not None): modelclass = model.Model._kind_map.get(key.kind()) if (modelclass is not None): policy = getattr(modelclass, '_use_datastore', None) if (policy is not None): if isinstance(policy, bool): flag = policy else: flag = policy(key) return flag
Default datastore policy. This defers to _use_datastore on the Model class. Args: key: Key instance. Returns: A bool or None.
codesearchnet
def _read_mode_tcpao(self, size, kind): key_ = self._read_unpack(1) rkey = self._read_unpack(1) mac_ = self._read_fileng((size - 2)) data = dict(kind=kind, length=size, keyid=key_, rnextkeyid=rkey, mac=mac_) return data
Read Authentication option. Positional arguments: * size - int, length of option * kind - int, 29 (TCP Authentication Option) Returns: * dict -- extracted Authentication (AO) option
Structure of TCP AO option [RFC 5925]:
+------------+------------+------------+------------+
|  Kind=29   |   Length   |   KeyID    | RNextKeyID |
+------------+------------+------------+------------+
|                     MAC            ...
+-----------------------------------...
                  ...-----------------+
                  ...  MAC (con't)    |
                  ...-----------------+
Octets  Bits  Name                Description
0       0     tcp.ao.kind         Kind (29)
1       8     tcp.ao.length       Length
2       16    tcp.ao.keyid        KeyID
3       24    tcp.ao.rnextkeyid   RNextKeyID
4       32    tcp.ao.mac          Message Authentication Code
codesearchnet
def get_mpkg_ids(mpkg): mpkg = _quote(mpkg) package_infos = [] base_path = os.path.dirname(mpkg) cmd = 'find {0} -name *.pkg'.format(base_path) out = __salt__['cmd.run'](cmd, python_shell=True) pkg_files = out.split('\n') for p in pkg_files: package_infos.extend(get_pkg_id(p)) return package_infos
Attempt to get the package IDs from a mounted .mpkg file Args: mpkg (str): The location of the mounted mpkg file Returns: list: List of package IDs CLI Example: .. code-block:: bash salt '*' macpackage.get_mpkg_ids /dev/disk2
codesearchnet
def sensor_id(self): if hasattr(self, '_sensor_id'): return self._sensor_id relationships = self._json_data.get('relationships') sensor_id = relationships.get('sensor').get('data').get('id') self._sensor_id = sensor_id return sensor_id
The id of the sensor of this data point. Returns: The id of the sensor that generated this datapoint. Will throw an AttributeError if no sensor id was found in the underlying data.
codesearchnet
def _add_future(cls, future): if cls._local._activated: cls._local._in_order_futures.add(future)
Adds a future to the list of in-order futures thus far. Args: future: The future to add to the list.
codesearchnet
def __init__(self, action, debug_urls, debug_ops='DebugIdentity', node_name_regex_allowlist=None, op_type_regex_allowlist=None, tensor_dtype_regex_allowlist=None, tolerate_debug_op_creation_failures=False): _check_type(action, str) self.action = action _check_type(debug_urls, list) self.debug_urls = debug_urls self.debug_ops = debug_ops self.node_name_regex_allowlist = node_name_regex_allowlist self.op_type_regex_allowlist = op_type_regex_allowlist self.tensor_dtype_regex_allowlist = tensor_dtype_regex_allowlist self.tolerate_debug_op_creation_failures = tolerate_debug_op_creation_failures
Constructor of `OnRunStartResponse`. Args: action: (`OnRunStartAction`) the action actually taken by the wrapped session for the run() call. debug_urls: (`list` of `str`) debug_urls used in watching the tensors during the run() call. debug_ops: (`str` or `list` of `str`) Debug op(s) to be used by the debugger. node_name_regex_allowlist: Regular-expression allowlist for node name. op_type_regex_allowlist: Regular-expression allowlist for op type. tensor_dtype_regex_allowlist: Regular-expression allowlist for tensor dtype. tolerate_debug_op_creation_failures: Whether debug op creation failures are to be tolerated.
github-repos
def setup(self, target_directory=None): self._target_directory = target_directory if not target_directory: self._target_directory = tempfile.mkdtemp() elif not os.path.exists(target_directory): try: os.makedirs(target_directory) except OSError as exception: message = 'An unknown error occurred: {0!s}'.format(exception) self.state.add_error(message, critical=True)
Sets up the _target_directory attribute. Args: target_directory: Directory in which collected files will be dumped.
juraj-google-style
def default(self, obj): if isinstance(obj, decimal.Decimal): obj = format(obj, 'f') str_digit = text_type(obj) return (str_digit.rstrip('0').rstrip('.') if '.' in str_digit else str_digit) elif isinstance(obj, phonenumbers.PhoneNumber): return phonenumbers.format_number( obj, phonenumbers.PhoneNumberFormat.E164 ) elif isinstance(obj, pendulum.Pendulum): return text_type(obj) elif isinstance(obj, arrow.Arrow): return text_type(obj) elif isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() try: return list(iter(obj)) except TypeError: pass return super(FleakerJSONEncoder, self).default(obj)
Encode individual objects into their JSON representation. This method is used by :class:`flask.json.JSONEncoder` to encode individual items in the JSON object. Args: obj (object): Any Python object we wish to convert to JSON. Returns: str: The stringified, valid JSON representation of our provided object.
juraj-google-style
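The default() hook pattern used above works the same way with the standard-library encoder; a minimal sketch handling only Decimal and dates, independent of Flask/Fleaker, so the class name here is made up.

import json, decimal, datetime

class DemoEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, decimal.Decimal):
            text = format(obj, 'f')
            return text.rstrip('0').rstrip('.') if '.' in text else text
        if isinstance(obj, (datetime.datetime, datetime.date)):
            return obj.isoformat()
        return super().default(obj)  # let the base class raise for unsupported types

print(json.dumps({'price': decimal.Decimal('1.2300'), 'day': datetime.date(2020, 1, 2)}, cls=DemoEncoder))
# {"price": "1.23", "day": "2020-01-02"}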
def report(self, branch, commit, infourl=None): issue_number = self._get_report_issue_number() if issue_number: self._report_as_comment(issue_number, branch, commit, infourl) else: self._report_as_issue(branch, commit, infourl)
Report on GitHub that the specified branch is failing to build at the specified commit. The method will open an issue indicating that the branch is failing. If there is an issue already open, it will add a comment instead, to avoid reporting twice about the same failure. Args: branch (str): branch name to report about. commit (str): commit hash at which the build fails. infourl (str): URL with extra info about the failure such as the build logs.
codesearchnet
def minimum_image(self, r1, r2): delta_r = (r2 - r1) delta_r = np.array([((x - math.copysign(1.0, x)) if (abs(x) > 0.5) else x) for x in delta_r]) return delta_r
Find the minimum image vector from point r1 to point r2. Args: r1 (np.array): fractional coordinates of point r1. r2 (np.array): fractional coordinates of point r2. Returns: (np.array): the fractional coordinate vector from r1 to the nearest image of r2.
codesearchnet
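The wrap-around behaviour described above can be checked by hand; the following is a standalone reimplementation of the convention for illustration, not a call to the class method.

import math
import numpy as np

def minimum_image(r1, r2):
    delta = np.asarray(r2, dtype=float) - np.asarray(r1, dtype=float)
    # shift any component with |x| > 0.5 back toward the nearest periodic image
    return np.array([x - math.copysign(1.0, x) if abs(x) > 0.5 else x for x in delta])

print(minimum_image([0.1, 0.1, 0.1], [0.9, 0.2, 0.1]))  # approximately [-0.2, 0.1, 0.0]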
def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3, probe_rad=0.1): with ScratchDir('.'): name = "temp_zeo" zeo_inp_filename = name + ".cssr" ZeoCssr(structure).write_file(zeo_inp_filename) rad_file = None if rad_dict: rad_file = name + ".rad" with open(rad_file, 'w') as fp: for el in rad_dict.keys(): fp.write("{0} {1}".format(el, rad_dict[el])) atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file) vol_str = volume(atmnet, chan_rad, probe_rad, 10000) sa_str = surface_area(atmnet, chan_rad, probe_rad, 10000) vol = None sa = None for line in vol_str.split("\n"): if "Number_of_pockets" in line: fields = line.split() if float(fields[1]) > 1: vol = -1.0 break if float(fields[1]) == 0: vol = -1.0 break vol = float(fields[3]) for line in sa_str.split("\n"): if "Number_of_pockets" in line: fields = line.split() if float(fields[1]) > 1: sa = -1.0 break if float(fields[1]) == 0: sa = -1.0 break sa = float(fields[3]) if not vol or not sa: raise ValueError("Error in zeo++ output stream") return vol, sa
Computes the volume and surface area of isolated void using Zeo++. Useful to compute the volume and surface area of vacant site. Args: structure: pymatgen Structure containing vacancy rad_dict(optional): Dictionary with short name of elements and their radii. chan_rad(optional): Minimum channel Radius. probe_rad(optional): Probe radius for Monte Carlo sampling. Returns: volume, surface_area: floating numbers representing the volume and surface area of the void (-1.0 if zero or multiple pockets are found)
juraj-google-style
def qualified_name(self): o = VersionedObject.construct(self.name, self.version) return str(o)
Get the qualified name of the package. Returns: str: Name of the package with version, eg "maya-2016.1".
codesearchnet
def douglas_adi_step(theta=0.5): def _step_fn(time, next_time, coord_grid, value_grid, boundary_conditions, second_order_coeff_fn, first_order_coeff_fn, zeroth_order_coeff_fn, inner_second_order_coeff_fn, inner_first_order_coeff_fn, num_steps_performed, dtype=None, name=None): del num_steps_performed name = name or 'douglas_adi_step' return multidim_parabolic_equation_step(time, next_time, coord_grid, value_grid, boundary_conditions, douglas_adi_scheme(theta), second_order_coeff_fn, first_order_coeff_fn, zeroth_order_coeff_fn, inner_second_order_coeff_fn, inner_first_order_coeff_fn, dtype=dtype, name=name) return _step_fn
Creates a stepper function with the Douglas ADI time marching scheme. Douglas ADI scheme is the simplest time marching scheme for solving parabolic PDEs with multiple spatial dimensions. The time step consists of several substeps: the first one is fully explicit, and the following `N` steps are implicit with respect to contributions of one of the `N` axes (hence "ADI" - alternating direction implicit). See `douglas_adi_scheme` below for more details. Args: theta: positive Number. `theta = 0` corresponds to fully explicit scheme. The larger `theta` the stronger are the corrections by the implicit substeps. The recommended value is `theta = 0.5`, because the scheme is second order accurate in that case, unless mixed second derivative terms are present in the PDE. Returns: Callable to be used in finite-difference PDE solvers (see fd_solvers.py).
github-repos
def parse(self, argument): if isinstance(argument, self.enum_class): return argument if (argument not in self.enum_class.__members__): raise ValueError(('value should be one of <%s>' % '|'.join(self.enum_class.__members__.keys()))) else: return self.enum_class[argument]
Determines validity of argument and returns the correct element of enum. Args: argument: str or Enum class member, the supplied flag value. Returns: The first matching Enum class member in Enum class. Raises: ValueError: Raised when argument didn't match anything in enum.
codesearchnet
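The accept-a-member-or-its-name contract in the parse row above is easy to mirror with a plain standard-library Enum; a small hedged sketch follows.

import enum

class Color(enum.Enum):
    RED = 1
    GREEN = 2

def parse_enum(enum_class, argument):
    if isinstance(argument, enum_class):
        return argument                      # already a member: pass it through
    if argument not in enum_class.__members__:
        raise ValueError('value should be one of <%s>' % '|'.join(enum_class.__members__))
    return enum_class[argument]              # look the member up by name

print(parse_enum(Color, 'RED'))        # Color.RED
print(parse_enum(Color, Color.GREEN))  # Color.GREEN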
def set_time(self, value: float): if (value < 0): value = 0 self.offset += (self.get_time() - value)
Set the current time. This can be used to jump in the timeline. Args: value (float): The new time
codesearchnet
def __register_methods(self, parsed_config): methods = parsed_config.get('methods') if not methods: return for method_name, method in methods.iteritems(): self.__api_methods[method_name] = method.get('rosyMethod')
Register all methods from the given api config file. Methods are stored in a map from method_name to rosyMethod, the name of the ProtoRPC method to be called on the backend. If no rosyMethod was specified the value will be None. Args: parsed_config: The JSON object with the API configuration being added.
juraj-google-style
def run(self): if (not self.block): self.output = [] self.error = [] self.thread = threading.Thread(target=self.run_non_blocking) self.thread.start() else: self.__create_process() self.process.wait() if (self._stdout is not None): self.output = self.process.stdout.read().decode('utf-8') if (self._stderr is not None): self.error = self.process.stderr.read().decode('utf-8') self.return_code = self.process.returncode return self
Run the shell command Returns: ShellCommand: return this ShellCommand instance for chaining
codesearchnet
def train(total_loss, global_step): num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY) lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True) tf.summary.scalar('learning_rate', lr) loss_averages_op = _add_loss_summaries(total_loss) with tf.control_dependencies([loss_averages_op]): opt = tf.train.GradientDescentOptimizer(lr) grads = opt.compute_gradients(total_loss) apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) for var in tf.trainable_variables(): tf.summary.histogram(var.op.name, var) for grad, var in grads: if grad is not None: tf.summary.histogram(var.op.name + '/gradients', grad) variable_averages = tf.train.ExponentialMovingAverage( MOVING_AVERAGE_DECAY, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) with tf.control_dependencies([apply_gradient_op, variables_averages_op]): train_op = tf.no_op(name='train') return train_op
Train CIFAR-10 model. Create an optimizer and apply to all trainable variables. Add moving average for all trainable variables. Args: total_loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training.
juraj-google-style
def continuous_partition_data(data, bins='auto', n_bins=10): if (bins == 'uniform'): bins = np.linspace(start=np.min(data), stop=np.max(data), num=(n_bins + 1)) elif (bins == 'ntile'): bins = np.percentile(data, np.linspace(start=0, stop=100, num=(n_bins + 1))) elif (bins != 'auto'): raise ValueError('Invalid parameter for bins argument') (hist, bin_edges) = np.histogram(data, bins, density=False) return {'bins': bin_edges, 'weights': (hist / len(data))}
Convenience method for building a partition object on continuous data Args: data (list-like): The data from which to construct the estimate. bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto' (for automatically spaced bins) n_bins (int): Ignored if bins is auto. Returns: A new partition_object:: { "bins": (list) The endpoints of the partial partition of reals, "weights": (list) The densities of the bins implied by the partition. }
codesearchnet
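A quick worked example of the 'uniform' branch with plain NumPy, kept standalone rather than importing the library function above.

import numpy as np

data = np.random.normal(size=1000)
n_bins = 10
bins = np.linspace(data.min(), data.max(), n_bins + 1)   # 11 edges -> 10 uniform bins
hist, bin_edges = np.histogram(data, bins, density=False)
partition = {'bins': bin_edges, 'weights': hist / len(data)}
print(len(partition['bins']), round(partition['weights'].sum(), 6))  # 11 1.0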
def mangle_scope_tree(root, toplevel): def mangle(scope): if ((scope.get_enclosing_scope() is None) and (not toplevel)): return for name in scope.symbols: mangled_name = scope.get_next_mangled_name() scope.mangled[name] = mangled_name scope.rev_mangled[mangled_name] = name def visit(node): mangle(node) for child in node.children: visit(child) visit(root)
Walk over a scope tree and mangle symbol names. Args: root: The root of the scope tree to traverse. toplevel: Defines if global scope should be mangled or not.
codesearchnet
def get_or_create_node(self, level, entities, *args, **kwargs): result = self.get_nodes(level, entities) if result: if (len(result) > 1): raise ValueError("More than one matching Node found! If you're expecting more than one Node, use get_nodes() instead of get_or_create_node().") return result[0] if (level == 'run'): node = RunNode(entities, *args, **kwargs) else: node = Node(level, entities) entities = dict(entities, node_index=len(self.nodes), level=level) self.nodes.append(node) node_row = pd.Series(entities) self.index = self.index.append(node_row, ignore_index=True) return node
Retrieves a child Node based on the specified criteria, creating a new Node if necessary. Args: level (str): The level of the Node to retrieve or create (e.g., 'run'). entities (dict): Dictionary of entities specifying which Node to return. args, kwargs: Optional positional or named arguments to pass onto class-specific initializers. These arguments are only used if a Node that matches the passed entities doesn't already exist, and a new one must be created. Returns: A Node instance.
codesearchnet
def word_error_rate(raw_predictions, labels, lookup=None, weights_fn=common_layers.weights_nonzero): def from_tokens(raw, lookup_): gathered = tf.gather(lookup_, tf.cast(raw, tf.int32)) joined = tf.regex_replace(tf.reduce_join(gathered, axis=1), b'<EOS>.*', b'') cleaned = tf.regex_replace(joined, b'_', b' ') tokens = tf.string_split(cleaned, ' ') return tokens def from_characters(raw, lookup_): 'Convert ascii+2 encoded codes to string-tokens.' corrected = tf.bitcast(tf.clip_by_value(tf.subtract(raw, 2), 0, 255), tf.uint8) gathered = tf.gather(lookup_, tf.cast(corrected, tf.int32))[:, :, 0] joined = tf.reduce_join(gathered, axis=1) cleaned = tf.regex_replace(joined, b'\x00', b'') tokens = tf.string_split(cleaned, ' ') return tokens if (lookup is None): lookup = tf.constant([chr(i) for i in range(256)]) convert_fn = from_characters else: convert_fn = from_tokens if (weights_fn is not common_layers.weights_nonzero): raise ValueError('Only weights_nonzero can be used for this metric.') with tf.variable_scope('word_error_rate', values=[raw_predictions, labels]): raw_predictions = tf.squeeze(tf.argmax(raw_predictions, axis=(- 1)), axis=(2, 3)) labels = tf.squeeze(labels, axis=(2, 3)) reference = convert_fn(labels, lookup) predictions = convert_fn(raw_predictions, lookup) distance = tf.reduce_sum(tf.edit_distance(predictions, reference, normalize=False)) reference_length = tf.cast(tf.size(reference.values, out_type=tf.int32), dtype=tf.float32) return ((distance / reference_length), reference_length)
Calculate word error rate. Args: raw_predictions: The raw predictions. labels: The actual labels. lookup: A tf.constant mapping indices to output tokens. weights_fn: Weighting function. Returns: The word error rate.
codesearchnet
def LoadGDAL(filename, no_data=None): if not GDAL_AVAILABLE: raise Exception("richdem.LoadGDAL() requires GDAL.") allowed_types = {gdal.GDT_Byte,gdal.GDT_Int16,gdal.GDT_Int32,gdal.GDT_UInt16,gdal.GDT_UInt32,gdal.GDT_Float32,gdal.GDT_Float64} src_ds = gdal.Open(filename) srcband = src_ds.GetRasterBand(1) if no_data is None: no_data = srcband.GetNoDataValue() if no_data is None: raise Exception("The source data did not have a NoData value. Please use the no_data argument to specify one. It should not be equal to any of the actual data values. If you are using all possible data values, then the situation is pretty hopeless - sorry.") srcdata = rdarray(srcband.ReadAsArray(), no_data=no_data) if not srcband.DataType in allowed_types: raise Exception("This datatype is not supported. Please file a bug report on RichDEM.") srcdata.projection = src_ds.GetProjectionRef() srcdata.geotransform = src_ds.GetGeoTransform() srcdata.metadata = dict() for k,v in src_ds.GetMetadata().items(): srcdata.metadata[k] = v _AddAnalysis(srcdata, "LoadGDAL(filename={0}, no_data={1})".format(filename, no_data)) return srcdata
Read a GDAL file. Opens any file GDAL can read, selects the first raster band, and loads it and its metadata into a RichDEM array of the appropriate data type. If you need to do something more complicated, look at the source of this function. Args: filename (str): Name of the raster file to open no_data (float): Optionally, set the no_data value to this. Returns: A RichDEM array
juraj-google-style
def find_triggers(nodes, trigger_words, nosec_lines): trigger_nodes = list() for node in nodes: if (node.line_number not in nosec_lines): trigger_nodes.extend(iter(label_contains(node, trigger_words))) return trigger_nodes
Find triggers from the trigger_words in the nodes. Args: nodes(list[Node]): the nodes to find triggers in. trigger_words(list[Union[Sink, Source]]): list of trigger words to look for. nosec_lines(set): lines with # nosec whitelisting Returns: List of found TriggerNodes
codesearchnet
def serialize(struct, format, target=None, encoding='utf-8'): if (hasattr(target, 'encoding') and target.encoding): raise AnyMarkupError('Input file must be opened in binary mode') fname = None if hasattr(target, 'name'): fname = target.name fmt = _get_format(format, fname) try: serialized = _do_serialize(struct, fmt, encoding) if (target is None): return serialized else: return target.write(serialized) except Exception as e: raise AnyMarkupError(e, traceback.format_exc())
Serialize given structure and return it as encoded string or write it to file-like object. Args: struct: structure (dict or list) with unicode members to serialize; note that list can only be serialized to json format: specify markup format to serialize structure as target: binary-opened file-like object to serialize to; if None (default), the result will be returned instead of writing to `target` encoding: encoding to use when serializing, defaults to utf-8 Returns: bytestring with serialized structure if `target` is None; return value of `target.write` otherwise Raises: AnyMarkupError if a problem occurs while serializing
codesearchnet
def set_refresh(self, timeout, callback, *callback_args): GObject.timeout_add(timeout, callback, *callback_args)
It is just a stub to simplify setting a timeout. Args: timeout (int): timeout in milliseconds, after which callback will be called callback (callable): usually, just a function that will be called each time after timeout *callback_args (any type): arguments that will be passed to callback function
juraj-google-style
def read_configs_(self): if (not self.config_files_): return ({}, [], []) content = {section: {} for section in self} empty_files = [] faulty_files = [] for cfile in self.config_files_: conf_dict = self.read_config_(cfile) if (conf_dict is None): faulty_files.append(cfile) continue elif (not conf_dict): empty_files.append(cfile) continue for (section, secdict) in conf_dict.items(): content[section].update(secdict) return (content, empty_files, faulty_files)
Read config files and set config values accordingly. Returns: (dict, list, list): respectively content of files, list of missing/empty files and list of files for which a parsing error arose.
codesearchnet
def train_model(preprocessed_dataset_path: str, trained_model_path: str, base_artifact_path: str): timestamp = time.time() model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg16', pretrained=True) target_path = f'{base_artifact_path}/training/trained_model_{timestamp}.pt' target_path_gcsfuse = target_path.replace('gs://', '/gcs/') Path(target_path_gcsfuse).parent.mkdir(parents=True, exist_ok=True) torch.save(model.state_dict(), target_path_gcsfuse) Path(trained_model_path).parent.mkdir(parents=True, exist_ok=True) with open(trained_model_path, 'w') as f: f.write(target_path)
Placeholder method to load a model from the torch hub and save it. Args: preprocessed_dataset_path (str): Path to the preprocessed dataset trained_model_path (str): Output path for the trained model base_artifact_path (str): path to the base directory of where artifacts can be stored for this component
github-repos
def _hertz_to_mel(self, frequencies_hertz): return _MEL_HIGH_FREQUENCY_Q * self.backend.numpy.log(1.0 + frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ)
Converts frequencies in `frequencies_hertz` in Hertz to the mel scale. Args: frequencies_hertz: A tensor of frequencies in Hertz. Returns: A tensor of the same shape and type of `frequencies_hertz` containing frequencies in the mel scale.
github-repos
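The formula behind the row above is mel = Q * ln(1 + f / f_break). Assuming the usual values of the two module constants (Q = 1127.0 and f_break = 700.0 Hz; an assumption, since the constants are not shown in the snippet), a NumPy check:

import numpy as np

MEL_HIGH_FREQUENCY_Q = 1127.0        # assumed value of _MEL_HIGH_FREQUENCY_Q
MEL_BREAK_FREQUENCY_HERTZ = 700.0    # assumed value of _MEL_BREAK_FREQUENCY_HERTZ

def hertz_to_mel(frequencies_hertz):
    return MEL_HIGH_FREQUENCY_Q * np.log(1.0 + np.asarray(frequencies_hertz) / MEL_BREAK_FREQUENCY_HERTZ)

print(hertz_to_mel([0.0, 1000.0]))   # [0. ~1000.], i.e. 1 kHz maps to roughly 1000 mel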
def Notify(self, message_type, subject, msg, source): pending = self.Get(self.Schema.PENDING_NOTIFICATIONS) if (pending is None): pending = self.Schema.PENDING_NOTIFICATIONS() if (message_type.split(':', 2)[0] not in rdf_flows.Notification.notification_types): raise TypeError(('Invalid notification type %s' % message_type)) pending.Append(type=message_type, subject=subject, message=msg, source=source, timestamp=int((time.time() * 1000000.0))) while (len(pending) > 50): pending.Pop(0) self.Set(self.Schema.PENDING_NOTIFICATIONS, pending)
Send an AFF4-based notification to the user in the UI. Args: message_type: One of aff4_grr.Notification.notification_types e.g. "ViewObject", "HostInformation", "GrantAccess" or the same with an added ":[new-style notification type] suffix, e.g. "ViewObject:TYPE_CLIENT_INTERROGATED". subject: The subject to use, normally a URN. msg: The message to display. source: The class doing the notification. Raises: TypeError: On invalid message_type.
codesearchnet
def menu(self, prompt, choices): menu = ([prompt] + ['{0}. {1}'.format(*choice) for choice in enumerate(choices, start=1)]) command = 'inputlist({})'.format(repr(menu)) choice = int(self._vim.eval(command)) if (not (0 < choice < len(menu))): return return choices[(choice - 1)]
Presents a selection menu and returns the user's choice. Args: prompt (str): Text to ask the user what to select. choices (Sequence[str]): Values for the user to select from. Returns: The value selected by the user, or ``None``. Todo: Nice opportunity to provide a hook for Unite.vim, etc. here.
codesearchnet
def plot(self, freq=None, figsize=(15, 5), title=None, logy=False, **kwargs): if (title is None): title = self._get_default_plot_title(freq, 'Equity Progression') ser = self._get_series(freq).rebase() return ser.plot(figsize=figsize, logy=logy, title=title, **kwargs)
Helper function for plotting the series. Args: * freq (str): Data frequency used for display purposes. Refer to pandas docs for valid freq strings. * figsize ((x,y)): figure size * title (str): Title if default not appropriate * logy (bool): log-scale for y axis * kwargs: passed to pandas' plot method
codesearchnet
def check_graphs(*args): graph = None for i, sgv in enumerate(args): if graph is None and sgv.graph is not None: graph = sgv.graph elif sgv.graph is not None and sgv.graph is not graph: raise ValueError(f'args[{i}] does not belong to the same graph as other arguments.')
Check that all the element in args belong to the same graph. Args: *args: a list of object with a obj.graph property. Raises: ValueError: if all the elements do not belong to the same graph.
github-repos
def update_serial(self, new_serial): new_serial = str(new_serial) if self.has_active_service: raise DeviceError(self, 'Cannot change device serial number when there is service running.') if self._debug_tag == self.serial: self._debug_tag = new_serial self._serial = new_serial self.adb.serial = new_serial self.fastboot.serial = new_serial
Updates the serial number of a device. The "serial number" used with adb's `-s` arg is not necessarily the actual serial number. For remote devices, it could be a combination of host names and port numbers. This is used for when such identifier of remote devices changes during a test. For example, when a remote device reboots, it may come back with a different serial number. This is NOT meant for switching the object to represent another device. We intentionally did not make it a regular setter of the serial property so people don't accidentally call this without understanding the consequences. Args: new_serial: string, the new serial number for the same device. Raises: DeviceError: tries to update serial when any service is running.
github-repos
def word_score(word, input_letters, questions=0): score = 0 bingo = 0 filled_by_blanks = [] rack = list(input_letters) for letter in word: if (letter in rack): bingo += 1 score += letter_score(letter) rack.remove(letter) else: filled_by_blanks.append(letter_score(letter)) for blank_score in sorted(filled_by_blanks, reverse=True): if (questions > 0): score += blank_score questions -= 1 if (bingo > 6): score += 50 return score
Checks the Scrabble score of a single word. Args: word: a string to check the Scrabble score of input_letters: the letters in our rack questions: integer of the tiles already on the board to build on Returns: an integer Scrabble score amount for the word
codesearchnet
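Because letter_score is defined elsewhere in that module, here is a simplified, self-contained sketch of the rack-scoring idea with a hypothetical partial tile-value table (blanks and the bingo bonus omitted).

LETTER_SCORES = {'q': 10, 'z': 10, 'j': 8, 'x': 8, 'k': 5}  # hypothetical partial table; other letters score 1

def letter_score(letter):
    return LETTER_SCORES.get(letter.lower(), 1)

def simple_word_score(word, rack):
    rack = list(rack)
    score = 0
    for letter in word:
        if letter in rack:
            rack.remove(letter)          # consume the tile from the rack
            score += letter_score(letter)
        # letters missing from the rack would need blanks and score nothing here
    return score

print(simple_word_score('quiz', list('quizabc')))  # 10 + 1 + 1 + 10 = 22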
def __init__(self, context, name, task_id=None): self.name = name self.context = context self.task_id = task_id or get_task_id(context.claim_task) self.task = context.task self.task_type = guess_task_type(name, self.task) self.worker_impl = guess_worker_impl(self) self.decision_task_id = get_decision_task_id(self.task) self.parent_task_id = get_parent_task_id(self.task) self.links = []
Initialize ChainOfTrust. Args: context (scriptworker.context.Context): the scriptworker context name (str): the name of the task (e.g., signing) task_id (str, optional): the task_id of the task. If None, use ``get_task_id(context.claim_task)``. Defaults to None.
juraj-google-style
def uptime(ut, facter): ut = ut if (ut and ut.loadavg): return Uptime(ut.currtime, ut.updays, ut.uphhmm, ut.users, ut.loadavg, ut.uptime) ft = facter if (ft and hasattr(ft, 'uptime_seconds')): import datetime secs = int(ft.uptime_seconds) up_dd = (secs // (3600 * 24)) up_hh = ((secs % (3600 * 24)) // 3600) up_mm = ((secs % 3600) // 60) updays = (str(up_dd) if (up_dd > 0) else '') uphhmm = ('%02d:%02d' % (up_hh, up_mm)) up_time = datetime.timedelta(seconds=secs) return Uptime(None, updays, uphhmm, None, None, up_time) raise Exception('Unable to get uptime information.')
Check uptime and facts to get the uptime information. Prefer uptime to facts. Returns: insights.combiners.uptime.Uptime: A named tuple with `currtime`, `updays`, `uphhmm`, `users`, `loadavg` and `uptime` components. Raises: Exception: If no data is available from either of the parsers.
codesearchnet
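The days/hours/minutes arithmetic used in the facter branch above is plain integer division and can be sanity-checked on its own:

secs = 200000                          # example uptime in seconds
up_dd = secs // (3600 * 24)            # whole days
up_hh = (secs % (3600 * 24)) // 3600   # leftover hours
up_mm = (secs % 3600) // 60            # leftover minutes within the hour
print(up_dd, '%02d:%02d' % (up_hh, up_mm))  # 2 07:33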
def write(self, data): start_time = time.time() self._get_write_buffer().write(data) ctx = context.get() operation.counters.Increment(COUNTER_IO_WRITE_BYTES, len(data))(ctx) operation.counters.Increment( COUNTER_IO_WRITE_MSEC, int((time.time() - start_time) * 1000))(ctx)
Write data to the GoogleCloudStorage file. Args: data: string containing the data to be written.
juraj-google-style
def resize_to(self, width, height): self.driver.resize_window_to(self.handle, width, height)
Resizes the window to the given dimensions. If this method was called for a window that is not current, then after calling this method the current window should remain the same as it was before calling this method. Args: width (int): The new window width in pixels. height (int): The new window height in pixels.
juraj-google-style
def __call__(self, utterances_batch: list, utterances_ids: Optional[list] = None) -> list: responses_batch = self._call(utterances_batch, utterances_ids) batch_size = len(utterances_batch) ids = utterances_ids or list(range(batch_size)) for utt_batch_idx, utt_id in enumerate(ids): self.history[utt_id].append(str(utterances_batch[utt_batch_idx])) self.dialog_logger.log_in(utterances_batch[utt_batch_idx], utt_id) self.history[utt_id].append(str(responses_batch[utt_batch_idx])) self.dialog_logger.log_out(responses_batch[utt_batch_idx], utt_id) return responses_batch
Wraps _call method and updates utterances history. Args: utterances_batch: Batch of incoming utterances. utterances_ids: Batch of dialog IDs corresponding to incoming utterances. Returns: responses: A batch of responses corresponding to the utterance batch received by agent.
juraj-google-style
def _download_files(self, client, flow_id):
    output_file_path = os.path.join(self.output_path, '.'.join((flow_id, 'zip')))
    if os.path.exists(output_file_path):
        print('{0:s} already exists: Skipping'.format(output_file_path))
        return None
    flow = client.Flow(flow_id)
    file_archive = flow.GetFilesArchive()
    file_archive.WriteToFile(output_file_path)
    fqdn = client.data.os_info.fqdn.lower()
    client_output_file = os.path.join(self.output_path, fqdn)
    if not os.path.isdir(client_output_file):
        os.makedirs(client_output_file)
    with zipfile.ZipFile(output_file_path) as archive:
        archive.extractall(path=client_output_file)
    os.remove(output_file_path)
    return client_output_file

Download files from the specified flow.

Args:
    client: GRR Client object from which to download flow data.
    flow_id: GRR flow ID.

Returns:
    str: path of downloaded files.
codesearchnet
def chat_meMessage(self, *, channel: str, text: str, **kwargs) -> SlackResponse:
    kwargs.update({"channel": channel, "text": text})
    return self.api_call("chat.meMessage", json=kwargs)

Share a me message into a channel.

Args:
    channel (str): The channel id. e.g. 'C1234567890'
    text (str): The message you'd like to share. e.g. 'Hello world'
juraj-google-style
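A hedged usage sketch: it assumes a Slack WebClient exposing the method above (as in slack_sdk), and the token and channel id are placeholders, not values from the source.

import os
from slack_sdk import WebClient  # assumed client library; adjust to your environment

client = WebClient(token=os.environ.get("SLACK_API_TOKEN", ""))
response = client.chat_meMessage(channel="C1234567890", text="deployed the new build")
assert response["ok"]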
def get_all(self, attrs: Iterable[FetchAttribute]) \
        -> Sequence[Tuple[FetchAttribute, MaybeBytes]]:
    ret: List[Tuple[FetchAttribute, MaybeBytes]] = []
    for attr in attrs:
        try:
            ret.append((attr.for_response, self.get(attr)))
        except NotFetchable:
            pass
    return ret

Return a list of tuples containing the attribute itself and the bytes
representation of that attribute from the message.

Args:
    attrs: The fetch attributes.
juraj-google-style
def add_file(self, filename, file_content):
    self._group_data['fileName'] = filename
    self._file_content = file_content

Add a file for Document and Report types.

Example::

    document = tcex.batch.group('Document', 'My Document')
    document.add_file('my_file.txt', 'my contents')

Args:
    filename (str): The name of the file.
    file_content (bytes|method|str): The contents of the file or callback to get contents.
juraj-google-style
def validate(export_formats):
    for i in range(len(export_formats)):
        export_formats[i] = export_formats[i].strip().lower()
        if export_formats[i] not in [ExportFormat.CHECKPOINT, ExportFormat.MODEL]:
            raise TuneError('Unsupported export format: ' + export_formats[i])

Validates export_formats.

Raises:
    TuneError: if the format is unknown.
codesearchnet
def __getitem__(self, key):
    path = self.keypath(key)
    if fs.exists(path):
        return path
    else:
        raise KeyError(key)

Get path to file in cache.

Arguments:
    key: Key.

Returns:
    str: Path to cache value.

Raises:
    KeyError: If key not in cache.
juraj-google-style
def ProcessListDirectory(self, responses):
    if not responses.success:
        raise flow.FlowError("Unable to list directory.")
    with data_store.DB.GetMutationPool() as pool:
        for response in responses:
            stat_entry = rdf_client_fs.StatEntry(response)
            filesystem.CreateAFF4Object(
                stat_entry, self.client_urn, pool, token=self.token)
            self.SendReply(stat_entry)

Processes the results of the ListDirectory client action.

Args:
    responses: a flow Responses object.
juraj-google-style
def ParseFileObject(self, parser_mediator, file_object):
    page_header_map = self._GetDataTypeMap('dls_page_header')
    try:
        page_header, file_offset = self._ReadStructureFromFileObject(
            file_object, 0, page_header_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile(
            'Unable to parse page header with error: {0!s}'.format(exception))
    if page_header.signature not in self._DLS_SIGNATURES:
        raise errors.UnableToParseFile('Invalid file signature')
    current_page_end = page_header.page_size
    file_entry = parser_mediator.GetFileEntry()
    date_time = self._GetParentModificationTime(file_entry)
    if date_time:
        timestamp_description = definitions.TIME_DESCRIPTION_RECORDED
    else:
        date_time = dfdatetime_semantic_time.SemanticTime('Not set')
        timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
    event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
    file_size = file_object.get_size()
    while file_offset < file_size:
        if file_offset >= current_page_end:
            try:
                page_header, header_size = self._ParseDLSPageHeader(
                    file_object, file_offset)
            except errors.ParseError as exception:
                parser_mediator.ProduceExtractionWarning(
                    'Unable to parse page header with error: {0!s}'.format(exception))
                break
            current_page_end += page_header.page_size
            file_offset += header_size
            continue
        if page_header.signature == self._DLS_V1_SIGNATURE:
            record_map = self._GetDataTypeMap('dls_record_v1')
        else:
            record_map = self._GetDataTypeMap('dls_record_v2')
        try:
            record, record_length = self._ReadStructureFromFileObject(
                file_object, file_offset, record_map)
            file_offset += record_length
        except (ValueError, errors.ParseError) as exception:
            parser_mediator.ProduceExtractionWarning(
                'Unable to parse page record with error: {0!s}'.format(exception))
            break
        event_data = self._BuildEventData(record)
        parser_mediator.ProduceEventWithEventData(event, event_data)

Parses an fseventsd file.

Args:
    parser_mediator (ParserMediator): parser mediator.
    file_object (dfvfs.FileIO): a file-like object.

Raises:
    UnableToParseFile: when the header cannot be parsed.
juraj-google-style
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
    input_shape = inputs_embeds.size()[:-1]
    sequence_length = input_shape[1]
    position_ids = torch.arange(
        self.padding_idx + 1, sequence_length + self.padding_idx + 1,
        dtype=torch.long, device=inputs_embeds.device)
    return position_ids.unsqueeze(0).expand(input_shape)

We are provided embeddings directly, so we cannot infer which tokens are padded;
just generate sequential position ids.

Args:
    inputs_embeds: torch.Tensor

Returns:
    torch.Tensor
github-repos
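A self-contained sketch of the same arithmetic outside the module class: given only an embeddings tensor and a padding index, it produces sequential position ids offset past the padding id. The padding_idx value is an assumption for illustration.

import torch

padding_idx = 1  # assumed value, as in many RoBERTa-style configs
inputs_embeds = torch.randn(2, 5, 16)  # (batch, seq_len, hidden)

seq_len = inputs_embeds.size(1)
position_ids = torch.arange(
    padding_idx + 1, seq_len + padding_idx + 1,
    dtype=torch.long, device=inputs_embeds.device)
position_ids = position_ids.unsqueeze(0).expand(inputs_embeds.size()[:-1])

print(position_ids)
# tensor([[2, 3, 4, 5, 6],
#         [2, 3, 4, 5, 6]])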
def min(x, axis=None, keepdims=False, initial=None):
    if any_symbolic_tensors((x,)):
        return Min(axis=axis, keepdims=keepdims, initial=initial).symbolic_call(x)
    return backend.numpy.min(x, axis=axis, keepdims=keepdims, initial=initial)

Return the minimum of a tensor or minimum along an axis.

Args:
    x: Input tensor.
    axis: Axis or axes along which to operate. By default, flattened input is used.
    keepdims: If this is set to `True`, the axes which are reduced are left in the
        result as dimensions with size one. Defaults to `False`.
    initial: The maximum value of an output element. Defaults to `None`.

Returns:
    Minimum of `x`.
github-repos
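A brief usage sketch, assuming a Keras 3-style `keras.ops` namespace where this function is exposed as `keras.ops.min`.

import numpy as np
from keras import ops  # assumes Keras 3

x = np.array([[3.0, 1.0, 4.0],
              [1.0, 5.0, 9.0]])

print(ops.min(x))                         # 1.0
print(ops.min(x, axis=1))                 # [1. 1.]
print(ops.min(x, axis=1, keepdims=True))  # [[1.] [1.]]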
def __init__(self, input_size: int, num_experts: int, top_k: int):
    super().__init__()
    self.num_experts = num_experts
    self.input_size = input_size
    self.top_k = top_k
    self.layer = nn.Linear(input_size, num_experts, bias=False)

Initialize the top-k gating mechanism.

Args:
    input_size (`int`): Size of the input.
    num_experts (`int`): Number of experts.
    top_k (`int`): Number of top experts to select.
github-repos
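A minimal sketch of what a forward pass through such a gate could look like: project to per-expert logits, then keep the top-k. This is an assumption about usage, not the model's actual routing code.

import torch
from torch import nn

input_size, num_experts, top_k = 8, 4, 2
gate = nn.Linear(input_size, num_experts, bias=False)  # mirrors self.layer above

hidden = torch.randn(3, input_size)         # (tokens, input_size)
logits = gate(hidden)                       # (tokens, num_experts)
top_vals, top_idx = torch.topk(logits, top_k, dim=-1)
weights = torch.softmax(top_vals, dim=-1)   # normalized weights over the chosen experts

print(top_idx.shape, weights.shape)         # torch.Size([3, 2]) torch.Size([3, 2])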
def ToJsonString(self):
    nanos = self.nanos % _NANOS_PER_SECOND
    total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND
    seconds = total_sec % _SECONDS_PER_DAY
    days = (total_sec - seconds) // _SECONDS_PER_DAY
    dt = datetime(1970, 1, 1) + timedelta(days, seconds)
    result = dt.isoformat()
    if (nanos % 1000000000.0) == 0:
        return result + 'Z'
    if (nanos % 1000000.0) == 0:
        return result + ('.%03dZ' % (nanos / 1000000.0))
    if (nanos % 1000.0) == 0:
        return result + ('.%06dZ' % (nanos / 1000.0))
    return result + ('.%09dZ' % nanos)

Converts Timestamp to RFC 3339 date string format.

Returns:
    A string converted from timestamp. The string is always Z-normalized and uses
    3, 6 or 9 fractional digits as required to represent the exact time.
    Example of the return format: '1972-01-01T10:00:20.021Z'
codesearchnet
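A usage sketch with the protobuf well-known Timestamp type, which exposes this method; the instant is chosen to reproduce the example in the docstring above.

from google.protobuf.timestamp_pb2 import Timestamp

ts = Timestamp(seconds=63108020, nanos=21000000)
print(ts.ToJsonString())  # 1972-01-01T10:00:20.021Z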
def add_layer_timing_signal_sinusoid_1d(x, layer, num_layers):
    channels = common_layers.shape_list(x)[-1]
    signal = get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers)
    return x + signal

Add sinusoids of different frequencies as layer (vertical) timing signal.

Args:
    x: a Tensor with shape [batch, length, channels]
    layer: layer num
    num_layers: total number of layers

Returns:
    a Tensor the same shape as x.
juraj-google-style
def discover(package, cls_match_func):
    matched_classes = set()
    for _, module_name, _ in pkgutil.walk_packages(package.__path__, prefix=package.__name__ + '.'):
        module = __import__(module_name, fromlist=[str('__trash')], level=0)
        for _, imported_class in inspect.getmembers(module, inspect.isclass):
            if imported_class.__module__ != module.__name__:
                continue
            if cls_match_func(imported_class):
                matched_classes.add(imported_class)
    return matched_classes

Returns a set of classes in the package matched by cls_match_func.

Args:
    package - A Python package
    cls_match_func - Function taking a class and returning True if the class is
        to be included in the output.
codesearchnet
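A hedged usage sketch: assuming the `discover` helper above is defined or importable, it scans a package for subclasses of a base class. The package and base class names are hypothetical placeholders.

import inspect

import my_plugins                         # hypothetical package containing plugin modules
from my_plugins.base import BasePlugin   # hypothetical base class

def is_plugin(cls):
    return inspect.isclass(cls) and issubclass(cls, BasePlugin) and cls is not BasePlugin

plugins = discover(my_plugins, is_plugin)
for plugin_cls in sorted(plugins, key=lambda c: c.__name__):
    print(plugin_cls.__name__)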
def plot_chmap(cube, kidid, ax=None, **kwargs):
    if ax is None:
        ax = plt.gca()
    index = np.where(cube.kidid == kidid)[0]
    if len(index) == 0:
        raise KeyError('Such a kidid does not exist.')
    index = int(index)
    im = ax.pcolormesh(cube.x, cube.y, cube[:, :, index].T, **kwargs)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('intensity map ch #{}'.format(kidid))
    return im

Plot an intensity map.

Args:
    cube (xarray.DataArray): Cube in which the spectrum information is included.
    kidid (int): Kidid.
    ax (matplotlib.axes): Axis the figure is plotted on.
    kwargs (optional): Plot options passed to ax.pcolormesh().
juraj-google-style
def get_roaster_state(self):
    value = self._current_state.value
    if value == b'\x02\x01':
        return 'idle'
    elif value == b'\x04\x04':
        return 'cooling'
    elif value == b'\x08\x01':
        return 'sleeping'
    elif value == b'\x00\x00' or value == b'':
        return 'connecting'
    elif value == b'\x04\x02':
        return 'roasting'
    else:
        return 'unknown'

Returns a string based upon the current state of the roaster.

Returns:
    'idle' if idle,
    'sleeping' if sleeping,
    'cooling' if cooling,
    'roasting' if roasting,
    'connecting' if in hardware connection phase,
    'unknown' otherwise
codesearchnet
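The byte-value-to-state decoding above can also be expressed as a lookup table; a standalone sketch using the same byte codes as the method.

# State codes copied from the method above; b'' and b'\x00\x00' both mean "connecting".
STATE_BY_CODE = {
    b'\x02\x01': 'idle',
    b'\x04\x04': 'cooling',
    b'\x08\x01': 'sleeping',
    b'\x00\x00': 'connecting',
    b'': 'connecting',
    b'\x04\x02': 'roasting',
}

def decode_state(value: bytes) -> str:
    return STATE_BY_CODE.get(value, 'unknown')

print(decode_state(b'\x04\x02'))  # roasting
print(decode_state(b'\xff\xff'))  # unknown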
def update_dynamic_gene_list(self, case, hgnc_symbols=None, hgnc_ids=None,
                             phenotype_ids=None, build='37'):
    dynamic_gene_list = []
    res = []
    if hgnc_ids:
        LOG.info("Fetching genes by hgnc id")
        res = self.hgnc_collection.find({'hgnc_id': {'$in': hgnc_ids}, 'build': build})
    elif hgnc_symbols:
        LOG.info("Fetching genes by hgnc symbols")
        res = []
        for symbol in hgnc_symbols:
            for gene_obj in self.gene_by_alias(symbol=symbol, build=build):
                res.append(gene_obj)
    for gene_obj in res:
        dynamic_gene_list.append(
            {
                'hgnc_symbol': gene_obj['hgnc_symbol'],
                'hgnc_id': gene_obj['hgnc_id'],
                'description': gene_obj['description'],
            }
        )
    LOG.info("Update dynamic gene panel for: %s", case['display_name'])
    updated_case = self.case_collection.find_one_and_update(
        {'_id': case['_id']},
        {'$set': {'dynamic_gene_list': dynamic_gene_list,
                  'dynamic_panel_phenotypes': phenotype_ids or []}},
        return_document=pymongo.ReturnDocument.AFTER
    )
    LOG.debug("Case updated")
    return updated_case

Update the dynamic gene list for a case.

Adds a list of dictionaries to case['dynamic_gene_list'] that looks like

    {
        hgnc_symbol: str,
        hgnc_id: int,
        description: str
    }

Arguments:
    case (dict): The case that should be updated
    hgnc_symbols (iterable): A list of hgnc_symbols
    hgnc_ids (iterable): A list of hgnc_ids

Returns:
    updated_case (dict)
juraj-google-style
def get_batch(self):
    params = [self._batch_size, self._num_objects, self._num_features]
    inputs, labels = tf.py_func(self._get_batch_data, params, [tf.float32, tf.float32])
    inputs = tf.reshape(
        inputs,
        [self._batch_size, self._num_objects,
         self._num_features + self._num_objects * 3])
    labels = tf.reshape(labels, [-1])
    return inputs, labels

Returns set of nth-farthest input tensors and labels.

Returns:
    1. tf.Tensor (`batch_size`, `num_objects`,
       (`num_features` + 3 * `num_objects`)).
    2. tf.Tensor (`batch_size`). Output object reference label.
codesearchnet
def to_json(self, with_volumes=True):
    data = super().to_json()
    if with_volumes:
        data['volumes'] = [
            {'volumeId': vol.id, 'volumeType': vol.volume_type, 'size': vol.size}
            for vol in self.volumes
        ]
    return data

Augment the base `to_json` function, adding information about volumes.

Returns:
    `dict`
codesearchnet
def Get(self, request, global_params=None):
    config = self.GetMethodConfig('Get')
    return self._RunMethod(config, request, global_params=global_params)

Retrieve a `BitbucketServerConfig`. This API is experimental.

Args:
    request: (CloudbuildProjectsLocationsBitbucketServerConfigsGetRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments

Returns:
    (BitbucketServerConfig) The response message.
github-repos
def format_level_2_memory(memory, header=None):
    memory_list = []
    for shot_memory in memory:
        memory_list.append(format_counts_memory(shot_memory, header))
    return memory_list

Format an experiment result memory object for measurement level 2.

Args:
    memory (list): Memory from experiment with `meas_level==2` and `memory==True`.
    header (dict): the experiment header dictionary containing useful information
        for postprocessing.

Returns:
    list[str]: List of bitstrings
juraj-google-style
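A rough standalone illustration of the per-shot hex-to-bitstring conversion such a formatter performs; this is not Qiskit's `format_counts_memory`, just the underlying idea, with an assumed number of classical bits.

def hex_to_bitstring(shot_memory: str, num_clbits: int) -> str:
    # e.g. '0x5' with 3 classical bits -> '101'
    return format(int(shot_memory, 16), '0{}b'.format(num_clbits))

memory = ['0x0', '0x5', '0x7']
print([hex_to_bitstring(shot, 3) for shot in memory])  # ['000', '101', '111']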
def relative_humidity(self, value=999):
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type int for field `relative_humidity`'.format(value))
        if value < 0:
            raise ValueError('value need to be greater or equal 0 for field `relative_humidity`')
        if value > 110:
            raise ValueError('value need to be smaller 110 for field `relative_humidity`')
    self._relative_humidity = value

Corresponds to IDD Field `relative_humidity`

Args:
    value (int): value for IDD Field `relative_humidity`
        value >= 0
        value <= 110
        Missing value: 999
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
codesearchnet
def get_dc_keywords(index_page):
    keyword_lists = (
        keyword_list.split()
        for keyword_list in parse_meta(index_page, 'dc.keywords', 'DC')
    )
    return [
        SourceString(keyword, source='DC')
        for keyword in sum(keyword_lists, [])
    ]

Return list of `keywords` parsed from Dublin core.

Args:
    index_page (str): Content of the page as UTF-8 string

Returns:
    list: List of :class:`.SourceString` objects.
codesearchnet