code: string (lengths 20 to 4.93k)
docstring: string (lengths 33 to 1.27k)
source: string (3 classes)
def receive(self, sequence, args): if (not self._reorder): self._callback(*args) return if ((self._next_expected is not None) and (sequence < self._next_expected)): print(('Dropping out of order packet, seq=%d' % sequence)) return self._out_of_order.append((sequence, args)) self._out_of_order.sort(key=(lambda x: x[0])) while (len(self._out_of_order) > 0): (seq, args) = self._out_of_order[0] if ((self._next_expected is not None) and (seq != self._next_expected)): return self._callback(*args) self._out_of_order.pop(0) self._next_expected = (seq + 1)
Receive one packet. If the sequence number is one we've already seen before, it is dropped. If it is not the next expected sequence number, it is put into the _out_of_order queue to be processed once the holes in the sequence numbers are filled in. Args: sequence (int): The sequence number of the received packet args (list): The list of packet contents that will be passed to callback as callback(*args)
codesearchnet
def get_energy(self, composition, strict=True): if strict and set(composition.keys()) > set(self.keys()): s = set(composition.keys()) - set(self.keys()) raise ValueError("Potentials not specified for {}".format(s)) return sum(self.get(k, 0) * v for k, v in composition.items())
Calculates the energy of a composition. Args: composition (Composition): input composition strict (bool): Whether all potentials must be specified
juraj-google-style
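As a quick illustration of the energy calculation above, which is just a weighted sum of chemical potentials over the composition, here is a minimal standalone sketch; the potential values and composition are invented for the example, not taken from any real system.

potentials = {'Fe': -1.2, 'O': -2.5}   # hypothetical chemical potentials
composition = {'Fe': 2, 'O': 3}        # element counts, Fe2O3-like
energy = sum(potentials.get(k, 0) * v for k, v in composition.items())
print(energy)                          # -9.9 = 2 * -1.2 + 3 * -2.5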
def add_request(self, input_ids: List[int], request_id: Optional[str]=None, max_new_tokens: Optional[int]=None) -> str: if request_id is None: with self._request_lock: request_id = f'req_{self._request_counter}' self._request_counter += 1 max_new_tokens = self.generation_config.max_new_tokens if max_new_tokens is None else max_new_tokens state = RequestState(request_id=request_id, prompt_ids=list(input_ids), full_prompt_ids=list(input_ids), max_new_tokens=max_new_tokens, eos_token_id=self.generation_config.eos_token_id) self.input_queue.put(state, block=True, timeout=10) logger.debug(f'Added request {request_id} to queue.') return request_id
Add a new generation request to the queue. Args: input_ids: Input token IDs to use as prompt request_id: Optional custom request ID (auto-generated if None) max_new_tokens: Optional maximum number of new tokens to generate (falls back to the generation config if None) Returns: str: The request ID
github-repos
def recipe_ga360_segmentology(config, auth_write, auth_read, view, recipe_slug): dataset(config, {'description': 'Create a dataset for bigquery tables.', 'hour': [4], 'auth': auth_write, 'dataset': recipe_slug}) bigquery(config, {'auth': auth_write, 'function': 'Pearson Significance Test', 'to': {'dataset': recipe_slug}}) ga(config, {'auth': auth_read, 'kwargs': {'reportRequests': [{'viewId': view, 'dateRanges': [{'startDate': '90daysAgo', 'endDate': 'today'}], 'dimensions': [{'name': 'ga:userType'}, {'name': 'ga:userDefinedValue'}, {'name': 'ga:latitude'}, {'name': 'ga:longitude'}], 'metrics': [{'expression': 'ga:users'}, {'expression': 'ga:sessionsPerUser'}, {'expression': 'ga:bounces'}, {'expression': 'ga:timeOnPage'}, {'expression': 'ga:pageviews'}]}], 'useResourceQuotas': False}, 'out': {'bigquery': {'dataset': recipe_slug, 'table': 'GA360_KPI'}}}) bigquery(config, {'auth': auth_write, 'from': {'query': 'WITH GA360_SUM AS (\n SELECT\n A.Dimensions.userType AS User_Type,\n A.Dimensions.userDefinedValue AS User_Value,\n B.zip_code AS Zip,\n SUM(Metrics.users) AS Users,\n SUM(Metrics.sessionsPerUser) AS Sessions,\n SUM(Metrics.timeOnPage) AS Time_On_Site,\n SUM(Metrics.bounces) AS Bounces,\n SUM(Metrics.pageviews) AS Page_Views\n FROM `{dataset}.GA360_KPI` AS A\n JOIN `bigquery-public-data.geo_us_boundaries.zip_codes` AS B\n ON ST_WITHIN(ST_GEOGPOINT(A.Dimensions.longitude, A.Dimensions.latitude), B.zip_code_geom)\n GROUP BY 1,2,3\n )\n SELECT\n User_Type,\n User_Value,\n Zip,\n Users,\n SAFE_DIVIDE(Users, SUM(Users) OVER()) AS User_Percent,\n SAFE_DIVIDE(Sessions, SUM(Sessions) OVER()) AS Impression_Percent,\n SAFE_DIVIDE(Time_On_Site, SUM(Time_On_Site) OVER()) AS Time_On_Site_Percent,\n SAFE_DIVIDE(Bounces, SUM(Bounces) OVER()) AS Bounce_Percent,\n SAFE_DIVIDE(Page_Views, SUM(Page_Views) OVER()) AS Page_View_Percent\n FROM GA360_SUM ', 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'view': 'GA360_KPI_Normalized'}}) census(config, {'auth': auth_write, 'normalize': {'census_geography': 'zip_codes', 'census_year': '2018', 'census_span': '5yr'}, 'to': {'dataset': recipe_slug, 'type': 'view'}}) census(config, {'auth': auth_write, 'correlate': {'join': 'Zip', 'pass': ['User_Type', 'User_Value'], 'sum': ['Users'], 'correlate': ['User_Percent', 'Impression_Percent', 'Time_On_Site_Percent', 'Bounce_Percent', 'Page_View_Percent'], 'dataset': recipe_slug, 'table': 'GA360_KPI_Normalized', 'significance': 80}, 'to': {'dataset': recipe_slug, 'type': 'view'}})
GA360 funnel analysis using Census data. Args: auth_write (authentication) - Authorization used for writing data. auth_read (authentication) - Authorization for reading GA360. view (string) - View Id recipe_slug (string) - Name of Google BigQuery dataset to create.
github-repos
def custom_returnvalue(self, printer, desc=None): self.return_info = ReturnInfo(None, printer, True, desc)
Use a custom function to print the return value. Args: printer (callable): A function that should take in the return value and convert it to a string. desc (str): An optional description of the return value.
codesearchnet
def __init__(self, start, end): if start > end: raise ValueError( "Invalid time-range: %s > %s." % (start.AsMicrosecondsSinceEpoch(), end.AsMicrosecondsSinceEpoch())) self._start = start self._end = end
Initializes a TimeRange. Args: start: An RDFDatetime that indicates the beginning of the time-range. end: An RDFDatetime that indicates the end of the time-range. Raises: ValueError: If the beginning of the time range is at a future time as compared to the end of the time-range.
juraj-google-style
def inplace_add(x, i, v): return alias_inplace_add(gen_array_ops.deep_copy(x), i, v)
Applies an inplace add on input x at index i with value v. Note that this function is not actually inplace - it allocates a copy of x. The utility is not avoiding memory copies but rather specifying a sparse update. If i is None, x and v must be the same shape. Computes y = x; y += v; If i is a scalar, x has a rank 1 higher than v's. Computes y = x; y[i, :] += v; Otherwise, x and v must have the same rank. Computes y = x; y[i, :] += v; Args: x: A Tensor. i: None, a scalar or a vector. v: A Tensor. Returns: Returns y, which is guaranteed not to be an alias of x.
github-repos
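The sparse-update semantics described in the docstring can be mimicked in plain NumPy; this is only a sketch of the behaviour, not the TensorFlow implementation.

import numpy as np

x = np.zeros((3, 2))
v = np.array([1.0, 2.0])
i = 1

y = x.copy()    # like deep_copy: y is guaranteed not to be an alias of x
y[i, :] += v    # scalar i: add v to a single row of the copy
print(y)        # row 1 becomes [1., 2.]; x is unchanged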
def get_ecommerce_client(url_postfix='', site_code=None): ecommerce_api_root = get_configuration('ECOMMERCE_API_ROOT', site_code=site_code) signing_key = get_configuration('JWT_SECRET_KEY', site_code=site_code) issuer = get_configuration('JWT_ISSUER', site_code=site_code) service_username = get_configuration('ECOMMERCE_SERVICE_USERNAME', site_code=site_code) return EdxRestApiClient( ecommerce_api_root + url_postfix, signing_key=signing_key, issuer=issuer, username=service_username)
Get client for fetching data from ecommerce API. Arguments: site_code (str): (Optional) The SITE_OVERRIDES key to inspect for site-specific values url_postfix (str): (Optional) The URL postfix value to append to the ECOMMERCE_API_ROOT value. Returns: EdxRestApiClient object
juraj-google-style
def unstack(df, level=-1, reset_index=True): df = df.unstack(level=level) if reset_index: df = df.reset_index() df.columns = df.columns.map(_join_names) return df
pd.DataFrame.unstack adapter. Call the `df.unstack` method using the indicated level and afterwards join the column names using an underscore. Args: df (pandas.DataFrame): DataFrame to unstack. level (str, int or list): Level(s) of index to unstack, can pass level name reset_index (bool): Whether to reset the index after unstacking Returns: pandas.Dataframe: unstacked dataframe
juraj-google-style
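A small pandas sketch of the adapter's effect: unstack one level of a MultiIndex and join the resulting column tuples with an underscore. The lambda below stands in for the _join_names helper, which is assumed to do roughly this.

import pandas as pd

df = pd.DataFrame(
    {'value': [1, 2, 3, 4]},
    index=pd.MultiIndex.from_product([['a', 'b'], ['x', 'y']], names=['grp', 'sub']),
)
out = df.unstack(level=-1).reset_index()
out.columns = out.columns.map(
    lambda c: '_'.join(filter(None, c)) if isinstance(c, tuple) else c)
print(out.columns.tolist())   # ['grp', 'value_x', 'value_y']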
def get(self, name): interface = name if not interface: raise ValueError("Vrrp.get(): interface must contain a value.") config = self.get_block('interface %s' % interface) if config is None: return config match = set(re.findall(r'^\s+(?:no |)vrrp (\d+)', config, re.M)) if not match: return None result = dict() for vrid in match: subd = dict() subd.update(self._parse_delay_reload(config, vrid)) subd.update(self._parse_description(config, vrid)) subd.update(self._parse_enable(config, vrid)) subd.update(self._parse_ip_version(config, vrid)) subd.update(self._parse_mac_addr_adv_interval(config, vrid)) subd.update(self._parse_preempt(config, vrid)) subd.update(self._parse_preempt_delay_min(config, vrid)) subd.update(self._parse_preempt_delay_reload(config, vrid)) subd.update(self._parse_primary_ip(config, vrid)) subd.update(self._parse_priority(config, vrid)) subd.update(self._parse_secondary_ip(config, vrid)) subd.update(self._parse_timers_advertise(config, vrid)) subd.update(self._parse_track(config, vrid)) subd.update(self._parse_bfd_ip(config, vrid)) result.update({int(vrid): subd}) return result if result else None
Get the vrrp configurations for a single node interface Args: name (string): The name of the interface for which vrrp configurations will be retrieved. Returns: A dictionary containing the vrrp configurations on the interface. Returns None if no vrrp configurations are defined or if the interface is not configured.
juraj-google-style
def Add(self, rdf_value, mutation_pool=None): self.StaticAdd(self.urn, rdf_value, mutation_pool=mutation_pool)
Adds an rdf value to the queue. Adds an rdf value to the queue. Does not require that the queue be locked. Args: rdf_value: The rdf value to add to the queue. mutation_pool: A MutationPool object to write to. Raises: ValueError: rdf_value has unexpected type.
juraj-google-style
def get_appliance(self, appliance_id): url = ('https: headers = self.__gen_headers() headers['Content-Type'] = 'application/json' r = requests.get(url, headers=headers) return r.json()
Get the information for a specified appliance Args: appliance_id (string): identifying string of the appliance Returns: dict: dictionary object containing information about the specified appliance
codesearchnet
def GetHashType(self, hash_str): for hash_type, hash_re in self.hashes: if hash_re.match(hash_str): return hash_type return "EMPTY"
Identify the type of hash in a hash string. Args: hash_str: A string value that may be a hash. Returns: A string description of the type of hash.
juraj-google-style
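The lookup above is a simple first-match scan over (type, regex) pairs; here is a standalone sketch with illustrative patterns (hex-digest length per algorithm), not the original class's table:

import re

HASH_RES = [
    ('MD5', re.compile(r'^[0-9a-fA-F]{32}$')),
    ('SHA1', re.compile(r'^[0-9a-fA-F]{40}$')),
    ('SHA256', re.compile(r'^[0-9a-fA-F]{64}$')),
]

def get_hash_type(hash_str):
    for hash_type, hash_re in HASH_RES:
        if hash_re.match(hash_str):
            return hash_type
    return 'EMPTY'

print(get_hash_type('d41d8cd98f00b204e9800998ecf8427e'))   # MD5 (32 hex chars)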
def resume(resume_delay=0): return ProcessContinuation(resume_delay=resume_delay)
A convenient method that produces a ``ProcessContinuation``. Args: resume_delay: delay after which processing current element should be resumed. Returns: a ``ProcessContinuation`` for signalling the runner that current input element has not been fully processed and should be resumed later.
github-repos
def __init__(self, file_object=None): super(SelfFeederMixIn, self).__init__() self.file_object = file_object
Initializes the lexer feeder mix-in object. Args: file_object: Optional file-like object.
juraj-google-style
def clean_single_dict(indict, prepend_to_keys=None, remove_keys_containing=None): if (not prepend_to_keys): prepend_to_keys = '' outdict = {} for (k, v) in indict.items(): if remove_keys_containing: if (remove_keys_containing in k): continue outdict[(prepend_to_keys + k)] = v[0] return outdict
Clean a dict with values that contain single item iterators to single items Args: indict (dict): Dictionary to be cleaned prepend_to_keys (str): String to prepend to all keys remove_keys_containing (str): Text to check for in keys to ignore Returns: dict: Cleaned dictionary Examples: >>> clean_single_dict(indict={'test1': [1], 'test2': ['H']}) {'test1': 1, 'test2': 'H'} >>> clean_single_dict(indict={'test1': [1], 'test2': ['H']}, prepend_to_keys='struct_') {'struct_test1': 1, 'struct_test2': 'H'} >>> clean_single_dict(indict={'test1': [1], 'ignore': ['H']}, prepend_to_keys='struct_', remove_keys_containing='ignore') {'struct_test1': 1}
codesearchnet
def interconnect_link_topologies(self): if (not self.__interconnect_link_topologies): self.__interconnect_link_topologies = InterconnectLinkTopologies(self.__connection) return self.__interconnect_link_topologies
Gets the InterconnectLinkTopologies API client. Returns: InterconnectLinkTopologies:
codesearchnet
def check_termination(self) -> None: if self._is_thread_joined: if self.is_alive(): raise RuntimeError('Thread was not joined with main thread, and is still running when the test finished.') else: self._testcase.fail('A checked thread was not joined.')
Returns whether the checked thread was properly used and did terminate. Every checked thread should be "join"ed after starting, and before the test tears down. If it is not joined, it is possible the thread will hang and cause flaky failures in tests. Raises: self._testcase.failureException: If check_termination was called before thread was joined. RuntimeError: If the thread is not terminated. This means thread was not joined with the main thread.
github-repos
def get_pipeline_options(project: str, job_name: str, mode: str, device: str, num_workers: int=cfg.NUM_WORKERS, **kwargs: Any) -> PipelineOptions: job_name = f'{job_name}-{datetime.now().strftime('%Y%m%d%H%M%S')}' staging_bucket = f'gs: dataflow_options = {'runner': 'DirectRunner' if mode == 'local' else 'DataflowRunner', 'job_name': job_name, 'project': project, 'region': cfg.REGION, 'staging_location': f'{staging_bucket}/dflow-staging', 'temp_location': f'{staging_bucket}/dflow-temp', 'setup_file': './setup.py'} flags = [] if device == 'GPU': flags = ['--experiment=worker_accelerator=type:nvidia-tesla-p4;count:1;install-nvidia-driver'] dataflow_options.update({'sdk_container_image': cfg.DOCKER_IMG, 'machine_type': 'n1-standard-4'}) if num_workers: dataflow_options.update({'num_workers': num_workers}) return PipelineOptions(flags=flags, **dataflow_options)
Function to retrieve the pipeline options. Args: project: GCP project to run on job_name: Name of the Dataflow job mode: Indicator to run local, cloud or template device: Whether to run the job on 'CPU' or 'GPU' num_workers: Number of workers for running the job in parallel Returns: Dataflow pipeline options
github-repos
def apply_gradients(self, grads_and_vars, global_step=None, name=None): if distribute_lib.in_cross_replica_context(): raise ValueError('apply_gradients() must be called in a replica context.') if not self._doing_dynamic_loss_scaling(): return self._optimizer.apply_gradients(grads_and_vars, global_step, name) replica_context = distribute_lib.get_replica_context() grads_and_vars = tuple(grads_and_vars) return replica_context.merge_call(self._distributed_apply, args=(grads_and_vars, global_step, name))
Apply gradients to variables. This is the second part of `minimize()`. It returns an `Operation` that conditionally applies gradients if all gradient values are finite. Otherwise no update is performed (nor is `global_step` incremented). Args: grads_and_vars: List of (gradient, variable) pairs as returned by `compute_gradients()`. global_step: Optional `Variable` to increment by one after the variables have been updated. name: Optional name for the returned operation. Default to the name passed to the `Optimizer` constructor. Returns: An `Operation` that conditionally applies the specified gradients. If `global_step` was not None, that operation also increments `global_step`. Raises: RuntimeError: If you should use `_distributed_apply()` instead.
github-repos
def _copy_attr(self, module, varname, cls, attrname=None): if not hasattr(module, varname): raise RuntimeError("Variable '{}' not found".format(varname)) obj = getattr(module, varname) if not isinstance(obj, cls): raise RuntimeError( "Expecting fobj to be a {}, not a '{}'".format(cls.__name__, obj.__class__.__name__)) if attrname is None: attrname = varname setattr(self, attrname, obj)
Copies attribute from module object to self. Raises if object not of expected class Args: module: module object varname: variable name cls: expected class of variable attrname: attribute name of self. Falls back to varname
juraj-google-style
def _escaped_token_to_subtoken_strings(self, escaped_token): ret = [] start = 0 token_len = len(escaped_token) while start < token_len: for end in range( min(token_len, start + self._max_subtoken_len), start, -1): subtoken = escaped_token[start:end] if subtoken in self._subtoken_string_to_id: ret.append(subtoken) start = end break else: assert False, "Token substring not found in subtoken vocabulary." return ret
Converts an escaped token string to a list of subtoken strings. Args: escaped_token: An escaped token as a unicode string. Returns: A list of subtokens as unicode strings.
juraj-google-style
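The loop above is a greedy longest-match segmentation: at each position it tries the longest candidate substring present in the vocabulary, appends it, and advances. A self-contained sketch with a toy vocabulary:

def segment(token, vocab, max_len=5):
    ret, start = [], 0
    while start < len(token):
        for end in range(min(len(token), start + max_len), start, -1):
            piece = token[start:end]
            if piece in vocab:          # take the longest piece in the vocabulary
                ret.append(piece)
                start = end
                break
        else:
            raise ValueError('substring not found in vocabulary')
    return ret

print(segment('unhappy', {'un', 'happy', 'u', 'n', 'h', 'a', 'p', 'y'}))   # ['un', 'happy']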
def get_language_stemmer(language): from lunr.languages import SUPPORTED_LANGUAGES from nltk.stem.snowball import SnowballStemmer return SnowballStemmer(SUPPORTED_LANGUAGES[language])
Retrieves the SnowballStemmer for a particular language. Args: language (str): ISO-639-1 code of the language.
codesearchnet
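For reference, the underlying NLTK stemmer can also be used directly once the language name is known; the ISO-code-to-name mapping is what lunr's SUPPORTED_LANGUAGES provides.

from nltk.stem.snowball import SnowballStemmer

stemmer = SnowballStemmer('english')   # lunr would map e.g. 'en' -> 'english'
print(stemmer.stem('running'))         # 'run'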
def get_cached_item(cache_key, func, *func_args, **func_kwargs): key = get_cache_key(cache_key, func, *func_args, **func_kwargs) return cache.get(key)
Not a decorator, but a helper function to retrieve the cached item for a key created via get_cache_key. Args: - cache_key: if there was a specific cache key used to cache the function, it should be provided here. If not this should be None - func: the function which was cached - *func_args: arguments of the function - **func_kwargs: keyword arguments of this function
juraj-google-style
def init_logger(self, log_dir=None, level=logging.INFO): logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=level) logger = logging.getLogger(__name__) if (log_dir and (self.rank == 0)): filename = '{}.log'.format(self.timestamp) log_file = osp.join(log_dir, filename) self._add_file_handler(logger, log_file, level=level) return logger
Init the logger. Args: log_dir(str, optional): Log file directory. If not specified, no log file will be used. level (int or str): See the built-in python logging module. Returns: :obj:`~logging.Logger`: Python logger.
codesearchnet
def update_args(self, args): for arg in vars(args): if self.get(arg) and getattr(args, arg) is not None: self._config[self.root_section][arg] = getattr(args, arg)
Update config dictionary with parsed args, as resolved by argparse. Only root positional arguments that already exist will be overridden. Args: args (namespace): args parsed by argparse
juraj-google-style
def getThumbnailForItem(self, itemId, fileName, filePath): admin = None item = None try: admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler) item = admin.content.getItem(itemId = itemId) return item.saveThumbnail(fileName=fileName,filePath=filePath) except: line, filename, synerror = trace() raise common.ArcRestHelperError({ "function": "getThumbnailForItem", "line": line, "filename": filename, "synerror": synerror, } ) finally: admin = None item = None del admin del item gc.collect()
Gets an item's thumbnail and saves it to disk. Args: itemId (str): The item's ID. fileName (str): The name of the output image. filePath (str): The directory on disk where to save the thumbnail. Returns: dict: The result from :py:func:`arcrest.manageorg._content.UserItem.saveThumbnail`
juraj-google-style
def distribute_data_input(per_process_batch, layout, batch_dim_name): from keras.src.distribution import TensorLayout if isinstance(layout, TensorLayout): layout = layout.backend_layout return jax.make_array_from_process_local_data(layout, per_process_batch)
Distribute the input data with the corresponding layout. Note that the input here is a local worker batch. Within the local worker, the data need to be further partitioned to map to each of the devices. Args: per_process_batch: `jax.Array` that is already sharded to a local process size. layout: `TensorLayout` for the distribution information, or a `jax.sharding.Sharding` instance. batch_dim_name: Name of the batch dimension in the layout. Returns: A global batch distributed according to `layout`.
github-repos
def GetUnavailableBonus(self): height = (Blockchain.Default().Height + 1) unspents = self.FindUnspentCoinsByAsset(Blockchain.SystemShare().Hash) refs = [coin.Reference for coin in unspents] try: unavailable_bonus = Blockchain.CalculateBonus(refs, height_end=height) return unavailable_bonus except Exception as e: pass return Fixed8(0)
Gets the total claimable amount of Gas in the wallet that is not available to claim because it has not yet been spent. Returns: Fixed8: the amount of Gas unavailable to claim.
codesearchnet
def cross_section(verts, tris, plane_orig, plane_normal, **kwargs): mesh = TriangleMesh(verts, tris) plane = Plane(plane_orig, plane_normal) return cross_section_mesh(mesh, plane, **kwargs)
Compute the planar cross section of a mesh. This returns a set of polylines. Args: verts: Nx3 array of the vertex positions tris: Nx3 array of the triangle faces, containing vertex indices plane_orig: 3-vector indicating the plane origin plane_normal: 3-vector indicating the plane normal Returns: A list of Nx3 arrays, each representing a disconnected portion of the cross section as a polyline
codesearchnet
def set_position(self, x, y): self.attributes['x'] = str(x) self.attributes['y'] = str(y)
Sets the shape position. Args: x (int): the x coordinate y (int): the y coordinate
codesearchnet
def _convert(value, dtype=None): result = numpy_compat.np_asarray(value, dtype=dtype, order='C') if result.dtype.char == 'S' and result is not value: return numpy_compat.np_asarray(value, order='C', dtype=object) elif result.dtype.char == 'U' and result is not value: value = np.vectorize(lambda x: x.encode('utf8'))(value) return numpy_compat.np_asarray(value, order='C', dtype=object) elif result.dtype.char == 'U': return result.astype(np.bytes_) else: return result
Converts an arg to numpy, avoiding dangerous string and unicode dtypes. Numpy pads with zeros when using string and unicode dtypes if different components of a tensor have different lengths. This is bad: ignoring the padding is wrong for text data, and removing the padding is wrong for binary data. To avoid this bug, we redo the conversion using an object dtype. Additionally, we convert unicode strings to (byte-)strings for compatibility. Args: value: Value to convert to a numpy array. dtype: (Optional.) Desired NumPy type for the returned value. Returns: A numpy array.
github-repos
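A small worked example of the failure mode the docstring warns about: with a fixed-width bytes dtype, NumPy strips trailing null bytes when items are read back, which corrupts binary data, while an object dtype preserves them. The values here are illustrative only.

import numpy as np

raw = [b'ab\x00', b'wxyz']      # binary items of different lengths
fixed = np.asarray(raw)         # fixed-width 'S4' dtype, zero-padded internally
print(fixed[0])                 # b'ab' -- the trailing null byte is lost
obj = np.asarray(raw, dtype=object)
print(obj[0])                   # b'ab\x00' -- preserved with object dtype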
def _verify_pair(prev, curr): if prev._dimension != 2: raise ValueError("Curve not in R^2", prev) end = prev._nodes[:, -1] start = curr._nodes[:, 0] if not _helpers.vector_close(end, start): raise ValueError( "Not sufficiently close", "Consecutive sides do not have common endpoint", prev, curr, )
Verify a pair of sides share an endpoint. .. note:: This currently checks that edge endpoints match **exactly** but allowing some roundoff may be desired. Args: prev (.Curve): "Previous" curve at piecewise junction. curr (.Curve): "Next" curve at piecewise junction. Raises: ValueError: If the previous side is not in 2D. ValueError: If consecutive sides don't share an endpoint.
juraj-google-style
def apply_rules(self, rules, recursive=True): if recursive: new_args = [_apply_rules(arg, rules) for arg in self.args] new_kwargs = {key: _apply_rules(val, rules) for (key, val) in self.kwargs.items()} else: new_args = self.args new_kwargs = self.kwargs simplified = self.create(*new_args, **new_kwargs) return _apply_rules_no_recurse(simplified, rules)
Rebuild the expression while applying a list of rules The rules are applied against the instantiated expression, and any sub-expressions if `recursive` is True. Rule application is best thought of as a pattern-based substitution. This is different from the *automatic* rules that :meth:`create` uses (see :meth:`add_rule`), which are applied *before* expressions are instantiated. Args: rules (list or ~collections.OrderedDict): List of rules or dictionary mapping names to rules, where each rule is a tuple (:class:`Pattern`, replacement callable), cf. :meth:`apply_rule` recursive (bool): If true (default), apply rules to all arguments and keyword arguments of the expression. Otherwise, only the expression itself will be re-instantiated. If `rules` is a dictionary, the keys (rule names) are used only for debug logging, to allow an analysis of which rules lead to the final form of an expression.
codesearchnet
def create_checksum_object_from_iterator( itr, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM ): checksum_str = calculate_checksum_on_iterator(itr, algorithm) checksum_pyxb = d1_common.types.dataoneTypes.checksum(checksum_str) checksum_pyxb.algorithm = algorithm return checksum_pyxb
Calculate the checksum of an iterator. Args: itr: iterable Object which supports the iterator protocol. algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: Populated Checksum PyXB object.
juraj-google-style
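The same streaming pattern can be sketched with hashlib alone: feed each chunk from the iterator into the hash object and read the hex digest at the end. This helper is a stand-in for the calculation step, not the library's implementation, and the DataONE PyXB wrapping is omitted.

import hashlib

def checksum_of_iterator(itr, algorithm='MD5'):
    hasher = hashlib.new(algorithm.replace('-', '').lower())   # 'SHA-1' -> 'sha1'
    for chunk in itr:
        hasher.update(chunk)
    return hasher.hexdigest()

print(checksum_of_iterator([b'hello ', b'world'], 'MD5'))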
def service_configuration_check(config): ipv4_enabled = config.getboolean('daemon', 'ipv4') ipv6_enabled = config.getboolean('daemon', 'ipv6') services = config.sections() services.remove('daemon') ip_prefixes = [] for service in services: for option, getter in SERVICE_OPTIONS_TYPE.items(): try: getattr(config, getter)(service, option) except configparser.NoOptionError as error: if option not in SERVICE_OPTIONAL_OPTIONS: raise ValueError(error) except configparser.Error as error: raise ValueError(error) except ValueError as exc: msg = ("invalid data for '{opt}' option in service check " "{name}: {err}" .format(opt=option, name=service, err=exc)) raise ValueError(msg) if (config.get(service, 'on_disabled') != 'withdraw' and config.get(service, 'on_disabled') != 'advertise'): msg = ("'on_disabled' option has invalid value ({val}) for " "service check {name}, 'on_disabled option should be set " "either to 'withdraw' or to 'advertise'" .format(name=service, val=config.get(service, 'on_disabled'))) raise ValueError(msg) ip_prefixes.append(config.get(service, 'ip_prefix')) if not valid_ip_prefix(config.get(service, 'ip_prefix')): msg = ("invalid value ({val}) for 'ip_prefix' option in service " "check {name}. It should be an IP PREFIX in form of " "ip/prefixlen." .format(name=service, val=config.get(service, 'ip_prefix'))) raise ValueError(msg) _ip_prefix = ipaddress.ip_network(config.get(service, 'ip_prefix')) if not ipv6_enabled and _ip_prefix.version == 6: raise ValueError("IPv6 support is disabled in " "anycast-healthchecker while there is an IPv6 " "prefix configured for {name} service check" .format(name=service)) if not ipv4_enabled and _ip_prefix.version == 4: raise ValueError("IPv4 support is disabled in " "anycast-healthchecker while there is an IPv4 " "prefix configured for {name} service check" .format(name=service)) cmd = shlex.split(config.get(service, 'check_cmd')) try: proc = subprocess.Popen(cmd) proc.kill() except (OSError, subprocess.SubprocessError) as exc: msg = ("failed to run check command '{cmd}' for service check " "{name}: {err}" .format(name=service, cmd=config.get(service, 'check_cmd'), err=exc)) raise ValueError(msg) occurrences_of_ip_prefixes = Counter(ip_prefixes) for ip_prefix, counter in occurrences_of_ip_prefixes.items(): if counter > 1: raise ValueError("{ip} is used by {c} service checks" .format(ip=ip_prefix, c=counter))
Perform a sanity check against options for each service check. Arguments: config (obj): A configparser object which holds our configuration. Returns: None if all sanity checks are successfully passed otherwise raises a ValueError exception.
juraj-google-style
def make_tests(self, sdkobject, testcase): tests = dict() attributes = sdkobject.get_attributes() for attribute in attributes: if attribute.local_name in self.IGNORED_ATTRIBUTES: continue for function_name, conditions in self._attributes_registry.items(): if self.does_attribute_meet_condition(attribute, conditions): (test_name, test_func) = self._create_test(testcase=testcase, sdkobject=sdkobject, function_name=function_name, attribute=attribute) tests[test_name] = test_func for function_name, infos in self._object_registry.items(): (test_name, test_func) = self._create_test(testcase=testcase, sdkobject=sdkobject, function_name=function_name) tests[test_name] = test_func return tests
Make all tests that should be run for the given object in the specified testcase Args: sdkobject: the sdk object testcase: the test case Returns: It returns a dictionary of all tests to run
juraj-google-style
def ones(shape, dtype=None, **kwargs): data = np.ones(shape, dtype) return dc.array(data, **kwargs)
Create an array of given shape and type, filled with ones. Args: shape (sequence of ints): 2D shape of the array. dtype (data-type, optional): Desired data-type for the array. kwargs (optional): Other arguments of the array (*coords, attrs, and name). Returns: array (decode.array): Decode array filled with ones.
juraj-google-style
def flatten_top_level_keys(data, top_level_keys): flattened_data = {} for top_level_key in top_level_keys: if data[top_level_key] is None: flattened_data[top_level_key] = None else: for key in data[top_level_key]: flattened_data['{}_-_{}'.format(top_level_key, key)] = data[top_level_key][key] return flattened_data
Helper method to flatten a nested dict of dicts (one level) Example: {'a': {'b': 'bbb'}} becomes {'a_-_b': 'bbb'} The separator '_-_' gets formatted later for the column headers Args: data: the dict to flatten top_level_keys: a list of the top level keys to flatten ('a' in the example above)
juraj-google-style
def AsDict(self): sources = [] for source in self.sources: source_definition = {'type': source.type_indicator, 'attributes': source.AsDict()} if source.supported_os: source_definition['supported_os'] = source.supported_os if source.conditions: source_definition['conditions'] = source.conditions sources.append(source_definition) artifact_definition = {'name': self.name, 'doc': self.description, 'sources': sources} if self.labels: artifact_definition['labels'] = self.labels if self.supported_os: artifact_definition['supported_os'] = self.supported_os if self.provides: artifact_definition['provides'] = self.provides if self.conditions: artifact_definition['conditions'] = self.conditions if self.urls: artifact_definition['urls'] = self.urls return artifact_definition
Represents an artifact as a dictionary. Returns: dict[str, object]: artifact attributes.
codesearchnet
def update_port_monitor(self, resource, timeout=-1): data = resource.copy() if 'type' not in data: data['type'] = 'port-monitor' uri = "{}{}".format(self.data["uri"], self.PORT_MONITOR_PATH) return self._helper.update(data, uri=uri, timeout=timeout)
Updates the port monitor configuration of a logical interconnect. Args: resource: Port monitor configuration. Returns: dict: Port monitor configuration.
juraj-google-style
def get_signature_request_list(self, page=1, ux_version=None): request = self._get_request() parameters = { "page": page } if ux_version is not None: parameters['ux_version'] = ux_version return request.get(self.SIGNATURE_REQUEST_LIST_URL, parameters=parameters)
Get a list of SignatureRequest that you can access This includes SignatureRequests you have sent as well as received, but not ones that you have been CCed on. Args: page (int, optional): Which page number of the SignatureRequest list to return. Defaults to 1. ux_version (int): UX version, either 1 (default) or 2. Returns: A ResourceList object
juraj-google-style
def consume(self, source): manifest = OrderedDict() rules = parse_stylesheet( source, skip_comments=True, skip_whitespace=True, ) for rule in rules: name = self.digest_prelude(rule) if not name.startswith(RULE_BASE_PREFIX): continue properties = self.digest_content(rule) manifest[name] = properties return manifest
Parse source and consume tokens from tinycss2. Arguments: source (string): Source content to parse. Returns: dict: Retrieved rules.
juraj-google-style
def _validate_isvalid_orcid(self, isvalid_orcid, field, value): if (isvalid_orcid and ('ORCID' in value)): try: res = search_orcid(value['ORCID']) except ConnectionError: warn('network not available, ORCID not validated.') return except HTTPError: self._error(field, ('ORCID incorrect or invalid for ' + value['name'])) return family_name = res['name']['family-name']['value'] given_name = res['name']['given-names']['value'] if (not compare_name(given_name, family_name, value['name'])): self._error(field, ((('Name and ORCID do not match. Name supplied: ' + value['name']) + '. Name associated with ORCID: ') + ' '.join([given_name, family_name])))
Checks for valid ORCID if given. Args: isvalid_orcid (`bool`): flag from schema indicating ORCID to be checked. field (`str`): 'author' value (`dict`): dictionary of author metadata. The rule's arguments are validated against this schema: {'isvalid_orcid': {'type': 'bool'}, 'field': {'type': 'str'}, 'value': {'type': 'dict'}}
codesearchnet
def parse_variable(self, variable): data = None if (variable is not None): variable = variable.strip() if re.match(self._variable_match, variable): var = re.search(self._variable_parse, variable) data = {'root': var.group(0), 'job_id': var.group(2), 'name': var.group(3), 'type': var.group(4)} return data
Method to parse an input or output variable. **Example Variable**:: #App:1234:output!String Args: variable (string): The variable name to parse. Returns: (dictionary): Result of parsed string.
codesearchnet
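A rough standalone version of that parse, using an illustrative regex for the #App:1234:output!String format shown in the docstring (the real class keeps its own precompiled patterns):

import re

VARIABLE_PARSE = re.compile(r'^#(App):(\d+):([A-Za-z0-9_.-]+)!(\w+)$')

def parse_variable(variable):
    if variable is None:
        return None
    match = VARIABLE_PARSE.search(variable.strip())
    if not match:
        return None
    return {'root': match.group(0), 'job_id': match.group(2),
            'name': match.group(3), 'type': match.group(4)}

print(parse_variable('#App:1234:output!String'))
# {'root': '#App:1234:output!String', 'job_id': '1234', 'name': 'output', 'type': 'String'}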
def distribute_equally(daily_data, divide=False): index = hourly_index(daily_data.index) hourly_data = daily_data.reindex(index) hourly_data = hourly_data.groupby(hourly_data.index.day).transform((lambda x: x.fillna(method='ffill', limit=23))) if divide: hourly_data /= 24 return hourly_data
Obtains hourly values by equally distributing the daily values. Args: daily_data: daily values divide: if True, divide resulting values by the number of hours in order to preserve the daily sum (required e.g. for precipitation). Returns: Equally distributed hourly values.
codesearchnet
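A condensed pandas sketch of the same idea: reindex the daily series onto an hourly index, forward-fill within each day, and optionally divide by 24 so the daily sum is preserved.

import pandas as pd

daily = pd.Series([24.0, 48.0],
                  index=pd.date_range('2021-01-01', periods=2, freq='D'))
hourly_index = pd.date_range(daily.index[0], periods=len(daily) * 24, freq='h')
hourly = daily.reindex(hourly_index).ffill(limit=23)   # 23 fills cover the rest of each day
print(hourly.sum())          # 1728.0 = (24.0 + 48.0) * 24 without dividing
print((hourly / 24).sum())   # 72.0 -- daily sum preserved, as with divide=True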
def get_parameter_names(self, include_frozen=False): if include_frozen: return self.parameter_names return tuple((p for (p, f) in zip(self.parameter_names, self.unfrozen_mask) if f))
Get a list of the parameter names Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``)
codesearchnet
def redraw(self, reset_camera=False): self.ren.RemoveAllViewProps() self.picker = None self.add_picker_fixed() self.helptxt_mapper = vtk.vtkTextMapper() tprops = self.helptxt_mapper.GetTextProperty() tprops.SetFontSize(14) tprops.SetFontFamilyToTimes() tprops.SetColor(0, 0, 0) if (self.structure is not None): self.set_structure(self.structure, reset_camera) self.ren_win.Render()
Redraw the render window. Args: reset_camera: Set to True to reset the camera to a pre-determined default for each structure. Defaults to False.
codesearchnet
def Convert(self, metadata, stat_entry, token=None): return self.BatchConvert([(metadata, stat_entry)], token=token)
Converts StatEntry to ExportedFile. Does nothing if StatEntry corresponds to a registry entry and not to a file. Args: metadata: ExportedMetadata to be used for conversion. stat_entry: StatEntry to be converted. token: Security token. Returns: List or generator with resulting RDFValues. Empty list if StatEntry corresponds to a registry entry and not to a file.
juraj-google-style
def port_get_tag(port): cmd = 'ovs-vsctl get port {0} tag'.format(port) result = __salt__['cmd.run_all'](cmd) retcode = result['retcode'] stdout = result['stdout'] return _stdout_list_split(retcode, stdout)
Lists tags of the port. Args: port: A string - port name. Returns: List of tags (or empty list), False on failure. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' openvswitch.port_get_tag tap0
codesearchnet
def _process_rules(self, rules): cidr = [] non_cidr = [] for rule in rules: if ('.' in rule['app']): self.log.debug('Custom CIDR rule: %s', rule) self._validate_cidr(rule) cidr.append(rule) else: self.log.debug('SG reference rule: %s', rule) non_cidr.append(rule) self.log.debug('Custom CIDR rules: %s', cidr) self.log.debug('SG reference rules: %s', non_cidr) return (non_cidr, cidr)
Process rules into cidr and non-cidr lists. Args: rules (list): Allowed Security Group ports and protocols. Returns: (list, list): Security Group reference rules and custom CIDR rules.
codesearchnet
def Dump(obj): text = yaml.safe_dump(obj, default_flow_style=False, allow_unicode=True) if compatibility.PY2: text = text.decode('utf-8') return text
Stringifies a Python object into its YAML representation. Args: obj: A Python object to convert to YAML. Returns: A YAML representation of the given object.
codesearchnet
def score_braycurtis(self, term1, term2, **kwargs): t1_kde = self.kde(term1, **kwargs) t2_kde = self.kde(term2, **kwargs) return 1-distance.braycurtis(t1_kde, t2_kde)
Compute a weighting score based on the Bray-Curtis distance between the kernel density estimates of two terms. Args: term1 (str) term2 (str) Returns: float
juraj-google-style
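The score is simply one minus SciPy's Bray-Curtis distance between the two density estimates; with any two equal-length vectors standing in for the KDEs:

import numpy as np
from scipy.spatial import distance

kde1 = np.array([0.1, 0.4, 0.5])
kde2 = np.array([0.2, 0.3, 0.5])
score = 1 - distance.braycurtis(kde1, kde2)
print(score)   # 0.9 -- identical distributions would score 1.0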
def dagify_min_edge(g): while (not nx.is_directed_acyclic_graph(g)): cycle = next(nx.simple_cycles(g)) scores = [] edges = [] for (i, j) in zip(cycle[:-1], cycle[1:]): edges.append((i, j)) scores.append(g[i][j]['weight']) (i, j) = edges[scores.index(min(scores))] gc = deepcopy(g) gc.remove_edge(i, j) gc.add_edge(j, i) if (len(list(nx.simple_cycles(gc))) < len(list(nx.simple_cycles(g)))): g.add_edge(j, i, weight=min(scores)) g.remove_edge(i, j) return g
Input a graph and output a DAG. The heuristic is to reverse the edge with the lowest score of the cycle if possible, else remove it. Args: g (networkx.DiGraph): Graph to modify to output a DAG Returns: networkx.DiGraph: DAG made out of the input graph.
codesearchnet
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): values_dict = {} if registry_key.number_of_values == 0: values_dict['Value'] = 'No values stored in key.' else: for registry_value in registry_key.GetValues(): value_name = registry_value.name or '(default)' if registry_value.data is None: value_string = '[{0:s}] Empty'.format( registry_value.data_type_string) elif registry_value.DataIsString(): value_string = registry_value.GetDataAsObject() value_string = '[{0:s}] {1:s}'.format( registry_value.data_type_string, value_string) elif registry_value.DataIsInteger(): value_integer = registry_value.GetDataAsObject() value_string = '[{0:s}] {1:d}'.format( registry_value.data_type_string, value_integer) elif registry_value.DataIsMultiString(): multi_string = registry_value.GetDataAsObject() if not isinstance(multi_string, (list, tuple)): value_string = '[{0:s}]'.format(registry_value.data_type_string) else: value_string = '[{0:s}] {1:s}'.format( registry_value.data_type_string, ''.join(multi_string)) else: value_string = '[{0:s}]'.format(registry_value.data_type_string) values_dict[value_name] = value_string event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.regvalue = values_dict event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
juraj-google-style
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None): min_patches = images_kwargs.get('min_patches', None) or self.min_patches max_patches = images_kwargs.get('max_patches', None) or self.max_patches patch_size = images_kwargs.get('size', None) or self.size crop_to_patches = images_kwargs.get('crop_to_patches', None) or self.crop_to_patches num_patches = 1 if crop_to_patches and max_patches > 1: num_columns, num_rows = get_optimal_tiled_canvas((height, width), (patch_size['height'], patch_size['width']), min_patches, max_patches) num_patches += num_columns * num_rows return num_patches
A utility that returns the number of patches for a given image size. Args: height (`int`): Height of the input image. width (`int`): Width of the input image. images_kwargs (`dict`, *optional*) Any kwargs to override defaults of the image processor. Returns: `int`: Number of patches per image.
github-repos
def add_relationship(self, txn_id, predecessors): all_pred = set(predecessors) for pred in predecessors: all_pred.update(self._predecessors_by_id[pred]) self._predecessors_by_id[txn_id] = all_pred
Add a predecessor-successor relationship between one txn id and a set of predecessors. Args: txn_id (str): The transaction id of the transaction. predecessors (set): The transaction ids of the transaction's predecessors Returns: None
juraj-google-style
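A minimal sketch of the bookkeeping described above: each new transaction's predecessor set is the union of its direct predecessors and all of their already-recorded predecessors, so later lookups stay flat.

predecessors_by_id = {'a': set(), 'b': {'a'}}

def add_relationship(txn_id, predecessors):
    all_pred = set(predecessors)
    for pred in predecessors:
        all_pred.update(predecessors_by_id[pred])
    predecessors_by_id[txn_id] = all_pred

add_relationship('c', {'b'})
print(predecessors_by_id['c'])   # {'a', 'b'} -- transitive closure through 'b'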
def hpo_terms(self, query=None, hpo_term=None, text=None, limit=None): query_dict = {} search_term = None if query: query_dict = {'$or': [{'hpo_id': {'$regex': query, '$options': 'i'}}, {'description': {'$regex': query, '$options': 'i'}}]} search_term = query elif text: new_string = '' for (i, word) in enumerate(text.split(' ')): if (i == 0): new_string += word else: new_string += ' "{0}"'.format(word) LOG.info('Search HPO terms with %s', new_string) query_dict['$text'] = {'$search': new_string} search_term = text elif hpo_term: query_dict['hpo_id'] = hpo_term search_term = hpo_term limit = (limit or int(100000000000.0)) res = self.hpo_term_collection.find(query_dict).limit(limit).sort('hpo_number', ASCENDING) LOG.info('Found {0} terms with search word {1}'.format(res.count(), search_term)) return res
Return all HPO terms If a query is sent hpo_terms will try to match with regex on term or description. Args: query(str): Part of a hpoterm or description hpo_term(str): Search for a specific hpo term limit(int): the number of desired results Returns: result(pymongo.Cursor): A cursor with hpo terms
codesearchnet
def eval(self, expr, **kwargs): columns = self.index if self._is_transposed else self.columns index = self.columns if self._is_transposed else self.index columns_copy = pandas.DataFrame(columns=self.columns) columns_copy = columns_copy.eval(expr, inplace=False, **kwargs) expect_series = isinstance(columns_copy, pandas.Series) def eval_builder(df, **kwargs): kwargs.pop("axis", None) df.columns = columns result = df.eval(expr, inplace=False, **kwargs) return result func = self._build_mapreduce_func(eval_builder, axis=1, **kwargs) new_data = self._map_across_full_axis(1, func) if expect_series: new_columns = [columns_copy.name] new_index = index else: new_columns = columns_copy.columns new_index = self.index return self.__constructor__(new_data, new_index, new_columns)
Returns a new QueryCompiler with expr evaluated on columns. Args: expr: The string expression to evaluate. Returns: A new QueryCompiler with new columns after applying expr.
juraj-google-style
def str_to_etree(xml_str, encoding='utf-8'): parser = xml.etree.ElementTree.XMLParser(encoding=encoding) return xml.etree.ElementTree.fromstring(xml_str, parser=parser)
Deserialize API XML doc to an ElementTree. Args: xml_str: bytes DataONE API XML doc encoding: str Decoder to use when converting the XML doc ``bytes`` to a Unicode str. Returns: ElementTree: Matching the API version of the XML doc.
codesearchnet
def summarize_variables(var_list=None, tag=None): if var_list is None: var_list = tf.trainable_variables() if tag is None: tag = "training_variables/" name_to_var = {v.name: v for v in var_list} for v_name in list(name_to_var): v = name_to_var[v_name] tf.summary.histogram(tag + v_name, v)
Summarize the variables. Args: var_list: a list of variables; defaults to trainable_variables. tag: name scope of the summary; defaults to training_variables/.
juraj-google-style
def assertAllGreater(self, a, comparison_target): a, comparison_target = self.evaluate_if_both_tensors(a, comparison_target) a = self._GetNdArray(a) self.assertGreater(np.min(a), comparison_target)
Assert element values are all greater than a target value. Args: a: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). comparison_target: The target value of comparison.
github-repos
def requested_test_names_dict(self): return {'Requested Tests': copy.deepcopy(self.requested)}
Gets the requested test names of a test run in a dict format. Note a test can be requested multiple times, so there can be duplicated values. Returns: A dict with a key and the list of strings.
github-repos
def _truncate(self, new_rank: int) -> 'DynamicRaggedShape.Spec': if self.rank is None: return self._set_rank_if_unknown(new_rank)._truncate(new_rank) if new_rank == 0: return DynamicRaggedShape.Spec._from_tensor_shape([], 0, self.dtype) if new_rank == 1: vector_size = self._dimension(0) return DynamicRaggedShape.Spec._from_tensor_shape([vector_size], 0, self.dtype) if new_rank < self.num_row_partitions + 1: new_row_partitions = self._row_partitions[:new_rank - 1] new_static_inner_shape = tensor_shape.TensorShape([new_row_partitions[-1].nvals]) return DynamicRaggedShape.Spec(row_partitions=new_row_partitions, static_inner_shape=new_static_inner_shape, dtype=self.dtype) else: remainder = new_rank - self.num_row_partitions new_static_inner_shape = self._static_inner_shape[:remainder] return DynamicRaggedShape.Spec(row_partitions=self._row_partitions, static_inner_shape=new_static_inner_shape, dtype=self.dtype)
Truncate a ragged shape spec. For example, if the original spec s was for a shape: [3, [4, 1], 2, 7] Then truncate_dynamic_ragged_shape_spec(s, 3) is a spec for: [3, [4, 1], 2] Args: new_rank: the new rank Returns: A truncated DynamicRaggedShape.Spec.
github-repos
def getKeyName(username, date, blob_key): sep = FileMetadata.__SEP return str(((((username + sep) + str(date)) + sep) + blob_key))
Returns the internal key for a particular item in the database. Our items are stored with keys of the form 'user/date/blob_key' ('/' is not the real separator, but __SEP is). Args: username: The given user's e-mail address. date: A datetime object representing the date and time that an input file was uploaded to this app. blob_key: The blob key corresponding to the location of the input file in the Blobstore. Returns: The internal key for the item specified by (username, date, blob_key).
codesearchnet
def terminate_ec2_instance(client, resource): instance = EC2Instance.get(resource.id) if (instance.state == 'terminated'): return (ActionStatus.IGNORED, {}) client.terminate_instances(InstanceIds=[resource.id]) return (ActionStatus.SUCCEED, {'instance_type': resource.instance_type, 'public_ip': resource.public_ip})
Terminate an EC2 Instance This function will terminate an EC2 Instance. Args: client (:obj:`boto3.session.Session.client`): A boto3 client object resource (:obj:`Resource`): The resource object to terminate Returns: `ActionStatus`
codesearchnet
def with_min_execution_time(self, min_micros=0, min_accelerator_micros=0, min_cpu_micros=0): self._options['min_micros'] = min_micros self._options['min_accelerator_micros'] = min_accelerator_micros self._options['min_cpu_micros'] = min_cpu_micros return self
Only show profiler nodes consuming no less than 'min_micros'. Args: min_micros: Only show profiler nodes with execution time no less than this. It sums accelerator and cpu times. min_accelerator_micros: Only show profiler nodes spend no less than this time on accelerator (e.g. GPU). min_cpu_micros: Only show profiler nodes spend no less than this time on cpu. Returns: self
github-repos
def read(self, size=None): data = b'' while ((size and (len(data) < size)) and (self._current_offset < self.uncompressed_data_size)): member = self._GetMemberForOffset(self._current_offset) member_offset = (self._current_offset - member.uncompressed_data_offset) data_read = member.ReadAtOffset(member_offset, size) if data_read: self._current_offset += len(data_read) data = b''.join([data, data_read]) return data
Reads a byte string from the gzip file at the current offset. The function will read a byte string up to the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed.
codesearchnet
def rebin(d, n_x, n_y=None):
    if d.ndim == 2:
        if n_y is None:
            n_y = 1
        if n_x is None:
            n_x = 1
        # Trim so each axis is a multiple of the bin size, then average blocks.
        d = d[:int(d.shape[0] // n_x) * n_x, :int(d.shape[1] // n_y) * n_y]
        d = d.reshape((d.shape[0] // n_x, n_x, d.shape[1] // n_y, n_y))
        d = d.mean(axis=3)
        d = d.mean(axis=1)
    elif d.ndim == 1:
        d = d[:int(d.shape[0] // n_x) * n_x]
        d = d.reshape((d.shape[0] // n_x, n_x))
        d = d.mean(axis=1)
    else:
        raise RuntimeError('Only NDIM <= 2 supported')
    return d
Rebin data by averaging bins together Args: d (np.array): data n_x (int): number of bins in x dir to rebin into one n_y (int): number of bins in y dir to rebin into one Returns: d: rebinned data, with the x (and, for 2D input, y) axis reduced by a factor of n_x (n_y) via block averaging
codesearchnet
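A quick NumPy-only worked example of the block averaging that rebin performs, shown here for a 4x4 array averaged in 2x2 blocks:

import numpy as np

d = np.arange(16, dtype=float).reshape(4, 4)
blocks = d.reshape(2, 2, 2, 2)            # (rows // 2, 2, cols // 2, 2)
out = blocks.mean(axis=3).mean(axis=1)    # average over each 2x2 block
print(out)
# [[ 2.5  4.5]
#  [10.5 12.5]]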
def load_glove(file): model = {} with open(file, encoding="utf8", errors='ignore') as f: for line in f: line = line.split(' ') word = line[0] vector = np.array([float(val) for val in line[1:]]) model[word] = vector return model
Loads GloVe vectors in numpy array. Args: file (str): a path to a glove file. Return: dict: a dict of numpy arrays.
juraj-google-style
def get_all_options(self, drop_default=False, add_extra_args_fn: Optional[Callable[[_BeamArgumentParser], None]]=None, retain_unknown_options=False) -> Dict[str, Any]: subset = {} parser = _BeamArgumentParser(allow_abbrev=False) for cls in PipelineOptions.__subclasses__(): subset[str(cls)] = cls for cls in subset.values(): cls._add_argparse_args(parser) if add_extra_args_fn: add_extra_args_fn(parser) known_args, unknown_args = parser.parse_known_args(self._flags) if retain_unknown_options: if unknown_args: _LOGGER.warning('Unknown pipeline options received: %s. Ignore if flags are used for internal purposes.' % ','.join(unknown_args)) seen = set() def add_new_arg(arg, **kwargs): if arg not in seen: parser.add_argument(arg, **kwargs) seen.add(arg) i = 0 while i < len(unknown_args): if unknown_args[i] == '--': break if not unknown_args[i].startswith('-'): i += 1 continue if i + 1 >= len(unknown_args) or unknown_args[i + 1].startswith('-'): split = unknown_args[i].split('=', 1) if len(split) == 1: add_new_arg(unknown_args[i], action='store_true') else: add_new_arg(split[0], type=str) i += 1 elif unknown_args[i].startswith('--'): add_new_arg(unknown_args[i], type=str) i += 2 else: _LOGGER.warning('Discarding flag %s, single dash flags are not allowed.', unknown_args[i]) i += 2 continue parsed_args, _ = parser.parse_known_args(self._flags) else: if unknown_args: _LOGGER.warning('Discarding unparseable args: %s', unknown_args) parsed_args = known_args result = vars(parsed_args) overrides = self._all_options.copy() for k in list(result): overrides.pop(k, None) if k in self._all_options: result[k] = self._all_options[k] if drop_default and parser.get_default(k) == result[k] and (not isinstance(parser.get_default(k), ValueProvider)): del result[k] if overrides: if retain_unknown_options: result.update(overrides) else: _LOGGER.warning('Discarding invalid overrides: %s', overrides) return result
Returns a dictionary of all defined arguments. Returns a dictionary of all defined arguments (arguments that are defined in any subclass of PipelineOptions) into a dictionary. Args: drop_default: If set to true, options that are equal to their default values, are not returned as part of the result dictionary. add_extra_args_fn: Callback to populate additional arguments, can be used by runner to supply otherwise unknown args. retain_unknown_options: If set to true, options not recognized by any known pipeline options class will still be included in the result. If set to false, they will be discarded. Returns: Dictionary of all args and values.
github-repos
def add_user(self, user_obj): LOG.info("Adding user %s to the database", user_obj['email']) if not '_id' in user_obj: user_obj['_id'] = user_obj['email'] try: self.user_collection.insert_one(user_obj) LOG.debug("User inserted") except DuplicateKeyError as err: raise IntegrityError("User {} already exists in database".format(user_obj['email'])) return user_obj
Add a user object to the database Args: user_obj(scout.models.User): A dictionary with user information Returns: user_info(dict): a copy of what was inserted
juraj-google-style
def get_models(self, model, page=None): if page is not None: return self._store.find_all(self._get_model_class(model), params={'page': int(page)}) else: return self._store.find_all(self._get_model_class(model))
Get all the models from the server. Args: model (string): The class as a string. page (string, optional): The page number as a string Returns: list: A list of instances of the requested model.
juraj-google-style
def add_number_parameters(self, number): if isinstance(number, list): for x in number: self.add_number_parameters(x) return self._parameters.append("{ \"value\": " + str(number) + " }")
Add given number parameters to the internal list. Args: number (list of int or list of float): A number or list of numbers to add to the parameters.
juraj-google-style
def aggregate_and_return_name_for_output(self, fused_op_name, output_index, out_graphdef): flattened = self.flatten_nodes() if self.aggregation == OpHint.AGGREGATE_FIRST or self.aggregation == OpHint.AGGREGATE_LAST: assert len(flattened) == 1 if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK: temp_op = _LiteSingleOperand(flattened[0]) return temp_op.aggregate_and_return_name_for_output(fused_op_name, output_index, out_graphdef) else: stack_node = _node_def_pb2.NodeDef() stack_node.op = 'Unpack' stack_node.name = 'OpHintUnstack-%s' % flattened[0].name stack_node.attr['num'].i = len(flattened) output_type = flattened[0].attr['T'].type stack_node.attr['T'].type = output_type stack_node.input.append(_tensorflow_output_name(fused_op_name, output_index)) out_graphdef.node.extend([stack_node]) for idx, discrete in enumerate(flattened): output_node = _copy.deepcopy(discrete) del output_node.input[:] output_node.input.append(_tensorflow_output_name(stack_node.name, idx)) out_graphdef.node.extend([output_node]) return output_type
This adds to `out_graphdef` all the unaggregated outputs. I.e. we are outputting from a fused stub, but we need to make it compatible with the unfused original graph so we insert an unpack. Ideally in a later stage the unpack -> pack sequences will be removed. Args: fused_op_name: The name of the stub we are in the process of fusing. output_index: The output index this object represents. out_graphdef: The graphdef we are in the process of building. Returns: The type of the aggregated output (so we can finish building the stub op).
github-repos
def fa_peft_integration_check(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, target_dtype: Optional[torch.dtype]=None): if target_dtype is None: return (query, key, value) input_dtype = query.dtype if input_dtype == torch.float32: logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query = query.to(target_dtype) key = key.to(target_dtype) value = value.to(target_dtype) return (query, key, value)
PEFT usually casts the layer norms in float32 for training stability reasons, therefore the input hidden states get silently cast to float32. Hence, we need to cast them back to float16 / bfloat16 just to be sure everything works as expected. This might slow down training & inference so it is recommended to not cast the LayerNorms! Args: query (`torch.Tensor`): Input query states to be passed to Flash Attention API key (`torch.Tensor`): Input key states to be passed to Flash Attention API value (`torch.Tensor`): Input value states to be passed to Flash Attention API target_dtype (`torch.dtype`, *optional*): The dtype to convert the attention tensors to. Conversion can be ignored by not providing the target dtype.
github-repos
def group_by(self, key, field=(lambda x: x.xfer)): return Transactions([t for t in self.trans if (field(t) == key)])
Returns all transactions whose given ``field`` matches ``key``. Returns: A ``Transactions`` object.
codesearchnet
def add_server(self, name, prefer=False): if ((not name) or re.match('^[\\s]+$', name)): raise ValueError('ntp server name must be specified') if prefer: name = ('%s prefer' % name) cmd = self.command_builder('ntp server', value=name) return self.configure(cmd)
Add or update an NTP server entry to the node config Args: name (string): The IP address or FQDN of the NTP server. prefer (bool): Sets the NTP server entry as preferred if True. Returns: True if the operation succeeds, otherwise False.
codesearchnet
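A hedged usage sketch for add_server; the ``connect_to`` profile name is hypothetical, and the ``node.api('ntp')`` accessor follows the usual pyeapi convention this module appears to be written for:

import pyeapi

node = pyeapi.connect_to('veos01')  # hypothetical connection profile from ~/.eapi.conf
ntp = node.api('ntp')
if ntp.add_server('pool.ntp.org', prefer=True):
    print('NTP server configured')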
def poll_for_job_completion(runner, result, duration, state_update_callback=None):
    if result.state == PipelineState.DONE:
        return
    last_message_time = None
    current_seen_messages = set()
    last_error_rank = float('-inf')
    last_error_msg = None
    last_job_state = None
    final_countdown_timer_secs = 50.0
    sleep_secs = 5.0

    def rank_error(msg):
        if 'work item was attempted' in msg:
            return -1
        elif 'Traceback' in msg:
            return 1
        return 0
    if duration:
        start_secs = time.time()
        duration_secs = duration // 1000  # ``duration`` is given in milliseconds (see docstring)
    job_id = result.job_id()
    while True:
        response = runner.dataflow_client.get_job(job_id)
        if response.currentState is not None:
            if response.currentState != last_job_state:
                if state_update_callback:
                    state_update_callback(response.currentState)
                _LOGGER.info('Job %s is in state %s', job_id, response.currentState)
                last_job_state = response.currentState
            if str(response.currentState) != 'JOB_STATE_RUNNING':
                if final_countdown_timer_secs <= 0.0 or last_error_msg is not None or str(response.currentState) == 'JOB_STATE_DONE' or (str(response.currentState) == 'JOB_STATE_CANCELLED') or (str(response.currentState) == 'JOB_STATE_UPDATED') or (str(response.currentState) == 'JOB_STATE_DRAINED'):
                    break
                if str(response.currentState) not in ('JOB_STATE_PENDING', 'JOB_STATE_QUEUED'):
                    sleep_secs = 1.0
                    final_countdown_timer_secs -= sleep_secs
        time.sleep(sleep_secs)
        page_token = None
        while True:
            messages, page_token = runner.dataflow_client.list_messages(job_id, page_token=page_token, start_time=last_message_time)
            for m in messages:
                message = '%s: %s: %s' % (m.time, m.messageImportance, m.messageText)
                if not last_message_time or m.time > last_message_time:
                    last_message_time = m.time
                    current_seen_messages = set()
                if message in current_seen_messages:
                    continue
                else:
                    current_seen_messages.add(message)
                if m.messageImportance is None:
                    continue
                message_importance = str(m.messageImportance)
                if message_importance == 'JOB_MESSAGE_DEBUG' or message_importance == 'JOB_MESSAGE_DETAILED':
                    _LOGGER.debug(message)
                elif message_importance == 'JOB_MESSAGE_BASIC':
                    _LOGGER.info(message)
                elif message_importance == 'JOB_MESSAGE_WARNING':
                    _LOGGER.warning(message)
                elif message_importance == 'JOB_MESSAGE_ERROR':
                    _LOGGER.error(message)
                    if rank_error(m.messageText) >= last_error_rank:
                        last_error_rank = rank_error(m.messageText)
                        last_error_msg = m.messageText
                else:
                    _LOGGER.info(message)
            if not page_token:
                break
        if duration:
            passed_secs = time.time() - start_secs
            if passed_secs > duration_secs:
                _LOGGER.warning('Timing out on waiting for job %s after %d seconds', job_id, passed_secs)
                break
    result._job = response
    runner.last_error_msg = last_error_msg
Polls for the specified job to finish running (successfully or not). Updates the result with the new job information before returning. Args: runner: DataflowRunner instance to use for polling job state. result: DataflowPipelineResult instance used for job information. duration (int): The time to wait (in milliseconds) for job to finish. If it is set to :data:`None`, it will wait indefinitely until the job is finished.
github-repos
def set_conf_str(conf, optstrs): falsy = ['0', 'no', 'n', 'off', 'false', 'f'] bool_actions = ['store_true', 'store_false', internal.Switch] for optstr in optstrs: (opt, val) = optstr.split('=', 1) (sec, opt) = opt.split('.', 1) if (sec not in conf): raise error.SectionError(sec) if (opt not in conf[sec]): raise error.OptionError(opt) meta = conf[sec].def_[opt] if (meta.default is None): if ('type' in meta.cmd_kwargs): cast = meta.cmd_kwargs['type'] else: act = meta.cmd_kwargs.get('action') cast = (bool if (act in bool_actions) else str) else: cast = type(meta.default) if ((cast is bool) and (val.lower() in falsy)): val = '' conf[sec][opt] = cast(val)
Set options from a list of section.option=value strings. Args: conf (:class:`~loam.manager.ConfigurationManager`): the conf to update. optstrs (list of str): the list of 'section.option=value' formatted strings.
codesearchnet
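A small illustration of set_conf_str, assuming a conf object whose hypothetical ``core`` section exposes a ``path`` and a ``verbose`` option:

# 'core.path' is cast with the option's declared type; 'core.verbose=no' is mapped to
# an empty string because 'no' is falsy, so the subsequent bool('') cast yields False.
set_conf_str(conf, ['core.path=/tmp/run', 'core.verbose=no'])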
def deconstruct_single_qubit_matrix_into_angles(mat: np.ndarray) -> Tuple[(float, float, float)]: right_phase = (cmath.phase((mat[(0, 1)] * np.conj(mat[(0, 0)]))) + math.pi) mat = np.dot(mat, _phase_matrix((- right_phase))) bottom_phase = cmath.phase((mat[(1, 0)] * np.conj(mat[(0, 0)]))) mat = np.dot(_phase_matrix((- bottom_phase)), mat) rotation = math.atan2(abs(mat[(1, 0)]), abs(mat[(0, 0)])) mat = np.dot(_rotation_matrix((- rotation)), mat) diagonal_phase = cmath.phase((mat[(1, 1)] * np.conj(mat[(0, 0)]))) return ((right_phase + diagonal_phase), (rotation * 2), bottom_phase)
Breaks down a 2x2 unitary into more useful ZYZ angle parameters. Args: mat: The 2x2 unitary matrix to break down. Returns: A tuple containing the amount to phase around Z, then rotate around Y, then phase around Z (all in radians).
codesearchnet
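For example, decomposing the Pauli-X gate; this sketch only needs numpy plus the module's `_phase_matrix` and `_rotation_matrix` helpers referenced in the code above:

import numpy as np

x_gate = np.array([[0, 1], [1, 0]], dtype=complex)
pre_phase, rotation, post_phase = deconstruct_single_qubit_matrix_into_angles(x_gate)
# For X the Y-rotation angle comes out as pi (a half turn), with the residual
# phases absorbed into the two Z phases.
print(pre_phase, rotation, post_phase)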
def variable_shape(handle, out_type=None): if out_type is None: if flags.config().tf_shape_default_int64.value(): out_type = dtypes.int64 else: out_type = dtypes.int32 handle_data = get_eager_safe_handle_data(handle) if handle_data is None or not handle_data.is_set: return gen_resource_variable_ops.variable_shape(handle, out_type=out_type) shape_proto = handle_data.shape_and_type[0].shape if shape_proto.unknown_rank or any((x.size == -1 for x in shape_proto.dim)): return gen_resource_variable_ops.variable_shape(handle, out_type=out_type) return constant_op.constant([x.size for x in shape_proto.dim], dtype=out_type)
Returns the shape of the variable from the handle. If the output shape dtype is not specified, it will be set to int64 if tf_shape_default_int64 is enabled, otherwise it will be set to int32. Args: handle: The handle of the variable. out_type: The dtype of the output shape. Returns: The shape of the variable.
github-repos
def delete(self, resource, timeout=(- 1)): if (type(resource) is dict): headers = {'If-Match': resource.get('eTag', '*')} else: headers = {'If-Match': '*'} return self._client.delete(resource, timeout=timeout, custom_headers=headers)
Deletes a Scope. Args: resource: dict object to delete timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: bool: Indicates if the resource was successfully deleted.
codesearchnet
def Gamma(cls, shape: 'TensorFluent', scale: 'TensorFluent', batch_size: Optional[int]=None) -> Tuple[(Distribution, 'TensorFluent')]: if (shape.scope != scale.scope): raise ValueError('Gamma distribution: parameters must have same scope!') concentration = shape.tensor rate = (1 / scale.tensor) dist = tf.distributions.Gamma(concentration, rate) batch = (shape.batch or scale.batch) if ((not batch) and (batch_size is not None)): t = dist.sample(batch_size) batch = True else: t = dist.sample() scope = shape.scope.as_list() return (dist, TensorFluent(t, scope, batch=batch))
Returns a TensorFluent for the Gamma sampling op with given shape and scale parameters. Args: shape: The shape parameter of the Gamma distribution. scale: The scale parameter of the Gamma distribution. batch_size: The size of the batch (optional). Returns: The Gamma distribution and a TensorFluent sample drawn from the distribution. Raises: ValueError: If parameters do not have the same scope.
codesearchnet
def gym_space_spec(gym_space): try: tf_dtype = tf.as_dtype(gym_space.dtype) except TypeError as e: tf.logging.error("Cannot convert space's type [%s] to tf.dtype", gym_space.dtype) raise e if isinstance(gym_space, Box): return box_space_spec(gym_space, tf_dtype) elif isinstance(gym_space, Discrete): return discrete_space_spec(gym_space, tf_dtype) else: raise NotImplementedError
Returns a reading spec of a gym space. NOTE: Only implemented currently for Box and Discrete. Args: gym_space: instance of gym.spaces whose spec we want. Returns: Reading spec for that space. Raises: NotImplementedError: For spaces whose reading spec we haven't implemented.
juraj-google-style
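A usage sketch for gym_space_spec, assuming ``box_space_spec`` and ``discrete_space_spec`` are defined alongside this helper as referenced above:

import numpy as np
from gym.spaces import Box

observation_space = Box(low=0, high=255, shape=(84, 84, 3), dtype=np.uint8)
obs_spec = gym_space_spec(observation_space)  # dispatches to box_space_spec with tf.uint8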
def send(self, config, log, obs_id, beam_id): log.info('Starting Pulsar Data Transfer...') socket = self._ftp.transfercmd('STOR {0}_{1}'.format(obs_id, beam_id)) socket.send(json.dumps(config).encode()) socket.send(bytearray((1000 * 1000))) config['metadata']['name'] = 'candidate_two' socket.send(json.dumps(config).encode()) socket.send(bytearray((1000 * 1000))) socket.close() log.info('Pulsar Data Transfer Completed...')
Send the pulsar data to the ftp server Args: config (dict): Dictionary of settings log (logging.Logger): Python logging object obs_id: observation id beam_id: beam id
codesearchnet
def reinit_nested_vars(variables, indices=None): if isinstance(variables, (tuple, list)): return tf.group(*[ reinit_nested_vars(variable, indices) for variable in variables]) if indices is None: return variables.assign(tf.zeros_like(variables)) else: zeros = tf.zeros([tf.shape(indices)[0]] + variables.shape[1:].as_list()) return tf.scatter_update(variables, indices, zeros)
Reset all variables in a nested tuple to zeros. Args: variables: Nested tuple or list of variables. indices: Batch indices to reset, defaults to all. Returns: Operation.
juraj-google-style
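A TF1-style sketch of reinit_nested_vars (shapes are illustrative; ``tf.scatter_update`` and ``tf.global_variables_initializer`` assume TensorFlow 1.x, as in the code above):

import tensorflow as tf

state = (tf.Variable(tf.ones([4, 8])), tf.Variable(tf.ones([4])))
reset_all = reinit_nested_vars(state)                                  # zero every entry
reset_some = reinit_nested_vars(state, indices=tf.constant([0, 2]))    # zero rows 0 and 2 only

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(reset_some)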
def __init__(self, channel): self.SendEvents = channel.stream_stream('/tensorflow.EventListener/SendEvents', request_serializer=tensorflow_dot_core_dot_util_dot_event__pb2.Event.SerializeToString, response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString) self.SendTracebacks = channel.unary_unary('/tensorflow.EventListener/SendTracebacks', request_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.CallTraceback.SerializeToString, response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString) self.SendSourceFiles = channel.unary_unary('/tensorflow.EventListener/SendSourceFiles', request_serializer=tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DebuggedSourceFiles.SerializeToString, response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString)
Constructor. Args: channel: A grpc.Channel.
github-repos
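A hedged sketch of constructing this stub; the class name ``EventListenerStub`` follows the generated-gRPC naming convention, and the address is a hypothetical debugger endpoint:

import grpc

channel = grpc.insecure_channel('localhost:6064')  # hypothetical EventListener server address
stub = EventListenerStub(channel)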
def update_function_configuration(self, vpc_config): LOG.info('Updating configuration for lambda function: %s', self.app_name) try: self.lambda_client.update_function_configuration( Environment=self.lambda_environment, FunctionName=self.app_name, Runtime=self.runtime, Role=self.role_arn, Handler=self.handler, Description=self.description, Timeout=int(self.timeout), MemorySize=int(self.memory), VpcConfig=vpc_config) if self.concurrency_limit: self.lambda_client.put_function_concurrency( FunctionName=self.app_name, ReservedConcurrentExecutions=self.concurrency_limit ) else: self.lambda_client.delete_function_concurrency(FunctionName=self.app_name) except boto3.exceptions.botocore.exceptions.ClientError as error: if 'CreateNetworkInterface' in error.response['Error']['Message']: message = '{0} is missing "ec2:CreateNetworkInterface"'.format(self.role_arn) LOG.debug(message) raise SystemExit(message) raise LOG.info('Updating Lambda function tags') lambda_arn = get_lambda_arn(self.app_name, self.env, self.region) self.lambda_client.tag_resource(Resource=lambda_arn, Tags={'app_group': self.group, 'app_name': self.app_name}) LOG.info("Successfully updated Lambda configuration.")
Update existing Lambda function configuration. Args: vpc_config (dict): Dictionary of SubnetIds and SecurityGroupsIds for using a VPC in lambda
juraj-google-style
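The ``vpc_config`` argument follows the boto3 ``VpcConfig`` shape; a sketch with placeholder IDs and a hypothetical ``deployment`` instance of this class:

vpc_config = {
    'SubnetIds': ['subnet-0123456789abcdef0'],        # placeholder subnet ID
    'SecurityGroupIds': ['sg-0123456789abcdef0'],     # placeholder security group ID
}
deployment.update_function_configuration(vpc_config)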
async def _try_catch_coro(emitter, event, listener, coro): try: (await coro) except Exception as exc: if (event == emitter.LISTENER_ERROR_EVENT): raise emitter.emit(emitter.LISTENER_ERROR_EVENT, event, listener, exc)
Coroutine wrapper to catch errors after async scheduling. Args: emitter (EventEmitter): The event emitter that is attempting to call a listener. event (str): The event that triggered the emitter. listener (async def): The async def that was used to generate the coro. coro (coroutine): The coroutine that should be tried. If an exception is caught the function will use the emitter to emit the failure event. If, however, the current event _is_ the failure event then the method reraises. The reraised exception may show in debug mode for the event loop but is otherwise silently dropped.
codesearchnet
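A sketch of how an emitter might schedule this wrapper on the running loop; the scheduling call and the ``emitter`` object are assumptions, since the actual emitter implementation is not shown here:

import asyncio

async def on_data(payload):
    raise RuntimeError('boom')  # will be routed to emitter.LISTENER_ERROR_EVENT

coro = on_data({'id': 1})
asyncio.ensure_future(_try_catch_coro(emitter, 'data', on_data, coro))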
def _build(self, input_batch, is_training, test_local_stats=True): input_shape = input_batch.get_shape() if (self._axis is not None): if (len(self._axis) > len(input_shape)): raise base.IncompatibleShapeError('Too many indices specified in axis: len({}) > len({}).'.format(self._axis, input_shape)) if (max(self._axis) >= len(input_shape)): raise base.IncompatibleShapeError('One or more index in axis is too large for input shape: {} >= {:d}.'.format(self._axis, len(input_shape))) if (min(self._axis) < 0): raise base.IncompatibleShapeError('Indices in axis must be non-negative: {} < 0.'.format(self._axis)) axis = self._axis else: axis = tuple(range(len(input_shape))[:(- 1)]) dtype = input_batch.dtype.base_dtype if (self._fused and (dtype == tf.bfloat16)): raise base.NotSupportedError('Fused batch norm does not support tf.bfloat16.') stat_dtype = (tf.float32 if (dtype in [tf.float16, tf.bfloat16]) else dtype) self._mean_shape = input_batch.get_shape().as_list() for index in axis: self._mean_shape[index] = 1 use_batch_stats = (is_training | test_local_stats) (mean, variance) = self._build_statistics(input_batch, axis, use_batch_stats, stat_dtype) self._build_scale_offset(dtype) (out, mean, variance) = self._batch_norm_op(input_batch, mean, variance, use_batch_stats, stat_dtype) update_ops = self._build_update_ops(mean, variance, is_training) if update_ops: if self._update_ops_collection: for update_op in update_ops: tf.add_to_collection(self._update_ops_collection, update_op) else: with tf.control_dependencies(update_ops): out = tf.identity(out) return out
Connects the BatchNorm module into the graph. Args: input_batch: A Tensor of arbitrary dimension. By default, the final dimension is not reduced over when computing the minibatch statistics. is_training: A boolean to indicate if the module should be connected in training mode, meaning the moving averages are updated. Can be a Tensor. test_local_stats: A boolean to indicate if local batch statistics should be used when `is_training=False`. If not, moving averages are used. By default `True`. Can be a Tensor. Returns: A tensor with the same shape as `input_batch`. Raises: base.IncompatibleShapeError: If `axis` is not valid for the input shape or has negative entries. base.NotSupportedError: If `input_batch` has data type of `tf.bfloat16`.
codesearchnet
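A Sonnet 1 / TF1-style usage sketch; the input shape is illustrative:

import tensorflow as tf
import sonnet as snt

inputs = tf.placeholder(tf.float32, [None, 64])
bn = snt.BatchNorm()
outputs = bn(inputs, is_training=True)  # calls _build under the hood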
def median(data): ordered = sorted(data) length = len(ordered) if length % 2 == 0: return ( ordered[math.floor(length / 2) - 1] + ordered[math.floor(length / 2)] ) / 2.0 elif length % 2 != 0: return ordered[math.floor(length / 2)]
Calculates the median of a list of integers or floating point numbers. Args: data: A list of integers or floating point numbers Returns: The middle value of the numerically sorted list when it has an odd number of items, or the mean of the two middle values when it has an even number of items.
juraj-google-style
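For instance:

median([3, 1, 4, 1, 5, 9])   # sorted -> [1, 1, 3, 4, 5, 9], even length, returns (3 + 4) / 2 = 3.5
median([7, 3, 5])            # sorted -> [3, 5, 7], odd length, returns 5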
def get_permissions(obj_name, principal=None, obj_type='file'): obj_dacl = dacl(obj_name, obj_type) if (principal is None): return obj_dacl.list_aces() return obj_dacl.get_ace(principal)
Get the permissions for the passed object Args: obj_name (str): The name of or path to the object. principal (Optional[str]): The name of the user or group for which to get permissions. Can also pass a SID. If None, all ACEs defined on the object will be returned. Default is None obj_type (Optional[str]): The type of object for which to get permissions. Returns: dict: A dictionary representing the object permissions Usage: .. code-block:: python salt.utils.win_dacl.get_permissions('C:\\Temp')
codesearchnet
def _shard_counts(layout: layout_lib.Layout, batch_dim: Optional[str]=None) -> List[int]: shard_counts = [] for spec in layout.sharding_specs: if spec in (batch_dim, layout_lib.UNSHARDED): shard_counts.append(1) else: shard_counts.append(layout.mesh.dim_size(spec)) return shard_counts
Computes a list of the number of shards in each dimension of the layout. The shard counts are used to slice each dataset element. The batch dimension's count is overridden to 1 since we only consider how many shards to make locally (within each local replica). Sharding across clients is handled by either tf.data.Dataset's shard transformation (in the single-client case) or tf.data service's distribute function (in the multi-client case). Args: layout: the layout to compute the shard counts for. batch_dim: the name of the batch dimension of the layout, if present. Returns: A list of shard counts, one element per dimension of the layout.
github-repos
def sequence_accuracy(labels, outputs): all_correct = tf.reduce_all( tf.logical_or(tf.equal(labels, outputs), tf.equal(labels, 0)), axis=-1 ) return tf.metrics.mean(all_correct)
Compute the sequence-level accuracy. A sequence is only considered correct if all of its entries were predicted correctly. Args: labels: ground-truth labels, shape=(batch, packed_seq_length) outputs: predicted tokens, shape=(batch, seq_length) Returns: Two ops, one for getting the current average accuracy and another for updating the running average estimate.
juraj-google-style
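A small illustration of the padding rule: label positions equal to 0 count as correct regardless of the prediction. The values below are made up, and evaluating the metric requires a TF1 session with local variables initialized:

import tensorflow as tf

labels = tf.constant([[3, 5, 0], [2, 2, 4]])
outputs = tf.constant([[3, 5, 9], [2, 2, 1]])
# The first sequence counts as correct (its only mismatch is at a padded position);
# the second does not, so the accuracy is 0.5.
acc, update_op = sequence_accuracy(labels, outputs)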
def locked_get(self): query = {self.key_name: self.key_value} entities = self.model_class.objects.filter(**query) if (len(entities) > 0): credential = getattr(entities[0], self.property_name) if (getattr(credential, 'set_store', None) is not None): credential.set_store(self) return credential else: return None
Retrieve stored credential from the Django ORM. Returns: oauth2client.Credentials retrieved from the Django ORM, associated with the ``model``, ``key_value``->``key_name`` pair used to query for the model, and ``property_name`` identifying the ``CredentialsProperty`` field, all of which are defined in the constructor for this Storage object.
codesearchnet
def get_timestamp(self, url, xpath=None): if not path.exists(self.db_path): return None if self._query(url, xpath).count() > 0: return self._query(url, xpath).one().queried_on
Get time stamp of cached query result. If the DB has not yet been initialized or the url/xpath pair has not been queried yet, return None. Args: url (str): URL of the cached query. xpath (str): xpath used for the query (may be ``None``) Returns: datetime.datetime: cached response timestamp, None if not available
juraj-google-style
def flip_channel_order(image: np.ndarray, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: input_data_format = infer_channel_dimension_format(image) if input_data_format is None else input_data_format if input_data_format == ChannelDimension.LAST: image = image[..., ::-1] elif input_data_format == ChannelDimension.FIRST: image = image[::-1, ...] else: raise ValueError(f'Unsupported channel dimension: {input_data_format}') if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image
Flips the channel order of the image. If the image is in RGB format, it will be converted to BGR and vice versa. Args: image (`np.ndarray`): The image to flip. data_format (`ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image.
github-repos
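A sketch for flip_channel_order with a random channels-first image; it needs only numpy plus the ``ChannelDimension`` enum and helpers referenced above:

import numpy as np

image = np.random.randint(0, 256, size=(3, 32, 32), dtype=np.uint8)  # RGB, channels first
bgr = flip_channel_order(image, input_data_format=ChannelDimension.FIRST)
assert np.array_equal(bgr[0], image[2])  # red and blue planes swapped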
def expect_false(condition, msg, extras=None): try: asserts.assert_false(condition, msg, extras) except signals.TestSignal as e: logging.exception('Expected a `False` value, got `True`.') recorder.add_error(e)
Expects an expression to evaluate to False. If the expectation is not met, the test is marked as fail after its execution finishes. Args: condition: The expression that is evaluated. msg: A string explaining the details in case of failure. extras: An optional field for extra information to be included in test result.
codesearchnet
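A Mobly-style usage sketch for expect_false; the ``device`` object and its attributes are hypothetical:

expect_false(
    device.is_locked,
    'Device should be unlocked after setup.',
    extras={'serial': device.serial})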
def UserAgentFragment(self): if self.operating_system == OperatingSystem.LINUX: return '({name} {version})'.format(name=self.operating_system.name, version=platform.release()) elif self.operating_system == OperatingSystem.WINDOWS: return '({name} NT {version})'.format(name=self.operating_system.name, version=platform.version()) elif self.operating_system == OperatingSystem.MACOSX: format_string = '(Macintosh; {name} Mac OS X {version})' arch_string = self.architecture.name if self.architecture == Architecture.ppc else 'Intel' return format_string.format(name=arch_string, version=platform.release()) else: return '()'
Generates the fragment of the User-Agent that represents the OS. Examples: (Linux 3.2.5-gg1236) (Windows NT 6.1.7601) (Macintosh; PPC Mac OS X 12.4.0) (Macintosh; Intel Mac OS X 12.4.0) Returns: str, The fragment of the User-Agent string.
github-repos