Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def batch_shape(self): return self.shape[:-2]
`TensorShape` of batch dimensions of this `LinearOperator`. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `TensorShape([B1,...,Bb])`, equivalent to `A.shape[:-2]` Returns: `TensorShape`, statically determined, may be undefined.
github-repos
def safe_call(request: Request, methods: Methods, *, debug: bool) -> Response: with handle_exceptions(request, debug) as handler: result = call(methods.items[request.method], *request.args, **request.kwargs) handler.response = SuccessResponse(result=result, id=request.id) return handler.response
Call a Request, catching exceptions to ensure we always return a Response. Args: request: The Request object. methods: The list of methods that can be called. debug: Include more information in error responses. Returns: A Response object.
codesearchnet
def angular_templates(context): template_paths = context['HORIZON_CONFIG']['external_templates'] all_theme_static_files = context['HORIZON_CONFIG']['theme_static_files'] this_theme_static_files = all_theme_static_files[context['THEME']] template_overrides = this_theme_static_files['template_overrides'] angular_templates = {} for relative_path in template_paths: template_static_path = (context['STATIC_URL'] + relative_path) if (relative_path in template_overrides): relative_path = template_overrides[relative_path] result = [] for finder in finders.get_finders(): result.extend(finder.find(relative_path, True)) path = result[(- 1)] try: if six.PY3: with open(path, encoding='utf-8') as template_file: angular_templates[template_static_path] = template_file.read() else: with open(path) as template_file: angular_templates[template_static_path] = template_file.read() except (OSError, IOError): pass templates = [(key, value) for (key, value) in angular_templates.items()] templates.sort(key=(lambda item: item[0])) return {'angular_templates': templates}
Generate a dictionary of template contents for all static HTML templates. If the template has been overridden by a theme, load the override contents instead of the original HTML file. One use for this is to pre-populate the angular template cache. Args: context: the context of the current Django template Returns: an object containing angular_templates: dictionary of angular template contents - key is the template's static path, - value is a string of HTML template contents
codesearchnet
def from_shape(cls, ragged_shape: dynamic_ragged_shape.DynamicRaggedShape) -> 'StructuredTensor': return StructuredTensor(fields={}, ragged_shape=ragged_shape)
Creates a `StructuredTensor` with no fields and ragged_shape. Args: ragged_shape: the shape of the structured tensor. Returns: a StructuredTensor with no fields and ragged_shape.
github-repos
def to_b58check(self, testnet=False): b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b)
Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key.
juraj-google-style
def expected_mean_g_value(self, vocab_size: int, coinflip_prob: float=0.5) -> float: return coinflip_prob + coinflip_prob * (1 - coinflip_prob) * (1 - 1 / vocab_size)
Compute expected mean g-value after watermarking, assuming a uniform LM distribution. This is the theoretical expected value for single-layer watermarking. Args: vocab_size (`int`): The size of the vocabulary. coinflip_prob (`float`, *optional*, defaults to 0.5): Probability of 1 in the boolean PRF. Returns: The expected mean g-value for watermarked text.
github-repos
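A quick worked check of the expected-g-value formula above, reproducing the arithmetic of the method body directly with assumed inputs (vocab_size=10 and the default coinflip_prob=0.5); the class itself is not needed for this check:

>>> coinflip_prob, vocab_size = 0.5, 10
>>> coinflip_prob + coinflip_prob * (1 - coinflip_prob) * (1 - 1 / vocab_size)
0.725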
def rating(self, value): if not self.can_update(): self._tcex.handle_error(910, [self.type]) request_data = {'rating': value} return self.tc_requests.update( self.api_type, self.api_sub_type, self.unique_id, request_data, owner=self.owner )
Updates the Indicator's rating. Args: value: The new rating value to assign to the Indicator.
juraj-google-style
def create_ondemand_streaming_locator(access_token, encoded_asset_id, pid, starttime=None): path = '/Locators' endpoint = ''.join([ams_rest_endpoint, path]) if (starttime is None): body = (((('{ \t\t\t"AccessPolicyId":"' + pid) + '", \t\t\t"AssetId":"') + encoded_asset_id) + '", \t\t\t"Type": "2" }') else: body = (((((('{ \t\t\t"AccessPolicyId":"' + pid) + '", \t\t\t"AssetId":"') + encoded_asset_id) + '", \t\t\t"StartTime":"') + str(starttime)) + '", \t\t\t"Type": "2" \t\t}') return do_ams_post(endpoint, path, body, access_token, 'json_only')
Create Media Service OnDemand Streaming Locator. Args: access_token (str): A valid Azure authentication token. encoded_asset_id (str): A Media Service Encoded Asset ID. pid (str): A Media Service Encoded PID. starttime (str): A Media Service Starttime. Returns: HTTP response. JSON body.
codesearchnet
def _keyDown(key): if key not in keyboardMapping or keyboardMapping[key] is None: return needsShift = pyautogui.isShiftCharacter(key) mods, vkCode = divmod(keyboardMapping[key], 0x100) for apply_mod, vk_mod in [(mods & 4, 0x12), (mods & 2, 0x11), (mods & 1 or needsShift, 0x10)]: if apply_mod: ctypes.windll.user32.keybd_event(vk_mod, 0, 0, 0) ctypes.windll.user32.keybd_event(vkCode, 0, 0, 0) for apply_mod, vk_mod in [(mods & 1 or needsShift, 0x10), (mods & 2, 0x11), (mods & 4, 0x12)]: if apply_mod: ctypes.windll.user32.keybd_event(vk_mod, 0, KEYEVENTF_KEYUP, 0)
Performs a keyboard key press without the release. This will put that key in a held down state. NOTE: For some reason, this does not seem to cause key repeats like would happen if a keyboard key was held down on a text field. Args: key (str): The key to be pressed down. The valid names are listed in pyautogui.KEY_NAMES. Returns: None
juraj-google-style
def extension_method(cls, method_name: str) -> Any: def decorator(func): sig = pg_typing.signature(func, auto_typing=False, auto_doc=False) try: extension_arg_index = sig.arg_names.index('value') - 1 except ValueError as e: raise TypeError(f'View method {func.__name__!r} must have a `value` argument, which represents the target object to render.') from e if sig.varargs is not None: raise TypeError(f'View method must not have variable positional argument. Found `*{sig.varargs.name}` in {func.__name__!r}') def get_extension(args: Sequence[Any], kwargs: Dict[str, Any]) -> Any: if 'value' in kwargs: return kwargs['value'] if extension_arg_index < len(args): return args[extension_arg_index] raise ValueError(f'No value is provided for the `value` argument for {func.__name__!r}.') def map_args(args: Sequence[Any], kwargs: Dict[str, Any]) -> Dict[str, Any]: assert len(args) < len(sig.args), (args, sig.args) kwargs.update({sig.args[i].name: arg for i, arg in enumerate(args) if i != extension_arg_index}) kwargs.pop('value', None) return kwargs @functools.wraps(func) def _generated_view_fn(self, *args, **kwargs): return self._maybe_dispatch(*args, **kwargs, extension=get_extension(args, kwargs), view_method=func, extension_method_name=method_name, arg_map_fn=map_args) return _generated_view_fn return decorator
Decorator that dispatches a View method to a View.Extension method. A few things to note: 1) The View method being decorated must have a `value` argument, based on which the Extension method will be dispatched. 2) The View method's `value` argument will map to the Extension method's `self` argument. 3) The Extension method can optionally have a `view` argument, which will be set to the current View class. Args: method_name: The name of the method in the Extension class to dispatch from current View method. Returns: A decorator that dispatches a View method to a View.Extension method.
github-repos
def _wrap_section(source, width): if _get_section('usage', source): return _wrap_usage_section(source, width) if _is_definition_section(source): return _wrap_definition_section(source, width) lines = inspect.cleandoc(source).splitlines() paragraphs = (textwrap.wrap(line, width, replace_whitespace=False) for line in lines) return '\n'.join((line for paragraph in paragraphs for line in paragraph))
Wrap the given section string to the current terminal size. Intelligently wraps the section string to the given width. When wrapping section lines, it auto-adjusts the spacing between terms and definitions. It also adjusts commands to fit the correct length for the arguments. Args: source: The section string to wrap. width: The width to wrap the section to. Returns: The wrapped section string.
codesearchnet
class Blip2ForConditionalGenerationModelOutput(ModelOutput): loss: Optional[Tuple[torch.FloatTensor]] = None logits: Optional[Tuple[torch.FloatTensor]] = None vision_outputs: Optional[torch.FloatTensor] = None qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['vision_outputs', 'qformer_outputs', 'language_model_outputs'] else getattr(self, k).to_tuple() for k in self.keys()))
Class defining the outputs of [`Blip2ForConditionalGeneration`]. Args: loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Language modeling loss from the language model. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head of the language model. vision_outputs (`BaseModelOutputWithPooling`): Outputs of the vision encoder. qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`): Outputs of the Q-Former (Querying Transformer). language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`): Outputs of the language model.
github-repos
def _VerifyOneTest(self, pool_func, pool_grad_func, input_sizes, ksize, strides, padding, data_format, pool_grad_grad_func=None): total_size = np.prod(input_sizes) x = np.arange(1, total_size + 1, dtype=np.float32) x *= np.random.randint(2, size=total_size) * 2 - 1 x[np.random.choice(total_size)] = np.inf x[np.random.choice(total_size)] = -np.inf x = x.reshape(input_sizes) with self.session() as sess: with ops.device(self.CPU_DEVICE): inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes) outputs = pool_func(inputs, ksize=ksize, strides=strides, padding=padding, data_format='NHWC') output_vals = np.array(sess.run(outputs, {inputs: x})) output_gradient_vals = np.arange(1, output_vals.size + 1, dtype=np.float32) output_gradient_vals = output_gradient_vals.reshape(output_vals.shape) output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32) output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape) with ops.device(self.CPU_DEVICE): output_gradients = array_ops.placeholder(dtypes.float32, shape=output_vals.shape) expected_input_gradients = pool_grad_func(inputs, outputs, output_gradients, ksize=ksize, strides=strides, padding=padding, data_format='NHWC') expected_input_gradient_vals = sess.run(expected_input_gradients, {inputs: x, output_gradients: output_gradient_vals}) output_grad_gradients = array_ops.placeholder(dtypes.float32, shape=expected_input_gradient_vals.shape) if pool_grad_grad_func is not None: expected_grad_gradients = pool_grad_grad_func(inputs, outputs, output_grad_gradients, ksize=ksize, strides=strides, padding=padding, data_format='NHWC') expected_grad_gradients_vals = sess.run(expected_grad_gradients, {inputs: x, output_grad_gradients: output_grad_grad_vals}) with self.test_scope(): outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape) xla_inputs = inputs xla_outputs = outputs xla_output_gradients = output_gradients xla_output_grad_gradients = output_grad_gradients xla_ksize = ksize xla_strides = strides if data_format == 'NCHW': xla_inputs = NHWCToNCHW(inputs) xla_outputs = NHWCToNCHW(outputs) xla_output_gradients = NHWCToNCHW(output_gradients) xla_output_grad_gradients = NHWCToNCHW(output_grad_gradients) xla_ksize = NHWCToNCHW(ksize) xla_strides = NHWCToNCHW(strides) actual_input_gradients = pool_grad_func(xla_inputs, xla_outputs, xla_output_gradients, ksize=xla_ksize, strides=xla_strides, padding=padding, data_format=data_format) if data_format == 'NCHW': actual_input_gradients = NCHWToNHWC(actual_input_gradients) if pool_grad_grad_func is not None: actual_grad_gradients = pool_grad_grad_func(xla_inputs, xla_outputs, xla_output_grad_gradients, ksize=xla_ksize, strides=xla_strides, padding=padding, data_format=data_format) if data_format == 'NCHW': actual_grad_gradients = NCHWToNHWC(actual_grad_gradients) actual_input_gradients_vals = sess.run(actual_input_gradients, {inputs: x, outputs: output_vals, output_gradients: output_gradient_vals}) self.assertAllClose(expected_input_gradient_vals, actual_input_gradients_vals, rtol=0.0001, atol=1e-06) self.assertShapeEqual(actual_input_gradients_vals, inputs) if pool_grad_grad_func is not None: actual_grad_gradients_vals = sess.run(actual_grad_gradients, {inputs: x, outputs: output_vals, output_grad_gradients: output_grad_grad_vals}) self.assertAllClose(expected_grad_gradients_vals, actual_grad_gradients_vals, rtol=0.0001, atol=1e-06) self.assertShapeEqual(actual_grad_gradients_vals, outputs)
Verifies the output values of the pooling gradient function. Args: pool_func: Forward pooling function. pool_grad_func: Pooling gradient function for pool_func. input_sizes: Input tensor dimensions. ksize: The kernel size dimensions. strides: The stride dimensions. padding: Padding type. data_format: The data format we use to run the pooling operation. pool_grad_grad_func: Second-order gradient function, if available.
github-repos
def get_filename(self, task, default_ext): url_path = urlparse(task['file_url'])[2] extension = (url_path.split('.')[(- 1)] if ('.' in url_path) else default_ext) file_idx = (self.fetched_num + self.file_idx_offset) return '{:06d}.{}'.format(file_idx, extension)
Get the filename under which the image will be saved. The default strategy is to use an increasing 6-digit number as the filename. You can override this method if you want to set custom naming rules. The file extension is kept if it can be obtained from the url, otherwise ``default_ext`` is used as the extension. Args: task (dict): The task dict obtained from ``task_queue``. default_ext (str): Default extension used when none can be derived from the URL. Returns: Filename with extension.
codesearchnet
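The naming rule in get_filename reduces to a zero-padded counter plus an extension; a minimal sketch of the formatting expression used in the method, with a hypothetical index and extension (the downloader state that normally supplies them is omitted):

>>> file_idx, extension = 7, 'jpg'
>>> '{:06d}.{}'.format(file_idx, extension)
'000007.jpg'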
def _validate_at_hash(claims, access_token, algorithm): if 'at_hash' not in claims and not access_token: return elif 'at_hash' in claims and not access_token: msg = 'No access_token provided to compare against at_hash claim.' raise JWTClaimsError(msg) elif access_token and 'at_hash' not in claims: msg = 'at_hash claim missing from token.' raise JWTClaimsError(msg) try: expected_hash = calculate_at_hash(access_token, ALGORITHMS.HASHES[algorithm]) except (TypeError, ValueError): msg = 'Unable to calculate at_hash to verify against token claims.' raise JWTClaimsError(msg) if claims['at_hash'] != expected_hash: raise JWTClaimsError('at_hash claim does not match access_token.')
Validates that the 'at_hash' parameter included in the claims matches with the access_token returned alongside the id token as part of the authorization_code flow. Args: claims (dict): The claims dictionary to validate. access_token (str): The access token returned by the OpenID Provider. algorithm (str): The algorithm used to sign the JWT, as specified by the token headers.
juraj-google-style
def _compute_gradient_error_float16(self, x, x32, x_shape, y, y32, y_shape, x_dtype): x_init_val = np.random.random_sample(x_shape).astype(x_dtype) x32_init_val = x_init_val.astype(np.float32) theoretical_grad, _ = gradient_checker.compute_gradient(x, x_shape, y, y_shape, delta=0.001, x_init_value=x_init_val) _, numerical_grad = gradient_checker.compute_gradient(x32, x_shape, y32, y_shape, delta=0.001, x_init_value=x32_init_val) if theoretical_grad.size == 0 and numerical_grad.size == 0: return 0 return np.fabs(theoretical_grad - numerical_grad).max()
Computes the gradient error for float16 inputs and/or outputs. This returns the same value as gradient_checker.compute_gradient_error. The difference is that gradient_checker.compute_gradient_error does not numerically compute the gradients in a numerically stable way for float16 tensors. To fix this, this function requires float32 versions of x and y to numerically compute the gradients, to compare with the float16 symbolically computed gradients. Args: x: The input tensor. x32: A float32 version of x. x_shape: The shape of x. y: The output tensor. y32: A float32 version of y. Must be calculated based on x32, not x. y_shape: The shape of y. x_dtype: The type of x, float16 or bfloat16. Returns: The maximum error in between the two Jacobians, as in gradient_checker.compute_gradient_error.
github-repos
def get_current_round(self, tournament=1): query = '\n query($tournament: Int!) {\n rounds(tournament: $tournament\n number: 0) {\n number\n }\n }\n ' arguments = {'tournament': tournament} data = self.raw_query(query, arguments)['data']['rounds'][0] if (data is None): return None round_num = data['number'] return round_num
Get number of the current active round. Args: tournament (int): ID of the tournament (optional, defaults to 1) Returns: int: number of the current active round Example: >>> NumerAPI().get_current_round() 104
codesearchnet
def prod( self, axis=None, skipna=None, level=None, numeric_only=None, min_count=0, **kwargs ): axis = self._get_axis_number(axis) if axis is not None else 0 data = self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=True) return data._reduce_dimension( data._query_compiler.prod( axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, min_count=min_count, **kwargs ) )
Return the product of the values for the requested axis Args: axis : {index (0), columns (1)} skipna : boolean, default True level : int or level name, default None numeric_only : boolean, default None min_count : int, default 0 Returns: prod : Series or DataFrame (if level specified)
juraj-google-style
def HandleNetworkInterfaces(self, result): network_interfaces = self._ExtractInterfaceMetadata(result) if self.network_setup_enabled: self.network_setup.EnableNetworkInterfaces( [interface.name for interface in network_interfaces[1:]]) for interface in network_interfaces: if self.ip_forwarding_enabled: self.ip_forwarding.HandleForwardedIps( interface.name, interface.forwarded_ips, interface.ip)
Called when network interface metadata changes. Args: result: dict, the metadata response with the network interfaces.
juraj-google-style
class GraniteMoeHybridMoE(nn.Module): def __init__(self, config: GraniteMoeHybridConfig): super(GraniteMoeHybridMoE, self).__init__() self.input_size = config.hidden_size self.hidden_size = config.intermediate_size self.activation = ACT2FN[config.hidden_act] self.input_linear = GraniteMoeHybridParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2) self.output_linear = GraniteMoeHybridParallelExperts(config.num_local_experts, self.hidden_size, self.input_size) self.router = GraniteMoeHybridTopKGating(input_size=self.input_size, num_experts=config.num_local_experts, top_k=config.num_experts_per_tok) def forward(self, layer_input): bsz, length, emb_size = layer_input.size() layer_input = layer_input.reshape(-1, emb_size) _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input) expert_inputs = layer_input[batch_index] hidden_states = self.input_linear(expert_inputs, expert_size) chunked_hidden_states = hidden_states.chunk(2, dim=-1) hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1] expert_outputs = self.output_linear(hidden_states, expert_size) expert_outputs = expert_outputs * batch_gates[:, None] zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device) layer_output = zeros.index_add(0, batch_index, expert_outputs) layer_output = layer_output.view(bsz, length, self.input_size) return (layer_output, router_logits)
A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts. Args: config: Configuration object with model hyperparameters.
github-repos
def copen(fileobj, mode='rb', **kwargs): algo = io.open mode = mode.lower().strip() modules = {} write_mode = (False if (mode.lstrip('U')[0] == 'r') else True) kwargs['mode'] = mode modules_to_import = {'bz2': 'BZ2File', 'gzip': 'GzipFile', 'lzma': 'LZMAFile'} for (mod, _class) in modules_to_import.items(): try: modules[_class] = getattr(import_module(mod), _class) except (ImportError, AttributeError) as e: modules[_class] = open warn('Cannot process {0} files due to following error:{1}{2}{1}You will need to install the {0} library to properly use these files. Currently, such files will open in "text" mode.'.format(mod, linesep, e)) if (write_mode is True): algo_map = {'bz2': modules['BZ2File'], 'gz': modules['GzipFile'], 'xz': modules['LZMAFile']} ext = fileobj.split('.')[(- 1)] try: algo = algo_map[ext] except KeyError: pass else: algo = io.TextIOWrapper file_sigs = {b'BZh': modules['BZ2File'], b'\x1f\x8b\x08': modules['GzipFile'], b'\xfd7zXZ\x00': modules['LZMAFile']} fileobj = io.BufferedReader(io.open(fileobj, 'rb')) max_len = max((len(x) for x in file_sigs.keys())) start = fileobj.peek(max_len) for sig in file_sigs.keys(): if start.startswith(sig): algo = file_sigs[sig] break algo_args = set(getfullargspec(algo).args) good_args = set(kwargs.keys()).intersection(algo_args) _kwargs = {arg: kwargs[arg] for arg in good_args} if (write_mode is True): handle = algo(fileobj, **_kwargs) else: try: handle = algo(fileobj=fileobj, **_kwargs) except TypeError: handle = algo(fileobj, **_kwargs) return handle
Detects and opens compressed file for reading and writing. Args: fileobj (File): any File-like object supported by an underlying compression algorithm mode (unicode): mode to open fileobj with **kwargs: keyword-arguments to pass to the compression algorithm Returns: File: TextWrapper if no compression, else returns appropriate wrapper for the compression type Example: .. code-block:: Python >>> from tempfile import NamedTemporaryFile >>> # Write compressed file >>> temp = NamedTemporaryFile(delete=False, suffix='.bz2') >>> test_bz2 = copen(temp.name, 'wb') >>> test_bz2.write(b'bzip2') >>> test_bz2.close() >>> # Read compressed bzip file >>> test_bz2 = copen(temp.name, 'rb') >>> test_bz2.read() b'bzip2'
codesearchnet
def recipe_dataset(config, auth_write, dataset_dataset, dataset_emails, dataset_groups): dataset(config, {'auth': auth_write, 'dataset': dataset_dataset, 'emails': dataset_emails, 'groups': dataset_groups})
Create and permission a dataset in BigQuery. Args: auth_write (authentication) - Credentials used for writing data. dataset_dataset (string) - Name of Google BigQuery dataset to create. dataset_emails (string_list) - Comma separated emails. dataset_groups (string_list) - Comma separated groups.
github-repos
def inv(x): if any_symbolic_tensors((x,)): return Inv().symbolic_call(x) return _inv(x)
Computes the inverse of a square tensor. Args: x: Input tensor of shape `(..., M, M)`. Returns: A tensor of shape `(..., M, M)` representing the inverse of `x`.
github-repos
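The semantics of `inv` are the usual matrix inverse: multiplying a square tensor by its inverse gives the identity. A hedged NumPy sketch of that property (NumPy stands in here for the backend `_inv`; the op above is assumed to behave the same on eager inputs):

>>> import numpy as np
>>> x = np.array([[4.0, 7.0], [2.0, 6.0]])
>>> np.allclose(x @ np.linalg.inv(x), np.eye(2))
True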
def _GetAttributeScripts(self, attribute_data, dest_dir): script_dict = {} attribute_data = attribute_data or {} metadata_key = '%s-script' % self.script_type metadata_value = attribute_data.get(metadata_key) if metadata_value: self.logger.info('Found %s in metadata.', metadata_key) with tempfile.NamedTemporaryFile( mode='w', dir=dest_dir, delete=False) as dest: dest.write(metadata_value.lstrip()) script_dict[metadata_key] = dest.name metadata_key = '%s-script-url' % self.script_type metadata_value = attribute_data.get(metadata_key) if metadata_value: self.logger.info('Found %s in metadata.', metadata_key) script_dict[metadata_key] = self._DownloadScript( metadata_value, dest_dir) return script_dict
Retrieve the scripts from attribute metadata. Args: attribute_data: dict, the contents of the attributes metadata. dest_dir: string, the path to a directory for storing metadata scripts. Returns: dict, a dictionary mapping metadata keys to files storing scripts.
juraj-google-style
def extract_archive(file_path, path='.', archive_format='auto'): if archive_format is None: return False if archive_format == 'auto': archive_format = ['tar', 'zip'] if isinstance(archive_format, str): archive_format = [archive_format] file_path = path_to_string(file_path) path = path_to_string(path) for archive_type in archive_format: if archive_type == 'tar': open_fn = tarfile.open is_match_fn = tarfile.is_tarfile elif archive_type == 'zip': open_fn = zipfile.ZipFile is_match_fn = zipfile.is_zipfile else: raise NotImplementedError(archive_type) if is_match_fn(file_path): with open_fn(file_path) as archive: try: if zipfile.is_zipfile(file_path): archive.extractall(path) else: archive.extractall(path, members=filter_safe_paths(archive)) except (tarfile.TarError, RuntimeError, KeyboardInterrupt): if os.path.exists(path): if os.path.isfile(path): os.remove(path) else: shutil.rmtree(path) raise return True return False
Extracts an archive if it matches a support format. Supports `.tar`, `.tar.gz`, `.tar.bz`, and `.zip` formats. Args: file_path: Path to the archive file. path: Where to extract the archive file. archive_format: Archive format to try for extracting the file. Options are `"auto"`, `"tar"`, `"zip"`, and `None`. `"tar"` includes `.tar`, `.tar.gz`, and `.tar.bz` files. The default `"auto"` uses `["tar", "zip"]`. `None` or an empty list will return no matches found. Returns: `True` if a match was found and an archive extraction was completed, `False` otherwise.
github-repos
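A minimal usage sketch for extract_archive, assuming the function and its helpers (e.g. path_to_string) are importable from the surrounding utilities module; the exact import path is not shown in this record:

>>> import os, tempfile, zipfile
>>> tmp = tempfile.mkdtemp()
>>> archive = os.path.join(tmp, 'data.zip')
>>> with zipfile.ZipFile(archive, 'w') as zf:
...     zf.writestr('hello.txt', 'hi')
>>> extract_archive(archive, path=os.path.join(tmp, 'out'))
True
>>> os.path.exists(os.path.join(tmp, 'out', 'hello.txt'))
True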
def decode_list_offset_response(cls, response): return [ kafka.structs.ListOffsetResponsePayload(topic, partition, error, timestamp, offset) for topic, partitions in response.topics for partition, error, timestamp, offset in partitions ]
Decode OffsetResponse_v2 into ListOffsetResponsePayloads Arguments: response: OffsetResponse_v2 Returns: list of ListOffsetResponsePayloads
juraj-google-style
def FromFile(cls, inpath): with open(inpath, 'r') as infile: indata = infile.read() return cls.FromString(indata)
Load a CommandFile from a path. Args: inpath (str): The path to the file to load Returns: CommandFile: The decoded CommandFile object.
codesearchnet
def run_step(context): logger.debug("started") assert context, ("context must be set for echo. Did you set " "'echoMe=text here'?") context.assert_key_exists('echoMe', __name__) if isinstance(context['echoMe'], str): val = context.get_formatted('echoMe') else: val = context['echoMe'] logger.info(val) logger.debug("done")
Simple echo. Outputs context['echoMe']. Args: context: dictionary-like. context is mandatory. context must contain key 'echoMe' context['echoMe'] will echo the value to logger. This logger could well be stdout. When you execute the pipeline, it should look something like this: pypyr [name here] 'echoMe=test', assuming a keyvaluepair context parser.
juraj-google-style
def __batch_evaluate(self, test_events): percentiles = np.zeros(len(test_events)) all_items = set(self.item_buffer) for i, e in enumerate(test_events): unobserved = all_items if not self.repeat: unobserved -= self.rec.users[e.user.index]['known_items'] unobserved.add(e.item.index) candidates = np.asarray(list(unobserved)) recos, scores = self.__recommend(e, candidates) pos = np.where(recos == e.item.index)[0][0] percentiles[i] = pos / (len(recos) - 1) * 100 return np.mean(percentiles)
Evaluate the current model by using the given test events. Args: test_events (list of Event): Current model is evaluated by these events. Returns: float: Mean Percentile Rank for the test set.
juraj-google-style
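The per-event score in __batch_evaluate is a percentile rank: if the held-out item lands at position 25 among 101 ranked candidates, its contribution is 25 / (101 - 1) * 100, and the method returns the mean of these contributions over the test events. A one-line check of that arithmetic:

>>> pos, n_recos = 25, 101
>>> pos / (n_recos - 1) * 100
25.0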
def unset(config, section, opt=None): if section not in config.keys(): raise ConfigError("section '{}' doesn't exist".format(section)) if opt is None: del config[section] return if opt not in config[section].keys(): raise ConfigError( "option '{}.{}' doesn't exist".format(section, opt) ) del config[section][opt] if not config[section]: del config[section]
Unsets specified option and/or section in the config. Args: config (configobj.ConfigObj): config to work on. section (str): section name. opt (str): optional option name.
juraj-google-style
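A behavior sketch for unset, using a plain nested dict in place of the `configobj.ConfigObj` instance (an assumption for illustration; only dict-style access is exercised):

>>> config = {'core': {'remote': 'origin', 'analytics': 'true'}}
>>> unset(config, 'core', 'analytics')
>>> config
{'core': {'remote': 'origin'}}
>>> unset(config, 'core')
>>> config
{}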
def get_module_file(self, namespace, module, version): module_parts = module.split('.') module_path = path_utils.join(*module_parts) paths = [] if namespace == 'stdlib': path = path_utils.join(namespace, module_path) if self._is_module_in_typeshed(module_parts, version) or path in self.missing: paths.append(path) elif namespace == 'third_party': for package in sorted(self._third_party_packages[module_parts[0]]): paths.append(path_utils.join('stubs', package, module_path)) for path_rel in paths: if path_rel in self.missing: relpath = path_utils.join('nonexistent', path_rel + '.pyi') return (relpath, builtin_stubs.DEFAULT_SRC) for path in [path_utils.join(path_rel, '__init__.pyi'), path_rel + '.pyi']: try: name, src = self._store.load_file(path) return (name, src) except OSError: pass raise OSError(f"Couldn't find {module}")
Get the contents of a typeshed .pyi file. Arguments: namespace: selects a top-level directory within typeshed/. Allowed values are "stdlib" and "third_party". "third_party" corresponds to the typeshed/stubs/ directory. module: module name (e.g., "sys" or "__builtins__"). Can contain dots, if it's a submodule. Package names should omit the "__init__" suffix (e.g., pass in "os", not "os.__init__"). version: The Python version. (major, minor) Returns: A tuple with the filename and contents of the file. Raises: IOError: if file not found
github-repos
def compile_file_into_spirv(filepath, stage, optimization='size', warnings_as_errors=False): with open(filepath, 'rb') as f: content = f.read() return compile_into_spirv(content, stage, filepath, optimization=optimization, warnings_as_errors=warnings_as_errors)
Compile a shader file into a Spir-V binary. This function uses shaderc to compile your glsl file code into Spir-V code. Args: filepath (str): Absolute path to your shader file. stage (str): Pipeline stage in ['vert', 'tesc', 'tese', 'geom', 'frag', 'comp']. optimization (str): 'zero' (no optimization) or 'size' (reduce size). warnings_as_errors (bool): Turn warnings into errors. Returns: bytes: Compiled Spir-V binary. Raises: CompilationError: If compilation fails.
codesearchnet
def __init__(self, options=None): if options is not None: self._options = copy.deepcopy(options) else: self._options = {'max_depth': 100, 'min_bytes': 0, 'min_micros': 0, 'min_params': 0, 'min_float_ops': 0, 'min_occurrence': 0, 'order_by': 'name', 'account_type_regexes': ['.*'], 'start_name_regexes': ['.*'], 'trim_name_regexes': [], 'show_name_regexes': ['.*'], 'hide_name_regexes': [], 'account_displayed_op_only': False, 'select': ['micros'], 'step': -1, 'output': 'stdout'}
Constructor. Args: options: Optional initial option dict to start with.
github-repos
def pymmh3_hash64(key: Union[bytes, bytearray], seed: int = 0, x64arch: bool = True) -> Tuple[int, int]: hash_128 = pymmh3_hash128(key, seed, x64arch) unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF if unsigned_val1 & 0x8000000000000000 == 0: signed_val1 = unsigned_val1 else: signed_val1 = -((unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1) unsigned_val2 = (hash_128 >> 64) & 0xFFFFFFFFFFFFFFFF if unsigned_val2 & 0x8000000000000000 == 0: signed_val2 = unsigned_val2 else: signed_val2 = -((unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1) return signed_val1, signed_val2
Implements 64bit murmur3 hash, as per ``pymmh3``. Returns a tuple. Args: key: data to hash seed: seed x64arch: is a 64-bit architecture available? Returns: tuple: tuple of integers, ``(signed_val1, signed_val2)``
juraj-google-style
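The signed-conversion branch in pymmh3_hash64 is plain two's-complement arithmetic on a 64-bit value; a worked check with an assumed unsigned input of all ones, which should map to -1:

>>> unsigned_val = 0xFFFFFFFFFFFFFFFF
>>> -((unsigned_val ^ 0xFFFFFFFFFFFFFFFF) + 1)
-1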
def push_obj(self, obj: T, offset: int=0): traceable_obj = TraceableObject(obj) self._stack.append(traceable_obj) return traceable_obj.set_filename_and_line_from_caller(offset + 1)
Add object to the stack and record its filename and line information. Args: obj: An object to store on the stack. offset: Integer. If 0, the caller's stack frame is used. If 1, the caller's caller's stack frame is used. Returns: TraceableObject.SUCCESS if appropriate stack information was found, TraceableObject.HEURISTIC_USED if the stack was smaller than expected, and TraceableObject.FAILURE if the stack was empty.
github-repos
def get_markdown_files(self, dir_): md_files = OrderedSet() for root, _, files in os.walk(dir_): for name in files: split = os.path.splitext(name) if len(split) == 1: continue if split[1] in ('.markdown', '.md', '.yaml'): md_files.add(os.path.join(root, name)) return md_files
Get all the markdown files in a folder, recursively. Args: dir_: str, a toplevel folder to walk. Returns: OrderedSet: the markdown files found.
juraj-google-style
def sort_dict(d, key=None, reverse=False): kv_items = [kv for kv in d.items()] if key is None: kv_items.sort(key=lambda t: t[1], reverse=reverse) else: kv_items.sort(key=key, reverse=reverse) return collections.OrderedDict(kv_items)
Sorts a dict by value. Args: d: Input dictionary. key: Function which takes a tuple (key, object) and returns a value to compare and sort by. By default, the function compares the values of the dict, i.e. key = lambda t: t[1]. reverse: If True, reverses the sort order. Returns: OrderedDict object whose keys are ordered according to their value.
juraj-google-style
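A usage sketch for sort_dict (items are listed explicitly to keep the expected output independent of the OrderedDict repr):

>>> list(sort_dict({'b': 3, 'a': 1, 'c': 2}).items())
[('a', 1), ('c', 2), ('b', 3)]
>>> list(sort_dict({'b': 3, 'a': 1}, key=lambda t: t[0]).items())
[('a', 1), ('b', 3)]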
def _cleanup_unregistered_flag_from_module_dicts(self, flag_obj): if self._flag_is_registered(flag_obj): return for flags_by_module_dict in (self.flags_by_module_dict(), self.flags_by_module_id_dict(), self.key_flags_by_module_dict()): for flags_in_module in six.itervalues(flags_by_module_dict): while flag_obj in flags_in_module: flags_in_module.remove(flag_obj)
Cleans up unregistered flags from all module -> [flags] dictionaries. If flag_obj is registered under either its long name or short name, it won't be removed from the dictionaries. Args: flag_obj: Flag, the Flag instance to clean up for.
juraj-google-style
class OneFormerPixelLevelModuleOutput(ModelOutput): encoder_features: List[torch.FloatTensor] = None decoder_features: List[torch.FloatTensor] = None decoder_last_feature: Optional[torch.FloatTensor] = None
OneFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the `encoder` and `decoder`. By default, the `encoder` is a Swin/Dinat Backbone and the `decoder` is a Multi-Scale Deformable Attention based decoder. Args: encoder_features (List of `(torch.FloatTensor)`): List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage. decoder_features (List of `(torch.FloatTensor)`): List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage. decoder_last_feature (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): 1/4 scale features from the last Pixel Decoder Layer.
github-repos
def unq_argument(self) -> str: start = self.offset self.dfa([{'': (lambda : 0), ';': (lambda : (- 1)), ' ': (lambda : (- 1)), '\t': (lambda : (- 1)), '\r': (lambda : (- 1)), '\n': (lambda : (- 1)), '{': (lambda : (- 1)), '/': (lambda : 1)}, {'': (lambda : 0), '/': self._back_break, '*': self._back_break}]) self._arg = self.input[start:self.offset]
Parse unquoted argument. Raises: EndOfInput: If past the end of input.
codesearchnet
def getValue(self, unit=None): if unit or self.unit: r = float(self.value * UnitToValue(self.unit)) / UnitToValue(unit) return int(round(r)) if isinstance(self.value, int) else r return self.value
Return the value of the feature. If the unit is specified and the feature has a unit, the value is converted. Args: - unit (str, optional): A unit to convert the current feature value to ('B', 'K', 'M', 'G').
juraj-google-style
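A worked conversion for getValue, assuming `UnitToValue` maps 'K' to 1024 and 'M' to 1024**2 (that helper's definition is not shown in this record). For a feature with value 2048 and unit 'K', getValue('M') reduces to:

>>> int(round(float(2048 * 1024) / (1024 ** 2)))
2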
def store_object(file_name, save_key, file_location, object_to_store=None): file = __os.path.join(file_location, file_name) try: shelve_store = __shelve.open(file) except Exception as e: LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e)) print('Bad storage dB, rebuilding!!') __os.remove(file) shelve_store = __shelve.open(file) shelve_store[save_key] = object_to_store shelve_store.close()
Function to store objects in a shelve. Args: file_name: Shelve storage file name. save_key: The name of the key to store the item to. file_location: The location of the file, derived from the os module. object_to_store: The object you want to store. Returns: None
juraj-google-style
def test_step(self, data): data = data_adapter.expand_1d(data) x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data) y_pred = self(x, training=False) self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses) self.compiled_metrics.update_state(y, y_pred, sample_weight) return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result return return_metrics
The logic for one evaluation step. This method can be overridden to support custom evaluation logic. This method is called by `Model.make_test_function`. This function should contain the mathematical logic for one step of evaluation. This typically includes the forward pass, loss calculation, and metrics updates. Configuration details for *how* this logic is run (e.g. `tf.function` and `tf.distribute.Strategy` settings), should be left to `Model.make_test_function`, which can also be overridden. Args: data: A nested structure of `Tensor`s. Returns: A `dict` containing values that will be passed to `tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the values of the `Model`'s metrics are returned.
github-repos
def __method_descriptor(self, service, method_info, protorpc_method_info): descriptor = {} request_message_type = (resource_container.ResourceContainer. get_request_message(protorpc_method_info.remote)) request_kind = self.__get_request_kind(method_info) remote_method = protorpc_method_info.remote method_id = method_info.method_id(service.api_info) path = method_info.get_path(service.api_info) description = protorpc_method_info.remote.method.__doc__ descriptor['id'] = method_id descriptor['path'] = path descriptor['httpMethod'] = method_info.http_method if description: descriptor['description'] = description descriptor['scopes'] = ['https://www.googleapis.com/auth/userinfo.email'] parameters = self.__params_descriptor( request_message_type, request_kind, path, method_id, method_info.request_params_class) if parameters: descriptor['parameters'] = parameters if method_info.request_params_class: parameter_order = self.__params_order_descriptor( method_info.request_params_class, path, is_params_class=True) else: parameter_order = self.__params_order_descriptor( request_message_type, path, is_params_class=False) if parameter_order: descriptor['parameterOrder'] = parameter_order request_descriptor = self.__request_message_descriptor( request_kind, request_message_type, method_id, method_info.request_body_class) if request_descriptor is not None: descriptor['request'] = request_descriptor response_descriptor = self.__response_message_descriptor( remote_method.response_type(), method_info.method_id(service.api_info)) if response_descriptor is not None: descriptor['response'] = response_descriptor return descriptor
Describes a method. Args: service: endpoints.Service, Implementation of the API as a service. method_info: _MethodInfo, Configuration for the method. protorpc_method_info: protorpc.remote._RemoteMethodInfo, ProtoRPC description of the method. Returns: Dictionary describing the method.
juraj-google-style
def get_self_attention_bias(x): x_shape = common_layers.shape_list(x) self_attention_bias = common_attention.attention_bias_lower_triangle(x_shape[1]) return self_attention_bias
Creates masked self attention bias. Args: x: A tensor of shape [batch, length, depth] Returns: self_attention_bias: A tensor of shape [length, length, 1]
codesearchnet
def sample(self, num_samples=1): self.check_fit() return np.random.normal(self.mean, self.std, num_samples)
Returns new data points based on the model. Arguments: num_samples: `int`, number of samples to draw. Returns: np.ndarray: Generated samples.
juraj-google-style
def discard_event(event: events.Event, bot_id: str=None) -> bool: if (event['type'] in SKIP_EVENTS): return True elif (bot_id and isinstance(event, events.Message)): if (event.get('bot_id') == bot_id): LOG.debug('Ignoring event: %s', event) return True elif (('message' in event) and (event['message'].get('bot_id') == bot_id)): LOG.debug('Ignoring event: %s', event) return True return False
Check if the incoming event needs to be discarded Args: event: Incoming :class:`slack.events.Event` bot_id: Id of connected bot Returns: boolean
codesearchnet
def __init__(self, channel): self.ListUptimeCheckConfigs = channel.unary_unary( "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckConfigsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckConfigsResponse.FromString, ) self.GetUptimeCheckConfig = channel.unary_unary( "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.GetUptimeCheckConfigRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.FromString, ) self.CreateUptimeCheckConfig = channel.unary_unary( "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.CreateUptimeCheckConfigRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.FromString, ) self.UpdateUptimeCheckConfig = channel.unary_unary( "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.UpdateUptimeCheckConfigRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.FromString, ) self.DeleteUptimeCheckConfig = channel.unary_unary( "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.DeleteUptimeCheckConfigRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListUptimeCheckIps = channel.unary_unary( "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckIpsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckIpsResponse.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def _append_defects(self, part, part_content_type): part_defects = {} for e in part.defects: defects = '{}: {}'.format(e.__class__.__name__, e.__doc__) self._defects_categories.add(e.__class__.__name__) part_defects.setdefault(part_content_type, []).append(defects) log.debug('Added defect {!r}'.format(defects)) if part_defects: self._has_defects = True self._defects.append(part_defects)
Add new defects and defects categories to object attributes. The defects are a list of all the problems found when parsing this message. Args: part (string): mail part part_content_type (string): content type of part
codesearchnet
def unsubscribe(self, future): assert (future not in self._pending_unsubscribes), ('%r has already been unsubscribed from' % self._pending_unsubscribes[future]) subscribe = self._requests[future] self._pending_unsubscribes[future] = subscribe self._subscriptions.pop(subscribe.id) request = Unsubscribe(subscribe.id) request.set_callback(self._q.put) try: controller = self.get_controller(subscribe.path[0]) except ValueError: pass else: self.handle_request(controller, request)
Terminates the subscription given by a future Args: future (Future): The future of the original subscription
codesearchnet
def _ip_int_from_string(self, ip_str): if not ip_str: raise AddressValueError('Address cannot be empty') octets = ip_str.split('.') if len(octets) != 4: raise AddressValueError("Expected 4 octets in %r" % ip_str) try: bvs = map(self._parse_octet, octets) return _compat_int_from_byte_vals(bvs, 'big') except ValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str))
Turn the given IP string into an integer for comparison. Args: ip_str: A string, the IP ip_str. Returns: The IP ip_str as an integer. Raises: AddressValueError: if ip_str isn't a valid IPv4 Address.
juraj-google-style
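The integer produced by _ip_int_from_string is just the four octets packed big-endian; a worked example for '192.168.1.1':

>>> (192 << 24) + (168 << 16) + (1 << 8) + 1
3232235777
>>> int.from_bytes(bytes([192, 168, 1, 1]), 'big')
3232235777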
def all(self, scope=None, **kwargs): path = '/runners/all' query_data = {} if (scope is not None): query_data['scope'] = scope return self.gitlab.http_list(path, query_data, **kwargs)
List all the runners. Args: scope (str): The scope of runners to show, one of: specific, shared, active, paused, online all (bool): If True, return all the items, without pagination per_page (int): Number of items to retrieve per request page (int): ID of the page to return (starts with page 1) as_list (bool): If set to False and no pagination option is defined, return a generator instead of a list **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the server failed to perform the request Returns: list(Runner): a list of runners matching the scope.
codesearchnet
def _get_node_parent(self, age, pos): return self.nodes[age][int((pos / self.comp))]
Get the parent node of a node, which is located in the tree's node list. Returns: object: The parent node.
codesearchnet
def verifyToken(self, auth): if (auth in (self.Auth.SkypeToken, self.Auth.Authorize)): if (('skype' not in self.tokenExpiry) or (datetime.now() >= self.tokenExpiry['skype'])): if (not hasattr(self, 'getSkypeToken')): raise SkypeAuthException('Skype token expired, and no password specified') self.getSkypeToken() elif (auth == self.Auth.RegToken): if (('reg' not in self.tokenExpiry) or (datetime.now() >= self.tokenExpiry['reg'])): self.getRegToken()
Ensure the authentication token for the given auth method is still valid. Args: auth (Auth): authentication type to check Raises: .SkypeAuthException: if Skype auth is required, and the current token has expired and can't be renewed
codesearchnet
def list_distribute_contents_simple(input_list, function=lambda x: x): dictionary = dict() for obj in input_list: dict_of_lists_add(dictionary, function(obj), obj) output_list = list() i = 0 done = False while not done: found = False for key in sorted(dictionary): if i < len(dictionary[key]): output_list.append(dictionary[key][i]) found = True if found: i += 1 else: done = True return output_list
Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 3, 1, 2, 1]. List can contain complex types like dictionaries in which case the function can return the appropriate value eg. lambda x: x[KEY] Args: input_list (List): List to distribute values function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x. Returns: List: Distributed list
juraj-google-style
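A doctest-style sketch of the distribution described above, assuming list_distribute_contents_simple and its dict_of_lists_add helper are importable from the module:

>>> list_distribute_contents_simple([1, 1, 1, 2, 2, 3])
[1, 2, 3, 1, 2, 1]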
def ParseOptions(cls, options, output_module): if not isinstance(output_module, xlsx.XLSXOutputModule): raise errors.BadConfigObject( 'Output module is not an instance of XLSXOutputModule') fields = cls._ParseStringOption( options, 'fields', default_value=cls._DEFAULT_FIELDS) additional_fields = cls._ParseStringOption(options, 'additional_fields') if additional_fields: fields = '{0:s},{1:s}'.format(fields, additional_fields) filename = getattr(options, 'write', None) if not filename: raise errors.BadConfigOption( 'Output filename was not provided use "-w filename" to specify.') timestamp_format = cls._ParseStringOption( options, 'timestamp_format', default_value=cls._DEFAULT_TIMESTAMP_FORMAT) output_module.SetFields([ field_name.strip() for field_name in fields.split(',')]) output_module.SetFilename(filename) output_module.SetTimestampFormat(timestamp_format)
Parses and validates options. Args: options (argparse.Namespace): parser options. output_module (XLSXOutputModule): output module to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when the output filename was not provided.
juraj-google-style
def AppendContent(self, src_fd): while 1: blob = src_fd.read(self.chunksize) if not blob: break blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob) self.AddBlob(blob_id, len(blob)) self.Flush()
Create new blob hashes and append to BlobImage. We don't support writing at arbitrary file offsets, but this method provides a convenient way to add blobs for a new file, or append content to an existing one. Args: src_fd: source file handle open for read Raises: IOError: if blob has already been finalized.
juraj-google-style
def _make_output_dense(self, query_shape, common_kwargs, name=None): query_rank = len(query_shape) if self._output_shape: output_shape = self._output_shape else: output_shape = [query_shape[-1]] einsum_equation, bias_axes, output_rank = _build_proj_equation(query_rank - 1, bound_dims=2, output_dims=len(output_shape)) return EinsumDense(einsum_equation, output_shape=_get_output_shape(output_rank - 1, output_shape), bias_axes=bias_axes if self._use_bias else None, name=name, **common_kwargs)
Builds the output projection matrix. Args: query_shape: Shape of the query tensor, used to build the einsum equation. common_kwargs: Common keyword arguments for the einsum layer. name: Name for the projection layer. Returns: Projection layer.
github-repos
class custom_gradient: def __init__(self, fun): warnings.warn('`custom_gradient` for the numpy backend acts as a pass-through to support the forward pass. No gradient computation or modification takes place.') self.fun = fun def __call__(self, *args, **kwargs): outputs, _ = self.fun(*args, **kwargs) return outputs
Decorator for custom gradients. Args: fun: Forward pass function.
github-repos
def process(self, element): import apache_beam as beam import six import tensorflow as tf tf.logging.set_verbosity(tf.logging.ERROR) try: clean_element = [] for line in element: clean_element.append(line.rstrip()) batch_result = self._session.run(fetches=self._transformed_features, feed_dict={self._input_placeholder_tensor: clean_element}) for i in range(len(clean_element)): transformed_features = {} for (name, value) in six.iteritems(batch_result): if isinstance(value, tf.SparseTensorValue): batch_i_indices = (value.indices[:, 0] == i) batch_i_values = value.values[batch_i_indices] transformed_features[name] = batch_i_values.tolist() else: transformed_features[name] = value[i].tolist() (yield transformed_features) except Exception as e: (yield beam.pvalue.TaggedOutput('errors', (str(e), element)))
Run the transformation graph on batched input data Args: element: list of csv strings, representing one batch input to the TF graph. Returns: dict containing the transformed data. Results are un-batched. Sparse tensors are converted to lists.
codesearchnet
def get_current_semver_version(): bazel_rc_file = open(BAZEL_RC, 'r') wheel_type = '' wheel_build_date = '' wheel_version_suffix = '' for line in bazel_rc_file: wheel_type = _get_regex_match(line, '^build --repo_env=ML_WHEEL_TYPE="(.+)"')[0] or wheel_type wheel_build_date = _get_regex_match(line, '^build --repo_env=ML_WHEEL_BUILD_DATE="([0-9]*)"')[0] or wheel_build_date wheel_version_suffix, is_matched = _get_regex_match(line, '^build --repo_env=ML_WHEEL_VERSION_SUFFIX="(.*)"', is_last_match=True) if is_matched: break tf_version_bzl_file = open(TF_VERSION_BZL, 'r') wheel_version = '' for line in tf_version_bzl_file: wheel_version, is_matched = _get_regex_match(line, '^TF_VERSION = "([0-9.]+)"', is_last_match=True) if is_matched: break old_major, old_minor, old_patch_num = wheel_version.split('.') if wheel_type == 'nightly': version_type = NIGHTLY_VERSION else: version_type = SNAPSHOT_VERSION old_extension = '' if wheel_type == 'nightly': old_extension = '-dev{}'.format(wheel_build_date) else: if wheel_build_date: old_extension += '-dev{}'.format(wheel_build_date) if wheel_version_suffix: old_extension += wheel_version_suffix return Version(old_major, old_minor, old_patch_num, old_extension, version_type)
Returns a Version object of current version. Returns: version: Version object of current SemVer string based on information from .bazelrc and tf_version.bzl files.
github-repos
def dry_bulb_temperature(self, value=99.9): if (value is not None): try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float for field `dry_bulb_temperature`'.format(value)) if (value <= (- 70.0)): raise ValueError('value need to be greater -70.0 for field `dry_bulb_temperature`') if (value >= 70.0): raise ValueError('value need to be smaller 70.0 for field `dry_bulb_temperature`') self._dry_bulb_temperature = value
Corresponds to IDD Field `dry_bulb_temperature` Args: value (float): value for IDD Field `dry_bulb_temperature` Unit: C value > -70.0 value < 70.0 Missing value: 99.9 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def check_causatives(self, case_obj=None, institute_obj=None): institute_id = (case_obj['owner'] if case_obj else institute_obj['_id']) institute_causative_variant_ids = self.get_causatives(institute_id) if (len(institute_causative_variant_ids) == 0): return [] if case_obj: case_causative_ids = set(case_obj.get('causatives', [])) institute_causative_variant_ids = list(set(institute_causative_variant_ids).difference(case_causative_ids)) query = self.variant_collection.find({'_id': {'$in': institute_causative_variant_ids}}, {'variant_id': 1}) positional_variant_ids = [item['variant_id'] for item in query] filters = {'variant_id': {'$in': positional_variant_ids}} if case_obj: filters['case_id'] = case_obj['_id'] else: filters['institute'] = institute_obj['_id'] return self.variant_collection.find(filters)
Check if there are any variants that are previously marked causative Loop through all variants that are marked 'causative' for an institute and check if any of the variants are present in the current case. Args: case_obj (dict): A Case object institute_obj (dict): check across the whole institute Returns: causatives(iterable(Variant))
codesearchnet
def dimension_value(dimension: Union['Dimension', int, None]) -> Union[int, None]: if isinstance(dimension, Dimension): return dimension.value return dimension
Compatibility utility required to allow for both V1 and V2 behavior in TF. Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to coexist with the new behavior. This utility is a bridge between the two. When accessing the value of a TensorShape dimension, use this utility, like this: ``` # If you had this in your V1 code: value = tensor_shape[i].value # Use `dimension_value` as direct replacement compatible with both V1 & V2: value = dimension_value(tensor_shape[i]) # This would be the V2 equivalent: value = tensor_shape[i] # Warning: this will return the dim value in V2! ``` Args: dimension: Either a `Dimension` instance, an integer, or None. Returns: A plain value, i.e. an integer or None.
github-repos
def purity(labels, true_labels): purity = 0.0 for i in set(labels): indices = (labels==i) true_clusters = true_labels[indices] if len(true_clusters)==0: continue counts = Counter(true_clusters) lab, count = counts.most_common()[0] purity += count return float(purity)/len(labels)
Calculates the purity score for the given labels. Args: labels (array): 1D array of integers. true_labels (array): 1D array of integers - true labels. Returns: purity score - a float between 0 and 1. Closer to 1 is better.
juraj-google-style
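A worked example of the purity score: with labels [0, 0, 1, 1] and true labels [0, 0, 0, 1], cluster 0 contributes 2 and cluster 1 contributes 1, so purity is 3/4 (NumPy arrays assumed, since the function relies on boolean indexing):

>>> import numpy as np
>>> purity(np.array([0, 0, 1, 1]), np.array([0, 0, 0, 1]))
0.75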
def MakeSuiteFromHist(hist, name=None): if (name is None): name = hist.name d = dict(hist.GetDict()) return MakeSuiteFromDict(d, name)
Makes a normalized suite from a Hist object. Args: hist: Hist object name: string name Returns: Suite object
codesearchnet
def parse(inp, format=None, encoding='utf-8', force_types=True): proper_inp = inp if hasattr(inp, 'read'): proper_inp = inp.read() if isinstance(proper_inp, six.text_type): proper_inp = proper_inp.encode(encoding) fname = None if hasattr(inp, 'name'): fname = inp.name fmt = _get_format(format, fname, proper_inp) proper_inp = six.BytesIO(proper_inp) try: res = _do_parse(proper_inp, fmt, encoding, force_types) except Exception as e: raise AnyMarkupError(e, traceback.format_exc()) if (res is None): res = {} return res
Parse input from file-like object, unicode string or byte string.

Args:
    inp: file-like object, unicode string or byte string with the markup
    format: explicitly override the guessed `inp` markup format
    encoding: `inp` encoding, defaults to utf-8
    force_types: if `True`, integers, floats, booleans and none/null
        are recognized and returned as proper types instead of strings;
        if `False`, everything is converted to strings;
        if `None`, the backend return value is used

Returns:
    parsed input (dict or list) containing unicode values

Raises:
    AnyMarkupError if a problem occurs while parsing `inp`
codesearchnet
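A hedged usage sketch: it assumes this `parse` is the one exposed by the `anymarkup`/`anymarkup-core` package (the package name is an assumption) and that a JSON-capable backend is available.

```python
import anymarkup  # assumption: the package that defines parse() above

print(anymarkup.parse(b'{"answer": 42, "ok": true}', format='json'))
# {'answer': 42, 'ok': True}

# With force_types=False scalar values are kept as strings
print(anymarkup.parse(b'{"answer": 42}', format='json', force_types=False))
# {'answer': '42'}
```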
def upload(self, local_fn: str, remote_fn: str = '', dont_overwrite: bool = False): raise NotImplementedError()
Uploads the given file to the task. If remote_fn is not specified, the file
is placed in the task's current directory under the same name.

Args:
    local_fn: location of the file locally
    remote_fn: location of the file on the task
    dont_overwrite: if True, this is a no-op if the target file exists
juraj-google-style
def replace_with_higgs_linear(model, quantization_config=None, current_key_name=None, has_been_replaced=False): from accelerate import init_empty_weights for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, nn.Linear): current_key_name_str = '.'.join(current_key_name) if not any((current_key_name_str.endswith(key) for key in quantization_config.modules_to_not_convert)): with init_empty_weights(): in_features = module.in_features out_features = module.out_features model._modules[name] = HiggsLinear(in_features, out_features, bias=module.bias is not None, num_bits=quantization_config.bits, hadamard_size=quantization_config.hadamard_size, group_size=quantization_config.group_size) has_been_replaced = True model._modules[name].source_cls = type(module) model._modules[name].requires_grad_(False) if len(list(module.children())) > 0: _, has_been_replaced = replace_with_higgs_linear(module, quantization_config=quantization_config, current_key_name=current_key_name, has_been_replaced=has_been_replaced) current_key_name.pop(-1) return (model, has_been_replaced)
Public method that recursively replaces the Linear layers of the given model with HIGGS quantized layers. `accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the conversion has been successful or not. Args: model (`torch.nn.Module`): The model to convert, can be any `torch.nn.Module` instance. quantization_config (`HiggsConfig`): The quantization config object that contains the quantization parameters. current_key_name (`list`, *optional*): A list that contains the current key name. This is used for recursion and should not be passed by the user. has_been_replaced (`bool`, *optional*): A boolean that indicates if the conversion has been successful or not. This is used for recursion and should not be passed by the user.
github-repos
def _get_decoratables(self, atype): result = [] defmsg = 'Skipping {}; not decoratable or already decorated.' for varname in self.shell.run_line_magic('who_ls', atype): varobj = self.shell.user_ns.get(varname, None) decorate = False if (varobj is None): continue if (atype in ['classobj', 'type']): if ((not hasattr(varobj, '__acorn__')) and hasattr(varobj, '__module__') and (varobj.__module__ == '__main__') and (not hasattr(varobj, '__file__'))): decorate = True else: msg.std(defmsg.format(varname), 3) elif (atype in ['function', 'staticmethod']): func = None if ((atype == 'staticmethod') and hasattr(varobj, '__func__')): func = varobj.__func__ elif (atype == 'function'): func = varobj if ((func is not None) and (not hasattr(func, '__acorn__')) and hasattr(func, '__code__') and ('<ipython-input' in func.__code__.co_filename)): decorate = True else: msg.std(defmsg.format(varname), 3) if decorate: self.entities[atype][varname] = varobj result.append((varname, varobj)) return result
Returns a list of the objects that need to be decorated in the current user namespace based on their type. Args: atype (str): one of the values in :attr:`atypes`. Specifies the type of object to search.
codesearchnet
def __init__(self, x: int, *, y: str, **kwargs):
Constructor. Args: x: An int. y: A str. **kwargs: Kwargs.
github-repos
def rolldim(P, n=1): dim = P.dim shape = P.shape dtype = P.dtype A = dict(((key[n:]+key[:n],P.A[key]) for key in P.keys)) return Poly(A, dim, shape, dtype)
Roll the axes. Args: P (Poly) : Input polynomial. n (int) : The axis that after rolling becomes the 0th axis. Returns: (Poly) : Polynomial with new axis configuration. Examples: >>> x,y,z = variable(3) >>> P = x*x*x + y*y + z >>> print(P) q0^3+q1^2+q2 >>> print(rolldim(P)) q0^2+q2^3+q1
juraj-google-style
def _stop_server(self): if self._proc: utils.stop_standing_subprocess(self._proc) self._proc = None out = self._adb.shell(_STOP_CMD.format(snippet_package=self.package, user=self._get_user_command_string()), timeout=_STOP_CMD_TIMEOUT_SEC).decode('utf-8') if 'OK (0 tests)' not in out: raise android_device_lib_errors.DeviceError(self._device, f'Failed to stop existing apk. Unexpected output: {out}.')
Releases all the resources acquired in `start_server`. Raises: android_device_lib_errors.DeviceError: if the server exited with errors on the device side.
github-repos
def load_config(self, file_name): def load_settings(file_name): instruments_loaded = {} probes_loaded = {} scripts_loaded = {} if os.path.isfile(file_name): in_data = load_b26_file(file_name) instruments = in_data['instruments'] if 'instruments' in in_data else {} scripts = in_data['scripts'] if 'scripts' in in_data else {} probes = in_data['probes'] if 'probes' in in_data else {} instruments_loaded, failed = Instrument.load_and_append(instruments) if len(failed) > 0: print(('WARNING! Following instruments could not be loaded: ', failed)) scripts_loaded, failed, instruments_loaded = Script.load_and_append( script_dict=scripts, instruments=instruments_loaded, log_function=self.log, data_path=self.gui_settings['data_folder']) if len(failed) > 0: print(('WARNING! Following scripts could not be loaded: ', failed)) probes_loaded, failed, instruments_loadeds = Probe.load_and_append( probe_dict=probes, probes=probes_loaded, instruments=instruments_loaded) return instruments_loaded, scripts_loaded, probes_loaded print(('loading script/instrument/probes config from {:s}'.format(file_name))) try: config = load_b26_file(file_name)['gui_settings'] if config['settings_file'] != file_name: print(( 'WARNING path to settings file ({:s}) in config file is different from path of settings file ({:s})'.format( config['settings_file'], file_name))) config['settings_file'] = file_name print(('loading of {:s} successful'.format(file_name))) except Exception: print(('WARNING path to settings file ({:s}) invalid use default settings'.format(file_name))) config = self._DEFAULT_CONFIG for x in list(self._DEFAULT_CONFIG.keys()): if x in config: if not os.path.exists(config[x]): try: os.makedirs(config[x]) except Exception: config[x] = self._DEFAULT_CONFIG[x] os.makedirs(config[x]) print(('WARNING: failed validating or creating path: set to default path'.format(config[x]))) else: config[x] = self._DEFAULT_CONFIG[x] os.makedirs(config[x]) print(('WARNING: path {:s} not specified set to default {:s}'.format(x, config[x]))) if os.path.exists(os.path.dirname(file_name)): config['settings_file'] = file_name self.gui_settings = config self.instruments, self.scripts, self.probes = load_settings(file_name) self.refresh_tree(self.tree_gui_settings, self.gui_settings) self.refresh_tree(self.tree_scripts, self.scripts) self.refresh_tree(self.tree_settings, self.instruments) self._hide_parameters(file_name)
Loads the gui settings, scripts, instruments and probes from a config file,
falling back to the default configuration if the file or its paths are invalid.

Args:
    file_name: path to the config (.b26) file to load
juraj-google-style
def from_spec(cls, spec: str) -> Self: if not spec: return cls() try: full_shape_str, slice_str = spec.rsplit(' ', 1) except ValueError as e: raise ValueError('Spec string must contain space-separated full_shape info.') from e full_shape = [] for dim in full_shape_str.split(): try: full_shape.append(int(dim)) except ValueError as e: raise ValueError(f"Spec string full_shape must be a sequence of integers. Found '{dim}', which is not an integer.") from e var_offset = [] var_shape = [] for dim_spec in slice_str.split(':'): try: offset, shape = dim_spec.split(',') except ValueError as e: raise ValueError('Spec string must contain comma-separated pairs of offsets and shapes.') from e try: var_offset.append(int(offset)) except ValueError as e: raise ValueError(f"Spec string var_offset must be an integer. Found '{offset}', which is not an integer.") from e try: var_shape.append(int(shape)) except ValueError as e: raise ValueError(f"Spec string var_shape must be an integer. Found '{shape}', which is not an integer.") from e return cls(full_shape=full_shape, var_offset=var_offset, var_shape=var_shape)
Parses a SaveSliceInfo spec string and returns a SaveSliceInfo object. Args: spec: The tensor slice spec string according to the SaveSliceInfo.spec property. The spec contains the space-separated shape of the full variable, followed by colon-separated pairs of the variable's offset and shape, where each pair is comma-separated. For example, consider a variable whose full shape is [4 3 5], offset is [0 1 3], and shape is [4 1 2]. This variable's SaveSliceInfo.spec would be "4 3 5 0,4:1,1:3,2". Returns: A SaveSliceInfo object containing the extracted information. Raises: ValueError: If the input string is not in the expected format.
github-repos
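A round-trip sketch based on the example given in the docstring (assuming this classmethod is the one on `tf.Variable.SaveSliceInfo`):

```python
info = SaveSliceInfo.from_spec("4 3 5 0,4:1,1:3,2")
print(info.full_shape)  # expected: [4, 3, 5]
print(info.var_offset)  # expected: [0, 1, 3]
print(info.var_shape)   # expected: [4, 1, 2]
```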
def check_partition_column(partition_column, cols): for (k, v) in cols.items(): if (k == partition_column): if (v == 'int'): return else: raise InvalidPartitionColumn('partition_column must be int, and not {0}'.format(v)) raise InvalidPartitionColumn('partition_column {0} not found in the query'.format(partition_column))
Check partition_column existence and type Args: partition_column: partition_column name cols: dict with columns names and python types Returns: None
codesearchnet
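A tiny illustration (my own; `InvalidPartitionColumn` is assumed to be the exception class defined alongside this helper):

```python
cols = {"id": "int", "name": "str"}

check_partition_column("id", cols)    # passes: "id" exists and is an int
try:
    check_partition_column("name", cols)
except InvalidPartitionColumn as exc:
    print(exc)  # partition_column must be int, and not str
```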
def derivative_extraction(feat, DeltaWindows):
    rows, cols = feat.shape
    DIF = np.zeros(feat.shape, dtype=feat.dtype)
    Scale = 0
    # Pad along the feature axis with edge values so the shifted slices stay in range.
    FEAT = np.lib.pad(feat, ((0, 0), (DeltaWindows, DeltaWindows)), 'edge')
    for i in range(DeltaWindows):
        offset = DeltaWindows
        Range = i + 1
        dif = Range * FEAT[:, offset + Range:offset + Range + cols] \
            - FEAT[:, offset - Range:offset - Range + cols]
        Scale += 2 * np.power(Range, 2)
        DIF += dif
    return DIF / Scale
This function extracts the derivative features.

Args:
    feat (array): The main feature vector (to return the second-order
        derivative, pass the first-order derivative).
    DeltaWindows (int): The value of DeltaWindows is set using the
        configuration parameter DELTAWINDOW.

Returns:
    array: Derivative feature vector - a NUMFRAMESxNUMFEATURES numpy array
    containing the derivative features along the feature axis.
codesearchnet
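A small shape-check sketch (my own example, not from the source); it assumes `numpy` is imported as `np` and the function above is in scope.

```python
import numpy as np

feat = np.arange(20, dtype=float).reshape(4, 5)
delta = derivative_extraction(feat, DeltaWindows=2)
print(delta.shape)  # (4, 5) -- the output has the same shape as the input
```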
def last_updated(self, url): return self.metadata(url).last_updated_in_seconds
Fetches last updated time for a URL. Args: url: string url of file. Returns: float UNIX Epoch time Raises: ``BeamIOError``: if path doesn't exist.
github-repos
def dump_stats(filename): res = _dump_impl() f = open(filename, 'w') json.dump(res, f, indent=4) f.close()
Write collected information to file. Args: filename: absolute filename
codesearchnet
def add_edge_end_unused(intersection, duplicates, intersections): found = None for other in intersections: if ((intersection.index_first == other.index_first) and (intersection.index_second == other.index_second)): if ((intersection.s == 0.0) and (other.s == 0.0)): found = other break if ((intersection.t == 0.0) and (other.t == 0.0)): found = other break if (found is not None): intersections.remove(found) duplicates.append(found) intersections.append(intersection)
Add intersection that is ``COINCIDENT_UNUSED`` but on an edge end. This is a helper for :func:`~._surface_intersection.add_intersection`. It assumes that * ``intersection`` will have at least one of ``s == 0.0`` or ``t == 0.0`` * A "misclassified" intersection in ``intersections`` that matches ``intersection`` will be the "same" if it matches both ``index_first`` and ``index_second`` and if it matches the start index exactly Args: intersection (.Intersection): An intersection to be added. duplicates (List[.Intersection]): List of duplicate intersections. intersections (List[.Intersection]): List of "accepted" (i.e. non-duplicate) intersections.
codesearchnet
def setModelData(self, editor, model, index): model.setData(index, editor.itemText(editor.currentIndex()))
Updates the model after changing data in the editor. Args: editor (QtGui.QComboBox): The current editor for the item. Should be a `QtGui.QComboBox` as defined in `createEditor`. model (ColumnDtypeModel): The model which holds the displayed data. index (QtCore.QModelIndex): The index of the current item of the model.
codesearchnet
def poll_output(self): if self.block: return self.output new_list = self.output[self.old_output_size:] self.old_output_size += len(new_list) return new_list
Append lines from stdout to self.output. Returns: list: The lines added since last call
codesearchnet
def destroy(ads): for ad in ads: try: ad.services.stop_all() except: ad.log.exception('Failed to clean up properly.')
Cleans up AndroidDevice objects. Args: ads: A list of AndroidDevice objects.
codesearchnet
def _netsh_file(content): with tempfile.NamedTemporaryFile(mode='w', prefix='salt-', suffix='.netsh', delete=False) as fp: fp.write(content) try: log.debug('%s:\n%s', fp.name, content) return salt.modules.cmdmod.run('netsh -f {0}'.format(fp.name), python_shell=True) finally: os.remove(fp.name)
helper function to get the results of ``netsh -f content.txt`` Running ``netsh`` will drop you into a ``netsh`` prompt where you can issue ``netsh`` commands. You can put a series of commands in an external file and run them as if from a ``netsh`` prompt using the ``-f`` switch. That's what this function does. Args: content (str): The contents of the file that will be run by the ``netsh -f`` command Returns: str: The text returned by the netsh command
codesearchnet
def _add_imports_to_env(self, raw_api): for (namespace, desc) in raw_api: for item in desc: if isinstance(item, AstImport): if (namespace.name == item.target): raise InvalidSpec('Cannot import current namespace.', item.lineno, item.path) if (item.target not in self.api.namespaces): raise InvalidSpec(('Namespace %s is not defined in any spec.' % quote(item.target)), item.lineno, item.path) env = self._get_or_create_env(namespace.name) imported_env = self._get_or_create_env(item.target) if (namespace.name in imported_env): raise InvalidSpec(('Circular import of namespaces %s and %s detected.' % (quote(namespace.name), quote(item.target))), item.lineno, item.path) env[item.target] = imported_env
Scans raw parser output for import declarations. Checks if the imports are valid, and then creates a reference to the namespace in the environment. Args: raw_api (Tuple[Namespace, List[stone.stone.parser._Element]]): Namespace paired with raw parser output.
codesearchnet
def register(self, name): def register_func(func): self.store[name] = func return func return register_func
Decorator for registering a function with PyPhi.

Args:
    name (string): The name of the function.

Returns:
    The decorator that stores the function in the registry under ``name``
    and returns it unchanged.
codesearchnet
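The method above presumably lives on a registry-like object that owns a `store` dict; a self-contained sketch of the decorator pattern it implements (the `Registry` class here is my own scaffolding):

```python
class Registry:
    def __init__(self):
        self.store = {}

    def register(self, name):
        def register_func(func):
            self.store[name] = func
            return func
        return register_func

registry = Registry()

@registry.register('double')
def double(x):
    return 2 * x

print(registry.store['double'](21))  # 42
```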
def __init__(self, type_enum): type_enum = int(type_enum) if ( type_enum not in types_pb2.DataType.values() or type_enum == types_pb2.DT_INVALID ): raise TypeError( "type_enum is not a valid types_pb2.DataType: %s" % type_enum ) self._type_enum = type_enum
Creates a new `DataType`.

NOTE(mrry): In normal circumstances, you should not need to
construct a `DataType` object directly. Instead, use the
`tf.as_dtype()` function.

Args:
    type_enum: A `types_pb2.DataType` enum value.

Raises:
    TypeError: If `type_enum` is not a valid `types_pb2.DataType` value.
juraj-google-style
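As the note above suggests, `DataType` objects are normally obtained through `tf.as_dtype` rather than this constructor; a quick sketch, assuming a standard TensorFlow install:

```python
import tensorflow as tf

dt = tf.as_dtype("float32")
print(dt.is_floating)       # True
print(dt.as_datatype_enum)  # 1, i.e. types_pb2.DT_FLOAT
```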
def config_cmd_handler(conf, config='config'): if conf[config].create or conf[config].update: conf.create_config_(update=conf[config].update) if conf[config].create_local: conf.create_config_(index=-1, update=conf[config].update) if conf[config].edit: if not conf.config_files_[0].is_file(): conf.create_config_(update=conf[config].update) subprocess.call(shlex.split('{} {}'.format(conf[config].editor, conf.config_files_[0])))
Implement the behavior of a subcmd using config_conf_section Args: conf (:class:`~loam.manager.ConfigurationManager`): it should contain a section created with :func:`config_conf_section` function. config (str): name of the configuration section created with :func:`config_conf_section` function.
juraj-google-style
def __contains__(self, id): try: backend.spreadsheet(self._sheets, id) except KeyError: return False else: return True
Return if there is a spreadsheet with the given id. Args: id (str): unique alphanumeric id of the spreadsheet Returns: bool: ``True`` if it can be fetched else ``False``
juraj-google-style
def make_absolute(base, relative): while relative.startswith('/../') or relative.startswith('../'): relative = relative[3:] base_parsed = urlparse(base) new_path = base_parsed.path.rsplit('/', 1)[0] base_parsed = base_parsed._replace(path=new_path) base = base_parsed.geturl() return urljoin(base, relative)
Make the given (relative) URL absolute. Args: base (str): The absolute URL the relative url was found on. relative (str): The (possibly relative) url to make absolute. Returns: str: The absolute URL.
juraj-google-style
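A couple of worked calls (my own examples; they assume the module's `urlparse`/`urljoin` imports, e.g. from `urllib.parse`):

```python
print(make_absolute('https://example.com/docs/page.html', 'img/logo.png'))
# https://example.com/docs/img/logo.png

print(make_absolute('https://example.com/a/b/c.html', '../up.html'))
# https://example.com/a/up.html
```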
def line_similarity(p1a, p1b, p2a, p2b, T=CLOSE_DISTANCE_THRESHOLD): d = line_distance_similarity(p1a, p1b, p2a, p2b, T=T) a = abs(angle_similarity(normalize(line(p1a, p1b)), normalize(line(p2a, p2b)))) return (d * a)
Similarity between two lines.

Args:
    p1a ([float, float]): x and y coordinates. Line A start
    p1b ([float, float]): x and y coordinates. Line A end
    p2a ([float, float]): x and y coordinates. Line B start
    p2b ([float, float]): x and y coordinates. Line B end

Returns:
    float: between 0 and 1, where 1 is very similar and 0 is
    completely different
codesearchnet
def account_displayed_op_only(self, is_true): self._options['account_displayed_op_only'] = is_true return self
Whether to account only for the statistics of displayed profiler nodes.

Args:
    is_true: If true, only account for the statistics of nodes eventually
        displayed by the outputs. Otherwise, a node's statistics are
        accounted by its parents as long as its types match
        'account_type_regexes', even if it is hidden from the output,
        say, by hide_name_regexes.

Returns:
    self
github-repos
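A hedged sketch of the builder pattern this option belongs to, assuming the TF1-style profiler API (`tf.compat.v1.profiler.ProfileOptionBuilder`):

```python
import tensorflow as tf

builder = tf.compat.v1.profiler.ProfileOptionBuilder
opts = (builder(builder.time_and_memory())
        .account_displayed_op_only(True)
        .build())
# `opts` can then be passed to tf.compat.v1.profiler.profile(..., options=opts)
```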
def plot_soma3d(ax, soma, color=None, alpha=_ALPHA): color = _get_color(color, tree_type=NeuriteType.soma) if isinstance(soma, SomaCylinders): for start, end in zip(soma.points, soma.points[1:]): common.plot_cylinder(ax, start=start[COLS.XYZ], end=end[COLS.XYZ], start_radius=start[COLS.R], end_radius=end[COLS.R], color=color, alpha=alpha) else: common.plot_sphere(ax, center=soma.center[COLS.XYZ], radius=soma.radius, color=color, alpha=alpha) _update_3d_datalim(ax, soma)
Generates a 3d figure of the soma. Args: ax(matplotlib axes): on what to plot soma(neurom.core.Soma): plotted soma color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values
juraj-google-style
def handle_length(schema, field, validator, parent_schema): if isinstance(field, fields.String): minKey = 'minLength' maxKey = 'maxLength' elif isinstance(field, (fields.List, fields.Nested)): minKey = 'minItems' maxKey = 'maxItems' else: raise ValueError('In order to set the Length validator for JSON schema, the field must be either a List or a String') if validator.min: schema[minKey] = validator.min if validator.max: schema[maxKey] = validator.max if validator.equal: schema[minKey] = validator.equal schema[maxKey] = validator.equal return schema
Adds validation logic for ``marshmallow.validate.Length``, setting the values appropriately for ``fields.List``, ``fields.Nested``, and ``fields.String``. Args: schema (dict): The original JSON schema we generated. This is what we want to post-process. field (fields.Field): The field that generated the original schema and who this post-processor belongs to. validator (marshmallow.validate.Length): The validator attached to the passed in field. parent_schema (marshmallow.Schema): The Schema instance that the field belongs to. Returns: dict: A, possibly, new JSON Schema that has been post processed and altered. Raises: ValueError: Raised if the `field` is something other than `fields.List`, `fields.Nested`, or `fields.String`
codesearchnet
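A minimal sketch of how this post-processor behaves (my own example; it assumes only `marshmallow` and that `handle_length` above is in scope):

```python
from marshmallow import fields, validate

field = fields.String()
validator = validate.Length(min=2, max=10)
schema = {'type': 'string'}

print(handle_length(schema, field, validator, parent_schema=None))
# {'type': 'string', 'minLength': 2, 'maxLength': 10}
```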
def unpack(self, buff, offset=0): super().unpack(buff, offset) try: self.oxm_field = self._unpack_oxm_field() except ValueError as exception: raise UnpackException(exception) self.oxm_hasmask = ((self.oxm_field_and_mask & 1) == 1) start = (offset + 4) end = (start + self.oxm_length) self.oxm_value = buff[start:end]
Unpack the buffer into a OxmTLV. Args: buff (bytes): The binary data to be unpacked. offset (int): If we need to shift the beginning of the data.
codesearchnet
def enumerate(self: EventSetOrNode) -> EventSetOrNode: from temporian.core.operators.enumerate import enumerate return enumerate(self)
Create an `int64` feature with the ordinal position of each event in an [`EventSet`][temporian.EventSet]. Each index group is enumerated independently. Usage: ```python >>> a = tp.event_set( ... timestamps=[-1, 2, 3, 5, 0], ... features={"cat": ["A", "A", "A", "A", "B"]}, ... indexes=["cat"], ... ) >>> b = a.enumerate() >>> b indexes: [('cat', str_)] features: [('enumerate', int64)] events: cat=b'A' (4 events): timestamps: [-1. 2. 3. 5.] 'enumerate': [0 1 2 3] cat=b'B' (1 events): timestamps: [0.] 'enumerate': [0] ... ``` Returns: EventSet with a single feature with each event's ordinal position in its index group.
github-repos
def as_treemap(self): if self._treemap_cache: return self._treemap_cache self._treemap_cache = treemap = TreeMap(self) return treemap
Return the dependencies as a TreeMap. Returns: TreeMap: instance of TreeMap.
codesearchnet
def AddUserAccount(self, user_account, session_identifier=CURRENT_SESSION): if session_identifier not in self._user_accounts: self._user_accounts[session_identifier] = {} user_accounts = self._user_accounts[session_identifier] if user_account.identifier in user_accounts: raise KeyError('User account: {0:s} already exists.'.format( user_account.identifier)) user_accounts[user_account.identifier] = user_account
Adds an user account. Args: user_account (UserAccountArtifact): user account artifact. session_identifier (Optional[str])): session identifier, where CURRENT_SESSION represents the active session. Raises: KeyError: if the user account already exists.
juraj-google-style
def merge_corpora(cls, corpora): ds = Corpus() for merging_corpus in corpora: ds.merge_corpus(merging_corpus) return ds
Merge a list of corpora into one. Args: corpora (Iterable): An iterable of :py:class:`audiomate.corpus.CorpusView`. Returns: Corpus: A corpus with the data from all given corpora merged into one.
codesearchnet
def unpack_message(buffer): hdr_size = Header().get_size() (hdr_buff, msg_buff) = (buffer[:hdr_size], buffer[hdr_size:]) header = Header() header.unpack(hdr_buff) message = new_message_from_header(header) message.unpack(msg_buff) return message
Unpack the whole buffer, including header pack. Args: buffer (bytes): Bytes representation of a openflow message. Returns: object: Instance of openflow message.
codesearchnet
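A hedged sketch, assuming this helper comes from a python-openflow `utils` module whose `Header` and `new_message_from_header` target OpenFlow 1.0 (v0x01); the exact attribute reprs may differ:

```python
# An OFPT_HELLO packet: version=0x01, type=0, length=8, xid=1
raw = b'\x01\x00\x00\x08\x00\x00\x00\x01'

msg = unpack_message(raw)
print(msg.header.message_type)  # Type.OFPT_HELLO
print(msg.header.xid)           # 1
```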