code — string, lengths 20 to 4.93k
docstring — string, lengths 33 to 1.27k
source — string, 3 classes
def dict_isect(*args):
    if not args:
        return {}
    else:
        dictclass = OrderedDict if isinstance(args[0], OrderedDict) else dict
        common_keys = set.intersection(*map(set, args))
        first_dict = args[0]
        return dictclass((k, first_dict[k]) for k in common_keys)
Constructs a dictionary that contains keys common between all inputs. The returned values will only belong to the first dictionary. Args: *args : a sequence of dictionaries (or sets of keys) Returns: Dict | OrderedDict : OrderedDict if the first argument is an OrderedDict, otherwise dict Notes: This function can be used as an alternative to `dict_subset` where any key not in the dictionary is ignored. See the following example: >>> dict_isect({'a': 1, 'b': 2, 'c': 3}, ['a', 'c', 'd']) {'a': 1, 'c': 3} Example: >>> dict_isect({'a': 1, 'b': 1}, {'b': 2, 'c': 2}) {'b': 1} >>> dict_isect(odict([('a', 1), ('b', 2)]), odict([('c', 3)])) OrderedDict() >>> dict_isect() {}
codesearchnet
def safe_call(request: Request, methods: Methods, *, debug: bool) -> Response:
    with handle_exceptions(request, debug) as handler:
        result = call(methods.items[request.method], *request.args, **request.kwargs)
        handler.response = SuccessResponse(result=result, id=request.id)
    return handler.response
Call a Request, catching exceptions to ensure we always return a Response. Args: request: The Request object. methods: The list of methods that can be called. debug: Include more information in error responses. Returns: A Response object.
juraj-google-style
def GetTermIdentifier(self): return self._term
Returns the TERM environment variable for the console. Returns: str: A str that describes the console's text capabilities
github-repos
def reshape(self, shape: tf.TensorShape) -> 'TensorFluent':
    t = tf.reshape(self.tensor, shape)
    scope = self.scope.as_list()
    batch = self.batch
    return TensorFluent(t, scope, batch=batch)
Returns a TensorFluent for the reshape operation with given `shape`. Args: shape: The output's shape. Returns: A TensorFluent wrapping the reshape operation.
codesearchnet
def resolve_aliases(data_type):
    if not is_alias(data_type):
        return data_type
    resolved = resolve_aliases(data_type.data_type)
    data_type.data_type = resolved
    return resolved
Resolve all chained / nested aliases. This will recursively point nested aliases to their resolved data type (first non-alias in the chain). Note: This differs from unwrap_alias which simply identifies/returns the resolved data type. Args: data_type (DataType): The target DataType/Alias to resolve. Return: DataType: The resolved type.
juraj-google-style
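A minimal, self-contained sketch of how the recursion in resolve_aliases collapses a chain of aliases. The Alias/Struct stand-ins and the is_alias helper below are hypothetical simplifications for illustration, not the actual types the function operates on.

class Struct:
    def __init__(self, name):
        self.name = name

class Alias:
    def __init__(self, name, data_type):
        self.name = name
        self.data_type = data_type

def is_alias(data_type):
    return isinstance(data_type, Alias)

def resolve_aliases(data_type):
    if not is_alias(data_type):
        return data_type
    resolved = resolve_aliases(data_type.data_type)
    data_type.data_type = resolved  # re-point the alias at the final target
    return resolved

target = Struct('UserInfo')
inner = Alias('UserAlias', target)
outer = Alias('AccountAlias', inner)

assert resolve_aliases(outer) is target
assert outer.data_type is target  # chain flattened in place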
def run(self, data_loaders, workflow, max_epochs, **kwargs): assert isinstance(data_loaders, list) assert mmcv.is_list_of(workflow, tuple) assert len(data_loaders) == len(workflow) self._max_epochs = max_epochs work_dir = self.work_dir if self.work_dir is not None else 'NONE' self.logger.info('Start running, host: %s, work_dir: %s', get_host_info(), work_dir) self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs) self.call_hook('before_run') while self.epoch < max_epochs: for i, flow in enumerate(workflow): mode, epochs = flow if isinstance(mode, str): if not hasattr(self, mode): raise ValueError( 'runner has no method named "{}" to run an epoch'. format(mode)) epoch_runner = getattr(self, mode) elif callable(mode): epoch_runner = mode else: raise TypeError('mode in workflow must be a str or ' 'callable function, not {}'.format( type(mode))) for _ in range(epochs): if mode == 'train' and self.epoch >= max_epochs: return epoch_runner(data_loaders[i], **kwargs) time.sleep(1) self.call_hook('after_run')
Start running. Args: data_loaders (list[:obj:`DataLoader`]): Dataloaders for training and validation. workflow (list[tuple]): A list of (phase, epochs) to specify the running order and epochs. E.g, [('train', 2), ('val', 1)] means running 2 epochs for training and 1 epoch for validation, iteratively. max_epochs (int): Total training epochs.
juraj-google-style
def get(self, value): config = self.get_block('vrf definition %s' % value) if not config: return None response = dict(vrf_name=value) response.update(self._parse_rd(config)) response.update(self._parse_description(config)) config = self.get_block('no ip routing vrf %s' % value) if config: response['ipv4_routing'] = False else: response['ipv4_routing'] = True config = self.get_block('no ipv6 unicast-routing vrf %s' % value) if config: response['ipv6_routing'] = False else: response['ipv6_routing'] = True return response
Returns the VRF configuration as a resource dict. Args: value (string): The vrf name to retrieve from the running configuration. Returns: A Python dict object containing the VRF attributes as key/value pairs.
juraj-google-style
def with_bloomberg(func): @wraps(func) def wrapper(*args, **kwargs): scope = utils.func_scope(func=func) param = inspect.signature(func).parameters port = kwargs.pop('port', _PORT_) timeout = kwargs.pop('timeout', _TIMEOUT_) restart = kwargs.pop('restart', False) all_kw = { k: args[n] if n < len(args) else v.default for n, (k, v) in enumerate(param.items()) if k != 'kwargs' } all_kw.update(kwargs) log_level = kwargs.get('log', logs.LOG_LEVEL) for to_list in ['tickers', 'flds']: conv = all_kw.get(to_list, None) if hasattr(conv, 'tolist'): all_kw[to_list] = getattr(conv, 'tolist')() if isinstance(conv, str): all_kw[to_list] = [conv] cached_data = [] if scope in ['xbbg.blp.bdp', 'xbbg.blp.bds']: to_qry = cached.bdp_bds_cache(func=func.__name__, **all_kw) cached_data += to_qry.cached_data if not (to_qry.tickers and to_qry.flds): if not cached_data: return pd.DataFrame() res = pd.concat(cached_data, sort=False).reset_index(drop=True) if not all_kw.get('raw', False): res = assist.format_output( data=res, source=func.__name__, col_maps=all_kw.get('col_maps', dict()) ) return res all_kw['tickers'] = to_qry.tickers all_kw['flds'] = to_qry.flds if scope in ['xbbg.blp.bdib']: data_file = storage.hist_file( ticker=all_kw['ticker'], dt=all_kw['dt'], typ=all_kw['typ'], ) if files.exists(data_file): logger = logs.get_logger(func, level=log_level) if all_kw.get('batch', False): return logger.debug(f'reading from {data_file} ...') return assist.format_intraday(data=pd.read_parquet(data_file), **all_kw) _, new = create_connection(port=port, timeout=timeout, restart=restart) res = func(**{ k: v for k, v in all_kw.items() if k not in ['raw', 'col_maps'] }) if new: delete_connection() if scope.startswith('xbbg.blp.') and isinstance(res, list): final = cached_data + res if not final: return pd.DataFrame() res = pd.DataFrame(pd.concat(final, sort=False)) if (scope in ['xbbg.blp.bdp', 'xbbg.blp.bds']) \ and (not all_kw.get('raw', False)): res = assist.format_output( data=res.reset_index(drop=True), source=func.__name__, col_maps=all_kw.get('col_maps', dict()), ) return res return wrapper
Wrapper function for Bloomberg connection Args: func: function to wrap
juraj-google-style
def matchmaker_request(url, token, method, content_type=None, accept=None, data=None): headers = Headers() headers = { 'X-Auth-Token': token} if content_type: headers['Content-Type'] = content_type if accept: headers['Accept'] = accept req_data = data or {'timestamp' : datetime.datetime.now().timestamp()} json_response = None try: LOG.info('Sending {} request to MME url {}. Data sent: {}'.format( method, url, req_data)) resp = requests.request( method = method, url = url, headers = headers, data = json.dumps(req_data) ) json_response = resp.json() LOG.info('MME server response was:{}'.format(json_response)) if isinstance(json_response, str): json_response = { 'message' : json_response, } elif isinstance(json_response, list): return json_response json_response['status_code'] = resp.status_code except Exception as err: LOG.info('An error occurred while sending HTTP request to server ({})'.format(err)) json_response = { 'message' : str(err) } return json_response
Send a request to MatchMaker and return its response Args: url(str): url to send request to token(str): MME server authorization token method(str): 'GET', 'POST' or 'DELETE' content_type(str): MME request Content-Type accept(str): accepted response data(dict): eventual data to send in request Returns: json_response(dict): server response
juraj-google-style
def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name): if keyfunc is not cls._default: return "{}.{}[{}, {}, {}]".format( cls.__module__, cls.__name__, cls._get_fullname(type_), cls._get_bound_repr(bound), keyfunc_name, ) return "{}.{}[{}, {}]".format( cls.__module__, cls.__name__, cls._get_fullname(type_), cls._get_bound_repr(bound), )
Return a class representation using the slice parameters. Args: type_: The type the class was sliced with. bound: The boundaries specified for the values of type_. keyfunc: The comparison function used to check the value boundaries. keyfunc_name: The name of keyfunc. Returns: A string representing the class.
juraj-google-style
def _get_truncated_table_rows(self, query_tokens: List[str], tokenized_table: TokenizedTable, num_rows: int, num_columns: int, max_length: int, truncation_strategy: Union[str, TapasTruncationStrategy]) -> Tuple[int, int]: if not isinstance(truncation_strategy, TapasTruncationStrategy): truncation_strategy = TapasTruncationStrategy(truncation_strategy) if max_length is None: max_length = self.model_max_length if truncation_strategy == TapasTruncationStrategy.DROP_ROWS_TO_FIT: while True: num_tokens = self._get_max_num_tokens(query_tokens, tokenized_table, num_rows=num_rows, num_columns=num_columns, max_length=max_length) if num_tokens is not None: break num_rows -= 1 if num_rows < 1: break elif truncation_strategy != TapasTruncationStrategy.DO_NOT_TRUNCATE: raise ValueError(f'Unknown truncation strategy {truncation_strategy}.') return (num_rows, num_tokens or 1)
Truncates a sequence pair in-place following the strategy. Args: query_tokens (`List[str]`): List of strings corresponding to the tokenized query. tokenized_table (`TokenizedTable`): Tokenized table num_rows (`int`): Total number of table rows num_columns (`int`): Total number of table columns max_length (`int`): Total maximum length. truncation_strategy (`str` or [`TapasTruncationStrategy]`): Truncation strategy to use. Seeing as this method should only be called when truncating, the only available strategy is the `"drop_rows_to_fit"` strategy. Returns: `Tuple(int, int)`: tuple containing the number of rows after truncation, and the number of tokens available for each table element.
github-repos
def banner(text, border='=', width=80):
    text_padding = '{0:^%d}' % width
    LOG.info(border * width)
    LOG.info(text_padding.format(text))
    LOG.info(border * width)
Center _text_ in a banner _width_ wide with _border_ characters. Args: text (str): What to write in the banner border (str): Border character width (int): How long the border should be
codesearchnet
def create_application_configuration(self, name, properties, description=None):
    if not hasattr(self, 'applicationConfigurations'):
        raise NotImplementedError()
    cv = ApplicationConfiguration._props(name, properties, description)
    res = self.rest_client.session.post(
        self.applicationConfigurations,
        headers={'Accept': 'application/json'},
        json=cv)
    _handle_http_errors(res)
    return ApplicationConfiguration(res.json(), self.rest_client)
Create an application configuration. Args: name (str): Name of the application configuration. properties (dict): Property values to store in the configuration as name/value pairs. description (str, optional): Description of the application configuration. Returns: ApplicationConfiguration: The newly created application configuration. .. versionadded 1.12
codesearchnet
def make_conv_bias_activation_tests(activation_op): def create_test(options): test_parameters = [{'input_shape': [[1, 3, 4, 3]], 'filter_shape': [[2, 3], [3, 3]], 'filter_2_shape': [[2, 1, 1, 3]], 'strides': [[1, 1, 1, 1]], 'dilations': [[1, 1, 1, 1]], 'data_format': ['NCHW'], 'channel_multiplier': [1, 2], 'fully_quantize': [False], 'dynamic_range_quantize': [False]}] def get_tensor_shapes(parameters): input_shape = parameters['input_shape'] filter_size = parameters['filter_shape'] filter_shape = filter_size + [input_shape[3], parameters['channel_multiplier']] return [input_shape, filter_shape] @tf.function(jit_compile=True) def add_conv(input_tensor, filter_input, parameters): out = tf.nn.conv2d(input=input_tensor, filters=filter_input, strides=parameters['strides'], dilations=parameters['dilations'], padding='VALID', data_format=parameters['data_format']) return out def add_bias_add(data_input, filter_shape): bias_input = create_tensor_data(np.float32, (filter_shape[-1],)) out = tf.nn.bias_add(data_input, bias_input, data_format='NHWC') return out def build_graph(parameters): input_shape, filter_shape = get_tensor_shapes(parameters) input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, name='input', shape=input_shape) filter_input = create_tensor_data(np.float32, filter_shape, min_value=-10, max_value=10) input_tensors = [input_tensor] if parameters['data_format'] == 'NCHW': out = add_conv(input_tensor, filter_input, parameters) else: out = tf.nn.conv2d(input=input_tensor, filters=filter_input, strides=parameters['strides'], dilations=parameters['dilations'], padding='VALID', data_format=parameters['data_format']) out = add_bias_add(out, filter_shape) out = activation_op(out) filter_input_2 = create_tensor_data(np.float32, parameters['filter_2_shape'], min_value=-10, max_value=10) if parameters['data_format'] == 'NCHW': out = add_conv(out, filter_input_2, parameters) else: out = tf.nn.conv2d(input=out, filters=filter_input_2, strides=parameters['strides'], dilations=parameters['dilations'], padding='VALID', data_format=parameters['data_format']) out = add_bias_add(out, filter_shape) out = activation_op(out) return (input_tensors, [out]) def build_inputs(parameters, sess, inputs, outputs): input_shape, _ = get_tensor_shapes(parameters) values = [create_tensor_data(np.float32, input_shape, min_value=-1, max_value=1)] return (values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))) make_zip_of_tests(options, test_parameters, build_graph, build_inputs, expected_tf_failures=2) return create_test
Make a set of tests to do convolution with activation and bias. This test will create multiple consecutive convolutions with NCHW layout to make sure that the transformations to NHWC work as expected. Note that this doesn't check performance, so manual inspection of the generated model is advised. Args: activation_op: The activation op to be used in the test. Returns: The function that creates the test.
github-repos
def __init__(self, dataset_merger, problem_type=transitfeed.TYPE_WARNING, **kwargs):
    kwargs['type'] = problem_type
    kwargs['entity_type_name'] = dataset_merger.ENTITY_TYPE_NAME
    transitfeed.ExceptionWithContext.__init__(self, None, None, **kwargs)
    self.dataset_merger = dataset_merger
Initialise the exception object. Args: dataset_merger: The DataSetMerger instance that generated this problem. problem_type: The problem severity. This should be set to one of the corresponding constants in transitfeed. kwargs: Keyword arguments to be saved as instance attributes.
juraj-google-style
def load(self, validate=True): self._load() try: self.config = self._load_config(self.system_config_file) user = self._load_config(self.global_config_file) config = self._load_config(self.config_file) local = self._load_config(self.config_local_file) for conf in [user, config, local]: self.config = self._merge(self.config, conf) if validate: self.config = Schema(self.SCHEMA).validate(self.config) self.config = configobj.ConfigObj( self.config, write_empty_values=True ) self.config.filename = self.config_file self._resolve_paths(self.config, self.config_file) except Exception as ex: raise ConfigError(ex)
Loads config from all the config files. Args: validate (bool): optional flag to tell dvc if it should validate the config or just load it as is. 'True' by default. Raises: dvc.config.ConfigError: thrown if config has invalid format.
juraj-google-style
def _create_w_objective(m, X, Z=None): genes, clusters = m.shape cells = X.shape[1] nonzeros = (X!=0) def objective(w): w = w.reshape((m.shape[1], X.shape[1])) d = m.dot(w)+eps temp = X/d m_sum = m.T.dot(nonzeros) m2 = m.T.dot(temp) deriv = m_sum - m2 return np.sum(nonzeros*(d - X*np.log(d)))/genes, deriv.flatten()/genes return objective
Creates an objective function and its derivative for W, given M and X (data) Args: m (array): genes x clusters X (array): genes x cells Z (array): zero-inflation parameters - genes x 1
juraj-google-style
def binary_arguments_to_tensors(x1, x2):
    if not isinstance(x1, Tensor) and not isinstance(x2, Tensor):
        raise ValueError('at least one of x1 and x2 must be an mtf Tensor')
    elif isinstance(x1, Tensor) and isinstance(x2, Tensor):
        return x1, x2
    elif isinstance(x1, Tensor):
        return x1, import_tf_tensor(
            x1.mesh, tf.convert_to_tensor(x2, dtype=x1.dtype), Shape([]))
    else:
        return import_tf_tensor(
            x2.mesh, tf.convert_to_tensor(x1, dtype=x2.dtype), Shape([])), x2
Convert argument of a binary operation to Tensors. Args: x1: a Tensor or something convertible to a tf Scalar x2: a Tensor or something convertible to a tf Scalar Returns: new_x1: a Tensor new_x2: a Tensor Raises: ValueError: on failure
codesearchnet
def AddProcessingOptions(self, argument_group):
    argument_helper_names = ['temporary_directory', 'zeromq']
    if self._CanEnforceProcessMemoryLimit():
        argument_helper_names.append('process_resources')
    helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
        argument_group, names=argument_helper_names)
    argument_group.add_argument(
        '--worker-memory-limit', '--worker_memory_limit',
        dest='worker_memory_limit', action='store', type=int, metavar='SIZE',
        help=('Maximum amount of memory (data segment and shared memory) a '
              'worker process is allowed to consume in bytes, where 0 '
              'represents no limit. The default limit is 2147483648 (2 GiB). '
              'If a worker process exceeds this limit it is killed by the '
              'main (foreman) process.'))
Adds processing options to the argument group Args: argument_group (argparse._ArgumentGroup): argparse argument group.
codesearchnet
def IsErrorSuppressedByNolint(category, linenum): return ((linenum in _error_suppressions.get(category, set())) or (linenum in _error_suppressions.get(None, set())))
Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment.
codesearchnet
def _group(self, group_data):
    if isinstance(group_data, dict):
        xid = group_data.get('xid')
    else:
        xid = group_data.xid
    if self.groups.get(xid) is not None:
        group_data = self.groups.get(xid)
    elif self.groups_shelf.get(xid) is not None:
        group_data = self.groups_shelf.get(xid)
    else:
        self.groups[xid] = group_data
    return group_data
Return previously stored group or new group. Args: group_data (dict|obj): An Group dict or instance of Group object. Returns: dict|obj: The new Group dict/object or the previously stored dict/object.
juraj-google-style
def publish(self, event_type: str, event_data: dict = None):
    import inspect
    import os.path
    _stack = inspect.stack()
    _origin = (os.path.basename(_stack[3][1]) + '::' + _stack[3][3]
               + '::L{}'.format(_stack[3][2]))
    publish(event_type=event_type, event_data=event_data,
            object_type=self._type, object_id=self._id,
            object_key=self._key, origin=_origin)
Publish an event associated with the scheduling object. Note: Ideally publish should not be used directly but by other methods which perform actions on the object. Args: event_type (str): Type of event. event_data (dict, optional): Event data.
codesearchnet
def SerializeUnsigned(self, writer):
    writer.WriteUInt32(self.Version)
    writer.WriteUInt256(self.PrevHash)
    writer.WriteUInt256(self.MerkleRoot)
    writer.WriteUInt32(self.Timestamp)
    writer.WriteUInt32(self.Index)
    writer.WriteUInt64(self.ConsensusData)
    writer.WriteUInt160(self.NextConsensus)
Serialize unsigned data only. Args: writer (neo.IO.BinaryWriter):
juraj-google-style
def __init__(self, url=None):
    self._url = None
    self.users_url = None
    self.groups_url = None
    self.url = url
Construct a SLUGSConnector. Args: url (string): The base URL for the remote SLUGS instance. Optional, defaults to None. Required for authentication.
juraj-google-style
def Detect(self, baseline, host_data):
    result = CheckResult()
    for detector in self.detectors:
        finding = detector(baseline, host_data)
        if finding:
            result.ExtendAnomalies([finding])
    if result:
        return result
Run host_data through detectors and return them if a detector triggers. Args: baseline: The base set of rdf values used to evaluate whether an issue exists. host_data: The rdf values passed back by the filters. Returns: A CheckResult message containing anomalies if any detectors identified an issue, None otherwise.
juraj-google-style
def read_full(fileobj, size):
    if size < 0:
        raise ValueError('size must not be negative')
    data = fileobj.read(size)
    if len(data) != size:
        raise IOError
    return data
Like fileobj.read but raises IOError if not all requested data is returned. If you want to distinguish IOError and the EOS case, better handle the error yourself instead of using this. Args: fileobj (fileobj) size (int): amount of bytes to read Raises: IOError: In case read fails or not enough data is read
codesearchnet
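A quick usage sketch of read_full with an in-memory buffer. The helper is restated inline (with an explanatory message added to the IOError, which the original omits) so the example runs standalone.

import io

def read_full(fileobj, size):
    if size < 0:
        raise ValueError('size must not be negative')
    data = fileobj.read(size)
    if len(data) != size:
        raise IOError('requested %d bytes, got %d' % (size, len(data)))
    return data

buf = io.BytesIO(b'abcdef')
assert read_full(buf, 4) == b'abcd'   # enough data available
try:
    read_full(buf, 10)                # only b'ef' left -> IOError
except IOError as exc:
    print('short read:', exc)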
def get_variant_slice(self, package_name, range_):
    variant_list = self.variant_lists.get(package_name)
    if variant_list is None:
        variant_list = _PackageVariantList(package_name, self.solver)
        self.variant_lists[package_name] = variant_list
    entries = variant_list.get_intersection(range_)
    if not entries:
        return None
    slice_ = _PackageVariantSlice(package_name, entries=entries, solver=self.solver)
    return slice_
Get a list of variants from the cache. Args: package_name (str): Name of package. range_ (`VersionRange`): Package version range. Returns: `_PackageVariantSlice` object.
codesearchnet
def dispatch_command(self, prefix, argv, screen_info=None): if not prefix: raise ValueError('Prefix is empty') resolved_prefix = self._resolve_prefix(prefix) if not resolved_prefix: raise ValueError('No handler is registered for command prefix "%s"' % prefix) handler = self._handlers[resolved_prefix] try: output = handler(argv, screen_info=screen_info) except CommandLineExit as e: raise e except SystemExit as e: lines = ['Syntax error for command: %s' % prefix, 'For help, do "help %s"' % prefix] output = RichTextLines(lines) except BaseException as e: lines = ['Error occurred during handling of command: %s %s:' % (resolved_prefix, ' '.join(argv)), '%s: %s' % (type(e), str(e))] lines.append('') lines.extend(traceback.format_exc().split('\n')) output = RichTextLines(lines) if not isinstance(output, RichTextLines) and output is not None: raise ValueError('Return value from command handler %s is not None or a RichTextLines instance' % str(handler)) return output
Handles a command by dispatching it to a registered command handler. Args: prefix: Command prefix, as a str, e.g., "print". argv: Command argument vector, excluding the command prefix, represented as a list of str, e.g., ["tensor_1"] screen_info: A dictionary containing screen info, e.g., {"cols": 100}. Returns: An instance of RichTextLines or None. If any exception is caught during the invocation of the command handler, the RichTextLines will wrap the error type and message. Raises: ValueError: If 1) prefix is empty, or 2) no command handler is registered for the command prefix, or 3) the handler is found for the prefix, but it fails to return a RichTextLines or raise any exception. CommandLineExit: If the command handler raises this type of exception, this method will simply pass it along.
github-repos
def convert_to_tensor_or_ragged_tensor(value, dtype=None, preferred_dtype=None, name=None): if isinstance(value, RaggedTensor): if dtype and (not dtype.is_compatible_with(value.dtype)): raise ValueError(f'Tensor conversion requested dtype {dtype.name} for RaggedTensor with dtype {value.dtype.name}: {value}.') return value elif isinstance(value, ragged_tensor_value.RaggedTensorValue): with ops.name_scope(name, 'ConvertToTensorOrRaggedTensor', []): flat_values = ops.convert_to_tensor(value=value.flat_values, dtype=dtype, dtype_hint=preferred_dtype, name='flat_values') return RaggedTensor.from_nested_row_splits(flat_values, value.nested_row_splits, validate=False) else: return tensor_conversion.convert_to_tensor_v2_with_dispatch(value=value, dtype=dtype, dtype_hint=preferred_dtype, name=name)
Converts value to a `RaggedTensor` or `Tensor`. * If `value` is a `RaggedTensor`, then return it as-is. * If `value` is a `RaggedTensorValue`, return a corresponding constant `RaggedTensor`. * Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`. Args: value: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has a registered `Tensor` conversion function. dtype: Optional element type for the returned tensor. If missing the type is inferred from the type of `value`. preferred_dtype: Optional element type for the returned tensor, used when dtype is None. This argument has no effect if `value` is already a tensor, or when conversion is not possible. name: Optional name to use if a new `Tensor` is created. Returns: A `Tensor` or `RaggedTensor`.
github-repos
def _slice_params_to_dict(dist, params_event_ndims, slices): override_dict = {} for param_name, param_event_ndims in six.iteritems(params_event_ndims): if param_name not in dist.parameters: raise ValueError('Distribution {} is missing advertised ' 'parameter {}'.format(dist, param_name)) param = dist.parameters[param_name] if param is None: continue dtype = None if hasattr(dist, param_name): attr = getattr(dist, param_name) dtype = getattr(attr, 'dtype', None) if dtype is None: dtype = dist.dtype warnings.warn('Unable to find property getter for parameter Tensor {} ' 'on {}, falling back to Distribution.dtype {}'.format( param_name, dist, dtype)) param = tf.convert_to_tensor(value=param, dtype=dtype) override_dict[param_name] = _slice_single_param(param, param_event_ndims, slices, dist.batch_shape_tensor()) return override_dict
Computes the override dictionary of sliced parameters. Args: dist: The tfd.Distribution being batch-sliced. params_event_ndims: Per-event parameter ranks, a `str->int` `dict`. slices: Slices as received by __getitem__. Returns: overrides: `str->Tensor` `dict` of batch-sliced parameter overrides.
juraj-google-style
def _contains_internal_dynamic_call(contract): for func in contract.all_functions_called: for node in func.nodes: for ir in node.irs: if isinstance(ir, InternalDynamicCall): return True return False
Checks if a contract contains a dynamic call either in a direct definition, or through inheritance. Returns: (boolean): True if this contract contains a dynamic call (including through inheritance).
codesearchnet
def __init__(self, **sections):
    self._sections = []
    for sct_name, sct_meta in sections.items():
        if _is_valid(sct_name):
            setattr(self, sct_name, Section(**sct_meta.def_))
            self._sections.append(sct_name)
        else:
            raise error.SectionError(sct_name)
    self._parser = None
    self._nosub_valid = False
    self._subcmds = {}
    self._config_files = ()
Initialization of instances. Args: sections (:class:`~loam.manager.Section`): section metadata. The name of each *section* is the name of the keyword argument passed on to this function. Section names should be valid identifiers, otherwise a :class:`~loam.error.SectionError` is raised.
juraj-google-style
def build_avatar_url(jid):
    digest = md5(str(jid).encode('utf-8')).hexdigest()
    return 'http:
Static method to build a gravatar url with the agent's JID Args: jid (aioxmpp.JID): an XMPP identifier Returns: str: an URL for the gravatar
codesearchnet
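The URL literal in the row above is truncated, but the digest step can still be illustrated. In the sketch below, the Gravatar base URL passed as `base` is an assumption about the intended endpoint, not recovered from the source.

from hashlib import md5

def build_avatar_url(jid, base='https://www.gravatar.com/avatar/'):  # base is assumed
    digest = md5(str(jid).encode('utf-8')).hexdigest()
    return base + digest

print(build_avatar_url('agent@example.org'))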
def _insert(self, new_item, feed_item): filename = feed_item.get(FieldMap.CREATIVE_ASSET_FILE_NAME, None) file_buffer = object_get(self.config, self.auth, '%s:%s' % (feed_item.get(FieldMap.CREATIVE_ASSET_BUCKET_NAME, None), filename)) file_mime = mimetypes.guess_type(filename, strict=False)[0] media = MediaIoBaseUpload(BytesIO(file_buffer), mimetype=file_mime, chunksize=CHUNKSIZE, resumable=True) result = self._api().insert(profileId=self.profile_id, advertiserId=str(feed_item.get(FieldMap.ADVERTISER_ID, None)), media_body=media, body=new_item).execute() return result
Handles the upload of creative assets to DCM and the creation of the associated entity. This method makes a call to the DCM API to create a new entity. Args: new_item: The item to insert into DCM. feed_item: The feed item representing the creative asset from the Bulkdozer feed. Returns: The newly created item in DCM.
github-repos
def get_query_parameters(args, cell_body, date_time=datetime.datetime.now()): env = google.datalab.utils.commands.notebook_environment() config = google.datalab.utils.commands.parse_config(cell_body, env=env, as_dict=False) sql = args['query'] if (sql is None): raise Exception('Cannot extract query parameters in non-query cell') if config: jsonschema.validate(config, BigQuerySchema.QUERY_PARAMS_SCHEMA) config = (config or {}) config_parameters = config.get('parameters', []) return bigquery.Query.get_query_parameters(config_parameters, date_time=date_time)
Extract query parameters from cell body if provided Also validates the cell body schema using jsonschema to catch errors before sending the http request. This validation isn't complete, however; it does not validate recursive schemas, but it acts as a good filter against most simple schemas Args: args: arguments passed to the magic cell cell_body: body of the magic cell date_time: The timestamp at which the date-time related parameters need to be resolved. Returns: Validated object containing query parameters
codesearchnet
def get_ogr_driver(filepath): filename, file_extension = os.path.splitext(filepath) EXTENSION = file_extension[1:] ogr_driver_count = ogr.GetDriverCount() for idx in range(ogr_driver_count): driver = ogr.GetDriver(idx) driver_extension = driver.GetMetadataItem(str('DMD_EXTENSION')) or '' driver_extensions = driver.GetMetadataItem(str('DMD_EXTENSIONS')) or '' if EXTENSION == driver_extension or EXTENSION in driver_extensions: return driver else: msg = 'No driver found for the following file extension: {}'.format( EXTENSION) raise ValueError(msg)
Get the OGR driver from the provided file extension. Args: file_extension (str): file extension Returns: osgeo.ogr.Driver Raises: ValueError: no driver is found
juraj-google-style
class TimeSeriesFeatureEmbedder(nn.Module): def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None: super().__init__() self.num_features = len(cardinalities) self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)]) def forward(self, features: torch.Tensor) -> torch.Tensor: if self.num_features > 1: cat_feature_slices = torch.chunk(features, self.num_features, dim=-1) else: cat_feature_slices = [features] return torch.cat([embed(cat_feature_slice.squeeze(-1)) for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)], dim=-1)
Embed a sequence of categorical features. Args: cardinalities (`list[int]`): List of cardinalities of the categorical features. embedding_dims (`list[int]`): List of embedding dimensions of the categorical features.
github-repos
def put(self, key, value):
    path = self.object_path(key)
    self._write_object(path, value)
Stores the object `value` named by `key`. Args: key: Key naming `value` value: the object to store.
juraj-google-style
def setdefault(pb_or_dict, key, value):
    if not get(pb_or_dict, key, default=None):
        set(pb_or_dict, key, value)
Set the key on the object to the value if the current value is falsy. Because protobuf Messages do not distinguish between unset values and falsy ones particularly well, this method treats any falsy value (e.g. 0, empty list) as a target to be overwritten, on both Messages and dictionaries. Args: pb_or_dict (Union[~google.protobuf.message.Message, Mapping]): the object. key (str): The key on the object in question. value (Any): The value to set. Raises: TypeError: If pb_or_dict is not a Message or Mapping.
codesearchnet
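A toy illustration of the falsy-overwrite semantics described above, using plain dicts and trivial stand-ins for the protobuf-aware get/set helpers, which are not shown in this row.

def get(obj, key, default=None):
    return obj.get(key, default)

def set(obj, key, value):  # stand-in mirroring the helper name used above
    obj[key] = value

def setdefault(pb_or_dict, key, value):
    if not get(pb_or_dict, key, default=None):
        set(pb_or_dict, key, value)

d = {'count': 0, 'name': 'kept'}
setdefault(d, 'count', 5)     # 0 is falsy -> overwritten
setdefault(d, 'name', 'new')  # non-empty string -> kept
setdefault(d, 'tags', ['a'])  # missing key -> set
assert d == {'count': 5, 'name': 'kept', 'tags': ['a']}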
def _expansion_request_url_for_value_set_url(value_set_url: str) -> Tuple[str, str]:
    value_set_domain = urllib.parse.urlparse(value_set_url).netloc
    root_url = TERMINOLOGY_BASE_URL_PER_DOMAIN.get(value_set_domain)
    if root_url is None:
        raise ValueError(
            'Unknown domain %s. Can not find appropriate terminology server.'
            % value_set_domain)
    return (root_url, urllib.parse.urljoin(root_url, 'ValueSet/$expand'))
Builds a URL for querying a terminology service to expand `value_set_url`. Args: value_set_url: The URL being expanded. Raises: ValueError: If a terminology service can not be found for `value_set_url`. Returns: A tuple of (root_url, expansion_url) where root_url is the root URL of the terminology service and expansion_url is the URL to use when performing value set expansion against that terminology service.
github-repos
def translate_index(index_name): uuid = SEARCH_INDEX_UUIDS.get(index_name.strip().lower()) if not uuid: try: index_info = globus_sdk.SearchClient().get_index(index_name).data if not isinstance(index_info, dict): raise ValueError("Multiple UUIDs possible") uuid = index_info.get("id", index_name) except Exception: uuid = index_name return uuid
Translate a known Globus Search index into the index UUID. The UUID is the proper way to access indices, and will eventually be the only way. This method will return names it cannot disambiguate. Arguments: index_name (str): The name of the index. Returns: str: The UUID of the index. If the index is not known and is not unambiguous, this will be the ``index_name`` unchanged instead.
juraj-google-style
def timestampFormat(self, timestampFormat):
    if not isinstance(timestampFormat, str):
        raise TypeError('not of type unicode')
    self._timestampFormat = timestampFormat
Setter to _timestampFormat. Formatting string for conversion of timestamps to QtCore.QDateTime Raises: TypeError: if timestampFormat is not of type unicode. Args: timestampFormat (unicode): assign timestampFormat to _timestampFormat. Formatting string for conversion of timestamps to QtCore.QDateTime. Used in data method.
codesearchnet
def send_state_event(self, room_id, event_type, content, state_key="", timestamp=None):
    path = "/rooms/%s/state/%s" % (quote(room_id), quote(event_type))
    if state_key:
        path += "/%s" % quote(state_key)
    params = {}
    if timestamp:
        params["ts"] = timestamp
    return self._send("PUT", path, content, query_params=params)
Perform PUT /rooms/$room_id/state/$event_type Args: room_id(str): The room ID to send the state event in. event_type(str): The state event type to send. content(dict): The JSON content to send. state_key(str): Optional. The state key for the event. timestamp (int): Set origin_server_ts (For application services only)
juraj-google-style
def subprogram_signature(vo, fullname=None):
    if fullname is None:
        fullname = vo.name
    if isinstance(vo, VhdlFunction):
        plist = ','.join(p.data_type for p in vo.parameters)
        sig = '{}[{} return {}]'.format(fullname, plist, vo.return_type)
    else:
        plist = ','.join(p.data_type for p in vo.parameters)
        sig = '{}[{}]'.format(fullname, plist)
    return sig
Generate a signature string Args: vo (VhdlFunction, VhdlProcedure): Subprogram object Returns: Signature string.
juraj-google-style
def pprint(sequence, keys=None):
    if len(sequence) > 0:
        columns = calculate_columns(sequence)
        row_format = calculate_row_format(columns, keys)
        header = row_format % dict([(key, key.title()) for key in columns])
        separator = row_format % dict([(key, '-' * columns[key]) for key in columns])
        print(separator)
        print(header)
        print(separator)
        for row in sequence:
            print(row_format % row)
        print(separator)
Print sequence as ascii table to stdout. Args: sequence (list or tuple): a sequence with a dictionary each entry. keys (list): optional list of keys to order columns as well as to filter for them.
codesearchnet
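The calculate_columns and calculate_row_format helpers used by pprint are not included in this row. A self-contained sketch of what they plausibly do (column width = widest cell, row format = fixed-width fields) is shown below; both helper bodies are assumptions for illustration.

def calculate_columns(sequence):
    # hypothetical helper: widest value (or key) per column
    widths = {}
    for row in sequence:
        for key, value in row.items():
            widths[key] = max(widths.get(key, len(key)), len(str(value)))
    return widths

def calculate_row_format(columns, keys=None):
    # hypothetical helper: one '%(key)-Ns' field per selected column, pipe-separated
    keys = keys or list(columns)
    return '| ' + ' | '.join('%%(%s)-%ds' % (k, columns[k]) for k in keys) + ' |'

rows = [{'name': 'alpha', 'count': 3}, {'name': 'beta', 'count': 12}]
fmt = calculate_row_format(calculate_columns(rows))
for row in rows:
    print(fmt % row)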
def combine_graph_defs(to_proto, from_proto): if (from_proto.version != to_proto.version): raise ValueError('Cannot combine GraphDefs of different versions.') try: _safe_copy_proto_list_values(to_proto.node, from_proto.node, (lambda n: n.name)) except _ProtoListDuplicateKeyError as exc: raise ValueError(('A GraphDef contains non-unique node names: %s' % exc)) except _SameKeyDiffContentError as exc: raise ValueError(('Cannot combine GraphDefs because nodes share a name but contents are different: %s' % exc)) try: _safe_copy_proto_list_values(to_proto.library.function, from_proto.library.function, (lambda n: n.signature.name)) except _ProtoListDuplicateKeyError as exc: raise ValueError(('A GraphDef contains non-unique function names: %s' % exc)) except _SameKeyDiffContentError as exc: raise ValueError(('Cannot combine GraphDefs because functions share a name but are different: %s' % exc)) try: _safe_copy_proto_list_values(to_proto.library.gradient, from_proto.library.gradient, (lambda g: g.gradient_func)) except _ProtoListDuplicateKeyError as exc: raise ValueError(('A GraphDef contains non-unique gradient function names: %s' % exc)) except _SameKeyDiffContentError as exc: raise ValueError(('Cannot combine GraphDefs because gradients share a gradient_func name but map to different functions: %s' % exc)) return to_proto
Combines two GraphDefs by adding nodes from from_proto into to_proto. All GraphDefs are expected to be of TensorBoard's. It assumes node names are unique across GraphDefs if contents differ. The names can be the same if the NodeDef content are exactly the same. Args: to_proto: A destination TensorBoard GraphDef. from_proto: A TensorBoard GraphDef to copy contents from. Returns: to_proto Raises: ValueError in case any assumption about GraphDef is violated: A GraphDef should have unique node, function, and gradient function names. Also, when merging GraphDefs, they should have not have nodes, functions, or gradient function mappings that share the name but details do not match.
codesearchnet
def get_sequence_dense_tensor(self, transformation_cache, state_manager): sp_tensor = transformation_cache.get(self, state_manager) dense_tensor = sparse_ops.sparse_tensor_to_dense(sp_tensor, default_value=self.default_value) dense_shape = array_ops.concat([array_ops.shape(dense_tensor)[:1], [-1], self.variable_shape], axis=0) dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape) if sp_tensor.shape.ndims == 2: num_elements = self.variable_shape.num_elements() else: num_elements = 1 seq_length = fc_utils.sequence_length_from_sparse_tensor(sp_tensor, num_elements=num_elements) return fc.SequenceDenseColumn.TensorSequenceLengthPair(dense_tensor=dense_tensor, sequence_length=seq_length)
Returns a `TensorSequenceLengthPair`. Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables.
github-repos
def is_debug_node(node_name): return node_name.startswith('__dbg_')
Determine whether a node name is that of a debug node. Such nodes are inserted by TensorFlow core upon request in RunOptions.debug_options.debug_tensor_watch_opts. Args: node_name: Name of the node. Returns: A bool indicating whether the input argument is the name of a debug node.
github-repos
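Usage is a simple prefix check; for example (the node-name suffix here is only an illustrative placeholder, not the exact naming scheme used by the debugger):

def is_debug_node(node_name):
    return node_name.startswith('__dbg_')

assert is_debug_node('__dbg_some_tensor_watch')
assert not is_debug_node('Add')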
def put(self, json=None): return self._call('put', url=self.endpoint, json=json)
Send a PUT request and return the JSON decoded result. Args: json (dict, optional): Object to encode and send in request. Returns: mixed: JSON decoded response data.
codesearchnet
def setAvatar(self, image): self.conn("PUT", "{0}/users/{1}/profile/avatar".format(SkypeConnection.API_USER, self.userId), auth=SkypeConnection.Auth.SkypeToken, data=image.read())
Update the profile picture for the current user. Args: image (file): a file-like object to read the image from
juraj-google-style
def __init__(self, **kwargs): self.address = self.name = address.new(kwargs['name']) self.subgraph = None self.params = {} log.debug('New target: %s', self.address) try: for param_name, param_type in self.required_params: self.params[param_name] = kwargs.pop(param_name) assert isinstance(self.params[param_name], param_type) except AssertionError as err: if isinstance(param_type, tuple) and len(param_type) > 1: msg = 'one of: %s' % ', '.join(param_type.__name__) else: msg = str(param_type.__name__) raise error.InvalidRule( 'While loading %s: Invalid type for %s. ' 'Expected: %s. Actual: %s.' % ( self.address, param_name, msg, repr(self.params[param_name]))) except KeyError as err: log.error(err) raise error.InvalidRule( 'While loading %s: Required parameter %s not given.' % repr( self.address, param_name)) for (param_name, param_type, param_default) in self.optional_params: if param_name not in kwargs: self.params[param_name] = param_default else: self.params[param_name] = kwargs.pop(param_name) if not isinstance(self.params[param_name], param_type): msg = str(param_type.__name__) if isinstance(param_type, tuple) and len(param_type) > 1: msg = 'one of: %s' % ', '.join(param_type.__name__) raise error.InvalidRule( 'While loading %s: Invalid type for %s. ' 'Expected: %s. Actual: %s.' % ( self.address, param_name, msg, repr(self.params[param_name]))) if kwargs: raise error.InvalidRule( '[%s]: Unknown argument(s): %s' % ( self.address, ', '.join(kwargs.keys()))) if self.graphcontext is not None: self.graphcontext.add_node(self.address, target_obj=self) try: self.validate_args() except AssertionError as err: raise error.InvalidRule('Error in %s: %s' % (self.address, err))
Initialize the build rule. Args: **kwargs: Assorted parameters; see subclass implementations for details.
juraj-google-style
def serialize_array(array, domain=(0, 1), fmt='png', quality=70): normalized = _normalize_array(array, domain=domain) return _serialize_normalized_array(normalized, fmt=fmt, quality=quality)
Given an arbitrary rank-3 NumPy array, returns the byte representation of the encoded image. Args: array: NumPy array of dtype uint8 and range 0 to 255 domain: expected range of values in array, see `_normalize_array()` fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer
juraj-google-style
def list_alias():
    alias_table = get_alias_table()
    output = []
    for alias in alias_table.sections():
        if alias_table.has_option(alias, 'command'):
            output.append({
                'alias': alias,
                'command': ' '.join(alias_table.get(alias, 'command').split())
            })
    return output
List all registered aliases. Returns: An array of dictionary containing the alias and the command that it points to.
codesearchnet
def init(self, address, hard_reset=False): self.address = address if hard_reset: pass for i in range(Dongle.PORT_RETRIES): try: logger.debug('Setting up BGAPI, attempt {}/{}'.format((i + 1), Dongle.PORT_RETRIES)) self.api = BlueGigaAPI(port=self.address, callbacks=self, baud=Dongle.BAUDRATE, timeout=DEF_TIMEOUT) self.api.start_daemon() break except serial.serialutil.SerialException as e: logger.debug('Failed to init BlueGigaAPI: {}, attempt {}/{}'.format(e, (i + 1), Dongle.PORT_RETRIES)) time.sleep(0.1) if (self.api is None): return False time.sleep(0.5) self.get_supported_connections() logger.info('Dongle supports {} connections'.format(self.supported_connections)) if (self.supported_connections == (- 1)): logger.error('Failed to retrieve number of supported connections from the dongle! (try reinserting it)') return False self.conn_state = {x: self._STATE_IDLE for x in range(self.supported_connections)} self.reset() self._cbthread = threading.Thread(target=self._cbthreadfunc) self._cbthread.setDaemon(True) self._cbthread_q = Queue() self._cbthread.start() return True
Open the serial connection to a dongle at the supplied address. Args: address (str): the serial port address of the BLED112 dongle, e.g. 'COM5' hard_reset (bool): not currently used Returns: True if a connection with the dongle was established, False otherwise.
codesearchnet
def serialize(metric): return serialization_lib.serialize_keras_object(metric)
Serializes metric function or `Metric` instance. Args: metric: A Keras `Metric` instance or a metric function. Returns: Metric configuration dictionary.
github-repos
def max_range(ranges, combined=True):
    try:
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', 'All-NaN (slice|axis) encountered')
            values = [tuple(np.NaN if v is None else v for v in r) for r in ranges]
            if pd and any(isinstance(v, datetime_types) and not isinstance(v, cftime_types)
                          for r in values for v in r):
                converted = []
                for l, h in values:
                    if isinstance(l, datetime_types) and isinstance(h, datetime_types):
                        l, h = (pd.Timestamp(l).to_datetime64(),
                                pd.Timestamp(h).to_datetime64())
                    converted.append((l, h))
                values = converted
            arr = np.array(values)
            if not len(arr):
                return (np.NaN, np.NaN)
            elif arr.dtype.kind in 'OSU':
                arr = list(python2sort([v for r in values for v in r
                                        if not is_nan(v) and v is not None]))
                return (arr[0], arr[-1])
            elif arr.dtype.kind in 'M':
                return ((arr.min(), arr.max()) if combined
                        else (arr[:, 0].min(), arr[:, 1].min()))
            if combined:
                return (np.nanmin(arr), np.nanmax(arr))
            else:
                return (np.nanmin(arr[:, 0]), np.nanmax(arr[:, 1]))
    except:
        return (np.NaN, np.NaN)
Computes the maximal lower and upper bounds from a list bounds. Args: ranges (list of tuples): A list of range tuples combined (boolean, optional): Whether to combine bounds Whether range should be computed on lower and upper bound independently or both at once Returns: The maximum range as a single tuple
codesearchnet
def angle_to_distance(angle, units='metric'):
    distance = math.radians(angle) * BODY_RADIUS
    if units in ('km', 'metric'):
        return distance
    elif units in ('sm', 'imperial', 'US customary'):
        return distance / STATUTE_MILE
    elif units in ('nm', 'nautical'):
        return distance / NAUTICAL_MILE
    else:
        raise ValueError('Unknown units type %r' % units)
Convert angle in to distance along a great circle. Args: angle (float): Angle in degrees to convert to distance units (str): Unit type to be used for distances Returns: float: Distance in ``units`` Raises: ValueError: Unknown value for ``units``
codesearchnet
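A worked example under the assumption that BODY_RADIUS is the Earth's mean radius (about 6371 km) and that the imperial/nautical conversion constants are expressed in kilometres; none of these constants are shown in the row above, so the values below are assumptions.

import math

BODY_RADIUS = 6371.0        # assumed: Earth's mean radius in km
STATUTE_MILE = 1.609344     # assumed: km per statute mile
NAUTICAL_MILE = 1.852       # assumed: km per nautical mile

def angle_to_distance(angle, units='metric'):
    distance = math.radians(angle) * BODY_RADIUS
    if units in ('km', 'metric'):
        return distance
    elif units in ('sm', 'imperial', 'US customary'):
        return distance / STATUTE_MILE
    elif units in ('nm', 'nautical'):
        return distance / NAUTICAL_MILE
    else:
        raise ValueError('Unknown units type %r' % units)

print(round(angle_to_distance(1), 1))              # ~111.2 km per degree
print(round(angle_to_distance(1, 'nautical'), 1))  # ~60.0 nm per degree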
def convert_data_to_dtype(data, data_type, mot_float_type='float'): scalar_dtype = ctype_to_dtype(data_type, mot_float_type) if isinstance(data, numbers.Number): data = scalar_dtype(data) if is_vector_ctype(data_type): shape = data.shape dtype = ctype_to_dtype(data_type, mot_float_type) ve = np.zeros(shape[:(- 1)], dtype=dtype) if (len(shape) == 1): for vector_ind in range(shape[0]): ve[0][vector_ind] = data[vector_ind] elif (len(shape) == 2): for i in range(data.shape[0]): for vector_ind in range(data.shape[1]): ve[i][vector_ind] = data[(i, vector_ind)] elif (len(shape) == 3): for i in range(data.shape[0]): for j in range(data.shape[1]): for vector_ind in range(data.shape[2]): ve[(i, j)][vector_ind] = data[(i, j, vector_ind)] return np.require(ve, requirements=['C', 'A', 'O']) return np.require(data, scalar_dtype, ['C', 'A', 'O'])
Convert the given input data to the correct numpy type. Args: data (ndarray): The value to convert to the correct numpy type data_type (str): the data type we need to convert the data to mot_float_type (str): the data type of the current ``mot_float_type`` Returns: ndarray: the input data but then converted to the desired numpy data type
codesearchnet
def to_dict(self):
    output = asdict(self)
    output['structure_module'] = self.structure_module.to_dict()
    return output
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. Returns: `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
github-repos
def spawn_agent(self, agent_definition, location):
    self._should_write_to_command_buffer = True
    self._add_agents(agent_definition)
    command_to_send = SpawnAgentCommand(location, agent_definition.name, agent_definition.type)
    self._commands.add_command(command_to_send)
Queues a spawn agent command. It will be applied when `tick` or `step` is called next. The agent won't be able to be used until the next frame. Args: agent_definition (:obj:`AgentDefinition`): The definition of the agent to spawn. location (np.ndarray or list): The position to spawn the agent in the world, in XYZ coordinates (in meters).
codesearchnet
def fetch_layout(tensor: tensor_lib.Tensor) -> layout_lib.Layout: return _dtensor_device().fetch_layout(tensor)
Fetches the layout of a DTensor. Args: tensor: The DTensor whose layout is to be fetched. Returns: The `Layout` of this DTensor. Raises: RuntimeError: When not called eagerly.
github-repos
def get_func_and_args_from_str(call_str):
    open_paren_index = call_str.find('(')
    close_paren_index = call_str.rfind(')')
    function_name = call_str[:open_paren_index]
    args = call_str[open_paren_index + 1:close_paren_index].split(',')
    args = [arg.split('=')[0].strip() for arg in args]
    args = [arg for arg in args if arg]
    return (function_name, args)
Parse call string to get function and argument names. Args: call_str: Call string must be in the form: `tf.foo(arg1=val1, arg2=val2, ...)`. Returns: (function_name, list of arg names) tuple.
github-repos
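For example, on the call-string format described in the docstring (the parser is restated so the snippet runs standalone):

def get_func_and_args_from_str(call_str):
    open_paren_index = call_str.find('(')
    close_paren_index = call_str.rfind(')')
    function_name = call_str[:open_paren_index]
    args = call_str[open_paren_index + 1:close_paren_index].split(',')
    args = [arg.split('=')[0].strip() for arg in args]
    args = [arg for arg in args if arg]
    return (function_name, args)

print(get_func_and_args_from_str('tf.foo(x, name="n", axis=1)'))
# ('tf.foo', ['x', 'name', 'axis'])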
def __init__( self, bar_gram, midi_df_list, batch_size=20, seq_len=10, time_fraction=0.1, conditional_flag=True ): if isinstance(bar_gram, BarGram) is False: raise TypeError() self.__bar_gram = bar_gram program_list = [] self.__midi_df_list = midi_df_list for i in range(len(self.__midi_df_list)): program_list.extend( self.__midi_df_list[i]["program"].drop_duplicates().values.tolist() ) program_list = list(set(program_list)) self.__batch_size = batch_size self.__seq_len = seq_len self.__channel = len(program_list) self.__program_list = program_list self.__time_fraction = time_fraction self.__dim = self.__bar_gram.dim self.__conditional_flag = conditional_flag
Init. Args: bar_gram: is-a `BarGram`. midi_df_list: `list` of paths to MIDI data extracted by `MidiController`. batch_size: Batch size. seq_len: The length of sequneces. The length corresponds to the number of `time` splited by `time_fraction`. time_fraction: Time fraction which means the length of bars.
juraj-google-style
def add_transition(self, source: str, dest: str): self._transitions[source].append(dest)
Adds a transition from one state to another. Args: source (str): the name of the state from where the transition starts dest (str): the name of the state where the transition ends
codesearchnet
def make_encoder(activation, num_topics, layer_sizes): encoder_net = tf.keras.Sequential() for num_hidden_units in layer_sizes: encoder_net.add(tf.keras.layers.Dense(num_hidden_units, activation=activation, kernel_initializer=tf.compat.v1.glorot_normal_initializer())) encoder_net.add(tf.keras.layers.Dense(num_topics, activation=tf.nn.softplus, kernel_initializer=tf.compat.v1.glorot_normal_initializer())) def encoder(bag_of_words): net = _clip_dirichlet_parameters(encoder_net(bag_of_words)) return tfd.Dirichlet(concentration=net, name='topics_posterior') return encoder
Create the encoder function. Args: activation: Activation function to use. num_topics: The number of topics. layer_sizes: The number of hidden units per layer in the encoder. Returns: encoder: A `callable` mapping a bag-of-words `Tensor` to a `tfd.Distribution` instance over topics.
codesearchnet
def __init__(self, element=None):
    super(TreeMapNode, self).__init__()
    self._element = element
    self._nodes = dict()
    self._parent = None
    self._depth = -1
Constructor. Args: element: Object to add into the node.
juraj-google-style
def remove_overlap(self, also_remove_contiguous: bool = False) -> None:
    overlap = True
    while overlap:
        overlap = self._remove_overlap_sub(also_remove_contiguous)
    self._sort()
Merges any overlapping intervals. Args: also_remove_contiguous: treat contiguous (as well as overlapping) intervals as worthy of merging?
juraj-google-style
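The _remove_overlap_sub helper is not shown in this row. A standalone sketch of the same merging idea over plain (start, end) tuples, including the optional merge of merely contiguous intervals, might look like the following; it is an assumed reimplementation, not the class's actual code.

def merge_intervals(intervals, also_merge_contiguous=False):
    # merge overlapping (and optionally contiguous) intervals
    merged = []
    for start, end in sorted(intervals):
        if merged and (start < merged[-1][1] or
                       (also_merge_contiguous and start == merged[-1][1])):
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged

print(merge_intervals([(0, 2), (1, 4), (4, 5)]))
# [(0, 4), (4, 5)] - contiguous intervals kept separate
print(merge_intervals([(0, 2), (1, 4), (4, 5)], also_merge_contiguous=True))
# [(0, 5)]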
def convert_dict_to_params(src_dict): return "&".join([ "{}={}".format(key, value) for key, value in src_dict.items() ])
Convert dict to params string. Args: src_dict (dict): source mapping data structure Returns: str: string params data Examples: >>> src_dict = {"a": 1, "b": 2} >>> convert_dict_to_params(src_dict) 'a=1&b=2'
juraj-google-style
class Permute(Layer): def __init__(self, dims, **kwargs): super().__init__(**kwargs) self.dims = tuple(dims) if sorted(dims) != list(range(1, len(dims) + 1)): raise ValueError(f'Invalid permutation argument `dims` for Permute Layer. The set of indices in `dims` must be consecutive and start from 1. Received dims={dims}') self.input_spec = InputSpec(ndim=len(self.dims) + 1) def compute_output_shape(self, input_shape): output_shape = [input_shape[0]] for dim in self.dims: output_shape.append(input_shape[dim]) return tuple(output_shape) def compute_output_spec(self, inputs): output_shape = self.compute_output_shape(inputs.shape) return KerasTensor(shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse) def call(self, inputs): return ops.transpose(inputs, axes=(0,) + self.dims) def get_config(self): config = {'dims': self.dims} base_config = super().get_config() return {**base_config, **config}
Permutes the dimensions of the input according to a given pattern. Useful e.g. connecting RNNs and convnets. Args: dims: Tuple of integers. Permutation pattern does not include the batch dimension. Indexing starts at 1. For instance, `(1, 3, 2)` permutes the second and third dimensions of the input. Input shape: Arbitrary. Output shape: Same as the input shape, but with the dimensions re-ordered according to the specified pattern. Example: >>> x = keras.Input(shape=(10, 64)) >>> y = keras.layers.Permute((2, 1))(x) >>> y.shape (None, 64, 10)
github-repos
def _assertOpOutputMatchesExpected(self, params, solution, high_level=True, rtol=0.001, atol=1e-05): input = params['input'] with self.session() as session: for dtype in self.numeric_types - {np.int8, np.uint8}: expected = solution.astype(dtype) with self.test_scope(): params['input'] = array_ops.placeholder(dtype, input.shape, name='input') if high_level: output = array_ops.matrix_diag_part(**params) else: output = gen_array_ops.matrix_diag_part(**params) output = array_ops.matrix_diag_part(**params) result = session.run(output, {params['input']: input.astype(dtype)}) self.assertEqual(output.dtype, expected.dtype) self.assertAllCloseAccordingToType(expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)
Verifies that matrix_diag_part produces `solution` when fed `params`. Args: params: dictionary containing input parameters to matrix_diag_part. solution: numpy array representing the expected output. high_level: call high_level matrix_set_diag rtol: relative tolerance for equality test. atol: absolute tolerance for equality test.
github-repos
def quantile(self, value, name='quantile'): return self._call_quantile(value, name)
Quantile function. Aka "inverse cdf" or "percent point function". Given random variable `X` and `p in [0, 1]`, the `quantile` is: ```none quantile(p) := x such that P[X <= x] == p ``` Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`.
github-repos
def item_coords(self, table_item):
    for row_key in self.children.keys():
        for item_key in self.children[row_key].children.keys():
            if self.children[row_key].children[item_key] == table_item:
                return (int(row_key), int(item_key))
    return None
Returns table_item's (row, column) coordinates. Returns None if the item is not found. Args: table_item (TableItem): an item instance
juraj-google-style
def __init__(self, subdomain, username, password, ssl=False, currentUser=None): self.base_url = "http%s: self._settings = { "subdomain": subdomain, "username": username, "password": password, "ssl": ssl } self._user = currentUser self._users = {} self._rooms = {} if not self._user: _connection = Connection(url="%s/users/me" % self.base_url, user=username, password=password) user = _connection.get(key="user") self._connection = Connection( base_url=self.base_url, user=self._user.token if self._user else user["api_auth_token"], password="x" ) if self._user: self._user.set_connection(self._connection) else: self._user = User(self, user["id"], current=True) self._user.token = user["api_auth_token"]
Initialize. Args: subdomain (str): Campfire subdomain username (str): User password (str): pasword Kwargs: ssl (bool): enabled status of SSL currentUser (:class:`User`): If specified, don't auto load current user, use this one instead
juraj-google-style
def write_registers(self, registeraddress, values): if (not isinstance(values, list)): raise TypeError('The "values parameter" must be a list. Given: {0!r}'.format(values)) _checkInt(len(values), minvalue=1, description='length of input list') self._genericCommand(16, registeraddress, values, numberOfRegisters=len(values), payloadformat='registers')
Write integers to 16-bit registers in the slave. The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16"). Uses Modbus function code 16. The number of registers that will be written is defined by the length of the ``values`` list. Args: * registeraddress (int): The slave register start address (use decimal numbers, not hex). * values (list of int): The values to store in the slave registers. Any scaling of the register data, or converting it to negative number (two's complement) must be done manually. Returns: None Raises: ValueError, TypeError, IOError
codesearchnet
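A hedged usage sketch for the minimalmodbus-style `write_registers` above; the serial port and slave address are placeholders for a real setup:

```python
# Placeholders: '/dev/ttyUSB0' and slave address 1 must match your hardware.
import minimalmodbus

instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 1)   # port, slave address
instrument.write_registers(registeraddress=100, values=[10, 20, 30])  # three consecutive registers
```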
def universal_transformer_layer(x, hparams, ffn_unit, attention_unit, pad_remover=None): def add_vanilla_transformer_layer(x, num_layers, name): if hparams.add_position_timing_signal: x = common_attention.add_timing_signal_1d(x) for layer in range(num_layers): with tf.variable_scope(name + "layer_%d" % layer): x = ffn_unit(attention_unit(x)) return x with tf.variable_scope("universal_transformer_%s" % hparams.recurrence_type): if (hparams.mix_with_transformer and "before_ut" in hparams.mix_with_transformer): x = add_vanilla_transformer_layer(x, hparams.num_mixedin_layers, "before_ut_") if hparams.recurrence_type == "act": output, extra_output = universal_transformer_act( x, hparams, ffn_unit, attention_unit) else: ut_function, initializer = get_ut_layer(x, hparams, ffn_unit, attention_unit, pad_remover) output, _, extra_output = tf.foldl( ut_function, tf.range(hparams.num_rec_steps), initializer=initializer) if (hparams.recurrence_type == "lstm" and hparams.get("use_memory_as_final_state", False)): output = extra_output if (hparams.mix_with_transformer and "after_ut" in hparams.mix_with_transformer): output = add_vanilla_transformer_layer(output, hparams.num_mixedin_layers, "after_ut_") return output, extra_output
Core function applying the universal transformer layer. Args: x: input hparams: model hyper-parameters ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: the output tensor, extra output (can be memory, ponder time, etc.) Raises: ValueError: Unknown recurrence type
juraj-google-style
def write_csv(data, file_name, encoding='utf-8'): name_extension = len(data) > 1 root, ext = os.path.splitext(file_name) for i, sheet in enumerate(data): fname = file_name if not name_extension else root+"_"+str(i)+ext with open(fname, 'wb') as date_file: csv_file = csv.writer(date_file, encoding=encoding) for line in sheet: csv_file.writerow(line)
Writes out to csv format. Args: data: 2D list of tables/worksheets. file_name: Name of the output file.
juraj-google-style
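An input-layout example for `write_csv`; note the snippet assumes a csv writer that accepts an `encoding` argument (e.g. `unicodecsv` imported as `csv`), since the stdlib writer does not:

```python
# Two worksheets -> two files: report_0.csv and report_1.csv
sheets = [
    [["name", "score"], ["alice", 10], ["bob", 12]],
    [["name", "score"], ["carol", 7]],
]
write_csv(sheets, "report.csv")

# A single worksheet keeps the original file name:
write_csv([[["only", "sheet"]]], "single.csv")
```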
def GetLoggingLocation(): frame = inspect.currentframe() this_file = frame.f_code.co_filename frame = frame.f_back while frame: if (this_file == frame.f_code.co_filename): if ('cdbg_logging_location' in frame.f_locals): ret = frame.f_locals['cdbg_logging_location'] if (len(ret) != 3): return (None, None, None) return ret frame = frame.f_back return (None, None, None)
Search for and return the file and line number from the log collector. Returns: (pathname, lineno, func_name) The full path, line number, and function name for the logpoint location.
codesearchnet
def parse_helpfull_output(help_output, regex=FLAG_HELP_RE_PY): valid_flags = set() for _, no_prefix, flag_name in regex.findall(help_output): valid_flags.add('--' + flag_name) if no_prefix: valid_flags.add('--no' + flag_name) return valid_flags
Parses the output of --helpfull. Args: help_output: str, the full output of --helpfull. Returns: A set of flags that are valid flags.
juraj-google-style
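An illustrative call for `parse_helpfull_output`; the `FLAG_HELP_RE_PY` regex is defined elsewhere in the module and is assumed to capture a `[no]` prefix group and the flag name:

```python
# The exact set returned depends on FLAG_HELP_RE_PY, which is not shown above;
# this layout mirrors typical absl --helpfull output.
help_text = """
  --[no]verbose: produce extra output
  --logdir: where to write logs
"""
flags = parse_helpfull_output(help_text)
# e.g. {'--verbose', '--noverbose', '--logdir'}
```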
def get(url, params={}): request_url = url if len(params): request_url = "{}?{}".format(url, urlencode(params)) try: req = Request(request_url, headers={'User-Agent': 'Mozilla/5.0'}) response = json.loads(urlopen(req).read().decode("utf-8")) return response except HTTPError as err: raise MtgException(err.read())
Invoke an HTTP GET request on a url Args: url (string): URL endpoint to request params (dict): Dictionary of url parameters Returns: dict: JSON response as a dictionary
juraj-google-style
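A usage sketch for the `get` helper; the URL below is purely illustrative and only shows how `params` is folded into the query string:

```python
# Hypothetical endpoint; the call issues a GET to
# https://api.example.com/v1/cards?name=Elves&page=1 and returns the parsed JSON dict.
response = get("https://api.example.com/v1/cards", params={"name": "Elves", "page": 1})
```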
def publish_values(self, labeled_values): metric_dicts = [Metric(time.time(), uuid.uuid4().hex, value, label=label).as_dict() for label, value in labeled_values] for publisher in self.publishers: publisher.publish(metric_dicts)
The method to publish simple labeled values. Args: labeled_values (List[Tuple(str, int)]): list of (label, value)
github-repos
def wbmax(self, value=None): if (value is not None): try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float for field `wbmax`'.format(value)) self._wbmax = value
Corresponds to IDD Field `wbmax` Extreme maximum wet-bulb temperature Args: value (float): value for IDD Field `wbmax` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def _build_endpoint(self, endpoint_name): endpoint_relative = settings.get('asmaster_endpoints', endpoint_name) return ('%s%s' % (self.host, endpoint_relative))
Generate an endpoint url from a setting name. Args: endpoint_name(str): setting name for the endpoint to build Returns: (str) url endpoint
codesearchnet
def update_media_assetfile(access_token, parent_asset_id, asset_id, content_length, name): path = '/Files' full_path = ''.join([path, "('", asset_id, "')"]) full_path_encoded = urllib.parse.quote(full_path, safe='') endpoint = ''.join([ams_rest_endpoint, full_path_encoded]) body = '{ \ "ContentFileSize": "' + str(content_length) + '", \ "Id": "' + asset_id + '", \ "MimeType": "video/mp4", \ "Name": "' + name + '", \ "ParentAssetId": "' + parent_asset_id + '" \ }' return do_ams_patch(endpoint, full_path_encoded, body, access_token)
Update Media Service Asset File. Args: access_token (str): A valid Azure authentication token. parent_asset_id (str): A Media Service Asset Parent Asset ID. asset_id (str): A Media Service Asset Asset ID. content_length (str): A Media Service Asset Content Length. name (str): A Media Service Asset name. Returns: HTTP response. JSON body.
juraj-google-style
def derive_depth(self, depth): cls = type(self) return cls( self[0], self[1], self[2], self[3], self[4], depth )
Derives a new event from this one setting the ``depth`` attribute. Args: depth (int): The new depth for the derived event. Returns: IonEvent: The newly generated event.
juraj-google-style
def register_intent_parser(self, intent_parser, domain=0): if (domain not in self.domains): self.register_domain(domain=domain) self.domains[domain].register_intent_parser(intent_parser=intent_parser)
Register an intent parser with a domain. Args: intent_parser(intent): The intent parser you wish to register. domain(str): a string representing the domain you wish to register the intent parser to.
codesearchnet
def escape_for_cmd_exe(arg): meta_chars = '()%!^"<>&|' meta_re = re.compile((('(' + '|'.join((re.escape(char) for char in list(meta_chars)))) + ')')) meta_map = {char: '^{0}'.format(char) for char in meta_chars} def escape_meta_chars(m): char = m.group(1) return meta_map[char] return meta_re.sub(escape_meta_chars, arg)
Escape an argument string to be suitable to be passed to cmd.exe on Windows This method takes an argument that is expected to already be properly escaped for the receiving program to be properly parsed. This argument will be further escaped to pass the interpolation performed by cmd.exe unchanged. Any meta-characters will be escaped, removing the ability to e.g. use redirects or variables. Args: arg (str): a single command line argument to escape for cmd.exe Returns: str: an escaped string suitable to be passed as a program argument to cmd.exe
codesearchnet
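A quick demonstration of the escaping behaviour; every cmd.exe metacharacter in the argument gets a `^` prefix:

```python
arg = 'dir "C:\\Program Files" && echo %PATH%'
print(escape_for_cmd_exe(arg))
# dir ^"C:\Program Files^" ^&^& echo ^%PATH^%
```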
def __init__(self, *args, **kwargs): if isinstance(kwargs.get('record'), dict): prefix, _ = kwargs['event_type'].split('.', 1) model = self.EVENT_PREFIX_TO_MODEL[prefix] kwargs['record'] = model.from_api(**kwargs['record']) super(WebHookEvent, self).__init__(*args, **kwargs)
Parse raw record data if required. Args: record (dict or BaseModel): The record data that was received for the request. If it is a ``dict``, the data will be parsed using the proper model's ``from_api`` method.
juraj-google-style
def _verify_iat_and_exp(payload): now = _helpers.datetime_to_secs(_helpers.utcnow()) for key in ('iat', 'exp'): if key not in payload: raise ValueError( 'Token does not contain required claim {}'.format(key)) iat = payload['iat'] earliest = iat - _helpers.CLOCK_SKEW_SECS if now < earliest: raise ValueError('Token used too early, {} < {}'.format(now, iat)) exp = payload['exp'] latest = exp + _helpers.CLOCK_SKEW_SECS if latest < now: raise ValueError('Token expired, {} < {}'.format(latest, now))
Verifies the ``iat`` (Issued At) and ``exp`` (Expires) claims in a token payload. Args: payload (Mapping[str, str]): The JWT payload. Raises: ValueError: if any checks failed.
juraj-google-style
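A sketch of the claim checks with a hand-built payload; in the real module `_helpers` supplies `utcnow()` and `CLOCK_SKEW_SECS`:

```python
import time

now = int(time.time())
payload = {"iat": now - 10, "exp": now + 3600, "sub": "user@example.com"}
_verify_iat_and_exp(payload)       # passes: issued in the past, not yet expired

_verify_iat_and_exp({"iat": now})  # raises ValueError: token has no 'exp' claim
```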
def emit_completion(self, completion_percent): completion_mode = XBlockCompletionMode.get_mode(self) if ((not self.has_custom_completion) or (completion_mode != XBlockCompletionMode.COMPLETABLE)): raise AttributeError("Using `emit_completion` requires `has_custom_completion == True` (was {}) and `completion_mode == 'completable'` (was {})".format(self.has_custom_completion, completion_mode)) if ((completion_percent is None) or (not (0.0 <= completion_percent <= 1.0))): raise ValueError('Completion percent must be in [0.0; 1.0] interval, {} given'.format(completion_percent)) self.runtime.publish(self, 'completion', {'completion': completion_percent})
Emits completion event through Completion API. Unlike grading API, calling this method allows completion to go down - i.e. emitting a value of 0.0 on a previously completed block indicates that it is no longer considered complete. Arguments: completion_percent (float): Completion in range [0.0; 1.0] (inclusive), where 0.0 means the block is not completed, 1.0 means the block is fully completed. Returns: None
codesearchnet
def Log(self, format_str, *args): log_entry = rdf_flow_objects.FlowLogEntry(client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id, hunt_id=self.rdf_flow.parent_hunt_id, message=(format_str % args)) data_store.REL_DB.WriteFlowLogEntries([log_entry]) if self.rdf_flow.parent_hunt_id: db_compat.ProcessHuntFlowLog(self.rdf_flow, (format_str % args))
Logs the message using the flow's standard logging. Args: format_str: Format string *args: arguments to the format string
codesearchnet
def __init__(self, fixer_names, options=None, explicit=None): self.fixers = fixer_names self.explicit = explicit or [] self.options = self._default_options.copy() if options is not None: self.options.update(options) if self.options["print_function"]: self.grammar = pygram.python_grammar_no_print_statement else: self.grammar = pygram.python_grammar self.write_unchanged_files = self.options.get("write_unchanged_files") self.errors = [] self.logger = logging.getLogger("RefactoringTool") self.fixer_log = [] self.wrote = False self.driver = driver.Driver(self.grammar, convert=pytree.convert, logger=self.logger) self.pre_order, self.post_order = self.get_fixers() self.files = [] self.BM = bm.BottomMatcher() self.bmi_pre_order = [] self.bmi_post_order = [] for fixer in chain(self.post_order, self.pre_order): if fixer.BM_compatible: self.BM.add_fixer(fixer) elif fixer in self.pre_order: self.bmi_pre_order.append(fixer) elif fixer in self.post_order: self.bmi_post_order.append(fixer) self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order) self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
Initializer. Args: fixer_names: a list of fixers to import options: a dict with configuration. explicit: a list of fixers to run even if they are explicit.
juraj-google-style
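A hedged usage sketch for the `RefactoringTool` initializer above, using the standard lib2to3 fixer package (the particular fixer set is an assumption; any installed fixer package works):

```python
# Standard lib2to3 wiring; fixer discovery and refactor_string() come from
# lib2to3.refactor, not from the __init__ shown above.
from lib2to3.refactor import RefactoringTool, get_fixers_from_package

tool = RefactoringTool(get_fixers_from_package("lib2to3.fixes"))
tree = tool.refactor_string("print 'hello'\n", "<example>")
print(str(tree))   # print('hello')
```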
def format_level_0_memory(memory): formatted_memory = _list_to_complex_array(memory) if (not (2 <= len(formatted_memory.shape) <= 3)): raise QiskitError('Level zero memory is not of correct shape.') return formatted_memory
Format an experiment result memory object for measurement level 0. Args: memory (list): Memory from experiment with `meas_level==0`. `avg` or `single` will be inferred from shape of result memory. Returns: np.ndarray: Measurement level 0 complex numpy array Raises: QiskitError: If the returned numpy array does not have 2 (avg) or 3 (single) indices.
codesearchnet
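A hedged input sketch for `format_level_0_memory`: the helper `_list_to_complex_array` (not shown) is assumed to turn nested `[real, imag]` pairs into complex values, and the exact axis meaning depends on the backend:

```python
# Averaged level-0 data: the innermost [real, imag] pairs become complex values,
# giving a 2-D array here (a single-shot record would add a shots axis -> 3-D).
avg_memory = [
    [[0.0, 1.0], [0.5, -0.5], [1.0, 0.0]],
    [[2.0, 0.0], [0.0, 2.0], [1.0, 1.0]],
]
formatted = format_level_0_memory(avg_memory)
print(formatted.shape)   # (2, 3), complex-valued
```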
def _on_connection_error(self, connection, error_message): self._channel = None if isinstance(error_message, pika_errs.AMQPConnectionError): error_message = repr(error_message.args[0]) _log.error(error_message) self.call_later(1, self.reconnect)
Callback invoked when the connection failed to be established. Args: connection (pika.connection.SelectConnection): The connection that failed to open. error_message (str): The reason the connection couldn't be opened.
codesearchnet
def min_validator(min_value): def validator(value): if (value < min_value): raise ValidationError('{} is not >= {}'.format(value, min_value)) return validator
Return validator function that ensures lower bound of a number. The resulting validation function will validate the internal value of a resource instance field with the ``value >= min_value`` check. Args: min_value: minimal value for new validator
codesearchnet
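A usage sketch for `min_validator`; `ValidationError` is whatever error class the surrounding module imports:

```python
non_negative = min_validator(0)
non_negative(5)    # accepted, returns None
non_negative(-1)   # raises ValidationError: "-1 is not >= 0"
```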
def ReadIndex(self, index_file=None): self.index_file = index_file or self.index_file fullpath = os.path.join(self.template_dir, self.index_file) if self.index_file and fullpath not in self.INDEX: self.index = IndexTable(self._PreParse, self._PreCompile, fullpath) self.INDEX[fullpath] = self.index else: self.index = self.INDEX[fullpath] if "Template" not in self.index.index.header: raise CliTableError("Index file does not have 'Template' column.")
Reads the IndexTable index file of commands and templates. Args: index_file: String, file where template/command mappings reside. Raises: CliTableError: A template column was not found in the table.
juraj-google-style
def _initialize_operation_name_to_id(self): operation_name_to_id = {} for (i, operation) in enumerate(self._operations): operation_name_to_id[operation.name] = i return operation_name_to_id
Initializer for _operation_name_to_id. Returns: a {string: int}, mapping operation names to their index in _operations.
codesearchnet
def stop_capture_handler(self, name): empty_capturers_indeces = [] for k, sc in self._stream_capturers.iteritems(): stream_capturer = sc[0] stream_capturer.remove_handler(name) if stream_capturer.handler_count == 0: self._pool.killone(sc[1]) empty_capturers_indeces.append(k) for i in empty_capturers_indeces: del self._stream_capturers[i]
Remove all handlers with a given name Args: name: The name of the handler(s) to remove.
juraj-google-style
def remove_delegate(self, callback): if callback not in self._delegate_methods: return self._delegate_methods.remove(callback)
Unregisters a registered delegate function or a method. Args: callback(function): method to trigger when push center receives events
juraj-google-style
def reset(self, *args): self.resource = self.resource.reset(list(args)) return self
Resets any of the tokens for this Application. Note that you may have to reauthenticate afterwards. Usage: application.reset('api_token') application.reset('api_token', 'totp_secret') Args: *args (list of str): one or more of ['api_token', 'subscription_token', 'totp_secret'] Returns: The Application.
juraj-google-style
def scalar(name, tensor, family=None, step=None): def function(tag, scope): return gen_summary_ops.write_scalar_summary(_summary_state.writer._resource, _choose_step(step), tag, array_ops.identity(tensor), name=scope) return summary_writer_function(name, tensor, function, family=family)
Writes a scalar summary if possible. Unlike `tf.contrib.summary.generic` this op may change the dtype depending on the writer, for both practical and efficiency concerns. Args: name: An arbitrary name for this summary. tensor: A `tf.Tensor` Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`, `uint32`, `uint64`. family: Optional, the summary's family. step: The `int64` monotonic step variable, which defaults to `tf.compat.v1.train.get_global_step`. Returns: The created `tf.Operation` or a `tf.no_op` if summary writing has not been enabled for this context.
github-repos
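A hedged usage sketch for the `scalar` summary op; the writer setup assumes the TF 2.x file-writer API (the older tf.contrib.summary API created the writer the same way), and a default writer must be active for anything to be recorded:

```python
# Assumption: tf.summary.create_file_writer exists in the installed TF version.
import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/logs")
with writer.as_default():
    scalar("loss", tf.constant(0.25), step=1)   # the function defined above
```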