code: string (lengths 20 to 4.93k)
docstring: string (lengths 33 to 1.27k)
source: string (3 classes)
def movies_in_theaters(self, **kwargs): path = self._get_path('movies_in_theaters') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Gets the movies currently in theaters from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def _show_defined_functions(saved_model_dir, meta_graphs): has_object_graph_def = False for meta_graph_def in meta_graphs: has_object_graph_def |= meta_graph_def.HasField('object_graph_def') if not has_object_graph_def: return print('\nConcrete Functions:', end='') try: with ops_lib.Graph().as_default(): trackable_object = load.load(saved_model_dir, options=load_options.LoadOptions(experimental_skip_checkpoint=True)) except Exception as e: if 'Op type not registered' in str(e): error = 'the existence of custom ops in the SavedModel' else: error = 'unknown reasons' print(f' N/A (could not be listed due to {error})') return children = list(save._AugmentedGraphView(trackable_object).list_children(trackable_object)) children = sorted(children, key=lambda x: x.name) for name, child in children: concrete_functions = [] if isinstance(child, defun.ConcreteFunction): concrete_functions.append(child) elif isinstance(child, def_function.Function): concrete_functions.extend(child._list_all_concrete_functions_for_serialization()) else: continue print("\n Function Name: '%s'" % name) concrete_functions = sorted(concrete_functions, key=lambda x: x.name) for index, concrete_function in enumerate(concrete_functions, 1): args, kwargs = (None, None) if concrete_function.structured_input_signature: args, kwargs = concrete_function.structured_input_signature elif concrete_function._arg_keywords: args = concrete_function._arg_keywords if args: print(' Option #%d' % index) print(' Callable with:') _print_args(args, indent=4) if kwargs: _print_args(kwargs, 'Named Argument', indent=4)
Prints the callable concrete and polymorphic functions of the Saved Model. Args: saved_model_dir: Directory containing the SavedModel to inspect. meta_graphs: Already-extracted MetaGraphDef of the SavedModel.
github-repos
def downsampled_mesh(self, step): from lace.mesh import Mesh if self.f is not None: raise ValueError( 'Function `downsampled_mesh` does not support faces.') low = Mesh() if self.v is not None: low.v = self.v[::step] if self.vc is not None: low.vc = self.vc[::step] return low
Returns a downsampled copy of this mesh. Args: step: the step size for the sampling Returns: a new, downsampled Mesh object. Raises: ValueError if this Mesh has faces.
juraj-google-style
def embedded_tweet(self): embedded_tweet = tweet_embeds.get_embedded_tweet(self) if (embedded_tweet is not None): try: return Tweet(embedded_tweet) except NotATweetError as nate: raise NotATweetError(('The embedded tweet payload {} appears malformed.' + " Failed with '{}'").format(embedded_tweet, nate)) else: return None
Get the retweeted Tweet OR the quoted Tweet and return it as a Tweet object Returns: Tweet (or None, if the Tweet is neither a quote tweet nor a Retweet): a Tweet representing the quote Tweet or the Retweet (see tweet_embeds.get_embedded_tweet, this is that value as a Tweet) Raises: NotATweetError: if embedded tweet is malformed
codesearchnet
def split_last_dimension(x, n): x_shape = common_layers.shape_list(x) m = x_shape[(- 1)] if (isinstance(m, int) and isinstance(n, int)): assert ((m % n) == 0) return tf.reshape(x, (x_shape[:(- 1)] + [n, (m // n)]))
Reshape x so that the last dimension becomes two dimensions. The first of these two dimensions is n. Args: x: a Tensor with shape [..., m] n: an integer. Returns: a Tensor with shape [..., n, m/n]
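A minimal NumPy sketch of the reshape described above; the entry itself is TensorFlow code (tf, common_layers), so this standalone equivalent only illustrates the shape arithmetic and its name is hypothetical.

import numpy as np

def split_last_dimension_np(x, n):
    m = x.shape[-1]
    assert m % n == 0  # the last dimension must divide evenly into n groups
    return x.reshape(x.shape[:-1] + (n, m // n))

x = np.zeros((4, 6))
print(split_last_dimension_np(x, 3).shape)  # (4, 3, 2), i.e. [..., n, m/n]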
codesearchnet
def handle(*codes, **kwargs): regToken = kwargs.get("regToken", False) subscribe = kwargs.get("subscribe") def decorator(fn): @functools.wraps(fn) def wrapper(self, *args, **kwargs): try: return fn(self, *args, **kwargs) except SkypeApiException as e: if isinstance(e.args[1], requests.Response) and e.args[1].status_code in codes: conn = self if isinstance(self, SkypeConnection) else self.conn if regToken: conn.getRegToken() if subscribe: conn.endpoints[subscribe].subscribe() return fn(self, *args, **kwargs) raise return wrapper return decorator
Method decorator: if a given status code is received, re-authenticate and try again. Args: codes (int list): status codes to respond to regToken (bool): whether to try retrieving a new token on error Returns: method: decorator function, ready to apply to other methods
juraj-google-style
def get_stops_line(self, **kwargs): params = {'line': util.ints_to_string(kwargs.get('lines', [])), 'direction': util.direction_code(kwargs.get('direction', '')), 'cultureInfo': util.language_code(kwargs.get('lang'))} result = self.make_request('geo', 'get_stops_line', **params) if (not util.check_result(result, 'stop')): return (False, 'UNKNOWN ERROR') values = util.response_list(result, 'stop') return (True, [emtype.Stop(**a) for a in values])
Obtain information on the stops of the given lines. Arguments: lines (list[int] | int): Lines to query, may be empty to get all the lines. direction (str): Optional, either *forward* or *backward*. lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[Stop]), or message string in case of error.
codesearchnet
def distance_to_line(a, b, p): return distance(closest_point(a, b, p), p)
Closest distance between a line segment and a point Args: a ([float, float]): x and y coordinates. Line start b ([float, float]): x and y coordinates. Line end p ([float, float]): x and y coordinates. Point to compute the distance Returns: float
codesearchnet
def add_scalar_value(self, value_buf): self.__container_node.add_child(_Node(value_buf)) self.current_container_length += len(value_buf)
Add a node to the tree containing a scalar value. Args: value_buf (bytearray): bytearray containing the scalar value.
juraj-google-style
def __init__(self, fetches): values = _get_attrs_values(fetches) self._fetch_type = type(fetches) self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values] self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
Creates a _AttrsFetchMapper. Args: fetches: An instance of an attrs decorated class.
github-repos
def configure_stream(level='WARNING'): root_logger = logging.getLogger() root_logger.setLevel(level) template = "[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s" formatter = logging.Formatter(template) console = logging.StreamHandler() console.setLevel(level) console.setFormatter(formatter) root_logger.addHandler(console) return root_logger
Configure root logger using a standard stream handler. Args: level (string, optional): lowest level to log to the console Returns: logging.RootLogger: root logger instance with attached handler
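A hedged usage sketch, assuming the configure_stream function above is in scope; it relies only on the standard logging module.

import logging

root = configure_stream(level='INFO')    # attaches a StreamHandler to the root logger
logging.getLogger('demo').info('hello')  # emitted with the "[time] name level message" template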
juraj-google-style
def __init__(self, type=None, hashes=None): self.Type = type self.Hashes = hashes if hashes else []
Create an instance. Args: type (neo.Network.InventoryType): hashes (list): of bytearray items.
juraj-google-style
def rest_action(self, func, url, **kwargs): try: response = func(url, timeout=self.TIMEOUT, **kwargs) except requests.RequestException, err: log.exception( "[PyLmod] Error - connection error in " "rest_action, err=%s", err ) raise err try: return response.json() except ValueError, err: log.exception('Unable to decode %s', response.content) raise err
Routine to do low-level REST operation, with retry. Args: func (callable): API function to call url (str): service URL endpoint kwargs (dict): additional parameters Raises: requests.RequestException: Exception connection error ValueError: Unable to decode response content Returns: list: the json-encoded content of the response
juraj-google-style
def create_explicit(bounds): safe_bounds = sorted((float(x) for x in bounds)) if (len(safe_bounds) != len(set(safe_bounds))): raise ValueError(u'Detected two elements of bounds that are the same') return sc_messages.Distribution(bucketCounts=([0] * (len(safe_bounds) + 1)), explicitBuckets=sc_messages.ExplicitBuckets(bounds=safe_bounds))
Creates a new instance of distribution with explicit buckets. bounds is an iterable of ordered floats that define the explicit buckets Args: bounds (iterable[float]): initializes the bounds Return: :class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution` Raises: ValueError: if the args are invalid for creating an instance
codesearchnet
def get_signatures_with_results(vcs): results_dir = os.path.join(vcs.private_dir(), 'results') if not os.path.exists(results_dir): return [] rel_paths = os.listdir(results_dir) return [p for p in rel_paths if os.path.isdir(os.path.join(results_dir, p))]
Returns the list of signatures for which test results are saved. Args: vcs (easyci.vcs.base.Vcs) Returns: List[str]
juraj-google-style
def set_from_tree(self, address_value_dict): for address, value in address_value_dict.items(): if address in self._state: self._state[address].set_result(result=value, from_tree=True)
Set the result for each future at the given addresses with the value stored in the merkle database. Args: address_value_dict (dict of str: bytes): The unique full addresses that the bytes values should be set with.
juraj-google-style
def purge_unused(self, pass_count=3): for purge_count in range(0, pass_count): self._add_entry(templates.PROJECT_PURGE)
Append a purge model entry to the journal. This instructs Revit to purge the open model. Args: pass_count (int): number of times to execute the purge. Default is 3.
codesearchnet
def _GetTimeValue(self, name): timestamp = getattr(self._tsk_file.info.meta, name, None) if (self._file_system_type in self._TSK_HAS_NANO_FS_TYPES): name_fragment = '{0:s}_nano'.format(name) fraction_of_second = getattr(self._tsk_file.info.meta, name_fragment, None) else: fraction_of_second = None return TSKTime(timestamp=timestamp, fraction_of_second=fraction_of_second)
Retrieves a date and time value. Args: name (str): name of the date and time value, for example "atime" or "mtime". Returns: dfdatetime.DateTimeValues: date and time value or None if not available.
codesearchnet
def __try_read_record(self): block_remaining = (_BLOCK_SIZE - (self.__reader.tell() % _BLOCK_SIZE)) if (block_remaining < _HEADER_LENGTH): return ('', _RECORD_TYPE_NONE) header = self.__reader.read(_HEADER_LENGTH) if (len(header) != _HEADER_LENGTH): raise EOFError(('Read %s bytes instead of %s' % (len(header), _HEADER_LENGTH))) (masked_crc, length, record_type) = struct.unpack(_HEADER_FORMAT, header) crc = _unmask_crc(masked_crc) if ((length + _HEADER_LENGTH) > block_remaining): raise errors.InvalidRecordError('Length is too big') data = self.__reader.read(length) if (len(data) != length): raise EOFError(('Not enough data read. Expected: %s but got %s' % (length, len(data)))) if (record_type == _RECORD_TYPE_NONE): return ('', record_type) actual_crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type]) actual_crc = crc32c.crc_update(actual_crc, data) actual_crc = crc32c.crc_finalize(actual_crc) if (actual_crc != crc): raise errors.InvalidRecordError('Data crc does not match') return (data, record_type)
Try reading a record. Returns: (data, record_type) tuple. Raises: EOFError: when end of file was reached. InvalidRecordError: when valid record could not be read.
codesearchnet
def _preprocess_conv2d_input(x, data_format, force_transpose=False): tf_data_format = 'NHWC' if data_format == 'channels_first': if not _has_nchw_support() or force_transpose: x = array_ops.transpose(x, (0, 2, 3, 1)) else: tf_data_format = 'NCHW' return (x, tf_data_format)
Transpose and cast the input before the conv2d. Args: x: input tensor. data_format: string, `"channels_last"` or `"channels_first"`. force_transpose: Boolean. If True, the input will always be transposed from NCHW to NHWC if `data_format` is `"channels_first"`. If False, the transposition only occurs on CPU (GPU ops are assumed to support NCHW). Returns: A tensor.
github-repos
def __call__(self, shape, dtype=None): if len(shape) != 2: raise ValueError(f'Identity matrix initializer can only be used for 2D matrices. Received: shape={shape} of rank {len(shape)}.') dtype = standardize_dtype(dtype) return self.gain * ops.eye(*shape, dtype=dtype)
Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. If not specified, `keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `keras.backend.set_floatx(float_dtype)`).
github-repos
def _prefix_output_keys(self, output_dict, output_name): new_outputs = {} for key, val in output_dict.items(): key = self._prefix_key(key, output_name) new_outputs[key] = val return new_outputs
Prepend output_name to the output_dict keys if it doesn't exist. This produces predictable prefixes for the pre-determined outputs of SupervisedOutput. Args: output_dict: dict of string to Tensor, assumed valid. output_name: prefix string to prepend to existing keys. Returns: dict with updated keys and existing values.
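A self-contained sketch of the key-prefixing idea; the '/' separator is an assumption, since the real _prefix_key implementation is not shown above.

def prefix_output_keys(output_dict, output_name):
    # prepend the output name to every key, mirroring the entry above but on plain dicts
    return {'{}/{}'.format(output_name, key): val for key, val in output_dict.items()}

print(prefix_output_keys({'scores': [0.1, 0.9]}, 'head0'))  # {'head0/scores': [0.1, 0.9]}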
github-repos
def get_value(x): if not tensor_util.is_tf_type(x): return x if context.executing_eagerly() or isinstance(x, ops.EagerTensor): return x.numpy() if not getattr(x, '_in_graph_mode', True): with context.eager_mode(): return x.numpy() if ops.executing_eagerly_outside_functions(): with ops.init_scope(): return x.numpy() with x.graph.as_default(): return x.eval(session=get_session((x,)))
Returns the value of a variable. `backend.get_value` is the complement of `backend.set_value`, and provides a generic interface for reading from variables while abstracting away the differences between TensorFlow 1.x and 2.x semantics. {snippet} Args: x: input variable. Returns: A Numpy array.
github-repos
async def get(self, cid, coinid): if settings.SIGNATURE_VERIFICATION: super().verify() message = json.loads(self.get_argument('message', '{}')) public_key = message.get('public_key') if (coinid in settings.bridges.keys()): self.account.blockchain.setendpoint(settings.bridges[coinid]) content = (await self.account.blockchain.getsinglecontent(cid=cid)) if ('error' in content.keys()): self.set_status(content['error']) self.write(content) raise tornado.web.Finish account = (await self.account.getaccountbywallet(wallet=content['owneraddr'])) if ('error' in account.keys()): self.set_status(account['error']) self.write(account) raise tornado.web.Finish cids = (await self.account.getuserscontent(public_key=public_key)) deals = (await self.account.getdeals(buyer=public_key)) if (int(content['cid']) in [i[0] for i in cids.get(coinid, [])]): content['access_type'] = 'write_access' elif (int(content['cid']) in [i[0] for i in deals.get(coinid, [])]): content['access_type'] = 'read_access' try: offer = (await self.account.blockchain.getoffer(cid=cid, buyer_address=self.account.validator[coinid](public_key))) content['owner'] = account.get('public_key') content['seller_access_string'] = offer.get('seller_access_string') content['seller_pubkey'] = offer.get('seller_public_key') except: pass self.write(content)
Receives content by content id and coin id Accepts: Query string arguments: - "cid" - int - "coinid" - str Returns: return dict with following fields: - "description" - str - "read_access" - int - "write_access" - int - "content" - str - "cid" - int - "owneraddr" - str - "owner" - str - "coinid" - str Verified: True
codesearchnet
def reduce_to_2d(arr): if not isinstance(arr, np.ndarray): raise ValueError('reduce_to_2d requires a numpy.ndarray') ndims = len(arr.shape) if ndims < 2: raise ValueError('reduce_to_2d requires an array of dimensionality >=2') slices = ([0] * (ndims - 2)) + [slice(None), slice(None)] return arr[slices]
Given a np.ndarray with nDims > 2, reduce it to 2d. It does this by selecting the zeroth coordinate for every dimension greater than two. Args: arr: a numpy ndarray of dimension at least 2. Returns: A two-dimensional subarray from the input array. Raises: ValueError: If the argument is not a numpy ndarray, or the dimensionality is too low.
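A runnable illustration of the slicing described above; note that modern NumPy requires a tuple index, whereas the entry above passes a list.

import numpy as np

arr = np.arange(24).reshape(2, 3, 4)
slices = tuple([0] * (arr.ndim - 2)) + (slice(None), slice(None))
print(arr[slices].shape)  # (3, 4): the zeroth coordinate is taken on every leading axis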
juraj-google-style
def image_feature_engineering(features, feature_tensors_dict): engineered_features = {} for (name, feature_tensor) in six.iteritems(feature_tensors_dict): if ((name in features) and (features[name]['transform'] == IMAGE_TRANSFORM)): with tf.name_scope(name, 'Wx_plus_b'): hidden = tf.contrib.layers.fully_connected(feature_tensor, IMAGE_HIDDEN_TENSOR_SIZE) engineered_features[name] = hidden else: engineered_features[name] = feature_tensor return engineered_features
Add a hidden layer on image features. Args: features: features dict feature_tensors_dict: dict of feature-name: tensor
codesearchnet
def recover(self, history: Iterable[Trial]) -> None: for trial in history: if trial.status in ['COMPLETED', 'PENDING', 'STOPPING']: self.should_stop_early(trial)
Recover states by replaying the trial history. Subclass can override. NOTE: `recover` will always be called before the first `should_stop_early` is called. It could be called multiple times if there are multiple source of history, e.g: trials from a previous study and existing trials from current study. The default behavior is to replay `should_stop_early` on all trials that contain all intermediate measurements. Args: history: An iterable object of trials.
github-repos
def _list_to_string(l, s): return s.join(l)
Concatenates list items into a single string separated by `s`. Args: l: List with items to be concatenated into a single string. s: String or char that will be concatenated in between each item. Returns: String that has all items in list `l` concatenated with `s` separator.
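A trivial illustration of the documented join behaviour.

parts = ['2024', '01', '31']
print('-'.join(parts))  # '2024-01-31', equivalent to _list_to_string(parts, '-')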
github-repos
def get_keys(keyfiles, signature_type): builtin_keys = {('release', 'sha1'): [mardor.mozilla.release1_sha1, mardor.mozilla.release2_sha1], ('release', 'sha384'): [mardor.mozilla.release1_sha384, mardor.mozilla.release2_sha384], ('nightly', 'sha1'): [mardor.mozilla.nightly1_sha1, mardor.mozilla.nightly2_sha1], ('nightly', 'sha384'): [mardor.mozilla.nightly1_sha384, mardor.mozilla.nightly2_sha384], ('dep', 'sha1'): [mardor.mozilla.dep1_sha1, mardor.mozilla.dep2_sha1], ('dep', 'sha384'): [mardor.mozilla.dep1_sha384, mardor.mozilla.dep2_sha384], ('autograph-stage', 'sha384'): [mardor.mozilla.autograph_stage_sha384]} keys = [] for keyfile in keyfiles: if keyfile.startswith(':mozilla-'): name = keyfile.split(':mozilla-')[1] try: keys.extend(builtin_keys[(name, signature_type)]) except KeyError: raise ValueError('Invalid internal key name: {}'.format(keyfile)) else: key = open(keyfile, 'rb').read() keys.append(key) return keys
Get public keys for the given keyfiles. Args: keyfiles: List of filenames with public keys, or :mozilla- prefixed key names signature_type: one of 'sha1' or 'sha384' Returns: List of public keys as strings
codesearchnet
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] declarator_end = line.rfind(')') if (declarator_end >= 0): fragment = line[declarator_end:] elif ((linenum > 1) and (clean_lines.elided[(linenum - 1)].rfind(')') >= 0)): fragment = line else: return if (Search('\\boverride\\b', fragment) and Search('\\bfinal\\b', fragment)): error(filename, linenum, 'readability/inheritance', 4, '"override" is redundant since function is already declared as "final"')
Check if line contains a redundant "override" or "final" virt-specifier. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
codesearchnet
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): shutdown_value = registry_key.GetValueByName('ShutdownTime') if not shutdown_value: return try: date_time = self._ParseFiletime(shutdown_value.data) except errors.ParseError as exception: parser_mediator.ProduceExtractionWarning( 'unable to determine shutdown timestamp with error: {0!s}'.format( exception)) return if not date_time: date_time = dfdatetime_semantic_time.SemanticTime('Not set') event_data = ShutdownWindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = shutdown_value.offset event_data.value_name = shutdown_value.name event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_SHUTDOWN) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts events from a ShutdownTime Windows Registry value. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
juraj-google-style
def GetPathInfo(self, timestamp=None): path_info_timestamp = self._LastEntryTimestamp(self._path_infos, timestamp) try: result = self._path_infos[path_info_timestamp].Copy() except KeyError: result = rdf_objects.PathInfo(path_type=self._path_type, components=self._components) stat_entry_timestamp = self._LastEntryTimestamp(self._stat_entries, timestamp) result.last_stat_entry_timestamp = stat_entry_timestamp result.stat_entry = self._stat_entries.get(stat_entry_timestamp) hash_entry_timestamp = self._LastEntryTimestamp(self._hash_entries, timestamp) result.last_hash_entry_timestamp = hash_entry_timestamp result.hash_entry = self._hash_entries.get(hash_entry_timestamp) return result
Generates a summary about the path record. Args: timestamp: A point in time from which the data should be retrieved. Returns: A `rdf_objects.PathInfo` instance.
codesearchnet
class ProgbarLogger(Callback): def __init__(self, count_mode='samples', stateful_metrics=None): super(ProgbarLogger, self).__init__() self._supports_tf_logs = True if count_mode == 'samples': self.use_steps = False elif count_mode == 'steps': self.use_steps = True else: raise ValueError('Unknown `count_mode`: ' + str(count_mode)) self.stateful_metrics = set(stateful_metrics) if stateful_metrics else set() self.seen = 0 self.progbar = None self.target = None self.verbose = 1 self.epochs = 1 self._train_step, self._test_step, self._predict_step = (None, None, None) self._call_batch_hooks = True self._called_in_fit = False def set_params(self, params): self.verbose = params['verbose'] self.epochs = params['epochs'] if self.use_steps and 'steps' in params: self.target = params['steps'] elif not self.use_steps and 'samples' in params: self.target = params['samples'] else: self.target = None self._call_batch_hooks = self.verbose == 1 if self.target is None: try: self._train_step = self.model._train_counter self._test_step = self.model._test_counter self._predict_step = self.model._predict_counter except AttributeError: self._call_batch_hooks = True def on_train_begin(self, logs=None): self._called_in_fit = True def on_test_begin(self, logs=None): if not self._called_in_fit: self._reset_progbar() self._maybe_init_progbar() def on_predict_begin(self, logs=None): self._reset_progbar() self._maybe_init_progbar() def on_epoch_begin(self, epoch, logs=None): self._reset_progbar() self._maybe_init_progbar() if self.verbose and self.epochs > 1: print('Epoch %d/%d' % (epoch + 1, self.epochs)) def on_train_batch_end(self, batch, logs=None): self._batch_update_progbar(batch, logs) def on_test_batch_end(self, batch, logs=None): if not self._called_in_fit: self._batch_update_progbar(batch, logs) def on_predict_batch_end(self, batch, logs=None): self._batch_update_progbar(batch, None) def on_epoch_end(self, epoch, logs=None): self._finalize_progbar(logs, self._train_step) def on_test_end(self, logs=None): if not self._called_in_fit: self._finalize_progbar(logs, self._test_step) def on_predict_end(self, logs=None): self._finalize_progbar(logs, self._predict_step) def _reset_progbar(self): self.seen = 0 self.progbar = None def _maybe_init_progbar(self): self.stateful_metrics = set(self.stateful_metrics) if self.model: self.stateful_metrics = self.stateful_metrics.union(set((m.name for m in self.model.metrics))) if self.progbar is None: self.progbar = Progbar(target=self.target, verbose=self.verbose, stateful_metrics=self.stateful_metrics, unit_name='step' if self.use_steps else 'sample') self.progbar._update_stateful_metrics(self.stateful_metrics) def _implements_train_batch_hooks(self): return self._call_batch_hooks def _implements_test_batch_hooks(self): return self._call_batch_hooks def _implements_predict_batch_hooks(self): return self._call_batch_hooks def _batch_update_progbar(self, batch, logs=None): logs = logs or {} self._maybe_init_progbar() if self.use_steps: self.seen = batch + 1 else: logs = copy.copy(logs) batch_size = logs.pop('size', 0) num_steps = logs.pop('num_steps', 1) logs.pop('batch', None) add_seen = num_steps * batch_size self.seen += add_seen if self.verbose == 1: logs = tf_utils.sync_to_numpy_or_python_type(logs) self.progbar.update(self.seen, list(logs.items()), finalize=False) def _finalize_progbar(self, logs, counter): logs = tf_utils.sync_to_numpy_or_python_type(logs or {}) if self.target is None: if counter is not None: counter = counter.numpy() if not self.use_steps: counter *= logs.get('size', 1) self.target = counter or self.seen self.progbar.target = self.target self.progbar.update(self.target, list(logs.items()), finalize=True)
Callback that prints metrics to stdout. Args: count_mode: One of `"steps"` or `"samples"`. Whether the progress bar should count samples seen or steps (batches) seen. stateful_metrics: Iterable of string names of metrics that should *not* be averaged over an epoch. Metrics in this list will be logged as-is. All others will be averaged over time (e.g. loss, etc). If not provided, defaults to the `Model`'s metrics. Raises: ValueError: In case of invalid `count_mode`.
github-repos
def multinomial(logits, num_samples, seed=None, name=None, output_dtype=None): with ops.name_scope(name, 'multinomial', [logits]): return multinomial_categorical_impl(logits, num_samples, output_dtype, seed)
Draws samples from a multinomial distribution. Example: ```python # samples has shape [1, 5], where each value is either 0 or 1 with equal # probability. samples = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 5) ``` Args: logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. seed: A Python integer. Used to create a random seed for the distribution. See `tf.random.set_seed` for behavior. name: Optional name for the operation. output_dtype: The integer type of the output: `int32` or `int64`. Defaults to `int64`. Returns: The drawn samples of shape `[batch_size, num_samples]`.
github-repos
def normalise(self, to_currency): out = Money(currency=to_currency) for money in self._money_obs: out += converter.convert(money, to_currency) return Balance([out])
Normalise this balance into a single currency Args: to_currency (str): Destination currency Returns: (Balance): A new balance object containing a single Money value in the specified currency
juraj-google-style
def should_close(http_version, connection_field): connection_field = (connection_field or '').lower() if http_version == 'HTTP/1.0': return connection_field.replace('-', '') != 'keepalive' else: return connection_field == 'close'
Return whether the connection should be closed. Args: http_version (str): The HTTP version string like ``HTTP/1.0``. connection_field (str): The value for the ``Connection`` header.
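A hedged truth table for should_close, assuming the function above is in scope.

print(should_close('HTTP/1.0', None))          # True:  HTTP/1.0 closes unless keep-alive is sent
print(should_close('HTTP/1.0', 'Keep-Alive'))  # False
print(should_close('HTTP/1.1', 'close'))       # True:  HTTP/1.1 closes only on an explicit "close"
print(should_close('HTTP/1.1', None))          # False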
juraj-google-style
def download_file(self, url): response = requests.get(url, stream=True) response.raise_for_status() return (int(response.headers.get('content-length', 0)), response)
Initiate a streaming download Args: url (str): The url to download Returns: A tuple of the content length and the streaming response
juraj-google-style
def latlong(text): nlat, nlon = text.split(',') return (float(nlat), float(nlon))
Chop a latlong string and return (float,float). Does not perform validation on the coordinates. Args: text (str): A latitude,longitude string. Returns: (float,float): A latitude, longitude float tuple.
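A usage sketch, assuming the latlong helper above is in scope.

print(latlong('47.37,8.54'))  # (47.37, 8.54); no validation is performed on the values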
juraj-google-style
def _wrap_callback_errors(callback, message): try: callback(message) except Exception: _LOGGER.exception('Top-level exception occurred in callback while processing a message') message.nack()
Wraps a user callback so that if an exception occurs the message is nacked. Args: callback (Callable[None, Message]): The user callback. message (~Message): The Pub/Sub message.
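A self-contained sketch of the nack-on-error pattern described above; the Message class below is a stand-in, not the Pub/Sub client type.

class Message:
    def nack(self):
        print('nacked')

def wrap_callback_errors(callback, message):
    try:
        callback(message)
    except Exception:
        message.nack()  # give the message back so it can be redelivered

wrap_callback_errors(lambda m: 1 / 0, Message())  # the raised error is caught and 'nacked' is printed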
codesearchnet
def authentication(self, username, password): _auth_text = '{}:{}'.format(username, password) if (int(sys.version[0]) > 2): _auth_bin = base64.encodebytes(_auth_text.encode()) _auth = _auth_bin.decode() _auth = _auth.replace('\n', '') self._auth = _auth else: _auth = base64.encodestring(_auth_text) self._auth = str(_auth).replace('\n', '') _LOGGER.debug('Autentication string is: {}:***'.format(username))
Configures the user authentication for eAPI This method configures the username and password combination to use for authenticating to eAPI. Args: username (str): The username to use to authenticate the eAPI connection with password (str): The password in clear text to use to authenticate the eAPI connection with
codesearchnet
def open_window(self, private=False): handles_before = self.selenium.window_handles self.switch_to() with self.selenium.context(self.selenium.CONTEXT_CHROME): self.selenium.find_element(*self._file_menu_button_locator).click() if private: self.selenium.find_element( *self._file_menu_private_window_locator ).click() else: self.selenium.find_element( *self._file_menu_new_window_button_locator ).click() return self.wait.until( expected.new_browser_window_is_opened(self.selenium, handles_before), message="No new browser window opened", )
Open a new browser window. Args: private (bool): Optional parameter to open a private browsing window. Defaults to False. Returns: :py:class:`BrowserWindow`: Opened window.
juraj-google-style
class FlaubertPoolerAnswerClass(nn.Module): def __init__(self, config: FlaubertConfig): super().__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, cls_index: Optional[torch.LongTensor]=None) -> torch.FloatTensor: hsz = hidden_states.shape[-1] assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None' if start_positions is not None: start_positions = start_positions[:, None, None].expand(-1, -1, hsz) start_states = hidden_states.gather(-2, start_positions).squeeze(-2) if cls_index is not None: cls_index = cls_index[:, None, None].expand(-1, -1, hsz) cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) else: cls_token_state = hidden_states[:, -1, :] x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) x = self.activation(x) x = self.dense_1(x).squeeze(-1) return x
Compute SQuAD 2.0 answer class from classification and start tokens hidden states. Args: config ([`FlaubertConfig`]): The config used by the model, will be used to grab the `hidden_size` of the model.
github-repos
def run_tpm(system, steps, blackbox): node_tpms = [] for node in system.nodes: node_tpm = node.tpm_on for input_node in node.inputs: if (not blackbox.in_same_box(node.index, input_node)): if (input_node in blackbox.output_indices): node_tpm = marginalize_out([input_node], node_tpm) node_tpms.append(node_tpm) noised_tpm = rebuild_system_tpm(node_tpms) noised_tpm = convert.state_by_node2state_by_state(noised_tpm) tpm = convert.state_by_node2state_by_state(system.tpm) tpm = np.dot(tpm, np.linalg.matrix_power(noised_tpm, (steps - 1))) return convert.state_by_state2state_by_node(tpm)
Iterate the TPM for the given number of timesteps. Returns: np.ndarray: tpm * (noise_tpm^(t-1))
codesearchnet
def UpdateCacheFromSource(self, cache, source, incremental=False, force_write=False, location=None): return_val = 0 cache_filename = cache.GetCacheFilename() if cache_filename is not None: new_file_fd, new_file = tempfile.mkstemp(dir=os.path.dirname(cache_filename), prefix=os.path.basename(cache_filename), suffix='.nsscache.tmp') else: raise error.CacheInvalid('Cache has no filename.') self.log.debug('temp source filename: %s', new_file) try: source.GetFile(self.map_name, new_file, current_file=cache.GetCacheFilename(), location=location) os.lseek(new_file_fd, 0, os.SEEK_SET) source_cache = cache_factory.Create(self.cache_options, self.map_name) source_map = source_cache.GetMap(new_file) return_val += self._FullUpdateFromFile(cache, source_map, force_write) finally: try: os.unlink(new_file) except OSError as e: if e.errno != errno.ENOENT: raise return return_val
Update a single cache file, from a given source. Args: cache: A nss_cache.caches.Cache object. source: A nss_cache.sources.Source object. incremental: We ignore this. force_write: A boolean flag forcing empty map updates when False, defaults to False. location: The optional location in the source of this map used by automount to specify which automount map to get, defaults to None. Returns: An int indicating the success of an update (0 == good, fail otherwise).
github-repos
def _load_info(self): url = '%s/prefix?duration=36000' % self.base_url r = self.gbdx_connection.get(url) r.raise_for_status() return r.json()
Get user info for GBDX S3, put into instance vars for convenience. Args: None. Returns: Dictionary with S3 access key, S3 secret key, S3 session token, user bucket and user prefix (dict).
juraj-google-style
def detail_poi(self, **kwargs): params = {'language': util.language_code(kwargs.get('lang')), 'family': kwargs.get('family')} if kwargs.get('id'): params['id'] = kwargs['id'] result = self.make_request('detail_poi', {}, **params) if (not util.check_result(result)): return (False, result.get('message', 'UNKNOWN ERROR')) values = util.response_list(result, 'Data') return (True, [emtype.PoiDetails(**a) for a in values])
Obtain detailed info of a given POI. Args: family (str): Family code of the POI (3 chars). lang (str): Language code (*es* or *en*). id (int): Optional, ID of the POI to query. Passing value -1 will result in information from all POIs. Returns: Status boolean and parsed response (list[PoiDetails]), or message string in case of error.
codesearchnet
def list_dir(self, context): doc = inspect.getdoc(context) listing = '' listing += '\n' listing += (annotate.context_name(context) + '\n') if (doc is not None): doc = inspect.cleandoc(doc) listing += (doc + '\n') listing += '\nDefined Functions:\n' is_dict = False if isinstance(context, dict): funs = context.keys() is_dict = True else: funs = utils.find_all(context) for fun in sorted(funs): override_name = None if is_dict: override_name = fun fun = self.find_function(context, fun) if isinstance(fun, dict): if is_dict: listing += ((' - ' + override_name) + '\n') else: listing += ((' - ' + fun.metadata.name) + '\n') else: listing += ((' - ' + fun.metadata.signature(name=override_name)) + '\n') if (annotate.short_description(fun) != ''): listing += ((' ' + annotate.short_description(fun)) + '\n') listing += '\nBuiltin Functions\n' for bif in sorted(self.builtins.keys()): listing += ((' - ' + bif) + '\n') listing += '\n' return listing
Return a listing of all of the functions in this context including builtins. Args: context (object): The context to print a directory for. Returns: str
codesearchnet
def update_ip_info(self, since_days=10, save=False, force=False): try: last_check = IPInfoCheck.objects.get(ip_address=self.client_ip_address) since_last = (datetime.date.today() - last_check.date) if (since_last <= datetime.timedelta(days=since_days)): if ((not self.ip_info) or ((self.ip_info != last_check.ip_info) and force)): self.ip_info = last_check.ip_info self.save() return True elif save: self.save() return False (ip_info, created) = IPInfo.get_or_create_from_ip(self.client_ip_address) last_check.date = datetime.date.today() last_check.save() if created: last_check.ip_info = ip_info self.ip_info = ip_info self.save() return True elif save: self.save() return False except IPInfoCheck.DoesNotExist: self.ip_info = IPInfoCheck.check_ip(self.client_ip_address) self.save() return True
Update the IP info. Args: since_days (int): if checked less than this number of days ago, don't check again (default to 10 days). save (bool): whether to save anyway or not. force (bool): whether to update ip_info to last checked one. Returns: bool: check was run. IPInfo might not have been updated.
codesearchnet
def _endpoint_to_target(self, endpoint): parsed = urlparse.urlparse(endpoint) scheme = parsed[0] hostport = parsed[1] if 'unix' in scheme: return (None, None, unquote(hostport)) if scheme == 'https': target_port = 443 else: target_port = 80 (target_host, target_port) = self._split_hostport(hostport, default_port=target_port) return (target_host, target_port, None)
Convert a URL into a host / port, or into a path to a unix domain socket Args: endpoint (str): A URL parsable by urlparse Returns: 3 item tuple: (host, port, path). host and port will be None, and path will not be None, if a unix domain socket URL is passed; path will be None if a normal TCP based URL is passed
juraj-google-style
def get_all_text(tweet): if is_original_format(tweet): return "\n".join(filter(None, [tweet.user_entered_text, tweet.quote_or_rt_text, "\n".join(tweet.poll_options)])) else: return "\n".join(filter(None, [tweet.user_entered_text, tweet.quote_or_rt_text]))
Get all of the text of the tweet. This includes @ mentions, long links, quote-tweet contents (separated by a newline), RT contents & poll options Args: tweet (Tweet): A Tweet object (must be a Tweet object) Returns: str: text from tweet.user_entered_text, tweet.quote_or_rt_text and tweet.poll_options (if in original format), separated by newlines
juraj-google-style
def GetSeverityString(self, severity): if (0 <= severity < len(self._SEVERITY)): return self._SEVERITY[severity] return 'Unknown {0:d}'.format(severity)
Retrieves a string representation of the severity. Args: severity (int): severity. Returns: str: description of the event severity.
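A self-contained sketch of the bounds-checked lookup pattern used above; the severity names below are illustrative, not the formatter's actual table.

_SEVERITY = ['Success', 'Informational', 'Warning', 'Error']

def get_severity_string(severity):
    # return the name when the index is in range, otherwise a fallback string
    if 0 <= severity < len(_SEVERITY):
        return _SEVERITY[severity]
    return 'Unknown {0:d}'.format(severity)

print(get_severity_string(2))  # 'Warning'
print(get_severity_string(9))  # 'Unknown 9'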
codesearchnet
def from_string(species_string: str): m = re.search('([A-Z][a-z]*)([0-9.]*)([+\\-])(.*)', species_string) if m: sym = m.group(1) oxi = (1 if (m.group(2) == '') else float(m.group(2))) oxi = ((- oxi) if (m.group(3) == '-') else oxi) properties = None if m.group(4): toks = m.group(4).replace(',', '').split('=') properties = {toks[0]: float(toks[1])} return Specie(sym, oxi, properties) else: raise ValueError('Invalid Species String')
Returns a Specie from a string representation. Args: species_string (str): A typical string representation of a species, e.g., "Mn2+", "Fe3+", "O2-". Returns: A Specie object. Raises: ValueError if species_string cannot be interpreted.
codesearchnet
def Graph(self): graph = graph_pb2.GraphDef() if (self._graph is not None): graph.ParseFromString(self._graph) return graph raise ValueError('There is no graph in this EventAccumulator')
Return the graph definition, if there is one. If the graph is stored directly, return that. If no graph is stored directly but a metagraph is stored containing a graph, return that. Raises: ValueError: If there is no graph for this run. Returns: The `graph_def` proto.
codesearchnet
def write_file_elements_to_strings_file(file_path, file_elements): f = open_strings_file(file_path, "w") for element in file_elements: f.write(unicode(element)) f.write(u"\n") f.close()
Write elements to the string file Args: file_path (str): The path to the strings file file_elements (list) : List of elements to write to the file.
juraj-google-style
def create(self, resource, timeout=(- 1)): return self._client.create(resource, timeout=timeout, default_values=self.DEFAULT_VALUES)
Creates a scope. Args: resource (dict): Object to create. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion. Returns: dict: Created scope.
codesearchnet
def _CheckpointFilename(self, p): name, _ = p return name
Returns the checkpoint filename given a `(filename, time)` pair. Args: p: (filename, time) pair. Returns: Checkpoint file name.
github-repos
async def subscriptions(self, request): if not self._accepting: return web.Response(status=503) web_sock = web.WebSocketResponse() await web_sock.prepare(request) async for msg in web_sock: if msg.type == aiohttp.WSMsgType.TEXT: await self._handle_message(web_sock, msg.data) elif msg.type == aiohttp.WSMsgType.ERROR: LOGGER.warning( 'Web socket connection closed with exception %s', web_sock.exception()) await web_sock.close() await self._handle_unsubscribe(web_sock) return web_sock
Handles requests for new subscription websockets. Args: request (aiohttp.Request): the incoming request Returns: aiohttp.web.WebSocketResponse: the websocket response, when the resulting websocket is closed
juraj-google-style
def invoke_step(self, context): logger.debug('starting') logger.debug(f'running step {self.module}') self.run_step_function(context) logger.debug(f'step {self.module} done')
Invoke 'run_step' in the dynamically loaded step module. Don't invoke this from outside the Step class. Use pypyr.dsl.Step.run_step instead. invoke_step just does the bare module step invocation, it does not evaluate any of the decorator logic surrounding the step. So unless you really know what you're doing, use run_step if you intend on executing the step the same way pypyr does. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate.
codesearchnet
def get_metar_from_mission(mission_file: str, icao: str='XXXX', time: str=None) -> str: return _MetarFromMission(mission_file=mission_file, icao=icao, time=time).metar
Builds a dummy METAR string from a mission file Args: mission_file: input mission file icao: dummy ICAO (defaults to XXXX) time: dummy time (defaults to now()) Returns: METAR str
codesearchnet
def _chunk_query(l, n, cn, conn, table, db_type): [insert_query_m(l[i:(i + n)], table, conn, cn, db_type) for i in range(0, len(l), n)]
Call for inserting SQL query in chunks based on n rows Args: l (list): List of tuples n (int): Number of rows cn (str): Column names conn (connection object): Database connection object table (str): Table name db_type (str): If "sqlite" or "mysql"
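A generic sketch of the chunking idiom used by _chunk_query: slice the list into n-row batches and hand each batch to the insert helper; the database call is replaced by print here for illustration.

rows = [(i, i * i) for i in range(7)]
n = 3
for i in range(0, len(rows), n):
    print(rows[i:i + n])  # three batches of 3, 3 and 1 rows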
codesearchnet
def _CreateConfig(self, project_id): project_id = (project_id or self._GetNumericProjectId()) if (not project_id): return self.boto_config_header %= (self.boto_config_script, self.boto_config_template) config = config_manager.ConfigManager(config_file=self.boto_config_template, config_header=self.boto_config_header) boto_dir = os.path.dirname(self.boto_config_script) config.SetOption('GSUtil', 'default_project_id', project_id) config.SetOption('GSUtil', 'default_api_version', '2') config.SetOption('GoogleCompute', 'service_account', 'default') config.SetOption('Plugin', 'plugin_directory', boto_dir) config.WriteConfig(config_file=self.boto_config)
Create the boto config to support standalone GSUtil. Args: project_id: string, the project ID to use in the config file.
codesearchnet
def _resource_apply_sparse(self, grad, handle, indices): raise NotImplementedError()
Add ops to apply sparse gradients to the variable `handle`. Similar to `_apply_sparse`, the `indices` argument to this method has been de-duplicated. Optimizers which deal correctly with non-unique indices may instead override `_resource_apply_sparse_duplicate_indices` to avoid this overhead. Args: grad: a `Tensor` representing the gradient for the affected indices. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. indices: a `Tensor` of integral type representing the indices for which the gradient is nonzero. Indices are unique. Returns: An `Operation` which updates the value of the variable.
github-repos
def _parse_resources(resource_values: dict, resource_name: str) -> dict: resources = {} for r_values in resource_values[resource_name]: if ('limits' in r_values): for (r_key, r_value) in resource_values[resource_name][r_values].items(): if ('cpu' in r_key): cpu_value = (float(r_value) * (10 ** 9)) cpu_key = (r_key[:3] + '_limit') resources[cpu_key] = int(cpu_value) if ('mem' in r_key): mem_value = re.sub('M', '', r_value) mem_key = (r_key[:3] + '_limit') resources[mem_key] = (int(mem_value) * 1048576) resources_spec = docker.types.Resources(**resources) return resources_spec
Parse resources key. Args: resource_values (dict): resource configuration values resource_name (string): Resource name Returns: dict, resources specification
codesearchnet
def _add_resource_to_collection(parent_resource: Dict[str, Any], resource_json: Dict[str, Any], collections_per_resource_type: Dict[str, ResourceCollection]) -> None: resource_type = resource_json.get('resourceType') if resource_type in collections_per_resource_type: collections_per_resource_type[resource_type].put(resource_json, parent_resource) elif resource_type == 'Bundle': for entry in resource_json.get('entry', ()): bundle_resource = entry.get('resource') if bundle_resource: _add_resource_to_collection(parent_resource, bundle_resource, collections_per_resource_type)
Adds an entry for the given resource to the appropriate collection. Adds the resource described by `resource_json` found within `parent_resource` to the appropriate ResourceCollection of the given `fhir_package`. Allows the resource to subsequently be retrieved by its URL from the FhirPackage. In the case where `resource_json` is located inside a bundle, `parent_resource` will be the bundle containing the resource. Otherwise, `resource_json` and `parent_resource` will be the same JSON object. If the JSON is not a FHIR resource, or not a resource type tracked by the PackageManager, does nothing. Args: parent_resource: The bundle `resource_json` can be found inside, or the resource itself if it is not part of a bundle. resource_json: The parsed JSON representation of the resource to add. collections_per_resource_type: The set of `ResourceCollection`s to add the resource to.
github-repos
def __setstate__(self, state): if isinstance(state, tuple): self.__init__(state[0]) elif isinstance(state, basestring): self.__init__(state) elif isinstance(state, dict): if '__frange' in state and '__set' in state and '__list' in state: self._frange = state['__frange'] self._items = frozenset(state['__set']) self._order = tuple(state['__list']) else: for k in self.__slots__: setattr(self, k, state[k]) else: msg = "Unrecognized state data from which to deserialize FrameSet" raise ValueError(msg)
Allows for de-serialization from a pickled :class:`FrameSet`. Args: state (tuple or str or dict): A string/dict can be used for backwards compatibility Raises: ValueError: if state is not an appropriate type
juraj-google-style
def _parse_dtensor_env_var_from_cluster_resolver(cluster_resolver): result = {} cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_resolver.cluster_spec()) dtensor_jobs = [] if 'chief' in cluster_spec.jobs: dtensor_jobs.extend(cluster_spec.job_tasks('chief')) if 'worker' in cluster_spec.jobs: dtensor_jobs.extend(cluster_spec.job_tasks('worker')) if None in dtensor_jobs: raise ValueError(f'Unexpected dtensor job address from cluster spec: {cluster_spec}') result['DTENSOR_JOBS'] = ','.join(dtensor_jobs) result['DTENSOR_NUM_CLIENTS'] = str(len(dtensor_jobs)) if cluster_resolver.task_type == 'chief': dtensor_client_id = 0 elif cluster_resolver.task_type == 'worker': dtensor_client_id = cluster_resolver.task_id if 'chief' in cluster_spec.jobs: dtensor_client_id += 1 result['DTENSOR_CLIENT_ID'] = str(dtensor_client_id) result['DTENSOR_JOB_NAME'] = 'worker' return result
Parse the env vars for DTensor based on the cluster resolver. In the multi-client setting, each of the DTensor jobs needs to be aware of the others, and the interface to set up those values is via env vars. The values used by DTensor are different from the existing `MultiWorkerMirroredStrategy`. This function will parse the values from the cluster resolver and populate the corresponding values for DTensor jobs in `os.environ`. Args: cluster_resolver: A `tf.distribute.cluster_resolver.ClusterResolver` instance. Returns: A dict of {Str:Str} which contains all the env vars needed by DTensor jobs. The value is for verification purposes. Raises: ValueError: The value parsed from the existing cluster spec is not valid.
github-repos
def _set_current_subscript(self, active): current_subscript = self.sender() if active: for subscript_name in list(self._current_subscript_stage['subscript_exec_count'].keys()): if (subscript_name == current_subscript.name): self._current_subscript_stage['subscript_exec_count'][subscript_name] += 1 self._current_subscript_stage['current_subscript'] = current_subscript else: self._current_subscript_stage['current_subscript'] = current_subscript for subscript_name in list(self._current_subscript_stage['subscript_exec_count'].keys()): if (subscript_name == current_subscript.name): duration = (current_subscript.end_time - current_subscript.start_time) if (subscript_name in self._current_subscript_stage['subscript_exec_duration']): duration_old = self._current_subscript_stage['subscript_exec_duration'][subscript_name] else: duration_old = datetime.timedelta(0) exec_count = self._current_subscript_stage['subscript_exec_count'][subscript_name] duration_new = ((duration_old * (exec_count - 1)) + duration) self._current_subscript_stage['subscript_exec_duration'][subscript_name] = (((duration_old * (exec_count - 1)) + duration) / exec_count)
Sets the current subscript and keeps a counter of how often a particular subscript has been executed. This information is useful when implementing a status update or plotting functions that depend on which subscript is being executed. Keeps track of the following dictionary: self._current_subscript_stage = { 'current_subscript' : reference to the current subscript 'subscript_exec_count' : dictionary where key is the subscript name and value is how often it has been executed 'subscript_exec_duration' : dictionary where key is the subscript name and value is the average duration of executing the subscript } Args: active: True if the current subscript has just started, False if it just finished
codesearchnet
def get_range(self, request, start, end): for i in range(2): try: stream = self.get_stream(request, start) data = stream.read(end - start) self._download_pos += len(data) return data except Exception as e: self._download_stream = None self._download_request = None if i == 0: continue if isinstance(e, messages.S3ClientError): raise e raise messages.S3ClientError(str(e), get_http_error_code(e))
Retrieves an object's contents. Args: request: (GetRequest) request start: (int) start offset end: (int) end offset (exclusive) Returns: (bytes) The response message.
github-repos
def check_result(data, key=''): if not isinstance(data, dict): return False if key: if key in data: return True return False if 'resultCode' in data.keys(): return True if data.get('resultCode', -1) == 0 else False elif 'code' in data.keys(): return True if data.get('code', -1) == 0 else False return False
Check the result of an API response. Ideally, this should be done by checking that the value of the ``resultCode`` attribute is 0, but there are endpoints that simply do not follow this rule. Args: data (dict): Response obtained from the API endpoint. key (string): Key to check for existence in the dict. Returns: bool: True if result was correct, False otherwise.
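Hedged examples of the documented behaviour, assuming check_result is in scope.

print(check_result({'resultCode': 0}))         # True:  canonical success code
print(check_result({'code': -1}))              # False: non-zero code
print(check_result({'stop': []}, key='stop'))  # True:  key-existence mode
print(check_result('not a dict'))              # False: non-dict input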
juraj-google-style
def _process_book(link): data = DOWNER.download(link) dom = dhtmlparser.parseString(utils.handle_encodnig(data)) dhtmlparser.makeDoubleLinked(dom) price = None try: price = _strip_content(zapi.get_price(dom)) except UserWarning: price = dom.find('p', {'class': 'vaseCena'}) if price: price = price[0].getContent().replace('&nbsp;', ' ') price = filter((lambda x: x.isdigit()), price.strip()) if price: price = (price[0] + 'kč') else: price = '-1' else: price = '-1' pub = Publication(title=_strip_content(zapi.get_title(dom)), authors=_parse_authors(zapi.get_author(dom)), price=price, publisher=_strip_content(zapi.get_publisher(dom))) pub.optionals.URL = link pub.optionals.pages = _strip_content(zapi.get_pages(dom)) pub.optionals.pub_date = _strip_content(zapi.get_pub_date(dom)) pub.optionals.ISBN = _strip_content(zapi.get_ISBN(dom)) pub.optionals.binding = _strip_content(zapi.get_binding(dom)) if pub.title.startswith('E-kniha:'): pub.title = pub.title.replace('E-kniha:', '', 1).strip() pub.optionals.is_ebook = True if pub.optionals.ISBN: if (' ' in pub.optionals.ISBN): pub.optionals.ISBN = pub.optionals.ISBN.split(' ')[0] if ('(' in pub.optionals.ISBN): pub.optionals.ISBN = pub.optionals.ISBN.split('(')[0] return pub
Download and parse available information about the book from the publisher's webpages. Args: link (str): URL of the book at the publisher's webpages. Returns: obj: :class:`.Publication` instance with book details.
codesearchnet
def df(self): url = self._url('/system/df') return self._result(self._get(url), True)
Get data usage information. Returns: (dict): A dictionary representing different resource categories and their respective data usage. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def cases(store, case_query, limit=100): case_groups = {status: [] for status in CASE_STATUSES} for case_obj in case_query.limit(limit): analysis_types = set(ind['analysis_type'] for ind in case_obj['individuals']) case_obj['analysis_types'] = list(analysis_types) case_obj['assignees'] = [store.user(user_email) for user_email in case_obj.get('assignees', [])] case_groups[case_obj['status']].append(case_obj) case_obj['is_rerun'] = len(case_obj.get('analyses', [])) > 0 case_obj['clinvar_variants'] = store.case_to_clinVars(case_obj['_id']) case_obj['display_track'] = TRACKS[case_obj.get('track', 'rare')] data = { 'cases': [(status, case_groups[status]) for status in CASE_STATUSES], 'found_cases': case_query.count(), 'limit': limit, } return data
Preprocess case objects. Add the necessary information to display the 'cases' view Args: store(adapter.MongoAdapter) case_query(pymongo.Cursor) limit(int): Maximum number of cases to display Returns: data(dict): includes the cases, how many there are and the limit.
juraj-google-style
class SiglipEncoder(nn.Module): def __init__(self, config: SiglipConfig): super().__init__() self.config = config self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False @can_return_tuple def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for encoder_layer in self.layers: if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`SiglipEncoderLayer`]. Args: config: SiglipConfig
github-repos
def deconv_output_length(input_length, filter_size, padding, stride): if input_length is None: return None input_length *= stride if padding == 'valid': input_length += max(filter_size - stride, 0) elif padding == 'full': input_length -= stride + filter_size - 2 return input_length
Determines output length of a transposed convolution given input length. Args: input_length: integer. filter_size: integer. padding: one of "same", "valid", "full". stride: integer. Returns: The output length (integer).
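A worked example of the documented arithmetic, assuming deconv_output_length is in scope.

# valid padding: length * stride + max(filter_size - stride, 0); same padding: length * stride
print(deconv_output_length(4, filter_size=3, padding='valid', stride=2))  # 4*2 + (3-2) = 9
print(deconv_output_length(4, filter_size=3, padding='same', stride=2))   # 4*2 = 8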
github-repos
def sysctl(command): out = subprocess.check_output(command) result = out.split(b' ')[1] try: return int(result) except ValueError: return result
Run a sysctl command and parse the output. Args: command: A sysctl command with an argument, for example, ["sysctl", "hw.memsize"]. Returns: The parsed output.
codesearchnet
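A hedged usage sketch for the `sysctl` helper above (assumes a BSD/macOS-style `sysctl` whose output looks like `hw.memsize: 17179869184`):

# The helper splits on the first space and keeps the second token,
# so "hw.memsize: 17179869184" yields the integer 17179869184.
mem_bytes = sysctl(["sysctl", "hw.memsize"])
print(mem_bytes)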
def get_neighbors_in_shell(self, origin, r, dr): outer = self.get_sites_in_sphere(origin, r + dr) inner = r - dr return [(site, dist) for (site, dist) in outer if dist > inner]
Returns all sites in a shell centered on origin (coords) between radii r-dr and r+dr. Args: origin (3x1 array): Cartesian coordinates of center of sphere. r (float): Inner radius of shell. dr (float): Width of shell. Returns: [(site, dist) ...] since most of the time, subsequent processing requires the distance.
juraj-google-style
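A hedged usage sketch (the `structure` object and the 3.0 ± 0.2 Å shell are assumptions for illustration; the attribute names follow pymatgen conventions):

# Sites between 2.8 and 3.2 Angstrom from the first site's Cartesian position.
shell = structure.get_neighbors_in_shell(structure[0].coords, 3.0, 0.2)
for site, dist in shell:
    print(site.species_string, round(dist, 3))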
def __mul__(self, other): return self.__class__(self.x, other * self.y, *self._args, **self._kwargs)
Scale the Spectrum's y values Args: other: scalar, The scale amount Returns: Spectrum object with y values scaled
juraj-google-style
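A hedged usage sketch (the `Spectrum(x, y)` constructor and NumPy-array storage of `y` are assumptions; only `y` is scaled, `x` stays untouched):

import numpy as np

spec = Spectrum(np.array([400, 500, 600]), np.array([1.0, 2.0, 3.0]))
halved = spec * 0.5   # halved.y == [0.5, 1.0, 1.5]; halved.x unchanged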
def record2marcxml(record): schema_name = _get_schema_name(record) if schema_name == 'hep': marcjson = hep2marc.do(record) elif schema_name == 'authors': marcjson = hepnames2marc.do(record) else: raise NotImplementedError(u'JSON -> MARC rules missing for "{}"'.format(schema_name)) record = RECORD() for key, values in sorted(iteritems(marcjson)): tag, ind1, ind2 = _parse_key(key) if _is_controlfield(tag, ind1, ind2): value = force_single_element(values) if not isinstance(value, text_type): value = text_type(value) record.append(CONTROLFIELD(_strip_invalid_chars_for_xml(value), {'tag': tag})) else: for value in force_list(values): datafield = DATAFIELD({'tag': tag, 'ind1': ind1, 'ind2': ind2}) for code, els in sorted(iteritems(value)): for el in force_list(els): if not isinstance(el, text_type): el = text_type(el) datafield.append(SUBFIELD(_strip_invalid_chars_for_xml(el), {'code': code})) record.append(datafield) return tostring(record, encoding='utf8', pretty_print=True)
Convert a JSON record to a MARCXML string. Deduces which set of rules to use by parsing the ``$schema`` key, as it unequivocally determines which kind of record we have. Args: record(dict): a JSON record. Returns: str: a MARCXML string converted from the record.
juraj-google-style
def colorize(text, messageType=None):
    formattedText = str(text)
    # Guard against the default None so the substring checks below cannot fail.
    if messageType is None:
        messageType = ""
    if "ERROR" in messageType:
        formattedText = colorama.Fore.RED + formattedText
    elif "WARNING" in messageType:
        formattedText = colorama.Fore.YELLOW + formattedText
    elif "SUCCESS" in messageType:
        formattedText = colorama.Fore.GREEN + formattedText
    elif "INFO" in messageType:
        formattedText = colorama.Fore.BLUE + formattedText
    if "BOLD" in messageType:
        formattedText = colorama.Style.BRIGHT + formattedText
    return formattedText + colorama.Style.RESET_ALL
Function that colorizes a message. Args: ----- text: The string to be colorized. messageType: Possible options include "ERROR", "WARNING", "SUCCESS", "INFO" or "BOLD". Returns: -------- string: Colorized if the option is correct, including a tag at the end to reset the formatting.
juraj-google-style
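Example calls for the `colorize` helper above (output depends on the terminal; `colorama.init()` is only strictly needed on Windows):

import colorama
colorama.init()

print(colorize("Everything worked.", "SUCCESS"))
print(colorize("Something went wrong.", "ERROR BOLD"))  # red and bright
print(colorize("Plain text."))                          # no formatting applied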
def get_eligible_features(examples, num_mutants): features_dict = get_numeric_features_to_observed_range(examples) features_dict.update(get_categorical_features_to_sampling(examples, num_mutants)) features_list = [] for (k, v) in sorted(features_dict.items()): v['name'] = k features_list.append(v) return features_list
Returns a list of JSON objects for each feature in the examples. This list is used to drive partial dependence plots in the plugin. Args: examples: Examples to examine to determine the eligible features. num_mutants: The number of mutations to make over each feature. Returns: A list with a JSON object for each feature. Numeric features are represented as {name: observedMin: observedMax:}. Categorical features are represented as {name: samples:[]}.
codesearchnet
def mean(series): if np.issubdtype(series.dtype, np.number): return series.mean() else: return np.nan
Returns the mean of a series. Args: series (pandas.Series): column to summarize.
codesearchnet
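Two quick calls illustrating the numeric/non-numeric split in `mean` above:

import numpy as np
import pandas as pd

print(mean(pd.Series([1, 2, 3, 4])))          # 2.5
print(np.isnan(mean(pd.Series(['a', 'b']))))  # True -- non-numeric columns give NaN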
def add_from_existing(self, resource, timeout=(- 1)): uri = (self.URI + '/from-existing') return self._client.create(resource, uri=uri, timeout=timeout)
Adds a volume that already exists in the Storage system. Args: resource (dict): Object to create. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Added resource.
codesearchnet
def get_user_info(self, dn, _connection=None): return self.get_object( dn=dn, filter=self.config.get('LDAP_USER_OBJECT_FILTER'), attributes=self.config.get("LDAP_GET_USER_ATTRIBUTES"), _connection=_connection, )
Gets info about a user specified at dn. Args: dn (str): The dn of the user to find _connection (ldap3.Connection): A connection object to use when searching. If not given, a temporary connection will be created, and destroyed after use. Returns: dict: A dictionary of the user info from LDAP
juraj-google-style
def _add_encrypted(self, other): if self.public_key != other.public_key: raise ValueError("Attempted to add numbers encrypted against " "different public keys!") a, b = self, other if a.exponent > b.exponent: a = self.decrease_exponent_to(b.exponent) elif a.exponent < b.exponent: b = b.decrease_exponent_to(a.exponent) sum_ciphertext = a._raw_add(a.ciphertext(False), b.ciphertext(False)) return EncryptedNumber(a.public_key, sum_ciphertext, a.exponent)
Returns E(a + b) given E(a) and E(b). Args: other (EncryptedNumber): an `EncryptedNumber` to add to self. Returns: EncryptedNumber: E(a + b), calculated by taking the product of E(a) and E(b) modulo :attr:`~PaillierPublicKey.n` ** 2. Raises: ValueError: if numbers were encrypted against different keys.
juraj-google-style
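A hedged end-to-end sketch using the public API of the python-paillier (`phe`) package, where `+` on two EncryptedNumbers delegates to this helper:

from phe import paillier

public_key, private_key = paillier.generate_paillier_keypair()
a = public_key.encrypt(3)
b = public_key.encrypt(4)
total = a + b                       # E(3) * E(4) mod n**2 under the hood
print(private_key.decrypt(total))   # 7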
def _find_image_bounding_boxes(filenames, image_to_bboxes): num_image_bbox = 0 bboxes = [] for f in filenames: basename = os.path.basename(f) if basename in image_to_bboxes: bboxes.append(image_to_bboxes[basename]) num_image_bbox += 1 else: bboxes.append([]) print('Found %d images with bboxes out of %d images' % ( num_image_bbox, len(filenames))) return bboxes
Find the bounding boxes for a given list of image files. Args: filenames: list of strings; each string is a path to an image file. image_to_bboxes: dictionary mapping image file names to a list of bounding boxes. This list contains 0+ bounding boxes. Returns: List of bounding boxes for each image. Note that each entry in this list might contain 0 or more entries, corresponding to the number of bounding box annotations for the image.
juraj-google-style
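A small illustrative call (file names and boxes are made up):

filenames = ['/data/train/dog1.jpg', '/data/train/cat7.jpg']
image_to_bboxes = {'dog1.jpg': [[0.1, 0.2, 0.5, 0.6]]}   # cat7.jpg has no annotation
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
# bboxes == [[[0.1, 0.2, 0.5, 0.6]], []] and it prints
# "Found 1 images with bboxes out of 2 images"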
def _read_mode_mptcp(self, size, kind): bins = self._read_binary(1) subt = int(bins[:4], base=2) bits = bins[4:] dlen = (size - 1) func = mptcp_opt.get(subt) if (func is None): temp = self._read_fileng(dlen) data = dict(kind=kind, length=size, subtype='Unknown', data=(bytes(chr(int(bits[:4], base=2)), encoding='utf-8') + temp)) else: data = func(self, bits, dlen, kind) return data
Read Multipath TCP option. Positional arguments: * size - int, length of option * kind - int, 30 (Multipath TCP) Returns: * dict -- extracted Multipath TCP (MP-TCP) option Structure of MP-TCP [RFC 6824]: 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +---------------+---------------+-------+-----------------------+ | Kind | Length |Subtype| | +---------------+---------------+-------+ | | Subtype-specific data | | (variable length) | +---------------------------------------------------------------+ Octets Bits Name Description 0 0 tcp.mp.kind Kind (30) 1 8 tcp.mp.length Length 2 16 tcp.mp.subtype Subtype 2 20 tcp.mp.data Subtype-specific Data
codesearchnet
def read_config(contents): file_obj = io.StringIO(contents) config = six.moves.configparser.ConfigParser() config.readfp(file_obj) return config
Reads pylintrc config into native ConfigParser object. Args: contents (str): The contents of the file containing the INI config. Returns: ConfigParser.ConfigParser: The parsed configuration.
juraj-google-style
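A short usage sketch for `read_config` above (the INI snippet is illustrative):

contents = u"[MESSAGES CONTROL]\ndisable = C0111,W0511\n"
config = read_config(contents)
print(config.get("MESSAGES CONTROL", "disable"))   # "C0111,W0511"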
def log_error(self, msg): if self.__logger: self.__logger.error(msg) raise RuntimeError(msg)
Log an error and raise an exception. Args: msg: Error message to log. Raises: RuntimeError: With the message.
juraj-google-style
def ResultCollectionForFID(cls, flow_id): if (not isinstance(flow_id, rdfvalue.RDFURN)): flow_id = rdfvalue.RDFURN(flow_id) return sequential_collection.GeneralIndexedCollection(flow_id.Add(RESULTS_SUFFIX))
Returns the ResultCollection for the flow with a given flow_id. Args: flow_id: The id of the flow, a RDFURN of the form aff4:/flows/F:123456. Returns: The collection containing the results for the flow identified by the id.
codesearchnet
def parse(self, data, lexer=None, *args, **kwargs): if lexer is None: lexer = self.lexer return self.parser.parse(data, lexer=lexer, *args, **kwargs)
Parse the input JSON data string into a python data structure. Args: data: An input data string lexer: An optional ply.lex instance that overrides the default lexer. Returns: A python dict or list representing the input JSON data.
juraj-google-style
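A hedged usage sketch (`json_parser` stands in for an instance of the class that defines `parse` above):

data = json_parser.parse('{"name": "example", "values": [1, 2, 3]}')
print(data["values"][1])   # 2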
def __init__(self, metric_name, kind, value_type, update_op_func, mark=Mark.PRODUCER): self.kind = kind self.metric_name = metric_name if mark is Mark.CONSUMER: self.update_op_func = self._consumer_metric(update_op_func) elif mark is Mark.PRODUCER_BY_CONSUMER: self.update_op_func = self._by_consumer_metric(update_op_func) else: self.update_op_func = update_op_func self.value_type = value_type self.mark = mark
Constructor. update_op_func is used when updating an `Operation` from a `ReportRequestInfo`. Args: metric_name (str): the name of the metric descriptor kind (:class:`MetricKind`): the ``kind`` of the described metric value_type (:class:`ValueType`): the `value type` of the described metric update_op_func (function): the func to update an operation mark (:class:`Mark`): whether the metric is tracked for the producer, the consumer, or the producer broken down by consumer; defaults to the producer
juraj-google-style
def get_es_label(obj, def_obj): label_flds = LABEL_FIELDS if def_obj.es_defs.get('kds_esLabel'): label_flds = def_obj.es_defs['kds_esLabel'] + LABEL_FIELDS try: for label in label_flds: if def_obj.cls_defs.get(label): obj['label'] = def_obj.cls_defs[label][0] break if not obj.get('label'): obj['label'] = def_obj.__class__.__name__.split("_")[-1] except AttributeError: if def_obj.get('rdf_type'): obj['label'] = def_obj['rdf_type'][-1].value[-1] else: obj['label'] = "no_label" return obj
Returns object with label for an object that goes into the elasticsearch 'label' field args: obj: data object to update def_obj: the class instance that has definition values
juraj-google-style
def _stop(self) -> None: self._server.stop()
Stops the server. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while stopping the server.
github-repos
def pretokenized_tfds_dataset(dataset_name=gin.REQUIRED, text2self=gin.REQUIRED, tfds_data_dir=gin.REQUIRED, dataset_split=gin.REQUIRED, batch_size=gin.REQUIRED, sequence_length=gin.REQUIRED, vocabulary=None): del vocabulary dataset = tfds.load( dataset_name, split=dataset_split, as_supervised=True, data_dir=tfds_data_dir) if dataset_split == "train": dataset = dataset.repeat() dataset = dataset.shuffle(1000) def shift_and_append_eos(t): return tf.concat([t + 1, [1]], 0) def feature_map(inputs, targets): if text2self: return {"targets": shift_and_append_eos(targets)} else: return {"inputs": shift_and_append_eos(inputs), "targets": shift_and_append_eos(targets)} dataset = dataset.map(feature_map, num_parallel_calls=tf.data.experimental.AUTOTUNE) return pack_and_batch(dataset, batch_size, sequence_length)
Reads a tensorflow_datasets dataset. Args: dataset_name: a string text2self: a boolean tfds_data_dir: a string dataset_split: a string batch_size: an integer sequence_length: an integer vocabulary: ignored Returns: a tf.data.Dataset of batches
juraj-google-style
def run(self, dag): cx_runs = dag.collect_runs(['cx']) for cx_run in cx_runs: partition = [] chunk = [] for i in range((len(cx_run) - 1)): chunk.append(cx_run[i]) qargs0 = cx_run[i].qargs qargs1 = cx_run[(i + 1)].qargs if (qargs0 != qargs1): partition.append(chunk) chunk = [] chunk.append(cx_run[(- 1)]) partition.append(chunk) for chunk in partition: if ((len(chunk) % 2) == 0): for n in chunk: dag.remove_op_node(n) else: for n in chunk[1:]: dag.remove_op_node(n) return dag
Run one pass of cx cancellation on the circuit Args: dag (DAGCircuit): the directed acyclic graph to run on. Returns: DAGCircuit: Transformed DAG.
codesearchnet
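A hedged usage sketch, assuming this pass is exposed as `CXCancellation` in an older Qiskit release (the names and imports are assumptions):

from qiskit import QuantumCircuit
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import CXCancellation

circ = QuantumCircuit(2)
circ.cx(0, 1)
circ.cx(0, 1)   # an even run of identical CNOTs cancels completely

pm = PassManager(CXCancellation())
print(pm.run(circ).count_ops())   # no 'cx' entries remain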
def __init__(self, fn, args, kwargs, side_inputs, windowing, tagged_receivers, step_name=None, logging_context=None, state=None, scoped_metrics_container=None, operation_name=None, transform_id=None, user_state_context=None): side_inputs = list(side_inputs) self.step_name = step_name self.transform_id = transform_id self.context = DoFnContext(step_name, state=state) self.bundle_finalizer_param = DoFn.BundleFinalizerParam() self.execution_context = None do_fn_signature = DoFnSignature(fn) main_receivers = tagged_receivers[None] if 'outputs_per_element_counter' in RuntimeValueProvider.experiments: output_counter_name = CounterName('per-element-output-count', step_name=operation_name) per_element_output_counter = state._counter_factory.get_counter(output_counter_name, Counter.DATAFLOW_DISTRIBUTION).accumulator else: per_element_output_counter = None output_handler = _OutputHandler(windowing.windowfn, main_receivers, tagged_receivers, per_element_output_counter, getattr(fn, 'output_batch_converter', None), getattr(do_fn_signature.process_method.method_value, '_beam_yields_batches', False), getattr(do_fn_signature.process_batch_method.method_value, '_beam_yields_elements', False)) if do_fn_signature.is_stateful_dofn() and (not user_state_context): raise Exception('Requested execution of a stateful DoFn, but no user state context is available. This likely means that the current runner does not support the execution of stateful DoFns.') self.do_fn_invoker = DoFnInvoker.create_invoker(do_fn_signature, output_handler, self.context, side_inputs, args, kwargs, user_state_context=user_state_context, bundle_finalizer_param=self.bundle_finalizer_param)
Initializes a DoFnRunner. Args: fn: user DoFn to invoke args: positional side input arguments (static and placeholder), if any kwargs: keyword side input arguments (static and placeholder), if any side_inputs: list of sideinput.SideInputMaps for deferred side inputs windowing: windowing properties of the output PCollection(s) tagged_receivers: a dict of tag name to Receiver objects step_name: the name of this step logging_context: DEPRECATED [BEAM-4728] state: handle for accessing DoFn state scoped_metrics_container: DEPRECATED operation_name: The system name assigned by the runner for this operation. transform_id: The PTransform Id in the pipeline proto for this DoFn. user_state_context: The UserStateContext instance for the current Stateful DoFn.
github-repos
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0): local_buffer = utils.BytearrayStream() if self._operations: for operation in self._operations: operation.write(local_buffer, kmip_version=kmip_version) if self._object_types: for object_type in self._object_types: object_type.write(local_buffer, kmip_version=kmip_version) if self._vendor_identification: self._vendor_identification.write(local_buffer, kmip_version=kmip_version) if self._server_information: self._server_information.write(local_buffer, kmip_version=kmip_version) if self._application_namespaces: for application_namespace in self._application_namespaces: application_namespace.write(local_buffer, kmip_version=kmip_version) if (kmip_version >= enums.KMIPVersion.KMIP_1_1): if self._extension_information: for extension_information in self._extension_information: extension_information.write(local_buffer, kmip_version=kmip_version) if (kmip_version >= enums.KMIPVersion.KMIP_1_2): if self._attestation_types: for attestation_type in self._attestation_types: attestation_type.write(local_buffer, kmip_version=kmip_version) if (kmip_version >= enums.KMIPVersion.KMIP_1_3): if self._rng_parameters: for rng_parameters in self._rng_parameters: rng_parameters.write(local_buffer, kmip_version=kmip_version) if self._profile_information: for profile_information in self._profile_information: profile_information.write(local_buffer, kmip_version=kmip_version) if self._validation_information: for validation_information in self._validation_information: validation_information.write(local_buffer, kmip_version=kmip_version) if self._capability_information: for capability_information in self._capability_information: capability_information.write(local_buffer, kmip_version=kmip_version) if self._client_registration_methods: for client_reg_method in self._client_registration_methods: client_reg_method.write(local_buffer, kmip_version=kmip_version) if (kmip_version >= enums.KMIPVersion.KMIP_2_0): if self._defaults_information: self._defaults_information.write(local_buffer, kmip_version=kmip_version) if self._storage_protection_masks: for storage_protection_mask in self._storage_protection_masks: storage_protection_mask.write(local_buffer, kmip_version=kmip_version) self.length = local_buffer.length() super(QueryResponsePayload, self).write(output_buffer, kmip_version=kmip_version) output_buffer.write(local_buffer.buffer)
Write the data encoding the QueryResponsePayload object to a stream. Args: output_buffer (Stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
codesearchnet
def mean_squared_error(true, pred): result = (tf.reduce_sum(tf.squared_difference(true, pred)) / tf.to_float(tf.size(pred))) return result
L2 distance between tensors true and pred. Args: true: the ground truth image. pred: the predicted image. Returns: mean squared error between ground truth and predicted image.
codesearchnet
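A hedged check of the helper above (it uses TF1-style ops such as `tf.to_float`, so the sketch assumes the `tensorflow.compat.v1` API with eager execution disabled):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

true = tf.constant([[1.0, 2.0], [3.0, 4.0]])
pred = tf.constant([[1.0, 2.0], [3.0, 6.0]])
with tf.Session() as sess:
    print(sess.run(mean_squared_error(true, pred)))   # (0 + 0 + 0 + 4) / 4 = 1.0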
def process_file(vcs, commit, force, gitlint_config, file_data): (filename, extra_data) = file_data if force: modified_lines = None else: modified_lines = vcs.modified_lines(filename, extra_data, commit=commit) result = linters.lint(filename, modified_lines, gitlint_config) result = result[filename] return (filename, result)
Lint the file. Returns: A tuple of the filename and the lint results reported for that file.
codesearchnet
def __call__(self, current_obj, attr, obj_ref): def _find_obj_fqn(p, fqn_name, cls): def find_obj(parent, name): if parent is not current_obj and \ self.scope_redirection_logic is not None: from textx.scoping import Postponed res = self.scope_redirection_logic(parent) assert res is not None, \ "scope_redirection_logic must not return None" if type(res) is Postponed: return res for m in res: return_value = find_obj(m, name) if return_value is not None: return return_value for attr in [a for a in parent.__dict__ if not a.startswith('__') and not a.startswith('_tx_') and not callable(getattr(parent, a))]: obj = getattr(parent, attr) if isinstance(obj, (list, tuple)): for innerobj in obj: if hasattr(innerobj, "name") \ and innerobj.name == name: return innerobj else: if hasattr(obj, "name") and obj.name == name: return obj return None for n in fqn_name.split('.'): obj = find_obj(p, n) if obj: if type(obj) is Postponed: return obj p = obj else: return None from textx import textx_isinstance if textx_isinstance(obj, cls): return p else: return None def _find_referenced_obj(p, name, cls): ret = _find_obj_fqn(p, name, cls) if ret: return ret while hasattr(p, "parent"): p = p.parent ret = _find_obj_fqn(p, name, cls) if ret: return ret from textx.model import ObjCrossRef assert type(obj_ref) is ObjCrossRef, type(obj_ref) obj_cls, obj_name = obj_ref.cls, obj_ref.obj_name return _find_referenced_obj(current_obj, obj_name, obj_cls)
Find a fully qualified name. Use this callable as scope_provider in a meta-model: my_metamodel.register_scope_provider( {"*.*":textx.scoping.providers.FQN}) Args: current_obj: object corresponding to an instance of an object (rule instance) attr: the referencing attribute (unused) obj_ref: ObjCrossRef to be resolved Returns: None or the referenced object
juraj-google-style