Columns: code (string, 20–4.93k chars) · docstring (string, 33–1.27k chars) · source (one of 3 classes: codesearchnet, juraj-google-style, github-repos)
def from_corpus(cls, corpus):
    ds = Corpus()

    # Copy the tracks and issuers, keeping a mapping from old to new ids.
    tracks = copy.deepcopy(list(corpus.tracks.values()))
    track_mapping = ds.import_tracks(tracks)

    issuers = copy.deepcopy(list(corpus.issuers.values()))
    issuer_mapping = ds.import_issuers(issuers)

    utterances = copy.deepcopy(list(corpus.utterances.values()))
    for utterance in utterances:
        utterance.track = track_mapping[utterance.track.idx]
        if utterance.issuer is not None:
            utterance.issuer = issuer_mapping[utterance.issuer.idx]
    ds.import_utterances(utterances)

    subviews = copy.deepcopy(corpus.subviews)
    for subview_idx, subview in subviews.items():
        ds.import_subview(subview_idx, subview)

    for feat_container_idx, feature_container in corpus.feature_containers.items():
        ds.new_feature_container(feat_container_idx, feature_container.path)

    return ds
Create a new modifiable corpus from any other CorpusView. This can be used, for example, to create an independent modifiable corpus from a subview. Args: corpus (CorpusView): The corpus to create a copy from. Returns: Corpus: A new corpus with the same data as the given one.
codesearchnet
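A minimal usage sketch for the entry above, assuming an audiomate-style API (the loader call and the subview name 'train' are illustrative assumptions):

full_corpus = Corpus.load('/data/corpus')      # assumed corpus loader
train_view = full_corpus.subviews['train']     # hypothetical subview name
train_corpus = Corpus.from_corpus(train_view)  # independent, modifiable copy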
def test_string(self, string: str) -> bool:
    if self.input.startswith(string, self.offset):
        self.offset += len(string)
        return True
    return False
If `string` comes next, return ``True`` and advance offset. Args: string: string to test
juraj-google-style
def allocate(self, size, max_time_to_block_ms):
    with self._lock:
        if self._free:
            return self._free.popleft()
        elif self._poolable_size == 0:
            return io.BytesIO()
        else:
            buf = None
            more_memory = threading.Condition(self._lock)
            self._waiters.append(more_memory)
            # Loop until a buffer is freed or the blocking time is exceeded.
            while buf is None:
                start_wait = time.time()
                more_memory.wait(max_time_to_block_ms / 1000.0)
                end_wait = time.time()
                if self.wait_time:
                    self.wait_time.record(end_wait - start_wait)
                if self._free:
                    buf = self._free.popleft()
                else:
                    self._waiters.remove(more_memory)
                    raise Errors.KafkaTimeoutError(
                        "Failed to allocate memory within the configured"
                        " max blocking time")
            # Remove the condition for this thread and signal the next
            # waiter if there is free memory left over.
            removed = self._waiters.popleft()
            assert removed is more_memory, 'Wrong condition'
            if self._free and self._waiters:
                self._waiters[0].notify()
            return buf
Allocate a buffer of the given size. This method blocks if there is not enough memory and the buffer pool is configured with blocking mode. Arguments: size (int): The buffer size to allocate in bytes [ignored] max_time_to_block_ms (int): The maximum time in milliseconds to block for buffer memory to be available Returns: io.BytesIO
juraj-google-style
from collections import OrderedDict

def sort_dict(d, desc=True):
    sort = sorted(d.items(), key=lambda x: x[1], reverse=desc)
    return OrderedDict(sort)
Sort an ordered dictionary by value, descending. Args: d (OrderedDict): An ordered dictionary. desc (bool): If true, sort desc. Returns: OrderedDict: The sorted dictionary.
juraj-google-style
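A quick, runnable example of sort_dict:

scores = OrderedDict([('a', 3), ('b', 10), ('c', 7)])
print(sort_dict(scores))              # OrderedDict([('b', 10), ('c', 7), ('a', 3)])
print(sort_dict(scores, desc=False))  # OrderedDict([('a', 3), ('c', 7), ('b', 10)])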
async def bootstrap(self, addrs): log.debug('Attempting to bootstrap node with %i initial contacts', len(addrs)) cos = list(map(self.bootstrap_node, addrs)) gathered = (await asyncio.gather(*cos)) nodes = [node for node in gathered if (node is not None)] spider = NodeSpiderCrawl(self.protocol, self.node, nodes, self.ksize, self.alpha) return (await spider.find())
Bootstrap the server by connecting to other known nodes in the network. Args: addrs: A `list` of (ip, port) `tuple` pairs. Note that only IP addresses are acceptable - hostnames will cause an error.
codesearchnet
def _has_requirements(self):
    self._closed()
    return any([self.has_workflow_step,
                self.has_scatter_requirement,
                self.has_multiple_inputs])
Returns True if the workflow needs a requirements section. Returns: bool: True if the workflow needs a requirements section, False otherwise.
codesearchnet
def evaluate(nodes, x_val, y_val):
    _, num_nodes = nodes.shape
    if num_nodes == 1:
        raise ValueError('A point cannot be implicitized')
    elif num_nodes == 2:
        # Line: f(x, y) is a 2x2 determinant.
        return ((nodes[0, 0] - x_val) * (nodes[1, 1] - y_val) -
                (nodes[0, 1] - x_val) * (nodes[1, 0] - y_val))
    elif num_nodes == 3:
        val_a, val_b, val_c = nodes[0, :] - x_val
        val_b *= 2
        val_d, val_e, val_f = nodes[1, :] - y_val
        val_e *= 2
        # Evaluate the Sylvester determinant by cofactor expansion.
        sub1 = val_b * val_f - val_c * val_e
        sub2 = val_a * val_f - val_c * val_d
        sub_det_a = -val_e * sub1 + val_f * sub2
        sub_det_d = val_b * sub1 - val_c * sub2
        return val_a * sub_det_a + val_d * sub_det_d
    elif num_nodes == 4:
        return _evaluate3(nodes, x_val, y_val)
    else:
        raise _helpers.UnsupportedDegree(num_nodes - 1, supported=(1, 2, 3))
r"""Evaluate the implicitized bivariate polynomial containing the curve. Assumes `algebraic curve`_ containing :math:`B(s, t)` is given by :math:`f(x, y) = 0`. This function evaluates :math:`f(x, y)`. .. note:: This assumes, but doesn't check, that ``nodes`` has 2 rows. .. note:: This assumes, but doesn't check, that ``nodes`` is not degree-elevated. If it were degree-elevated, then the Sylvester matrix will always have zero determinant. Args: nodes (numpy.ndarray): ``2 x N`` array of nodes in a curve. x_val (float): ``x``-coordinate for evaluation. y_val (float): ``y``-coordinate for evaluation. Returns: float: The computed value of :math:`f(x, y)`. Raises: ValueError: If the curve is a point. .UnsupportedDegree: If the degree is not 1, 2 or 3.
codesearchnet
def __init__(self, max_tries=5, max_wait=10, *args, **kwargs):
    self._max_tries = max_tries
    if self._max_tries < 1:
        raise TypeError('max_tries must be a positive integer')
    self._max_wait = max_wait
    if self._max_wait < 1:
        raise TypeError('max_wait must be >= 1')
    super(NetworkType, self).__init__(*args, **kwargs)
Validation type for external resources Attempts to connect to the resource, backing off on failure. Args: max_tries: Max number of times to attempt a connection before failing max_wait: Max number of seconds to wait between connection attempts. This can be used to cap the exponential backoff.
juraj-google-style
def _stream_output(process):
    exit_code = None
    # Poll until the process exits, echoing each stdout line as it arrives.
    while exit_code is None:
        stdout = process.stdout.readline().decode("utf-8")
        sys.stdout.write(stdout)
        exit_code = process.poll()
    if exit_code != 0:
        raise RuntimeError("Process exited with code: %s" % exit_code)
    return exit_code
Stream the output of a process to stdout This function takes an existing process that will be polled for output. Only stdout will be polled and sent to sys.stdout. Args: process(subprocess.Popen): a process that has been started with stdout=PIPE and stderr=STDOUT Returns (int): process exit code
juraj-google-style
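A runnable sketch of driving _stream_output with a process started the way the docstring requires (stdout=PIPE, stderr=STDOUT):

import subprocess
import sys

proc = subprocess.Popen(
    [sys.executable, '-c', 'print("hello")'],
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
exit_code = _stream_output(proc)  # echoes "hello", then returns 0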
def put(self, obj):
    self._queue.put(obj, block=True, timeout=self._queue_put_timeout)
    if obj is _SHUTDOWNREQUEST:
        return
Put request into queue. Args: obj (cheroot.server.HTTPConnection): HTTP connection waiting to be processed
codesearchnet
def concat(self, axis, other_blocks):
    if type(other_blocks) is list:
        other_blocks = [blocks.partitions for blocks in other_blocks]
        return self.__constructor__(
            np.concatenate([self.partitions] + other_blocks, axis=axis))
    else:
        return self.__constructor__(
            np.append(self.partitions, other_blocks.partitions, axis=axis))
Concatenate the blocks with another set of blocks. Note: Assumes that the blocks are already the same shape on the dimension being concatenated. A ValueError will be thrown if this condition is not met. Args: axis: The axis to concatenate to. other_blocks: the other blocks to be concatenated. This is a BaseFrameManager object. Returns: A new BaseFrameManager object, the type of object that called this.
codesearchnet
def post_transform(self, args):
    args = args[1:] if args and args[0] == 'az' else args
    post_transform_commands = []
    for i, arg in enumerate(args):
        # Do not expand environment variables inside the command argument of
        # "alias create -c/--command".
        if (is_alias_command(['create'], args) and i > 0
                and args[i - 1] in ['-c', '--command']):
            post_transform_commands.append(arg)
        else:
            post_transform_commands.append(os.path.expandvars(arg))

    AliasManager.write_alias_config_hash(self.alias_config_hash)
    AliasManager.write_collided_alias(self.collided_alias)

    return post_transform_commands
Inject environment variables, and write hash to alias hash file after transforming alias to commands. Args: args: A list of args to post-transform.
codesearchnet
def as_dict(value):
    return {field.name: getattr(value, field.name)
            for field in value._tf_extension_type_fields()}
Extracts the attributes of `value` and their values to a dict format. Unlike `dataclasses.asdict()`, this function is not recursive and in case of nested `ExtensionType` objects, only the top level object is converted to a dict. Args: value: An `ExtensionType` object. Returns: A dict that contains the attributes of `value` and their values.
github-repos
def load_from_dict(self, conf_dict=None):
    self.set_to_default()
    self._update_dict(self._config, conf_dict)
    self._update_python_paths()
Load the configuration from a dictionary. Args: conf_dict (dict): Dictionary with the configuration.
codesearchnet
def recipe_sa_report(config, auth_sa, auth_bq, dataset, table, report,
                     is_incremental_load):
    sa(config, {
        'description': 'Create a dataset for bigquery tables.',
        'auth': auth_sa,
        'body': report,
        'out': {
            'bigquery': {
                'auth': auth_bq,
                'dataset': dataset,
                'table': table,
                'is_incremental_load': is_incremental_load,
                'header': True
            }
        }
    })
Move SA360 report to BigQuery. Args: auth_sa (authentication) - Credentials used for writing data. auth_bq (authentication) - Authorization used for writing data. dataset (string) - Existing BigQuery dataset. table (string) - Table to create from this report. report (json) - Body part of report request API call. is_incremental_load (boolean) - Clear data in destination table during this report's time period, then append report data to destination table.
github-repos
def AddArguments(cls, argument_group):
    argument_group.add_argument(
        '-o', '--output_format', '--output-format', metavar='FORMAT',
        dest='output_format', default='dynamic', help=(
            'The output format. Use "-o list" to see a list of available '
            'output formats.'))

    argument_group.add_argument(
        '-w', '--write', metavar='OUTPUT_FILE', dest='write',
        help='Output filename.')

    # Peek at sys.argv to determine which output module arguments to register.
    arguments = sys.argv[1:]
    argument_index = 0

    if '-o' in arguments:
        argument_index = arguments.index('-o') + 1
    elif '--output_format' in arguments:
        argument_index = arguments.index('--output_format') + 1
    elif '--output-format' in arguments:
        argument_index = arguments.index('--output-format') + 1

    if 0 < argument_index < len(arguments):
        names = [name.strip() for name in arguments[argument_index].split(',')]
    else:
        names = ['dynamic']

    if names and names != ['list']:
        manager.ArgumentHelperManager.AddCommandLineArguments(
            argument_group, category='output', names=names)
Adds command line arguments to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
juraj-google-style
def get_countries(is_legacy_xml=False):
    countries = {}

    # Determine the data directory (handles frozen Windows executables).
    if sys.platform == 'win32' and getattr(sys, 'frozen', False):
        data_dir = path.dirname(sys.executable)
    else:
        data_dir = path.dirname(__file__)

    if is_legacy_xml:
        log.debug('Opening country code legacy XML: {0}'.format(
            str(data_dir) + '/data/iso_3166-1_list_en.xml'))
        f = io.open(str(data_dir) + '/data/iso_3166-1_list_en.xml', 'r',
                    encoding='ISO-8859-1')
        data = f.read()
        if not data:
            return {}
        dom = parseString(data)
        entries = dom.getElementsByTagName('ISO_3166-1_Entry')
        for entry in entries:
            code = entry.getElementsByTagName(
                'ISO_3166-1_Alpha-2_Code_element')[0].firstChild.data
            name = entry.getElementsByTagName(
                'ISO_3166-1_Country_name')[0].firstChild.data
            countries[code] = name.title()
    else:
        log.debug('Opening country code CSV: {0}'.format(
            str(data_dir) + '/data/iso_3166-1.csv'))
        f = io.open(str(data_dir) + '/data/iso_3166-1.csv', 'r',
                    encoding='utf-8')
        csv_reader = csv.reader(f, delimiter=',', quotechar='"')
        for row in csv_reader:
            code = row[0]
            name = row[1]
            countries[code] = name

    return countries
The function to generate a dictionary containing ISO_3166-1 country codes to names. Args: is_legacy_xml (:obj:`bool`): Whether to use the older country code list (iso_3166-1_list_en.xml). Returns: dict: A mapping of country codes as the keys to the country names as the values.
codesearchnet
def _on_connection_close(self, connection, reply_code_or_reason, reply_text=None):
    self._channel = None

    if isinstance(reply_code_or_reason, pika_errs.ConnectionClosed):
        reply_code = reply_code_or_reason.reply_code
        reply_text = reply_code_or_reason.reply_text
    elif isinstance(reply_code_or_reason, int):
        reply_code = reply_code_or_reason
    else:
        reply_code = 0
        reply_text = str(reply_code_or_reason)

    if reply_code == 200:
        # Normal shutdown, exit the IO loop.
        _log.info("Server connection closed (%s), shutting down", reply_text)
        connection.ioloop.stop()
    else:
        _log.warning(
            "Connection to %s closed unexpectedly (%d): %s",
            connection.params.host, reply_code, reply_text,
        )
        self.call_later(1, self.reconnect)
Callback invoked when a previously-opened connection is closed. Args: connection (pika.connection.SelectConnection): The connection that was just closed. reply_code_or_reason (int|Exception): The reason why the channel was closed. In older versions of pika, this is the AMQP code. reply_text (str): The human-readable reason the connection was closed (only in older versions of pika)
juraj-google-style
def Evaluate(self, client_obj):
    if self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ALL:
        quantifier = all
    elif self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ANY:
        quantifier = any
    else:
        raise ValueError("Unexpected match mode value: %s" % self.match_mode)

    return quantifier(rule.Evaluate(client_obj) for rule in self.rules)
Evaluates rules held in the rule set. Args: client_obj: Either an aff4 client object or a client_info dict as returned by ReadFullInfoClient if the relational db is used for reading. Returns: A bool value of the evaluation. Raises: ValueError: The match mode is of unknown value.
juraj-google-style
def writelines(self, lines, sep=b'\n', echo=None):
    self.write(sep.join(lines + [b'']), echo)
Write a list of byte sequences to the channel and terminate them with a separator (line feed). Args: lines(list of bytes): The lines to send. sep(bytes): The separator to use after each line. echo(bool): Whether to echo the written data to stdout. Raises: EOFError: If the channel was closed before all data was sent.
juraj-google-style
def slice_vec(expr, start, stop):
    weld_obj = WeldObject(encoder_, decoder_)

    expr_var = weld_obj.update(expr)
    if isinstance(expr, WeldObject):
        expr_var = expr.obj_id
        weld_obj.dependencies[expr_var] = expr

    weld_template = '\n slice(%(expr)s, %(start)sL, %(stop)sL)\n '
    weld_obj.weld_code = weld_template % {
        'expr': expr_var, 'start': start, 'stop': stop}
    return weld_obj
Slices the vector. Args: expr (WeldObject) start (Long) stop (Long)
codesearchnet
def _radix_int_handler_factory(radix_indicators, charset, parse_func):
    def assertion(c, ctx):
        # A radix indicator is only legal directly after '0' or '-0'.
        return (c in radix_indicators and
                ((len(ctx.value) == 1 and ctx.value[0] == _ZERO) or
                 (len(ctx.value) == 2 and ctx.value[0] == _MINUS and
                  ctx.value[1] == _ZERO)) and
                ctx.ion_type == IonType.INT)

    return _numeric_handler_factory(
        charset,
        lambda prev, c, ctx, trans: _illegal_character(c, ctx),
        assertion, radix_indicators, parse_func,
        illegal_at_end=radix_indicators)
Generates a handler co-routine which tokenizes an integer of a particular radix. Args: radix_indicators (sequence): The set of ordinals of characters that indicate the radix of this int. charset (sequence): Set of ordinals of legal characters for this radix. parse_func (callable): Called upon ending the numeric value. Accepts the current token value and returns a thunk that lazily parses the token.
codesearchnet
def get_content_type(content_type):
    m = email.message.Message()
    m['Content-Type'] = content_type
    return m.get_content_type()
Extract the MIME type value from a content type string. Removes any subtype and parameter values that may be present in the string. Args: content_type: str String with content type and optional subtype and parameter fields. Returns: str: String with only content type Example: :: Input: multipart/form-data; boundary=aBoundaryString Returns: multipart/form-data
juraj-google-style
def GetAdGroups(self, client_customer_id, campaign_id):
    self.client.SetClientCustomerId(client_customer_id)
    selector = {
        'fields': ['Id', 'Name', 'Status'],
        'predicates': [
            {
                'field': 'CampaignId',
                'operator': 'EQUALS',
                'values': [campaign_id]
            },
            {
                'field': 'Status',
                'operator': 'NOT_EQUALS',
                'values': ['REMOVED']
            }
        ]
    }
    adgroups = self.client.GetService('AdGroupService').get(selector)
    if int(adgroups['totalNumEntries']) > 0:
        return adgroups['entries']
    else:
        return None
Retrieves all AdGroups for the given campaign that haven't been removed. Args: client_customer_id: str Client Customer Id being used in API request. campaign_id: str id of the campaign for which to fetch ad groups. Returns: list List of AdGroup data objects.
juraj-google-style
def available_partitions_for_topic(self, topic):
    if topic not in self._partitions:
        return None
    return set([partition
                for partition, metadata in six.iteritems(self._partitions[topic])
                if metadata.leader != -1])
Return set of partitions with known leaders Arguments: topic (str): topic to check for partitions Returns: set: {partition (int), ...} None if topic not found.
juraj-google-style
def copy(self, dest):
    if os.path.isfile(self.path):
        shutil.copy2(self.path, dest)
    else:
        shutil.copytree(self.path, dest, symlinks=False, ignore=None)
Copy item to the given `dest` path. Args: * dest: destination path to copy.
juraj-google-style
def get_voigt_dict(rank):
    vdict = {}
    for ind in itertools.product(*([range(3)] * rank)):
        # Odd ranks keep one leading full index; pairs are Voigt-collapsed.
        v_ind = ind[:rank % 2]
        for j in range(rank // 2):
            pos = rank % 2 + 2 * j
            v_ind += (reverse_voigt_map[ind[pos:pos + 2]],)
        vdict[ind] = v_ind
    return vdict
Returns a dictionary that maps indices in the tensor to those in a voigt representation based on input rank Args: rank (int): Tensor rank to generate the voigt map
codesearchnet
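For rank 2 this reproduces the standard Voigt convention; a sketch, assuming pymatgen's reverse_voigt_map is in scope:

# (0,0)->0, (1,1)->1, (2,2)->2, (1,2)->3, (0,2)->4, (0,1)->5
vmap = get_voigt_dict(2)
assert vmap[(0, 0)] == (0,)
assert vmap[(1, 2)] == (3,)
assert vmap[(0, 1)] == (5,)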
def search_by_age(cls, *, limit=100, page=1, accounts=None, locations=None,
                  age=720, properties=None, include_disabled=False):
    qry = cls.search(limit=limit, page=page, accounts=accounts,
                     locations=locations, properties=properties,
                     include_disabled=include_disabled, return_query=True)

    # Join on the launch_date property and keep resources older than `age` days.
    age_alias = aliased(ResourceProperty)
    qry = qry.join(
        age_alias, Resource.resource_id == age_alias.resource_id
    ).filter(
        age_alias.name == 'launch_date',
        cast(func.JSON_UNQUOTE(age_alias.value), DATETIME) <
        (datetime.now() - timedelta(days=age))
    )

    total = qry.count()
    qry = qry.limit(limit)
    qry = qry.offset((page - 1) * limit if page > 1 else 0)

    return total, [cls(x) for x in qry.all()]
Search for resources based on the provided filters Args: limit (`int`): Number of results to return. Default: 100 page (`int`): Pagination offset for results. Default: 1 accounts (`list` of `int`): A list of account id's to limit the returned resources to locations (`list` of `str`): A list of locations as strings to limit the search for age (`int`): Age of instances older than `age` days to return properties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list of strings, in which case a boolean OR search is performed on the values include_disabled (`bool`): Include resources from disabled accounts. Default: False Returns: `list` of `Resource`
codesearchnet
def handle_new_task(self, task_name, record):
    record.msg = ColorFormatter.colored('default', START_TASK_MSG)
    record.task = task_name
    self.tasks[task_name] = Task(name=task_name, maxlen=self.buffer_size)
    if self.should_show_by_depth():
        self.pretty_emit(record, is_header=True)
Do everything needed when a task is starting Params: task_name (str): name of the task that is starting record (logging.LogRecord): log record with all the info Returns: None
codesearchnet
def add_prefix(self, name, *args, **kwargs):
    if os.path.exists(self.join(name)):
        raise LagoPrefixAlreadyExistsError(name, self.path)

    self.prefixes[name] = self.prefix_class(self.join(name), *args, **kwargs)
    self.prefixes[name].initialize()
    if self.current is None:
        self.set_current(name)

    return self.prefixes[name]
Adds a new prefix to the workdir. Args: name(str): Name of the new prefix to add *args: args to pass along to the prefix constructor *kwargs: kwargs to pass along to the prefix constructor Returns: The newly created prefix Raises: LagoPrefixAlreadyExistsError: if prefix name already exists in the workdir
codesearchnet
def set_all_ylims(self, ylim, dy, yscale, fontsize=None):
    self._set_all_lims('y', ylim, dy, yscale, fontsize)
    return
Set limits and ticks for y axis for whole figure. This will set y axis limits and tick marks for the entire figure. It can be overridden in the SinglePlot class. Args: ylim (len-2 list of floats): The limits for the axis. dy (float): Amount to increment by between the limits. yscale (str): Scale of the axis. Either `log` or `lin`. fontsize (int, optional): Set fontsize for y axis tick marks. Default is None.
codesearchnet
def filter_params(self, fn, override=None):
    override = override or {}
    result = {}
    for name, value in self.target_params.items():
        if has_arg(fn, name):
            result.update({name: value})
    result.update(override)
    return result
Filters `target_params` and return those in `fn`'s arguments. Args: fn : arbitrary function override: dict, values to override target_params Returns: result : dict, dictionary containing variables in both target_params and fn's arguments.
juraj-google-style
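A self-contained sketch of what filter_params does, with a minimal stand-in for the has_arg helper:

import inspect

def has_arg(fn, name):
    # Minimal stand-in: does fn's signature accept a parameter called `name`?
    return name in inspect.signature(fn).parameters

def build_model(units, dropout=0.0):
    return ('model', units, dropout)

target_params = {'units': 64, 'dropout': 0.5, 'epochs': 10}
kept = {k: v for k, v in target_params.items() if has_arg(build_model, k)}
print(kept)  # {'units': 64, 'dropout': 0.5} -- 'epochs' is filtered out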
def page_format(self, topmargin, bottommargin):
    # Margins are sent as two-byte little-endian values (low byte, high byte).
    tL = topmargin % 256
    tH = topmargin // 256
    BL = bottommargin % 256
    BH = bottommargin // 256
    if (tL + tH * 256) < (BL + BH * 256):
        self.send(chr(27) + '(' + 'c' + chr(4) + chr(0) +
                  chr(tL) + chr(tH) + chr(BL) + chr(BH))
    else:
        raise RuntimeError('The top margin must be less than the bottom margin')
Specify settings for top and bottom margins. Physically printable area depends on media. Args: topmargin: the top margin, in dots. The top margin must be less than the bottom margin. bottommargin: the bottom margin, in dots. The bottom margin must be greater than the top margin. Returns: None Raises: RuntimeError: Top margin must be less than the bottom margin.
juraj-google-style
def blocking_save(self, query_dict=None, meta=None, index_fields=None):
    query_dict = query_dict or {}
    for query in query_dict:
        self.setattr(query, query_dict[query])
    self.save(meta=meta, index_fields=index_fields)
    # Poll until the backend has indexed the newly saved object.
    while not self.objects.filter(key=self.key, **query_dict).count():
        time.sleep(0.3)
    return self
Saves object to DB. Waits till the backend properly indexes the new object. Args: query_dict(dict) : contains keys - values of the model fields meta (dict): JSON serializable meta data for logging of save operation. {'lorem': 'ipsum', 'dolar': 5} index_fields (list): Tuple list for indexing keys in riak (with 'bin' or 'int'). bin is used for string fields, int is used for integer fields. [('lorem','bin'),('dolar','int')] Returns: Model instance.
codesearchnet
def add_alias(self, alias, index):
    if index >= len(self._datasets):
        raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))
    self._aliases[alias] = index
Add an alias pointing to the specified index. Args: alias (str): The alias that should point to the given index. index (int): The index of the dataset for which an alias should be added. Raises: DataInvalidIndex: If the index does not represent a valid dataset.
codesearchnet
def write_alias_config_hash(alias_config_hash='', empty_hash=False):
    with open(GLOBAL_ALIAS_HASH_PATH, 'w') as alias_config_hash_file:
        alias_config_hash_file.write('' if empty_hash else alias_config_hash)
Write self.alias_config_hash to the alias hash file. Args: empty_hash: True if we want to write an empty string into the file. Empty string in the alias hash file means that we have to perform a full load of the command table in the next run.
codesearchnet
def Current():
    if os.name == 'nt':
        return OperatingSystem.WINDOWS
    elif 'linux' in sys.platform:
        return OperatingSystem.LINUX
    elif 'darwin' in sys.platform:
        return OperatingSystem.MACOSX
    elif 'cygwin' in sys.platform:
        return OperatingSystem.CYGWIN
    elif 'msys' in sys.platform:
        return OperatingSystem.MSYS
    return None
Determines the current operating system. Returns: OperatingSystemTuple, One of the OperatingSystem constants or None if it cannot be determined.
github-repos
def _create_query(node, context):
    visited_nodes = [node]
    output_columns = _get_output_columns(visited_nodes, context)
    filters = _get_filters(visited_nodes, context)
    selectable = sql_context_helpers.get_node_selectable(node, context)
    query = select(output_columns).select_from(selectable).where(and_(*filters))
    return query
Create a query from a SqlNode. Args: node: SqlNode, the current node. context: CompilationContext, global compilation state and metadata. Returns: Selectable, selectable of the generated query.
codesearchnet
def access(self, path, mode, dir_fd=None, follow_symlinks=None):
    if follow_symlinks is not None and sys.version_info < (3, 3):
        raise TypeError(
            "access() got an unexpected keyword argument 'follow_symlinks'")
    path = self._path_with_dir_fd(path, self.access, dir_fd)
    try:
        stat_result = self.stat(path, follow_symlinks=follow_symlinks)
    except OSError as os_error:
        if os_error.errno == errno.ENOENT:
            return False
        raise
    if is_root():
        mode &= ~os.W_OK
    # Compare against the owner permission bits.
    return (mode & ((stat_result.st_mode >> 6) & 7)) == mode
Check if a file exists and has the specified permissions. Args: path: (str) Path to the file. mode: (int) Permissions represented as a bitwise-OR combination of os.F_OK, os.R_OK, os.W_OK, and os.X_OK. dir_fd: If not `None`, the file descriptor of a directory, with `path` being relative to this directory. New in Python 3.3. follow_symlinks: (bool) If `False` and `path` points to a symlink, the link itself is queried instead of the linked object. New in Python 3.3. Returns: bool, `True` if file is accessible, `False` otherwise.
codesearchnet
def to_number(result_type, value, default=None, minimum=None, maximum=None):
    try:
        return capped(result_type(value), minimum, maximum)
    except (TypeError, ValueError):
        return default
Cast `value` to numeric `result_type` if possible Args: result_type (type): Numerical type to convert to (one of: int, float, ...) value (str | unicode): Value to convert default (result_type.__class__ | None): Default to use if `value` can't be turned into a number minimum (result_type.__class__ | None): If specified, result can't be below this minimum maximum (result_type.__class__ | None): If specified, result can't be above this maximum Returns: Corresponding numeric value
codesearchnet
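A runnable sketch of to_number, with a minimal stand-in for the capped() helper it calls:

def capped(value, minimum=None, maximum=None):
    # Clamp value into [minimum, maximum]; None bounds are ignored.
    if minimum is not None and value < minimum:
        return minimum
    if maximum is not None and value > maximum:
        return maximum
    return value

print(to_number(int, "12"))                   # 12
print(to_number(int, "12", maximum=10))       # 10 (capped)
print(to_number(float, "oops", default=0.0))  # 0.0 (fallback)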
def op(name, data, display_name=None, description=None, collections=None):
    import tensorflow.compat.v1 as tf

    if display_name is None:
        display_name = name
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name, description=description)
    with tf.name_scope(name):
        with tf.control_dependencies([tf.assert_scalar(data)]):
            return tf.summary.tensor_summary(
                name='scalar_summary',
                tensor=tf.cast(data, tf.float32),
                collections=collections,
                summary_metadata=summary_metadata)
Create a legacy scalar summary op. Arguments: name: A unique name for the generated summary node. data: A real numeric rank-0 `Tensor`. Must have `dtype` castable to `float32`. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. Returns: A TensorFlow summary op.
codesearchnet
def _copy(src, dst, src_is_storage, dst_is_storage):
    # If both source and destination are on storage, try server-side copies
    # before falling back to a client-side stream copy.
    if src_is_storage and dst_is_storage:
        system_src = get_instance(src)
        system_dst = get_instance(dst)

        if system_src is system_dst:
            if system_src.relpath(src) == system_dst.relpath(dst):
                raise same_file_error(
                    "'%s' and '%s' are the same file" % (src, dst))
            try:
                return system_dst.copy(src, dst)
            except (UnsupportedOperation, ObjectException):
                pass

        for caller, called, method in (
                (system_dst, system_src, 'copy_from_%s'),
                (system_src, system_dst, 'copy_to_%s')):
            if hasattr(caller, method % called.storage):
                try:
                    return getattr(caller, method % called.storage)(
                        src, dst, called)
                except (UnsupportedOperation, ObjectException):
                    continue

    # Generic fallback: stream from source to destination.
    with cos_open(src, 'rb') as fsrc:
        with cos_open(dst, 'wb') as fdst:
            for stream in (fsrc, fdst):
                try:
                    buffer_size = getattr(stream, '_buffer_size')
                    break
                except AttributeError:
                    continue
            else:
                buffer_size = COPY_BUFSIZE
            copyfileobj(fsrc, fdst, buffer_size)
Copies file from source to destination Args: src (str or file-like object): Source file. dst (str or file-like object): Destination file. src_is_storage (bool): Source is storage. dst_is_storage (bool): Destination is storage.
juraj-google-style
def node_inputs(self, node_name, is_control=False, device_name=None):
    if not self._debug_graphs:
        raise LookupError('Node inputs are not loaded from partition graphs yet.')
    device_name = self._infer_device_name(device_name, node_name)
    if is_control:
        return self._debug_graphs[device_name].node_ctrl_inputs[node_name]
    else:
        return self._debug_graphs[device_name].node_inputs[node_name]
Get the inputs of given node according to partition graphs. Args: node_name: Name of the node. is_control: (`bool`) Whether control inputs, rather than non-control inputs, are to be returned. device_name: (`str`) name of the device. If there is only one device or if node_name exists on only one device, this argument is optional. Returns: (`list` of `str`) inputs to the node, as a list of node names. Raises: LookupError: If node inputs and control inputs have not been loaded from partition graphs yet.
github-repos
def set_doc_ids(self, doc_ids):
    if isinstance(doc_ids, list):
        self.set_documents(dict.fromkeys(doc_ids))
    else:
        self.set_documents({doc_ids: None})
Build xml documents from a list of document ids. Args: doc_ids -- A document id or a list of those.
codesearchnet
def from_file(cls, weafile, timestep=1, is_leap_year=False):
    assert os.path.isfile(weafile), 'Failed to find {}'.format(weafile)
    location = Location()
    with open(weafile, readmode) as weaf:
        first_line = weaf.readline()
        assert first_line.startswith('place'), \
            'Failed to find place in header. ' \
            '{} is not a valid wea file.'.format(weafile)
        location.city = ' '.join(first_line.split()[1:])
        location.latitude = float(weaf.readline().split()[-1])
        location.longitude = -float(weaf.readline().split()[-1])
        location.time_zone = -int(weaf.readline().split()[-1]) / 15
        location.elevation = float(weaf.readline().split()[-1])
        weaf.readline()  # skip the units/timestep header line

        direct_normal_irradiance = []
        diffuse_horizontal_irradiance = []
        for line in weaf:
            dirn, difh = [int(v) for v in line.split()[-2:]]
            direct_normal_irradiance.append(dirn)
            diffuse_horizontal_irradiance.append(difh)

    return cls.from_values(location, direct_normal_irradiance,
                           diffuse_horizontal_irradiance, timestep,
                           is_leap_year)
Create wea object from a wea file. Args: weafile: Full path to wea file. timestep: An optional integer to set the number of time steps per hour. Default is 1 for one value per hour. If the wea file has a time step smaller than an hour adjust this input accordingly. is_leap_year: A boolean to indicate if values are representing a leap year. Default is False.
juraj-google-style
def get_extana_led(self, cached=True):
    if cached and self.led_state is not None:
        return self.led_state

    extana_led = self.get_characteristic_handle_from_uuid(UUID_EXTANA_LED)
    if extana_led is None:
        logger.warn('Failed to find handle for ExtAna LED')
        return None

    rgb = self.dongle._read_attribute(self.conn_handle, extana_led, israw=True)
    if rgb is None:
        return rgb

    # Scale the raw 16-bit channel values down to the 0-255 range.
    return list(map(lambda x: int(x * (LED_MAX / INT_LED_MAX)),
                    struct.unpack('<HHH', rgb)))
Returns the current (R, G, B) colour of the SK8-ExtAna LED. Args: cached (bool): if True, returns the locally cached state of the LED (based on the last call to :meth:`set_extana_led`). Otherwise query the device for the current state. Returns: a 3-tuple (r, g, b) (all unsigned integers) in the range 0-255, or `None` on error.
juraj-google-style
def depth_august_average_ground_temperature(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float '
                'for field `depth_august_average_ground_temperature`'.format(value))
    self._depth_august_average_ground_temperature = value
Corresponds to IDD Field `depth_august_average_ground_temperature` Args: value (float): value for IDD Field `depth_august_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def split(self, amount):
    split_objs = list(self.all())
    if not split_objs:
        raise NoSplitsFoundForRecurringCost()

    portions = [split_obj.portion for split_obj in split_objs]
    split_amounts = ratio_split(amount, portions)
    return [
        (split_objs[i], split_amount)
        for i, split_amount in enumerate(split_amounts)
    ]
Split the value given by amount according to the RecurringCostSplit's portions Args: amount (Decimal): Returns: list[(RecurringCostSplit, Decimal)]: A list with elements in the form (RecurringCostSplit, Decimal)
juraj-google-style
def process_action(resource, action, action_issuer='unknown'):
    from cinq_collector_aws import AWSRegionCollector

    func_action = action_mapper[resource.resource_type][action]
    extra_info = {}
    action_status = ActionStatus.UNKNOWN

    if func_action:
        # Lambda actions run through a dedicated collector account/region.
        if action_mapper[resource.resource_type]['service_name'] == 'lambda':
            client = get_aws_session(
                AWSAccount.get(dbconfig.get('rds_collector_account',
                                            AWSRegionCollector.ns, ''))
            ).client('lambda',
                     dbconfig.get('rds_collector_region',
                                  AWSRegionCollector.ns, ''))
        else:
            client = get_aws_session(AWSAccount(resource.account)).client(
                action_mapper[resource.resource_type]['service_name'],
                region_name=resource.location)
        try:
            logger.info(f'Trying to {action} resource {resource.id} '
                        f'for account {resource.account.account_name} '
                        f'/ region {resource.location}')
            action_status, extra_info = func_action(client, resource)
            Enforcement.create(resource.account.account_id, resource.id,
                               action, datetime.now(), extra_info)
        except Exception as ex:
            action_status = ActionStatus.FAILED
            logger.exception('Failed to apply action {} to {}: {}'.format(
                action, resource.id, ex))
        finally:
            auditlog(
                event='{}.{}.{}.{}'.format(action_issuer,
                                           resource.resource_type,
                                           action, action_status),
                actor=action_issuer,
                data={
                    'resource_id': resource.id,
                    'account_name': resource.account.account_name,
                    'location': resource.location,
                    'info': extra_info
                })
        return action_status
    else:
        logger.error('Failed to apply action {} to {}: Not supported'.format(
            action, resource.id))
        return ActionStatus.FAILED
Process an audit action for a resource, if possible Args: resource (:obj:`Resource`): A resource object to perform the action on action (`str`): Type of action to perform (`kill` or `stop`) action_issuer (`str`): The issuer of the action Returns: `ActionStatus`
codesearchnet
def accept_confirm(self, text=None, wait=None):
    with self.driver.accept_modal("confirm", text=text, wait=wait):
        yield
Execute the wrapped code, accepting a confirm. Args: text (str | RegexObject, optional): Text to match against the text in the modal. wait (int | float, optional): Maximum time to wait for the modal to appear after executing the wrapped code. Raises: ModalNotFound: If a modal dialog hasn't been found.
juraj-google-style
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    local_stream = utils.BytearrayStream()

    if self._unique_identifier:
        self._unique_identifier.write(local_stream, kmip_version=kmip_version)
    if self._cryptographic_parameters:
        self._cryptographic_parameters.write(local_stream, kmip_version=kmip_version)
    if self._data:
        self._data.write(local_stream, kmip_version=kmip_version)
    else:
        raise ValueError('invalid payload missing the data attribute')
    if self._iv_counter_nonce:
        self._iv_counter_nonce.write(local_stream, kmip_version=kmip_version)

    self.length = local_stream.length()
    super(DecryptRequestPayload, self).write(output_stream, kmip_version=kmip_version)
    output_stream.write(local_stream.buffer)
Write the data encoding the Decrypt request payload to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is not defined.
codesearchnet
def send_put(self, mri, attribute_name, value):
    q = Queue()
    request = Put(
        path=[mri, attribute_name, "value"],
        value=value)
    request.set_callback(q.put)
    IOLoopHelper.call(self._send_request, request)

    response = q.get()
    if isinstance(response, Error):
        raise response.message
    else:
        return response.value
Abstract method to dispatch a Put to the server Args: mri (str): The mri of the Block attribute_name (str): The name of the Attribute within the Block value: The value to put
juraj-google-style
def go_in(self, vertex):
    if self.vertex_in:
        self.vertex_in.edges_in.remove(self)
    self.vertex_in = vertex
    vertex.edges_in.add(self)
Tell the edge to go into this vertex. Args: vertex (Vertex): vertex to go into.
codesearchnet
def make_selector(value):
    if is_callable(value):
        return value
    if is_string(value):
        return a_(value)
    raise ValueError("Unable to create callable selector from '{0}'".format(value))
Create a selector callable from the supplied value. Args: value: If is a callable, then returned unchanged. If a string is used then create an attribute selector. If in an integer is used then create a key selector. Returns: A callable selector based on the supplied value. Raises: ValueError: If a selector cannot be created from the value.
juraj-google-style
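A usage sketch, with minimal stand-ins for the asq-style helpers the function assumes (is_callable, is_string, a_):

is_callable = callable
is_string = lambda v: isinstance(v, str)
a_ = lambda name: (lambda obj: getattr(obj, name))  # attribute selector

class User:
    def __init__(self, name):
        self.name = name

get_name = make_selector('name')   # string -> attribute selector
print(get_name(User('ada')))       # 'ada'
print(make_selector(len) is len)   # True: callables pass through unchanged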
def _parse_line(self, line_no, line):
    try:
        matched = statement.parseString(line)
    except ParseException as exc:
        raise DataError("Error parsing line in TileBus file",
                        line_number=line_no, column=exc.col, contents=line)

    if 'symbol' in matched:
        self._parse_cmd(matched)
    elif 'filename' in matched:
        self._parse_include(matched)
    elif 'variable' in matched:
        self._parse_assignment(matched)
    elif 'configvar' in matched:
        self._parse_configvar(matched)
Parse a line in a TileBus file Args: line_no (int): The line number for printing useful error messages line (string): The line that we are trying to parse
juraj-google-style
def __init__(self, identifier, configuration):
    super(GuppyMemoryProfiler, self).__init__()
    self._identifier = identifier
    self._path = configuration.directory
    self._profiling_sample = 0
    self._profiling_sample_rate = configuration.sample_rate
    self._heapy = None
    self._sample_file = '{0!s}.hpy'.format(identifier)

    if self._path:
        self._sample_file = os.path.join(self._path, self._sample_file)

    if hpy:
        self._heapy = hpy()
Initializes a memory profiler. Args: identifier (str): unique name of the profile. configuration (ProfilingConfiguration): profiling configuration.
juraj-google-style
def _close_rpc_interface(self, connection_id, callback):
    try:
        context = self.connections.get_context(connection_id)
    except ArgumentError:
        callback(connection_id, self.id, False,
                 "Could not find connection information")
        return

    self.connections.begin_operation(connection_id, 'close_interface',
                                     callback,
                                     self.get_config('default_timeout'))
    try:
        service = context['services'][TileBusService]
        header_characteristic = service[ReceiveHeaderChar]
        payload_characteristic = service[ReceivePayloadChar]
    except KeyError:
        self.connections.finish_operation(
            connection_id, False,
            "Can't find characteristics to open rpc interface")
        return

    self.bable.set_notification(
        enabled=False,
        connection_handle=context['connection_handle'],
        characteristic=header_characteristic,
        on_notification_set=[self._on_interface_closed, context,
                             payload_characteristic],
        timeout=1.0
    )
Disable RPC interface for this IOTile device Args: connection_id (int): The unique identifier for the connection callback (callback): Callback to be called when this command finishes callback(conn_id, adapter_id, success, failure_reason)
juraj-google-style
def rename(df, **kwargs):
    return df.rename(columns={v: k for k, v in kwargs.items()})
Renames columns, where keyword argument values are the current names of columns and keys are the new names. Args: df (:obj:`pandas.DataFrame`): DataFrame passed in via `>>` pipe. Kwargs: **kwargs: key:value pairs where keys are new names for columns and values are current names of columns.
juraj-google-style
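A runnable example with pandas (the column names are illustrative):

import pandas as pd

df = pd.DataFrame({'old_price': [1, 2], 'qty': [3, 4]})
renamed = rename(df, price='old_price')  # new_name=current_name
print(list(renamed.columns))             # ['price', 'qty']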
def __init__(self, name, value):
    acceptable_types = [basestring, bool, list, int]
    acceptable = False
    for acceptable_type in acceptable_types:
        if isinstance(value, acceptable_type):
            acceptable = True
            if acceptable_type == bool:
                logger.debug("Converting parameter %s boolean '%s' "
                             "to string.", name, value)
                value = str(value).lower()
                break
            if acceptable_type == int:
                logger.debug("Converting parameter %s integer '%s' "
                             "to string.", name, value)
                value = str(value)
                break
    if not acceptable:
        raise ValueError(
            "CFNParameter (%s) value must be one of %s got: %s" % (
                name, "str, int, bool, or list", value))
    self.name = name
    self.value = value
Wrapper around a value to indicate a CloudFormation Parameter. Args: name (str): the name of the CloudFormation Parameter value (str, list, int or bool): the value we're going to submit as a CloudFormation Parameter.
juraj-google-style
def _RemoveIllegalXMLCharacters(self, xml_string):
    if not isinstance(xml_string, py2to3.STRING_TYPES):
        return xml_string
    return self._ILLEGAL_XML_RE.sub('\ufffd', xml_string)
Removes illegal characters for XML. If the input is not a string it will be returned unchanged. Args: xml_string (str): XML with possible illegal characters. Returns: str: XML where all illegal characters have been removed.
codesearchnet
def __wizard(rho, epsilon=None):
    if epsilon is None:
        epsilon = 0.0
    dim = len(rho)
    rho_wizard = np.zeros([dim, dim])
    v, w = np.linalg.eigh(rho)
    for j in range(dim):
        if v[j] < epsilon:
            # Clip the small/negative eigenvalue to zero and redistribute
            # its weight evenly across the remaining (larger) eigenvalues.
            tmp = v[j]
            v[j] = 0.0
            x = 0.0
            for k in range(j + 1, dim):
                x += tmp / (dim - (j + 1))
                v[k] = v[k] + tmp / (dim - (j + 1))
    # Rebuild the operator from the adjusted spectrum.
    for j in range(dim):
        rho_wizard = rho_wizard + v[j] * outer(w[:, j])
    return rho_wizard
Returns the nearest positive semidefinite operator to an operator. This method is based on reference [1]. It constrains positivity by setting negative eigenvalues to zero and rescaling the positive eigenvalues. Args: rho (array_like): the input operator. epsilon (float or None): threshold (>=0) for truncating small eigenvalues to zero. Returns: numpy.array: A positive semidefinite numpy array.
codesearchnet
def send_graph_tracebacks(destinations, run_key, origin_stack, graph,
                          send_source=True):
    _send_call_tracebacks(destinations, origin_stack,
                          is_eager_execution=False, call_key=run_key,
                          graph=graph, send_source=send_source)
Send the tracebacks of a graph execution call to debug server(s). Args: destinations: gRPC destination addresses, a `str` or a `list` of `str`s, e.g., "localhost:4242". If a `list`, gRPC requests containing the same `CallTraceback` proto payload will be sent to all the destinations. run_key: A string describing the feeds, fetches (and targets) names of the `tf.Session.run` call. origin_stack: The traceback of the `tf.Session.run()` invocation. graph: A Python `tf.Graph` object (i.e., *not* a `tf.compat.v1.GraphDef`), which contains op tracebacks. send_source: Whether the source files involved in the op tracebacks but outside the TensorFlow library are to be sent.
github-repos
def sample_shape_tensor(self, name="sample_shape_tensor"): with tf.compat.v1.name_scope(name): if isinstance(self._sample_shape, tf.Tensor): return self._sample_shape return tf.convert_to_tensor( value=self.sample_shape.as_list(), dtype=tf.int32)
Sample shape of random variable as a 1-D `Tensor`. Args: name: name to give to the op Returns: sample_shape: `Tensor`.
juraj-google-style
def _GetPathSegmentIndexForSimilarityWeights(self, similarity_weights,
                                             occurrence_weights,
                                             value_weights):
    largest_weight = similarity_weights.GetLargestWeight()
    if largest_weight > 0:
        similarity_weight_indexes = similarity_weights.GetIndexesForWeight(
            largest_weight)
        number_of_similarity_indexes = len(similarity_weight_indexes)
    else:
        number_of_similarity_indexes = 0

    path_segment_index = None
    if number_of_similarity_indexes == 0:
        path_segment_index = self._GetPathSegmentIndexForOccurrenceWeights(
            occurrence_weights, value_weights)
    elif number_of_similarity_indexes == 1:
        path_segment_index = similarity_weight_indexes[0]
    else:
        # Tie-break between equally similar segments using occurrence and
        # value weights.
        largest_weight = 0
        largest_value_weight = 0

        for similarity_index in similarity_weight_indexes:
            occurrence_weight = occurrence_weights.GetWeightForIndex(
                similarity_index)

            if largest_weight > 0 and largest_weight == occurrence_weight:
                value_weight = value_weights.GetWeightForIndex(similarity_index)
                if largest_value_weight < value_weight:
                    largest_weight = 0

            if not path_segment_index or largest_weight < occurrence_weight:
                largest_weight = occurrence_weight
                path_segment_index = similarity_index
                largest_value_weight = value_weights.GetWeightForIndex(
                    similarity_index)

    return path_segment_index
Retrieves the index of the path segment based on similarity weights. Args: similarity_weights: the similarity weights object (instance of _PathSegmentWeights). occurrence_weights: the occurrence weights object (instance of _PathSegmentWeights). value_weights: the value weights object (instance of _PathSegmentWeights). Returns: An integer containing the path segment index.
codesearchnet
def __getitem__(self, key: Union[int, str]) -> Node: node: Node = None if isinstance(key, int): node = self._nodes.get(key) if isinstance(key, str): node = self._node_name_map.get(key) if node is None: raise IndexError("Invalid key.") return node
Returns the node corresponding to the given key. If the given key is an integer, then the node with the given index will be returned. If the given key is a string, then the node with the given name will be returned. Arguments: key (Union[int, str]): The key that identifies the node to return. Raises: IndexError: If the index is invalid or out of range.
juraj-google-style
def log_coroutine(self, cor, *args, **kwargs):
    if self.stopping:
        raise LoopStoppingError('Could not launch coroutine because '
                                'loop is shutting down: %s' % cor)
    self.start()

    cor = _instaniate_coroutine(cor, args, kwargs)

    def _run_and_log():
        task = self.loop.create_task(cor)
        task.add_done_callback(lambda x: _log_future_exception(x, self._logger))

    if self.inside_loop():
        _run_and_log()
    else:
        self.loop.call_soon_threadsafe(_run_and_log)
Run a coroutine logging any exception raised. This routine will not block until the coroutine is finished nor will it return any result. It will just log if any exception is raised by the coroutine during operation. It is safe to call from both inside and outside the event loop. There is no guarantee on how soon the coroutine will be scheduled. Args: cor (coroutine): The coroutine that we wish to run in the background and wait until it finishes.
codesearchnet
def check_peft_version(min_version: str) -> None:
    if not is_peft_available():
        raise ValueError('PEFT is not installed. Please install it with `pip install peft`')
    is_peft_version_compatible = version.parse(
        importlib.metadata.version('peft')) >= version.parse(min_version)
    if not is_peft_version_compatible:
        raise ValueError(
            f'The version of PEFT you are using is not compatible, '
            f'please use a version that is greater than {min_version}')
Checks if the version of PEFT is compatible. Args: min_version (`str`): The minimum version of PEFT to check against.
github-repos
def divide(x1, x2, output_shape=None, name=None):
    output_shape = convert_to_shape(output_shape)
    if not isinstance(x2, Tensor):
        # Dividing by a scalar reduces to a scalar multiplication.
        return ScalarMultiplyOperation(x1, 1.0 / x2).outputs[0]
    with tf.name_scope(name, default_name='divide'):
        x1, x2 = binary_arguments_to_tensors(x1, x2)
        return multiply(x1, reciprocal(x2), output_shape=output_shape)
Binary division with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
codesearchnet
def Put(self, message, block=True, timeout=1000):
    message = message.SerializeToString()
    if not block:
        if self.Full():
            raise queue.Full
    else:
        t0 = time.time()
        while self.Full():
            time.sleep(1)
            self._heart_beat_cb()
            if time.time() - t0 > timeout:
                raise queue.Full
    with self._lock:
        self._queue.appendleft(message)
        self._total_size += len(message)
Put a message on the queue, blocking if it is too full. Blocks when the queue contains more than the threshold. Args: message: rdf_flows.GrrMessage The message to put. block: bool If True, we block and wait for the queue to have more space. Otherwise, if the queue is full, we raise. timeout: int Maximum time (in seconds, with 1 sec resolution) we spend waiting on the queue. Raises: queue.Full: if the queue is full and block is False, or timeout is exceeded.
codesearchnet
def create_room(self, alias=None, is_public=False, invitees=None):
    response = self.api.create_room(alias=alias, is_public=is_public,
                                    invitees=invitees)
    return self._mkroom(response['room_id'])
Create a new room on the homeserver. Args: alias (str): The canonical_alias of the room. is_public (bool): The public/private visibility of the room. invitees (str[]): A set of user ids to invite into the room. Returns: Room Raises: MatrixRequestError
codesearchnet
def transition_block(x, reduction, name):
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05,
                                  name=name + '_bn')(x)
    x = layers.Activation('relu', name=name + '_relu')(x)
    x = layers.Conv2D(int(x.shape[bn_axis] * reduction), 1, use_bias=False,
                      name=name + '_conv')(x)
    x = layers.AveragePooling2D(2, strides=2, name=name + '_pool')(x)
    return x
A transition block. Args: x: input tensor. reduction: float, compression rate at transition layers. name: string, block label. Returns: Output tensor for the block.
github-repos
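A sketch of wiring transition_block into a Keras model (the input shape is illustrative):

import tensorflow as tf
from tensorflow.keras import layers, backend

inputs = tf.keras.Input(shape=(56, 56, 256))
x = transition_block(inputs, reduction=0.5, name='pool2')
# Channels compress 256 -> 128; spatial size halves to 28x28.
model = tf.keras.Model(inputs, x)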
def get_dimensions(js_dict, naming):
    dimensions = []
    dim_names = []
    if check_version_2(js_dict):
        dimension_dict = js_dict
    else:
        dimension_dict = js_dict['dimension']
    for dim in dimension_dict['id']:
        dim_name = js_dict['dimension'][dim]['label']
        if not dim_name:
            dim_name = dim
        if naming == 'label':
            dim_label = get_dim_label(js_dict, dim)
            dimensions.append(dim_label)
            dim_names.append(dim_name)
        else:
            dim_index = get_dim_index(js_dict, dim)
            dimensions.append(dim_index)
            dim_names.append(dim)
    return dimensions, dim_names
Get dimensions from input data. Args: js_dict (dict): dictionary containing dataset data and metadata. naming (string, optional): dimension naming. Possible values: 'label' or 'id'. Returns: dimensions (list): list of pandas data frames with dimension category data. dim_names (list): list of strings with dimension names.
codesearchnet
def unpatchify(self, patchified_pixel_values,
               original_image_size: Optional[Tuple[int, int]] = None):
    patch_size, num_channels = (self.config.patch_size, self.config.num_channels)
    original_image_size = (
        original_image_size if original_image_size is not None
        else (self.config.image_size, self.config.image_size))
    original_height, original_width = original_image_size
    # The patch grid is the image size divided by the patch size.
    num_patches_h = original_height // patch_size
    num_patches_w = original_width // patch_size
    tf.debugging.assert_equal(
        num_patches_h * num_patches_w,
        shape_list(patchified_pixel_values)[1],
        message=f'The number of patches in the patchified pixel values '
                f'{shape_list(patchified_pixel_values)[1]} does not match the '
                f'patches of original image {num_patches_w}*{num_patches_h}')

    batch_size = shape_list(patchified_pixel_values)[0]
    patchified_pixel_values = tf.reshape(
        patchified_pixel_values,
        (batch_size, num_patches_h, num_patches_w, patch_size, patch_size,
         num_channels))
    patchified_pixel_values = tf.einsum('nhwpqc->nhpwqc', patchified_pixel_values)
    pixel_values = tf.reshape(
        patchified_pixel_values,
        (batch_size, num_patches_h * patch_size, num_patches_w * patch_size,
         num_channels))
    return pixel_values
Args: patchified_pixel_values (`tf.Tensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`: Patchified pixel values. original_image_size (`Tuple[int, int]`, *optional*): Original image size. Returns: `tf.Tensor` of shape `(batch_size, height, width, num_channels)`: Pixel values.
github-repos
def read_until(self, s, echo=None):
    s_len = len(s)
    buf = self.read(s_len, echo)
    while buf[-s_len:] != s:
        buf += self.read(1, echo)
    return buf
Read until a certain string is encountered.. Args: s(bytes): The string to wait for. echo(bool): Whether to write the read data to stdout. Returns: bytes: The data up to and including *s*. Raises: EOFError: If the channel was closed.
codesearchnet
def _vec(A):
    N, m, n = A.shape
    return A.reshape((N, m*n, 1), order='F')
Linear operator _vec() from Wiktorsson2001 p478 Args: A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each interval of time j in 0..N-1 Returns: array of shape N x mn x 1, made by stacking the columns of matrix A[j] on top of each other, for each j in 0..N-1
juraj-google-style
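A worked numpy example of the column stacking performed by _vec:

import numpy as np

A = np.arange(6).reshape(1, 2, 3)  # one 2x3 matrix [[0, 1, 2], [3, 4, 5]]
v = _vec(A)
print(v.shape)       # (1, 6, 1)
print(v[0].ravel())  # [0 3 1 4 2 5] -- columns stacked top to bottom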
def with_transform(self, transform: MLTransformProvider):
    self._validate_transform(transform)
    self.transforms.append(transform)
    return self
Add a transform to the MLTransform pipeline. Args: transform: A BaseOperation instance. Returns: A MLTransform instance.
github-repos
def __init__(self, channel):
    self.Log = channel.unary_unary(
        '/pulumirpc.Engine/Log',
        request_serializer=engine__pb2.LogRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
    )
    self.GetRootResource = channel.unary_unary(
        '/pulumirpc.Engine/GetRootResource',
        request_serializer=engine__pb2.GetRootResourceRequest.SerializeToString,
        response_deserializer=engine__pb2.GetRootResourceResponse.FromString,
    )
    self.SetRootResource = channel.unary_unary(
        '/pulumirpc.Engine/SetRootResource',
        request_serializer=engine__pb2.SetRootResourceRequest.SerializeToString,
        response_deserializer=engine__pb2.SetRootResourceResponse.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def input_vars(self, transitive: bool = False) -> Set[str]:
    input_vars = set()

    def list_var_refs(k, v, p):
        del k, p
        if isinstance(v, Function):
            return pg.TraverseAction.CONTINUE
        if isinstance(v, SymbolReference):
            input_vars.add(v.name)
        return pg.TraverseAction.ENTER

    pg.traverse(self, list_var_refs)

    if transitive:
        parent_func = self.parent_func()
        if parent_func is not None:
            # Walk backwards through earlier lines, resolving the variables
            # they produce into the variables they consume.
            unresolved_vars = input_vars.copy()
            for i in reversed(range(self.line_number())):
                line = parent_func.body[i]
                line_output_vars = line.output_vars()
                if line_output_vars & unresolved_vars:
                    line_input_vars = line.input_vars()
                    input_vars.update(line_input_vars)
                    unresolved_vars -= line_output_vars
                    unresolved_vars.update(line_input_vars)
            assert unresolved_vars.issubset(set(parent_func.args)), unresolved_vars
    return input_vars
Returns the input variables referenced by this code entity. Args: transitive: If True, transitive inputs (pulled in from earlier lines in the parent function) will be included. Returns: A set of variable names.
github-repos
def check_config(config, path):
    messages = []
    config_copy = get_frozen_copy(config)
    missing_keys = set(DEFAULT_CONFIG.keys()) - set(config_copy.keys())
    if missing_keys:
        messages.append("Missing config keys {}!".format(missing_keys))

    for key, value in config_copy.items():
        if key not in DEFAULT_CONFIG:
            messages.append("Unknown key {} in {}!".format(key, path))
            continue
        if value is None:
            messages.append(_VALUE_UNDEFINED_MESSAGE.format(path=path, key=key))
        else:
            value_type = type(value)
            if isinstance(DEFAULT_CONFIG[key], Mapping) and 'by-cot-product' in DEFAULT_CONFIG[key]:
                default_type = type(DEFAULT_CONFIG[key]['by-cot-product'][config['cot_product']])
            else:
                default_type = type(DEFAULT_CONFIG[key])
            if value_type is not default_type:
                messages.append(
                    "{} {}: type {} is not {}!".format(path, key, value_type, default_type)
                )
            if value in ("...", b"..."):
                messages.append(_VALUE_UNDEFINED_MESSAGE.format(path=path, key=key))
        if key in ("provisioner_id", "worker_group", "worker_type", "worker_id") and not _is_id_valid(value):
            messages.append('{} doesn\'t match "{}" (required by Taskcluster)'.format(key, _GENERIC_ID_REGEX.pattern))
    return messages
Validate the config against DEFAULT_CONFIG. Any unknown keys or wrong types will add error messages. Args: config (dict): the running config. path (str): the path to the config file, used in error messages. Returns: list: the error messages found when validating the config.
juraj-google-style
def rename(name, new_name):
    if six.PY2:
        name = _to_unicode(name)
        new_name = _to_unicode(new_name)

    # Load information for the current name.
    current_info = info(name)
    if not current_info:
        raise CommandExecutionError('User \'{0}\' does not exist'.format(name))

    # Look for an existing user with the new name.
    new_info = info(new_name)
    if new_info:
        raise CommandExecutionError(
            'User \'{0}\' already exists'.format(new_name)
        )

    # Rename the user account via WMI.
    with salt.utils.winapi.Com():
        c = wmi.WMI(find_classes=0)
        try:
            user = c.Win32_UserAccount(Name=name)[0]
        except IndexError:
            raise CommandExecutionError('User \'{0}\' does not exist'.format(name))

        result = user.Rename(new_name)[0]
        if not result == 0:
            error_dict = {0: 'Success',
                          1: 'Instance not found',
                          2: 'Instance required',
                          3: 'Invalid parameter',
                          4: 'User not found',
                          5: 'Domain not found',
                          6: 'Operation is allowed only on the primary domain controller of the domain',
                          7: 'Operation is not allowed on the last administrative account',
                          8: 'Operation is not allowed on specified special groups: user, admin, local, or guest',
                          9: 'Other API error',
                          10: 'Internal error'}
            raise CommandExecutionError(
                'There was an error renaming \'{0}\' to \'{1}\'. Error: {2}'
                .format(name, new_name, error_dict[result])
            )

    return info(new_name).get('name') == new_name
Change the username for a named user.

Args:
    name (str): The current name of the user.
    new_name (str): The new name for the user.

Returns:
    bool: True if successful, otherwise False.

CLI Example:

.. code-block:: bash

    salt '*' user.rename jsnuffy jshmoe
juraj-google-style
def get(feature_name): implementations = MetadataExtractor._implementations() try: return implementations[feature_name] except KeyError: raise UnsupportedFeatureException('no MetadataExtractor registered for feature "{feature_name}" (try any of the following: {supported_features})'.format(feature_name=feature_name, supported_features=', '.join(sorted(implementations))))
Returns the MetadataExtractor registered for the provided feature name.

Args:
    feature_name: name of the feature to look up an extractor for.

Returns:
    The MetadataExtractor implementation registered for the feature.

Raises:
    UnsupportedFeatureException: If no extractor exists for the feature name.
codesearchnet
def pooled_sample_variance(sample1, sample2): deg_freedom = len(sample1) + len(sample2) - 2 mean1 = statistics.mean(sample1) squares1 = ((x - mean1) ** 2 for x in sample1) mean2 = statistics.mean(sample2) squares2 = ((x - mean2) ** 2 for x in sample2) return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)
Find the pooled sample variance for two samples. Args: sample1: one sample. sample2: the other sample. Returns: Pooled sample variance, as a float.
juraj-google-style
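A runnable usage example, assuming the function above (and its math/statistics imports) is in scope:

import math
import statistics

sample1 = [10.0, 12.0, 9.0, 11.0]
sample2 = [8.0, 9.5, 10.5]

# (4 + 3 - 2) = 5 degrees of freedom; result is approximately 1.633.
print(pooled_sample_variance(sample1, sample2))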
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
    line = clean_lines.elided[linenum]
    fncall = line
    for pattern in ('\\bif\\s*\\((.*)\\)\\s*{', '\\bfor\\s*\\((.*)\\)\\s*{', '\\bwhile\\s*\\((.*)\\)\\s*[{;]', '\\bswitch\\s*\\((.*)\\)\\s*{'):
        match = Search(pattern, line)
        if match:
            fncall = match.group(1)
            break
    if (not Search('\\b(if|for|while|switch|return|new|delete|catch|sizeof)\\b', fncall) and
            not Search(' \\([^)]+\\)\\([^)]*(\\)|,$)', fncall) and
            not Search(' \\([^)]+\\)\\[[^\\]]+\\]', fncall)):
        if Search('\\w\\s*\\(\\s(?!\\s*\\\\$)', fncall):
            error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call')
        elif Search('\\(\\s+(?!(\\s*\\\\)|\\()', fncall):
            error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (')
        # The '#\s*define|typedef' guard below was truncated in the extracted
        # snippet; reconstructed to match upstream cpplint.
        if (Search('\\w\\s+\\(', fncall) and
                not Search('_{0,2}asm_{0,2}\\s+_{0,2}volatile_{0,2}\\s+\\(', fncall) and
                not Search('#\\s*define|typedef', fncall)):
            if Search('\\boperator_*\\b', line):
                error(filename, linenum, 'whitespace/parens', 0, 'Extra space before ( in function call')
            else:
                error(filename, linenum, 'whitespace/parens', 4, 'Extra space before ( in function call')
        if Search('[^)]\\s+\\)\\s*[^{\\s]', fncall):
            if Search('^\\s+\\)', fncall):
                error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line')
            else:
                error(filename, linenum, 'whitespace/parens', 2, 'Extra space before )')
Checks for the correctness of various spacing around function calls. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
codesearchnet
def __schemas_descriptor(self): result = {} for (schema_key, schema_value) in self.__parser.schemas().iteritems(): field_keys = schema_value.keys() key_result = {} if ('properties' in field_keys): key_result['properties'] = schema_value['properties'].copy() for (prop_key, prop_value) in schema_value['properties'].iteritems(): if ('enum' in prop_value): num_enums = len(prop_value['enum']) key_result['properties'][prop_key]['enumDescriptions'] = ([''] * num_enums) elif ('default' in prop_value): if (prop_value.get('type') == 'boolean'): prop_value['default'] = ('true' if prop_value['default'] else 'false') else: prop_value['default'] = str(prop_value['default']) key_result['properties'][prop_key].pop('required', None) for key in ('type', 'id', 'description'): if (key in field_keys): key_result[key] = schema_value[key] if key_result: result[schema_key] = key_result for schema_value in result.itervalues(): for field_value in schema_value.itervalues(): if isinstance(field_value, dict): if ('$ref' in field_value): field_value['type'] = 'object' return result
Describes the schemas section of the discovery document. Returns: Dictionary describing the schemas of the document.
codesearchnet
def validate(self, read_tuple_name): if reg_lrn.match(read_tuple_name) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_read_tuple_name_structure", message="'{}' is not matched".format(reg_lrn), ) else: parts = read_tuple_name.split("__") if reg_prefix_part.match(parts[0]) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_prefix_part", message="'{}' is not matched".format(reg_prefix_part), ) if reg_id_part.match(parts[1]) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_id_part", message="'{}' is not matched".format(reg_id_part), ) if reg_segmental_part.match(parts[2]) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_segmental_part", message="'{}' is not matched".format(reg_segmental_part), ) if reg_suffix_part.match(parts[3]) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_suffix_part", message="'{}' is not matched".format(reg_suffix_part), ) if not self.rnf_profile.check(read_tuple_name): self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_profile", message="Read has a wrong profile (wrong widths). It should be: {} but it is: {}.".format( self.rnf_profile, rnftools.rnfformat.RnfProfile(read_tuple_name=read_tuple_name), ), warning=True, )
Check RNF validity of a read tuple.

Args:
    read_tuple_name (str): Read tuple name to be checked.
juraj-google-style
def __init__(self, validate_args=False, name="absolute_value"): self._graph_parents = [] self._name = name with self._name_scope("init"): super(AbsoluteValue, self).__init__( forward_min_event_ndims=0, validate_args=validate_args, name=name)
Instantiates the `AbsoluteValue` bijector. Args: validate_args: Python `bool` indicating whether arguments should be checked for correctness, in particular whether inputs to `inverse` and `inverse_log_det_jacobian` are non-negative. name: Python `str` name given to ops managed by this object.
juraj-google-style
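Hedged usage sketch: current releases package this bijector in TensorFlow Probability, and the import below assumes that layout.

import tensorflow_probability as tfp

bijector = tfp.bijectors.AbsoluteValue(validate_args=True)
bijector.forward([-2.0, 3.0])  # -> [2.0, 3.0]
bijector.inverse(2.0)          # -> (-2.0, 2.0): both preimages under |x|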
def create(self, members=(), admins=()): memberObjs = [{'id': '8:{0}'.format(self.skype.userId), 'role': 'Admin'}] for id in members: if (id == self.skype.userId): continue memberObjs.append({'id': '8:{0}'.format(id), 'role': ('Admin' if (id in admins) else 'User')}) resp = self.skype.conn('POST', '{0}/threads'.format(self.skype.conn.msgsHost), auth=SkypeConnection.Auth.RegToken, json={'members': memberObjs}) return self.chat(resp.headers['Location'].rsplit('/', 1)[1])
Create a new group chat with the given users.

The current user is automatically added to the conversation as an admin. Any other admin identifiers must also be present in the member list.

Args:
    members (str list): user identifiers to initially join the conversation
    admins (str list): user identifiers to gain admin privileges

Returns:
    SkypeGroupChat: the newly created group conversation
codesearchnet
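Hypothetical usage sketch: this method is reachable as sk.chats.create in an skpy-style client; the identifiers below are placeholders.

from skpy import Skype

sk = Skype('[email protected]', 'password')
chat = sk.chats.create(members=('joe.4', 'daisy.5'), admins=('joe.4',))
chat.sendMsg('Hello, group!')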
def _check_properties(cls, property_names, require_indexed=True): assert isinstance(property_names, (list, tuple)), repr(property_names) for name in property_names: assert isinstance(name, basestring), repr(name) if '.' in name: name, rest = name.split('.', 1) else: rest = None prop = cls._properties.get(name) if prop is None: cls._unknown_property(name) else: prop._check_property(rest, require_indexed=require_indexed)
Internal helper to check the given properties exist and meet specified requirements. Called from query.py. Args: property_names: List or tuple of property names -- each being a string, possibly containing dots (to address subproperties of structured properties). Raises: InvalidPropertyError if one of the properties is invalid. AssertionError if the argument is not a list or tuple of strings.
juraj-google-style
def attention_lm_decoder(decoder_input, decoder_self_attention_bias, hparams, name="decoder"): x = decoder_input with tf.variable_scope(name): for layer in range(hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess( x, hparams), None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) x = common_layers.layer_postprocess(x, y, hparams) with tf.variable_scope("ffn"): y = common_layers.conv_hidden_relu( common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout) x = common_layers.layer_postprocess(x, y, hparams) return common_layers.layer_preprocess(x, hparams)
A stack of attention_lm layers.

Args:
    decoder_input: a Tensor
    decoder_self_attention_bias: bias Tensor for self-attention
        (see common_attention.attention_bias())
    hparams: hyperparameters for model
    name: a string

Returns:
    y: a Tensor
juraj-google-style
def store_to_file(self, filename): with tf.gfile.Open(filename, "w") as f: for i in range(len(self._id_to_token)): f.write(self._id_to_token[i] + "\n")
Write vocab file to disk. Vocab files have one token per line. The file ends in a newline. Reserved tokens are written to the vocab file as well. Args: filename: Full path of the file to store the vocab to.
juraj-google-style
def cluster_spec(self): tf_config = _load_tf_config(self._port) if 'cluster' not in tf_config: return ClusterSpec({}) return ClusterSpec(tf_config['cluster'])
Returns a ClusterSpec based on the SageMaker environment variables. Returns: A ClusterSpec with information from the SageMaker environment variables.
github-repos
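Illustration of the TF_CONFIG-style dict this method parses; the host names are placeholders.

import tensorflow as tf

tf_config = {
    'cluster': {
        'worker': ['algo-1:2222', 'algo-2:2222'],
        'ps': ['algo-3:2222'],
    },
    'task': {'type': 'worker', 'index': 0},
}

# Mirrors the logic above: fall back to an empty spec when 'cluster' is absent.
spec = tf.train.ClusterSpec(tf_config['cluster']) if 'cluster' in tf_config else tf.train.ClusterSpec({})
print(sorted(spec.jobs))  # ['ps', 'worker']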
def __init__(self, req, config, section): self.req = req self.exclude = None self.include = None self.range = [None, None] self.config = config self._req_type = '' self._section = section self._initialized = None self._error_message = [] self.parse_single_req()
Initializes a version or dependency requirement object. Args: req: List that contains individual supported versions or a single string that contains `range` definition. e.g. [`range(1.0, 2.0) include(3.0) exclude(1.5)`] e.g. [`1.0`, `3.0`, `7.1`] config: String that is the configuration name. e.g. `platform` section: String that is the section name from the `.ini` config file under which the requirement is defined. e.g. `Required`, `Optional`, `Unsupported`, `Dependency`
github-repos
def GetUser(self, sid=None, uid=None, username=None): if sid: for user in self.users: if (user.sid == sid): return user return None if uid: for user in self.users: if (user.uid == uid): return user if username: for user in self.users: if (user.username == username): if (uid and user.uid and (user.uid != uid)): return None else: return user
Retrieve a User based on sid, uid or username. On windows we first get a SID and use it to find the username. We want to avoid combining users with name collisions, which occur when local users have the same username as domain users (something like Admin is particularly common). So if a SID is provided, don't also try to match by username. On linux we first get a username, then use this to find the UID, so we want to combine these records or we end up with multiple partially-filled user records. TODO(user): this won't work at all well with a query for uid=0 because that is also the default for User objects that don't have uid set. Args: sid: Windows user sid uid: Linux/Darwin user id username: string Returns: rdf_client.User or None
codesearchnet
def ParseConversationRow(self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) event_data = TangoAndroidConversationEventData() event_data.conversation_identifier = self._GetRowValue( query_hash, row, 'conv_id') date_time = dfdatetime_semantic_time.NotSet() event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a conversation row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.
juraj-google-style
def __init__(self, channel): self.Activate = channel.unary_unary( '/enterprise.API/Activate', request_serializer=client_dot_enterprise_dot_enterprise__pb2.ActivateRequest.SerializeToString, response_deserializer=client_dot_enterprise_dot_enterprise__pb2.ActivateResponse.FromString, ) self.GetState = channel.unary_unary( '/enterprise.API/GetState', request_serializer=client_dot_enterprise_dot_enterprise__pb2.GetStateRequest.SerializeToString, response_deserializer=client_dot_enterprise_dot_enterprise__pb2.GetStateResponse.FromString, ) self.Deactivate = channel.unary_unary( '/enterprise.API/Deactivate', request_serializer=client_dot_enterprise_dot_enterprise__pb2.DeactivateRequest.SerializeToString, response_deserializer=client_dot_enterprise_dot_enterprise__pb2.DeactivateResponse.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
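Hedged usage sketch for the generated stub: the module layout, stub class name, and endpoint below are assumptions.

import grpc

from client.enterprise import enterprise_pb2, enterprise_pb2_grpc  # assumed layout

channel = grpc.insecure_channel('localhost:650')
stub = enterprise_pb2_grpc.APIStub(channel)
state = stub.GetState(enterprise_pb2.GetStateRequest())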
def requestA(self): work_context = self.getContext() self.setContext('request[v4A]') self.m_serial_port.write((('2f3f'.decode('hex') + self.m_meter_address) + '3030210d0a'.decode('hex'))) self.m_raw_read_a = self.m_serial_port.getResponse(self.getContext()) unpacked_read_a = self.unpackStruct(self.m_raw_read_a, self.m_blk_a) self.convertData(unpacked_read_a, self.m_blk_a) self.m_kwh_precision = int(self.m_blk_a[Field.kWh_Scale][MeterData.NativeValue]) self.m_a_crc = self.crcMeterRead(self.m_raw_read_a, self.m_blk_a) self.setContext(work_context) return self.m_a_crc
Issue an A read on V4 meter. Returns: bool: True if CRC match at end of call.
codesearchnet
def pack_rpc_payload(arg_format, args): code = _create_respcode(arg_format, args) packed_result = struct.pack(code, *args) unpacked_validation = struct.unpack(code, packed_result) if (tuple(args) != unpacked_validation): raise RPCInvalidArgumentsError('Passed values would be truncated, please validate the size of your string', code=code, args=args) return packed_result
Pack an RPC payload according to arg_format. Args: arg_format (str): a struct format code (without the <) for the parameter format for this RPC. This format code may include the final character V, which means that it expects a variable length bytearray. args (list): A list of arguments to pack according to arg_format. Returns: bytes: The packed argument buffer.
codesearchnet
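Standalone illustration of the pack-then-unpack validation idea used above; the '<HH' format code is an arbitrary example, not real _create_respcode output.

import struct

code = '<HH'
args = (0x1234, 42)

packed = struct.pack(code, *args)
# The round-trip check is how pack_rpc_payload detects silently truncated values.
assert struct.unpack(code, packed) == args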
def ParseFileObject(self, parser_mediator, file_object):
    fixed_section_data_map = self._GetDataTypeMap('job_fixed_length_data_section')
    try:
        fixed_length_section, file_offset = self._ReadStructureFromFileObject(file_object, 0, fixed_section_data_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile('Unable to parse fixed-length data section with error: {0!s}'.format(exception))
    if fixed_length_section.product_version not in self._PRODUCT_VERSIONS:
        raise errors.UnableToParseFile('Unsupported product version in: 0x{0:04x}'.format(fixed_length_section.product_version))
    if fixed_length_section.format_version != 1:
        raise errors.UnableToParseFile('Unsupported format version in: {0:d}'.format(fixed_length_section.format_version))
    variable_section_data_map = self._GetDataTypeMap('job_variable_length_data_section')
    try:
        variable_length_section, data_size = self._ReadStructureFromFileObject(file_object, file_offset, variable_section_data_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile('Unable to parse variable-length data section with error: {0!s}'.format(exception))
    file_offset += data_size
    event_data = self._ParseEventData(variable_length_section)
    date_time = self._ParseLastRunTime(parser_mediator, fixed_length_section)
    if date_time:
        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_RUN)
        parser_mediator.ProduceEventWithEventData(event, event_data)
    trigger_data_map = self._GetDataTypeMap('job_trigger')
    for trigger_index in range(0, variable_length_section.number_of_triggers):
        try:
            trigger, data_size = self._ReadStructureFromFileObject(file_object, file_offset, trigger_data_map)
        except (ValueError, errors.ParseError) as exception:
            # Fixed: the original format string referenced positional index 2
            # with only two arguments, which would raise IndexError.
            raise errors.UnableToParseFile('Unable to parse trigger: {0:d} with error: {1!s}'.format(trigger_index, exception))
        file_offset += data_size
        event_data.trigger_type = trigger.trigger_type
        date_time = self._ParseTriggerStartTime(parser_mediator, trigger)
        if date_time:
            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_SCHEDULED_TO_START, time_zone=parser_mediator.timezone)
            parser_mediator.ProduceEventWithEventData(event, event_data)
        date_time = self._ParseTriggerEndTime(parser_mediator, trigger)
        if date_time:
            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_SCHEDULED_TO_START, time_zone=parser_mediator.timezone)
            parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a Windows job file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
codesearchnet
def WriteFixedString(self, value, length): towrite = value.encode('utf-8') slen = len(towrite) if slen > length: raise Exception("string longer than fixed length: %s " % length) self.WriteBytes(towrite) diff = length - slen while diff > 0: self.WriteByte(0) diff -= 1
Write a fixed-length string value to the stream, padded with null bytes up to `length`.

Args:
    value (str): value to write to the stream.
    length (int): fixed byte length of the field.

Raises:
    Exception: if the UTF-8 encoded value is longer than `length`.
juraj-google-style
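Minimal stand-alone sketch of the same fixed-length, null-padded write, using io.BytesIO in place of the stream wrapper:

import io

def write_fixed_string(stream, value, length):
    data = value.encode('utf-8')
    if len(data) > length:
        raise ValueError('string longer than fixed length: %s' % length)
    # Pad with null bytes up to the fixed field length.
    stream.write(data + b'\x00' * (length - len(data)))

buf = io.BytesIO()
write_fixed_string(buf, 'neo', 8)
assert buf.getvalue() == b'neo\x00\x00\x00\x00\x00'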
def _ExportEvent(self, output_module, event, deduplicate_events=True): if event.timestamp != self._export_event_timestamp: self._FlushExportBuffer( output_module, deduplicate_events=deduplicate_events) self._export_event_timestamp = event.timestamp self._export_event_heap.PushEvent(event)
Exports an event using an output module. Args: output_module (OutputModule): output module. event (EventObject): event. deduplicate_events (Optional[bool]): True if events should be deduplicated.
juraj-google-style