Columns: code (string, lengths 20–4.93k), docstring (string, lengths 33–1.27k), source (string, 3 classes).
def find_file_in_load_dirs(relpath): if relpath.startswith(os.path.sep): relpath = relpath.lstrip(os.path.sep) for ld in settings.DATA_DIRECTORIES: possible_path = os.path.join(ld, relpath) if os.path.exists(possible_path): return possible_path
If given relative path exists in one of DevAssistant load paths, return its full path. Args: relpath: a relative path, e.g. "assistants/crt/test.yaml" Returns: absolute path of the file, e.g. "/home/x/.devassistant/assistants/crt/test.yaml", or None if the file is not found
juraj-google-style
def _start_profiler(self, logdir): if self._profiler_started: return try: backend.tensorboard.start_trace(logdir) self._profiler_started = True except Exception as e: logging.error('Failed to start profiler: %s', e)
Starts the profiler if currently inactive. Args: logdir: Directory where profiler results will be saved.
github-repos
def is_remote_file_modified(web_file, destination): try: last_mod = web_file.headers.get('last-modified') if last_mod: web_file_time = time.strptime( web_file.headers.get( 'last-modified'), '%a, %d %b %Y %H:%M:%S %Z') else: web_file_time = time.gmtime() web_file_size = int(web_file.headers.get('content-length', -1)) if os.path.exists(destination): file_time = time.gmtime(os.path.getmtime(destination)) file_size = os.path.getsize(destination) if file_time >= web_file_time and file_size == web_file_size: return False except Exception as ex: msg = ('Failed checking if remote file is modified; defaulting to True' ' - {}'.format(ex)) logger.debug(msg) return True
Check if online file has been modified. Args: :web_file: online file to check. :destination: path of the offline file to compare.
juraj-google-style
def add_where_when(voevent, coords, obs_time, observatory_location, allow_tz_naive_datetime=False): if (obs_time.tzinfo is not None): utc_naive_obs_time = obs_time.astimezone(pytz.utc).replace(tzinfo=None) elif (not allow_tz_naive_datetime): raise ValueError("Datetime passed without tzinfo, cannot be sure if it is really a UTC timestamp. Please verify function call and either add tzinfo or pass parameter 'allow_tz_naive_obstime=True', as appropriate") else: utc_naive_obs_time = obs_time obs_data = etree.SubElement(voevent.WhereWhen, 'ObsDataLocation') etree.SubElement(obs_data, 'ObservatoryLocation', id=observatory_location) ol = etree.SubElement(obs_data, 'ObservationLocation') etree.SubElement(ol, 'AstroCoordSystem', id=coords.system) ac = etree.SubElement(ol, 'AstroCoords', coord_system_id=coords.system) time = etree.SubElement(ac, 'Time', unit='s') instant = etree.SubElement(time, 'TimeInstant') instant.ISOTime = utc_naive_obs_time.isoformat() pos2d = etree.SubElement(ac, 'Position2D', unit=coords.units) pos2d.Name1 = 'RA' pos2d.Name2 = 'Dec' pos2d_val = etree.SubElement(pos2d, 'Value2') pos2d_val.C1 = coords.ra pos2d_val.C2 = coords.dec pos2d.Error2Radius = coords.err
Add details of an observation to the WhereWhen section. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. coords(:class:`.Position2D`): Sky co-ordinates of event. obs_time(datetime.datetime): Nominal DateTime of the observation. Must either be timezone-aware, or should be carefully verified as representing UTC and then set parameter ``allow_tz_naive_datetime=True``. observatory_location(str): Telescope locale, e.g. 'La Palma'. May be a generic location as listed under :class:`voeventparse.definitions.observatory_location`. allow_tz_naive_datetime (bool): (Default False). Accept timezone-naive datetime-timestamps. See comments for ``obs_time``.
codesearchnet
def __request_message_descriptor(self, request_kind, message_type, method_id, path): descriptor = {} params, param_order = self.__params_descriptor(message_type, request_kind, path, method_id) if isinstance(message_type, resource_container.ResourceContainer): message_type = message_type.body_message_class() if (request_kind == self.__NO_BODY or message_type == message_types.VoidMessage()): descriptor['body'] = 'empty' else: descriptor['body'] = 'autoTemplate(backendRequest)' descriptor['bodyName'] = 'resource' self.__request_schema[method_id] = self.__parser.add_message( message_type.__class__) if params: descriptor['parameters'] = params if param_order: descriptor['parameterOrder'] = param_order return descriptor
Describes the parameters and body of the request. Args: request_kind: The type of request being made. message_type: messages.Message or ResourceContainer class. The message to describe. method_id: string, Unique method identifier (e.g. 'myapi.items.method') path: string, HTTP path to method. Returns: Dictionary describing the request. Raises: ValueError: if the method path and request required fields do not match
juraj-google-style
def mean(values: Sequence[Union[int, float, None]]) -> Optional[float]: total = 0.0 n = 0 for x in values: if x is not None: total += x n += 1 return total / n if n > 0 else None
Returns the mean of a list of numbers. Args: values: values to mean, ignoring any values that are ``None`` Returns: the mean, or ``None`` if :math:`n = 0`
juraj-google-style
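A minimal usage sketch for the mean() record above (plain Python, no extra dependencies assumed):
values = [4, 8, None, 12]
mean(values)        # 8.0 -- None entries are ignored: (4 + 8 + 12) / 3
mean([None, None])  # None -- no numeric values present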
def forward(self, X): return (self.W(X).sum(dim=1) + self.b)
Execute sparse linear layer Args: X: an [n, h] torch.LongTensor containing up to h indices of features whose weights should be looked up and used in a sparse linear multiplication.
codesearchnet
def __init__(self, parameter_name, value, allowed_values): super(EnumRejectionError, self).__init__(parameter_name, value) self.allowed_values = allowed_values
Constructor for EnumRejectionError. Args: parameter_name: String; the name of the enum parameter which had a value rejected. value: The actual value passed in for the enum. Usually string. allowed_values: List of strings allowed for the enum.
juraj-google-style
def x_www_form_urlencoded(post_data): if isinstance(post_data, dict): return "&".join([ u"{}={}".format(key, value) for key, value in post_data.items() ]) else: return post_data
Convert an ordinary dict to x-www-form-urlencoded format. Args: post_data (dict): {"a": 1, "b": 2} Returns: str: a=1&b=2
juraj-google-style
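A usage sketch for x_www_form_urlencoded(), assuming the function above is in scope (pair order follows dict ordering):
x_www_form_urlencoded({"a": 1, "b": 2})   # 'a=1&b=2'
x_www_form_urlencoded("raw=payload")      # non-dict input is returned unchanged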
def get_build_output(self, process): while True: output = process.stdout.readline() if output == b'' and process.poll() is not None: if process.returncode > 0: raise Exception("Compilation ended with an error" ".\nSTDERR\n%s\nSTDOUT\n%s" % (process.stderr.read(), process.stdout.read())) return if output: matches = re.search(r'\[\s*(\d+?)/(\d+)\].*', output.strip().decode('utf-8')) if matches is not None: yield [int(matches.group(1)), int(matches.group(2))]
Parse the output of the ns-3 build process to extract the information that is needed to draw the progress bar. Args: process: the subprocess instance to listen to.
juraj-google-style
def add_gene_ids(self, genes_list): orig_num_genes = len(self.genes) for g in list(set(genes_list)): if (not self.genes.has_id(g)): new_gene = GenePro(id=g, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir) if self.model: self.model.genes.append(new_gene) else: self.genes.append(new_gene) log.info('Added {} genes to GEM-PRO project'.format((len(self.genes) - orig_num_genes)))
Add gene IDs manually into the GEM-PRO project. Args: genes_list (list): List of gene IDs as strings.
codesearchnet
def GetVolumeIdentifiers(self, volume_system): volume_identifiers = [] for volume in volume_system.volumes: volume_identifier = getattr(volume, 'identifier', None) if volume_identifier: volume_identifiers.append(volume_identifier) return sorted(volume_identifiers)
Retrieves the volume identifiers. Args: volume_system (VolumeSystem): volume system. Returns: list[str]: sorted volume identifiers.
juraj-google-style
def to_dict(self, drop_null=True, camel=False): def to_dict(obj, drop_null, camel): if isinstance(obj, (Body, BodyChild)): obj = obj.__dict__ if isinstance(obj, dict): data = {} for attr, val in six.iteritems(obj): if camel: attr = _snake_to_camel(attr) valid_null = (isinstance(val, bool) or val == 0 or (val and to_dict(val, drop_null, camel))) if not drop_null or (drop_null and valid_null): data[attr] = to_dict(val, drop_null, camel) return data elif isinstance(obj, list): data = [] for val in obj: valid_null = (isinstance(val, bool) or val == 0 or (val and to_dict(val, drop_null, camel))) if not drop_null or (drop_null and valid_null): data.append(to_dict(val, drop_null, camel)) return data else: return obj return to_dict(self, drop_null, camel)
Serialize self as dict. Args: drop_null: bool, default True. Remove 'empty' attributes. camel: bool, default False. Convert keys to camelCase. Return: dict: object params.
juraj-google-style
def bounds(self, thr=0, lower_index=0, upper_index=-1): points = self.points[lower_index:upper_index] min_lat = float("inf") min_lon = float("inf") max_lat = -float("inf") max_lon = -float("inf") for point in points: min_lat = min(min_lat, point.lat) min_lon = min(min_lon, point.lon) max_lat = max(max_lat, point.lat) max_lon = max(max_lon, point.lon) return (min_lat - thr, min_lon - thr, max_lat + thr, max_lon + thr)
Computes the bounds of the segment, or part of it Args: thr (float, optional): Margin added to each bound. Defaults to 0 lower_index (int, optional): Start index. Defaults to 0 upper_index (int, optional): End index. Defaults to -1 Returns: :obj:`tuple` of :obj:`float`: Bounds of the (sub)segment, such that (min_lat, min_lon, max_lat, max_lon)
juraj-google-style
def build_prefixes(namespaces=None): if namespaces is None: namespaces = [ ('bf', str(BIBFRAME)), ('schema', str(SCHEMA_ORG)) ] output = "PREFIX {}: <{}>\n".format( namespaces[0][0], namespaces[0][1]) if len(namespaces) == 1: return output else: for namespace in namespaces[1:]: output += "PREFIX {}: <{}>\n".format(namespace[0], namespace[1]) return output
Internal function takes a list of prefix, namespace uri tuples and generates a SPARQL PREFIX string. Args: namespaces(list): List of tuples, defaults to BIBFRAME and Schema.org Returns: string
juraj-google-style
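An illustrative call to build_prefixes() with an explicit namespace list; the URI below is only a placeholder, since the default BIBFRAME/Schema.org constants live elsewhere in the module:
build_prefixes([('bf', 'http://example.org/bibframe/')])
# 'PREFIX bf: <http://example.org/bibframe/>\n'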
def NgramScorer(frequency_map): length = len(next(iter(frequency_map))) floor = math.log10((0.01 / sum(frequency_map.values()))) ngrams = frequency.frequency_to_probability(frequency_map, decorator=math.log10) def inner(text): text = ''.join(text) text = remove(text.upper(), (string.whitespace + string.punctuation)) return sum((ngrams.get(ngram, floor) for ngram in iterate_ngrams(text, length))) return inner
Compute the score of a text by using the frequencies of ngrams. Example: >>> fitness = NgramScorer(english.unigrams) >>> fitness("ABC") -4.3622319742618245 Args: frequency_map (dict): ngram to frequency mapping
codesearchnet
def union(self, other): union = Rect() lib.SDL_UnionRect(self._ptr, other._ptr, union._ptr) return union
Calculate the union of this rectangle and another rectangle. Args: other (Rect): The other rectangle. Returns: Rect: The union of this rectangle and the given other rectangle.
codesearchnet
def interpolate(self, date, method=None, order=None): if not self.start <= date <= self.stop: raise ValueError("Date '%s' not in range" % date) prev_idx = 0 ephem = self while True: idx = len(ephem) if idx == 1: break k = idx // 2 if date > ephem[k].date: prev_idx += k ephem = ephem[k:] else: ephem = ephem[:k] method = method if method is not None else self.method order = order if order is not None else self.order if method == self.LINEAR: y0 = self[prev_idx] y1 = self[prev_idx + 1] result = y0[:] + (y1[:] - y0[:]) * (date.mjd - y0.date.mjd) / (y1.date.mjd - y0.date.mjd) elif method == self.LAGRANGE: stop = prev_idx + 1 + order start = prev_idx - order if stop >= len(self): start -= stop - len(self) elif start < 0: stop -= start start = 0 subset = self[start:stop] date_subset = np.array([x.date.mjd for x in subset]) result = np.zeros(6) for j in range(order): mask = date_subset != date_subset[j] l_j = (date.mjd - date_subset[mask]) / (date_subset[j] - date_subset[mask]) result = result + l_j.prod() * subset[j] else: raise ValueError("Unknown interpolation method", method) orb = ephem[0] return orb.__class__(date, result, orb.form, orb.frame, orb.propagator)
Interpolate data at a given date Args: date (Date): method (str): Method of interpolation to use order (int): Order of the interpolation, used when the ``LAGRANGE`` method is selected Return: Orbit:
juraj-google-style
def prepare_sample_weight_modes(training_endpoints, sample_weight_mode): if isinstance(sample_weight_mode, collections.abc.Mapping): generic_utils.check_for_unexpected_keys('sample_weight_mode', sample_weight_mode, [e.output_name for e in training_endpoints]) for end_point in training_endpoints: if not end_point.should_skip_target_weights(): if end_point.output_name not in sample_weight_mode: raise ValueError('Output ' + end_point.output_name + 'missing from `_sample_weight_modes` dictionary') else: end_point.sample_weight_mode = sample_weight_mode.get(end_point.output_name) elif isinstance(sample_weight_mode, (list, tuple)): if len(sample_weight_mode) != len(training_endpoints): raise ValueError('When passing a list as sample_weight_mode, it should have one entry per model output. The model has ' + str(len(training_endpoints)) + ' outputs, but you passed ' + str(len(sample_weight_mode)) + '_sample_weight_modes.') for mode, endpoint in zip(sample_weight_mode, training_endpoints): if not endpoint.should_skip_target_weights(): endpoint.sample_weight_mode = mode else: for endpoint in training_endpoints: if not endpoint.should_skip_target_weights(): endpoint.sample_weight_mode = sample_weight_mode
Prepares sample weight modes for the model. Args: training_endpoints: List of model _TrainingEndpoints. sample_weight_mode: sample weight mode user input passed from compile API. Raises: ValueError: In case of invalid `sample_weight_mode` input.
github-repos
def equal(x1, x2): if any_symbolic_tensors((x1, x2)): return Equal().symbolic_call(x1, x2) return backend.numpy.equal(x1, x2)
Returns `(x1 == x2)` element-wise. Args: x1: Tensor to compare. x2: Tensor to compare. Returns: Output tensor, element-wise comparison of `x1` and `x2`.
github-repos
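A usage sketch for equal(), assuming it is the Keras 3 op exposed as keras.ops.equal (output type depends on the active backend):
from keras import ops
ops.equal([1, 2, 3], [1, 5, 3])   # element-wise result equivalent to [True, False, True]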
def exists(path): path = _normalize_dir(path) sysPath = get_path() return (path.lower() in (x.lower() for x in sysPath))
Check if the directory is configured in the SYSTEM path Case-insensitive and ignores trailing backslash Returns: boolean True if path exists, False if not CLI Example: .. code-block:: bash salt '*' win_path.exists 'c:\\python27' salt '*' win_path.exists 'c:\\python27\\' salt '*' win_path.exists 'C:\\pyThon27'
codesearchnet
def consume_socket_output(frames, demux=False): if demux is False: return six.binary_type().join(frames) out = [None, None] for frame in frames: assert frame != (None, None) if frame[0] is not None: if out[0] is None: out[0] = frame[0] else: out[0] += frame[0] else: if out[1] is None: out[1] = frame[1] else: out[1] += frame[1] return tuple(out)
Iterate through frames read from the socket and return the result. Args: frames: an iterable of frames read from the socket. demux (bool): If False, stdout and stderr are multiplexed, and the result is the concatenation of all the frames. If True, the streams are demultiplexed, and the result is a 2-tuple where each item is the concatenation of frames belonging to the same stream.
juraj-google-style
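A usage sketch for consume_socket_output() with hand-built (stdout, stderr) frames, following the frame convention described above:
frames = [(b'out-1 ', None), (None, b'err-1'), (b'out-2', None)]
consume_socket_output(frames, demux=True)         # (b'out-1 out-2', b'err-1')
consume_socket_output([b'a', b'b'], demux=False)  # b'ab'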
def getfullargspec(obj): decorators, target = tf_decorator.unwrap(obj) for d in decorators: if d.decorator_argspec is not None: return _convert_maybe_argspec_to_fullargspec(d.decorator_argspec) return _getfullargspec(target)
TFDecorator-aware replacement for `inspect.getfullargspec`. This wrapper emulates `inspect.getfullargspec` in Python 2. Args: obj: A callable, possibly decorated. Returns: The `FullArgSpec` that describes the signature of the outermost decorator that changes the callable's signature. If the callable is not decorated, `inspect.getfullargspec()` will be called directly on the callable.
github-repos
def wait(self, timeout_s: float=None) -> int: if (not self.running): return 0 retcode = self.process.wait(timeout=timeout_s) if (retcode is None): self.error('Subprocess finished, but return code was None') retcode = 1 elif (retcode == 0): self.info('Subprocess finished cleanly (return code 0).') else: self.error('Subprocess finished, but FAILED (return code {}). Logs were: {} (stdout), {} (stderr)'.format(retcode, self.details.logfile_out, self.details.logfile_err)) self.running = False return retcode
Wait for up to ``timeout_s`` for the child process to finish. Args: timeout_s: maximum time to wait or ``None`` to wait forever Returns: process return code; or ``0`` if it wasn't running, or ``1`` if it managed to exit without a return code Raises: subprocess.TimeoutExpired: if the process continues to run
codesearchnet
def read(keypath, configfile=None): if (configfile in _configs): appconfig = _configs[configfile] else: appconfig = AppConfig(configfile=configfile) _configs[configfile] = appconfig return appconfig.read(keypath)
Reads a value from the configuration file. Args: keypath: str Specifies the key for which the value is desired. It can be a hierarchical path. Example: "section1.subsection.key1" configfile: str Path to the config file to read. Defaults to None, in which case the application's default config file is used. Returns: value from configuration file
codesearchnet
def create_config(config_path='scriptworker.yaml'): if (not os.path.exists(config_path)): print("{} doesn't exist! Exiting...".format(config_path), file=sys.stderr) sys.exit(1) with open(config_path, 'r', encoding='utf-8') as fh: secrets = safe_load(fh) config = dict(deepcopy(DEFAULT_CONFIG)) if (not secrets.get('credentials')): secrets['credentials'] = read_worker_creds() config.update(secrets) apply_product_config(config) messages = check_config(config, config_path) if messages: print('\n'.join(messages), file=sys.stderr) print('Exiting...', file=sys.stderr) sys.exit(1) credentials = get_frozen_copy(secrets['credentials']) del config['credentials'] config = get_frozen_copy(config) return (config, credentials)
Create a config from DEFAULT_CONFIG, arguments, and config file. Then validate it and freeze it. Args: config_path (str, optional): the path to the config file. Defaults to "scriptworker.yaml" Returns: tuple: (config frozendict, credentials dict) Raises: SystemExit: on failure
codesearchnet
def _create_array(self, arr: np.ndarray) -> int: if (not isinstance(arr, np.ndarray)): raise ValueError('Array is not a numpy ndarray.') try: c_arr = np.ctypeslib.as_ctypes(arr) except (KeyError, NotImplementedError): raise ValueError('Array has unsupported dtype {}.'.format(arr.dtype)) raw_arr = RawArray(c_arr._type_, c_arr) with self._lock: if (self._count >= len(self._arrays)): self._arrays += (len(self._arrays) * [None]) self._get_next_free() self._arrays[self._current] = (raw_arr, arr.shape) self._count += 1 return self._current
Returns the handle of a RawArray created from the given numpy array. Args: arr: A numpy ndarray. Returns: The handle (int) of the array. Raises: ValueError: if arr is not a ndarray or of an unsupported dtype. If the array is of an unsupported type, using a view of the array to another dtype and then converting on get is often a work around.
codesearchnet
def save_as(self, new_filename): xfile._save_file(self._filename, self._datasourceTree, new_filename)
Save our file with the name provided. Args: new_filename: New name for the workbook file. String. Returns: Nothing.
juraj-google-style
def add_index_argument(cls, group): prefix = cls.argument_prefix group.add_argument( '--%s-index' % prefix, action="store", dest="%s_index" % prefix, help=("Name of the %s root markdown file, can be None" % ( cls.extension_name)))
Subclasses may call this to add an index argument. Args: group: argparse.ArgumentGroup, the extension argument group prefix: str, arguments have to be namespaced
juraj-google-style
def __init__(self, transport, maxdata, remote_banner): try: self.systemtype, self.serial, self.banner = remote_banner.split(':', 2) except ValueError: raise usb_exceptions.AdbProtocolError('Received malformed banner %s', remote_banner) self.transport = transport self.maxdata = maxdata self._last_id_used = 0 self._reader_lock = threading.Lock() self._open_lock = threading.Lock() self._stream_transport_map = {} self._stream_transport_map_lock = threading.RLock()
Create an ADB connection to a device. Args: transport: AdbTransportAdapter to use for reading/writing AdbMessages maxdata: Max data size the remote endpoint will accept. remote_banner: Banner received from the remote endpoint.
juraj-google-style
def supports_ansi_escape_codes(fd): if os.isatty(fd): return True if not is_win: return False handle = winapi._get_osfhandle(fd) if handle == winapi.INVALID_HANDLE_VALUE: return False if winapi.GetFileType(handle) != winapi.FILE_TYPE_PIPE: return False file_name = _get_file_name_for_handle(handle) match = re.match( "^\\\\(cygwin|msys)-[a-z0-9]+-pty[0-9]+-(from|to)-master$", file_name) return match is not None
Returns whether the output device is capable of interpreting ANSI escape codes when :func:`print_` is used. Args: fd (int): file descriptor (e.g. ``sys.stdout.fileno()``) Returns: `bool`
juraj-google-style
def make_new(self, rev): return self.vcs.make_rev_options(rev, extra_args=self.extra_args)
Make a copy of the current instance, but with a new rev. Args: rev: the name of the revision for the new object.
codesearchnet
def remove_words(self, words): for word in words: self._dictionary.pop(word.lower()) self._update_dictionary()
Remove a list of words from the word frequency list Args: words (list): The list of words to remove
juraj-google-style
def summarize_mean_in_nats_and_bits(inputs, units, name, nats_name_scope='nats', bits_name_scope='bits_per_dim'): mean = tf.reduce_mean(input_tensor=inputs) with tf.compat.v1.name_scope(nats_name_scope): tf.compat.v2.summary.scalar(name, mean, step=tf.compat.v1.train.get_or_create_global_step()) with tf.compat.v1.name_scope(bits_name_scope): tf.compat.v2.summary.scalar(name, ((mean / units) / tf.math.log(2.0)), step=tf.compat.v1.train.get_or_create_global_step())
Summarize the mean of a tensor in nats and bits per unit. Args: inputs: A tensor of values measured in nats. units: The units of the tensor with which to compute the mean bits per unit. name: The name of the tensor. nats_name_scope: The name scope of the nats summary. bits_name_scope: The name scope of the bits summary.
codesearchnet
def tscore(sample1, sample2): if (len(sample1) != len(sample2)): raise ValueError('different number of values') error = (pooled_sample_variance(sample1, sample2) / len(sample1)) diff = (statistics.mean(sample1) - statistics.mean(sample2)) return (diff / math.sqrt((error * 2)))
Calculate a t-test score for the difference between two samples. Args: sample1: one sample. sample2: the other sample. Returns: The t-test score, as a float.
codesearchnet
def GetMerger(self, cls): for merger in self._mergers: if isinstance(merger, cls): return merger raise LookupError('No matching DataSetMerger found')
Looks for an added DataSetMerger derived from the given class. Args: cls: A class derived from DataSetMerger. Returns: The matching DataSetMerger instance. Raises: LookupError: No matching DataSetMerger has been added.
juraj-google-style
def quote(self, data): if (self.lang == 'python'): quote_char = "'" elif (self.lang == 'java'): quote_char = '"' if re.findall('[!\\-\\=\\s\\$\\&]{1,}', str(data)): data = '{0}{1}{0}'.format(quote_char, data) return data
Quote any parameters that contain spaces or special characters. Returns: (string): String containing parameters wrapped in quotes
codesearchnet
def set_label_list(self, label_lists): if isinstance(label_lists, annotations.LabelList): label_lists = [label_lists] for label_list in label_lists: if (label_list.idx is None): label_list.idx = 'default' label_list.utterance = self self.label_lists[label_list.idx] = label_list
Set the given label-list for this utterance. If the label-list-idx is not set, ``default`` is used. If there is already a label-list with the given idx, it will be overridden. Args: label_list (LabelList, list): A single label-list or a list of label-lists to add.
codesearchnet
async def anext(*args): if (not args): raise TypeError('anext() expected at least 1 arguments, got 0') if (len(args) > 2): raise TypeError('anext() expected at most 2 arguments, got {}'.format(len(args))) (iterable, default, has_default) = (args[0], None, False) if (len(args) == 2): (iterable, default) = args has_default = True try: return (await iterable.__anext__()) except StopAsyncIteration as exc: if has_default: return default raise StopAsyncIteration() from exc
Return the next item from an async iterator. Args: iterable: An async iterable. default: An optional default value to return if the iterable is empty. Return: The next value of the iterable. Raises: TypeError: The iterable given is not async. This function will return the next value from an async iterable. If the iterable is empty the StopAsyncIteration will be propagated. However, if a default value is given as a second argument the exception is silenced and the default value is returned instead.
codesearchnet
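A usage sketch for anext(), assuming Python 3.7+ asyncio (this helper predates the built-in anext() added in Python 3.10):
import asyncio

async def gen():
    yield 1

async def main():
    it = gen()
    print(await anext(it))           # 1
    print(await anext(it, 'done'))   # 'done' -- iterator exhausted, default returned

asyncio.run(main())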
def list_attributes(self): def _row_gen(attributes): for attr in attributes.values(): (yield (attr.name, attr.display_name, attr.description)) return pd.DataFrame.from_records(_row_gen(self.attributes), columns=['name', 'display_name', 'description'])
Lists available attributes in a readable DataFrame format. Returns: pd.DataFrame: Frame listing available attributes.
codesearchnet
def get_stacks(self): if (not hasattr(self, '_stacks')): stacks = [] definitions = self._get_stack_definitions() for stack_def in definitions: stack = Stack(definition=stack_def, context=self, mappings=self.mappings, force=(stack_def.name in self.force_stacks), locked=stack_def.locked, enabled=stack_def.enabled, protected=stack_def.protected) stacks.append(stack) self._stacks = stacks return self._stacks
Get the stacks for the current action. Handles configuring the :class:`stacker.stack.Stack` objects that will be used in the current action. Returns: list: a list of :class:`stacker.stack.Stack` objects
codesearchnet
def analyze_one_classification_result(storage_client, file_path, adv_batch, dataset_batches, dataset_meta): class_result = read_classification_results(storage_client, file_path) if (class_result is None): return (0, 0, 0, 0) adv_images = adv_batch['images'] dataset_batch_images = dataset_batches.data[adv_batch['dataset_batch_id']]['images'] count_correctly_classified = 0 count_errors = 0 count_hit_target_class = 0 num_images = 0 for (adv_img_id, label) in iteritems(class_result): if (adv_img_id not in adv_images): continue num_images += 1 clean_image_id = adv_images[adv_img_id]['clean_image_id'] dataset_image_id = dataset_batch_images[clean_image_id]['dataset_image_id'] if (label == dataset_meta.get_true_label(dataset_image_id)): count_correctly_classified += 1 else: count_errors += 1 if (label == dataset_meta.get_target_class(dataset_image_id)): count_hit_target_class += 1 return (count_correctly_classified, count_errors, count_hit_target_class, num_images)
Reads and analyzes one classification result. This method reads the file with the classification result and counts how many images were classified correctly and incorrectly, how many times the target class was hit, and the total number of images. Args: storage_client: instance of CompetitionStorageClient file_path: result file path adv_batch: AdversarialBatches.data[adv_batch_id] adv_batch_id is stored in each ClassificationBatch entity dataset_batches: instance of DatasetBatches dataset_meta: instance of DatasetMetadata Returns: Tuple of (count_correctly_classified, count_errors, count_hit_target_class, num_images)
codesearchnet
def unzip_columns(expr, column_types): weld_obj = WeldObject(encoder_, decoder_) column_appenders = [] struct_fields = [] result_fields = [] for i, column_type in enumerate(column_types): column_appenders.append("appender[%s]" % column_type) struct_fields.append("merge(b.$%s, e.$%s)" % (i, i)) result_fields.append("result(unzip_builder.$%s)" % i) appender_string = "{%s}" % ", ".join(column_appenders) struct_string = "{%s}" % ", ".join(struct_fields) result_string = "{%s}" % ", ".join(result_fields) expr_var = weld_obj.update(expr) if isinstance(expr, WeldObject): expr_var = expr.obj_id weld_obj.dependencies[expr_var] = expr weld_template = weld_obj.weld_code = weld_template % {"expr": expr_var, "appenders": appender_string, "struct_builder": struct_string, "result": result_string} return weld_obj
Unzip a vector of structs into multiple columns. Args: columns (WeldObject / Numpy.ndarray): list of columns Returns: A WeldObject representing this computation
juraj-google-style
def save_plot(code, elem): if ('plt' in elem.attributes): (figurewidth, figureheight) = elem.attributes['plt'].split(',') else: try: figureheight = elem.attributes['height'] except KeyError: figureheight = '4cm' try: figurewidth = elem.attributes['width'] except KeyError: figurewidth = '6cm' return f
Converts matplotlib plots to tikz code. If elem has either the plt attribute (format: plt=width,height) or the attributes width=width and/or height=height, the figurewidth and -height are set accordingly. If none are given, a height of 4cm and a width of 6cm is used as default. Args: code: The matplotlib code. elem: The element. Returns: The code and some code to invoke matplotlib2tikz.
codesearchnet
def _ParseDateTimeValue(self, parser_mediator, date_time_value): if date_time_value[14] != 'Z': parser_mediator.ProduceExtractionWarning( 'invalid date and time value: {0!s}'.format(date_time_value)) return None try: year = int(date_time_value[0:4], 10) month = int(date_time_value[4:6], 10) day_of_month = int(date_time_value[6:8], 10) hours = int(date_time_value[8:10], 10) minutes = int(date_time_value[10:12], 10) seconds = int(date_time_value[12:14], 10) except (TypeError, ValueError): parser_mediator.ProduceExtractionWarning( 'invalid date and time value: {0!s}'.format(date_time_value)) return None time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds) try: return dfdatetime_time_elements.TimeElements( time_elements_tuple=time_elements_tuple) except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid date and time value: {0!s}'.format(date_time_value)) return None
Parses a date time value. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. date_time_value (str): date time value (CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE) in the format: "YYYYMMDDhhmmssZ". Returns: dfdatetime.TimeElements: date and time extracted from the value or None if the value does not represent a valid string.
juraj-google-style
def get(self, ldap_dn): self.base_dn = ldap_dn self.sub_tree = BASE return self.first()
Return an LDAP entry by DN Args: ldap_dn (str): LDAP DN
codesearchnet
def get_both_blocks_sibling(self): if (not self.is_both_blocks()): return None if (self.block.block_letter and (self.block.block_letter.upper() not in ['A', 'B'])): return None other_instances = EighthScheduledActivity.objects.filter(activity=self.activity, block__date=self.block.date) for inst in other_instances: if (inst == self): continue if (inst.block.block_letter in ['A', 'B']): return inst return None
If this is a both-blocks activity, get the other EighthScheduledActivity object that occurs on the other block. both_blocks means A and B block, NOT all of the blocks on that day. Returns: EighthScheduledActivity object if found None if the activity cannot have a sibling False if not found
codesearchnet
def on_train_end(self, logs=None): logs = self._process_logs(logs) for callback in self.callbacks: callback.on_train_end(logs)
Calls the `on_train_end` methods of its callbacks. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.
github-repos
def _distributed_apply(self, distribution, grads_and_vars, global_step=None, name=None): name = name if name is not None else self.get_name() grads = [g for g, _ in grads_and_vars] loss_scale_update_op, should_apply_grads = self._loss_scale.update(grads) def apply_fn(): return self._apply_gradients(distribution, grads_and_vars, global_step, name + '-wrapped') maybe_apply_op = smart_cond.smart_cond(should_apply_grads, apply_fn, control_flow_ops.no_op) return control_flow_ops.group(maybe_apply_op, loss_scale_update_op, name=name)
A version of `apply_gradients` for cross replica context. When users are in a cross replica strategy, they must call this rather than `apply_gradients()`. Args: distribution: a `DistributionStrategy` object. grads_and_vars: List of (gradient, variable) pairs as returned by `compute_gradients()` and then aggregated across replicas. global_step: Optional (mirrored) `Variable` to increment by one after the variables have been updated. name: Optional name for the returned operation. Default to the name passed to the `Optimizer` constructor. Returns: An `Operation` that applies the specified gradients across all replicas. If `global_step` was not None, that operation also increments `global_step`
github-repos
def _add_transitions(mcs, field_name, workflow, attrs, implems=None): new_implems = ImplementationList(field_name, workflow) if implems: new_implems.load_parent_implems(implems) new_implems.transform(attrs) return new_implems
Collect and enhance transition definitions to a workflow. Modifies the 'attrs' dict in-place. Args: field_name (str): name of the field transitions should update workflow (Workflow): workflow we're working on attrs (dict): dictionary of attributes to be updated. implems (ImplementationList): Implementation list from parent classes (optional) Returns: ImplementationList: The new implementation list for this field.
codesearchnet
def get_without_ethernet(self, id_or_uri): uri = (self._client.build_uri(id_or_uri) + '/withoutEthernet') return self._client.get(uri)
Gets the logical downlink with the specified ID without ethernet. Args: id_or_uri: Can be either the logical downlink id or the logical downlink uri. Returns: dict
codesearchnet
def recipe_email_cm_to_bigquery(config, auth_read, email, subject, dataset, table, is_incremental_load): email(config, {'auth': auth_read, 'read': {'from': 'noreply-cm@google.com', 'to': email, 'subject': subject, 'attachment': '.*'}, 'write': {'bigquery': {'dataset': dataset, 'table': table, 'header': True, 'is_incremental_load': is_incremental_load}}})
Pulls a CM Report from a gMail powered email account into BigQuery. Args: auth_read (authentication) - Credentials used for reading data. email (string) - Email address report was sent to. subject (string) - Regular expression to match subject. Double escape backslashes. dataset (string) - Existing dataset in BigQuery. table (string) - Name of table to be written to. is_incremental_load (boolean) - Append report data to table based on date column, de-duplicates.
github-repos
def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray, List[dict]]: doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs) return (retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids))
Retrieves documents for specified `question_hidden_states`. Args: question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`): A batch of query vectors to retrieve with. n_docs (`int`): The number of docs retrieved per query. Return: `Tuple[np.ndarray, np.ndarray, List[dict]]`: A tuple with the following objects: - **retrieved_doc_embeds** (`np.ndarray` of shape `(batch_size, n_docs, dim)`) -- The retrieval embeddings of the retrieved docs per query. - **doc_ids** (`np.ndarray` of shape `(batch_size, n_docs)`) -- The ids of the documents in the index - **doc_dicts** (`List[dict]`): The `retrieved_doc_embeds` examples per query.
github-repos
def get_children_of_type(typ, root): if type(typ) is not text: typ = typ.__name__ return get_children(lambda x: x.__class__.__name__ == typ, root)
Returns a list of all model elements of type 'typ' starting from model element 'root'. The search process will follow containment links only. Non-containing references shall not be followed. Args: typ(str or python class): The type of the model object we are looking for. root (model object): Python model object which is the start of the search process.
juraj-google-style
def CopyToDict(self): path_spec_dict = {} for (attribute_name, attribute_value) in iter(self.__dict__.items()): if (attribute_value is None): continue if (attribute_name == 'parent'): attribute_value = attribute_value.CopyToDict() path_spec_dict[attribute_name] = attribute_value return path_spec_dict
Copies the path specification to a dictionary. Returns: dict[str, object]: path specification attributes.
codesearchnet
def _guessEncoding(self, path): if (os.path.exists(path) and path.lower().endswith('csv')): encoding = None if (encoding is not None): if encoding.startswith('utf'): encoding = encoding.replace('-', '') encoding = encoding.replace('-', '_') viewValue = _encodings.get(encoding) self._encodingKey = encoding index = self._encodingComboBox.findText(viewValue.upper()) self._encodingComboBox.setCurrentIndex(index)
Opens a file from the given `path` and checks the file encoding. The file must exist on the file system and end with the extension `.csv`. The file is read line by line until the encoding could be guessed. On a successful identification, the widgets of this dialog will be updated. Args: path (string): Path to a csv file on the file system.
codesearchnet
def get_plugin_apps(self): return {_ACK_ROUTE: self._serve_ack, _COMM_ROUTE: self._serve_comm, _DEBUGGER_GRPC_HOST_PORT_ROUTE: self._serve_debugger_grpc_host_port, _DEBUGGER_GRAPH_ROUTE: self._serve_debugger_graph, _GATED_GRPC_ROUTE: self._serve_gated_grpc, _TENSOR_DATA_ROUTE: self._serve_tensor_data, _SOURCE_CODE_ROUTE: self._serve_source_code}
Obtains a mapping between routes and handlers. This function also starts a debugger data server on separate thread if the plugin has not started one yet. Returns: A mapping between routes and handlers (functions that respond to requests).
codesearchnet
def move_test_classes_into_scope(wrapped_test_module): for name, obj in wrapped_test_module.__dict__.items(): if _is_test_class(obj): module_variables['tpu_test_imported_%s' % name] = obj
Add all test classes defined in wrapped module to our module. The test runner works by inspecting the main module for TestCase classes, so by adding a module-level reference to the TestCase we cause it to execute the wrapped TestCase. Args: wrapped_test_module: The user-provided test code to run.
github-repos
def WMITimeStrToRDFDatetime(self, timestr): offset_minutes = timestr[21:] year = timestr[:4] month = timestr[4:6] day = timestr[6:8] hours = timestr[8:10] minutes = timestr[10:12] seconds = timestr[12:14] microseconds = timestr[15:21] unix_seconds = calendar.timegm( tuple(map(int, [year, month, day, hours, minutes, seconds]))) unix_seconds -= int(offset_minutes) * 60 return rdfvalue.RDFDatetime(unix_seconds * 1e6 + int(microseconds))
Return RDFDatetime from string like 20140825162259.000000-420. Args: timestr: WMI time string Returns: rdfvalue.RDFDatetime We have some timezone manipulation work to do here because the UTC offset is in minutes rather than +-HHMM
juraj-google-style
def CreateTask(self, session_identifier): task = tasks.Task(session_identifier) logger.debug('Created task: {0:s}.'.format(task.identifier)) with self._lock: self._tasks_queued[task.identifier] = task self._total_number_of_tasks += 1 self.SampleTaskStatus(task, 'created') return task
Creates a task. Args: session_identifier (str): the identifier of the session the task is part of. Returns: Task: task attribute container.
codesearchnet
def _get_min_max_value_by_expanding_range(self, start_idx: int) -> tuple[float, float]: mse_min = (float('inf'), float('inf'), float('inf')) left, right = (start_idx, start_idx) move_left = True while not (left == 0 and right == self._num_bins - 1): if move_left and left > 0 or right == self._num_bins - 1: left = max(left - 1, 0) else: right = min(right + 1, self._num_bins - 1) move_left = not move_left quant_min, quant_max = (self._hist_mids[left], self._hist_mids[right]) mse_tuple = self._get_weighted_mean_squared_error(quant_min, quant_max) mse_min = min(mse_tuple, mse_min) min_value, max_value = (mse_min[1], mse_min[2]) return (min_value, max_value)
Starting from start_idx, expand left and right alternately to find the min value of mse loss. Args: start_idx: Index to start quantization. Returns: (min_value, max_value): Min and max calculated.
github-repos
def log_combinations(n, counts, name='log_combinations'): with tf.name_scope(name): n = tf.convert_to_tensor(value=n, name='n') counts = tf.convert_to_tensor(value=counts, name='counts') total_permutations = tf.math.lgamma((n + 1)) counts_factorial = tf.math.lgamma((counts + 1)) redundant_permutations = tf.reduce_sum(input_tensor=counts_factorial, axis=[(- 1)]) return (total_permutations - redundant_permutations)
Multinomial coefficient. Given `n` and `counts`, where `counts` has last dimension `k`, we compute the multinomial coefficient as: ```n! / prod_i n_i!``` where `i` runs over all `k` classes. Args: n: Floating-point `Tensor` broadcastable with `counts`. This represents `n` outcomes. counts: Floating-point `Tensor` broadcastable with `n`. This represents counts in `k` classes, where `k` is the last dimension of the tensor. name: A name for this operation (optional). Returns: `Tensor` representing the multinomial coefficient between `n` and `counts`.
codesearchnet
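A worked example of the quantity log_combinations() computes: with n = 4 and counts = [2, 1, 1], the multinomial coefficient is 4! / (2! * 1! * 1!) = 24 / 2 = 12, so the result is log(12) ≈ 2.4849 (natural log, since tf.math.lgamma works in nats).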
def derive_field_name(self, field_name): cls = type(self) return cls(self[0], self[1], self[2], field_name, self[4], self[5])
Derives a new event from this one setting the ``field_name`` attribute. Args: field_name (Union[amazon.ion.symbols.SymbolToken, unicode]): The field name to set. Returns: IonEvent: The newly generated event.
codesearchnet
def where_node_as_ldap(where, compiler, connection): (bits, params) = ([], []) for item in where.children: if isinstance(item, WhereNode): (clause, clause_params) = compiler.compile(item) else: (clause, clause_params) = item.as_sql(compiler, connection) bits.append(clause) params.extend(clause_params) if (not bits): return ('', []) if (len(bits) == 1): clause = bits[0] elif (where.connector == AND): clause = ('&' + ''.join((('(%s)' % bit) for bit in bits))) elif (where.connector == OR): clause = ('|' + ''.join((('(%s)' % bit) for bit in bits))) else: raise LdapDBError(('Unhandled WHERE connector: %s' % where.connector)) if where.negated: clause = ('!(%s)' % clause) return (clause, params)
Parse a django.db.models.sql.where.WhereNode. Returns: (clause, [params]): the filter clause, with a list of unescaped parameters.
codesearchnet
def detect(self, filename, offset, standalone=False): r = RawStruct(filename=filename, offset=(offset + SIG_OFFSET), length=SIG_SIZE) oem_id = r.data if (oem_id == b'NTFS '): return True return False
Verifies NTFS filesystem signature. Returns: bool: True if filesystem signature at offset 0x03 matches 'NTFS ', False otherwise.
codesearchnet
def print_res(data): print('===================================') main_part = data['data'] print(main_part['word_name']) symbols = main_part['symbols'][0] print("美式音标:[" + symbols['ph_am'] + "]") print("英式音标:[" + symbols['ph_en'] + "]") print('-----------------------------------') parts = symbols['parts'] for part in parts: print(part['part']) for mean in part['means']: print(" ", mean) print('===================================')
Print the translation result in a better format Args: data(str): result
juraj-google-style
def nb_cluster(data, k, P_init=None, R_init=None, assignments=None, means=None, max_iters=10): (genes, cells) = data.shape if (P_init is None): P_init = np.random.random((genes, k)) if (R_init is None): R_init = np.random.randint(1, data.max(), (genes, k)) R_init = R_init.astype(float) if (assignments is None): (_, assignments) = kmeans_pp(data, k, means) means = np.zeros((genes, k)) old_assignments = np.copy(assignments) for i in range(max_iters): nb_gene_indices = fit_cluster(data, assignments, k, P_init, R_init, means) lls = nb_ll(data[(nb_gene_indices, :)], P_init[(nb_gene_indices, :)], R_init[(nb_gene_indices, :)]) lls += pois_ll.poisson_ll(data[((~ nb_gene_indices), :)], means[((~ nb_gene_indices), :)]) P_init[((~ nb_gene_indices), :)] = 0 R_init[((~ nb_gene_indices), :)] = np.inf for c in range(cells): assignments[c] = np.argmax(lls[(c, :)]) if np.equal(assignments, old_assignments).all(): break old_assignments = np.copy(assignments) return (assignments, P_init, R_init)
Performs negative binomial clustering on the given data. If some genes have mean > variance, then these genes are fitted to a Poisson distribution. Args: data (array): genes x cells k (int): number of clusters P_init (array): NB success prob param - genes x k. Default: random R_init (array): NB stopping param - genes x k. Default: random assignments (array): cells x 1 array of integers 0...k-1. Default: kmeans-pp (poisson) means (array): initial cluster means (for use with kmeans-pp to create initial assignments). Default: None max_iters (int): default: 10 Returns: assignments (array): 1d array of length cells, containing integers 0...k-1 P (array): genes x k - value is 0 for genes with mean > var R (array): genes x k - value is inf for genes with mean > var
codesearchnet
def Encode(string, encoding=None): del encoding return string
Encode the text string to a byte string. Args: string: str, The text string to encode. encoding: The suggested encoding if known. Returns: str, The binary string.
github-repos
def get_mac_dot_app_dir(directory): return os.path.dirname(os.path.dirname(os.path.dirname(directory)))
Returns parent directory of mac .app Args: directory (str): Current directory Returns: (str): Parent directory of mac .app
codesearchnet
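A usage sketch for get_mac_dot_app_dir() with a hypothetical bundle path:
get_mac_dot_app_dir('/Applications/MyApp.app/Contents/MacOS')   # '/Applications'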
def create_schema(host): connection = create_blocking_connection(host) channel = connection.channel() exchange = settings.get_amqp_settings()[host]["exchange"] channel.exchange_declare( exchange=exchange, exchange_type="topic", durable=True ) print "Created exchange '%s'." % exchange print "Creating queues:" queues = settings.get_amqp_settings()[host]["queues"] for queue in queues.keys(): channel.queue_declare( queue=queue, durable=True, ) print "\tCreated durable queue '%s'." % queue print print "Routing exchanges using routing key to queues:" for queue in queues.keys(): channel.queue_bind( queue=queue, exchange=exchange, routing_key=queues[queue] ) print "\tRouting exchange %s['%s'] -> '%s'." % ( exchange, queues[queue], queue )
Create exchanges, queues and route them. Args: host (str): One of the possible hosts.
juraj-google-style
def Start(self, seed_list: List[str]=None, skip_seeds: bool=False) -> None: if (not seed_list): seed_list = settings.SEED_LIST logger.debug('Starting up nodeleader') if (not skip_seeds): logger.debug('Attempting to connect to seed list...') for bootstrap in seed_list: if (not is_ip_address(bootstrap)): (host, port) = bootstrap.split(':') bootstrap = f'{hostname_to_ip(host)}:{port}' addr = Address(bootstrap) self.KNOWN_ADDRS.append(addr) self.SetupConnection(addr) logger.debug('Starting up nodeleader: starting peer, mempool, and blockheight check loops') self.start_peer_check_loop() self.start_memcheck_loop() self.start_blockheight_loop() if (settings.ACCEPT_INCOMING_PEERS and (not self.incoming_server_running)): class OneShotFactory(Factory): def __init__(self, leader): self.leader = leader def buildProtocol(self, addr): print(f'building new protocol for addr: {addr}') self.leader.AddKnownAddress(Address(f'{addr.host}:{addr.port}')) p = NeoNode(incoming_client=True) p.factory = self return p def listen_err(err): print(f'Failed start listening server for reason: {err.value}') def listen_ok(value): self.incoming_server_running = True logger.debug(f'Starting up nodeleader: setting up listen server on port: {settings.NODE_PORT}') server_endpoint = TCP4ServerEndpoint(self.reactor, settings.NODE_PORT) listenport_deferred = server_endpoint.listen(OneShotFactory(leader=self)) listenport_deferred.addCallback(listen_ok) listenport_deferred.addErrback(listen_err)
Start connecting to the seed list. Args: seed_list: a list of host:port strings if not supplied use list from `protocol.xxx.json` skip_seeds: skip connecting to seed list
codesearchnet
def mode(self, axis=0, numeric_only=False, dropna=True): axis = self._get_axis_number(axis) return self.__constructor__( query_compiler=self._query_compiler.mode( axis=axis, numeric_only=numeric_only, dropna=dropna ) )
Perform mode across the DataFrame. Args: axis (int): The axis to take the mode on. numeric_only (bool): if True, only apply to numeric columns. Returns: DataFrame: The mode of the DataFrame.
juraj-google-style
def get_signature_params(func): if is_cython(func): attrs = ['__code__', '__annotations__', '__defaults__', '__kwdefaults__'] if all((hasattr(func, attr) for attr in attrs)): original_func = func def func(): return for attr in attrs: setattr(func, attr, getattr(original_func, attr)) else: raise TypeError('{!r} is not a Python function we can process'.format(func)) return list(funcsigs.signature(func).parameters.items())
Get signature parameters Support Cython functions by grabbing relevant attributes from the Cython function and attaching to a no-op function. This is somewhat brittle, since funcsigs may change, but given that funcsigs is written to a PEP, we hope it is relatively stable. Future versions of Python may allow overloading the inspect 'isfunction' and 'ismethod' functions / create ABC for Python functions. Until then, it appears that Cython won't do anything about compatibility with the inspect module. Args: func: The function whose signature should be checked. Raises: TypeError: A type error if the signature is not supported
codesearchnet
def _make_source_table(self, source_list, is_tf_py_library): path_head = 'Source file path' num_nodes_head = '#(nodes)' num_tensors_head = '#(tensors)' num_dumps_head = '#(tensor dumps)' if is_tf_py_library: color = cli_shared.COLOR_GRAY lines = [RL('TensorFlow Python library file(s):', color)] else: color = cli_shared.COLOR_WHITE lines = [RL('File(s) outside TensorFlow Python library:', color)] if not source_list: lines.append(RL('[No files.]')) lines.append(RL()) return debugger_cli_common.rich_text_lines_from_rich_line_list(lines) path_column_width = max(max((len(item[0]) for item in source_list)), len(path_head)) + 1 num_nodes_column_width = max(max((len(str(item[2])) for item in source_list)), len(num_nodes_head)) + 1 num_tensors_column_width = max(max((len(str(item[3])) for item in source_list)), len(num_tensors_head)) + 1 head = RL(path_head + ' ' * (path_column_width - len(path_head)), color) head += RL(num_nodes_head + ' ' * (num_nodes_column_width - len(num_nodes_head)), color) head += RL(num_tensors_head + ' ' * (num_tensors_column_width - len(num_tensors_head)), color) head += RL(num_dumps_head, color) lines.append(head) for file_path, _, num_nodes, num_tensors, num_dumps, first_line_num in source_list: path_attributes = [color] if source_utils.is_extension_uncompiled_python_source(file_path): path_attributes.append(debugger_cli_common.MenuItem(None, 'ps %s -b %d' % (file_path, first_line_num))) line = RL(file_path, path_attributes) line += ' ' * (path_column_width - len(line)) line += RL(str(num_nodes) + ' ' * (num_nodes_column_width - len(str(num_nodes))), color) line += RL(str(num_tensors) + ' ' * (num_tensors_column_width - len(str(num_tensors))), color) line += RL(str(num_dumps), color) lines.append(line) lines.append(RL()) return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
Make a table summarizing the source files that create nodes and tensors. Args: source_list: List of source files and related information as a list of tuples (file_path, is_tf_library, num_nodes, num_tensors, num_dumps, first_line). is_tf_py_library: (`bool`) whether this table is for files that belong to the TensorFlow Python library. Returns: The table as a `debugger_cli_common.RichTextLines` object.
github-repos
async def forget_ticket(self, request): session = await get_session(request) session.pop(self.cookie_name, '')
Called to forget the ticket data for a request Args: request: aiohttp Request object.
juraj-google-style
def match_datetime(file_name: str, regex_expression: str) -> datetime.datetime: def rearrange_time_list(order_list: t.List, time_list: t.List) -> t.List: if order_list == DEFAULT_TIME_ORDER_LIST: return time_list new_time_list = [] for i, j in zip(order_list, time_list): dst = DEFAULT_TIME_ORDER_LIST.index(i) new_time_list.insert(dst, j) return new_time_list char_to_replace = {'%Y': ['([0-9]{4})', [0, 1978]], '%m': ['([0-9]{2})', [1, 1]], '%d': ['([0-9]{2})', [2, 1]], '%H': ['([0-9]{2})', [3, 0]], '%M': ['([0-9]{2})', [4, 0]], '%S': ['([0-9]{2})', [5, 0]], '*': ['.*']} missing_idx_list = [] temp_expression = regex_expression for key, value in char_to_replace.items(): if key != '*' and regex_expression.find(key) == -1: missing_idx_list.append(value[1]) else: temp_expression = temp_expression.replace(key, value[0]) regex_matches = re.findall(temp_expression, file_name)[0] order_list = [f'%{char}' for char in re.findall('%(\\w{1})', regex_expression)] time_list = list(map(int, regex_matches)) time_list = rearrange_time_list(order_list, time_list) if missing_idx_list: for [idx, val] in missing_idx_list: time_list.insert(idx, val) return datetime.datetime(*time_list)
Matches the regex string given and extracts the datetime object. Args: file_name: File name from which you want to extract datetime. regex_expression: Regex expression for extracting datetime from the filename. Returns: A datetime object after extracting from the filename.
github-repos
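A usage sketch for match_datetime() with a hypothetical filename, assuming DEFAULT_TIME_ORDER_LIST is ['%Y', '%m', '%d', '%H', '%M', '%S'] as the rest of the function implies:
match_datetime('era5_20200102.nc', 'era5_%Y%m%d.nc')
# datetime.datetime(2020, 1, 2, 0, 0) -- missing %H/%M/%S default to 0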
def _init_saver(self, saver=USE_DEFAULT): if saver is Supervisor.USE_DEFAULT: saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS) if saver is None and variables.global_variables(): saver = saver_mod.Saver() ops.add_to_collection(ops.GraphKeys.SAVERS, saver) self._saver = saver
Initializes saver. Args: saver: A `Saver` object. If set to USE_DEFAULT, create one that saves all the variables.
github-repos
def search_track(self, artist, album=None, track=None, full_album_art_uri=False): subcategories = [artist] subcategories.append((album or '')) result = self.get_album_artists(full_album_art_uri=full_album_art_uri, subcategories=subcategories, search_term=track, complete_result=True) result._metadata['search_type'] = 'search_track' return result
Search for an artist, an artist's albums, or specific track. Args: artist (str): an artist's name. album (str, optional): an album name. Default `None`. track (str, optional): a track name. Default `None`. full_album_art_uri (bool): whether the album art URI should be absolute (i.e. including the IP address). Default `False`. Returns: A `SearchResult` instance.
codesearchnet
def to_dms(angle, style='dms'): sign = 1 if angle >= 0 else -1 angle = abs(angle) * 3600 minutes, seconds = divmod(angle, 60) degrees, minutes = divmod(minutes, 60) if style == 'dms': return tuple(sign * abs(i) for i in (int(degrees), int(minutes), seconds)) elif style == 'dm': return tuple(sign * abs(i) for i in (int(degrees), (minutes + seconds / 60))) else: raise ValueError('Unknown style type %r' % style)
Convert decimal angle to degrees, minutes and possibly seconds. Args: angle (float): Angle to convert style (str): Return fractional or whole minutes values Returns: tuple of int: Angle converted to degrees, minutes and possibly seconds Raises: ValueError: Unknown value for ``style``
juraj-google-style
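A usage sketch for to_dms(); the inputs are chosen so the arithmetic is exact:
to_dms(52.25)               # (52, 15, 0.0)
to_dms(-22.5, style='dm')   # (-22, -30.0)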
def get_enum_from_name(self, enum_name): return next((e for e in self.enums if e.name == enum_name), None)
Return an enum from a name Args: enum_name (str): name of the enum Returns: Enum
juraj-google-style
def first(self) -> 'Builder': return self._to_builder(_evaluation.FirstFunction(self.node.context, self.node, []))
The FHIRPath first() function. Returns: An expression that evaluates to the first element of the parent, or empty if the parent has no results.
github-repos
def get_descriptor(self): raise NotImplementedError('Base class should not be called directly!')
This function returns a string describing the sniffer. The specific string (and its format) is up to each derived sniffer type. Returns: A string describing the sniffer.
github-repos
def add_molecule(self, mol, bond=None, base=None, target=None): ai = self.available_idx() mapping = {n: n + ai - 1 for n, _ in mol.atoms_iter()} relabeled = nx.relabel_nodes(mol.graph, mapping) self.graph.add_nodes_from(relabeled.nodes(data=True)) self.graph.add_edges_from(relabeled.edges(data=True)) if bond: self.add_bond(base, mapping[target], bond)
connect atom group (for SMILES parser) May require recalculation of 2D coordinates for drawing Args: mol: graphmol.Compound() the original object will be copied. bond: Bond object to be connected. the original will not be copied so be careful. base: index of atom in self to connect target: index of atom in group to be connected Raises: TypeError
juraj-google-style
def __frontend_limit_rules_descriptor(self, api_info):
    if not api_info.frontend_limits.rules:
        return None

    rules = []
    for rule in api_info.frontend_limits.rules:
        descriptor = {}
        for propname, descname in (('match', 'match'),
                                   ('qps', 'qps'),
                                   ('user_qps', 'userQps'),
                                   ('daily', 'daily'),
                                   ('analytics_id', 'analyticsId')):
            if getattr(rule, propname) is not None:
                descriptor[descname] = getattr(rule, propname)
        if descriptor:
            rules.append(descriptor)
    return rules
Builds a frontend limit rules descriptor from API info. Args: api_info: An _ApiInfo object. Returns: A list of dictionaries with frontend limit rules information.
juraj-google-style
def reindex(self, axis, labels, **kwargs):
    def reindex_builder(df, axis, old_labels, new_labels, **kwargs):
        if axis:
            while len(df.columns) < len(old_labels):
                df[len(df.columns)] = np.nan
            df.columns = old_labels
            new_df = df.reindex(columns=new_labels, **kwargs)
            new_df.columns = pandas.RangeIndex(len(new_df.columns))
            return new_df
        else:
            while len(df.index) < len(old_labels):
                df.loc[len(df.index)] = np.nan
            df.index = old_labels
            new_df = df.reindex(index=new_labels, **kwargs)
            new_df.reset_index(inplace=True, drop=True)
            return new_df

    old_labels = self.columns if axis else self.index
    new_index = self.index if axis else labels
    new_columns = labels if axis else self.columns
    func = self._prepare_method(
        lambda df: reindex_builder(df, axis, old_labels, labels, **kwargs)
    )
    new_data = self._map_across_full_axis(axis, func)
    return self.__constructor__(new_data, new_index, new_columns)
Fits a new index for this Manager.

Args:
    axis: The axis index object to target the reindex on.
    labels: New labels to conform 'axis' on to.

Returns:
    A new QueryCompiler with updated data and new index.
juraj-google-style
def exportData(self, datfile):
    def ampl_set(name, values):
        def format_entry(e):
            return repr(e).replace(' ', '')
        return 'set {0} := {1};'.format(
            name, ','.join(format_entry(e) for e in values)
        )

    def ampl_param(name, values):
        def format_entry(k, v):
            k = repr(k).strip('()').replace(' ', '')
            if v == inf:
                v = "Infinity"
            elif v == -inf:
                v = "-Infinity"
            else:
                v = repr(v).strip('()').replace(' ', '')
            return '[{0}]{1}'.format(k, v)
        return 'param {0} := {1};'.format(
            name, ''.join(format_entry(k, v) for k, v in values.items())
        )

    with open(datfile, 'w') as f:
        for name, entity in self.getSets():
            values = entity.getValues().toList()
            print(ampl_set(name, values), file=f)

        for name, entity in self.getParameters():
            if entity.isScalar():
                print(
                    'param {} := {};'.format(name, entity.value()),
                    file=f
                )
            else:
                values = entity.getValues().toDict()
                print(ampl_param(name, values), file=f)
Create a .dat file with the data that has been loaded. Args: datfile: Path to the file (Relative to the current working directory or absolute).
juraj-google-style
def random(self, shape, tf_fn, kwargs):
    slice_shape = self.slice_shape(shape)
    op_seed = random.random()

    def my_fn(pnum):
        seed = hash("%s,%s" % (op_seed, self.slice_begin(shape, pnum)))
        return tf_fn(slice_shape, seed=seed, **kwargs)

    return self.slicewise(my_fn, self.laid_out_pnum())
Call a random tf operation (e.g. tf.random.uniform). Args: shape: a Shape tf_fn: a function such as tf.random.uniform kwargs: kwargs to pass to tf_fn, except for seed Returns: a LaidOutTensor
juraj-google-style
def generate_selected_rules(rule_configs: List[RuleConfig], rules: RulesMap) -> List[RuleChecker]:
    selected_rules: List[RuleChecker] = []
    for rule_config in rule_configs:
        rule_name = rule_config['rule']
        if rule_name not in rules:
            raise ValueError('Invalid rule specified.')
        else:
            args = rule_config.get('args', {})
            rule = rules[rule_name](**args)
            rule.__name__ = rule_name
            rule.__kwdefaults__ = args
            selected_rules.append(rule)
    if len(selected_rules) == 0:
        raise ValueError('No rules specified.')
    return selected_rules
Generates rule checkers from the provided rule configs and mappable rules. Args: * rule_configs: List of RuleConfigs, with potential args * rules: Typed RulesMap Returns: * List of RuleCheckers with args applied Raises: * ValueError: if non-existent rule name provided
github-repos
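A self-contained sketch of how the helper above might be driven, with a made-up max_length rule factory standing in for the project's real RulesMap entries; it assumes generate_selected_rules is in scope and that RuleConfig/RulesMap are plain dict/callable aliases at runtime.

# Hypothetical rule factory: returns a checker closed over its args.
def max_length_rule(limit=10):
    def check(value):
        return len(value) <= limit
    return check

rules = {'max_length': max_length_rule}                         # stand-in RulesMap
rule_configs = [{'rule': 'max_length', 'args': {'limit': 3}}]   # stand-in RuleConfigs

checkers = generate_selected_rules(rule_configs, rules)
checker = checkers[0]
print(checker.__name__, checker('abc'), checker('abcd'))  # max_length True False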
def _GetMergeTaskStorageFilePath(self, task):
    filename = '{0:s}.plaso'.format(task.identifier)
    return os.path.join(self._merge_task_storage_path, filename)
Retrieves the path of a task storage file in the merge directory.

Args:
    task (Task): task.

Returns:
    str: path of a task storage file in the merge directory.
juraj-google-style
def FromString(cls, indata):
    lines = [x.strip() for x in indata.split('\n') if not x.startswith('#') and len(x.strip()) > 0]
    if (len(lines) < 3):
        raise DataError('Invalid CommandFile string that did not contain 3 header lines', lines=lines)

    (fmt_line, version_line, ascii_line) = lines[:3]
    if (not version_line.startswith('Format: ')):
        raise DataError("Invalid format version that did not start with 'Format: '", line=version_line)

    version = version_line[8:]
    if (ascii_line != 'Type: ASCII'):
        raise DataError('Unknown file type line (expected Type: ASCII)', line=ascii_line)

    cmds = [cls.decode(x) for x in lines[3:]]
    return CommandFile(fmt_line, version, cmds)
Load a CommandFile from a string. The string should be produced from a previous call to encode. Args: indata (str): The encoded input data. Returns: CommandFile: The decoded CommandFile object.
codesearchnet
def update_user_attributes(self, user, claims):
    required_fields = [field.name for field in user._meta.fields if (field.blank is False)]

    for (field, claim) in settings.CLAIM_MAPPING.items():
        if hasattr(user, field):
            if (claim in claims):
                setattr(user, field, claims[claim])
                logger.debug("Attribute '{}' for user '{}' was set to '{}'.".format(field, user, claims[claim]))
            elif (field in required_fields):
                msg = "Claim not found in access token: '{}'. Check ADFS claims mapping."
                raise ImproperlyConfigured(msg.format(claim))
            else:
                msg = ("Claim '{}' for user field '{}' was not found in the access token for user '{}'. "
                       "Field is not required and will be left empty").format(claim, field, user)
                logger.warning(msg)
        else:
            msg = "User model has no field named '{}'. Check ADFS claims mapping."
            raise ImproperlyConfigured(msg.format(field))
Updates user attributes based on the CLAIM_MAPPING setting. Args: user (django.contrib.auth.models.User): User model instance claims (dict): claims from the access token
codesearchnet
def from_dict(d):
    i = Tags()
    for (k, v) in d.items():
        if (k not in ('@module', '@class')):
            i[k] = v
    return i
Creates Tags object from a dictionary. Args: d: Dict of feff parameters and values. Returns: Tags object
codesearchnet
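A possible round trip through the helper above, assuming Tags is the dict-like FEFF tag container from pymatgen.io.feff.inputs and that this from_dict helper is in scope alongside it; the parameter values are placeholders.

# Hypothetical usage; '@module'/'@class' keys are stripped by from_dict.
d = {
    '@module': 'pymatgen.io.feff.inputs',
    '@class': 'Tags',
    'S02': 0.9,
    'PRINT': '1 0 0 0 0 0',
}
tags = from_dict(d)
print('S02' in tags, '@module' in tags)  # True False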
def occurrence(self, indicator=None):
    self._request_entity = 'fileOccurrence'
    self._request_uri = '{}/fileOccurrences'.format(self._request_uri)
    if indicator is not None:
        self._request_uri = '{}/{}/fileOccurrences'.format(self._api_uri, indicator)
Update the URI to retrieve file occurrences for the provided indicator. Args: indicator (string): The indicator to retrieve file occurrences.
juraj-google-style
def __call__(self, shape, dtype=None, **kwargs):
    _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)
    dtype = _assert_float_dtype(_get_dtype(dtype))
    if len(shape) < 2:
        raise ValueError('The tensor to initialize must be at least two-dimensional')
    num_rows = 1
    for dim in shape[:-1]:
        num_rows *= dim
    num_cols = shape[-1]
    flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))
    a = self._random_generator.random_normal(flat_shape, dtype=dtype)
    q, r = gen_linalg_ops.qr(a, full_matrices=False)
    d = array_ops.tensor_diag_part(r)
    q *= math_ops.sign(d)
    if num_rows < num_cols:
        q = array_ops.matrix_transpose(q)
    return self.gain * array_ops.reshape(q, shape)
Returns a tensor object initialized to an orthogonal matrix.

Args:
    shape: Shape of the tensor.
    dtype: Optional dtype of the tensor. Only floating point types are
        supported. If not specified, `tf.keras.backend.floatx()` is used,
        which defaults to `float32` unless you configured it otherwise
        (via `tf.keras.backend.set_floatx(float_dtype)`)
    **kwargs: Additional keyword arguments.
github-repos
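The method above backs Keras's orthogonal initializer; a quick sanity check through the public API (a sketch, assuming a standard TensorFlow install) verifies that the returned matrix has orthonormal columns.

import tensorflow as tf

init = tf.keras.initializers.Orthogonal(gain=1.0)
w = init(shape=(8, 4), dtype=tf.float32)

# Columns should be orthonormal: W^T W ~ identity (up to float error).
gram = tf.matmul(w, w, transpose_a=True)
print(tf.reduce_max(tf.abs(gram - tf.eye(4))).numpy())  # close to 0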
def _on_action(self, sequence, topic, message):
    try:
        slug = None
        parts = topic.split('/')
        slug = parts[-3]
        uuid = self._extract_device_uuid(slug)
    except Exception as exc:
        self._logger.warn("Error parsing slug in action handler (slug=%s, topic=%s)", slug, topic)
        return

    if messages.DisconnectCommand.matches(message):
        self._logger.debug("Received disconnect command for device 0x%X", uuid)
        key = message['key']
        client = message['client']
        self._loop.add_callback(self._disconnect_from_device, uuid, key, client)
    elif messages.OpenInterfaceCommand.matches(message) or messages.CloseInterfaceCommand.matches(message):
        self._logger.debug("Received %s command for device 0x%X", message['operation'], uuid)
        key = message['key']
        client = message['client']
        oper = message['operation']
        if oper == 'open_interface':
            self._loop.add_callback(self._open_interface, client, uuid, message['interface'], key)
        else:
            self._loop.add_callback(self._close_interface, client, uuid, message['interface'], key)
    elif messages.RPCCommand.matches(message):
        rpc_msg = messages.RPCCommand.verify(message)
        client = rpc_msg['client']
        address = rpc_msg['address']
        rpc = rpc_msg['rpc_id']
        payload = rpc_msg['payload']
        key = rpc_msg['key']
        timeout = rpc_msg['timeout']
        self._loop.add_callback(self._send_rpc, client, uuid, address, rpc, payload, timeout, key)
    elif messages.ScriptCommand.matches(message):
        script_msg = messages.ScriptCommand.verify(message)
        key = script_msg['key']
        client = script_msg['client']
        script = script_msg['script']
        self._loop.add_callback(self._send_script, client, uuid, script, key,
                                (script_msg['fragment_index'], script_msg['fragment_count']))
    else:
        self._logger.error("Unsupported message received (topic=%s) (message=%s)", topic, str(message))
Process a command action that we received on behalf of a device. Args: sequence (int): The sequence number of the packet received topic (string): The topic this message was received on message (dict): The message itself
juraj-google-style
def __init__(self,
             element_value: ValueSpecOrAnnotation,
             default: typing.Optional[typing.List[typing.Any]] = MISSING_VALUE,
             min_size: typing.Optional[int] = None,
             max_size: typing.Optional[int] = None,
             size: typing.Optional[int] = None,
             transform: typing.Optional[typing.Callable[[typing.Any], typing.List[typing.Any]]] = None,
             is_noneable: bool = False,
             frozen: bool = False):
    element_value = ValueSpec.from_annotation(element_value, auto_typing=True)
    if size is not None and (min_size is not None or max_size is not None):
        raise ValueError(f'Either "size" or "min_size"/"max_size" pair can be specified. Encountered: size={size}, min_size={min_size}, max_size={max_size}.')
    if size is not None:
        min_size = size
        max_size = size
    if min_size is None:
        min_size = 0
    if min_size < 0:
        raise ValueError(f'"min_size" of List must be no less than 0. Encountered: {min_size}.')
    if max_size is not None:
        if max_size < min_size:
            raise ValueError(f'"max_size" of List must be no less than "min_size". Encountered: min_size={min_size}, max_size={max_size}')
    self._element = Field(key_specs.ListKey(min_size, max_size), element_value, 'Field of list element')
    super().__init__(list, default, transform, is_noneable=is_noneable, frozen=frozen)
Constructor.

Args:
    element_value: A ``ValueSpec`` object or an equivalent annotation as the
        spec for the list element.
    default: (Optional) default value for this spec.
    min_size: (Optional) min size of list. If None, 0 will be used.
    max_size: (Optional) max size of list.
    size: (Optional) size of List. A shortcut to specify min_size and max_size
        at the same time. `size` and `min_size`/`max_size` are mutually exclusive.
    transform: (Optional) user-defined function to be called on the input of
        `apply`. It could be used as a type converter or a custom validator
        which may raise errors.
    is_noneable: If True, None is acceptable.
    frozen: If True, values other than the default value are not acceptable.
github-repos
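This constructor appears to be PyGlove's pg.typing.List; if so, a typical use pairs it with an element spec and size bounds. A sketch, assuming a standard pyglove install and that size violations surface as ValueError:

import pyglove as pg

# A list of ints with between 1 and 3 elements.
spec = pg.typing.List(pg.typing.Int(), min_size=1, max_size=3)

print(spec.apply([1, 2]))    # [1, 2]
try:
    spec.apply([])           # violates min_size=1
except ValueError as e:
    print('rejected:', e)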
def _sorted_results(self, results_dicts):
    print('results dicts:', results_dicts)
    sorted_dict = sorted(results_dicts, key=lambda k: k['start_time'])
    results = []
    for entry in sorted_dict:
        results.append(entry['dt'])
    return results
Sorts a list of result dicts based on log start_time.

Sorts the results and returns a list containing only the values,
ordered oldest first.

Args:
    results_dicts: List of result dicts.

Returns:
    List of only the values, sorted oldest first.
juraj-google-style
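A tiny self-contained check of the helper above, using made-up timestamps; `self` is unused, so the sketch assumes the function is reachable as a plain function and passes None for it.

rows = [
    {'start_time': 20, 'dt': 'second'},
    {'start_time': 10, 'dt': 'first'},
    {'start_time': 30, 'dt': 'third'},
]
# Prints the debug line with the input dicts first, then the sorted values.
print(_sorted_results(None, rows))  # ['first', 'second', 'third']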
def _create_filter(col_param, extractor):
    include_missing_values = (not col_param.exclude_missing_values)
    if col_param.HasField('filter_regexp'):
        value_filter_fn = _create_regexp_filter(col_param.filter_regexp)
    elif col_param.HasField('filter_interval'):
        value_filter_fn = _create_interval_filter(col_param.filter_interval)
    elif col_param.HasField('filter_discrete'):
        value_filter_fn = _create_discrete_set_filter(col_param.filter_discrete)
    elif include_missing_values:
        return None
    else:
        value_filter_fn = (lambda _: True)

    def filter_fn(session_group):
        value = extractor(session_group)
        if (value is None):
            return include_missing_values
        return value_filter_fn(value)

    return filter_fn
Creates a filter for the given col_param and extractor. Args: col_param: A tensorboard.hparams.ColParams object identifying the column and describing the filter to apply. extractor: A function that extract the column value identified by 'col_param' from a tensorboard.hparams.SessionGroup protobuffer. Returns: A boolean function taking a tensorboard.hparams.SessionGroup protobuffer returning True if the session group passes the filter described by 'col_param'. If col_param does not specify a filter (i.e. any session group passes) returns None.
codesearchnet
def make_unique_script_attr(attributes):
    filtered_attr = []
    script_list = []
    for attr in attributes:
        if attr.Usage != TransactionAttributeUsage.Script:
            filtered_attr.append(attr)
        else:
            data = attr.Data
            if isinstance(data, UInt160):
                data = attr.Data.ToArray()
            if data not in script_list:
                script_list.append(data)
                filtered_attr.append(attr)
    return filtered_attr
Filter out duplicate `Script` TransactionAttributeUsage types.

Args:
    attributes: a list of TransactionAttribute's

Returns:
    list: the filtered list of TransactionAttribute objects.
juraj-google-style
def generate_poisson_data(centers, n_cells, cluster_probs=None):
    (genes, clusters) = centers.shape
    output = np.zeros((genes, n_cells))
    if (cluster_probs is None):
        cluster_probs = (np.ones(clusters) / clusters)
    labels = []
    for i in range(n_cells):
        c = np.random.choice(range(clusters), p=cluster_probs)
        labels.append(c)
        output[:, i] = np.random.poisson(centers[:, c])
    return (output, np.array(labels))
Generates poisson-distributed data, given a set of means for each cluster. Args: centers (array): genes x clusters matrix n_cells (int): number of output cells cluster_probs (array): prior probability for each cluster. Default: uniform. Returns: output - array with shape genes x n_cells labels - array of cluster labels
codesearchnet
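A small end-to-end run of the generator above (with the column-slicing fix applied), using made-up cluster means; only numpy is required, and generate_poisson_data is assumed to be in scope.

import numpy as np

np.random.seed(0)
centers = np.array([[1.0, 10.0],
                    [5.0,  1.0],
                    [2.0,  8.0]])   # 3 genes x 2 clusters
data, labels = generate_poisson_data(centers, n_cells=100)

print(data.shape, labels.shape)           # (3, 100) (100,)
print(np.bincount(labels))                # roughly a 50/50 split between clusters
print(data[:, labels == 1].mean(axis=1))  # should land near [10, 1, 8]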