Dataset columns: code — string, lengths 20 to 4.93k; docstring — string, lengths 33 to 1.27k; source — string, 3 classes.
def eigvalsh(a, eigvec=False):
    if eigvec == True:
        val, vec = eigh(a, eigvec=True)
        return val, gvar.mean(vec)
    else:
        return eigh(a, eigvec=False)
Eigenvalues of Hermitian matrix ``a``. Args: a: Two-dimensional, square Hermitian matrix/array of numbers and/or :class:`gvar.GVar`\s. Array elements must be real-valued if `gvar.GVar`\s are involved (i.e., symmetric matrix). eigvec (bool): If ``True``, method returns a tuple of arrays ``(val, vec)`` where ``val[i]`` are the eigenvalues of ``a``, and ``vec[:, i]`` are the mean values of the corresponding eigenvectors. Only ``val`` is returned if ``eigvec=False`` (default). Returns: Array ``val`` of eigenvalues of matrix ``a`` if parameter ``eigvec==False`` (default); otherwise a tuple of arrays ``(val, vec)`` where ``val[i]`` are the eigenvalues (in ascending order) and ``vec[:, i]`` are the mean values of the corresponding eigenvectors. Raises: ValueError: If matrix is not square and two-dimensional.
codesearchnet
def get_status(self): return (self._initialized, self._error_message)
Get status of `_Reqs` initialization. Returns: Tuple (Boolean indicating initialization status, List of error messages, if any)
github-repos
def OpenFileSystem(cls, path_spec_object, resolver_context=None):
    if not isinstance(path_spec_object, path_spec.PathSpec):
        raise TypeError('Unsupported path specification type.')

    if resolver_context is None:
        resolver_context = cls._resolver_context

    if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
        if path_spec_object.HasParent():
            raise errors.PathSpecError(
                'Unsupported mount path specification with parent.')

        mount_point = getattr(path_spec_object, 'identifier', None)
        if not mount_point:
            raise errors.PathSpecError(
                'Unsupported path specification without mount point identifier.')

        path_spec_object = mount_manager.MountPointManager.GetMountPoint(mount_point)
        if not path_spec_object:
            raise errors.MountPointError(
                'No such mount point: {0:s}'.format(mount_point))

    file_system = resolver_context.GetFileSystem(path_spec_object)
    if not file_system:
        resolver_helper = cls._GetResolverHelper(path_spec_object.type_indicator)
        file_system = resolver_helper.NewFileSystem(resolver_context)

    try:
        file_system.Open(path_spec_object)
    except (IOError, ValueError) as exception:
        raise errors.BackEndError(
            'Unable to open file system with error: {0!s}'.format(exception))

    return file_system
Opens a file system object defined by path specification. Args: path_spec_object (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built in context which is not multi process safe. Returns: FileSystem: file system or None if the path specification could not be resolved or has no file system object. Raises: AccessError: if the access to open the file system was denied. BackEndError: if the file system cannot be opened. MountPointError: if the mount point specified in the path specification does not exist. PathSpecError: if the path specification is incorrect. TypeError: if the path specification type is unsupported.
codesearchnet
def __init__(self, g=None, default_device=None, global_step=None):
    if g is None:
        self._g = tf.get_default_graph()
    else:
        self._g = g
    self._train_op = None
    self._summary_tags = set()
    if global_step and global_step.dtype.base_dtype not in (tf.int32, tf.int64):
        raise ValueError('Global step must be an int32 or int64 variable: %s' %
                         global_step.dtype)
    self._global_step = global_step
    if default_device:
        self.g._device_function_stack.append(default_device)
    self._recurrent_state = None
    self.reset_summary_collections()
Creates a Bookkeeper. Args: g: A graph, if not specified then the default graph is used. default_device: A default device or function. global_step: A variable to use as a global step. Raises: ValueError: If global_step is not an integer variable.
juraj-google-style
def cumsum(x, axis=None, dtype=None): return Cumsum(axis=axis, dtype=dtype)(x)
Returns the cumulative sum of elements along a given axis. Args: x: Input tensor. axis: Axis along which the cumulative sum is computed. By default the input is flattened. dtype: dtype of returned tensor. Defaults to x.dtype. Returns: Output tensor.
github-repos
def MICECache(subsystem, parent_cache=None):
    if config.REDIS_CACHE:
        cls = RedisMICECache
    else:
        cls = DictMICECache
    return cls(subsystem, parent_cache=parent_cache)
Construct a |MICE| cache. Uses either a Redis-backed cache or a local dict cache on the object. Args: subsystem (Subsystem): The subsystem that this is a cache for. Kwargs: parent_cache (MICECache): The cache generated by the uncut version of ``subsystem``. Any cached |MICE| which are unaffected by the cut are reused in this cache. If None, the cache is initialized empty.
codesearchnet
def download_aspera(self, user, host, silent=False):
    aspera_home = os.environ.get("ASPERA_HOME", None)
    if not aspera_home:
        raise ValueError("environment variable $ASPERA_HOME not set")
    if not os.path.exists(aspera_home):
        raise ValueError(
            "$ASPERA_HOME directory {} does not exist".format(aspera_home))
    ascp = os.path.join(aspera_home, "connect/bin/ascp")
    key = os.path.join(aspera_home, "connect/etc/asperaweb_id_dsa.openssh")
    if not os.path.exists(ascp):
        raise ValueError("could not find ascp binary")
    if not os.path.exists(key):
        raise ValueError("could not find openssh key")

    parsed_url = urlparse(self.url)
    cmd = "{} -i {} -k1 -T -l400m {}@{}:{} {}".format(
        ascp, key, user, host, parsed_url.path, self._temp_file_name)
    logger.debug(cmd)
    try:
        pr = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
        stdout, stderr = pr.communicate()
        if not silent:
            logger.debug("Aspera stdout: " + str(stdout))
            logger.debug("Aspera stderr: " + str(stderr))
        if pr.returncode == 0:
            logger.debug("Moving %s to %s" % (
                self._temp_file_name, self.destination))
            shutil.move(self._temp_file_name, self.destination)
            logger.debug("Successfully downloaded %s" % self.url)
        else:
            logger.error(
                "Failed to download %s using Aspera Connect" % self.url)
    finally:
        try:
            os.remove(self._temp_file_name)
        except OSError:
            pass
Download file with Aspera Connect. For details see the documentation of Aspera Connect. Args: user (:obj:`str`): FTP user. host (:obj:`str`): FTP host. Defaults to "ftp-trace.ncbi.nlm.nih.gov".
juraj-google-style
def build_worker_instruction(*args):
    tuple_class = collections.namedtuple(*args)
    tuple_class.__str__ = worker_object_to_string
    tuple_class.__repr__ = worker_object_to_string
    return tuple_class
Create an object representing a ParallelInstruction protobuf. This will be a collections.namedtuple with a custom __str__ method. Alas, this wrapper is not known to pylint, which thinks it creates constants. You may have to put a disable=invalid-name pylint annotation on any use of this, depending on your names. Args: *args: the first argument is the name of the type to create; it should start with "Worker". The second argument is a list of the attributes of this object. Returns: A new class, a subclass of tuple, that represents the protobuf.
github-repos
def submit_file_content(self, method, url, data, headers, params, halt_on_error=True):
    r = None
    try:
        r = self.tcex.session.request(
            method, url, data=data, headers=headers, params=params)
    except Exception as e:
        self.tcex.handle_error(580, [e], halt_on_error)
    return r
Submit File Content for Documents and Reports to ThreatConnect API. Args: method (str): The HTTP method for the request (POST, PUT). url (str): The URL for the request. data (str;bytes;file): The body (data) for the request. headers (dict): The headers for the request. params (dict): The query string parameters for the request. halt_on_error (bool, default:True): If True any exception will raise an error. Returns: requests.models.Response: The response from the request.
codesearchnet
def __init__(self, maximum_number_of_queued_items=0, timeout=None):
    super(MultiProcessingQueue, self).__init__()
    self._timeout = timeout

    queue_max_length = _multiprocessing.SemLock.SEM_VALUE_MAX
    if maximum_number_of_queued_items > queue_max_length:
        logger.warning((
            'Requested maximum queue size: {0:d} is larger than the maximum '
            'size supported by the system. Defaulting to: {1:d}').format(
                maximum_number_of_queued_items, queue_max_length))
        maximum_number_of_queued_items = queue_max_length

    self._queue = multiprocessing.Queue(maxsize=maximum_number_of_queued_items)
Initializes a multi-processing queue. Args: maximum_number_of_queued_items (Optional[int]): maximum number of queued items, where 0 represents no limit. timeout (Optional[float]): number of seconds for the get to time out, where None will block until a new item is put onto the queue.
juraj-google-style
def get_primitives_paths():
    primitives_paths = list()
    entry_points = pkg_resources.iter_entry_points('mlprimitives')
    for entry_point in entry_points:
        if entry_point.name == 'jsons_path':
            path = entry_point.load()
            primitives_paths.append(path)
    return _PRIMITIVES_PATHS + primitives_paths
Get the list of folders where the primitives will be looked for. This list will include the value of any `entry_point` named `jsons_path` published under the name `mlprimitives`. An example of such an entry point would be:: entry_points = { 'mlprimitives': [ 'jsons_path=some_module:SOME_VARIABLE' ] } where the module `some_module` contains a variable such as:: SOME_VARIABLE = os.path.join(os.path.dirname(__file__), 'jsons') Returns: list: The list of folders.
codesearchnet
def __ge__(self, other):
    if not isinstance(other, interface.DateTimeValues):
        raise ValueError('Other not an instance of DateTimeValues')
    if not isinstance(other, SemanticTime):
        return False
    return self._SORT_ORDER >= other._SORT_ORDER
Determines if the date time values are greater than or equal to other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are greater than or equal to other. Raises: ValueError: if other is not an instance of DateTimeValues.
juraj-google-style
def _start_process(self, classpath):
    cache_dir = self.config['cache-dir']
    java_flags = self.config['java-flags']
    iswindows = os.name == 'nt'
    Util.mkdir_p(cache_dir)
    log_path = os.path.join(cache_dir, 'server.log')
    log = open(log_path, 'w')
    null = open(os.devnull, 'r')
    java = os.path.join(self.config['java-home'], 'bin',
                        'java.exe' if iswindows else 'java')
    if not os.path.exists(java):
        raise InvalidJavaPathError(errno.ENOENT, 'No such file or directory', java)
    elif not os.access(java, os.X_OK):
        raise InvalidJavaPathError(errno.EACCES, 'Permission denied', java)

    args = ([java, '-cp', (';' if iswindows else ':').join(classpath)] +
            [a for a in java_flags if a] +
            ['-Densime.config={}'.format(self.config.filepath),
             'org.ensime.server.Server'])
    process = subprocess.Popen(args, stdin=null, stdout=log,
                               stderr=subprocess.STDOUT)
    pid_path = os.path.join(cache_dir, 'server.pid')
    Util.write_file(pid_path, str(process.pid))

    def on_stop():
        log.close()
        null.close()
        with catch(Exception):
            os.remove(pid_path)

    return EnsimeProcess(cache_dir, process, log_path, on_stop)
Given a classpath prepared for running ENSIME, spawns a server process in a way that is otherwise agnostic to how the strategy installs ENSIME. Args: classpath (list of str): list of paths to jars or directories (Within this function the list is joined with a system dependent path separator to create a single string argument suitable to pass to ``java -cp`` as a classpath) Returns: EnsimeProcess: A process handle for the launched server.
codesearchnet
def stage_job_resources(self,
                        resources: List[Tuple[str, str, str]],
                        staging_location: Optional[str] = None):
    if staging_location is None:
        raise RuntimeError('The staging_location must be specified.')

    staged_resources = []
    for file_path, staged_path, sha256 in resources:
        self.stage_artifact(
            file_path, FileSystems.join(staging_location, staged_path), sha256)
        staged_resources.append(staged_path)
    return staged_resources
For internal use only; no backwards-compatibility guarantees. Stages job resources to staging_location. Args: resources: A list of tuples of local file paths and file names (no paths) to be used for staging resources. staging_location: Location to stage the file. Returns: A list of file names (no paths) for the resources staged. All the files are assumed to be staged at staging_location. Raises: RuntimeError: If files specified are not found or error encountered while trying to create the resources (e.g., build a setup package).
github-repos
def write(self, data, echo=None):
    if echo or (echo is None and self.echo):
        sys.stdout.write(data.decode('latin1'))
        sys.stdout.flush()
    self.channel.write(data)
Write data to channel. Args: data(bytes): The data to write to the channel. echo(bool): Whether to echo the written data to stdout. Raises: EOFError: If the channel was closed before all data was sent.
codesearchnet
def _slice_params_to_dict(dist, params_event_ndims, slices):
    override_dict = {}
    for param_name, param_event_ndims in six.iteritems(params_event_ndims):
        if param_name not in dist.parameters:
            raise ValueError('Distribution {} is missing advertised '
                             'parameter {}'.format(dist, param_name))
        param = dist.parameters[param_name]
        if param is None:
            continue
        dtype = None
        if hasattr(dist, param_name):
            attr = getattr(dist, param_name)
            dtype = getattr(attr, 'dtype', None)
        if dtype is None:
            dtype = dist.dtype
            warnings.warn('Unable to find property getter for parameter Tensor {} '
                          'on {}, falling back to Distribution.dtype {}'.format(
                              param_name, dist, dtype))
        param = tf.convert_to_tensor(value=param, dtype=dtype)
        override_dict[param_name] = _slice_single_param(
            param, param_event_ndims, slices, dist.batch_shape_tensor())
    return override_dict
Computes the override dictionary of sliced parameters. Args: dist: The tfd.Distribution being batch-sliced. params_event_ndims: Per-event parameter ranks, a `str->int` `dict`. slices: Slices as received by __getitem__. Returns: overrides: `str->Tensor` `dict` of batch-sliced parameter overrides.
codesearchnet
def test_encode(self, base_id: str, expected_context_element: str, expected_sql_expression: str, expected_fhir_path_expression: str, expected_fields_referenced_by_expression: List[str]): error_reporter = fhir_errors.ListErrorReporter() all_resources = list(self.resources.values()) encoder = fhir_path_validator.FhirProfileStandardSqlEncoder(unittest.mock.Mock(iter_structure_definitions=lambda: all_resources), primitive_handler.PrimitiveHandler(), error_reporter, options=fhir_path_validator.SqlGenerationOptions(verbose_error_reporting=True)) resource = self.resources[f'http: actual_bindings = encoder.encode(resource) self.assertEmpty(error_reporter.warnings) self.assertEmpty(error_reporter.errors) self.assertEqual(actual_bindings[0].element_path, expected_context_element) self.assertEqual(actual_bindings[0].fhir_path_expression, expected_fhir_path_expression) self.assertEqual(actual_bindings[0].fields_referenced_by_expression, expected_fields_referenced_by_expression) self.assertEqual(actual_bindings[0].sql_expression, expected_sql_expression)
Ensures we build the expected constraints to validate a structure definition. Given the `base_id` to a structure definition, ensure we generate the expected constraints to validate that structure definition. Args: base_id: The structure definition to use in the test. expected_context_element: The expected element_path for the resulting constraint. expected_sql_expression: The expected SQL expression for the resulting constraint. expected_fhir_path_expression: The expected FHIRPath expression for the resulting constraint. expected_fields_referenced_by_expression: The expected fields_referenced_by_expression for the resulting constraint.
github-repos
def append(self, item):
    if isinstance(item, list):
        self.extend(item)
    elif not self:
        list.append(self, item)
    elif item.__class__ == self[0].__class__:
        list.append(self, item)
    else:
        raise exceptions.WrongListItemType(item.__class__.__name__,
                                           self[0].__class__.__name__)
Append one item to the list. Args: item: Item to be appended. Raises: :exc:`~.exceptions.WrongListItemType`: If an item has a different type than the first item to be stored.
juraj-google-style
def set_value(x, value):
    value = numpy_compat.np_asarray(value, dtype=dtype_numpy(x))
    if ops.executing_eagerly_outside_functions():
        x.assign(value)
    else:
        with get_graph().as_default():
            tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
            if hasattr(x, '_assign_placeholder'):
                assign_placeholder = x._assign_placeholder
                assign_op = x._assign_op
            else:
                placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)
                assign_placeholder = array_ops.placeholder(tf_dtype, shape=placeholder_shape)
                assign_op = x.assign(assign_placeholder)
                x._assign_placeholder = assign_placeholder
                x._assign_op = assign_op
            get_session().run(assign_op, feed_dict={assign_placeholder: value})
Sets the value of a variable, from a Numpy array. `backend.set_value` is the complement of `backend.get_value`, and provides a generic interface for assigning to variables while abstracting away the differences between TensorFlow 1.x and 2.x semantics. {snippet} Args: x: Variable to set to a new value. value: Value to set the tensor to, as a Numpy array (of the same shape).
github-repos
def __call__(self, stats):
    if 'elapsed_time' not in stats:
        stats['elapsed_time'] = _get_time() - self._start_at
    self._log.append(stats)

    with tempdir(prefix=self._log_name, dir=self._out_path) as tempd:
        path = os.path.join(tempd, 'log.json')
        with open(path, 'w') as f:
            json.dump(self._log, f, indent=4)

        new_path = os.path.join(self._out_path, self._log_name)
        shutil.move(path, new_path)
Add a training log entry. Args: stats (dict): Training log values. The object must be key-value style and the value types must be `float` or `int`. When the object does not have an 'elapsed_time' key, the function sets the time automatically. The measurement starts when a new instance is created.
juraj-google-style
def load_pyfile(self, path): with open(path) as config_file: contents = config_file.read() try: exec(compile(contents, path, 'exec'), self) except Exception as e: raise MalformedConfig(path, six.text_type(e))
Load python file as config. Args: path (string): path to the python file
juraj-google-style
def swo_set_emu_buffer_size(self, buf_size):
    buf = ctypes.c_uint32(buf_size)
    res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.SET_BUFFERSIZE_EMU,
                                         ctypes.byref(buf))
    if res < 0:
        raise errors.JLinkException(res)
    return None
Sets the size of the buffer used by the J-Link to collect SWO data. Args: self (JLink): the ``JLink`` instance buf_size (int): the new size of the emulator buffer Returns: ``None`` Raises: JLinkException: on error
codesearchnet
def __init__(self, y_tensor=None):
    self._uuid = uuid.uuid4().hex
    _gradient_debuggers[self._uuid] = self
    self._gradient_tensors = {}
    self._y_tensor = y_tensor
    self._graph = None
    if y_tensor:
        self._graph = y_tensor.graph
    self._is_active_context = False
Constructor of GradientsDebugger. Args: y_tensor: optional: the `tf.Tensor` to be differentiated, i.e., the tensor on the numerator of the differentiation.
github-repos
def _resize_image_if_necessary(image_fobj, target_pixels=None):
    if target_pixels is None:
        return image_fobj

    cv2 = tfds.core.lazy_imports.cv2
    image = cv2.imdecode(
        np.fromstring(image_fobj.read(), dtype=np.uint8), flags=3)
    height, width, _ = image.shape
    actual_pixels = height * width
    if actual_pixels > target_pixels:
        factor = np.sqrt(target_pixels / actual_pixels)
        image = cv2.resize(image, dsize=None, fx=factor, fy=factor)
    _, buff = cv2.imencode(".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), 72])
    return io.BytesIO(buff.tostring())
Resize an image to have (roughly) the given number of target pixels. Args: image_fobj: File object containing the original image. target_pixels: If given, number of pixels that the image must have. Returns: A file object.
juraj-google-style
def length_squared(x, keep_dims=False, name=None, reduction_dim=None):
    with tf.name_scope(name, 'length_squared', [x]) as scope:
        x = tf.convert_to_tensor(x, name='x')
        if not reduction_dim:
            reduction_dim = _last_index(x, 1)
        return tf.reduce_sum(tf.square(x), reduction_dim,
                             keep_dims=keep_dims, name=scope)
Computes the squared length of x. Args: x: A tensor. keep_dims: If true, reduction does not change the rank of the input. name: Optional name for this op. reduction_dim: The dimension to reduce, by default choose the last one and if no shape is specified guess 1. Returns: The squared length of x.
codesearchnet
def download_to_tempfile(url, file_name=None, extension=None):
    if not file_name:
        file_name = generate_timestamped_string('wtf_temp_file')

    if extension:
        file_path = temp_path(file_name + extension)
    else:
        ext = ''
        try:
            ext = re.search(u'\\.\\w+$', file_name).group(0)
        except:
            pass
        file_path = temp_path(file_name + ext)

    webFile = urllib.urlopen(url)
    localFile = open(file_path, 'w')
    localFile.write(webFile.read())
    webFile.close()
    localFile.close()

    return file_path
Downloads a URL contents to a tempfile. This is useful for testing downloads. It will download the contents of a URL to a tempfile, which you then can open and use to validate the downloaded contents. Args: url (str) : URL of the contents to download. Kwargs: file_name (str): Name of file. extension (str): Extension to use. Return: str - Returns path to the temp file.
codesearchnet
def _list(self, dir_or_prefix):
    try:
        for path, (size, updated) in self._gcsIO().list_files(
                dir_or_prefix, with_metadata=True):
            yield FileMetadata(path, size, updated)
    except Exception as e:
        raise BeamIOError('List operation failed', {dir_or_prefix: e})
List files in a location. Listing is non-recursive, for filesystems that support directories. Args: dir_or_prefix: (string) A directory or location prefix (for filesystems that don't have directories). Returns: Generator of ``FileMetadata`` objects. Raises: ``BeamIOError``: if listing fails, but not if no files were found.
github-repos
def verify(self, obj):
    if not isinstance(obj, str):
        raise ValidationError("Object is not a string",
                              reason='object is not a string',
                              object=obj, type=type(obj), str_type=str)
    return obj
Verify that the object conforms to this verifier's schema Args: obj (object): A python object to verify Raises: ValidationError: If there is a problem verifying the dictionary, a ValidationError is thrown with at least the reason key set indicating the reason for the lack of validation.
juraj-google-style
def add(self, arg, options=None):
    fut = tasklets.Future('%s.add(%s, %s)' % (self, arg, options))
    todo = self._queues.get(options)
    if todo is None:
        utils.logging_debug('AutoBatcher(%s): creating new queue for %r',
                            self._todo_tasklet.__name__, options)
        if not self._queues:
            eventloop.add_idle(self._on_idle)
        todo = self._queues[options] = []
    todo.append((fut, arg))
    if len(todo) >= self._limit:
        del self._queues[options]
        self.run_queue(options, todo)
    return fut
Adds an arg and gets back a future. Args: arg: one argument for _todo_tasklet. options: rpc options. Return: An instance of future, representing the result of running _todo_tasklet without batching.
juraj-google-style
def filter_publication(publication, cmp_authors=True):
    query = None
    isbn_query = False

    if publication.optionals and publication.optionals.ISBN:
        query = aleph.ISBNQuery(publication.optionals.ISBN)
        isbn_query = True
    else:
        query = aleph.TitleQuery(publication.title)

    result = aleph.reactToAMQPMessage(aleph.SearchRequest(query), '')

    if not result.records:
        return publication

    if isbn_query:
        for record in result.records:
            epub = record.epublication
            if compare_names(epub.nazev, publication.title) >= 80:
                return None
        return publication

    for record in result.records:
        epub = record.epublication
        if not (compare_names(epub.nazev, publication.title) >= 80):
            continue
        if not cmp_authors:
            return None
        for author in epub.autori:
            author_str = '%s %s %s' % (author.firstName, author.lastName, author.title)
            pub_authors = map(lambda x: x.name, publication.authors)
            if type(pub_authors) not in [list, tuple, set]:
                pub_authors = [pub_authors]
            for pub_author in pub_authors:
                if compare_names(author_str, pub_author) >= 50:
                    return None
    return publication
Filter publications based on data from Aleph. Args: publication (obj): :class:`.Publication` instance. Returns: obj/None: None if the publication was found in Aleph or `publication` if not.
codesearchnet
def indicator_body(indicators):
    hash_patterns = {
        'md5': re.compile('^([a-fA-F\\d]{32})$'),
        'sha1': re.compile('^([a-fA-F\\d]{40})$'),
        'sha256': re.compile('^([a-fA-F\\d]{64})$'),
    }
    body = {}
    for indicator in indicators:
        if indicator is None:
            continue
        if hash_patterns['md5'].match(indicator):
            body['md5'] = indicator
        elif hash_patterns['sha1'].match(indicator):
            body['sha1'] = indicator
        elif hash_patterns['sha256'].match(indicator):
            body['sha256'] = indicator
    return body
Generate the appropriate dictionary content for POST of a File indicator. Args: indicators (list): A list of one or more hash value(s).
codesearchnet
def horizontal_infrared_radiation_intensity(self, value=9999.0):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float '
                'for field `horizontal_infrared_radiation_intensity`'.format(value))
        if value < 0.0:
            raise ValueError(
                'value need to be greater or equal 0.0 '
                'for field `horizontal_infrared_radiation_intensity`')
    self._horizontal_infrared_radiation_intensity = value
Corresponds to IDD Field `horizontal_infrared_radiation_intensity` Args: value (float): value for IDD Field `horizontal_infrared_radiation_intensity` Unit: Wh/m2 value >= 0.0 Missing value: 9999.0 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def noisy_wrap(__func: Callable) -> Callable:
    def wrapper(*args, **kwargs):
        DebugPrint.enable()
        try:
            __func(*args, **kwargs)
        finally:
            DebugPrint.disable()
    return wrapper
Decorator to enable DebugPrint for a given function. Args: __func: Function to wrap Returns: Wrapped function
codesearchnet
def _get_type_name(type_):
    name = repr(type_)
    if name.startswith("<"):
        name = getattr(type_, "__qualname__", getattr(type_, "__name__", ""))
    return name.rsplit(".", 1)[-1] or repr(type_)
Return a displayable name for the type. Args: type_: A class object. Returns: A string value describing the class name that can be used in a natural language sentence.
juraj-google-style
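A brief usage sketch for the `_get_type_name` entry above (illustrative only, not part of the dataset records; it relies only on the Python standard library):

import collections

# Built-in classes repr as "<class 'int'>", so the __qualname__ branch is taken.
assert _get_type_name(int) == "int"
# Dotted qualified names keep only the last component.
assert _get_type_name(collections.OrderedDict) == "OrderedDict"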
def sam2rnf(args):
    rnftools.mishmash.Source.recode_sam_reads(
        sam_fn=args.sam_fn,
        fastq_rnf_fo=args.fq_fo,
        fai_fo=args.fai_fo,
        genome_id=args.genome_id,
        number_of_read_tuples=10 ** 9,
        simulator_name=args.simulator_name,
        allow_unmapped=args.allow_unmapped,
    )
Convert SAM to RNF-based FASTQ with respect to argparse parameters. Args: args (...): Arguments parsed by argparse
codesearchnet
def _get_context():
    try:
        import google.colab
        import IPython
    except ImportError:
        pass
    else:
        if IPython.get_ipython() is not None:
            return _CONTEXT_COLAB

    try:
        import IPython
    except ImportError:
        pass
    else:
        ipython = IPython.get_ipython()
        if ipython is not None and ipython.has_trait('kernel'):
            return _CONTEXT_IPYTHON

    return _CONTEXT_NONE
Determine the most specific context that we're in. Returns: _CONTEXT_COLAB: If in Colab with an IPython notebook context. _CONTEXT_IPYTHON: If not in Colab, but we are in an IPython notebook context (e.g., from running `jupyter notebook` at the command line). _CONTEXT_NONE: Otherwise (e.g., by running a Python script at the command-line or using the `ipython` interactive shell).
codesearchnet
def __init__(self, max_age, client_chunksize=25, **kwargs):
    super(IterateAllClients, self).__init__(**kwargs)
    self.client_chunksize = client_chunksize
    self.max_age = max_age
Iterate over all clients in a threadpool. Args: max_age: Maximum age in seconds of clients to check. client_chunksize: Number of client URNs to process per chunk. **kwargs: Arguments passed to init.
juraj-google-style
def plot_accuracy(data, output_dir_path='.', output_filename='accuracy.png', width=10, height=8): output_path = os.path.join(output_dir_path, output_filename) max_val_data = get_epoch_max_val_acc(data) max_val_label = round(max_val_data['acc'].values[0], 4) max_epoch_data = data[data['epoch'] == data['epoch'].max()] plot = ggplot(data, aes('epoch', 'acc', color='factor(data)')) + \ geom_line(size=1, show_legend=False) + \ geom_vline(aes(xintercept='epoch', color='data'), data=max_val_data, alpha=0.5, show_legend=False) + \ geom_label(aes('epoch', 'acc'), data=max_val_data, label=max_val_label, nudge_y=-0.02, va='top', label_size=0, show_legend=False) + \ geom_text(aes('epoch', 'acc', label='data'), data=max_epoch_data, nudge_x=2, ha='center', show_legend=False) + \ geom_point(aes('epoch', 'acc'), data=max_val_data, show_legend=False) + \ labs(y='Accuracy', x='Epochs') + \ theme_bw(base_family='Arial', base_size=15) + \ scale_color_manual([' plot.save(output_path, width=width, height=height)
Plot accuracy. Args: data: Pandas dataframe in the expected format (with 'epoch', 'acc', and 'data' columns, as used by the plotting code).
juraj-google-style
def on_message(self, fragment):
    try:
        message = yield self._receive(fragment)
    except Exception as e:
        log.error("Unhandled exception receiving a message: %r: %r",
                  e, fragment, exc_info=True)
        self._internal_error("server failed to parse a message")

    try:
        if message:
            if _message_test_port is not None:
                _message_test_port.received.append(message)
            work = yield self._handle(message)
            if work:
                yield self._schedule(work)
    except Exception as e:
        log.error("Handler or its work threw an exception: %r: %r",
                  e, message, exc_info=True)
        self._internal_error("server failed to handle a message")

    raise gen.Return(None)
Process an individual wire protocol fragment. The websocket RFC specifies opcodes for distinguishing text frames from binary frames. Tornado passes us either a text or binary string depending on that opcode, we have to look at the type of the fragment to see what we got. Args: fragment (unicode or bytes) : wire fragment to process
juraj-google-style
def union(self, other):
    if not hasattr(other, "__iter__"):
        other = [other]

    bounds = self.bounds[:]
    for range in other:
        bounds += range.bounds

    bounds = self._union(bounds)
    range = VersionRange(None)
    range.bounds = bounds
    return range
OR together version ranges. Calculates the union of this range with one or more other ranges. Args: other: VersionRange object (or list of) to OR with. Returns: New VersionRange object representing the union.
juraj-google-style
def write8(self, offset, value):
    if not isinstance(offset, (int, long)):
        raise TypeError('Invalid offset type, should be integer.')
    if not isinstance(value, (int, long)):
        raise TypeError('Invalid value type, should be integer.')
    if value < 0 or value > 255:
        raise ValueError('Value out of bounds.')

    offset = self._adjust_offset(offset)
    self._validate_offset(offset, 1)
    self.mapping[offset:offset + 1] = struct.pack('B', value)
Write 8-bits to the specified `offset` in bytes, relative to the base physical address of the MMIO region. Args: offset (int, long): offset from base physical address, in bytes. value (int, long): 8-bit value to write. Raises: TypeError: if `offset` or `value` type are invalid. ValueError: if `offset` or `value` are out of bounds.
codesearchnet
def file_create(filename, settings):
    if len(settings) != 1:
        raise ValueError("Settings must only contain one item with key "
                         "'content'.")
    for k, v in settings.items():
        if k == "content":
            with open(filename, 'w') as f:
                f.write(v)
Creates a file. Args: filename (str): Filename. settings (dict): Must be {"content": actual_content}
juraj-google-style
def merge(cls,
          schema_list: Sequence['Schema'],
          name: Optional[str] = None,
          description: Optional[str] = None) -> 'Schema':
    fields = {}
    kw_field = None
    for schema in schema_list:
        for key, field in schema.fields.items():
            if key.is_const:
                if key not in fields or (field.origin is not None
                                         and fields[key].origin is not None
                                         and issubclass(field.origin, fields[key].origin)):
                    fields[key] = field
            elif kw_field is None:
                kw_field = field
    if kw_field is not None:
        fields[kw_field.key] = kw_field
    return Schema(list(fields.values()), name=name, description=description,
                  allow_nonconst_keys=True)
Merge multiple schemas into one. For fields shared by multiple schemas, the first one to appear will be used in the merged schema. Args: schema_list: A list of schemas to merge. name: (Optional) name of the merged schema. description: (Optional) description of the schema. Returns: The merged schema.
github-repos
def run_independently(self, op): self._independent_ops.append(op) op._set_attr('_independent_side_effects', attr_value_pb2.AttrValue(b=True))
Marks the given op as independent. Overrides any other rule for the op. Independent ops are guaranteed to execute before the return values, but are allowed to run in parallel with everything else. Use in programs which can guarantee that an op has side effects that don't affect any other op. Args: op: An operation
github-repos
def set_function_defaults(self, node: 'cfg.CFGNode',
                          defaults_var: 'cfg.Variable') -> None:
    defaults = self._extract_defaults(defaults_var)
    if defaults is None:
        defaults = [self.ctx.new_unsolvable(node)
                    for _ in self.signature.param_names]
    defaults = dict(zip(self.signature.param_names[-len(defaults):], defaults))
    self.signature.defaults = defaults
Attempts to set default arguments of a function. If defaults_var is not an unambiguous tuple (i.e. one that can be processed by abstract_utils.get_atomic_python_constant), every argument is made optional and a warning is issued. This function emulates __defaults__. Args: node: The node where default arguments are being set. Needed if we cannot get a useful value from defaults_var. defaults_var: a Variable with a single binding to a tuple of default values.
github-repos
def getFileKeys(self):
    files = self.getFileObjects()

    files_list = []
    for key, value in files.iteritems():
        if value:
            files_list.append(key)

    return files_list
Retrieve a list of file keys that have been read into the database. This is a utility method that can be used to programmatically access the GsshaPy file objects. Use these keys in conjunction with the dictionary returned by the getFileObjects method. Returns: list: List of keys representing file objects that have been read into the database.
codesearchnet
def delete_by_path(self, path):
    if not os.path.exists(path):
        raise IOError("Unknown path '%s'!" % path)

    if not path.startswith(self.path):
        raise IOError(
            "Path '%s' is not in the root of the storage ('%s')!" % (
                path, self.path
            )
        )

    if os.path.isfile(path):
        os.unlink(path)
        return self._recursive_remove_blank_dirs(path)

    shutil.rmtree(path)
    self._recursive_remove_blank_dirs(path)
Delete the file/directory identified by the `path` argument. Warning: `path` has to be in :attr:`path`. Args: path (str): Path of the file / directory you want to remove. Raises: IOError: If the file / directory doesn't exist, or is not in :attr:`path`.
juraj-google-style
def mean(data, n=3, **kwargs):
    if len(data[-n:]) < n:
        forecast = np.nan
    else:
        forecast = np.mean(data[-n:])
    return forecast
The mean forecast for the next point is the mean value of the previous ``n`` points in the series. Args: data (np.array): Observed data, presumed to be ordered in time. n (int): period over which to calculate the mean Returns: float: a single-valued forecast for the next value in the series.
codesearchnet
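A minimal usage sketch for the `mean` forecast entry above (illustrative only, not part of the dataset records; it assumes NumPy imported as `np`, as the function body already does):

import numpy as np

series = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
# Forecast is the mean of the last n=3 observations: (3 + 4 + 5) / 3 = 4.0.
assert mean(series, n=3) == 4.0
# With fewer than n observations available, the forecast is NaN.
assert np.isnan(mean(np.array([1.0, 2.0]), n=3))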
def Add(self, path, age=None):
    if not isinstance(path, string_types):
        raise ValueError("Only strings should be added to a URN.")

    result = rdfvalue.RDFURN(self.Copy(age))
    result.Update(path=utils.JoinPath(self._string_urn, path))
    return result
Add a relative stem to the current value and return a new RDFURN. Note that this returns an RDFURN, not a ClientURN since the resulting object would not pass validation. Args: path: A string containing a relative path. age: The age of the object. If None set to current time. Returns: A new RDFURN that can be chained. Raises: ValueError: if the path component is not a string.
juraj-google-style
def import_extension_module(ext_name):
    import importlib
    try:
        return importlib.import_module('.' + ext_name, 'nnabla_ext')
    except ImportError as e:
        from nnabla import logger
        logger.error('Extension `{}` does not exist.'.format(ext_name))
        raise e
Import an extension module by name. The extension modules are installed under the `nnabla_ext` package as namespace packages. All extension modules provide a unified set of APIs. Args: ext_name(str): Extension name. e.g. 'cpu', 'cuda', 'cudnn' etc. Returns: module An Python module of a particular NNabla extension. Example: .. code-block:: python ext = import_extension_module('cudnn') available_devices = ext.get_devices() print(available_devices) ext.device_synchronize(available_devices[0]) ext.clear_memory_cache()
codesearchnet
def decode_event(abi: ABI, log_: Dict) -> Dict:
    if isinstance(log_['topics'][0], str):
        log_['topics'][0] = decode_hex(log_['topics'][0])
    elif isinstance(log_['topics'][0], int):
        log_['topics'][0] = decode_hex(hex(log_['topics'][0]))
    event_id = log_['topics'][0]
    events = filter_by_type('event', abi)
    topic_to_event_abi = {
        event_abi_to_log_topic(event_abi): event_abi
        for event_abi in events
    }
    event_abi = topic_to_event_abi[event_id]
    return get_event_data(event_abi, log_)
Helper function to unpack event data using a provided ABI Args: abi: The ABI of the contract, not the ABI of the event log_: The raw event data Returns: The decoded event
juraj-google-style
def _serialize_quadratic_biases(quadratic, edgelist):
    quadratic_list = [quadratic[(u, v)] if (u, v) in quadratic else quadratic[(v, u)]
                      for u, v in edgelist]
    quadratic_bytes = struct.pack('<' + 'd' * len(quadratic), *quadratic_list)
    return base64.b64encode(quadratic_bytes).decode('utf-8')
Serializes the quadratic biases. Args: quadratic (dict): a dict of the form {edge1: bias1, ...} where each edge is of the form (node1, node2). edgelist (list): a list of the form [(node1, node2), ...]. Returns: str: base 64 encoded string of little endian 8 byte floats, one for each of the edges in quadratic. Ordered by edgelist. Example: >>> _serialize_quadratic_biases({(0, 1): -1, (1, 2): 1, (0, 2): .4}, ... [(0, 1), (1, 2), (0, 2)]) 'AAAAAAAA8L8AAAAAAADwP5qZmZmZmdk/'
codesearchnet
def __init__(self, hooks=None, scaffold=None, master='', config=None, checkpoint_dir=None, stop_grace_period_secs=120, checkpoint_filename_with_path=None): session_creator = ChiefSessionCreator(scaffold=scaffold, master=master, config=config, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path) super(SingularMonitoredSession, self).__init__(session_creator, hooks, should_recover=False, stop_grace_period_secs=stop_grace_period_secs)
Creates a SingularMonitoredSession. Args: hooks: An iterable of `SessionRunHook' objects. scaffold: A `Scaffold` used for gathering or building supportive ops. If not specified a default one is created. It's used to finalize the graph. master: `String` representation of the TensorFlow master to use. config: `ConfigProto` proto used to configure the session. checkpoint_dir: A string. Optional path to a directory where to restore variables. stop_grace_period_secs: Number of seconds given to threads to stop after `close()` has been called. checkpoint_filename_with_path: A string. Optional path to a checkpoint file from which to restore variables.
github-repos
def server(self, value): self._server = value self._connectionXML.set('server', value)
Set the connection's server property. Args: value: New server. String. Returns: Nothing.
juraj-google-style
def SetServerInformation(self, server, port): self._host = server self._port = port
Sets the server information. Args: server (str): hostname or IP address of the database server. port (int): port number of the database server.
juraj-google-style
def _get_longest_diag_dict(input_matrix, nonzero_idx):
    visited = set()
    diags = {}
    for idx in nonzero_idx:
        start_idx = torch.clone(idx)
        tuple_start_idx = tuple(start_idx.tolist())
        if tuple_start_idx in visited:
            continue
        visited.add(tuple_start_idx)
        cur_diag_len = 1
        start_idx += 1
        while start_idx[0] < input_matrix.shape[0] and start_idx[1] < input_matrix.shape[1]:
            tuple_start_idx = tuple(start_idx.tolist())
            visited.add(tuple_start_idx)
            if input_matrix[start_idx[0], start_idx[1]] == 1:
                cur_diag_len += 1
                start_idx += 1
            else:
                break
        diags[idx] = cur_diag_len
    return diags
Calculates the length of the longest diagonal sequence in a given matrix. Args: input_matrix (torch.Tensor): The input matrix. nonzero_idx (torch.Tensor): The indices of the non-zero elements in the matrix. Returns: dict: A dictionary where the keys are the indices of the non-zero elements and the values are the lengths of the longest diagonal sequences starting from those indices.
github-repos
def _get_scalars_plugin(self): if (scalars_metadata.PLUGIN_NAME in self._plugin_name_to_instance): return self._plugin_name_to_instance[scalars_metadata.PLUGIN_NAME] return None
Tries to get the scalars plugin. Returns: The scalars plugin. Or None if it is not yet registered.
codesearchnet
def get_key_pair(self, alias_name): uri = ((self.URI + '/keypair/') + alias_name) return self._client.get(uri)
Retrieves the public and private key pair associated with the specified alias name. Args: alias_name: Key pair associated with the RabbitMQ Returns: dict: RabbitMQ certificate
codesearchnet
def get_config(self, config='running-config', params=None, as_string=False):
    if config not in ['startup-config', 'running-config']:
        raise TypeError('invalid config name specified')

    command = 'show %s' % config
    if params:
        command += ' %s' % params

    result = self.run_commands(command, 'text')
    if as_string:
        return str(result[0]['output']).strip()
    return str(result[0]['output']).split('\n')
Retrieves the config from the node. This method will retrieve the config from the node as either a string or a list object. The config to retrieve can be specified as either the startup-config or the running-config. Args: config (str): Specifies to return either the node's startup-config or running-config. The default value is the running-config. params (str): A string of keywords to append to the command for retrieving the config. as_string (bool): Flag that determines the response. If True, then the configuration is returned as a raw string. If False, then the configuration is returned as a list. The default value is False. Returns: This method will return either a string or a list depending on the state of the as_string keyword argument. Raises: TypeError: If the specified config is not one of either 'running-config' or 'startup-config'
codesearchnet
def call(self, hidden_states, attention_mask: tf.Tensor | None=None, encoder_hidden_states: tf.Tensor | None=None, encoder_attention_mask: tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, cross_attn_layer_head_mask: tf.Tensor | None=None, past_key_value: Tuple[tf.Tensor] | None=None, training=False) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, training=training) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, training=training) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states present_key_value = present_key_value + cross_attn_present_key_value residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return (hidden_states, self_attn_weights, cross_attn_weights, present_key_value)
Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`tf.Tensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`tf.Tensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size `(decoder_attention_heads,)` cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. `(decoder_attention_heads,)` past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
github-repos
def delete(self, resource, timeout=-1): self._client.delete(resource=resource, timeout=timeout)
Delete all the labels for a resource. Args: resource (dict): Object to delete. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion.
juraj-google-style
def mutual_information(state, d0, d1=None):
    if d1 is None:
        d1 = int(len(state) / d0)
    mi = entropy(partial_trace(state, [0], dimensions=[d0, d1]))
    mi += entropy(partial_trace(state, [1], dimensions=[d0, d1]))
    mi -= entropy(state)
    return mi
Compute the mutual information of a bipartite state. Args: state (array_like): a bipartite state-vector or density-matrix. d0 (int): dimension of the first subsystem. d1 (int or None): dimension of the second subsystem. Returns: float: The mutual information S(rho_A) + S(rho_B) - S(rho_AB).
juraj-google-style
def positional_encoding_1d(self, batch_size, sequence_length, embedding_dim,
                           device='cpu', dtype=torch.float32):
    position = torch.arange(0, sequence_length, dtype=dtype, device=device).unsqueeze(1)
    index = torch.arange(0, embedding_dim, 2, dtype=dtype, device=device).unsqueeze(0)
    div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim))
    pos_encoding = position * div_term
    pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1)
    pos_encoding = pos_encoding.unsqueeze(dim=0).repeat(batch_size, 1, 1)
    return pos_encoding
Generate positional encodings Args: sequence_length (int): Sequence length embedding_dim (int): Embedding dimension Returns: torch.Tensor: Positional encodings.
github-repos
def clown_strike_ioc(self, ioc): r = requests.get('http: self._output(r.text)
Performs Clown Strike lookup on an IoC. Args: ioc - An IoC.
codesearchnet
def cos(x):
    if any_symbolic_tensors((x,)):
        return Cos().symbolic_call(x)
    return backend.numpy.cos(x)
Cosine, element-wise. Args: x: Input tensor. Returns: The corresponding cosine values.
github-repos
def reply_code_tuple(code: int) -> Tuple[int, int, int]:
    # Split the three-digit reply code into its hundreds, tens, and ones digits,
    # matching the return annotation and the docstring below.
    return code // 100, code // 10 % 10, code % 10
Return the reply code as a tuple. Args: code: The reply code. Returns: Each item in the tuple is the digit.
juraj-google-style
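A short usage sketch for `reply_code_tuple` above (illustrative only, written against the digit-splitting body shown in this record):

# A three-digit FTP-style reply code splits into hundreds, tens, and ones digits.
assert reply_code_tuple(250) == (2, 5, 0)
assert reply_code_tuple(502) == (5, 0, 2)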
def _parse_unknown_block_line(self, instrumentation_block, line):
    if line.startswith(_InstrumentationStructurePrefixes.STATUS):
        return self._parse_method_block_line(
            self._transition_instrumentation_block(
                instrumentation_block,
                new_state=_InstrumentationBlockStates.METHOD),
            line)
    elif (line.startswith(_InstrumentationStructurePrefixes.RESULT)
          or _InstrumentationStructurePrefixes.FAILED in line):
        return self._parse_result_block_line(
            self._transition_instrumentation_block(
                instrumentation_block,
                new_state=_InstrumentationBlockStates.RESULT),
            line)
    else:
        instrumentation_block.add_value(line)
        return instrumentation_block
Parses a line from the instrumentation output from the UNKNOWN parser state. Args: instrumentation_block: _InstrumentationBlock, the current instrumentation block, whose correct categorization is not yet known. line: string, the raw instrumentation output line used to determine the correct categorization. Returns: The next instrumentation block to continue parsing with. Usually, this is the same instrumentation block but with the state transitioned appropriately.
github-repos
def create(cls, session, attributes=None, relationships=None):
    resource_type = cls._resource_type()
    resource_path = cls._resource_path()
    url = session._build_url(resource_path)
    json = build_request_body(resource_type, None,
                              attributes=attributes,
                              relationships=relationships)
    process = cls._mk_one(session)
    return session.post(url, CB.json(201, process), json=json)
Create a resource of this resource type. This should only be called from sub-classes. Args: session(Session): The session to create the resource in. attributes(dict): Any attributes that are valid for the given resource type. relationships(dict): Any relationships that are valid for the given resource type. Returns: Resource: An instance of a resource.
juraj-google-style
def cross_product(p1, p2, o=(0, 0)):
    v1 = vector(o, p1)
    v2 = vector(o, p2)
    return v1[0] * v2[1] - v1[1] * v2[0]
Returns cross product Args: p1, p2: point (x, y) o: origin
juraj-google-style
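A usage sketch for `cross_product` above (illustrative only). The `vector` helper is not included in this record, so a minimal displacement-based definition is assumed here for the example:

def vector(o, p):
    # Assumed helper: displacement from origin o to point p.
    return (p[0] - o[0], p[1] - o[1])

# Unit x crossed with unit y about the origin gives +1 (counter-clockwise turn).
assert cross_product((1, 0), (0, 1)) == 1
# Swapping the points flips the sign.
assert cross_product((0, 1), (1, 0)) == -1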
def interpolate(hidden_states, ratio):
    batch_size, time_length, classes_num = hidden_states.shape
    upsampled = hidden_states[:, :, None, :].repeat(1, 1, ratio, 1)
    upsampled = upsampled.reshape(batch_size, time_length * ratio, classes_num)
    return upsampled
Interpolate data in time domain. This is used to compensate the resolution reduction in downsampling of a CNN. Args: hidden_states (`torch.FloatTensor` of shape (batch_size, time_length, classes_num)): Input hidden states ratio (`int`): The ratio of the length of the output to the length of the input.
github-repos
def get_source_url(obj):
    source_env_prefix = obj.context.config['source_env_prefix']
    task = obj.task
    log.debug('Getting source url for {} {}...'.format(obj.name, obj.task_id))
    repo = get_repo(obj.task, source_env_prefix=source_env_prefix)
    source = task['metadata']['source']
    if repo and not verify_repo_matches_url(repo, source):
        raise CoTError(
            "{name} {task_id}: {source_env_prefix} {repo} doesn't match source {source}!".format(
                name=obj.name, task_id=obj.task_id,
                source_env_prefix=source_env_prefix, repo=repo, source=source))
    log.info('{} {}: found {}'.format(obj.name, obj.task_id, source))
    return source
Get the source url for a Trust object. Args: obj (ChainOfTrust or LinkOfTrust): the trust object to inspect Raises: CoTError: if repo and source are defined and don't match Returns: str: the source url.
codesearchnet
def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.') decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs) outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward) if past_key_values is not None and return_dict: outputs, past = outputs outputs['past_key_values'] = unfreeze(past['cache']) return outputs elif past_key_values is not None and (not return_dict): outputs, past = outputs outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:] return outputs
Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import AutoTokenizer, FlaxBartForConditionalGeneration >>> model = FlaxBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn") >>> text = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> last_decoder_hidden_states = outputs.last_hidden_state ```
github-repos
def _add_data_types_and_routes_to_api(self, namespace, desc): env = self._get_or_create_env(namespace.name) for item in desc: if isinstance(item, AstTypeDef): api_type = self._create_type(env, item) namespace.add_data_type(api_type) self._check_canonical_name_available(item, namespace.name) elif isinstance(item, AstStructPatch) or isinstance(item, AstUnionPatch): base_name = self._get_base_name(item.name, namespace.name) self._patch_data_by_canonical_name[base_name] = (item, namespace) elif isinstance(item, AstRouteDef): route = self._create_route(env, item) namespace.add_route(route) self._check_canonical_name_available(item, namespace.name, allow_duplicate=True) elif isinstance(item, AstImport): pass elif isinstance(item, AstAlias): alias = self._create_alias(env, item) namespace.add_alias(alias) self._check_canonical_name_available(item, namespace.name) elif isinstance(item, AstAnnotationDef): annotation = self._create_annotation(env, item) namespace.add_annotation(annotation) self._check_canonical_name_available(item, namespace.name) elif isinstance(item, AstAnnotationTypeDef): annotation_type = self._create_annotation_type(env, item) namespace.add_annotation_type(annotation_type) self._check_canonical_name_available(item, namespace.name) else: raise AssertionError('Unknown AST node type %r' % item.__class__.__name__)
From the raw output of the parser, create forward references for each user-defined type (struct, union, route, and alias). Args: namespace (stone.api.Namespace): Namespace for definitions. desc (List[stone.stone.parser._Element]): All AST nodes in a spec file in the order they were defined. Should not include a namespace declaration.
juraj-google-style
def get_video_features(self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor]=None): pixel_values_videos = pixel_values_videos.type(self.visual.dtype) video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw) return video_embeds
Encodes videos into continuous embeddings that can be forwarded to the language model. Args: pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input videos. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM.
github-repos
def _get_numeric_sort_key_fn(self, table_numeric_values, value): if not table_numeric_values: return None all_values = list(table_numeric_values.values()) all_values.append(value) try: return get_numeric_sort_key_fn(all_values) except ValueError: return None
Returns the sort key function for comparing value to table values. The function returned will be a suitable input for the key param of sort(). See number_annotation_utils._get_numeric_sort_key_fn for details.

Args:
    table_numeric_values: Numeric values of a column
    value: Numeric value in the question

Returns:
    A key function to compare column and question values.
github-repos
def render(self, link_url, image_url, **kwargs): path = '%s/render' % self.path data = {'link_url': link_url, 'image_url': image_url} return self.gitlab.http_get(path, data, **kwargs)
Preview link_url and image_url after interpolation. Args: link_url (str): URL of the badge link image_url (str): URL of the badge image **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabRenderError: If the rendering failed Returns: dict: The rendering properties
juraj-google-style
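A hedged usage sketch for the badge `render` helper above, assuming a python-gitlab client; the server URL, token, project path and badge templates are placeholders, and the response keys follow GitLab's badge "render" endpoint.

```python
# Hypothetical usage via python-gitlab; URL, token and badge templates are
# placeholders. The response keys follow GitLab's badge "render" endpoint.
import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
project = gl.projects.get('group/project')
preview = project.badges.render(
    link_url='https://gitlab.example.com/%{project_path}',
    image_url='https://img.shields.io/badge/pipeline-%{default_branch}-green',
)
print(preview['rendered_link_url'], preview['rendered_image_url'])
```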
def to_json_string(self) -> str: dictionary = self.to_dict() return json.dumps(dictionary, indent=2, sort_keys=True) + '\n'
Serializes this instance to a JSON string. Returns: `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
github-repos
def generate(self, id_or_uri): uri = self._client.build_uri(id_or_uri) + "/generate" return self._client.get(uri)
Generates and returns a random range. Args: id_or_uri: ID or URI of range. Returns: dict: A dict containing a list with IDs.
juraj-google-style
def decode(self, probs, sizes=None): (_, max_probs) = torch.max(probs.transpose(0, 1), 2) strings = self.convert_to_strings(max_probs.view(max_probs.size(0), max_probs.size(1)), sizes) return self.process_strings(strings, remove_repetitions=True)
Returns the argmax decoding given the probability matrix. Removes repeated elements in the sequence, as well as blanks.

Arguments:
    probs: Tensor of character probabilities from the network. Expected shape of seq_length x batch x output_dim
    sizes (optional): Size of each sequence in the mini-batch

Returns:
    strings: sequences of the model's best guess for the transcription of the inputs
codesearchnet
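A standalone sketch of the same greedy (argmax) CTC decoding idea used by the decoder above; the label set is made up and index 0 is treated as the CTC blank.

```python
# Greedy CTC decoding sketch: take the argmax per step, collapse consecutive
# repeats, and drop blanks. The labels here are illustrative only.
import torch

def greedy_ctc_decode(probs, labels, blank_index=0):
    # probs: seq_length x batch x output_dim
    indices = probs.transpose(0, 1).argmax(dim=2)  # batch x seq_length
    decoded = []
    for seq in indices:
        chars, previous = [], None
        for idx in seq.tolist():
            # drop blanks and collapse consecutive repeats
            if idx != blank_index and idx != previous:
                chars.append(labels[idx])
            previous = idx
        decoded.append(''.join(chars))
    return decoded

labels = ['_', 'a', 'b', 'c']  # '_' is the blank
print(greedy_ctc_decode(torch.rand(5, 2, len(labels)), labels))
```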
def avro_union_type_to_beam_type(union_type: List) -> schema_pb2.FieldType: if len(union_type) == 2 and 'null' in union_type: for avro_type in union_type: if avro_type in AVRO_PRIMITIVES_TO_BEAM_PRIMITIVES: return schema_pb2.FieldType(atomic_type=AVRO_PRIMITIVES_TO_BEAM_PRIMITIVES[avro_type], nullable=True) return schemas.typing_to_runner_api(Any) return schemas.typing_to_runner_api(Any)
Convert an Avro union type to a Beam type.

If the union is a nullable union of an Avro primitive that has a corresponding Beam primitive, create a nullable Beam field of that type; otherwise return an Any type.

Args:
    union_type: the avro union type to convert.

Returns:
    the beam type of the avro union.
github-repos
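A quick check of the conversion above; it assumes `avro_union_type_to_beam_type` is importable from wherever it lives in your pipeline code.

```python
# Nullable primitive unions become nullable Beam primitives; anything else
# falls back to the Any schema type.
nullable_string = avro_union_type_to_beam_type(['null', 'string'])
print(nullable_string.nullable)                          # True, with a STRING atomic type
print(avro_union_type_to_beam_type(['string', 'long']))  # falls back to Any
```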
def pre_fetch(self, feed): if hasattr(self, '_list_name') and self._list_name and self._id_field: print('pre fetching %s' % self._list_name) ids = [feed_item[self._id_field] for feed_item in feed if isinstance(feed_item[self._id_field], int)] if ids: for i in range(0, len(ids), 500): results = self._api(iterate=True).list(profileId=self.profile_id, ids=ids[i:i + 500]).execute() for item in results: store.set(self._entity, [item['id']], item)
Pre-fetches all required items to be updated into the cache.

This increases performance for update operations.

Args:
    feed: List of feed items to retrieve
github-repos
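A generic sketch of the fetch-in-batches-of-500 pattern used above, decoupled from the reporting API client; `fetch_batch` stands in for the paged `list()` call and the cache is a plain dict.

```python
# Fetch ids in fixed-size batches and cache each returned item by id.
def fetch_in_batches(ids, fetch_batch, cache, batch_size=500):
    for start in range(0, len(ids), batch_size):
        for item in fetch_batch(ids[start:start + batch_size]):
            cache[item['id']] = item

cache = {}
fetch_in_batches(list(range(1200)), lambda batch: [{'id': i} for i in batch], cache)
print(len(cache))  # 1200
```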
def loopUntil( self, condition=None, timeout: float = 0) -> Iterator[object]: endTime = time.time() + timeout while True: test = condition and condition() if test: yield test return elif timeout and time.time() > endTime: yield False return else: yield test self.waitOnUpdate(endTime - time.time() if timeout else 0)
Iterate until condition is met, with optional timeout in seconds. The yielded value is that of the condition or False when timed out. Args: condition: Predicate function that is tested after every network update. timeout: Maximum time in seconds to wait. If 0 then no timeout is used.
juraj-google-style
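A hypothetical usage of `loopUntil`; `ib` and `trade` are placeholders for an ib_insync-style connection and trade object, not part of the source.

```python
# Poll until the order is done or 30 seconds pass; the generator yields the
# condition value each iteration and False on timeout.
for status in ib.loopUntil(lambda: trade.isDone(), timeout=30):
    if status is False:
        print('timed out waiting for the order to complete')
        break
```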
def get_lonlatalts(self): band = self.filehandle (xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), (gcps, crs) = self.get_gcps() longitudes = interpolate_xarray(xpoints, ypoints, gcp_lons, band.shape) latitudes = interpolate_xarray(xpoints, ypoints, gcp_lats, band.shape) altitudes = interpolate_xarray(xpoints, ypoints, gcp_alts, band.shape) longitudes.attrs['gcps'] = gcps longitudes.attrs['crs'] = crs latitudes.attrs['gcps'] = gcps latitudes.attrs['crs'] = crs altitudes.attrs['gcps'] = gcps altitudes.attrs['crs'] = crs return longitudes, latitudes, altitudes
Obtain GCPs and construct longitude, latitude and altitude arrays.

The measurement band (with its GCPs) is taken from ``self.filehandle``.

Returns:
    coordinates (tuple): A tuple with longitude, latitude and altitude arrays
juraj-google-style
def distinct_values_of(self, field, count_deleted=False): solr_params = ('facet=true&facet.field=%s&rows=0' % field) result = self.riak_http_search_query(self.index_name, solr_params, count_deleted) facet_fields = result['facet_counts']['facet_fields'][field] keys = facet_fields[0::2] vals = facet_fields[1::2] return dict(zip(keys, vals))
Uses riak http search query endpoint for advanced SOLR queries. Args: field (str): facet field count_deleted (bool): ignore deleted or not Returns: (dict): pairs of field values and number of counts
codesearchnet
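The even/odd slicing above turns Solr's flat facet list into a dict; here is a standalone illustration with made-up facet counts.

```python
# Solr returns facets as [value1, count1, value2, count2, ...]; slice into
# keys and values and zip them together.
facet_fields = ['open', 42, 'closed', 17, 'pending', 3]
keys = facet_fields[0::2]
vals = facet_fields[1::2]
print(dict(zip(keys, vals)))  # {'open': 42, 'closed': 17, 'pending': 3}
```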
def restore_initializer(filename, name_fn=None, collection=tf.GraphKeys.GLOBAL_VARIABLES): def _restore_initializer(getter, name, *args, **kwargs): 'Gets variable with restore initializer.' collections = kwargs['collections'] if (collections is None): collections = [tf.GraphKeys.GLOBAL_VARIABLES] if (kwargs['trainable'] and (tf.GraphKeys.TRAINABLE_VARIABLES not in collections)): collections += [tf.GraphKeys.TRAINABLE_VARIABLES] if ((collection is None) or (collection in collections)): if (name_fn is not None): var_name_in_checkpoint = name_fn(name) else: var_name_in_checkpoint = name tf.logging.info("Restoring '%s' from '%s' into variable '%s'", var_name_in_checkpoint, filename, name) kwargs['initializer'] = snt.restore_initializer(filename, var_name_in_checkpoint, scope='') return getter(name, *args, **kwargs) return _restore_initializer
Custom getter to restore all variables with `snt.restore_initializer`. Args: filename: The filename of the checkpoint. name_fn: A function which can map the name of the variable requested. This allows restoring variables with values having different names in the checkpoint. collection: Only set the restore initializer for variables in this collection. If `None`, it will attempt to restore all variables. By default `tf.GraphKeys.GLOBAL_VARIABLES`. Returns: A restore_initializer custom getter, which is a function taking arguments (getter, name, *args, **kwargs).
codesearchnet
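A possible usage of the custom getter above with TF1-style variable scopes (consistent with the `tf.GraphKeys` usage in the snippet); the checkpoint path and the name mapping are placeholders.

```python
# Variables created under the scope are initialized from the checkpoint,
# optionally remapping names via name_fn.
import tensorflow as tf

custom_getter = restore_initializer(
    '/tmp/pretrained.ckpt',
    name_fn=lambda name: 'pretrained/' + name)

with tf.variable_scope('net', custom_getter=custom_getter):
    w = tf.get_variable('w', shape=[128, 10])  # restored from the checkpoint
```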
def cysparse_type_to_real_sum_cysparse_type(cysparse_type): r_type = None if cysparse_type in ['INT32_t', 'UINT32_t', 'INT64_t', 'UINT64_t']: r_type = 'FLOAT64_t' elif cysparse_type in ['FLOAT32_t', 'FLOAT64_t']: r_type = 'FLOAT64_t' elif cysparse_type in ['FLOAT128_t']: r_type = 'FLOAT128_t' elif cysparse_type in ['COMPLEX64_t', 'COMPLEX128_t']: r_type = 'FLOAT64_t' elif cysparse_type in ['COMPLEX256_t']: r_type = 'FLOAT128_t' else: raise TypeError("Not a recognized type") assert r_type in ['FLOAT64_t', 'FLOAT128_t'] return r_type
Returns the best **real** type for a **real** sum for a given type.

For instance:

    INT32_t -> FLOAT64_t

Args:
    cysparse_type: Name of the CySparse element type (e.g. ``INT32_t``).

Returns:
    ``FLOAT64_t`` or ``FLOAT128_t``, depending on the precision of ``cysparse_type``.
juraj-google-style
def report_delete(config, auth, report_id=None, name=None): if config.verbose: print('DBM DELETE:', report_id or name) report = report_get(config, auth, report_id, name) if report: API_DBM(config, auth).queries().delete(queryId=report['queryId']).execute() elif config.verbose: print('DBM DELETE: No Report')
Deletes a DBM report based on name or ID.

Args:
    * auth: (string) Either user or service.
    * report_id: (int) ID of DBM report to delete (either report_id or name).
    * name: (string) Name of report to delete (either report_id or name).

Returns:
    * None
github-repos
def save(self, filepath): filepath = str(filepath) if not filepath.endswith('.weights.h5'): raise ValueError(f'Invalid `filepath` argument: expected a `.weights.h5` extension. Received: filepath={filepath}') weights_store = H5IOStore(filepath, mode='w') def _save(weights_dict, weights_store, inner_path): vars_to_create = {} for name, value in weights_dict.items(): if isinstance(value, dict): if value: _save(weights_dict[name], weights_store, inner_path=inner_path + '/' + name) else: vars_to_create[name] = value if vars_to_create: var_store = weights_store.make(inner_path) for name, value in vars_to_create.items(): var_store[name] = value _save(self.weights_dict, weights_store, inner_path='') weights_store.close()
Save the edited weights file. Args: filepath: Path to save the file to. Must be a `.weights.h5` file.
github-repos
def get_rel_pos(q_size, k_size, rel_pos): max_rel_dist = int(2 * max(q_size, k_size) - 1) if rel_pos.shape[0] != max_rel_dist: rel_pos_resized = nn.functional.interpolate(rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode='linear') rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) else: rel_pos_resized = rel_pos q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0) return rel_pos_resized[relative_coords.long()]
Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size (`int`): Size of query q. k_size (`int`): Size of key k. rel_pos (`torch.Tensor`): Relative position embeddings (num_embeddings, num_channels). Returns: Extracted positional embeddings according to relative positions.
github-repos
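A tiny numeric check of the relative-position lookup above: with `q_size = k_size = 3` there are `2*3 - 1 = 5` embeddings, and offsets `q - k` shifted by `k_size - 1` index rows 0..4. It uses `get_rel_pos` as defined above; the embedding values are just the row indices to make the lookup visible.

```python
import torch

q_size = k_size = 3
rel_pos = torch.arange(2 * max(q_size, k_size) - 1).float().unsqueeze(-1)  # shape (5, 1)
print(get_rel_pos(q_size, k_size, rel_pos).squeeze(-1))
# tensor([[2., 1., 0.],
#         [3., 2., 1.],
#         [4., 3., 2.]])
```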
def distances_from_parent(self, leaves=True, internal=True, unlabeled=False): if not isinstance(leaves, bool): raise TypeError("leaves must be a bool") if not isinstance(internal, bool): raise TypeError("internal must be a bool") if not isinstance(unlabeled, bool): raise TypeError("unlabeled must be a bool") if leaves or internal: for node in self.traverse_preorder(): if ((leaves and node.is_leaf()) or (internal and not node.is_leaf())) and (unlabeled or node.label is not None): if node.edge_length is None: yield (node,0) else: yield (node,node.edge_length)
Generator over the node-to-parent distances of this ``Tree``; (node,distance) tuples

Args:
    ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``

    ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``

    ``unlabeled`` (``bool``): ``True`` to include unlabeled nodes, otherwise ``False``
juraj-google-style
def r_edges(step):
    (rbot, rtop) = misc.get_rbounds(step)
    centers = (step.rprof.loc[:, 'r'].values + rbot)
    edges = ((centers[:-1] + centers[1:]) / 2)
    edges = np.insert(edges, 0, rbot)
    edges = np.append(edges, rtop)
    return (edges, edges)
Cell border. Args: step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData instance. Returns: tuple of :class:`numpy.array`: the position of the bottom and top walls of the cells. The two elements of the tuple are identical.
codesearchnet
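The midpoint/edge construction above, shown on plain numbers; `rbot`, `rtop` and the cell centers are made up.

```python
# Edges are the midpoints between consecutive centers, bracketed by the
# bottom and top wall radii.
import numpy as np

rbot, rtop = 1.0, 2.0
centers = np.array([1.1, 1.4, 1.8])
edges = (centers[:-1] + centers[1:]) / 2
edges = np.insert(edges, 0, rbot)
edges = np.append(edges, rtop)
print(edges)  # [1.   1.25 1.6  2.  ]
```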
def pybel_to_json(molecule, name=None): atoms = [{'element': table.GetSymbol(atom.atomicnum), 'location': list(atom.coords)} for atom in molecule.atoms] for (json_atom, pybel_atom) in zip(atoms, molecule.atoms): if (pybel_atom.partialcharge != 0): json_atom['charge'] = pybel_atom.partialcharge if pybel_atom.OBAtom.HasData('_atom_site_label'): obatom = pybel_atom.OBAtom json_atom['label'] = obatom.GetData('_atom_site_label').GetValue() if pybel_atom.OBAtom.HasData('color'): obatom = pybel_atom.OBAtom json_atom['color'] = obatom.GetData('color').GetValue() bonds = [{'atoms': [b.GetBeginAtom().GetIndex(), b.GetEndAtom().GetIndex()], 'order': b.GetBondOrder()} for b in ob.OBMolBondIter(molecule.OBMol)] output = {'atoms': atoms, 'bonds': bonds, 'units': {}} if hasattr(molecule, 'unitcell'): uc = molecule.unitcell output['unitcell'] = [[v.GetX(), v.GetY(), v.GetZ()] for v in uc.GetCellVectors()] density = (sum((atom.atomicmass for atom in molecule.atoms)) / (uc.GetCellVolume() * 0.6022)) output['density'] = density output['units']['density'] = 'kg / L' element_count = Counter((table.GetSymbol(a.atomicnum) for a in molecule)) hill_count = [] for element in ['C', 'H']: if (element in element_count): hill_count += [(element, element_count[element])] del element_count[element] hill_count += sorted(element_count.items()) div = (reduce(gcd, (c[1] for c in hill_count)) if hasattr(molecule, 'unitcell') else 1) output['formula'] = ''.join(((n if ((c / div) == 1) else ('%s%d' % (n, (c / div)))) for (n, c) in hill_count)) output['molecular_weight'] = (molecule.molwt / div) output['units']['molecular_weight'] = 'g / mol' if name: output['name'] = name return output
Converts a pybel molecule to json. Args: molecule: An instance of `pybel.Molecule` name: (Optional) If specified, will save a "name" property Returns: A Python dictionary containing atom and bond data
codesearchnet
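A standalone sketch of the Hill-formula construction used above: carbon first, then hydrogen, then the remaining elements alphabetically, with counts divided by their gcd (in the function above the gcd reduction only applies to periodic structures). The element counts here are invented.

```python
from collections import Counter
from functools import reduce
from math import gcd

element_count = Counter({'C': 4, 'H': 8, 'O': 2})
hill_count = []
for element in ('C', 'H'):
    if element in element_count:
        hill_count.append((element, element_count[element]))
        del element_count[element]
hill_count += sorted(element_count.items())
div = reduce(gcd, (c for _, c in hill_count))
formula = ''.join(n if c // div == 1 else '%s%d' % (n, c // div) for n, c in hill_count)
print(formula)  # C2H4O
```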
def get_region(b): remap = {None: 'us-east-1', 'EU': 'eu-west-1'} region = b.get('Location', {}).get('LocationConstraint') return remap.get(region, region)
Tries to get the bucket region from Location.LocationConstraint Special cases: LocationConstraint EU defaults to eu-west-1 LocationConstraint null defaults to us-east-1 Args: b (object): A bucket object Returns: string: an aws region string
codesearchnet
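A quick illustration of the special cases handled above; the bucket dicts are hand-made stand-ins for boto3 `get_bucket_location` output.

```python
print(get_region({'Location': {'LocationConstraint': None}}))          # us-east-1
print(get_region({'Location': {'LocationConstraint': 'EU'}}))          # eu-west-1
print(get_region({'Location': {'LocationConstraint': 'ap-south-1'}}))  # ap-south-1
```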
def get_vocab(self) -> Dict[str, int]: raise NotImplementedError()
Returns the vocabulary as a dictionary of token to index. `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the vocab. Returns: `Dict[str, int]`: The vocabulary.
github-repos
def _get_expiration(self, headers: dict) -> int: expiration_str = headers.get('expires') if (not expiration_str): return 0 expiration = datetime.strptime(expiration_str, '%a, %d %b %Y %H:%M:%S %Z') delta = (expiration - datetime.utcnow()).total_seconds() return math.ceil(abs(delta))
Gets the expiration time of the data from the response headers. Args: headers: dictionary of headers from ESI Returns: value of seconds from now the data expires
codesearchnet
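A self-contained illustration of the Expires-header math above; the header value is fabricated (real ESI responses send it in GMT).

```python
# Build a fake Expires header five minutes in the future, parse it with the
# same format string, and compute the rounded-up seconds until expiry.
import math
from datetime import datetime, timedelta

expires_header = (datetime.utcnow() + timedelta(minutes=5)).strftime('%a, %d %b %Y %H:%M:%S GMT')
expiration = datetime.strptime(expires_header, '%a, %d %b %Y %H:%M:%S %Z')
seconds_left = math.ceil(abs((expiration - datetime.utcnow()).total_seconds()))
print(seconds_left)  # roughly 300
```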
def reactions_add(self, *, name: str, **kwargs) -> SlackResponse: kwargs.update({"name": name}) return self.api_call("reactions.add", json=kwargs)
Adds a reaction to an item. Args: name (str): Reaction (emoji) name. e.g. 'thumbsup' channel (str): Channel where the message to add reaction to was posted. e.g. 'C1234567890' timestamp (str): Timestamp of the message to add reaction to. e.g. '1234567890.123456'
juraj-google-style
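A hypothetical call, assuming `client` is a Slack WebClient-style object exposing the method above; the channel and timestamp values are placeholders.

```python
response = client.reactions_add(
    name='thumbsup',
    channel='C1234567890',
    timestamp='1234567890.123456',
)
print(response['ok'])
```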
class AriaCrossAttention(nn.Module): def __init__(self, config: AriaConfig, dropout_rate: float=0): super().__init__() hidden_size = config.vision_config.hidden_size num_heads = config.vision_config.num_attention_heads self.num_heads = num_heads self.q_proj = nn.Linear(hidden_size, hidden_size, bias=False) self.k_proj = nn.Linear(hidden_size, hidden_size, bias=False) self.v_proj = nn.Linear(hidden_size, hidden_size, bias=False) self.multihead_attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True) self.linear = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(dropout_rate) self.layer_norm = nn.LayerNorm(hidden_size) self.layer_norm_kv = nn.LayerNorm(hidden_size) def forward(self, key_value_states, hidden_states, attn_mask=None): query = self.q_proj(self.layer_norm(hidden_states)) key_value_states = self.layer_norm_kv(key_value_states) key = self.k_proj(key_value_states) value = self.v_proj(key_value_states) attn_output, _ = self.multihead_attn(query, key, value, attn_mask=attn_mask) attn_output = self.dropout(self.linear(attn_output)) return attn_output
Aria Cross-Attention module.

Args:
    config (`AriaConfig`): The configuration to use.
    dropout_rate (`float`, *optional*, defaults to 0): Dropout rate applied to the projected attention output.
github-repos
def __delete__(self, obj): if self.name in obj._property_values: old_value = obj._property_values[self.name] del obj._property_values[self.name] self.trigger_if_changed(obj, old_value) if self.name in obj._unstable_default_values: del obj._unstable_default_values[self.name]
Implement the deleter for the Python `descriptor protocol`_. Args: obj (HasProps) : An instance to delete this property from
juraj-google-style
def _validate_schema_and_ast(schema, ast):
    core_graphql_errors = validate(schema, ast)
    unsupported_default_directives = frozenset([frozenset(['include', frozenset(['FIELD', 'FRAGMENT_SPREAD', 'INLINE_FRAGMENT']), frozenset(['if'])]), frozenset(['skip', frozenset(['FIELD', 'FRAGMENT_SPREAD', 'INLINE_FRAGMENT']), frozenset(['if'])]), frozenset(['deprecated', frozenset(['ENUM_VALUE', 'FIELD_DEFINITION']), frozenset(['reason'])])])
    expected_directives = {frozenset([directive.name, frozenset(directive.locations), frozenset(six.viewkeys(directive.args))]) for directive in DIRECTIVES}
    actual_directives = {frozenset([directive.name, frozenset(directive.locations), frozenset(six.viewkeys(directive.args))]) for directive in schema.get_directives()}
    missing_directives = (expected_directives - actual_directives)
    if missing_directives:
        missing_message = u'The following directives were missing from the provided schema: {}'.format(missing_directives)
        core_graphql_errors.append(missing_message)
    extra_directives = ((actual_directives - expected_directives) - unsupported_default_directives)
    if extra_directives:
        extra_message = u'The following directives were supplied in the given schema, but are not supported by the GraphQL compiler: {}'.format(extra_directives)
        core_graphql_errors.append(extra_message)
    return core_graphql_errors
Validate the supplied graphql schema and ast. This method wraps around graphql-core's validation to enforce a stricter requirement of the schema -- all directives supported by the compiler must be declared by the schema, regardless of whether each directive is used in the query or not. Args: schema: GraphQL schema object, created using the GraphQL library ast: abstract syntax tree representation of a graphql query Returns: list containing schema and/or query validation errors
codesearchnet
async def evaluate_model(eval_model_path, target_model_path, sgf_dir, seed): lines = (await run('bazel-bin/cc/eval', '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'eval.flags')), '--model={}'.format(eval_model_path), '--model_two={}'.format(target_model_path), '--sgf_dir={}'.format(sgf_dir), '--seed={}'.format(seed))) result = '\n'.join(lines[(- 7):]) logging.info(result) (eval_stats, target_stats) = parse_win_stats_table(result, 2) num_games = (eval_stats.total_wins + target_stats.total_wins) win_rate = (eval_stats.total_wins / num_games) logging.info('Win rate %s vs %s: %.3f', eval_stats.model_name, target_stats.model_name, win_rate) return win_rate
Evaluate one model against a target.

Args:
    eval_model_path: the path to the model to evaluate.
    target_model_path: the path to the model to compare to.
    sgf_dir: directory path to write SGF output to.
    seed: random seed to use when running eval.

Returns:
    The win-rate of eval_model against target_model in the range [0, 1].
codesearchnet