Dataset columns: code (string, 20 to 4.93k chars), docstring (string, 33 to 1.27k chars), source (string, 3 classes). Each entry below is one row, listed as code, then docstring, then source.
def sort(self, cmp=None, key=None, reverse=False): def _DefaultKey(value): result = [] for key in self.header: try: result.append(float(value[key])) except ValueError: result.append(value[key]) return result key = key or _DefaultKey new_table = self._table[1:] if cmp is not None: key = cmp_to_key(cmp) new_table.sort(key=key, reverse=reverse) self._table = [self.header] self._table.extend(new_table) for index, row in enumerate(self._table): row.row = index
Sorts rows in the texttable. Args: cmp: func, non default sort algorithm to use. key: func, applied to each element before sorting. reverse: bool, reverse order of sort.
juraj-google-style
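A minimal, self-contained sketch of the cmp-to-key pattern used in the sort() entry above, assuming plain lists of row values rather than the texttable object from the source:

from functools import cmp_to_key

rows = [["b", "2"], ["a", "10"], ["a", "2"]]

def compare(left, right):
    # Order by the first column, then numerically by the second column.
    if left[0] != right[0]:
        return -1 if left[0] < right[0] else 1
    return int(left[1]) - int(right[1])

rows.sort(key=cmp_to_key(compare))
print(rows)  # [['a', '2'], ['a', '10'], ['b', '2']]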
def reduce_by_device(parallelism, data, reduce_fn): unique_devices = [] device_to_data = {} for (dev, datum) in zip(parallelism.devices, data): if (dev not in device_to_data): unique_devices.append(dev) device_to_data[dev] = [datum] else: device_to_data[dev].append(datum) device_parallelism = Parallelism(unique_devices) grouped_data = [device_to_data[dev] for dev in unique_devices] return (device_parallelism, device_parallelism(reduce_fn, grouped_data))
Reduces data per device. This can be useful, for example, if we want to all-reduce n tensors on k<n devices (like during eval when we have only one device). We call reduce_by_device() to first sum the tensors per device, then call our usual all-reduce operation to create one sum per device, followed by expand_by_device, to create the appropriate number of pointers to these results. See all_reduce_ring() below for an example of how this is used. Args: parallelism: an expert_utils.Parallelism object data: a list of Tensors with length parallelism.n reduce_fn: a function taking a list of Tensors. e.g. tf.add_n Returns: device_parallelism: a Parallelism object with each device listed only once. reduced_data: A list of Tensors, one per device.
codesearchnet
def record_corrected_value(self, value, expected_interval, count=1): while True: if (not self.record_value(value, count)): return False if ((value <= expected_interval) or (expected_interval <= 0)): return True value -= expected_interval
Record a new value into the histogram and correct for coordinated omission if needed Args: value: the value to record (must be in the valid range) expected_interval: the expected interval between 2 value samples count: incremental count (defaults to 1)
codesearchnet
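A toy illustration of the coordinated-omission correction loop above, using a plain Counter as a hypothetical stand-in for the HDR histogram (this is not the hdrhistogram library's API):

from collections import Counter

def record_corrected(hist, value, expected_interval, count=1):
    # Back-fill synthetic samples at expected_interval steps, mirroring the loop above.
    while True:
        hist[value] += count
        if value <= expected_interval or expected_interval <= 0:
            return
        value -= expected_interval

hist = Counter()
record_corrected(hist, value=350, expected_interval=100)
print(sorted(hist.items()))  # [(50, 1), (150, 1), (250, 1), (350, 1)]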
def update_endpoint(self, endpoint_name, endpoint_config_name): if (not _deployment_entity_exists((lambda : self.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)))): raise ValueError('Endpoint with name "{}" does not exist; please use an existing endpoint name'.format(endpoint_name)) self.sagemaker_client.update_endpoint(EndpointName=endpoint_name, EndpointConfigName=endpoint_config_name) return endpoint_name
Update an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request Raise an error if endpoint with endpoint_name does not exist. Args: endpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` to update. endpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to deploy. Returns: str: Name of the Amazon SageMaker ``Endpoint`` being updated.
codesearchnet
def plot(self): plt.rcParams['xtick.major.pad'] = '6' plt.rcParams['ytick.major.pad'] = '6' plt.rcParams['axes.linewidth'] = 2 npoint = 1000 xs = np.linspace(0, 1, npoint) xs_reverse_converted = InterfacialReactivity._reverse_convert(xs, self.factor1, self.factor2) energies = [self._get_energy(x) for x in xs_reverse_converted] plt.plot(xs, energies, 'k-') kinks = self.get_kinks() (_, x_kink, energy_kink, _, _) = zip(*kinks) plt.scatter(x_kink, energy_kink, marker='o', c='blue', s=20) plt.scatter(self.minimum()[0], self.minimum()[1], marker='*', c='red', s=300) for (index, x, energy, _, _) in kinks: plt.annotate(index, xy=(x, energy), xytext=(5, 30), textcoords='offset points', ha='right', va='bottom', arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')).draggable() plt.xlim([(- 0.05), 1.05]) if self.norm: plt.ylabel('Energy (eV/atom)') else: plt.ylabel('Energy (eV/f.u.)') plt.xlabel('$x$ in $x$ {} + $(1-x)$ {}'.format(self.c1.reduced_formula, self.c2.reduced_formula)) return plt
Plots reaction energy as a function of mixing ratio x in self.c1 - self.c2 tie line using pylab. Returns: Pylab object that plots reaction energy as a function of mixing ratio x.
codesearchnet
def run_without_time_limit(self, cmd): cmd = ([DOCKER_BINARY, 'run', DOCKER_NVIDIA_RUNTIME] + cmd) logging.info('Docker command: %s', ' '.join(cmd)) start_time = time.time() retval = subprocess.call(cmd) elapsed_time_sec = int((time.time() - start_time)) logging.info('Elapsed time of attack: %d', elapsed_time_sec) logging.info('Docker retval: %d', retval) if (retval != 0): logging.warning('Docker returned non-zero retval: %d', retval) raise WorkerError(('Docker returned non-zero retval ' + str(retval))) return elapsed_time_sec
Runs a docker command without a time limit. Args: cmd: list with the command line arguments which are passed to the docker binary Returns: how long it took to run the submission, in seconds Raises: WorkerError: if an error occurred during execution of the submission
codesearchnet
def matrix(self) -> np.ndarray: num_qubits = self.num_qubits() if (num_qubits is None): raise ValueError('Unknown number of qubits') num_dim = (2 ** num_qubits) result = np.zeros((num_dim, num_dim), dtype=np.complex128) for (gate, coefficient) in self.items(): result += (protocols.unitary(gate) * coefficient) return result
Reconstructs matrix of self using unitaries of underlying gates. Raises: TypeError: if any of the gates in self does not provide a unitary.
codesearchnet
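The reconstruction above is just a coefficient-weighted sum of unitaries; a standalone numpy sketch of the same idea for single-qubit Paulis (no cirq dependency, the names and coefficients here are illustrative):

import numpy as np

I = np.eye(2, dtype=np.complex128)
Z = np.array([[1, 0], [0, -1]], dtype=np.complex128)

# Linear combination 0.5*I + 0.5*Z, analogous to summing protocols.unitary(gate) * coefficient.
terms = {"I": (I, 0.5), "Z": (Z, 0.5)}
result = np.zeros((2, 2), dtype=np.complex128)
for unitary, coefficient in terms.values():
    result += unitary * coefficient
print(result)  # projector onto |0>: [[1, 0], [0, 0]]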
def quality(self, tests, alias=None): this_tests = ((((tests.get('each', []) + tests.get('Each', [])) + tests.get('EACH', [])) + tests.get(self.mnemonic, [])) + utils.flatten_list([tests.get(a) for a in self.get_alias(alias=alias)])) this_tests = filter(None, this_tests) if (not tests.get(self.mnemonic, 1)): this_tests = [] return {test.__name__: test(self) for test in this_tests}
Run a series of tests and return the corresponding results. Args: tests (dict): a mapping of mnemonics (or 'each'/'Each'/'EACH') to lists of test functions. alias (dict): a dictionary mapping mnemonics to lists of mnemonics. Returns: dict. The results, keyed by test function name. Stick to booleans (True = pass) or ints.
codesearchnet
def update(self, iterable): for pair in pairwise_longest(iterable, fillvalue=_FILL): self._edges.append(pair) self._results = None
Update with an ordered iterable of items. Args: iterable: An ordered iterable of items. The relative order of the items in this iterable will be respected in the TopoSet (in the absence of cycles).
juraj-google-style
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
    vision_data = {}
    if image_sizes is not None:
        images_kwargs = AyaVisionProcessorKwargs._defaults.get('images_kwargs', {})
        images_kwargs.update(kwargs)
        num_image_patches = [self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes]
        # The right-hand side below was truncated in the source; (img_size // patch_size) ** 2 is an assumed reconstruction.
        token_per_patch = (self.img_size // self.patch_size) ** 2
        num_image_tokens = [token_per_patch + 3 + sum(token_per_patch + 1 for _ in range(1, num_patches)) for num_patches in num_image_patches]
        vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})
    return MultiModalData(**vision_data)
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`List[List[int]]`, *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data.
github-repos
def add_archive_as_dir(self, zip_file_obj): BalancedDiscStorage._check_interface(zip_file_obj) file_hash = self._get_hash(zip_file_obj) dir_path = self._create_dir_path(file_hash) full_path = os.path.join(dir_path, file_hash) if os.path.exists(full_path): shutil.rmtree(full_path) os.mkdir(full_path) try: self._unpack_zip(zip_file_obj, full_path) except Exception: shutil.rmtree(full_path) raise return PathAndHash(path=full_path, hash=file_hash)
Add archive to the storage and unpack it. Args: zip_file_obj (file): Opened file-like object. Returns: obj: Path where the `zip_file_obj` was unpacked wrapped in \ :class:`.PathAndHash` structure. Raises: ValueError: If there is too many files in .zip archive. \ See :attr:`._max_zipfiles` for details. AssertionError: If the `zip_file_obj` is not file-like object.
codesearchnet
def generate_session_id(secret_key=settings.secret_key_bytes(), signed=settings.sign_sessions()): secret_key = _ensure_bytes(secret_key) if signed: base_id = _get_random_string(secret_key=secret_key) return ((base_id + '-') + _signature(base_id, secret_key)) else: return _get_random_string(secret_key=secret_key)
Generate a random session ID. Typically, each browser tab connected to a Bokeh application has its own session ID. In production deployments of a Bokeh app, session IDs should be random and unguessable - otherwise users of the app could interfere with one another. If session IDs are signed with a secret key, the server can verify that the generator of the session ID was "authorized" (the generator had to know the secret key). This can be used to have a separate process, such as another web application, which generates new sessions on a Bokeh server. This other process may require users to log in before redirecting them to the Bokeh server with a valid session ID, for example. Args: secret_key (str, optional) : Secret key (default: value of 'BOKEH_SECRET_KEY' env var) signed (bool, optional) : Whether to sign the session ID (default: value of 'BOKEH_SIGN_SESSIONS' env var)
codesearchnet
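A minimal sketch of the sign-and-verify scheme described above, built directly on hmac; this shows the general pattern and is an assumption, not Bokeh's exact _signature implementation:

import hmac
import hashlib
import secrets

def sign_session_id(secret_key: bytes) -> str:
    base_id = secrets.token_urlsafe(32)
    sig = hmac.new(secret_key, base_id.encode("utf-8"), hashlib.sha256).hexdigest()
    return base_id + "-" + sig

def check_session_id(session_id: str, secret_key: bytes) -> bool:
    base_id, _, sig = session_id.rpartition("-")
    expected = hmac.new(secret_key, base_id.encode("utf-8"), hashlib.sha256).hexdigest()
    return hmac.compare_digest(sig, expected)

key = b"my-secret"
sid = sign_session_id(key)
print(check_session_id(sid, key))        # True
print(check_session_id(sid + "x", key))  # False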
def make_action(self, fn, schema_parser, meta): validate_input = validate_output = None if "$input" in meta: with MarkKey("$input"): validate_input = schema_parser.parse(meta["$input"]) if "$output" in meta: with MarkKey("$output"): validate_output = schema_parser.parse(meta["$output"]) def action(data): if validate_input: try: data = validate_input(data) except Invalid as ex: return abort(400, "InvalidData", str(ex)) if isinstance(data, dict): rv = fn(**data) else: rv = fn(data) else: rv = fn() rv, status, headers = unpack(rv) if validate_output: try: rv = validate_output(rv) except Invalid as ex: return abort(500, "ServerError", str(ex)) return rv, status, headers return action
Make a resource's method into an action. Validates input and output against the schemas in meta. If there is no input schema, fn is called without params. If there is no output schema, the return value is not validated. Args: fn: the resource's method schema_parser: parser for the schemas in meta meta: metadata of the action
juraj-google-style
def unauthorized(cls, errors=None): if cls.expose_status: cls.response.content_type = 'application/json' cls.response._status_line = '401 Unauthorized' return cls(401, errors=errors).to_json
Shortcut API for HTTP 401 `Unauthorized` response. Args: errors (list): Response key/value data. Returns: WSResponse Instance.
codesearchnet
def roc_auc_score(gold, probs, ignore_in_gold=[], ignore_in_pred=[]): gold = arraylike_to_numpy(gold) if len(ignore_in_pred) > 0: raise ValueError("ignore_in_pred not defined for ROC-AUC score.") keep = [x not in ignore_in_gold for x in gold] gold = gold[keep] probs = probs[keep, :] gold_s = pred_to_prob(torch.from_numpy(gold), k=probs.shape[1]).numpy() return skm.roc_auc_score(gold_s, probs)
Compute the ROC AUC score, given the gold labels and predicted probs. Args: gold: A 1d array-like of gold labels probs: A 2d array-like of predicted probabilities ignore_in_gold: A list of labels for which elements having that gold label will be ignored. ignore_in_pred: Not supported for ROC-AUC; must be empty. Returns: roc_auc_score: The (float) roc_auc score
juraj-google-style
def generate_nodes(tpm, cm, network_state, indices, node_labels=None): if (node_labels is None): node_labels = NodeLabels(None, indices) node_state = utils.state_of(indices, network_state) return tuple((Node(tpm, cm, index, state, node_labels) for (index, state) in zip(indices, node_state)))
Generate |Node| objects for a subsystem. Args: tpm (np.ndarray): The system's TPM cm (np.ndarray): The corresponding CM. network_state (tuple): The state of the network. indices (tuple[int]): Indices to generate nodes for. Keyword Args: node_labels (|NodeLabels|): Textual labels for each node. Returns: tuple[Node]: The nodes of the system.
codesearchnet
def run(self, tag=None, output=None, **kwargs): start = datetime.datetime.now() count = 0 if tag: tag = Uri(tag) xml_generator = etree.iterparse(self.source, tag=tag.etree) else: xml_generator = etree.iterparse(self.source) i = 0 for event, element in xml_generator: type_tags = element.findall(_RDF_TYPE_TAG) rdf_types = [el.get(_RES_TAG) for el in type_tags if el.get(_RES_TAG)] if str(self.filter_val) in rdf_types: pdb.set_trace() count += 1 i += 1 element.clear() print("Found '{}' items in {}".format(count, (datetime.datetime.now() - start)))
Runs the extractor. Args: ----- tag: optional tag to restrict parsing to. output: ['filepath', None]
juraj-google-style
def read_links(self, file, encoding=None): return [item[0] for item in self.iter_text(file, encoding) if item[1]]
Return an iterator of links found in the document. Args: file: A file object containing the document. encoding (str): The encoding of the document. Returns: iterable: str
codesearchnet
def phenSpecificEffects(snps, pheno1, pheno2, K=None, covs=None, test='lrt'): N = snps.shape[0] if (K is None): K = SP.eye(N) assert (pheno1.shape[1] == pheno2.shape[1]), 'Only consider equal number of phenotype dimensions' if (covs is None): covs = SP.ones((N, 1)) assert ((pheno1.shape[1] == 1) and (pheno2.shape[1] == 1) and (pheno1.shape[0] == N) and (pheno2.shape[0] == N) and (K.shape[0] == N) and (K.shape[1] == N) and (covs.shape[0] == N)), 'shapes mismatch' Inter = SP.zeros(((N * 2), 1)) Inter[0:N, 0] = 1 Inter0 = SP.ones(((N * 2), 1)) Yinter = SP.concatenate((pheno1, pheno2), 0) Xinter = SP.tile(snps, (2, 1)) Covinter = SP.tile(covs, (2, 1)) lm = simple_interaction(snps=Xinter, pheno=Yinter, covs=Covinter, Inter=Inter, Inter0=Inter0, test=test) return lm
Univariate fixed effects interaction test for phenotype-specific SNP effects Args: snps: [N x S] SP.array of S SNPs for N individuals (test SNPs) pheno1: [N x 1] SP.array of 1 phenotype for N individuals pheno2: [N x 1] SP.array of 1 phenotype for N individuals K: [N x N] SP.array of LMM-covariance/kinship coefficients (optional) If not provided, then linear regression analysis is performed covs: [N x D] SP.array of D covariates for N individuals test: 'lrt' for likelihood ratio test (default) or 'f' for F-test Returns: limix LMM object
codesearchnet
def __init__(self, value): super().__init__(duration=0) if abs(value) > 1: raise PulseError("Absolute value of PV amplitude exceeds 1.") self._value = complex(value)
Create a new persistent value command. Args: value (complex): Complex value to apply, bounded by an absolute value of 1. The allowable precision is device specific. Raises: PulseError: when the absolute value of the input exceeds 1.
juraj-google-style
def get_usb_serial(self, port_num): port = self.port_map[str(port_num)] arg = ''.join(['DEVICE INFO,', self._addr, '.', port]) cmd = (['esuit64', '-t', arg]) info = subprocess.check_output(cmd, stderr=subprocess.STDOUT) serial = None if "SERIAL" in info: serial_info = info.split('SERIAL:')[1] serial = serial_info.split('\n')[0].strip() use_info = info.split('BY')[1].split(' ')[1] if use_info == 'NO': cmd = (['esuit64', '-t', 'AUTO USE ALL']) subprocess.check_output(cmd, stderr=subprocess.STDOUT) time.sleep(50.0/1000.0) else: raise ValueError('No USB device detected') return serial
Get the device serial number Args: port_num: port number on the Cambrionix unit Return: usb device serial number
juraj-google-style
def create_clusters(provider, context, **kwargs): conn = get_session(provider.region).client('ecs') try: clusters = kwargs["clusters"] except KeyError: logger.error("setup_clusters hook missing \"clusters\" argument") return False if isinstance(clusters, basestring): clusters = [clusters] cluster_info = {} for cluster in clusters: logger.debug("Creating ECS cluster: %s", cluster) r = conn.create_cluster(clusterName=cluster) cluster_info[r["cluster"]["clusterName"]] = r return {"clusters": cluster_info}
Creates ECS clusters. Expects a "clusters" argument, which should contain a list of cluster names to create. Args: provider (:class:`stacker.providers.base.BaseProvider`): provider instance context (:class:`stacker.context.Context`): context instance Returns: boolean for whether or not the hook succeeded.
juraj-google-style
def __init__(self, base: ModelHandler[ExampleT, PredictionT, ModelT]): self._base = base self._env_vars = getattr(base, '_env_vars', {})
A ModelHandler that skips batching in RunInference. Args: base: An implementation of the underlying model handler.
github-repos
def individual(self, ind_id=None): for ind_obj in self.individuals: if ind_obj.ind_id == ind_id: return ind_obj return None
Return an individual object Args: ind_id (str): An individual id Returns: individual (puzzle.models.individual)
juraj-google-style
def encode_chunk(dataframe): csv_buffer = six.StringIO() dataframe.to_csv(csv_buffer, index=False, header=False, encoding='utf-8', float_format='%.15g', date_format='%Y-%m-%d %H:%M:%S.%f') body = csv_buffer.getvalue() if isinstance(body, bytes): body = body.decode('utf-8') body = body.encode('utf-8') return six.BytesIO(body)
Return a file-like object of CSV-encoded rows. Args: dataframe (pandas.DataFrame): A chunk of a dataframe to encode
codesearchnet
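A usage sketch for the chunk encoder above, assuming pandas is installed; it restates the same CSV-to-bytes-buffer step standalone (using io rather than six) and shows that the result holds UTF-8 CSV bytes:

import io
import pandas as pd

df = pd.DataFrame({"name": ["a", "b"], "value": [1.5, 2.0]})

# Same idea as encode_chunk: CSV without index/header, returned as a bytes buffer.
buf = io.StringIO()
df.to_csv(buf, index=False, header=False, float_format="%.15g")
body = io.BytesIO(buf.getvalue().encode("utf-8"))
print(body.read())  # b'a,1.5\nb,2\n'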
def get_excitation_spectrum(self, width=0.1, npoints=2000): roots = self.parse_tddft() data = roots["singlet"] en = np.array([d["energy"] for d in data]) osc = np.array([d["osc_strength"] for d in data]) epad = 20.0 * width emin = en[0] - epad emax = en[-1] + epad de = (emax - emin) / npoints if width < 2 * de: width = 2 * de energies = [emin + ie * de for ie in range(npoints)] cutoff = 20.0 * width gamma = 0.5 * width gamma_sqrd = gamma * gamma de = (energies[-1] - energies[0]) / (len(energies) - 1) prefac = gamma / np.pi * de x = [] y = [] for energy in energies: xx0 = energy - en stot = osc / (xx0 * xx0 + gamma_sqrd) t = np.sum(stot[np.abs(xx0) <= cutoff]) x.append(energy) y.append(t * prefac) return ExcitationSpectrum(x, y)
Generate an excitation spectra from the singlet roots of TDDFT calculations. Args: width (float): Width for Gaussian smearing. npoints (int): Number of energy points. More points => smoother curve. Returns: (ExcitationSpectrum) which can be plotted using pymatgen.vis.plotters.SpectrumPlotter.
juraj-google-style
def get_md5sum(fname, chunk_size=1024): def iter_chunks(f): while True: chunk = f.read(chunk_size) if not chunk: break yield chunk sig = hashlib.md5() with open(fname, 'rb') as f: for chunk in iter_chunks(f): sig.update(chunk) return sig.hexdigest()
Returns the MD5 checksum of a file. Args: fname (str): Filename chunk_size (Optional[int]): Size (in Bytes) of the chunks that should be read in at once. Increasing chunk size reduces the number of reads required, but increases the memory usage. Defaults to 1024. Returns: The MD5 checksum of the file, which is a string.
juraj-google-style
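A self-contained check of the chunked-hash approach above, confirming it matches hashing the whole payload at once (written to a temporary file so it runs anywhere):

import hashlib
import tempfile

def md5_chunked(path, chunk_size=1024):
    sig = hashlib.md5()
    with open(path, "rb") as f:
        # Read fixed-size chunks until read() returns b"".
        for chunk in iter(lambda: f.read(chunk_size), b""):
            sig.update(chunk)
    return sig.hexdigest()

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello world" * 1000)
    name = tmp.name

print(md5_chunked(name) == hashlib.md5(b"hello world" * 1000).hexdigest())  # True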
def label(self, label, action='ADD', params=None): if params is None: params = {} if not label: self._tcex.handle_error(925, ['label', 'Security Label', 'label', 'label', label]) if not self.can_update(): self._tcex.handle_error(910, [self.type]) if action == 'GET': return self.tc_requests.get_label( self.api_type, self.api_sub_type, self.unique_id, label, owner=self.owner, params=params, ) if action == 'ADD': return self.tc_requests.add_label( self.api_type, self.api_sub_type, self.unique_id, label, owner=self.owner ) if action == 'DELETE': return self.tc_requests.delete_label( self.api_type, self.api_sub_type, self.unique_id, label, owner=self.owner ) self._tcex.handle_error(925, ['action', 'label', 'action', 'action', action]) return None
Adds, retrieves, or deletes a Security Label on an Indicator/Group or Victim Args: label: The name of the Security Label. action: The action to perform: 'ADD' (default), 'GET', or 'DELETE'. params: Optional query parameters.
juraj-google-style
def load_library(library_location): if os.path.exists(library_location): if os.path.isdir(library_location): directory_contents = os.listdir(library_location) kernel_libraries = [os.path.join(library_location, f) for f in directory_contents if _is_shared_object(f)] else: kernel_libraries = [library_location] for lib in kernel_libraries: py_tf.TF_LoadLibrary(lib) else: raise OSError(errno.ENOENT, 'The file or folder to load kernel libraries from does not exist.', library_location)
Loads a TensorFlow plugin. "library_location" can be a path to a specific shared object, or a folder. If it is a folder, all shared objects that are named "libtfkernel*" will be loaded. When the library is loaded, kernels registered in the library via the `REGISTER_*` macros are made available in the TensorFlow process. Args: library_location: Path to the plugin or the folder of plugins. Relative or absolute filesystem path to a dynamic library file or folder. Returns: None Raises: OSError: When the file to be loaded is not found. RuntimeError: when unable to load the library.
github-repos
def catchup_subscriber(self, connection_id): with self._subscribers_cv: subscriber = self._subscribers[connection_id] last_known_block_id = subscriber.get_last_known_block_id() subscriptions = subscriber.subscriptions if (last_known_block_id is not None): LOGGER.debug('Catching up Subscriber %s from %s', connection_id, last_known_block_id) for block_id in self.get_catchup_block_ids(last_known_block_id): events = self.get_events_for_block_id(block_id, subscriptions) event_list = EventList(events=events) self._send(connection_id, event_list.SerializeToString())
Send an event list containing all events matching the given subscriptions, from every block added since the latest block in the current chain that is among the subscriber's last known block ids. Raises: PossibleForkDetectedError: A possible fork was detected while building the event list NoKnownBlockError: None of the last known blocks were in the current chain KeyError: Unknown connection_id
codesearchnet
def dag_to_circuit(dag): qregs = collections.OrderedDict() for qreg in dag.qregs.values(): qreg_tmp = QuantumRegister(qreg.size, name=qreg.name) qregs[qreg.name] = qreg_tmp cregs = collections.OrderedDict() for creg in dag.cregs.values(): creg_tmp = ClassicalRegister(creg.size, name=creg.name) cregs[creg.name] = creg_tmp name = (dag.name or None) circuit = QuantumCircuit(*qregs.values(), *cregs.values(), name=name) for node in dag.topological_op_nodes(): qubits = [] for qubit in node.qargs: qubits.append(qregs[qubit[0].name][qubit[1]]) clbits = [] for clbit in node.cargs: clbits.append(cregs[clbit[0].name][clbit[1]]) if (node.condition is None): control = None else: control = (node.condition[0], node.condition[1]) inst = node.op.copy() inst.control = control circuit.append(inst, qubits, clbits) return circuit
Build a ``QuantumCircuit`` object from a ``DAGCircuit``. Args: dag (DAGCircuit): the input dag. Return: QuantumCircuit: the circuit representing the input dag.
codesearchnet
def SetProtocol(self, protocol): if protocol not in self.SUPPORTED_PROTOCOLS: raise ValueError('Unsupported protocol: {0!s}'.format(protocol)) self._protocol = protocol
Sets the protocol that will be used to query Viper. Args: protocol (str): protocol to use to query Viper. Either 'http' or 'https'. Raises: ValueError: if the protocol is not supported.
juraj-google-style
def member_of(self, group): if isinstance(group, Group): group = group.name return self.groups.filter(name=group).exists()
Returns whether a user is a member of a certain group. Args: group The name of a group (string) or a group object Returns: Boolean
codesearchnet
def get_sub_category(alt_len, ref_len, category, svtype=None): subcategory = '' if (category in ('snv', 'indel', 'cancer')): if (ref_len == alt_len): subcategory = 'snv' else: subcategory = 'indel' elif (category == 'sv'): subcategory = svtype return subcategory
Get the subcategory for a VCF variant The sub categories are: 'snv', 'indel', 'del', 'ins', 'dup', 'bnd', 'inv' Args: alt_len(int) ref_len(int) category(str) svtype(str) Returns: subcategory(str)
codesearchnet
def choose_template(self, template): n1 = int(template) // 10 n2 = int(template) % 10 self.send('^TS' + '0' + str(n1) + str(n2))
Choose a template Args: template: String, choose which template you would like. Returns: None Raises: None
juraj-google-style
def __init__(self, concentration1=None, concentration0=None, validate_args=False, allow_nan_stats=True, name='Beta'): parameters = dict(locals()) with ops.name_scope(name, values=[concentration1, concentration0]) as name: self._concentration1 = self._maybe_assert_valid_concentration(ops.convert_to_tensor(concentration1, name='concentration1'), validate_args) self._concentration0 = self._maybe_assert_valid_concentration(ops.convert_to_tensor(concentration0, name='concentration0'), validate_args) check_ops.assert_same_float_dtype([self._concentration1, self._concentration0]) self._total_concentration = self._concentration1 + self._concentration0 super(Beta, self).__init__(dtype=self._total_concentration.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=distribution.FULLY_REPARAMETERIZED, parameters=parameters, graph_parents=[self._concentration1, self._concentration0, self._total_concentration], name=name)
Initialize a batch of Beta distributions. Args: concentration1: Positive floating-point `Tensor` indicating mean number of successes; aka "alpha". Implies `self.dtype` and `self.batch_shape`, i.e., `concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`. concentration0: Positive floating-point `Tensor` indicating mean number of failures; aka "beta". Otherwise has same semantics as `concentration1`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class.
github-repos
def list_class_funcnames(fname, blank_pats=['    #']): with open(fname, 'r') as file_: lines = file_.readlines() funcname_list = [] for lx, line in enumerate(lines): if any([line.startswith(pat) for pat in blank_pats]): funcname_list.append('') if line.startswith('    def '): def_x = line.find('def') rparen_x = line.find('(') funcname = line[(def_x + 3):rparen_x] funcname_list.append(funcname) return funcname_list
list_class_funcnames Args: fname (str): filepath blank_pats (list): defaults to ' #' Returns: list: funcname_list Example: >>> # DISABLE_DOCTEST >>> from utool.util_inspect import * # NOQA >>> fname = 'util_class.py' >>> blank_pats = [' #'] >>> funcname_list = list_class_funcnames(fname, blank_pats) >>> print(funcname_list)
juraj-google-style
def convert_labels(Y, source, dest): if (Y is None): return Y if isinstance(Y, np.ndarray): Y = Y.copy() assert (np.sum((Y - Y.astype(int))) == 0.0) elif isinstance(Y, torch.Tensor): Y = Y.clone() assert (np.sum((Y.numpy() - Y.numpy().astype(int))) == 0.0) else: raise ValueError('Unrecognized label data type.') negative_map = {'categorical': 2, 'plusminus': (- 1), 'onezero': 0} Y[(Y == negative_map[source])] = negative_map[dest] return Y
Convert a matrix from one label type to another Args: Y: A np.ndarray or torch.Tensor of labels (ints) source: The convention the labels are currently expressed in dest: The convention to convert the labels to Conventions: 'categorical': [0: abstain, 1: positive, 2: negative] 'plusminus': [0: abstain, 1: positive, -1: negative] 'onezero': [0: negative, 1: positive] Note that converting to 'onezero' will combine abstain and negative labels.
codesearchnet
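The conversion above only remaps the "negative" label; a standalone numpy sketch of the same mapping (categorical to plusminus), without the torch branch:

import numpy as np

negative_map = {"categorical": 2, "plusminus": -1, "onezero": 0}

def convert(y, source, dest):
    # Remap only the negative label; abstain/positive stay as-is.
    y = y.copy()
    y[y == negative_map[source]] = negative_map[dest]
    return y

y_cat = np.array([0, 1, 2, 2, 1])  # 0=abstain, 1=positive, 2=negative
print(convert(y_cat, "categorical", "plusminus"))  # [ 0  1 -1 -1  1]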
def success(channel, title, datapacks): gui = ui_embed.UI(channel, title, '', modulename=modulename, datapacks=datapacks) return gui
Creates an embed UI containing the help message Args: channel (discord.Channel): The Discord channel to bind the embed to title (str): The title of the embed datapacks (list): The hex value Returns: ui (ui_embed.UI): The embed UI object
codesearchnet
def find_in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8): if len(fcoord_list) == 0: return [] fcoords = np.tile(fcoord, (len(fcoord_list), 1)) fdist = fcoord_list - fcoords fdist -= np.round(fdist) return np.where(np.all(np.abs(fdist) < atol, axis=1))[0]
Get the indices of all points in a fractional coord list that are equal to a fractional coord (with a tolerance), taking into account periodic boundary conditions. Args: fcoord_list: List of fractional coords fcoord: A specific fractional coord to test. atol: Absolute tolerance. Defaults to 1e-8. Returns: Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
juraj-google-style
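A small numpy example of the periodic-boundary trick used above: 0.999 and 0.001 differ by roughly 1.0 in fractional coordinates, so rounding the difference wraps them onto each other (same logic, restated standalone with a looser example tolerance):

import numpy as np

fcoord_list = np.array([[0.999, 0.5, 0.25],
                        [0.300, 0.5, 0.25]])
fcoord = np.array([0.001, 0.5, 0.25])

fdist = fcoord_list - np.tile(fcoord, (len(fcoord_list), 1))
fdist -= np.round(fdist)   # wrap differences into [-0.5, 0.5)
matches = np.where(np.all(np.abs(fdist) < 1e-2, axis=1))[0]
print(matches)  # [0]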
def extract_subject_info_extension(cert_obj): try: subject_info_der = cert_obj.extensions.get_extension_for_oid(cryptography.x509.oid.ObjectIdentifier(DATAONE_SUBJECT_INFO_OID)).value.value return str(pyasn1.codec.der.decoder.decode(subject_info_der)[0]) except Exception as e: logging.debug('SubjectInfo not extracted. reason="{}"'.format(e))
Extract DataONE SubjectInfo XML doc from certificate. Certificates issued by DataONE may include an embedded XML doc containing additional information about the subject specified in the certificate DN. If present, the doc is stored as an extension with an OID specified by DataONE and formatted as specified in the DataONE SubjectInfo schema definition. Args: cert_obj: cryptography.Certificate Returns: str : SubjectInfo XML doc if present, else None
codesearchnet
def draw_on(self, canvas, stem_color, leaf_color, thickness, ages=None): if (canvas.__module__ in SUPPORTED_CANVAS): drawer = SUPPORTED_CANVAS[canvas.__module__] drawer(self, canvas, stem_color, leaf_color, thickness, ages).draw()
Draw the tree on a canvas. Args: canvas (object): The canvas you want to draw the tree on. Supported canvases: svgwrite.Drawing and PIL.Image (you can also add your custom libraries.) stem_color (tuple): Color or gradient for the stem of the tree. leaf_color (tuple): Color for the leaf (= the color for the last iteration). thickness (int): The start thickness of the tree.
codesearchnet
def Register(self, app_id, challenge, registered_keys): client_data = model.ClientData(model.ClientData.TYP_REGISTRATION, challenge, self.origin) challenge_param = self.InternalSHA256(client_data.GetJson()) app_param = self.InternalSHA256(app_id) for key in registered_keys: try: if (key.version != u'U2F_V2'): continue resp = self.security_key.CmdAuthenticate(challenge_param, app_param, key.key_handle, True) raise errors.HardwareError('Should Never Happen') except errors.TUPRequiredError: raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE) except errors.InvalidKeyHandleError as e: pass except errors.HardwareError as e: raise errors.U2FError(errors.U2FError.BAD_REQUEST, e) for _ in range(30): try: resp = self.security_key.CmdRegister(challenge_param, app_param) return model.RegisterResponse(resp, client_data) except errors.TUPRequiredError as e: self.security_key.CmdWink() time.sleep(0.5) except errors.HardwareError as e: raise errors.U2FError(errors.U2FError.BAD_REQUEST, e) raise errors.U2FError(errors.U2FError.TIMEOUT)
Registers app_id with the security key. Executes the U2F registration flow with the security key. Args: app_id: The app_id to register the security key against. challenge: Server challenge passed to the security key. registered_keys: List of keys already registered for this app_id+user. Returns: RegisterResponse with key_handle and attestation information in it ( encoded in FIDO U2F binary format within registration_data field). Raises: U2FError: There was some kind of problem with registration (e.g. the device was already registered or there was a timeout waiting for the test of user presence).
codesearchnet
def _inverse_log_det_jacobian(self, y): raise NotImplementedError('inverse_log_det_jacobian not implemented.')
Subclass implementation of `inverse_log_det_jacobian` public function. In particular, this method differs from the public function, in that it does not take `event_ndims`. Thus, this implements the minimal Jacobian determinant calculation (i.e. over `inverse_min_event_ndims`). Args: y: `Tensor`. The input to the "inverse_log_det_jacobian" evaluation. Returns: inverse_log_det_jacobian: `Tensor`, if this bijector is injective. If not injective, returns the k-tuple containing jacobians for the unique `k` points `(x1, ..., xk)` such that `g(xi) = y`.
github-repos
def run(app: web.Application): host = app['config']['host'] port = app['config']['port'] web.run_app(app, host=host, port=port)
Runs the application in an async context. This function will block indefinitely until the application is shut down. Args: app (web.Application): The Aiohttp Application as created by `create_app()`
juraj-google-style
def _use_memcache(self, key, options=None): flag = ContextOptions.use_memcache(options) if flag is None: flag = self._memcache_policy(key) if flag is None: flag = ContextOptions.use_memcache(self._conn.config) if flag is None: flag = True return flag
Return whether to use memcache for this key. Args: key: Key instance. options: ContextOptions instance, or None. Returns: True if the key should be cached in memcache, False otherwise.
juraj-google-style
def elaborate_borns_and_epsilon(ucell, borns, epsilon, primitive_matrix=None, supercell_matrix=None, is_symmetry=True, symmetrize_tensors=False, symprec=1e-05): assert (len(borns) == ucell.get_number_of_atoms()), ('num_atom %d != len(borns) %d' % (ucell.get_number_of_atoms(), len(borns))) if symmetrize_tensors: (borns_, epsilon_) = symmetrize_borns_and_epsilon(borns, epsilon, ucell, symprec=symprec, is_symmetry=is_symmetry) else: borns_ = borns epsilon_ = epsilon (indeps_in_supercell, indeps_in_unitcell) = _extract_independent_atoms(ucell, primitive_matrix=primitive_matrix, supercell_matrix=supercell_matrix, is_symmetry=is_symmetry, symprec=symprec) return (borns_[indeps_in_unitcell].copy(), epsilon_, indeps_in_supercell)
Symmetrize Born effective charges and dielectric constants and extract Born effective charges of symmetrically independent atoms for primitive cell. Args: ucell (Atoms): Unit cell structure borns (np.array): Born effective charges of ucell epsilon (np.array): Dielectric constant tensor Returns: (np.array) Born effective charges of symmetrically independent atoms in primitive cell (np.array) Dielectric constant (np.array) Atomic index mapping table from supercell to primitive cell of independent atoms Raises: AssertionError: Inconsistency of number of atoms or Born effective charges. Warning: Broken symmetry of Born effective charges
codesearchnet
def register_token(self, token_class, regexp=None): if (regexp is None): regexp = token_class.regexp self.tokens.register(token_class, regexp)
Register a token class. Args: token_class (tdparser.Token): the token class to register regexp (optional str): the regexp for elements of that token. Defaults to the `regexp` attribute of the token class.
codesearchnet
def run_local_server(self, host='localhost', port=8080, authorization_prompt_message=_DEFAULT_AUTH_PROMPT_MESSAGE, success_message=_DEFAULT_WEB_SUCCESS_MESSAGE, open_browser=True, **kwargs): self.redirect_uri = 'http://{}:{}/'.format(host, port) (auth_url, _) = self.authorization_url(**kwargs) wsgi_app = _RedirectWSGIApp(success_message) local_server = wsgiref.simple_server.make_server(host, port, wsgi_app, handler_class=_WSGIRequestHandler) if open_browser: webbrowser.open(auth_url, new=1, autoraise=True) print(authorization_prompt_message.format(url=auth_url)) local_server.handle_request() authorization_response = wsgi_app.last_request_uri.replace('http', 'https') self.fetch_token(authorization_response=authorization_response) return self.credentials
Run the flow using the server strategy. The server strategy instructs the user to open the authorization URL in their browser and will attempt to automatically open the URL for them. It will start a local web server to listen for the authorization response. Once authorization is complete the authorization server will redirect the user's browser to the local web server. The web server will get the authorization code from the response and shutdown. The code is then exchanged for a token. Args: host (str): The hostname for the local redirect server. This will be served over http, not https. port (int): The port for the local redirect server. authorization_prompt_message (str): The message to display to tell the user to navigate to the authorization URL. success_message (str): The message to display in the web browser the authorization flow is complete. open_browser (bool): Whether or not to open the authorization URL in the user's browser. kwargs: Additional keyword arguments passed through to :meth:`authorization_url`. Returns: google.oauth2.credentials.Credentials: The OAuth 2.0 credentials for the user.
codesearchnet
def GetMessages(self, files): def _GetAllMessageNames(desc): 'Walk a message Descriptor and recursively yields all message names.' (yield desc.full_name) for msg_desc in desc.nested_types: for full_name in _GetAllMessageNames(msg_desc): (yield full_name) result = {} for file_name in files: file_desc = self.pool.FindFileByName(file_name) for msg_desc in file_desc.message_types_by_name.values(): for full_name in _GetAllMessageNames(msg_desc): try: result[full_name] = self._classes[full_name] except KeyError: pass return result
Gets all registered messages from a specified file. Only messages already created and registered will be returned; (this is the case for imported _pb2 modules) But unlike MessageFactory, this version also returns already defined nested messages, but does not register any message extensions. Args: files: The file names to extract messages from. Returns: A dictionary mapping proto names to the message classes. Raises: KeyError: if a file could not be found.
codesearchnet
def config_to_string(config): output = [] for section, section_content in config.items(): output.append("[{}]".format(section)) for option, option_value in section_content.items(): output.append("{} = {}".format(option, option_value)) return "\n".join(output)
Nice output string for the config, which is a nested defaultdict. Args: config (defaultdict(defaultdict)): The configuration information. Returns: str: A human-readable output string detailing the contents of the config.
juraj-google-style
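A quick usage sketch of the config serializer above, restating the same two nested loops on a plain dict so it runs standalone:

config = {"server": {"host": "localhost", "port": 8080},
          "logging": {"level": "INFO"}}

lines = []
for section, section_content in config.items():
    lines.append("[{}]".format(section))
    for option, option_value in section_content.items():
        lines.append("{} = {}".format(option, option_value))
print("\n".join(lines))
# [server]
# host = localhost
# port = 8080
# [logging]
# level = INFO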
def _get_best(values: List[float], losses: List[float], max_loss_div: float = 0.9, min_val_div: float = 10.0) -> float: assert len(values) == len(losses), "lengths of values and losses should be equal" min_ind = np.argmin(losses) for i in range(min_ind - 1, 0, -1): if (losses[i] * max_loss_div > losses[min_ind]) or\ (values[i] * min_val_div < values[min_ind]): return values[i + 1] return values[min_ind] / min_val_div
Find the best value according to given losses Args: values: list of considered values losses: list of obtained loss values corresponding to `values` max_loss_div: maximal divergence of loss to be considered significant min_val_div: minimum divergence of loss to be considered significant Returns: best value divided by `min_val_div`
juraj-google-style
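A tiny synthetic run of the selection rule above (learning-rate-finder style): walk left from the loss minimum until either the loss or the value diverges enough, then return the value one step to the right. The function is restated in spirit; the numbers are made up:

import numpy as np

def get_best(values, losses, max_loss_div=0.9, min_val_div=10.0):
    min_ind = int(np.argmin(losses))
    for i in range(min_ind - 1, 0, -1):
        if losses[i] * max_loss_div > losses[min_ind] or values[i] * min_val_div < values[min_ind]:
            return values[i + 1]
    return values[min_ind] / min_val_div

lrs = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
losses = [2.0, 0.52, 0.51, 0.5, 3.0]
print(get_best(lrs, losses))  # 0.001: the value just right of where values become >10x smaller than the best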
def _get_annotations(self, text, language=''): body = { 'document': { 'type': 'PLAIN_TEXT', 'content': text, }, 'features': { 'extract_syntax': True, }, 'encodingType': 'UTF32', } if language: body['document']['language'] = language request = self.service.documents().annotateText(body=body) response = request.execute() tokens = response.get('tokens', []) language = response.get('language') return {'tokens': tokens, 'language': language}
Returns the list of annotations retrieved from the given text. Args: text (str): Input text. language (:obj:`str`, optional): Language code. Returns: Results in a dictionary. :code:`tokens` contains the list of annotations and :code:`language` contains the inferred language from the input.
juraj-google-style
def set_params(self, **params): if 'bias' in params.keys(): self.intercept_ = params['bias'] if 'weights' in params.keys(): self.coef_ = params['weights'] for key in params.keys(): if 'b_' == key[:2]: self.B[int(key[2:])] = params[key] return self
Set the parameters of the estimator. Args: bias (array-like) : bias of the estimator. Also known as the intercept in a linear model. weights (array-like) : weights of the features. Also known as coefficients. NER biases (array-like) : NER entities inferring column position on X and bias value. Ex: `b_4=10, b_5=6`. Example: >>> cls = VTT() >>> cls.set_params(b_4=10, b_5=6, b_6=8)
juraj-google-style
def open(path, mode=gdalconst.GA_ReadOnly): path = getattr(path, 'name', path) try: return Raster(vsiprefix(path), mode) except AttributeError: try: imgdata = path.read() except AttributeError: raise TypeError('Not a file-like object providing read()') else: imgio = MemFileIO(delete=False) gdal.FileFromMemBuffer(imgio.name, imgdata) return Raster(imgio, mode) raise ValueError('Failed to open raster from "%r"' % path)
Returns a Raster instance. Arguments: path -- local or remote path as str or file-like object Keyword args: mode -- gdal constant representing access mode
juraj-google-style
def encode(self, label): label = super().encode(label) return torch.tensor(self.stoi.get(label, self.unknown_index))
Encodes a ``label``. Args: label (object): Label to encode. Returns: torch.Tensor: Encoding of the label.
codesearchnet
def while_loop(self, context, step_method): logger.debug("starting") context['whileCounter'] = 0 if self.stop is None and self.max is None: logger.error(f"while decorator missing both max and stop.") raise PipelineDefinitionError("the while decorator must have " "either max or stop, or both. " "But not neither.") error_on_max = context.get_formatted_as_type( self.error_on_max, out_type=bool) sleep = context.get_formatted_as_type(self.sleep, out_type=float) if self.max is None: max = None logger.info(f"while decorator will loop until {self.stop} " f"evaluates to True at {sleep}s intervals.") else: max = context.get_formatted_as_type(self.max, out_type=int) if max < 1: logger.info( f"max {self.max} is {max}. while only runs when max > 0.") logger.debug("done") return if self.stop is None: logger.info(f"while decorator will loop {max} times at " f"{sleep}s intervals.") else: logger.info(f"while decorator will loop {max} times, or " f"until {self.stop} evaluates to True at " f"{sleep}s intervals.") if not poll.while_until_true(interval=sleep, max_attempts=max)( self.exec_iteration)(context=context, step_method=step_method): if error_on_max: logger.error(f"exhausted {max} iterations of while loop, " "and errorOnMax is True.") if self.stop and max: raise LoopMaxExhaustedError("while loop reached " f"{max} and {self.stop} " "never evaluated to True.") else: raise LoopMaxExhaustedError(f"while loop reached {max}.") else: if self.stop and max: logger.info( f"while decorator looped {max} times, " f"and {self.stop} never evaluated to True.") logger.debug("while loop done") else: logger.info(f"while loop done, stop condition {self.stop} " "evaluated True.") logger.debug("done")
Run step inside a while loop. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context. step_method: (method/function) This is the method/function that will execute on every loop iteration. Signature is: function(context)
juraj-google-style
def daemon(args): if os.environ.get(DVC_DAEMON): logger.debug('skipping launching a new daemon.') return cmd = [sys.executable] if (not is_binary()): cmd += ['-m', 'dvc'] cmd += (['daemon', '-q'] + args) env = fix_env() file_path = os.path.abspath(inspect.stack()[0][1]) env[cast_bytes_py2('PYTHONPATH')] = cast_bytes_py2(os.path.dirname(os.path.dirname(file_path))) env[cast_bytes_py2(DVC_DAEMON)] = cast_bytes_py2('1') _spawn(cmd, env)
Launch a `dvc daemon` command in a detached process. Args: args (list): list of arguments to append to `dvc daemon` command.
codesearchnet
def get_versions(self): versions_response = self.repo.api.http_request('GET', ('%s/fcr:versions' % self.uri)) versions_graph = self.repo.api.parse_rdf_payload(versions_response.content, versions_response.headers) for version_uri in versions_graph.objects(self.uri, self.rdf.prefixes.fedora.hasVersion): version_label = versions_graph.value(version_uri, self.rdf.prefixes.fedora.hasVersionLabel, None).toPython() self._affix_version(version_uri, version_label)
retrieves all versions of an object, and stores them at self.versions Args: None Returns: None: appends instances
codesearchnet
def updateNodeCapabilities(self, nodeId, node, vendorSpecific=None): response = self.updateNodeCapabilitiesResponse(nodeId, node, vendorSpecific) return self._read_boolean_response(response)
See Also: updateNodeCapabilitiesResponse() Args: nodeId: node: vendorSpecific: Returns:
juraj-google-style
def _workflow_complete(workflow_stage_dict: dict): complete_stages = [] for (_, stage_config) in workflow_stage_dict.items(): complete_stages.append((stage_config['status'] == 'complete')) if all(complete_stages): LOG.info('PB workflow complete!') return True return False
Check if the workflow is complete. This function checks if the entire workflow is complete. This function is used by `execute_processing_block`. Args: workflow_stage_dict (dict): Workflow metadata dictionary. Returns: bool, True if the workflow is complete, otherwise False.
codesearchnet
def concat(self, axis, other, **kwargs): return self._append_list_of_managers(other, axis, **kwargs)
Concatenates two objects together. Args: axis: The axis index object to join (0 for columns, 1 for index). other: The other_index to concat with. Returns: Concatenated objects.
codesearchnet
def from_files(path_dir, dos_spin=1): run_type, warning, efermi, gap, doping_levels = \ BoltztrapAnalyzer.parse_outputtrans(path_dir) vol = BoltztrapAnalyzer.parse_struct(path_dir) intrans = BoltztrapAnalyzer.parse_intrans(path_dir) if run_type == "BOLTZ": dos, pdos = BoltztrapAnalyzer.parse_transdos( path_dir, efermi, dos_spin=dos_spin, trim_dos=False) mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping, \ seebeck_doping, cond_doping, kappa_doping, hall_doping, \ carrier_conc = BoltztrapAnalyzer. \ parse_cond_and_hall(path_dir, doping_levels) return BoltztrapAnalyzer( gap, mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping, seebeck_doping, cond_doping, kappa_doping, hall_doping, intrans, dos, pdos, carrier_conc, vol, warning) elif run_type == "DOS": trim = True if intrans["dos_type"] == "HISTO" else False dos, pdos = BoltztrapAnalyzer.parse_transdos( path_dir, efermi, dos_spin=dos_spin, trim_dos=trim) return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos, warning=warning, vol=vol) elif run_type == "BANDS": bz_kpoints = np.loadtxt( os.path.join(path_dir, "boltztrap_band.dat"))[:, -3:] bz_bands = np.loadtxt( os.path.join(path_dir, "boltztrap_band.dat"))[:, 1:-6] return BoltztrapAnalyzer(bz_bands=bz_bands, bz_kpoints=bz_kpoints, warning=warning, vol=vol) elif run_type == "FERMI": if os.path.exists(os.path.join(path_dir, 'boltztrap_BZ.cube')): fs_data = read_cube_file( os.path.join(path_dir, 'boltztrap_BZ.cube')) elif os.path.exists(os.path.join(path_dir, 'fort.30')): fs_data = read_cube_file(os.path.join(path_dir, 'fort.30')) else: raise BoltztrapError("No data file found for fermi surface") return BoltztrapAnalyzer(fermi_surface_data=fs_data) else: raise ValueError("Run type: {} not recognized!".format(run_type))
get a BoltztrapAnalyzer object from a set of files Args: path_dir: directory where the boltztrap files are dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down Returns: a BoltztrapAnalyzer object
juraj-google-style
def get_branch(profile, name): ref = ('heads/' + name) data = refs.get_ref(profile, ref) return data
Fetch a branch. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. name The name of the branch to fetch. Returns: A dict with data about the branch.
codesearchnet
def get_config(self): return {}
Returns the initializer's configuration as a JSON-serializable dict. Returns: A JSON-serializable Python dict.
github-repos
def output(self, _filename): txt = '' for c in self.contracts: txt += "\nContract %s\n"%c.name table = PrettyTable(['Variable', 'Dependencies']) for v in c.state_variables: table.add_row([v.name, _get(v, c)]) txt += str(table) txt += "\n" for f in c.functions_and_modifiers_not_inherited: txt += "\nFunction %s\n"%f.full_name table = PrettyTable(['Variable', 'Dependencies']) for v in f.variables: table.add_row([v.name, _get(v, f)]) for v in c.state_variables: table.add_row([v.canonical_name, _get(v, f)]) txt += str(table) self.info(txt)
_filename is not used Args: _filename(string)
juraj-google-style
def __init__(self, filesystem): self._filesystem = filesystem self.name = '' self.path = '' self._inode = None self._islink = False self._isdir = False self._statresult = None self._statresult_symlink = None
Initialize the dir entry with unset values. Args: filesystem: the fake filesystem used for implementation.
juraj-google-style
def Evaluate(self, client_obj): if (self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ALL): quantifier = all elif (self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ANY): quantifier = any else: raise ValueError(('Unexpected match mode value: %s' % self.match_mode)) return quantifier((rule.Evaluate(client_obj) for rule in self.rules))
Evaluates rules held in the rule set. Args: client_obj: Either an aff4 client object or a client_info dict as returned by ReadFullInfoClient if the relational db is used for reading. Returns: A bool value of the evaluation. Raises: ValueError: The match mode is of unknown value.
codesearchnet
def create_raw(self, key, value): data = None if ((key is not None) and (value is not None)): data = self.db.create(key.strip(), value) else: self.tcex.log.warning(u'The key or value field was None.') return data
Create method of CRUD operation for raw data. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. Returns: (string): Result of DB write.
codesearchnet
def reorder_resources(self, resource_ids, hxl_update=True): dataset_id = self.data.get('id') if not dataset_id: raise HDXError('Dataset has no id! It must be read, created or updated first.') data = {'id': dataset_id, 'order': resource_ids} self._write_to_hdx('reorder', data, 'package_id') if hxl_update: self.hxl_update()
Reorder resources in dataset according to provided list. If only some resource ids are supplied then these are assumed to be first and the other resources will stay in their original order. Args: resource_ids (List[str]): List of resource ids hxl_update (bool): Whether to call package_hxl_update. Defaults to True. Returns: None
juraj-google-style
def add_listener(self, event, listener): self.emit('new_listener', event, listener) self._listeners[event].append(listener) self._check_limit(event) return self
Bind a listener to a particular event. Args: event (str): The name of the event to listen for. This may be any string value. listener (def or async def): The callback to execute when the event fires. This may be a sync or async function.
juraj-google-style
def package_info(pkg_name): indent = " " for config, _ in _iter_packages(): if pkg_name == config["name"]: print("Package:", pkg_name) print(indent, "Platform:", config["platform"]) print(indent, "Version:", config["version"]) print(indent, "Path:", config["path"]) print(indent, "Worlds:") for world in config["maps"]: world_info(world["name"], world_config=world, initial_indent=" ")
Prints the information of a package. Args: pkg_name (str): The name of the desired package to get information
juraj-google-style
def __init__(self, value, translator): self.value = value self.translator = translator
Creates a NestedValueProvider that wraps the provided ValueProvider. Args: value: ValueProvider object to wrap translator: function that is applied to the ValueProvider Raises: ``RuntimeValueProviderError``: if any of the provided objects are not accessible.
github-repos
def remove_user(username): users = passwd_reader.load_users() assert username in users, "Username '%s' not found!" % username del users[username] passwd_reader.save_users(users) home_dir = settings.DATA_PATH + username if os.path.exists(home_dir): shutil.rmtree(home_dir) reload_configuration()
Remove user, his home directory and so on.. Args: username (str): User's name.
juraj-google-style
def __init__(self, output_filename="std_err.txt"): self.output_filename = output_filename self.errors = set() self.error_count = Counter()
Initializes the handler with the output file to check. Args: output_filename (str): This is the file where the stderr for vasp is being redirected. The error messages that are checked are present in the stderr. Defaults to "std_err.txt", which is the default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
juraj-google-style
def human_timestamp_to_datetime(human_timestamp, to_utc=False): settings = {} if to_utc: settings = {'TO_TIMEZONE': 'UTC'} return dateparser.parse(human_timestamp, settings=settings)
Converts a human-readable timestamp into a Python ``DateTime`` object Args: human_timestamp (str): A timestamp string to_utc (bool): Convert the timestamp to UTC Returns: DateTime: The converted timestamp
codesearchnet
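Usage sketch, assuming the dateparser package is installed; the settings dict mirrors the TO_TIMEZONE switch used above:

import dateparser

print(dateparser.parse("2019-03-01 14:00 PST"))
print(dateparser.parse("2019-03-01 14:00 PST", settings={"TO_TIMEZONE": "UTC"}))
print(dateparser.parse("2 hours ago"))  # relative phrases are supported too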
def process(self, element): text_input, prediction_result = element softmax = torch.nn.Softmax(dim=-1)(prediction_result.inference['logits']).detach().numpy() return [{'input': text_input, 'softmax': softmax}]
Takes the input text and the prediction result, and returns a dictionary with the input text and the softmax probabilities Args: element: The tuple of input text and the prediction result Returns: A list of dictionaries, each containing the input text and the softmax output.
github-repos
def correction(self, word): return max(self.candidates(word), key=self.word_probability)
The most probable correct spelling for the word Args: word (str): The word to correct Returns: str: The most likely candidate
codesearchnet
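The corrector above is a one-liner over two pieces: a candidate generator and a word-frequency model. A self-contained toy version with a hard-coded candidate set and corpus (the real class derives both from its training data):

from collections import Counter

corpus = "the quick brown fox jumps over the lazy dog the fox".split()
counts = Counter(corpus)
total = sum(counts.values())

def word_probability(word):
    # Relative frequency of the word in the corpus.
    return counts[word] / total

def correction(word, candidates):
    return max(candidates, key=word_probability)

print(correction("teh", candidates={"the", "ten", "tea"}))  # 'the'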
def Uniform(cls, low: 'TensorFluent', high: 'TensorFluent', batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']: if low.scope != high.scope: raise ValueError('Uniform distribution: parameters must have same scope!') dist = tf.distributions.Uniform(low.tensor, high.tensor) batch = low.batch or high.batch if not batch and batch_size is not None: t = dist.sample(batch_size) batch = True else: t = dist.sample() scope = low.scope.as_list() return (dist, TensorFluent(t, scope, batch=batch))
Returns a TensorFluent for the Uniform sampling op with given low and high parameters. Args: low: The low parameter of the Uniform distribution. high: The high parameter of the Uniform distribution. batch_size: The size of the batch (optional). Returns: The Uniform distribution and a TensorFluent sample drawn from the distribution. Raises: ValueError: If parameters do not have the same scope.
juraj-google-style
def get_upstream_artifacts_full_paths_per_task_id(context): upstream_artifacts = context.task['payload']['upstreamArtifacts'] task_ids_and_relative_paths = [ (artifact_definition['taskId'], artifact_definition['paths']) for artifact_definition in upstream_artifacts ] optional_artifacts_per_task_id = get_optional_artifacts_per_task_id(upstream_artifacts) upstream_artifacts_full_paths_per_task_id = {} failed_paths_per_task_id = {} for task_id, paths in task_ids_and_relative_paths: for path in paths: try: path_to_add = get_and_check_single_upstream_artifact_full_path(context, task_id, path) add_enumerable_item_to_dict( dict_=upstream_artifacts_full_paths_per_task_id, key=task_id, item=path_to_add ) except ScriptWorkerTaskException: if path in optional_artifacts_per_task_id.get(task_id, []): log.warning('Optional artifact "{}" of task "{}" not found'.format(path, task_id)) add_enumerable_item_to_dict( dict_=failed_paths_per_task_id, key=task_id, item=path ) else: raise return upstream_artifacts_full_paths_per_task_id, failed_paths_per_task_id
List the downloaded upstream artifacts. Args: context (scriptworker.context.Context): the scriptworker context. Returns: dict, dict: lists of the paths to upstream artifacts, sorted by task_id. First dict represents the existing upstream artifacts. The second one maps the optional artifacts that couldn't be downloaded Raises: scriptworker.exceptions.ScriptWorkerTaskException: when an artifact doesn't exist.
juraj-google-style
def block_diag(*blocks: np.ndarray) -> np.ndarray: for b in blocks: if b.shape[0] != b.shape[1]: raise ValueError('Blocks must be square.') if not blocks: return np.zeros((0, 0), dtype=np.complex128) n = sum(b.shape[0] for b in blocks) dtype = functools.reduce(_merge_dtypes, (b.dtype for b in blocks)) result = np.zeros(shape=(n, n), dtype=dtype) i = 0 for b in blocks: j = i + b.shape[0] result[i:j, i:j] = b i = j return result
Concatenates blocks into a block diagonal matrix. Args: *blocks: Square matrices to place along the diagonal of the result. Returns: A block diagonal matrix with the given blocks along its diagonal. Raises: ValueError: A block isn't square.
juraj-google-style
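A short usage sketch, assuming the `block_diag` above is in scope and that its private `_merge_dtypes` helper promotes dtypes the way `np.result_type` does:

import numpy as np

a = np.array([[1, 0], [0, 1]], dtype=np.float64)
b = np.array([[2]], dtype=np.complex128)

result = block_diag(a, b)
expected = np.array([[1, 0, 0],
                     [0, 1, 0],
                     [0, 0, 2]], dtype=np.complex128)
assert result.shape == (3, 3)
assert np.allclose(result, expected)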
def metadata_matches(self, query={}): result = (len(query.keys()) > 0) for key in query.keys(): result = (result and (query[key] == self.metadata.get(key))) return result
Returns whether the query matches this object's metadata. Every key in query is checked for a matching key and value in metadata; a query without keys never matches. Args: query(object): metadata for matching Returns: bool: True when the query has at least one key and every key/value pair in it is found in self.metadata, False otherwise.
codesearchnet
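A small illustrative sketch, with a hypothetical holder class standing in for whatever object carries `self.metadata`:

class Record:
    def __init__(self, metadata):
        self.metadata = metadata

    def metadata_matches(self, query={}):
        result = len(query.keys()) > 0
        for key in query.keys():
            result = result and (query[key] == self.metadata.get(key))
        return result

record = Record({"source": "sensor-1", "unit": "celsius"})
print(record.metadata_matches({"source": "sensor-1"}))                    # True
print(record.metadata_matches({"source": "sensor-1", "unit": "kelvin"}))  # False
print(record.metadata_matches({}))                                        # False: empty queries never match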
def replica_id_in_sync_group(self): if tensor_util.is_tf_type(self._replica_id_in_sync_group): return self._replica_id_in_sync_group return constant_op.constant(self._replica_id_in_sync_group, dtypes.int32, name='replica_id_in_sync_group')
Returns the id of the replica. This identifies the replica among all replicas that are kept in sync. The value of the replica id can range from 0 to `tf.distribute.ReplicaContext.num_replicas_in_sync` - 1. NOTE: This is not guaranteed to be the same ID as the XLA replica ID used for low-level operations such as collective_permute. Returns: a `Tensor`.
github-repos
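A hedged usage sketch based on TensorFlow's public `tf.distribute` API; the property above is what `replica_id_in_sync_group` returns inside a replica context, and the number of values printed depends on the devices available:

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

@tf.function
def replica_fn():
    ctx = tf.distribute.get_replica_context()
    # Ranges from 0 to num_replicas_in_sync - 1, one value per replica
    return ctx.replica_id_in_sync_group

per_replica_ids = strategy.run(replica_fn)
print(per_replica_ids)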
def load(config): if config.sys_path: logger.debug('Appending %s to sys.path.', config.sys_path) sys.path.append(config.sys_path) logger.debug('sys.path is now %s', sys.path) if config.lookups: for (key, handler) in config.lookups.items(): register_lookup_handler(key, handler) return config
Loads a stacker configuration by modifying sys paths, loading lookups, etc. Args: config (:class:`Config`): the stacker config to load. Returns: :class:`Config`: the stacker config provided above.
codesearchnet
def get_missing_services(self, services): required_services = set(services) provided_services = set(self._services.keys()) missing_services = required_services.difference(provided_services) return sorted(missing_services)
Check whether all required services are provided. Args: services: List of the service names which are required Returns: Sorted list of the required services that are missing
juraj-google-style
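A minimal sketch of the check in use, with a hypothetical registry whose `_services` dict maps service names to providers:

class ServiceRegistry:
    def __init__(self, services):
        self._services = services

    def get_missing_services(self, services):
        required_services = set(services)
        provided_services = set(self._services.keys())
        return sorted(required_services.difference(provided_services))

registry = ServiceRegistry({"database": object(), "cache": object()})
print(registry.get_missing_services(["database", "queue", "auth"]))  # ['auth', 'queue']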
def __init__( self, resolver_context, encoding_method=None, file_object=None): if file_object is not None and encoding_method is None: raise ValueError( 'File-like object provided without corresponding encoding method.') super(EncodedStream, self).__init__(resolver_context) self._current_offset = 0 self._decoded_data = b'' self._decoded_data_offset = 0 self._decoded_data_size = 0 self._decoded_stream_size = None self._decoder = None self._encoded_data = b'' self._encoding_method = encoding_method self._file_object = file_object self._file_object_set_in_init = bool(file_object) self._realign_offset = True
Initializes a file-like object. If the file-like object is chained do not separately use the parent file-like object. Args: resolver_context (Context): resolver context. encoding_method (Optional[str]): method used to the encode the data. file_object (Optional[file]): parent file-like object. Raises: ValueError: if file_object provided but encoding_method is not.
juraj-google-style
def _maybe_cast_inputs(self, inputs): compute_dtype = self._compute_dtype if self._autocast and compute_dtype and dtypes.as_dtype(compute_dtype).is_floating: def f(x): cast_types = (tensor.Tensor, sparse_tensor.SparseTensor, ragged_tensor.RaggedTensor) if isinstance(x, cast_types) and x.dtype.is_floating and (x.dtype.base_dtype.name != compute_dtype): return math_ops.cast(x, compute_dtype) elif isinstance(x, tensor.TensorSpec) and x.dtype.is_floating: return tensor.TensorSpec(x.shape, compute_dtype, x.name) else: return x return nest.map_structure(f, inputs) else: return inputs
Maybe casts the inputs to the compute dtype. If self._compute_dtype is floating-point and self._autocast is True, floating-point inputs are cast to self._compute_dtype. Args: inputs: Input tensor, or structure of input tensors. Returns: `inputs`, but tensors may have been cast to self._compute_dtype
github-repos
def AddFile(self, filepath): if (filepath not in self._files): self._files.add(filepath) return True return False
Adds a file path as a source. Args: filepath: a string representing a path to the file. Returns: True if the file is not an already existing source.
codesearchnet
def from_py_func(cls, code): from bokeh.util.deprecation import deprecated deprecated("'from_py_func' is deprecated and will be removed in an eventual 2.0 release. Use CustomJSHover directly instead.") if (not isinstance(code, FunctionType)): raise ValueError('CustomJSHover.from_py_func only accepts function objects.') pscript = import_required('pscript', ('To use Python functions for CustomJSHover, you need PScript ' + '("conda install -c conda-forge pscript" or "pip install pscript")')) def pscript_compile(code): sig = signature(code) (all_names, default_values) = get_param_info(sig) if ((len(all_names) - len(default_values)) != 0): raise ValueError('Function may only contain keyword arguments.') if (default_values and (not any((isinstance(value, Model) for value in default_values)))): raise ValueError('Default value must be a Bokeh Model.') func_kwargs = dict(zip(all_names, default_values)) code = (pscript.py2js(code, 'transformer') + ('return transformer(%s);\n' % ', '.join(all_names))) return (code, func_kwargs) (jsfunc, func_kwargs) = pscript_compile(code) return cls(code=jsfunc, args=func_kwargs)
Create a ``CustomJSHover`` instance from a Python function. The function is translated to JavaScript using PScript. The Python function must have no positional arguments. It is possible to pass Bokeh models (e.g. a ``ColumnDataSource``) as keyword arguments to the function. The ``code`` function namespace will contain the variable ``value`` (the untransformed value) at render time as well as ``format`` and ``special_vars`` as described in the class description. Args: code (function) : a scalar function to transform a single ``value`` Returns: CustomJSHover
codesearchnet
def _other_wrapper(self, name, writing):
    io_attr = getattr(self._io, name)

    def other_wrapper(*args, **kwargs):
        """Wrap all other calls to the stream Object.

        We do this to track changes to the write pointer. Anything that
        moves the write pointer in a file open for appending should move
        the read pointer as well.

        Args:
            *args: Pass through args.
            **kwargs: Pass through kwargs.

        Returns:
            Wrapped stream object method.
        """
        write_seek = self._io.tell()
        ret_value = io_attr(*args, **kwargs)
        if write_seek != self._io.tell():
            self._read_seek = self._io.tell()
            self._read_whence = 0
        if not writing or not IS_PY2:
            return ret_value

    return other_wrapper
Wrap a stream attribute in an other_wrapper. Args: name: the name of the stream attribute to wrap. writing: True if the underlying stream is open for writing. Returns: other_wrapper which is described below.
codesearchnet
def _dirint_bins(ktp, alt, w, dktp): it = range(len(ktp)) ktp_bin = [-1] * len(ktp) ktp_bin = [0 if ktp[i] >= 0 and ktp[i] < 0.24 else ktp_bin[i] for i in it] ktp_bin = [1 if ktp[i] >= 0.24 and ktp[i] < 0.4 else ktp_bin[i] for i in it] ktp_bin = [2 if ktp[i] >= 0.4 and ktp[i] < 0.56 else ktp_bin[i] for i in it] ktp_bin = [3 if ktp[i] >= 0.56 and ktp[i] < 0.7 else ktp_bin[i] for i in it] ktp_bin = [4 if ktp[i] >= 0.7 and ktp[i] < 0.8 else ktp_bin[i] for i in it] ktp_bin = [5 if ktp[i] >= 0.8 and ktp[i] <= 1 else ktp_bin[i] for i in it] alt_bin = [-1] * len(alt) alt_bin = [0 if alt[i] <= 90 and alt[i] > 65 else alt_bin[i] for i in it] alt_bin = [1 if alt[i] <= 65 and alt[i] > 50 else alt_bin[i] for i in it] alt_bin = [2 if alt[i] <= 50 and alt[i] > 35 else alt_bin[i] for i in it] alt_bin = [3 if alt[i] <= 35 and alt[i] > 20 else alt_bin[i] for i in it] alt_bin = [4 if alt[i] <= 20 and alt[i] > 10 else alt_bin[i] for i in it] alt_bin = [5 if alt[i] <= 10 else alt_bin[i] for i in it] w_bin = [-1] * len(w) w_bin = [0 if w[i] >= 0 and w[i] < 1 else w_bin[i] for i in it] w_bin = [1 if w[i] >= 1 and w[i] < 2 else w_bin[i] for i in it] w_bin = [2 if w[i] >= 2 and w[i] < 3 else w_bin[i] for i in it] w_bin = [3 if w[i] >= 3 else w_bin[i] for i in it] w_bin = [4 if w[i] == -1 else w_bin[i] for i in it] dktp_bin = [-1] * len(dktp) dktp_bin = [0 if dktp[i] >= 0 and dktp[i] < 0.015 else dktp_bin[i] for i in it] dktp_bin = [1 if dktp[i] >= 0.015 and dktp[i] < 0.035 else dktp_bin[i] for i in it] dktp_bin = [2 if dktp[i] >= 0.035 and dktp[i] < 0.07 else dktp_bin[i] for i in it] dktp_bin = [3 if dktp[i] >= 0.07 and dktp[i] < 0.15 else dktp_bin[i] for i in it] dktp_bin = [4 if dktp[i] >= 0.15 and dktp[i] < 0.3 else dktp_bin[i] for i in it] dktp_bin = [5 if dktp[i] >= 0.3 and dktp[i] <= 1 else dktp_bin[i] for i in it] dktp_bin = [6 if dktp[i] == -1 else dktp_bin[i] for i in it] return ktp_bin, alt_bin, w_bin, dktp_bin
Determine the bins for the DIRINT coefficients. Args: ktp : Altitude-independent clearness index alt : Solar altitude angle w : precipitable water estimated from surface dew-point temperature dktp : stability index Returns: tuple of ktp_bin, alt_bin, w_bin, dktp_bin
juraj-google-style
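Since the function above operates on plain parallel lists, a usage sketch only needs a few sample values; the inputs here are illustrative, not measured data (it is assumed the function is in scope as `_dirint_bins`):

ktp = [0.1, 0.5, 0.85]    # altitude-independent clearness index
alt = [70.0, 40.0, 5.0]   # solar altitude angle in degrees
w = [0.5, 2.5, -1]        # precipitable water; -1 flags a missing value
dktp = [0.01, 0.1, -1]    # stability index; -1 flags a missing value

ktp_bin, alt_bin, w_bin, dktp_bin = _dirint_bins(ktp, alt, w, dktp)
print(ktp_bin)   # [0, 2, 5]
print(alt_bin)   # [0, 2, 5]
print(w_bin)     # [0, 2, 4]
print(dktp_bin)  # [0, 3, 6]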
def __sendCommand(self, cmd):
    logging.info('%s: sendCommand[%s]', self.port, cmd)
    if self.logThreadStatus == self.logStatus['running']:
        self.logThreadStatus = self.logStatus['pauseReq']
        while self.logThreadStatus != self.logStatus['paused'] and self.logThreadStatus != self.logStatus['stop']:
            pass
    try:
        retry_times = 3
        while retry_times > 0:
            retry_times -= 1
            try:
                self._sendline(cmd)
                self._expect(cmd)
            except Exception as e:
                logging.exception('%s: failed to send command[%s]: %s', self.port, cmd, str(e))
                if retry_times == 0:
                    raise
            else:
                break
        line = None
        response = []
        retry_times = 10
        while retry_times > 0:
            line = self._readline()
            logging.info('%s: the read line is[%s]', self.port, line)
            if line:
                response.append(line)
                if line == 'Done':
                    break
            else:
                retry_times -= 1
                time.sleep(0.2)
        if line != 'Done':
            raise Exception('%s: failed to find end of response' % self.port)
        logging.info('%s: send command[%s] done!', self.port, cmd)
        return response
    except Exception as e:
        ModuleHelper.WriteIntoDebugLogger("sendCommand() Error: " + str(e))
        raise
Send a specific command to the reference unit over the serial port. Args: cmd: OpenThread CLI string Returns: Done: the command was successfully sent to the reference unit and parsed Value: the desired value was successfully retrieved from the reference unit Error: an error occurred, indicated by the specific error number that follows
juraj-google-style
def number(digit):
    spoken = str(digit)
    # a leading group of "11" (eleven, eleven thousand, ...) takes "an"
    if spoken.startswith("8") or spoken[:len(spoken) % 3] == "11":
        article = "an "
    else:
        article = "a "
    # numbers ending in 11, 12, 13 always take "th" (11th, 112th, ...)
    if spoken.endswith("1") and not spoken.endswith("11"):
        suffix = "st"
    elif spoken.endswith("2") and not spoken.endswith("12"):
        suffix = "nd"
    elif spoken.endswith("3") and not spoken.endswith("13"):
        suffix = "rd"
    else:
        suffix = "th"
    if digit > 999:
        # group digits in threes; when the length is a multiple of three
        # the leading group takes three digits instead of zero
        prefix = len(spoken) % 3 or 3
        separated = spoken[:prefix]
        for n in range(prefix, len(spoken), 3):
            separated += "," + spoken[n : n + 3]
        spoken = separated
    return article + spoken + suffix
Gets a spoken-word representation for a number. Arguments: digit (int): An integer to convert into spoken-word. Returns: A spoken-word representation for a digit, including an article ('a' or 'an') and a suffix, e.g. 1 -> 'a 1st', 11 -> 'an 11th'. Additionally delimits digits in groups of three for values > 999.
juraj-google-style
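A usage sketch for the corrected version above (the expected outputs assume the digit-grouping and suffix fixes):

print(number(1))       # 'a 1st'
print(number(8))       # 'an 8th'
print(number(11))      # 'an 11th'
print(number(112))     # 'a 112th'
print(number(123456))  # 'a 123,456th'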
def _record_result_type(recorder, f): def wrapper(*args, **kwargs): res = f(*args, **kwargs) res = recorder(args, kwargs, res) return res return wrapper
A decorator that records some information about the function. Args: recorder: a function of signature `(args, kwargs, res) -> res`. f: the original function. Returns: A transformed function that calls the original function and then the recorder afterwards.
github-repos
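A small sketch of the decorator above in use, with a hypothetical recorder that logs each call and passes the result through unchanged (assuming `_record_result_type` is in scope):

def logging_recorder(args, kwargs, res):
    print("called with args=%r, kwargs=%r -> %r" % (args, kwargs, res))
    return res

def add(a, b):
    return a + b

wrapped_add = _record_result_type(logging_recorder, add)
print(wrapped_add(2, 3))  # logs the call, then prints 5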
def _full_reduce(nodes): was_reduced, nodes = maybe_reduce(nodes) while was_reduced: was_reduced, nodes = maybe_reduce(nodes) return nodes
Apply degree reduction to ``nodes`` until it can no longer be reduced. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: nodes (numpy.ndarray): The nodes in the curve. Returns: numpy.ndarray: The fully degree-reduced nodes.
juraj-google-style
def __init__(self, filename, args, version): self.args = args self.version = version self.filename = filename try: with open(self.filename, 'rb') as file: self.data = json.load(file) except IOError: self.data = {}
Initializes the database, loading existing JSON data from the file if it exists. Args: filename: Filename for database. args: Program arguments. version: Version of file.
juraj-google-style
def run_command(self, command, arg=None, is_eval=False): mode = ((is_eval and 'eval') or 'command') if isinstance(arg, tuple): (name, d) = arg else: (name, d) = (arg, {}) result = getattr(self.connection.admin, mode)(command, name, **d) return result
Run a command on the server. Args: command - command string arg - command argument is_eval - if True, execute the command as eval Returns: the command's result
codesearchnet
def daemonize(pidfile=None): resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) os.chdir('/') os.umask(0) pid = os.fork() if (pid > 0): os._exit(0) os.setsid() pid = os.fork() if (pid > 0): os._exit(0) def terminate(signal, stack_frame): msg = 'Terminating on signal {}'.format(signal) logger.info(msg) raise SystemExit(msg) signal.signal(signal.SIGTERM, terminate) streams = [sys.stdin, sys.stdout, sys.stderr] for stream in streams: devnull = os.open(os.devnull, os.O_RDWR) os.dup2(devnull, stream.fileno()) for fd in [stream.fileno() for stream in streams]: try: os.close(fd) except OSError as err: if (err.errno == errno.EBADF): pass if ((pidfile is None) or (pidfile.strip() == '')): logger.debug('Empty pidfile set') else: pid = os.getpid() try: with open(pidfile, 'w') as f: f.write('{}\n'.format(pid)) f.close() except EnvironmentError: logger.error('Failed to create pidfile at {}'.format(pidfile)) def remove_pid_file(): os.remove(pidfile) atexit.register(remove_pid_file) logger.debug('Process daemonized')
Turn the running process into a proper daemon according to PEP 3143. Args: pidfile: The pidfile to create.
codesearchnet
def __init__(self, instrumentation_key, telemetry_channel=None): if instrumentation_key: if isinstance(instrumentation_key, channel.TelemetryChannel): telemetry_channel = instrumentation_key instrumentation_key = None else: raise Exception('Instrumentation key was required but not provided') self._context = channel.TelemetryContext() self._context.instrumentation_key = instrumentation_key self._channel = telemetry_channel or channel.TelemetryChannel() self._telemetry_processors = []
Initializes a new instance of the class. Args: instrumentation_key (str): the instrumentation key to use for this telemetry client. telemetry_channel (:class:`channel.TelemetryChannel`): the optional telemetry channel to be used instead of constructing a default one.
juraj-google-style
def _tensor_product(self, other, reverse=False): if not isinstance(other, Kraus): other = Kraus(other) ka_l, ka_r = self._data kb_l, kb_r = other._data if reverse: input_dims = self.input_dims() + other.input_dims() output_dims = self.output_dims() + other.output_dims() kab_l = [np.kron(b, a) for a in ka_l for b in kb_l] else: input_dims = other.input_dims() + self.input_dims() output_dims = other.output_dims() + self.output_dims() kab_l = [np.kron(a, b) for a in ka_l for b in kb_l] if ka_r is None and kb_r is None: kab_r = None else: if ka_r is None: ka_r = ka_l if kb_r is None: kb_r = kb_l if reverse: kab_r = [np.kron(b, a) for a in ka_r for b in kb_r] else: kab_r = [np.kron(a, b) for a in ka_r for b in kb_r] data = (kab_l, kab_r) return Kraus(data, input_dims, output_dims)
Return the tensor product channel. Args: other (QuantumChannel): a quantum channel subclass. reverse (bool): If False return self ⊗ other, if True return other ⊗ self [Default: False]. Returns: Kraus: the tensor product channel as a Kraus object. Raises: QiskitError: if other cannot be converted to a channel.
juraj-google-style
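A hedged usage sketch based on Qiskit's public `qiskit.quantum_info.Kraus` class, whose `tensor`/`expand` methods are backed by a `_tensor_product` helper of this shape:

import numpy as np
from qiskit.quantum_info import Kraus

# Single-qubit channels defined by one unitary Kraus operator each
x_channel = Kraus([np.array([[0, 1], [1, 0]], dtype=complex)])
z_channel = Kraus([np.array([[1, 0], [0, -1]], dtype=complex)])

combined = x_channel.tensor(z_channel)        # self ⊗ other
reversed_order = x_channel.expand(z_channel)  # other ⊗ self
print(combined.dim)  # (4, 4): two-qubit input and output dimensions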