Columns: code (string, 20 to 4.93k chars), docstring (string, 33 to 1.27k chars), source (string, 3 classes).
def delete(self, resource_id): endpoint = '{}/{}'.format(self.endpoint, resource_id) response = self.api.execute("DELETE", endpoint) if not response.ok: raise Error.parse(response.json()) return self._cls.parse(response.json())
Deletes an existing resource Args: resource_id - int - The resource ID to be deleted
juraj-google-style
def fetch_resource(url): try: data = get_request(url) lines = data.split('\n') except Exception as err: raise err return lines
Fetch a resource and return the resulting lines in a list Send file_name to get cleaner log messages Args: url(str) Returns: lines(list(str))
juraj-google-style
def save(self, new_path=None): self.saved_in_temp = (new_path is None) if (new_path is None): (fd, new_path) = tempfile.mkstemp() os.close(fd) if self.current_path: shutil.move(self.current_path, new_path) else: with open(new_path, 'wb') as dest: _copy_stream(self._data, dest, self._size) self.current_path = new_path
Moves or creates the file with stream contents to a new location. Args: new_path: path to move to, if None a temporary file is created.
codesearchnet
def __init__(self, expression, options): self._expression = expression self._options = options
Initializes a new instance of the ExpressionParser class Args: expression: The cron expression string options: Parsing options
juraj-google-style
def update(self, **kwargs): updated = False for prop in self.class_properties: key = prop['key'] kwarg_key = to_camelcase(key) if kwarg_key in kwargs: if prop['required'] and not kwargs[kwarg_key]: raise InquisitorError('Missing required property {}'.format(prop['name'])) updated |= self.set_property(key, kwargs[kwarg_key]) return updated
Updates the object information based on live data and reports whether any changes were made. Any changes will be automatically applied to the object, but will not be automatically persisted. You must manually call `db.session.add(object)` on the object. Args: **kwargs (:obj:): AWS API Resource object fetched from AWS API Returns: `bool`
juraj-google-style
def getPoly(rCut, nMax): rCutVeryHard = (rCut + 5.0) rx = ((0.5 * rCutVeryHard) * (x + 1)) basisFunctions = [] for i in range(1, (nMax + 1)): basisFunctions.append((lambda rr, i=i, rCut=rCut: ((rCut - np.clip(rr, 0, rCut)) ** (i + 2)))) S = np.zeros((nMax, nMax)) for i in range(1, (nMax + 1)): for j in range(1, (nMax + 1)): S[((i - 1), (j - 1))] = ((2 * (rCut ** ((7 + i) + j))) / ((((5 + i) + j) * ((6 + i) + j)) * ((7 + i) + j))) betas = sqrtm(np.linalg.inv(S)) if (betas.dtype == np.complex128): raise ValueError('Could not calculate normalization factors for the polynomial basis in the domain of real numbers. Lowering the number of radial basis functions is advised.') fs = np.zeros([nMax, len(x)]) for n in range(1, (nMax + 1)): fs[((n - 1), :)] = ((rCut - np.clip(rx, 0, rCut)) ** (n + 2)) gss = np.dot(betas, fs) return (nMax, rx, gss)
Used to calculate discrete vectors for the polynomial basis functions. Args: rCut(float): Radial cutoff nMax(int): Number of polynomial radial functions
codesearchnet
def while_loop(condition: Callable[..., Any], body: Callable[..., Any], inputs: Optional[List[Any]]=None, infeed_queue: Optional[tpu_feed.InfeedQueue]=None, name: Any=None) -> Any: del name inputs = [] if inputs is None else [ops.convert_to_tensor(x) for x in inputs] input_types = [x.dtype for x in inputs] input_arity = len(inputs) body_arg_error = xla.check_function_argument_count(body, input_arity, infeed_queue) if body_arg_error is not None: if infeed_queue is None: raise TypeError(f'Supplied loop body function cannot be called with the specified inputs. You specified {input_arity} inputs: {[i.name for i in inputs]}, but the loop body needs {body_arg_error}') else: raise TypeError(f'Supplied loop body function cannot be called with the specified inputs. You specified {input_arity} inputs: {[i.name for i in inputs]} and {infeed_queue.number_of_tuple_elements} additional inputs from infeed, but the computation needs {body_arg_error}') condition_arg_error = xla.check_function_argument_count(condition, input_arity, None) if condition_arg_error is not None: if infeed_queue is None: raise TypeError(f'Supplied loop condition function cannot be called with the specified inputs. You specified {input_arity} inputs: {[i.name for i in inputs]}, but the loop condition needs {condition_arg_error}') else: raise TypeError(f'Supplied loop condition function cannot be called with the specified inputs. You specified {input_arity} inputs: {[i.name for i in inputs]}, but the loop condition needs {condition_arg_error}. Note that infeed is not passed to the loop condition.') def condition_wrapper(*inputs): if input_arity == 0: inputs = [] return condition(*inputs) def body_wrapper(*inputs): inputs = list(inputs) if input_arity == 0: inputs = [] if infeed_queue: number_of_shards = tpu_function.get_tpu_context().number_of_shards if number_of_shards is None: raise ValueError("Can't build training loop with infeed when there is no tpu_shard_context. Are you building a loop or graph directly rather than from inside tpu.rewrite, tpu.batch_parallel, tpu.shard, or tpu.replicate?") infeed_queue.set_number_of_shards(number_of_shards) dequeue_ops = [d for d in infeed_queue.generate_dequeue_op()] else: dequeue_ops = [] outputs = body(*inputs + dequeue_ops) if not isinstance(outputs, (list, tuple)): outputs = (outputs,) outputs = [o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o) for o in outputs] output_operations = [o for o in outputs if isinstance(o, ops.Operation)] output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)] if outputs != output_tensors + output_operations: raise ValueError('TPU training loop body must return zero or more Tensor values followed by zero or more Operations.') output_types = [op.dtype for op in output_tensors] if input_types != output_types: raise TypeError('Mismatch between input types and output types for training loop body: {} vs {}'.format(input_types, output_types)) output_operations += dequeue_ops if not output_tensors: output_tensors = array_ops.constant(0) if output_operations: output_tensors = control_flow_ops.tuple(output_tensors, control_inputs=output_operations) if tensor_tracer.TensorTracer.is_enabled(): num_replicas = tpu_function.get_tpu_context().number_of_shards if num_replicas is None: num_replicas = 1 tt = tensor_tracer.TensorTracer() output_tensors = tt.trace_tpu(ops.get_default_graph(), output_tensors, None, num_replicas) return output_tensors if input_arity == 0: inputs = [array_ops.constant(0)] return while_loop_tf.while_loop(condition_wrapper, body_wrapper, inputs, name='', parallel_iterations=1)
Builds a training loop for TPUs. The set of loop-carried tensors corresponds to `inputs`. Both `condition` and `body` take the current value of the loop-carried tensors. 'body' additionally takes a tuple of infeed from infeed_queue if infeed_queue is not None. `condition` must return a single boolean value that determines whether iteration continues. `body` must return an updated list of values for the loop-carried tensors. Args: condition: a Python function that builds the loop condition. body: a Python function that builds the loop body. inputs: a list of initial values passed into the training loop, or None (equivalent to an empty list). infeed_queue: if not None, the infeed queue from which to append a tuple of arguments as inputs to condition. name: (Deprecated) Does nothing. Returns: The final values of the loop-carried tensors. Raises: TypeError: if body or condition has the wrong signature.
github-repos
def _send_trace(self, chunk=None): self._trace_sm_running = True if chunk is None: chunk = self._next_tracing_chunk(20) if chunk is None or len(chunk) == 0: self._trace_sm_running = False return try: self._send_notification(TracingChar.value_handle, chunk) self._defer(self._send_trace) except bable_interface.BaBLEException as err: if err.packet.status == 'Rejected': time.sleep(0.05) self._defer(self._send_trace, [chunk]) else: self._audit('ErrorStreamingTrace') self._logger.exception("Error while tracing data")
Stream tracing data to the ble client in 20 byte chunks Args: chunk (bytearray): A chunk that should be sent instead of requesting a new chunk from the pending reports.
juraj-google-style
def create_token(self, token_name, project_name, dataset_name, is_public): return self.resources.create_token(token_name, project_name, dataset_name, is_public)
Creates a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name is_public (int): 1 is public. 0 is not public Returns: bool: True if token created, False if not created.
juraj-google-style
def _pyval_find_struct_keys_and_depth(pyval, keys): if isinstance(pyval, dict): keys.update(pyval.keys()) return 0 elif isinstance(pyval, (list, tuple)): depth = None for child in pyval: child_depth = _pyval_find_struct_keys_and_depth(child, keys) if child_depth is not None: if depth is None: depth = child_depth + 1 elif depth != child_depth + 1: raise ValueError('Inconsistent depth of dictionaries') return depth else: return None
Finds the keys & depth of nested dictionaries in `pyval`. Args: pyval: A nested structure of lists, tuples, and dictionaries. keys: (output parameter) A set, which will be updated with any keys that are found in the nested dictionaries. Returns: The nesting depth of dictionaries in `pyval`, or `None` if `pyval` does not contain any dictionaries. Raises: ValueError: If dictionaries have inconsistent depth.
github-repos
def repr(tick, pack=False): if (tick == 9223372036854775807): return '?' dt = (datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=tick)) millis = (dt.microsecond / 1000) if pack: return ('%d%.2d%.2d%.2d%.2d%.2d%.3d' % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, millis)) return ('%d/%.2d/%.2d %.2d:%.2d:%.2d.%.3d' % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, millis))
Return a date string for an epoch-millis timestamp. Args: tick (int): The timestamp in milliseconds since the epoch. Returns: (str): A date time string
codesearchnet
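A minimal usage sketch for the timestamp formatter above, assuming the function is in scope under its original name (note that it shadows the builtin repr, as in the source):
import datetime  # required by the function above
print(repr(0))                    # -> '1970/01/01 00:00:00.000'
print(repr(0, pack=True))         # -> '19700101000000000'
print(repr(9223372036854775807))  # -> '?' (sentinel for an unknown time)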
def _default_transform_fn(self, model, content, content_type, accept): try: data = self._input_fn(content, content_type) except _errors.UnsupportedFormatError as e: return self._error_response(e, http_client.UNSUPPORTED_MEDIA_TYPE) prediction = self._predict_fn(data, model) try: result = self._output_fn(prediction, accept) except _errors.UnsupportedFormatError as e: return self._error_response(e, http_client.NOT_ACCEPTABLE) return result
Make predictions against the model and return a serialized response. This serves as the default implementation of transform_fn, used when the user has not implemented one themselves. Args: model (obj): model loaded by model_fn. content: request content. content_type (str): the request Content-Type. accept (str): accept content-type expected by the client. Returns: sagemaker_containers.beta.framework.worker.Response or tuple: the serialized response data and its content type, either as a Response object or a tuple of the form (response_data, content_type)
codesearchnet
def get_vocabulary(preprocess_output_dir, name): vocab_file = os.path.join(preprocess_output_dir, (CATEGORICAL_ANALYSIS % name)) if (not file_io.file_exists(vocab_file)): raise ValueError(('File %s not found in %s' % ((CATEGORICAL_ANALYSIS % name), preprocess_output_dir))) labels = python_portable_string(file_io.read_file_to_string(vocab_file)).split('\n') label_values = [x for x in labels if x] return label_values
Loads the vocabulary file as a list of strings. Args: preprocess_output_dir: Should contain the file CATEGORICAL_ANALYSIS % name. name: name of the csv column. Returns: List of strings. Raises: ValueError: if file is missing.
codesearchnet
def get_token(self, token_name, project_name, dataset_name): return self.resources.get_token(token_name, project_name, dataset_name)
Get a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name Returns: dict: Token info
juraj-google-style
def trace_call(self, node, func, sigs, posargs, namedargs, result): log.debug('Logging call to %r with %d args, return %r', func, len(posargs), result) args = tuple(posargs) kwargs = tuple((namedargs or {}).items()) record = _CallRecord(node, func, sigs, args, kwargs, result) if isinstance(func.data, abstract.BoundPyTDFunction): self._method_calls.add(record) elif isinstance(func.data, abstract.PyTDFunction): self._calls.add(record)
Add an entry into the call trace. Args: node: The CFG node right after this function call. func: A cfg.Binding of a function that was called. sigs: The signatures that the function might have been called with. posargs: The positional arguments, an iterable over cfg.Variable. namedargs: The keyword arguments, a dict mapping str to cfg.Variable. result: A Variable of the possible result values.
github-repos
def ensuredir(dpath, mode=1023, verbose=None): if (verbose is None): verbose = 0 if isinstance(dpath, (list, tuple)): dpath = join(*dpath) if (not exists(dpath)): if verbose: print(('Ensuring new directory (%r)' % dpath)) if (sys.version_info.major == 2): os.makedirs(normpath(dpath), mode=mode) else: os.makedirs(normpath(dpath), mode=mode, exist_ok=True) elif verbose: print(('Ensuring existing directory (%r)' % dpath)) return dpath
r""" Ensures that directory will exist. Creates new dir with sticky bits by default Args: dpath (PathLike): dir to ensure. Can also be a tuple to send to join mode (int): octal mode of directory (default 0o1777) verbose (int): verbosity (default 0) Returns: PathLike: path: the ensured directory Notes: This function is not thread-safe in Python2 Example: >>> from ubelt.util_platform import * # NOQA >>> import ubelt as ub >>> cache_dpath = ub.ensure_app_cache_dir('ubelt') >>> dpath = join(cache_dpath, 'ensuredir') >>> if exists(dpath): ... os.rmdir(dpath) >>> assert not exists(dpath) >>> ub.ensuredir(dpath) >>> assert exists(dpath) >>> os.rmdir(dpath)
codesearchnet
def get_by_name(self, name): scopes = self._client.get_all() result = [x for x in scopes if x['name'] == name] return result[0] if result else None
Gets a Scope by name. Args: name: Name of the Scope Returns: dict: Scope.
juraj-google-style
def get_user(self, user_id): try: return get_user_model().objects.get(id=user_id) except get_user_model().DoesNotExist: return None
Get a user by their ID. Args: user_id: The ID of the user to fetch. Returns: The user with the specified ID if they exist and ``None`` otherwise.
codesearchnet
def read_configuration_file(filepath=_give_default_file_path()): config = configparser.ConfigParser() config.read(filepath) def get_correct_type(section, key, config): def getstring(section, key, config): return config[section][key] def getinteger(section, key, config): return config[section].getint(key) def getboolean(section, key, config): return config[section].getboolean(key) def getfloat(section, key, config): return config[section].getfloat(key) special_actions = {} special_actions['defaults'] = {} special_actions['defaults']['use_lookup'] = getboolean try: return special_actions[section][key](section, key, config) except KeyError: return getstring(section, key, config) for section in config.sections(): for key in config[section]: settings[section][key] = get_correct_type(section, key, config) return settings
Read the configuration file. .. note:: This function changes ``cc.settings`` in place and is therefore not side-effect free. Args: filepath (str): Where to read the file. The default is under both UNIX and Windows ``~/.chemcoordrc``. Returns: None:
juraj-google-style
def which(program, path=None): path = (path or os.environ['PATH'].split(os.pathsep)) abspath = (True if os.path.split(program)[0] else False) if abspath: if fs.isexe(program): return program else: for directory in path: directory = directory.strip('"') exe_file = os.path.join(directory, program) if fs.isexe(exe_file): return exe_file return None
Returns the full path of shell commands. Replicates the functionality of system which (1) command. Looks for the named program in the directories indicated in the $PATH environment variable, and returns the full path if found. Examples: >>> system.which("ls") "/bin/ls" >>> system.which("/bin/ls") "/bin/ls" >>> system.which("not-a-real-command") None >>> system.which("ls", path=("/usr/bin", "/bin")) "/bin/ls" Arguments: program (str): The name of the program to look for. Can be an absolute path. path (sequence of str, optional): A list of directories to look for the program in. Default value is system $PATH. Returns: str: Full path to program if found, else None.
codesearchnet
def check(self, dsm, **kwargs): layered_architecture = True messages = [] categories = dsm.categories dsm_size = dsm.size[0] if not categories: categories = ['appmodule'] * dsm_size for i in range(0, dsm_size - 1): for j in range(i + 1, dsm_size): if (categories[i] != 'broker' and categories[j] != 'broker' and dsm.entities[i].split('.')[0] != dsm.entities[j].split('.')[0]): if dsm.data[i][j] > 0: layered_architecture = False messages.append( 'Dependency from %s to %s breaks the ' 'layered architecture.' % ( dsm.entities[i], dsm.entities[j])) return layered_architecture, '\n'.join(messages)
Check layered architecture. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. Returns: bool, str: True if layered architecture else False, messages
juraj-google-style
def _build_rdf(self, data=None): self.rdf = SimpleNamespace() self.rdf.data = data self.rdf.prefixes = SimpleNamespace() self.rdf.uris = SimpleNamespace() for (prefix, uri) in self.repo.context.items(): setattr(self.rdf.prefixes, prefix, rdflib.Namespace(uri)) self._parse_graph()
Parse incoming rdf as self.rdf.orig_graph, create copy at self.rdf.graph Args: data (): payload from GET request, expected RDF content in various serialization formats Returns: None
codesearchnet
def Send(self, client_id, data): raise NotImplementedError()  # interface stub: concrete subclasses implement the actual transport to the GUI client
Sends a data to a GUI client of |client_id|. Args: client_id: an opaque ID or object for a GUI client for the outgoing data or response. It must be gotten by callback call set by Start(). data: an outgoing byte stream data to a GUI client. Raises: IOError: Cannot send data to the GUI client.
github-repos
def from_dict(cls, feature_extractor_dict: dict[str, Any], **kwargs) -> PreTrainedFeatureExtractor: return_unused_kwargs = kwargs.pop('return_unused_kwargs', False) to_remove = [] for key, value in kwargs.items(): if key in feature_extractor_dict: feature_extractor_dict[key] = value to_remove.append(key) for key in to_remove: kwargs.pop(key, None) feature_extractor = cls(**feature_extractor_dict) logger.info(f'Feature extractor {feature_extractor}') if return_unused_kwargs: return (feature_extractor, kwargs) else: return feature_extractor
Instantiates a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a Python dictionary of parameters. Args: feature_extractor_dict (`Dict[str, Any]`): Dictionary that will be used to instantiate the feature extractor object. Such a dictionary can be retrieved from a pretrained checkpoint by leveraging the [`~feature_extraction_utils.FeatureExtractionMixin.to_dict`] method. kwargs (`Dict[str, Any]`): Additional parameters from which to initialize the feature extractor object. Returns: [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature extractor object instantiated from those parameters.
github-repos
def Deserialize(self, reader): self.Version = reader.ReadUInt32() self.Services = reader.ReadUInt64() self.Timestamp = reader.ReadUInt32() self.Port = reader.ReadUInt16() self.Nonce = reader.ReadUInt32() self.UserAgent = reader.ReadVarString().decode('utf-8') self.StartHeight = reader.ReadUInt32() logger.debug("Version start height: T %s " % self.StartHeight) self.Relay = reader.ReadBool()
Deserialize full object. Args: reader (neo.IO.BinaryReader):
juraj-google-style
def one_hot(indices, output_dim, on_value=1.0, off_value=0.0, dtype=tf.float32, name=None): return OneHotOperation(indices, output_dim, on_value, off_value, dtype, name=name).outputs[0]
One hot operation. TODO(noam): Is there a good reason we need a special mtf.Operation here? We could just use some code like this: cast(equal(indices, mtf_range(indices.mesh, output_dim, dtype=indices.dtype)), dtype) Args: indices: a Tensor output_dim: a Dimension on_value: Value taken when indices are on at a location, default 1 off_value: Value taken when indices are off at a location, default 0 dtype: a tf.DType name: an optional string Returns: a Tensor with shape extended by output_dim for the last axis.
codesearchnet
def write(self, b): if not self._writable: raise UnsupportedOperation('write') size = len(b) with self._seek_lock: start = self._seek end = start + size self._seek = end buffer = self._write_buffer if end <= len(buffer): buffer = memoryview(buffer) buffer[start:end] = b return size
Write the given bytes-like object, b, to the underlying raw stream, and return the number of bytes written. Args: b (bytes-like object): Bytes to write. Returns: int: The number of bytes written.
juraj-google-style
def _infer_var_name(var): name_to_var_dict = saveable_object_util.op_list_to_dict(var) if len(name_to_var_dict) > 1: raise TypeError('`var` = %s passed as arg violates the constraints. name_to_var_dict = %s' % (var, name_to_var_dict)) return list(name_to_var_dict.keys())[0]
Returns name of the `var`. Args: var: A list. The list can contain either of the following: (i) A single `Variable` (ii) A single `ResourceVariable` (iii) Multiple `Variable` objects which must be slices of the same larger variable. (iv) A single `PartitionedVariable` Returns: Name of the `var`
github-repos
def get_dict_to_print(field_to_obs): def compressed_steps(steps): return {'num_steps': len(set(steps)), 'min_step': min(steps), 'max_step': max(steps), 'last_step': steps[(- 1)], 'first_step': steps[0], 'outoforder_steps': get_out_of_order(steps)} def full_steps(steps): return {'steps': steps, 'outoforder_steps': get_out_of_order(steps)} output = {} for (field, observations) in field_to_obs.items(): if (not observations): output[field] = None continue steps = [x['step'] for x in observations] if (field in SHORT_FIELDS): output[field] = compressed_steps(steps) if (field in LONG_FIELDS): output[field] = full_steps(steps) return output
Transform the field-to-obs mapping into a printable dictionary. Args: field_to_obs: Dict that maps string field to `Observation` list. Returns: A dict with the keys and values to print to console.
codesearchnet
def _wait_and_kill(pid_to_wait, pids_to_kill): import psutil if psutil.pid_exists(pid_to_wait): psutil.Process(pid=pid_to_wait).wait() for pid_to_kill in pids_to_kill: if psutil.pid_exists(pid_to_kill): p = psutil.Process(pid=pid_to_kill) p.kill() p.wait()
Helper function. Wait for a process to finish if it exists, and then try to kill a list of processes. Used by local_train Args: pid_to_wait: the process to wait for. pids_to_kill: a list of processes to kill after the process of pid_to_wait finishes.
juraj-google-style
def remove_notification_listener(self, notification_id): for v in self.notifications.values(): toRemove = list(filter(lambda tup: tup[0] == notification_id, v)) if len(toRemove) > 0: v.remove(toRemove[0]) return True return False
Remove a previously added notification callback. Args: notification_id: The numeric id passed back from add_notification_listener Returns: The function returns boolean true if found and removed, false otherwise.
juraj-google-style
def __eq__(self, other): return isinstance(other, self.__class__) and self.to_string() == other.to_string()
Checks if the `other` DeviceSpec is the same as the current instance, i.e. has the same values for all the internal fields. Args: other: Another DeviceSpec Returns: Return `True` if `other` is also a DeviceSpec instance and has same value as the current instance. Return `False` otherwise.
github-repos
def apply_pending(self, panel_obj, version): updates = {} new_panel = deepcopy(panel_obj) new_panel['pending'] = [] new_panel['date'] = dt.datetime.now() info_fields = ['disease_associated_transcripts', 'inheritance_models', 'reduced_penetrance', 'mosaicism', 'database_entry_version', 'comment'] new_genes = [] for update in panel_obj.get('pending', []): hgnc_id = update['hgnc_id'] if (update['action'] != 'add'): updates[hgnc_id] = update continue info = update.get('info', {}) gene_obj = {'hgnc_id': hgnc_id, 'symbol': update['symbol']} for field in info_fields: if (field in info): gene_obj[field] = info[field] new_genes.append(gene_obj) for gene in panel_obj['genes']: hgnc_id = gene['hgnc_id'] if (hgnc_id not in updates): new_genes.append(gene) continue current_update = updates[hgnc_id] action = current_update['action'] info = current_update['info'] if (action == 'delete'): continue elif (action == 'edit'): for field in info_fields: if (field in info): gene[field] = info[field] new_genes.append(gene) new_panel['genes'] = new_genes new_panel['version'] = float(version) inserted_id = None if (new_panel['version'] == panel_obj['version']): result = self.panel_collection.find_one_and_replace({'_id': panel_obj['_id']}, new_panel, return_document=pymongo.ReturnDocument.AFTER) inserted_id = result['_id'] else: new_panel.pop('_id') panel_obj['is_archived'] = True self.update_panel(panel_obj=panel_obj, date_obj=panel_obj['date']) inserted_id = self.panel_collection.insert_one(new_panel).inserted_id return inserted_id
Apply the pending changes to an existing gene panel or create a new version of the same panel. Args: panel_obj(dict): panel in database to update version(double): panel version to update Returns: inserted_id(str): id of updated panel or the new one
codesearchnet
def get_colour_handler(extranames: List[str] = None, with_process_id: bool = False, with_thread_id: bool = False, stream: TextIO = None) -> logging.StreamHandler: fmt = "%(white)s%(asctime)s.%(msecs)03d" if with_process_id or with_thread_id: procinfo = [] if with_process_id: procinfo.append("p%(process)d") if with_thread_id: procinfo.append("t%(thread)d") fmt += " [{}]".format(".".join(procinfo)) extras = ":" + ":".join(extranames) if extranames else "" fmt += " %(name)s{extras}:%(levelname)s: ".format(extras=extras) fmt += "%(reset)s%(log_color)s%(message)s" cf = ColoredFormatter(fmt, datefmt=LOG_DATEFMT, reset=True, log_colors=LOG_COLORS, secondary_log_colors={}, style='%') ch = logging.StreamHandler(stream) ch.setFormatter(cf) return ch
Gets a colour log handler using a standard format. Args: extranames: additional names to append to the logger's name with_process_id: include the process ID in the logger's name? with_thread_id: include the thread ID in the logger's name? stream: ``TextIO`` stream to send log output to Returns: the :class:`logging.StreamHandler`
juraj-google-style
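A hedged usage sketch for the handler factory above; the logger name and extra names are placeholders, and output goes to the stream passed in (stderr by default):
import logging
logger = logging.getLogger('myapp.worker')  # hypothetical logger name
logger.addHandler(get_colour_handler(extranames=['worker1'], with_process_id=True))
logger.setLevel(logging.DEBUG)
logger.info('colourized, timestamped log line')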
def update(cls, customer_id, **kwargs): return cls().requests.put('customer/{customer_id}'.format(**locals()), data=kwargs)
Static method defined to update paystack customer data by id. Args: customer_id: paystack customer id. first_name: customer's first name(optional). last_name: customer's last name(optional). email: customer's email address(optional). phone: customer's phone number(optional). Returns: Json data from paystack API.
juraj-google-style
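A call sketch, assuming the classmethod above lives on a Customer resource class; the id and field values are placeholders, not real account data:
Customer.update(customer_id=4508, first_name='Ada', phone='+2348012345678')  # returns the parsed JSON response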
def lint(ctx: click.Context, amend: bool = False, stage: bool = False): _lint(ctx, amend, stage)
Runs all linters Args: ctx: click context amend: whether or not to commit results stage: whether or not to stage changes
juraj-google-style
def get_experiment_fn(args): def get_experiment(output_dir): train_config = util.merge_metadata(args.preprocess_output_dir, args.transforms_file) estimator = util.get_estimator(output_dir, train_config, args) schema_file = os.path.join(args.preprocess_output_dir, util.SCHEMA_FILE) additional_assets = {'features.json': args.transforms_file, util.SCHEMA_FILE: schema_file} if util.is_classification_model(args.model_type): target_name = train_config['target_column'] vocab_file_name = util.CATEGORICAL_ANALYSIS % target_name vocab_file_path = os.path.join( args.preprocess_output_dir, vocab_file_name) assert file_io.file_exists(vocab_file_path) additional_assets[vocab_file_name] = vocab_file_path export_strategy_target = util.make_export_strategy( train_config=train_config, args=args, keep_target=True, assets_extra=additional_assets) export_strategy_notarget = util.make_export_strategy( train_config=train_config, args=args, keep_target=False, assets_extra=additional_assets) input_reader_for_train = get_reader_input_fn( train_config=train_config, preprocess_output_dir=args.preprocess_output_dir, model_type=args.model_type, data_paths=args.train_data_paths, batch_size=args.train_batch_size, shuffle=True, num_epochs=args.num_epochs) input_reader_for_eval = get_reader_input_fn( train_config=train_config, preprocess_output_dir=args.preprocess_output_dir, model_type=args.model_type, data_paths=args.eval_data_paths, batch_size=args.eval_batch_size, shuffle=False, num_epochs=1) return tf.contrib.learn.Experiment( estimator=estimator, train_input_fn=input_reader_for_train, eval_input_fn=input_reader_for_eval, train_steps=args.max_steps, export_strategies=[export_strategy_target, export_strategy_notarget], min_eval_frequency=args.min_eval_frequency, eval_steps=None, ) return get_experiment
Builds the experiment function for learn_runner.run. Args: args: the command line args Returns: A function that returns a tf.learn experiment object.
juraj-google-style
def api_representation(self, content_type): payload = dict(Subject=self.subject, Body=dict(ContentType=content_type, Content=self.body)) if self.sender is not None: payload.update(From=self.sender.api_representation()) if any(isinstance(item, str) for item in self.to): self.to = [Contact(email=email) for email in self.to] recipients = [contact.api_representation() for contact in self.to] payload.update(ToRecipients=recipients) if self.cc: if any(isinstance(email, str) for email in self.cc): self.cc = [Contact(email) for email in self.cc] cc_recipients = [contact.api_representation() for contact in self.cc] payload.update(CcRecipients=cc_recipients) if self.bcc: if any(isinstance(email, str) for email in self.bcc): self.bcc = [Contact(email) for email in self.bcc] bcc_recipients = [contact.api_representation() for contact in self.bcc] payload.update(BccRecipients=bcc_recipients) if self._attachments: payload.update(Attachments=[attachment.api_representation() for attachment in self._attachments]) payload.update(Importance=str(self.importance)) return dict(Message=payload)
Returns the JSON representation of this message required for making requests to the API. Args: content_type (str): Either 'HTML' or 'Text'
juraj-google-style
def format_script(sensor_graph): records = [] records.append(SetGraphOnlineRecord(False, address=8)) records.append(ClearDataRecord(address=8)) records.append(ResetGraphRecord(address=8)) for node in sensor_graph.nodes: records.append(AddNodeRecord(str(node), address=8)) for streamer in sensor_graph.streamers: records.append(AddStreamerRecord(streamer, address=8)) for (stream, value) in sorted(sensor_graph.constant_database.items(), key=(lambda x: x[0].encode())): records.append(SetConstantRecord(stream, value, address=8)) records.append(PersistGraphRecord(address=8)) records.append(ClearConfigVariablesRecord()) for slot in sorted(sensor_graph.config_database, key=(lambda x: x.encode())): for config_id in sorted(sensor_graph.config_database[slot]): (config_type, value) = sensor_graph.config_database[slot][config_id] byte_value = _convert_to_bytes(config_type, value) records.append(SetConfigRecord(slot, config_id, byte_value)) app_tag = sensor_graph.metadata_database.get('app_tag') app_version = sensor_graph.metadata_database.get('app_version') if (app_tag is not None): records.append(SetDeviceTagRecord(app_tag=app_tag, app_version=app_version)) script = UpdateScript(records) return script.encode()
Create a binary script containing this sensor graph. This function produces a repeatable script by applying a known sorting order to all constants and config variables when iterating over those dictionaries. Args: sensor_graph (SensorGraph): the sensor graph that we want to format Returns: bytearray: The binary script data.
codesearchnet
def __init__(self, source_path): self.source_path = source_path self.package = get_developer_package(source_path) self.type_settings = self.package.config.plugins.release_hook self.settings = self.type_settings.get(self.name())
Create a release hook. Args: source_path: Path containing source that was released.
juraj-google-style
def recipe_barnacle_dv360(config, auth_read, auth_write, partner, recipe_slug): dataset(config, {'auth': auth_write, 'dataset': recipe_slug}) google_api(config, {'auth': auth_read, 'api': 'doubleclickbidmanager', 'version': 'v1.1', 'function': 'queries.listqueries', 'alias': 'list', 'results': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'DV_Reports'}}}) google_api(config, {'auth': auth_read, 'api': 'displayvideo', 'version': 'v1', 'function': 'partners.list', 'kwargs': {'fields': 'partners.displayName,partners.partnerId,nextPageToken'}, 'results': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'DV_Partners'}}}) google_api(config, {'auth': auth_read, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.list', 'kwargs': {'partnerId': partner, 'fields': 'advertisers.displayName,advertisers.advertiserId,nextPageToken'}, 'results': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'DV_Advertisers'}}}) google_api(config, {'auth': 'service', 'api': 'displayvideo', 'version': 'v1', 'function': 'users.list', 'kwargs': {}, 'results': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'DV_Users'}}}) bigquery(config, {'auth': auth_write, 'from': {'query': "SELECT\n U.userId,\n U.name,\n U.email,\n U.displayName,\n REGEXP_EXTRACT(U.email, r'@(.+)') AS Domain,\n IF (ENDS_WITH(U.email, '.gserviceaccount.com'), 'Service', 'User') AS Authentication,\n IF((Select COUNT(advertiserId) from UNNEST(U.assignedUserRoles)) = 0, 'Partner', 'Advertiser') AS Scope,\n STRUCT(\n AUR.partnerId,\n P.displayName AS partnerName,\n AUR.userRole,\n AUR.advertiserId,\n A.displayName AS advertiserName,\n AUR.assignedUserRoleId\n ) AS assignedUserRoles,\n FROM `{dataset}.DV_Users` AS U,\n UNNEST(assignedUserRoles) AS AUR\n LEFT JOIN `{dataset}.DV_Partners` AS P\n ON AUR.partnerId=P.partnerId\n LEFT JOIN `{dataset}.DV_Advertisers` AS A\n ON AUR.advertiserId=A.advertiserId ", 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'view': 'Barnacle_User_Roles'}}) bigquery(config, {'auth': auth_write, 'from': {'query': "SELECT\n R.*,\n P.displayName AS partnerName,\n A.displayName AS advertiserName,\n FROM (\n SELECT\n queryId,\n (SELECT CAST(value AS INT64) FROM UNNEST(R.params.filters) WHERE type = 'FILTER_PARTNER' LIMIT 1) AS partnerId,\n (SELECT CAST(value AS INT64) FROM UNNEST(R.params.filters) WHERE type = 'FILTER_ADVERTISER' LIMIT 1) AS advertiserId,\n R.schedule.frequency,\n R.params.metrics,\n R.params.type,\n R.metadata.dataRange,\n R.metadata.sendNotification,\n DATE(TIMESTAMP_MILLIS(R.metadata.latestReportRunTimeMS)) AS latestReportRunTime,\n FROM `{dataset}.DV_Reports` AS R) AS R\n LEFT JOIN `{dataset}.DV_Partners` AS P\n ON R.partnerId=P.partnerId\n LEFT JOIN `{dataset}.DV_Advertisers` AS A\n ON R.advertiserId=A.advertiserId ", 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'table': 'Barnacle_Reports'}})
Gives DV clients ability to see which users have access to which parts of an account. Loads DV user profile mappings using the API into BigQuery and connects to a DataStudio dashboard. Args: auth_read (authentication) - Credentials used for writing data. auth_write (authentication) - Credentials used for writing data. partner (integer) - Partner ID to run user audit on. recipe_slug (string) - Name of Google BigQuery dataset to create.
github-repos
def add_rect(self, width, height, rid=None): assert(width > 0 and height >0) rect, _ = self._select_position(width, height) if not rect: return None self._split(rect) self._remove_duplicates() rect.rid = rid self.rectangles.append(rect) return rect
Add a rectangle of width x height dimensions. Arguments: width (int, float): Rectangle width height (int, float): Rectangle height rid: Optional rectangle user id Returns: Rectangle: Rectangle with placement coordinates None: If the rectangle couldn't be placed.
juraj-google-style
def find_package_data(): l = list() for start in ('ambry/support', 'ambry/bundle/default_files'): for (root, dirs, files) in os.walk(start): for f in files: if f.endswith('.pyc'): continue path = os.path.join(root, f).replace('ambry/', '') l.append(path) return {'ambry': l}
Returns package_data, because setuptools is too stupid to handle nested directories. Returns: dict: key is "ambry", value is list of paths.
codesearchnet
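A sketch of how find_package_data might be wired into a setup() call; the other setup() arguments are placeholders, not taken from the source:
from setuptools import setup
setup(
    name='ambry',                      # placeholder metadata
    packages=['ambry'],
    package_data=find_package_data(),  # {'ambry': ['support/...', 'bundle/default_files/...']}
)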
def get_uid_state(self, id_or_uri): uri = self._client.build_uri(id_or_uri) + "/uidState" return self._client.get(uri)
Retrieves the unit identification (UID) state (on, off, unknown) of the specified power outlet or extension bar resource. The device must be an HP iPDU component with a locator light (HP Intelligent Load Segment, HP AC Module, HP Intelligent Outlet Bar, or HP Intelligent Outlet). Args: id_or_uri: Can be either the power device id or the uri Returns: str: unit identification (UID) state
juraj-google-style
def TerminateAFF4Flow(cls, flow_id, reason=None, status=None, token=None): flow_obj = aff4.FACTORY.Open( flow_id, aff4_type=GRRFlow, mode="rw", token=token) if not flow_obj: raise FlowError("Could not terminate flow %s" % flow_id) with flow_obj: runner = flow_obj.GetRunner() if not runner.IsRunning(): return if token is None: token = access_control.ACLToken() if reason is None: reason = "Manual termination by console." runner.Error(reason, status_code=status) flow_obj.Log("Terminated by user {0}. Reason: {1}".format( token.username, reason)) super_token = token.SetUID() children_to_kill = aff4.FACTORY.MultiOpen( flow_obj.ListChildren(), token=super_token, aff4_type=GRRFlow) for child_obj in children_to_kill: cls.TerminateAFF4Flow( child_obj.urn, reason="Parent flow terminated.", token=super_token)
Terminate a flow. Args: flow_id: The flow session_id to terminate. reason: A reason to log. status: Status code used in the generated status message. token: The access token to be used for this request. Raises: FlowError: If the flow can not be found.
juraj-google-style
def genHostCert(self, name, signas=None, outp=None, csr=None, sans=None): (pkey, cert) = self._genBasePkeyCert(name, pkey=csr) ext_sans = {('DNS:' + name)} if isinstance(sans, str): ext_sans = ext_sans.union(sans.split(',')) ext_sans = ','.join(sorted(ext_sans)) cert.add_extensions([crypto.X509Extension(b'nsCertType', False, b'server'), crypto.X509Extension(b'keyUsage', False, b'digitalSignature,keyEncipherment'), crypto.X509Extension(b'extendedKeyUsage', False, b'serverAuth'), crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE'), crypto.X509Extension(b'subjectAltName', False, ext_sans.encode('utf-8'))]) if (signas is not None): self.signCertAs(cert, signas) else: self.selfSignCert(cert, pkey) if (not pkey._only_public): keypath = self._savePkeyTo(pkey, 'hosts', ('%s.key' % name)) if (outp is not None): outp.printf(('key saved: %s' % (keypath,))) crtpath = self._saveCertTo(cert, 'hosts', ('%s.crt' % name)) if (outp is not None): outp.printf(('cert saved: %s' % (crtpath,))) return (pkey, cert)
Generates a host keypair. Args: name (str): The name of the host keypair. signas (str): The CA keypair to sign the new host keypair with. outp (synapse.lib.output.Output): The output buffer. csr (OpenSSL.crypto.PKey): The CSR public key when generating the keypair from a CSR. sans (list): List of subject alternative names. Examples: Make a host keypair named "myhost": myhostkey, myhostcert = cdir.genHostCert('myhost') Returns: ((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the private key and certificate objects.
codesearchnet
def reduce(self, fn, *args): assert not context.executing_eagerly() tensor_specs = [] for arg in args: if not isinstance(arg, tensor_lib.Tensor): raise ValueError(f'Got a non-Tensor argument {arg} in reduce.') batched_shape = tensor_shape.TensorShape([self._maybe_iters]).concatenate(arg.shape) tensor_specs.append(tensor_lib.TensorSpec(shape=batched_shape, dtype=arg.dtype)) concrete_function = def_function.function(fn).get_concrete_function(*tensor_specs) pl_outputs = [] with ops.control_dependencies(args): for output in concrete_function.outputs: if not isinstance(output, tensor_lib.Tensor): raise ValueError(f'Got a non-Tensor output {output} while running reduce.') if output.shape.is_fully_defined(): dummy = array_ops.zeros(output.shape.as_list(), dtype=output.dtype) pl_outputs.append(array_ops.placeholder_with_default(dummy, shape=output.shape)) else: pl_outputs.append(array_ops.placeholder(output.dtype, shape=output.shape)) reduction_op = array_ops.identity_n(pl_outputs)[0].op self._reduce_map[reduction_op] = (concrete_function, args) if len(reduction_op.outputs) == 1: return reduction_op.outputs[0] else: return tuple(reduction_op.outputs)
Performs reduction `fn` on `args` vectorized across pfor iterations. Note that `fn` is traced once inside the loop function context. Hence any captures or side-effects will happen in that context. Call to the traced version of `fn` happens during the construction of the vectorized code. Note that this currently may not work inside a control flow construct. Args: fn: a reduction function. It will be called with arguments that have the same structure as *args but with individual values whose rank may be higher by 1 since they represent loop invariant vectorized versions of the corresponding Tensors in *args. *args: unvectorized Tensors. Returns: The result of running `fn` on the vectorized versions of `*args`. These outputs will be available as loop invariant values to all the iterations.
github-repos
def set_guest_access(self, room_id, guest_access): content = {'guest_access': guest_access} return self.send_state_event(room_id, 'm.room.guest_access', content)
Set the guest access policy of the room. Args: room_id(str): The room to set the rules for. guest_access(str): Whether guests can join. One of: ["can_join", "forbidden"]
codesearchnet
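A minimal call sketch, assuming `api` is an instance of the client API class this method belongs to; the room id is hypothetical:
api.set_guest_access('!abc123:example.org', 'can_join')   # allow guests to join
api.set_guest_access('!abc123:example.org', 'forbidden')  # lock guests out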
class EfficientNetEncoder(nn.Module): def __init__(self, config: EfficientNetConfig): super().__init__() self.config = config self.depth_coefficient = config.depth_coefficient def round_repeats(repeats): return int(math.ceil(self.depth_coefficient * repeats)) num_base_blocks = len(config.in_channels) num_blocks = sum((round_repeats(n) for n in config.num_block_repeats)) curr_block_num = 0 blocks = [] for i in range(num_base_blocks): in_dim = round_filters(config, config.in_channels[i]) out_dim = round_filters(config, config.out_channels[i]) stride = config.strides[i] kernel_size = config.kernel_sizes[i] expand_ratio = config.expand_ratios[i] for j in range(round_repeats(config.num_block_repeats[i])): id_skip = True if j == 0 else False stride = 1 if j > 0 else stride in_dim = out_dim if j > 0 else in_dim adjust_padding = False if curr_block_num in config.depthwise_padding else True drop_rate = config.drop_connect_rate * curr_block_num / num_blocks block = EfficientNetBlock(config=config, in_dim=in_dim, out_dim=out_dim, stride=stride, kernel_size=kernel_size, expand_ratio=expand_ratio, drop_rate=drop_rate, id_skip=id_skip, adjust_padding=adjust_padding) blocks.append(block) curr_block_num += 1 self.blocks = nn.ModuleList(blocks) self.top_conv = nn.Conv2d(in_channels=out_dim, out_channels=round_filters(config, 1280), kernel_size=1, padding='same', bias=False) self.top_bn = nn.BatchNorm2d(num_features=config.hidden_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum) self.top_activation = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> BaseModelOutputWithNoAttention: all_hidden_states = (hidden_states,) if output_hidden_states else None for block in self.blocks: hidden_states = block(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) hidden_states = self.top_conv(hidden_states) hidden_states = self.top_bn(hidden_states) hidden_states = self.top_activation(hidden_states) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states] if v is not None)) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
Forward propagates the embeddings through each EfficientNet block. Args: config ([`EfficientNetConfig`]): Model configuration class.
github-repos
def wait(self, timeout=None): poll = 30 while not self._is_complete: try: query_result = self._api.jobs_query_results(self._job_id, project_id=self._context.project_id, page_size=0, timeout=poll * 1000) except Exception as e: raise e if query_result['jobComplete']: if 'totalBytesProcessed' in query_result: self._bytes_processed = int(query_result['totalBytesProcessed']) self._cache_hit = query_result.get('cacheHit', None) if 'totalRows' in query_result: self._total_rows = int(query_result['totalRows']) break if timeout is not None: timeout -= poll if timeout <= 0: break self._refresh_state() return self
Wait for the job to complete, or a timeout to happen. This is more efficient than the version in the base Job class, in that we can use a call that blocks for the poll duration rather than a sleep. That means we shouldn't block unnecessarily long and can also poll less. Args: timeout: how long to wait (in seconds) before giving up; default None which means no timeout. Returns: The QueryJob
juraj-google-style
def __init__(self, channel): self.CreateReadSession = channel.unary_unary( "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/CreateReadSession", request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.CreateReadSessionRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.ReadSession.FromString, ) self.ReadRows = channel.unary_stream( "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/ReadRows", request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.ReadRowsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.ReadRowsResponse.FromString, ) self.BatchCreateReadSessionStreams = channel.unary_unary( "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/BatchCreateReadSessionStreams", request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.BatchCreateReadSessionStreamsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.BatchCreateReadSessionStreamsResponse.FromString, ) self.FinalizeStream = channel.unary_unary( "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/FinalizeStream", request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.FinalizeStreamRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.SplitReadStream = channel.unary_unary( "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/SplitReadStream", request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.SplitReadStreamRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.SplitReadStreamResponse.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def GetSubkeyByPath(self, key_path): if not self._registry_key and self._registry: self._GetKeyFromRegistry() subkey = self for path_segment in key_paths.SplitKeyPath(key_path): subkey = subkey.GetSubkeyByName(path_segment) if not subkey: break return subkey
Retrieves a subkey by path. Args: key_path (str): path of the subkey. Returns: WinRegistryKey: Windows Registry subkey or None if not found.
juraj-google-style
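A usage sketch, assuming `registry_key` is an instance of the Windows Registry key class this method belongs to; the key path is illustrative:
subkey = registry_key.GetSubkeyByPath('Software\\Microsoft\\Windows')
if subkey is None:
    print('subkey not found')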
def update_state(self, y_true, y_pred, sample_weight=None): metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight)
Accumulates true positive and false negative statistics. Args: y_true: The ground truth values, with the same dimensions as `y_pred`. Will be cast to `bool`. y_pred: The predicted values. Each element must be in the range `[0, 1]`. sample_weight: Optional weighting of each example. Defaults to `1`. Can be a tensor whose rank is either 0, or the same rank as `y_true`, and must be broadcastable to `y_true`.
github-repos
def __init__(self, prevHash=None, prevIndex=None): super(TransactionInput, self).__init__() self.PrevHash = prevHash self.PrevIndex = prevIndex
Create an instance. Args: prevHash (UInt256): prevIndex (int):
juraj-google-style
def register_token(self, registry_address_hex: typing.AddressHex, token_address_hex: typing.AddressHex, retry_timeout: typing.NetworkTimeout=DEFAULT_RETRY_TIMEOUT) -> TokenNetwork: registry_address = decode_hex(registry_address_hex) token_address = decode_hex(token_address_hex) registry = self._raiden.chain.token_network_registry(registry_address) contracts_version = self._raiden.contract_manager.contracts_version if (contracts_version == DEVELOPMENT_CONTRACT_VERSION): token_network_address = registry.add_token_with_limits(token_address=token_address, channel_participant_deposit_limit=UINT256_MAX, token_network_deposit_limit=UINT256_MAX) else: token_network_address = registry.add_token_without_limits(token_address=token_address) waiting.wait_for_payment_network(self._raiden, registry.address, token_address, retry_timeout) return self._raiden.chain.token_network(token_network_address)
Register a token with the raiden token manager. Args: registry_address: registry address token_address_hex (string): a hex encoded token address. Returns: The token network proxy.
codesearchnet
def html(self, data=None, template=None): if data is None: data = {} if template: return render(self.request, template, data) return HttpResponse(data)
Send html document to user. Args: - data: Dict to render template, or string with rendered HTML. - template: Name of template to render HTML document with passed data.
juraj-google-style
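A hedged usage sketch from inside a hypothetical view class that exposes the helper above; the template name and context are placeholders:
def get(self, request):  # hypothetical handler method on the same view class
    return self.html({'title': 'Dashboard'}, template='dashboard.html')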
def wcs_pix_transform(ct, i, format=0): z1 = float(ct.z1) z2 = float(ct.z2) i = float(i) yscale = (128.0 / (z2 - z1)) if ((format == 'T') or (format == 't')): format = 1 if (i == 0): t = 0.0 elif (ct.zt == W_LINEAR): t = ((((i - 1) * (z2 - z1)) / 199.0) + z1) t = max(z1, min(z2, t)) else: t = float(i) if (format > 1): t = ((z2 - t) * yscale) return t
Computes the WCS corrected pixel value given a coordinate transformation and the raw pixel value. Input: ct coordinate transformation. instance of coord_tran. i raw pixel intensity. format format string (optional). Returns: WCS corrected pixel value
codesearchnet
def add_phenotype(self, institute, case, user, link, hpo_term=None, omim_term=None, is_group=False): hpo_results = [] try: if hpo_term: hpo_results = [hpo_term] elif omim_term: LOG.debug("Fetching info for mim term {0}".format(omim_term)) disease_obj = self.disease_term(omim_term) if disease_obj: for hpo_term in disease_obj.get('hpo_terms', []): hpo_results.append(hpo_term) else: raise ValueError('Must supply either hpo or omim term') except ValueError as e: raise e existing_terms = set(term['phenotype_id'] for term in case.get('phenotype_terms', [])) updated_case = case phenotype_terms = [] for hpo_term in hpo_results: LOG.debug("Fetching info for hpo term {0}".format(hpo_term)) hpo_obj = self.hpo_term(hpo_term) if hpo_obj is None: raise ValueError("Hpo term: %s does not exist in database" % hpo_term) phenotype_id = hpo_obj['_id'] description = hpo_obj['description'] if phenotype_id not in existing_terms: phenotype_term = dict(phenotype_id=phenotype_id, feature=description) phenotype_terms.append(phenotype_term) LOG.info("Creating event for adding phenotype term for case" " {0}".format(case['display_name'])) self.create_event( institute=institute, case=case, user=user, link=link, category='case', verb='add_phenotype', subject=case['display_name'], content=phenotype_id ) if is_group: updated_case = self.case_collection.find_one_and_update( {'_id': case['_id']}, { '$addToSet': { 'phenotype_terms': {'$each': phenotype_terms}, 'phenotype_groups': {'$each': phenotype_terms}, }, }, return_document=pymongo.ReturnDocument.AFTER ) else: updated_case = self.case_collection.find_one_and_update( {'_id': case['_id']}, { '$addToSet': { 'phenotype_terms': {'$each': phenotype_terms}, }, }, return_document=pymongo.ReturnDocument.AFTER ) LOG.debug("Case updated") return updated_case
Add a new phenotype term to a case Create a phenotype term and event with the given information Args: institute (Institute): A Institute object case (Case): Case object user (User): A User object link (str): The url to be used in the event hpo_term (str): A hpo id omim_term (str): A omim id is_group (bool): is phenotype term a group?
juraj-google-style
def new_stories(self, raw=False, limit=None): new_stories = self._get_stories('newstories', limit) if raw: new_stories = [story.raw for story in new_stories] return new_stories
Returns list of item ids of current new stories Args: limit (int): specifies the number of stories to be returned. raw (bool): Flag to indicate whether to transform all objects into raw json. Returns: `list` object containing ids of new stories.
codesearchnet
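A usage sketch, assuming `hn` is an instance of the client class exposing new_stories():
stories = hn.new_stories(limit=10)              # ten most recent new stories
raw_items = hn.new_stories(raw=True, limit=10)  # the same items as raw JSON payloads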
def _write_version(self, data, model): vdata = {'data': data, 'key': model.key, 'model': model.Meta.bucket_name, 'timestamp': time.time()} obj = version_bucket.new(data=vdata) obj.add_index('key_bin', model.key) obj.add_index('model_bin', vdata['model']) obj.add_index('timestamp_int', int(vdata['timestamp'])) obj.store() return obj.key
Writes a copy of the objects current state to write-once mirror bucket. Args: data (dict): Model instance's all data for versioning. model (instance): Model instance. Returns: Key of version record. key (str): Version_bucket key.
juraj-google-style
def _sanitize_input_structure(input_structure): input_structure = input_structure.copy() input_structure.remove_spin() input_structure = input_structure.get_primitive_structure(use_site_props=False) if "magmom" in input_structure.site_properties: input_structure.remove_site_property("magmom") return input_structure
Sanitize our input structure by removing magnetic information and making primitive. Args: input_structure: Structure Returns: Structure
juraj-google-style
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None): with ops.name_scope(name, 'normalize', [counts, mean_ss, variance_ss, shift]): divisor = math_ops.reciprocal(counts, name='divisor') if shift is not None: shifted_mean = math_ops.multiply(mean_ss, divisor, name='shifted_mean') mean = math_ops.add(shifted_mean, shift, name='mean') else: shifted_mean = math_ops.multiply(mean_ss, divisor, name='mean') mean = shifted_mean variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor), math_ops.square(shifted_mean), name='variance') return (mean, variance)
Calculate the mean and variance of based on the sufficient statistics. Args: counts: A `Tensor` containing the total count of the data (one value). mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly shifted) sum of the elements to average over. variance_ss: A `Tensor` containing the variance sufficient statistics: the (possibly shifted) squared sum of the data to compute the variance over. shift: A `Tensor` containing the value by which the data is shifted for numerical stability, or `None` if no shift was performed. name: Name used to scope the operations that compute the moments. Returns: Two `Tensor` objects: `mean` and `variance`.
github-repos
def ed25519_generate_key_pair_from_secret(secret): if not isinstance(secret, bytes): secret = secret.encode() hash_bytes = sha3.keccak_256(secret).digest() sk = Ed25519SigningKeyFromHash.generate(hash_bytes=hash_bytes) private_value_base58 = sk.encode(encoding='base58') public_value_compressed_base58 = sk.get_verifying_key().encode(encoding='base58') return private_value_base58, public_value_compressed_base58
Generate a new key pair. Args: secret (:class:`string`): A secret that serves as a seed Returns: A tuple of (private_key, public_key) encoded in base58.
juraj-google-style
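A quick usage sketch of the key derivation above. Because the keccak digest of the secret seeds the signing key, the same secret always yields the same base58-encoded pair.

private_key, public_key = ed25519_generate_key_pair_from_secret('my-seed-secret')

# Deterministic: deriving again from the same secret gives an identical pair.
private_key_2, public_key_2 = ed25519_generate_key_pair_from_secret('my-seed-secret')
assert (private_key, public_key) == (private_key_2, public_key_2)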
def GetHashData(hashable): ms = StreamManager.GetStream() writer = BinaryWriter(ms) hashable.SerializeUnsigned(writer) ms.flush() retVal = ms.ToArray() StreamManager.ReleaseStream(ms) return retVal
Get the data used for hashing. Args: hashable (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin Returns: bytes:
codesearchnet
def remove_location(self, location): res = self._remove_hdxobject(self.data.get('groups'), location, matchon='name') if not res: res = self._remove_hdxobject(self.data.get('groups'), location.upper(), matchon='name') if not res: res = self._remove_hdxobject(self.data.get('groups'), location.lower(), matchon='name') return res
Remove a location. Upper- and lower-case variants of the name are also tried before giving up.

Args:
    location (str): Location to remove

Returns:
    bool: True if location removed or False if not
juraj-google-style
def merge_corpus(self, corpus): merging_corpus = Corpus.from_corpus(corpus) self.import_tracks(corpus.tracks.values()) self.import_issuers(corpus.issuers.values()) utterance_idx_mapping = self.import_utterances(corpus.utterances.values()) for subview_idx, subview in merging_corpus.subviews.items(): for filter in subview.filter_criteria: if isinstance(filter, subset.MatchingUtteranceIdxFilter): new_filtered_utt_ids = set() for utt_idx in filter.utterance_idxs: new_filtered_utt_ids.add(utterance_idx_mapping[utt_idx].idx) filter.utterance_idxs = new_filtered_utt_ids new_idx = naming.index_name_if_in_list(subview_idx, self.subviews.keys()) self.import_subview(new_idx, subview) for feat_container_idx, feat_container in merging_corpus.feature_containers.items(): self.new_feature_container(feat_container_idx, feat_container.path)
Merge the given corpus into this corpus. All assets (tracks, utterances, issuers, ...) are copied into this corpus. If any ids (utt-idx, track-idx, issuer-idx, subview-idx, ...) are occurring in both corpora, the ids from the merging corpus are suffixed by a number (starting from 1 until no other is matching). Args: corpus (CorpusView): The corpus to merge.
juraj-google-style
def provides(arg_name=None, annotated_with=None, in_scope=None): if ((arg_name is None) and (annotated_with is None) and (in_scope is None)): raise errors.EmptyProvidesDecoratorError(locations.get_back_frame_loc()) return _get_pinject_wrapper(locations.get_back_frame_loc(), provider_arg_name=arg_name, provider_annotated_with=annotated_with, provider_in_scope_id=in_scope)
Modifies the binding of a provider method. If arg_name is specified, then the created binding is for that arg name instead of the one gotten from the provider method name (e.g., 'foo' from 'provide_foo'). If annotated_with is specified, then the created binding includes that annotation object. If in_scope is specified, then the created binding is in the scope with that scope ID. At least one of the args must be specified. A provider method may not be decorated with @provides() twice. Args: arg_name: the name of the arg to annotate on the decorated function annotated_with: an annotation object in_scope: a scope ID Returns: a function that will decorate functions passed to it
codesearchnet
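Based on the docstring above, a provider method on a binding spec might be decorated like this; the BindingSpec base class and the chosen arg name are illustrative assumptions, not taken from the source.

import pinject

class MyBindingSpec(pinject.BindingSpec):  # assumed binding-spec base class

    # Bind the provided value under the arg name 'database_url'
    # instead of 'db' (which would be derived from 'provide_db').
    @provides('database_url')
    def provide_db(self):
        return 'postgres://localhost/mydb'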
def get_top_pairs(fsym, limit=5): url = build_url('pairs', fsym=fsym, limit=limit) data = load_data(url) return data['Data']
Get top trading pairs by 24 hour aggregated volume for a currency. Args: fsym: FROM symbol. limit: Number of results. Default value returns top 5 pairs. Returns: Function returns a list containing a dictionary for each result: [{'exchange': ..., 'fromSymbol': ..., 'toSymbol': ..., 'volume24h': ..., 'volume24hTo': ...}, {...}, ...] The list is ordered based on the volume of the FROM currency starting with the highest value.
juraj-google-style
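Example call, assuming the surrounding module with build_url and load_data is importable; the printed field names follow the docstring above.

pairs = get_top_pairs('ETH', limit=3)
for pair in pairs:
    print(pair['exchange'], pair['toSymbol'], pair['volume24h'])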
def execute_code(self, code, filename=None, isolate=False): def _apply(): self.compile_code(code=code, filename=filename, exec_namespace=self.globals) if isolate: saved_globals = dict(self.globals) try: _apply() finally: self.globals.clear() self.globals.update(saved_globals) else: _apply()
Execute code within the execution context. Args: code (str or SourceCode): Rex code to execute. filename (str): Filename to report if there are syntax errors. isolate (bool): If True, do not affect `self.globals` by executing this code.
codesearchnet
def _filter_top_k(x, k): _, top_k_idx = nn_ops.top_k(x, k, sorted=False) top_k_mask = math_ops.reduce_sum(array_ops.one_hot(top_k_idx, array_ops.shape(x)[-1], axis=-1), axis=-2) return x * top_k_mask + NEG_INF * (1 - top_k_mask)
Filters top-k values in the last dim of x and set the rest to NEG_INF. Used for computing top-k prediction values in dense labels (which has the same shape as predictions) for recall and precision top-k metrics. Args: x: tensor with any dimensions. k: the number of values to keep. Returns: tensor with same shape and dtype as x.
github-repos
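The masking trick in _filter_top_k can be checked by hand; this standalone NumPy sketch keeps the top-k entries of the last axis and pushes everything else toward NEG_INF, mirroring the one-hot-sum mask used above (NEG_INF's exact value is an assumption here).

import numpy as np

NEG_INF = -1e10  # stands in for the module-level constant assumed above

def filter_top_k_np(x, k):
    # Indices of the k largest values along the last axis.
    top_k_idx = np.argpartition(x, -k, axis=-1)[..., -k:]
    mask = np.zeros_like(x)
    np.put_along_axis(mask, top_k_idx, 1.0, axis=-1)
    return x * mask + NEG_INF * (1 - mask)

x = np.array([[0.1, 0.9, 0.3, 0.4]])
print(filter_top_k_np(x, k=2))  # keeps 0.9 and 0.4; the other entries become -1e10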
def data_period_start_day_of_week(self, value=None): if (value is not None): try: value = str(value) except ValueError: raise ValueError('value {} need to be of type str for field `data_period_start_day_of_week`'.format(value)) if (',' in value): raise ValueError('value should not contain a comma for field `data_period_start_day_of_week`') vals = set() vals.add('Sunday') vals.add('Monday') vals.add('Tuesday') vals.add('Wednesday') vals.add('Thursday') vals.add('Friday') vals.add('Saturday') if (value not in vals): raise ValueError('value {} is not an accepted value for field `data_period_start_day_of_week`'.format(value)) self._data_period_start_day_of_week = value
Corresponds to IDD Field `data_period_start_day_of_week` Args: value (str): value for IDD Field `data_period_start_day_of_week` Accepted values are: - Sunday - Monday - Tuesday - Wednesday - Thursday - Friday - Saturday if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def append(self, value): if ((self._avm is not None) and (not self.terminated)): path = self._last_path if path: path += '.' self[(path + LIST_HEAD)] = value self._last_path = (path + LIST_TAIL) self[self._last_path] = AVM() else: raise TdlError('Cannot append to a closed list.')
Append an item to the end of an open ConsList. Args: value (:class:`Conjunction`, :class:`Term`): item to add Raises: :class:`TdlError`: when appending to a closed list
codesearchnet
def flownet2_fusion(self, x): with argscope([tf.layers.conv2d], activation=lambda x: tf.nn.leaky_relu(x, 0.1), padding='valid', strides=2, kernel_size=3, data_format='channels_first'), \ argscope([tf.layers.conv2d_transpose], padding='same', activation=tf.identity, data_format='channels_first', strides=2, kernel_size=4): conv0 = tf.layers.conv2d(pad(x, 1), 64, name='conv0', strides=1) x = tf.layers.conv2d(pad(conv0, 1), 64, name='conv1') conv1 = tf.layers.conv2d(pad(x, 1), 128, name='conv1_1', strides=1) x = tf.layers.conv2d(pad(conv1, 1), 128, name='conv2') conv2 = tf.layers.conv2d(pad(x, 1), 128, name='conv2_1', strides=1) flow2 = tf.layers.conv2d(pad(conv2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity) flow2_up = tf.layers.conv2d_transpose(flow2, 2, name='upsampled_flow2_to_1') x = tf.layers.conv2d_transpose(conv2, 32, name='deconv1', activation=lambda x: tf.nn.leaky_relu(x, 0.1)) concat1 = tf.concat([conv1, x, flow2_up], axis=1, name='concat1') interconv1 = tf.layers.conv2d(pad(concat1, 1), 32, strides=1, name='inter_conv1', activation=tf.identity) flow1 = tf.layers.conv2d(pad(interconv1, 1), 2, name='predict_flow1', strides=1, activation=tf.identity) flow1_up = tf.layers.conv2d_transpose(flow1, 2, name='upsampled_flow1_to_0') x = tf.layers.conv2d_transpose(concat1, 16, name='deconv0', activation=lambda x: tf.nn.leaky_relu(x, 0.1)) concat0 = tf.concat([conv0, x, flow1_up], axis=1, name='concat0') interconv0 = tf.layers.conv2d(pad(concat0, 1), 16, strides=1, name='inter_conv0', activation=tf.identity) flow0 = tf.layers.conv2d(pad(interconv0, 1), 2, name='predict_flow0', strides=1, activation=tf.identity) return tf.identity(flow0, name='flow2')
Architecture in Table 4 of FlowNet 2.0. Args: x: NCHW tensor, where C=11 is the concatenation of 7 items of [3, 2, 2, 1, 1, 1, 1] channels.
juraj-google-style
def save(f, arr, vocab): itr = iter(vocab) word, idx = next(itr) _write_line(f, arr[idx], word) for word, idx in itr: f.write(b'\n') _write_line(f, arr[idx], word)
Save word embedding file.

Args:
    f (File): File to write the vectors to. The file should be opened for binary writing.
    arr (numpy.array): Numpy array with ``float`` dtype.
    vocab (iterable): Each element is a pair of a word (``bytes``) and an ``arr`` index (``int``).
        Words should be encoded to ``bytes`` beforehand.
juraj-google-style
def get_fractional_coords(self, cart_coords: Vector3Like) -> np.ndarray: return dot(cart_coords, self.inv_matrix)
Returns the fractional coordinates given Cartesian coordinates.

Args:
    cart_coords (3x1 array): Cartesian coords.

Returns:
    Fractional coordinates.
juraj-google-style
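The conversion is just a matrix product with the inverse lattice matrix; a standalone NumPy sketch (using a hypothetical 4 Angstrom cubic cell instead of the lattice object) illustrates it.

import numpy as np

matrix = np.eye(3) * 4.0           # hypothetical 4 Angstrom cubic lattice
inv_matrix = np.linalg.inv(matrix)

cart_coords = np.array([2.0, 2.0, 2.0])
frac_coords = np.dot(cart_coords, inv_matrix)
print(frac_coords)  # [0.5 0.5 0.5]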
def relu6(x): return ops.relu6(x)
Relu6 activation function. It's the ReLU function, but truncated to a maximum value of 6. Args: x: Input tensor.
github-repos
def epoch_to_human_time(epoch_time): if isinstance(epoch_time, int): try: d = datetime.datetime.fromtimestamp(epoch_time / 1000) return d.strftime('%m-%d-%Y %H:%M:%S ') except ValueError: return None
Converts an epoch timestamp to human readable time. This essentially converts an output of get_current_epoch_time to an output of get_current_human_time Args: epoch_time: An integer representing an epoch timestamp in milliseconds. Returns: A time string representing the input time. None if input param is invalid.
github-repos
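Usage sketch; the exact output string depends on the local timezone of the machine running it.

# 1609459200000 ms is 2021-01-01 00:00:00 UTC; the printed local time varies by timezone.
print(epoch_to_human_time(1609459200000))
# e.g. '01-01-2021 01:00:00 ' on a UTC+1 machine (note the trailing space in the format)

# Non-integer input falls through the isinstance check and returns None.
print(epoch_to_human_time('not-a-timestamp'))  # None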
class AveragePooling3D(keras_layers.AveragePooling3D, base.Layer): def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): if strides is None: raise ValueError('Argument `strides` must not be None.') super(AveragePooling3D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)
Average pooling layer for 3D inputs (e.g. volumes). Args: pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer.
github-repos
def strace(device, trace_address, breakpoint_address): jlink = pylink.JLink() jlink.open() jlink.power_on() jlink.set_tif(pylink.JLinkInterfaces.SWD) jlink.connect(device) jlink.reset() jlink.breakpoint_clear_all() op = pylink.JLinkStraceOperation.TRACE_START jlink.strace_clear_all() jlink.strace_start() bphandle = jlink.breakpoint_set(breakpoint_address, thumb=True) trhandle = jlink.strace_code_fetch_event(op, address=trace_address) jlink.restart() time.sleep(1) while True: if jlink.halted(): break while True: instructions = jlink.strace_read(1) if len(instructions) == 0: break instruction = instructions[0] print(jlink.disassemble_instruction(instruction)) jlink.power_off() jlink.close()
Implements simple trace using the STrace API. Args: device (str): the device to connect to trace_address (int): address to begin tracing from breakpoint_address (int): address to breakpoint at Returns: ``None``
juraj-google-style
def GetCustomerIDs(client): managed_customer_service = client.GetService('ManagedCustomerService', version='v201809') offset = 0 selector = {'fields': ['CustomerId'], 'predicates': [{'field': 'CanManageClients', 'operator': 'EQUALS', 'values': [False]}], 'paging': {'startIndex': str(offset), 'numberResults': str(PAGE_SIZE)}} queue = multiprocessing.Queue() more_pages = True while more_pages: page = managed_customer_service.get(selector) if (page and ('entries' in page) and page['entries']): for entry in page['entries']: queue.put(entry['customerId']) else: raise Exception("Can't retrieve any customer ID.") offset += PAGE_SIZE selector['paging']['startIndex'] = str(offset) more_pages = (offset < int(page['totalNumEntries'])) return queue
Retrieves all CustomerIds in the account hierarchy. Note that your configuration file must specify a client_customer_id belonging to an AdWords manager account. Args: client: an AdWordsClient instance. Raises: Exception: if no CustomerIds could be found. Returns: A Queue instance containing all CustomerIds in the account hierarchy.
codesearchnet
def save_args(conditions, out_path): if isinstance(conditions, argparse.Namespace): args = vars(conditions) else: args = conditions try: os.makedirs(out_path) except OSError: pass with tempdir(prefix='args', dir=out_path) as tempd: path = os.path.join(tempd, 'args.json') with open(path, 'w') as f: json.dump(args, f, indent=4) new_path = os.path.join(out_path, 'args') shutil.move(path, new_path)
A util function to save experiment conditions for the job table.

Args:
    conditions (:class:`argparse.Namespace` or dict): Experiment conditions to show on a job table.
        Keys are shown as the table header and values are shown in a job row.
    out_path (str): Output directory name to save conditions.
juraj-google-style
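A usage sketch with argparse, writing the conditions into a results directory; the directory name and the arguments are arbitrary placeholders.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--epochs', type=int, default=10)
args = parser.parse_args([])  # empty list: use the defaults for this sketch

# Writes the conditions to <out_path>/args for the job table.
save_args(args, 'results/experiment_001')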
def post(self, url, params=None, data=None, files=None, **kwargs): return self.call_api( "POST", url, params=params, data=data, files=files, **kwargs )
Call the API with a POST request.

Args:
    url (str): Resource location relative to the base URL.
    params (dict or None): Query-string parameters.
    data (dict or None): Request body contents.
    files (dict or None): Files to be passed to the request.

Returns:
    An instance of ResultParser or ErrorParser.
juraj-google-style
def set_processed_counts(self, shards_processed, shards_status):
    chart = google_chart_api.BarChart()

    def filter_status(status_to_filter):
        return [count if status == status_to_filter else 0
                for count, status in zip(shards_processed, shards_status)]

    if shards_status:
        chart.stacked = True
        chart.AddBars(filter_status("unknown"), color="404040")
        chart.AddBars(filter_status("success"), color="00ac42")
        chart.AddBars(filter_status("running"), color="3636a9")
        chart.AddBars(filter_status("aborted"), color="e29e24")
        chart.AddBars(filter_status("failed"), color="f6350f")
    else:
        chart.AddBars(shards_processed)

    shard_count = len(shards_processed)

    if shard_count > 95:
        pixels_per_shard = 700.0 / shard_count
        bar_thickness = int(pixels_per_shard * .9)
        chart.style = bar_chart.BarChartStyle(bar_thickness=bar_thickness,
                                              bar_gap=0.1,
                                              use_fractional_gap_spacing=True)

    if shards_processed and shard_count <= 95:
        stride_length = max(1, shard_count 
        chart.bottom.labels = []
        for x in range(shard_count):
            if (x % stride_length == 0 or
                    x == shard_count - 1):
                chart.bottom.labels.append(x)
            else:
                chart.bottom.labels.append("")
        chart.left.labels = ["0", str(max(shards_processed))]
        chart.left.min = 0

    self.chart_width = min(700, max(300, shard_count * 20))
    self.chart_url = chart.display.Url(self.chart_width, 200)
Updates a chart url to display the processed count for each shard.

Args:
    shards_processed: list of integers with the number of processed entities in each shard
    shards_status: list of status strings ('unknown', 'success', 'running', 'aborted' or
        'failed'), one per shard, used to color the stacked bars
juraj-google-style
def resample(self, data, input_rate):
    data16 = np.frombuffer(data, dtype=np.int16)
    resample_size = int(len(data16) / self.input_rate * self.RATE_PROCESS)
    resample = signal.resample(data16, resample_size)
    resample16 = np.array(resample, dtype=np.int16)
    return resample16.tobytes()
The microphone may not support our native processing sampling rate, so resample
from input_rate to RATE_PROCESS here for webrtcvad and DeepSpeech.

Args:
    data (binary): Input audio stream
    input_rate (int): Input audio rate to resample from
codesearchnet
def GetAttributeNames(self): attribute_names = [] for attribute_name in iter(self.__dict__.keys()): if (attribute_name[0] == '_'): continue attribute_names.append(attribute_name) return attribute_names
Retrieves the names of all attributes. Returns: list[str]: attribute names.
codesearchnet
def getGUA(self, filterByPrefix=None):
    print('%s call getGUA' % self.port)
    print(filterByPrefix)
    globalAddrs = []
    try:
        globalAddrs = self.getGlobal()

        if filterByPrefix is None:
            return self.__padIp6Addr(globalAddrs[0])
        else:
            for line in globalAddrs:
                line = self.__padIp6Addr(line)
                print("Padded IPv6 Address:" + line)
                if line.startswith(filterByPrefix):
                    return line
            print('no global address matched')
            return str(globalAddrs[0])
    except Exception as e:
        ModuleHelper.WriteIntoDebugLogger('getGUA() Error: ' + str(e))
        return e
get expected global unicast IPv6 address of OpenThreadWpan Args: filterByPrefix: a given expected global IPv6 prefix to be matched Returns: a global IPv6 address
juraj-google-style
def to_env_vars(mapping): def format_key(key): if key: decoded_name = 'SM_%s' % str(key).upper() return decoded_name else: return '' def format_value(_mapping): if six.PY3 and isinstance(_mapping, six.binary_type): return _mapping.decode('latin1') elif _mapping is None: return '' elif isinstance(_mapping, six.string_types): return str(_mapping) else: return json.dumps(_mapping, sort_keys=True, separators=(',', ':'), ensure_ascii=True) return {format_key(k): format_value(v) for k, v in mapping.items()}
Transform a dictionary into a dictionary of env vars.

Example:
    >>> env_vars = mapping.to_env_vars({'model_dir': '/opt/ml/model', 'batch_size': 25})
    >>>
    >>> print(env_vars)
    {'SM_MODEL_DIR': '/opt/ml/model', 'SM_BATCH_SIZE': '25'}

Args:
    mapping (dict[str, object]): A Python mapping.

Returns:
    (dict): Dictionary of env vars
juraj-google-style
def dbmax05years(self, value=None): if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `dbmax05years`'.format(value)) self._dbmax05years = value
Corresponds to IDD Field `dbmax05years` 5-year return period values for maximum extreme dry-bulb temperature Args: value (float): value for IDD Field `dbmax05years` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def _ParseNamesString(self, names_string): if (not names_string): return names_string = names_string.lower() names = [name.strip() for name in names_string.split(',')] file_entry_filter = file_entry_filters.NamesFileEntryFilter(names) self._filter_collection.AddFilter(file_entry_filter)
Parses the name string. Args: names_string (str): comma separated filenames to filter.
codesearchnet
def match_hail_sizes(model_tracks, obs_tracks, track_pairings): unpaired = list(range(len(model_tracks))) for p, pair in enumerate(track_pairings): model_track = model_tracks[pair[0]] unpaired.remove(pair[0]) obs_track = obs_tracks[pair[1]] obs_hail_sizes = np.array([step[obs_track.masks[t] == 1].max() for t, step in enumerate(obs_track.timesteps)]) if obs_track.times.size > 1 and model_track.times.size > 1: normalized_obs_times = 1.0 / (obs_track.times.max() - obs_track.times.min())\ * (obs_track.times - obs_track.times.min()) normalized_model_times = 1.0 / (model_track.times.max() - model_track.times.min())\ * (model_track.times - model_track.times.min()) hail_interp = interp1d(normalized_obs_times, obs_hail_sizes, kind="nearest", bounds_error=False, fill_value=0) model_track.observations = hail_interp(normalized_model_times) elif obs_track.times.size == 1: model_track.observations = np.ones(model_track.times.shape) * obs_hail_sizes[0] elif model_track.times.size == 1: model_track.observations = np.array([obs_hail_sizes.max()]) print(pair[0], "obs", obs_hail_sizes) print(pair[0], "model", model_track.observations) for u in unpaired: model_tracks[u].observations = np.zeros(model_tracks[u].times.shape)
Given forecast and observed track pairings, maximum hail sizes are associated with each paired forecast storm track timestep. If the duration of the forecast and observed tracks differ, then interpolation is used for the intermediate timesteps. Args: model_tracks: List of model track STObjects obs_tracks: List of observed STObjects track_pairings: list of tuples containing the indices of the paired (forecast, observed) tracks
juraj-google-style
def get_all(): info_dir = _get_info_dir() results = [] for filename in os.listdir(info_dir): filepath = os.path.join(info_dir, filename) try: with open(filepath) as infile: contents = infile.read() except IOError as e: if (e.errno == errno.EACCES): continue else: raise try: info = _info_from_string(contents) except ValueError: tb_logging.get_logger().warning('invalid info file: %r', filepath, exc_info=True) else: results.append(info) return results
Return TensorBoardInfo values for running TensorBoard processes. This function may not provide a perfect snapshot of the set of running processes. Its result set may be incomplete if the user has cleaned their /tmp/ directory while TensorBoard processes are running. It may contain extraneous entries if TensorBoard processes exited uncleanly (e.g., with SIGKILL or SIGQUIT). Returns: A fresh list of `TensorBoardInfo` objects.
codesearchnet
def validate_config_has_one_of(config, one_of_keys): intersection = set(config).intersection(one_of_keys) if len(intersection) > 1: raise Exception('Only one of the values in "%s" is needed' % ', '.join(intersection)) if len(intersection) == 0: raise Exception('One of the values in "%s" is needed' % ', '.join(one_of_keys))
Validate a config dictionary to make sure it has one and only one key in one_of_keys. Args: config: the config to validate. one_of_keys: the list of possible keys that config can have one and only one. Raises: Exception if the config does not have any of them, or multiple of them.
juraj-google-style
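A small sketch of how the check behaves; the key names are arbitrary placeholders.

config = {'table': 'mydataset.mytable', 'mode': 'append'}

# Passes: exactly one of 'table'/'query' is present.
validate_config_has_one_of(config, ['table', 'query'])

# Raises: neither key is present.
try:
    validate_config_has_one_of(config, ['view', 'query'])
except Exception as e:
    print(e)  # One of the values in "view, query" is needed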
def store(self, obj): if type(obj) is AtlasServiceInstance.Instance: query = { "instance_id" : obj.instance_id, "database" : obj.get_dbname(), "cluster": obj.get_cluster(), "parameters" : obj.parameters } elif type(obj) is AtlasServiceBinding.Binding: query = { "binding_id" : obj.binding_id, "parameters" : obj.parameters, "instance_id": obj.instance.instance_id } else: raise ErrStorageTypeUnsupported(type(obj)) try: result = self.broker.insert_one(query) except: raise ErrStorageMongoConnection("Store Instance or Binding") if result is not None: obj.provisioned = True return result.inserted_id raise ErrStorageStore()
Store Store an object into the MongoDB storage for caching Args: obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding Returns: ObjectId: MongoDB _id Raises: ErrStorageMongoConnection: Error during MongoDB communication. ErrStorageTypeUnsupported: Type unsupported. ErrStorageStore : Failed to store the binding or instance.
juraj-google-style
def wb010(self, value=None): if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `wb010`'.format(value)) self._wb010 = value
Corresponds to IDD Field `wb010` Wet-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `wb010` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def load(self, filename, create=None, default_conf={}): (filenames, tries) = self.__search_config_files(filename) if len(filenames): self.__loaded_config_file = (filenames if self.__nested else filenames[0]) return self.__load_config_files((filenames if self.__nested else filenames[:1])) if (create is not None): self.__loaded_config_file = os.path.join(create, filename) self.save(default_conf) return default_conf raise ConfigFileNotFoundException(('Config file not found in: %s' % tries))
Load the config file.

Args:
    filename (str): the filename of the config, without any path
    create (str): if the config file is not found and this parameter is not None,
        a config file will be created in this directory with the content of default_conf
    default_conf (dict): content of the default config data

Returns:
    Return value of the ConfigFormatter.decode or the default_conf value

Raises:
    ConfigFileNotFoundException: if the config file is not found
codesearchnet
def run_dumper(self, dumper): logging.debug("start dumper::") dumper( experiments=self.experiments, farms=self.farms, barn=self.barn, engine=self.current_engine, ) logging.debug("::dumper ended")
Run dumper (once per engine).

Args:
    dumper: dumper to run (function or method).

        The dumper takes the attributes experiments, farms, and barn as input.
        It does not return anything, but it can (if the dumper designer feels in
        a bad and nasty mood) modify the input objects (for example experiments).
juraj-google-style
def __init__(self, x, y): self.x = x self.y = y
Init method. Args: x: Argument x. y: Argument y.
github-repos
def get_vmss(access_token, subscription_id, resource_group, vmss_name): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API]) return do_get(endpoint, access_token)
Get virtual machine scale set details. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. Returns: HTTP response. JSON body of scale set properties.
codesearchnet
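Usage sketch; the subscription, resource group, and scale-set names are placeholders, and the token would normally come from the library's authentication helpers rather than being hard-coded.

access_token = '<valid-azure-token>'  # placeholder; obtain via the auth helpers in practice
vmss = get_vmss(access_token,
                subscription_id='00000000-0000-0000-0000-000000000000',
                resource_group='my-resource-group',
                vmss_name='my-scaleset')
print(vmss)  # JSON body with the scale set properties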
def correct_absolute_refs(self, construction_table): c_table = construction_table.copy() abs_refs = constants.absolute_refs problem_index = self.check_absolute_refs(c_table) for i in problem_index: order_of_refs = iter(permutations(abs_refs.keys())) finished = False while (not finished): if self._has_valid_abs_ref(i, c_table): finished = True else: row = c_table.index.get_loc(i) c_table.iloc[(row, row:)] = next(order_of_refs)[row:3] return c_table
Reindex construction_table if a linear reference is present in the first three rows.

Uses :meth:`~Cartesian.check_absolute_refs` to obtain the problematic indices.

Args:
    construction_table (pd.DataFrame):

Returns:
    pd.DataFrame: Appropriately renamed construction table.
codesearchnet
def __init__(self, filenames=None, selected_scans=None, profile=False, filestatuschecker=None, fetch_one_liners=False, tester=None, initialize=False, ): if tester is None: self.tester = prms.Instruments.tester else: self.tester = tester self.loader = None self.logger = logging.getLogger(__name__) self.logger.debug("created CellpyData instance") self.name = None self.profile = profile self.minimum_selection = {} if filestatuschecker is None: self.filestatuschecker = prms.Reader.filestatuschecker else: self.filestatuschecker = filestatuschecker self.forced_errors = 0 self.summary_exists = False if not filenames: self.file_names = [] else: self.file_names = filenames if not self._is_listtype(self.file_names): self.file_names = [self.file_names] if not selected_scans: self.selected_scans = [] else: self.selected_scans = selected_scans if not self._is_listtype(self.selected_scans): self.selected_scans = [self.selected_scans] self.datasets = [] self.status_datasets = [] self.selected_dataset_number = 0 self.number_of_datasets = 0 self.capacity_modifiers = ['reset', ] self.list_of_step_types = ['charge', 'discharge', 'cv_charge', 'cv_discharge', 'charge_cv', 'discharge_cv', 'ocvrlx_up', 'ocvrlx_down', 'ir', 'rest', 'not_known'] self.force_step_table_creation = \ prms.Reader.force_step_table_creation self.force_all = prms.Reader.force_all self.sep = prms.Reader.sep self._cycle_mode = prms.Reader.cycle_mode self.load_only_summary = prms.Reader.load_only_summary self.select_minimal = prms.Reader.select_minimal self.limit_loaded_cycles = prms.Reader.limit_loaded_cycles self.ensure_step_table = prms.Reader.ensure_step_table self.daniel_number = prms.Reader.daniel_number self.raw_datadir = prms.Paths.rawdatadir self.cellpy_datadir = prms.Paths.cellpydatadir self.auto_dirs = prms.Reader.auto_dirs self.headers_normal = get_headers_normal() self.headers_summary = get_headers_summary() self.headers_step_table = get_headers_step_table() self.table_names = None self.set_instrument() self.cellpy_units = get_cellpy_units() if initialize: self.initialize()
CellpyData object Args: filenames: list of files to load. selected_scans: profile: experimental feature. filestatuschecker: property to compare cellpy and raw-files; default read from prms-file. fetch_one_liners: experimental feature. tester: instrument used (e.g. "arbin") (checks prms-file as default). initialize: create a dummy (empty) dataset; defaults to False.
juraj-google-style