Columns: code (string, lengths 20 to 4.93k characters), docstring (string, lengths 33 to 1.27k characters), source (string, 3 classes).
def stitch_images(images, margin=5, cols=5):
    if (len(images) == 0):
        return None
    (h, w, c) = images[0].shape
    n_rows = int(math.ceil((len(images) / cols)))
    n_cols = min(len(images), cols)
    out_w = ((n_cols * w) + ((n_cols - 1) * margin))
    out_h = ((n_rows * h) + ((n_rows - 1) * margin))
    stitched_images = np.zeros((out_h, out_w, c), dtype=images[0].dtype)
    for row in range(n_rows):
        for col in range(n_cols):
            img_idx = ((row * cols) + col)
            if (img_idx >= len(images)):
                break
            stitched_images[((h + margin) * row):(((h + margin) * row) + h), ((w + margin) * col):(((w + margin) * col) + w), :] = images[img_idx]
    return stitched_images
Utility function to stitch images together with a `margin`. Args: images: The array of 2D images to stitch. margin: The black border margin size between images (Default value = 5) cols: Max number of image cols. New row is created when number of images exceed the column size. (Default value = 5) Returns: A single numpy image array comprising of input images.
codesearchnet
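A minimal usage sketch for the stitch_images entry above; illustrative only, assuming numpy, math, and the function itself are in scope:

import numpy as np

# Six 32x32 RGB tiles arranged into a 5-column grid with 5px margins.
tiles = [np.full((32, 32, 3), i * 40, dtype=np.uint8) for i in range(6)]
grid = stitch_images(tiles, margin=5, cols=5)
print(grid.shape)  # (69, 180, 3): 2 rows and 5 columns of tiles plus margins (Python 3 division)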
def _open_file(filename):
    if (filename is None):
        raise DataSourceError('Trace filename is not defined')
    try:
        trace_file = open(filename, 'r')
    except IOError as e:
        raise DataSourceError(('Unable to open trace file %s' % filename), e)
    else:
        LOG.debug('Opened trace file %s', filename)
        return trace_file
Attempt to open the file at ``filename`` for reading. Raises: DataSourceError, if the file cannot be opened.
codesearchnet
def init_log(logger, loglevel=0):
    global log_tmp_dir, log_tmp_fn
    log_tmp_dir = tempfile.mkdtemp()
    log_tmp_fn = os.path.join(log_tmp_dir, 'multiqc.log')
    debug_template = '[%(asctime)s] %(name)-50s [%(levelname)-7s] %(message)s'
    info_template = '[%(levelname)-7s] %(module)15s : %(message)s'
    logger.setLevel(getattr(logging, 'DEBUG'))
    console = logging.StreamHandler()
    console.setLevel(getattr(logging, loglevel))
    if loglevel == 'DEBUG':
        console.setFormatter(logging.Formatter(debug_template))
    else:
        console.setFormatter(logging.Formatter(info_template))
    logger.addHandler(console)
    file_handler = logging.FileHandler(log_tmp_fn, encoding='utf-8')
    file_handler.setLevel(getattr(logging, 'DEBUG'))
    file_handler.setFormatter(logging.Formatter(debug_template))
    logger.addHandler(file_handler)
Initializes logging. Prints logs to the console with the level defined by loglevel. Also prints a verbose log to the multiqc data directory if available (multiqc_data/multiqc.log). Args: loglevel (str): Determines the level of the log output.
juraj-google-style
def post_process_generation(self, generation: Union[str, List[str]], fix_markdown: bool=True, num_workers: Optional[int]=None) -> Union[str, List[str]]:
    requires_backends(self, ['nltk', 'levenshtein'])
    if isinstance(generation, list):
        if num_workers is not None and isinstance(num_workers, int):
            with Pool(num_workers) as p:
                return p.map(partial(self.post_process_single, fix_markdown=fix_markdown), generation)
        else:
            return [self.post_process_single(s, fix_markdown=fix_markdown) for s in generation]
    else:
        return self.post_process_single(generation, fix_markdown=fix_markdown)
Postprocess a generated text or a list of generated texts. This function can be used to perform postprocessing on generated text, such as fixing Markdown formatting. Postprocessing is quite slow so it is recommended to use multiprocessing to speed up the process. Args: generation (Union[str, List[str]]): The generated text or a list of generated texts. fix_markdown (`bool`, *optional*, defaults to `True`): Whether to perform Markdown formatting fixes. num_workers (`int`, *optional*): Optional number of workers to pass to leverage multiprocessing (postprocessing several texts in parallel). Returns: Union[str, List[str]]: The postprocessed text or list of postprocessed texts.
github-repos
def SCM(root_dir, repo=None):
    if Git.is_repo(root_dir) or Git.is_submodule(root_dir):
        return Git(root_dir, repo=repo)
    return NoSCM(root_dir, repo=repo)
Returns SCM instance that corresponds to a repo at the specified path. Args: root_dir (str): path to a root directory of the repo. repo (dvc.repo.Repo): dvc repo instance that root_dir belongs to. Returns: dvc.scm.base.Base: SCM instance.
juraj-google-style
def __init__(self, filename, f_start=None, f_stop=None, t_start=None, t_stop=None, load_data=True, max_load=1.):
    super(FilReader, self).__init__()
    self.header_keywords_types = sigproc.header_keyword_types
    if filename and os.path.isfile(filename):
        self.filename = filename
        self.load_data = load_data
        self.header = self.read_header()
        self.file_size_bytes = os.path.getsize(self.filename)
        self.idx_data = sigproc.len_header(self.filename)
        self.n_channels_in_file = self.header[b'nchans']
        self.n_beams_in_file = self.header[b'nifs']
        self.n_pols_in_file = 1
        self._n_bytes = int(self.header[b'nbits'] / 8)
        self._d_type = self._setup_dtype()
        self._setup_n_ints_in_file()
        self.file_shape = (self.n_ints_in_file, self.n_beams_in_file, self.n_channels_in_file)
        if self.header[b'foff'] < 0:
            self.f_end = self.header[b'fch1']
            self.f_begin = self.f_end + self.n_channels_in_file*self.header[b'foff']
        else:
            self.f_begin = self.header[b'fch1']
            self.f_end = self.f_begin + self.n_channels_in_file*self.header[b'foff']
        self.t_begin = 0
        self.t_end = self.n_ints_in_file
        self._setup_selection_range(f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop, init=True)
        self._setup_chans()
        self._setup_freqs()
        self.freq_axis = 2
        self.time_axis = 0
        self.beam_axis = 1
        if max_load is not None:
            if max_load > 1.0:
                logger.warning('Setting data limit != 1GB, please handle with care!')
            self.MAX_DATA_ARRAY_SIZE = max_load * MAX_DATA_ARRAY_SIZE_UNIT
        else:
            self.MAX_DATA_ARRAY_SIZE = MAX_DATA_ARRAY_SIZE_UNIT
        if self.file_size_bytes > self.MAX_DATA_ARRAY_SIZE:
            self.large_file = True
        else:
            self.large_file = False
        if self.load_data:
            if self.large_file:
                if self.f_start or self.f_stop or self.t_start or self.t_stop:
                    if self.isheavy():
                        logger.warning("Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded, please try another (t,v) selection." % (self._calc_selection_size() / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3)))
                        self._init_empty_selection()
                    else:
                        self.read_data()
                else:
                    logger.warning("The file is of size %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded. You could try another (t,v) selection."%(self.file_size_bytes/(1024.**3), self.MAX_DATA_ARRAY_SIZE/(1024.**3)))
                    self._init_empty_selection()
            else:
                self.read_data()
        else:
            logger.info("Skipping loading data ...")
            self._init_empty_selection()
    else:
        raise IOError("Need a file to open, please give me one!")
Constructor. Args: filename (str): filename of blimpy file. f_start (float): start frequency, in MHz f_stop (float): stop frequency, in MHz t_start (int): start time bin t_stop (int): stop time bin
juraj-google-style
def mkdir(path):
    try:
        os.makedirs(path)
        if (not os.path.isdir(path)):
            raise IOError('path is not a directory')
    except OSError as e:
        if ((e.errno == 17) and os.path.isdir(path)):
            return
        raise
Make a directory and its parents. Args: path (str): path to create Returns: None Raises: OSError if the directory cannot be created.
codesearchnet
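A quick usage sketch for the mkdir helper above (illustrative; assumes the function is in scope):

import os
import tempfile

base = tempfile.mkdtemp()
nested = os.path.join(base, 'a', 'b', 'c')
mkdir(nested)                  # creates the directory and its parents
mkdir(nested)                  # calling again is a no-op: errno 17 (EEXIST) is swallowed
print(os.path.isdir(nested))   # True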
def delete_bq_table(project, dataset_id, table_id):
    _LOGGER.info('Clean up a BigQuery table with project: %s, dataset: %s, table: %s.', project, dataset_id, table_id)
    client = bigquery.Client(project=project)
    table_ref = client.dataset(dataset_id).table(table_id)
    try:
        client.delete_table(table_ref)
    except gexc.NotFound:
        raise GcpTestIOError('BigQuery table does not exist: %s' % table_ref)
Delete a BigQuery table. Args: project: Name of the project. dataset_id: Name of the dataset where table is. table_id: Name of the table.
github-repos
def from_config(cls, config):
    return cls(**config)
Instantiates a `Loss` from its config (output of `get_config()`). Args: config: Output of `get_config()`. Returns: A `Loss` instance.
github-repos
def get_percentage_lattice_parameter_changes(self):
    initial_latt = self.initial.lattice
    final_latt = self.final.lattice
    d = {l: ((getattr(final_latt, l) / getattr(initial_latt, l)) - 1) for l in ['a', 'b', 'c']}
    return d
Returns the percentage lattice parameter changes. Returns: A dict of the percentage change in lattice parameter, e.g., {'a': 0.012, 'b': 0.021, 'c': -0.031} implies a change of 1.2%, 2.1% and -3.1% in the a, b and c lattice parameters respectively.
codesearchnet
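The ratio arithmetic behind the entry above, shown on plain numbers that reproduce the docstring's example (hypothetical lattice parameters, not from the source):

initial = {'a': 3.000, 'b': 3.000, 'c': 5.000}
final = {'a': 3.036, 'b': 3.063, 'c': 4.845}
changes = {k: final[k] / initial[k] - 1 for k in ('a', 'b', 'c')}
print({k: round(v, 3) for k, v in changes.items()})  # {'a': 0.012, 'b': 0.021, 'c': -0.031}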
def execute_only_once():
    f = inspect.currentframe().f_back
    ident = (f.f_code.co_filename, f.f_lineno)
    if (ident in _EXECUTE_HISTORY):
        return False
    _EXECUTE_HISTORY.add(ident)
    return True
Each call site of this function is guaranteed to return True the first time and False afterwards. Returns: bool: whether this is the first time this function gets called from this line of code. Example: .. code-block:: python if execute_only_once(): # do something only once
codesearchnet
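A short usage sketch for execute_only_once (illustrative; assumes the function and its module-level _EXECUTE_HISTORY set are in scope):

for attempt in range(3):
    if execute_only_once():
        print('logged once')   # printed only on the first iteration of this call site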
def iterator_product(variables: VarType, parent: str=None) -> Iterable[VarMatrix]:
    logger.debug('Yielding from product iterator')
    if isinstance(variables, list):
        raise ValueError(f'Product only takes mappings of values, got {variables} of type {type(variables)}')
    (yield list(variable_matrix(variables, parent, 'product')))
Apply the product operator to a set of variables. This uses the python itertools.product iterator to combine multiple variables such that all possible combinations are generated. This is the default iterator; however, it can also be specified manually with this option. Args: variables: The variables object parent: Unused
codesearchnet
def __init__(self, points, add_bounding_box=False):
    self.points = list(points)
    dim = [len(i) for i in self.points]
    if max(dim) != min(dim):
        raise ValueError("Input points must all have the same dimension!")
    self.dim = dim[0]
    if add_bounding_box:
        coord_ranges = zip(np.amin(points, 0), np.amax(points, 0))
        for coord in itertools.product(*coord_ranges):
            self.points.append(coord)
    output = qvoronoi("o Fv", self.points)
    output.pop(0)
    nvertices, nregions, i = [int(i) for i in output.pop(0).split()]
    self.vertices = [[float(j) for j in output.pop(0).split()] for i in range(nvertices)]
    self.regions = [[int(j) for j in output.pop(0).split()[1:]] for i in range(nregions)]
    output.pop(0)
    ridges = {}
    for line in output:
        val = [int(i) for i in line.split()]
        ridges[tuple(val[1:3])] = val[3:]
    self.ridges = ridges
Initializes a VoronoiTess from points. Args: points ([[float]]): All the points as a sequence of sequences. e.g., [[-0.5, -0.5], [-0.5, 0.5], [0.5, -0.5], [0.5, 0.5]] add_bounding_box (bool): If True, a hypercube corresponding to the extremes of each coordinate will be added to the list of points.
juraj-google-style
def setMAC(self, xEUI):
    print '%s call setMAC' % self.port
    address64 = ''
    try:
        if not xEUI:
            address64 = self.mac
        if not isinstance(xEUI, str):
            address64 = self.__convertLongToString(xEUI)
            if len(address64) < 16:
                address64 = address64.zfill(16)
                print address64
        else:
            address64 = xEUI
        cmd = WPANCTL_CMD + 'setprop NCP:MACAddress %s' % address64
        if self.__sendCommand(cmd)[0] != 'Fail':
            self.mac = address64
            return True
        else:
            return False
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger('setMAC() Error: ' + str(e))
Set the extended address of the Thread device. Args: xEUI: extended address in hex format Returns: True: successfully set the extended address False: failed to set the extended address
juraj-google-style
def _build_late_dispatcher(func_name):
    def _late_dynamic_dispatcher(obj, *args):
        method = getattr(obj, func_name, None)
        if (not callable(method)):
            raise NotImplementedError(('Instance method %r is not implemented by %r.' % (func_name, obj)))
        return method(*args)
    return _late_dynamic_dispatcher
Return a function that calls method 'func_name' on objects. This is useful for building late-bound dynamic dispatch. Arguments: func_name: The name of the instance method that should be called. Returns: A function that takes an 'obj' parameter, followed by *args and returns the result of calling the instance method with the same name as the contents of 'func_name' on the 'obj' object with the arguments from *args.
codesearchnet
def rebuild(self, image_id=None, return_dict=True):
    if not image_id:
        image_id = self.image['id']
    return self._perform_action({"type": "rebuild", "image": image_id}, return_dict)
Restore the droplet to an image (snapshot or backup). Args: image_id (int): id of image Optional Args: return_dict (bool): Return a dict when True (default), otherwise return an Action. Returns: dict or Action
juraj-google-style
def visit_Import(self, node):
    for import_alias in node.names:
        full_import = (import_alias.name, import_alias.asname)
        detection = self._api_analysis_spec.imports_to_detect.get(full_import, None)
        if detection:
            self.add_result(detection)
            self.add_log(detection.log_level, node.lineno, node.col_offset, detection.log_message)
    self.generic_visit(node)
Handle visiting an import node in the AST. Args: node: Current Node
github-repos
def append(self, event, category=None):
    date = datetime.datetime.now()
    self.store.insert(0, (date, event, category))
    if (len(self.store) > self.size):
        del self.store[(- 1)]
Adds a new event to the trace store. The event may have a category Args: event (spade.message.Message): the event to be stored category (str, optional): a category to classify the event (Default value = None)
codesearchnet
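The same bounded, newest-first history kept by the append method above can be sketched with a standard-library deque (illustrative, not the library's own implementation):

import collections
import datetime

store = collections.deque(maxlen=100)            # oldest entries drop off automatically
store.appendleft((datetime.datetime.now(), 'msg_received', 'info'))
latest_timestamp, event, category = store[0]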
def get_programs_dict(pkgname_only=None, flag_protected=False):
    ___ret = _get_programs_dict()
    __ret = ___ret if pkgname_only is None else OrderedDict(((pkgname_only, ___ret[pkgname_only]),))
    if flag_protected:
        _ret = __ret
    else:
        _ret = copy.deepcopy(__ret)
        for value in _ret.values():
            value["exeinfo"] = [exeinfo for exeinfo in value["exeinfo"] if not exeinfo.filename.startswith("_")]
    ret = _ret if pkgname_only is None and flag_protected is None else \
        OrderedDict(((key, value) for key, value in _ret.items() if len(value["exeinfo"]) > 0))
    return ret
Scans COLLABORATORS_S packages for scripts, optionally filtering according to the arguments passed Args: pkgname_only: name of single package within COLLABORATORS_S flag_protected: include scripts starting with "_"? Returns: dictionary: {"packagename0": {"exeinfo": [ExeInfo00, ...], "description": description0}, ...}
juraj-google-style
def transform_absolute_coords(self, width, height):
    if self.type != EventType.POINTER_MOTION_ABSOLUTE:
        raise AttributeError(_wrong_meth.format(self.type))
    abs_x = self._libinput.libinput_event_pointer_get_absolute_x_transformed(self._handle, width)
    abs_y = self._libinput.libinput_event_pointer_get_absolute_y_transformed(self._handle, height)
    return abs_x, abs_y
Return the current absolute coordinates of the pointer event, transformed to screen coordinates. For pointer events that are not of type :attr:`~libinput.constant.EventType.POINTER_MOTION_ABSOLUTE`, this method raises :exc:`AttributeError`. Args: width (int): The current output screen width. height (int): The current output screen height. Returns: (float, float): The current absolute (x, y) coordinates transformed to a screen coordinates. Raises: AttributeError
juraj-google-style
def get_structure_from_id(self, task_id, final_structure=True):
    args = {'task_id': task_id}
    field = 'output.crystal' if final_structure else 'input.crystal'
    results = tuple(self.query([field], args))
    if len(results) > 1:
        raise QueryError("More than one result found for task_id {}!".format(task_id))
    elif len(results) == 0:
        raise QueryError("No structure found for task_id {}!".format(task_id))
    c = results[0]
    return Structure.from_dict(c[field])
Returns a structure from the database given the task id. Args: task_id: The task_id to query for. final_structure: Whether to obtain the final or initial structure. Defaults to True.
juraj-google-style
def split(cls, tensor, split_dimension, num_devices, input_shape=None):
    if input_shape:
        shape = input_shape
    else:
        shape = tensor.shape.as_list()
    if shape[split_dimension] is not None and shape[split_dimension] < num_devices:
        raise ValueError('Split dimension was smaller than the required number of splits: shape=%r, dimension=%r, num_devices=%r' % (shape, split_dimension, num_devices))
    tile_assignment_dims = [1] * len(shape)
    tile_assignment_dims[split_dimension] = num_devices
    return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.OTHER, tile_assignment_dimensions=tile_assignment_dims, tile_assignment_devices=range(num_devices)))
Returns a Sharding that splits a tensor across a dimension. This creates a Tiled attribute, similar to tile(), but easier to use for the common case of tiling a tensor N ways in one dimension. Args: tensor: A tf.Tensor to split. split_dimension: The dimension number to split. num_devices: The number of cores to split `tensor` over. input_shape: The shape of the original tensor. Raises: ValueError: The tensor to split was smaller in the split dimension than the number of devices to split over.
github-repos
def locate(desktop_filename_or_name):
    paths = [
        os.path.expanduser('~/.local/share/applications'),
        '/usr/share/applications']
    result = []
    for path in paths:
        for file in os.listdir(path):
            if desktop_filename_or_name in file.split('.') or desktop_filename_or_name == file:
                result.append(os.path.join(path, file))
            else:
                file_parsed = parse(os.path.join(path, file))
                try:
                    if desktop_filename_or_name.lower() == file_parsed['Name'].lower():
                        result.append(file)
                    elif desktop_filename_or_name.lower() == file_parsed['Exec'].split(' ')[0]:
                        result.append(file)
                except KeyError:
                    pass
    for res in result:
        if not res.endswith('.desktop'):
            result.remove(res)
    if not result and not desktop_filename_or_name.endswith('.desktop'):
        result.extend(locate(desktop_filename_or_name + '.desktop'))
    return result
Locate a .desktop from the standard locations. Find the path to the .desktop file of a given .desktop filename or application name. Standard locations: - ``~/.local/share/applications/`` - ``/usr/share/applications`` Args: desktop_filename_or_name (str): Either the filename of a .desktop file or the name of an application. Returns: list: A list of all matching .desktop files found.
juraj-google-style
def record_operation_forwardprop_only(op_type, output_tensors, input_tensors, backward_function, forwardprop_output_indices):
    pywrap_tfe.TFE_Py_TapeSetRecordOperationForwardprop(op_type, output_tensors, input_tensors, backward_function, forwardprop_output_indices)
Records the operation on all forward accumulators in the stack. Args: op_type: a string for the operation type, used in the backprop code output_tensors: a list of Python Tensor objects output by the operation input_tensors: a list of input Tensors to the recorded operation backward_function: the function to be called to, given the gradients of the output tensors, produce the gradients of the input tensors. This function is automatically transposed to produce output gradients given input gradients. forwardprop_output_indices: indicates any output_tensors which contain JVPs. Typically these will have come from TFE_Py_PackForwardGradients. May be None or an empty sequence if there are no JVP outputs from the operation.
github-repos
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
    init_params = super(Framework, cls)._prepare_init_params_from_job_description(job_details, model_channel_name)
    init_params['entry_point'] = json.loads(init_params['hyperparameters'].get(SCRIPT_PARAM_NAME))
    init_params['source_dir'] = json.loads(init_params['hyperparameters'].get(DIR_PARAM_NAME))
    init_params['enable_cloudwatch_metrics'] = json.loads(init_params['hyperparameters'].get(CLOUDWATCH_METRICS_PARAM_NAME))
    init_params['container_log_level'] = json.loads(init_params['hyperparameters'].get(CONTAINER_LOG_LEVEL_PARAM_NAME))
    hyperparameters = {}
    for k, v in init_params['hyperparameters'].items():
        if k == '_tuning_objective_metric':
            if v.startswith('"') and v.endswith('"'):
                v = v.strip('"')
            hyperparameters[k] = v
        else:
            hyperparameters[k] = json.loads(v)
    init_params['hyperparameters'] = hyperparameters
    return init_params
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded Returns: dictionary: The transformed init_params
juraj-google-style
def implicit_static(cls, for_type=None, for_types=None):
    for type_ in cls.__get_type_args(for_type, for_types):
        implementations = {}
        for function in cls.required():
            method = getattr(type_, function.__name__, None)
            if (not callable(method)):
                raise TypeError(('%s.implicit invokation on type %r is missing instance method %r.' % (cls.__name__, type_, function.__name__)))
            implementations[function] = method
        for function in cls.optional():
            method = getattr(type_, function.__name__, None)
            if callable(method):
                implementations[function] = method
        return cls.implement(for_type=type_, implementations=implementations)
Automatically generate implementations for a type. Implement the protocol for the 'for_type' type by dispatching each member function of the protocol to an instance method of the same name declared on the type 'for_type'. Arguments: for_type: The type to implicitly implement the protocol with. Raises: TypeError if not all implementations are provided by 'for_type'.
codesearchnet
def sh(cmd, ignore_error=False, cwd=None, shell=False, **kwargs):
    kwargs.update({'shell': shell, 'cwd': cwd, 'stderr': subprocess.STDOUT, 'stdout': subprocess.PIPE})
    log.debug((('cmd', cmd), ('kwargs', kwargs)))
    p = subprocess.Popen(cmd, universal_newlines=True, **kwargs)
    p_stdout = p.communicate()[0]
    if (p.returncode and (not ignore_error)):
        raise subprocess.CalledProcessError(p.returncode, cmd, p_stdout)
    return p_stdout
Execute a command with subprocess.Popen and block until output Args: cmd (tuple or str): same as subprocess.Popen args Keyword Arguments: ignore_error (bool): if False, raise an Exception if p.returncode is not 0 cwd (str): current working directory path to run cmd with shell (bool): subprocess.Popen ``shell`` kwarg Returns: str: stdout output of wrapped call to ``sh`` (``subprocess.Popen``) Raises: Exception: if ignore_error is false and returncode is not zero .. note:: when called with ``shell=True``, commands run through the shell: careful with shell-escaping.
codesearchnet
def subscribe_sns_topic_to_sqs(self, region):
    sns = self.session.resource('sns', region_name=region)
    topic = sns.Topic('arn:aws:sns:{}:{}:{}'.format(region, self.account.account_number, self.topic_name))
    topic.subscribe(Protocol='sqs', Endpoint=self.sqs_queue)
    auditlog(
        event='cloudtrail.subscribe_sns_topic_to_sqs',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )
    return topic.attributes['TopicArn']
Subscribe SQS to the SNS topic. Returns the ARN of the SNS Topic subscribed Args: region (`str`): Name of the AWS region Returns: `str`
juraj-google-style
def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool):
    if max_length is None and len(ids) > self.model_max_length and verbose and (self.model_max_length != 0):
        if not self.deprecation_warnings.get('sequence-length-is-longer-than-the-specified-maximum', False):
            logger.warning(f'Token indices sequence length is longer than the specified maximum sequence length for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model will result in indexing errors')
        self.deprecation_warnings['sequence-length-is-longer-than-the-specified-maximum'] = True
Depending on the input and internal state we might trigger a warning about a sequence that is too long for its corresponding model Args: ids (`List[str]`): The ids produced by the tokenization max_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set) verbose (`bool`): Whether or not to print more information and warnings.
github-repos
def __call__(self, fn):
    def output(app, *args, **kwargs):
        data = fn(app, *args, **kwargs)
        index = '{}-{}'.format(self.key, self.variable_type)
        if self.value is not None:
            app.tcex.playbook.add_output(self.key, self.value, self.variable_type)
        elif app.tcex.playbook.output_data.get(index) and not self.overwrite:
            pass
        else:
            app.tcex.playbook.add_output(self.key, data, self.variable_type)
        return data
    return output
Implement __call__ function for decorator. Args: fn (function): The decorated function. Returns: function: The custom decorator function.
juraj-google-style
async def verify_docker_image_task(chain, link):
    errors = []
    worker_type = get_worker_type(link.task)
    if worker_type not in chain.context.config['valid_docker_image_worker_types']:
        errors.append("{} is not a valid docker-image workerType!".format(worker_type))
    raise_on_errors(errors)
Verify the docker image Link. Args: chain (ChainOfTrust): the chain we're operating on. link (LinkOfTrust): the task link we're checking.
juraj-google-style
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops, gradient_uid='__unsupported__'):
    if len(grad_ys) != len(ys):
        raise ValueError(f'Length mismatch. Passed {len(grad_ys)} grad_ys for {len(ys)} ys')
    grad_ys = indexed_slices.convert_n_to_tensor_or_indexed_slices(grad_ys, name='grad_y')
    new_grad_ys = []
    for i, (y, grad_y) in enumerate(zip(ys, grad_ys)):
        with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops):
            if grad_y is None:
                if y.dtype.is_complex:
                    raise TypeError(f'Gradients of complex tensors ({y}) must set grad_ys (y.dtype = {dtypes.as_dtype(y.dtype).name})')
                new_grad_ys.append(array_ops.ones(array_ops.shape(y), dtype=y.dtype, name='grad_ys_%d' % i))
                continue
            if y.dtype.is_floating or y.dtype.is_integer:
                if not grad_y.dtype.is_floating and (not grad_y.dtype.is_integer):
                    raise TypeError(f'Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated for real or integer-valued tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be real or integer')
            elif y.dtype.is_complex:
                if not grad_y.dtype.is_complex:
                    raise TypeError(f'Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated for complex-valued tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be real')
            elif y.dtype == dtypes.variant:
                if grad_y.dtype != dtypes.variant:
                    raise TypeError(f'Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated for variant tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be variant')
            elif y.dtype == dtypes.resource:
                if grad_y.dtype == dtypes.resource:
                    raise TypeError(f'Input gradient {grad_y} for resource tensor {y} should not be a resource')
            else:
                raise TypeError(f'Tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be numeric to obtain a default gradient')
            if isinstance(grad_y, indexed_slices.IndexedSlices):
                new_grad_ys.append(indexed_slices.IndexedSlices(
                    indices=array_ops.identity(grad_y.indices, name='grad_ys_%d_indices' % i) if isinstance(grad_y.indices, tensor_lib.Tensor) else grad_y.indices,
                    values=array_ops.identity(grad_y.values, name='grad_ys_%d_values' % i) if isinstance(grad_y.values, tensor_lib.Tensor) else grad_y.values,
                    dense_shape=array_ops.identity(grad_y.dense_shape, name='grad_ys_%d_shape' % i) if isinstance(grad_y.dense_shape, tensor_lib.Tensor) else grad_y.dense_shape))
            else:
                new_grad_ys.append(array_ops.identity(grad_y, name='grad_ys_%d' % i))
    return new_grad_ys
Fill in default values for grad_ys. Args: grad_ys: List of gradients, can contain None. ys: List of tensors. colocate_gradients_with_ops: If True, try colocating gradients with the corresponding op. gradient_uid: A unique identifier within the graph indicating which invocation of gradients is being executed. Used to cluster ops for compilation. Returns: A list of gradients to use, without None. Raises: ValueError: If sizes of gradients and inputs don't match TypeError: If type of any gradient is not valid for its input.
github-repos
def file_crc32(filename, block_size=_DEFAULT_BLOCK_SIZE):
    crc = 0
    with FileIO(filename, mode='rb') as f:
        chunk = f.read(n=block_size)
        while chunk:
            crc = binascii.crc32(chunk, crc)
            chunk = f.read(n=block_size)
    return hex(crc & 4294967295)
Get the crc32 of the passed file. The crc32 of a file can be used for error checking; two files with the same crc32 are considered equivalent. Note that the entire file must be read to produce the crc32. Args: filename: string, path to a file block_size: Integer, process the files by reading blocks of `block_size` bytes. Use -1 to read the file as once. Returns: hexadecimal as string, the crc32 of the passed file.
github-repos
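A dependency-free sketch of the same chunked CRC-32 idea as file_crc32 above, using plain file I/O instead of TensorFlow's FileIO (illustrative; hypothetical helper name):

import binascii

def crc32_of_file(path, block_size=64 * 1024):
    crc = 0
    with open(path, 'rb') as f:
        chunk = f.read(block_size)
        while chunk:
            crc = binascii.crc32(chunk, crc)
            chunk = f.read(block_size)
    return hex(crc & 0xFFFFFFFF)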
def convertTime(self, time):
    m_format = ""
    if time.minute:
        m_format = ":%M"
    timeString = time.strftime("%I" + m_format + " %p")
    if not int(timeString[0]):
        timeString = timeString[1:]
    return timeString
Convert a datetime object representing a time into a human-ready string that can be read, spoken aloud, etc. Args: time (datetime.date): A datetime object to be converted into text. Returns: A string representation of the input time, ignoring any day-related information.
juraj-google-style
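A standalone sketch of the same hour/minute formatting rule used by convertTime above (hypothetical function name, not part of the source library):

import datetime

def to_spoken_time(t):
    fmt = '%I:%M %p' if t.minute else '%I %p'
    return t.strftime(fmt).lstrip('0')   # drop the leading zero, as the method above does

print(to_spoken_time(datetime.time(15, 0)))   # 3 PM
print(to_spoken_time(datetime.time(15, 5)))   # 3:05 PM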
def create_mutation_file(self, list_of_tuples):
    self.mutation_infile = op.join(self.foldx_dir, 'individual_list.txt')
    idx = 1
    with open(self.mutation_infile, 'w') as f:
        for mutant_group in list_of_tuples:
            mutstring = ''.join(list(map((lambda x: '{}{}{}{};'.format(x[0], x[1], x[2], x[3])), mutant_group)))
            f.write((mutstring + '\n'))
            self.mutation_index_to_group[idx] = mutant_group
            idx += 1
Create the FoldX file 'individual_list.txt' to run BuildModel upon. Args: list_of_tuples (list): A list of tuples indicating mutation groups to carry out BuildModel upon. Example:: [ (('N', 'A', 308, 'S'), ('S', 'A', 320, 'T'), ('S', 'A', 321, 'H')), # Mutation group 1 (('S', 'A', 321, 'R'), ('T', 'A', 345, 'S')) # Mutation group 2 ]
codesearchnet
def get_backend(backend_class=None):
    cache_name = '_backend_instance'
    if (not hasattr(get_backend, cache_name)):
        backend_class = (backend_class or settings.ROUGHPAGES_BACKEND)
        if isinstance(backend_class, basestring):
            (module_path, class_name) = backend_class.rsplit('.', 1)
            module = import_module(module_path)
            backend_class = getattr(module, class_name)
        setattr(get_backend, cache_name, backend_class())
    return getattr(get_backend, cache_name)
Get backend instance. If no `backend_class` is specified, the backend class is determined from the value of `settings.ROUGHPAGES_BACKEND`. `backend_class` can be a class object or a dot-separated Python import path. Returns: backend instance
codesearchnet
def api_server(api_services, **kwargs):
    if ('protocols' in kwargs):
        raise TypeError("__init__() got an unexpected keyword argument 'protocols'")
    from . import _logger as endpoints_logger
    from . import __version__ as endpoints_version
    endpoints_logger.info('Initializing Endpoints Framework version %s', endpoints_version)
    apis_app = _ApiServer(api_services, **kwargs)
    dispatcher = endpoints_dispatcher.EndpointsDispatcherMiddleware(apis_app)
    service_name = os.environ.get('ENDPOINTS_SERVICE_NAME')
    if (not service_name):
        _logger.warn('Did not specify the ENDPOINTS_SERVICE_NAME environment variable so service control is disabled. Please specify the name of service in ENDPOINTS_SERVICE_NAME to enable it.')
        return dispatcher
    if control_wsgi.running_on_devserver():
        _logger.warn('Running on local devserver, so service control is disabled.')
        return dispatcher
    from endpoints_management import _logger as management_logger
    from endpoints_management import __version__ as management_version
    management_logger.info('Initializing Endpoints Management Framework version %s', management_version)
    controller = control_client.Loaders.DEFAULT.load(service_name)
    control_client.use_gae_thread()
    controller.start()
    return control_wsgi.add_all(dispatcher, app_identity.get_application_id(), controller)
Create an api_server. The primary function of this method is to set up the WSGIApplication instance for the service handlers described by the services passed in. Additionally, it registers each API in ApiConfigRegistry for later use in the BackendService.getApiConfigs() (API config enumeration service). It also configures service control. Args: api_services: List of protorpc.remote.Service classes implementing the API or a list of _ApiDecorator instances that decorate the service classes for an API. **kwargs: Passed through to protorpc.wsgi.service.service_handlers except: protocols - ProtoRPC protocols are not supported, and are disallowed. Returns: A new WSGIApplication that serves the API backend and config registry. Raises: TypeError: if protocols are configured (this feature is not supported).
codesearchnet
def restore_saveables(self, tensor_saveables, python_positions, registered_savers=None, reader=None):
    if reader is None:
        reader = py_checkpoint_reader.NewCheckpointReader(self.save_path_string)
    restore_ops = []
    for position in python_positions:
        key = position.object_proto.attributes[0].checkpoint_key
        position.trackable.deserialize(reader.get_tensor(key))
    if tensor_saveables or registered_savers:
        flat_saveables = saveable_object_util.validate_and_slice_inputs(tensor_saveables)
        new_restore_ops = functional_saver.MultiDeviceSaver.from_saveables(flat_saveables, registered_savers).restore(self.save_path_tensor, self.options)
        if not context.executing_eagerly():
            for name, restore_op in sorted(new_restore_ops.items()):
                restore_ops.append(restore_op)
                assert name not in self.restore_ops_by_name
                self.restore_ops_by_name[name] = restore_op
    return restore_ops
Run or build restore operations for SaveableObjects. Args: tensor_saveables: `SaveableObject`s which correspond to Tensors. python_positions: List of CheckpointPositions bound to `PythonState` objects which must be restored eagerly. registered_savers: a dict mapping saver names-> object name -> Trackable. reader: A `CheckpointReader`. If None, a new instance will be created. Returns: When graph building, a list of restore operations, either cached or newly created, to restore `tensor_saveables`.
github-repos
def merge_with(self, other):
    other = as_shape(other)
    if self._dims is None:
        return other
    else:
        try:
            self.assert_same_rank(other)
            new_dims = []
            for i, dim in enumerate(self._dims):
                new_dims.append(dim.merge_with(other[i]))
            return TensorShape(new_dims)
        except ValueError:
            raise ValueError("Shapes %s and %s are not convertible" % (self, other))
Returns a `TensorShape` combining the information in `self` and `other`. The dimensions in `self` and `other` are merged elementwise, according to the rules defined for `Dimension.merge_with()`. Args: other: Another `TensorShape`. Returns: A `TensorShape` containing the combined information of `self` and `other`. Raises: ValueError: If `self` and `other` are not convertible.
juraj-google-style
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
    if data_format == 'channels_first':
        output = repeat_elements(x, depth_factor, axis=2)
        output = repeat_elements(output, height_factor, axis=3)
        output = repeat_elements(output, width_factor, axis=4)
        return output
    elif data_format == 'channels_last':
        output = repeat_elements(x, depth_factor, axis=1)
        output = repeat_elements(output, height_factor, axis=2)
        output = repeat_elements(output, width_factor, axis=3)
        return output
    else:
        raise ValueError('Invalid data_format: ' + str(data_format))
Resizes the volume contained in a 5D tensor. Args: x: Tensor or variable to resize. depth_factor: Positive integer. height_factor: Positive integer. width_factor: Positive integer. data_format: One of `"channels_first"`, `"channels_last"`. Returns: A tensor. Raises: ValueError: if `data_format` is neither `channels_last` nor `channels_first`.
github-repos
def IsRValueAllowed(clean_lines, linenum, typenames):
    for i in xrange(linenum, 0, -1):
        line = clean_lines.elided[i]
        if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
            if not line.endswith('PUSH'):
                return False
            for j in xrange(linenum, clean_lines.NumLines(), 1):
                line = clean_lines.elided[j]
                if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
                    return line.endswith('POP')
    line = clean_lines.elided[linenum]
    if Search(r'\boperator\s*=\s*\(', line):
        return IsDeletedOrDefault(clean_lines, linenum)
    match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line)
    if match and match.group(1) == match.group(2):
        return IsDeletedOrDefault(clean_lines, linenum)
    if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line):
        return IsDeletedOrDefault(clean_lines, linenum)
    if Match(r'\s*[\w<>]+\s*\(', line):
        previous_line = 'ReturnType'
        if linenum > 0:
            previous_line = clean_lines.elided[linenum - 1]
        if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
            return IsDeletedOrDefault(clean_lines, linenum)
    while line:
        match = Match(r'^.*?(\w+)\s*&&(.*)$', line)
        if not match:
            break
        if match.group(1) not in typenames:
            return False
        line = match.group(2)
    return line.find('&&') < 0
Check if RValue reference is allowed on a particular line. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. typenames: set of type names from template-argument-list. Returns: True if line is within the region where RValue references are allowed.
juraj-google-style
def __init__(self, name, type, description=''):
    self._name = name
    self._type = type
    self._description = description
Filter constructor. Args: name (str): Filter name. type (str): Type of the filter (boolean, int, etc.). description (str): Filter description.
juraj-google-style
def __init__(self, lat=None, lng=None, name=None, stop_id=None, field_dict=None, stop_code=None):
    self._schedule = None
    if field_dict:
        if isinstance(field_dict, self.__class__):
            for k, v in field_dict.iteritems():
                self.__dict__[k] = v
        else:
            self.__dict__.update(field_dict)
    else:
        if lat is not None:
            self.stop_lat = lat
        if lng is not None:
            self.stop_lon = lng
        if name is not None:
            self.stop_name = name
        if stop_id is not None:
            self.stop_id = stop_id
        if stop_code is not None:
            self.stop_code = stop_code
Initialize a new Stop object. Args: field_dict: A dictionary mapping attribute name to unicode string lat: a float, ignored when field_dict is present lng: a float, ignored when field_dict is present name: a string, ignored when field_dict is present stop_id: a string, ignored when field_dict is present stop_code: a string, ignored when field_dict is present
juraj-google-style
def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
    raise NotImplementedError('subclasses must implement')
Resolves the type of a (possibly annotated) function argument. Args: ns: namespace types_ns: types namespace f_name: str, the function name name: str, the argument name type_anno: the type annotating the argument, if any f_is_local: bool, whether the function is a local function Returns: Set of the argument types.
github-repos
def visualize(logdir, outdir, num_agents, num_episodes, checkpoint=None, env_processes=True):
    config = utility.load_config(logdir)
    with tf.device('/cpu:0'):
        batch_env = utility.define_batch_env((lambda : _create_environment(config, outdir)), num_agents, env_processes)
        graph = utility.define_simulation_graph(batch_env, config.algorithm, config)
        total_steps = (num_episodes * config.max_length)
        loop = _define_loop(graph, total_steps)
    saver = utility.define_saver(exclude=('.*_temporary.*', 'global_step'))
    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        utility.initialize_variables(sess, saver, config.logdir, checkpoint, resume=True)
        for unused_score in loop.run(sess, saver, total_steps):
            pass
    batch_env.close()
Recover checkpoint and render videos from it. Args: logdir: Logging directory of the trained algorithm. outdir: Directory to store rendered videos in. num_agents: Number of environments to simulate in parallel. num_episodes: Total number of episodes to simulate. checkpoint: Checkpoint name to load; defaults to most recent. env_processes: Whether to step environments in separate processes.
codesearchnet
def incr(self, counter_name, delta=1):
    self._state.counters_map.increment(counter_name, delta)
Changes counter by delta. Args: counter_name: the name of the counter to change. str. delta: int.
juraj-google-style
def _comparison_functions(cls, partial=False):
    def prerelease_cmp(a, b):
        if a and b:
            return identifier_list_cmp(a, b)
        elif a:
            return -1
        elif b:
            return 1
        else:
            return 0

    def build_cmp(a, b):
        if a == b:
            return 0
        else:
            return NotImplemented

    def make_optional(orig_cmp_fun):
        @functools.wraps(orig_cmp_fun)
        def alt_cmp_fun(a, b):
            if a is None or b is None:
                return 0
            return orig_cmp_fun(a, b)
        return alt_cmp_fun

    if partial:
        return [
            base_cmp,
            make_optional(base_cmp),
            make_optional(base_cmp),
            make_optional(prerelease_cmp),
            make_optional(build_cmp),
        ]
    else:
        return [
            base_cmp,
            base_cmp,
            base_cmp,
            prerelease_cmp,
            build_cmp,
        ]
Retrieve comparison methods to apply on version components. This is a private API. Args: partial (bool): whether to provide 'partial' or 'strict' matching. Returns: 5-tuple of cmp-like functions.
juraj-google-style
def _process(self, input):
    input = re.sub("<[^>]*>", " ", input)
    punct = list(string.punctuation)
    for symbol in punct:
        input = input.replace(symbol, " %s " % symbol)
    input = filter(lambda x: x != u'', input.lower().split(' '))
    return input
Takes in html-mixed body text as a string and returns a list of strings, lower case and with punctuation given spacing. Called by self._gen_sentence() Args: input (string): body text
juraj-google-style
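A self-contained sketch of the tag-stripping and punctuation-padding steps performed by _process above (hypothetical function name; Python 3, so filter is replaced by a list comprehension):

import re
import string

def tokenize(text):
    text = re.sub('<[^>]*>', ' ', text)                    # drop HTML tags
    for symbol in string.punctuation:
        text = text.replace(symbol, ' %s ' % symbol)       # pad punctuation with spaces
    return [tok for tok in text.lower().split(' ') if tok]

print(tokenize('<p>Hello, world!</p>'))  # ['hello', ',', 'world', '!']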
async def get_event(self, stream: str, event_number: int, resolve_links=True, require_master=False, correlation_id: uuid.UUID=None) -> msg.Event:
    correlation_id = (correlation_id or uuid.uuid4())
    cmd = convo.ReadEvent(stream, event_number, resolve_links, require_master, conversation_id=correlation_id)
    result = (await self.dispatcher.start_conversation(cmd))
    return (await result)
Get a single event by stream and event number. Args: stream: The name of the stream containing the event. event_number: The sequence number of the event to read. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. require_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifier for this command. Returns: The resolved event if found, else None. Examples: >>> async with connection() as conn: >>> await conn.publish("inventory_item-1", "item_created") >>> event = await conn.get_event("inventory_item-1", 1) >>> print(event)
codesearchnet
def visit_comparison(self, comparison: _evaluation.ComparisonNode) -> _sql_data_types.Select:
    lhs_result = self.visit(comparison.left)
    rhs_result = self.visit(comparison.right)
    lhs_subquery = lhs_result.as_operand()
    rhs_subquery = rhs_result.as_operand()
    sql_value = f'{lhs_subquery} {comparison.op} {rhs_subquery}'
    return _sql_data_types.Select(
        select_part=_sql_data_types.RawExpression(sql_value, _sql_data_type=_sql_data_types.Boolean, _sql_alias='comparison_'),
        from_part=None,
        sql_dialect=_sql_data_types.SqlDialect.SPARK)
Translates a FHIRPath comparison to Spark SQL. Each operand is expected to be a collection of a single element. Operands can be strings, integers, decimals, dates, datetimes, and times. Comparison will perform implicit conversion between applicable types. Args: comparison: The `Comparison` Expression node. Returns: A compiled Spark SQL expression.
github-repos
def _psum(tensor, axis_name=None):
    if axis_name != _pmap_config.axis_name():
        raise ValueError('axis_name (%s) is not equal to that of the surrounding pmap (%s)' % (axis_name, _pmap_config.axis_name()))
    devices = _pmap_config.devices()
    if devices is None:
        raise ValueError("Can't retrieve the device list from the surrounding pmap")
    tensor = tf_np.asarray(tensor)
    if tpu_devices(devices):
        is_int64 = False
        is_float64 = False
        if tensor.dtype == np.int64:
            is_int64 = True
            tensor = tensor.astype(np.int32)
        elif tensor.dtype == np.float64:
            is_float64 = True
            tensor = tensor.astype(np.float32)
        tensor = tpu_ops.cross_replica_sum(tensor)
        if is_int64:
            tensor = math_ops.cast(tensor, dtypes.int64)
        elif is_float64:
            tensor = math_ops.cast(tensor, dtypes.float64)
    else:
        tensor = gen_collective_ops.collective_reduce(input=tensor, group_size=len(devices), group_key=_GROUP_KEY, instance_key=_get_instance_key(), merge_op='Add', final_op='Id', subdiv_offsets=(0,))
    return tf_np.asarray(tensor)
Sum all-reduction. Args: tensor: A tensor. axis_name: The axis name to reduce. Must equal to that of the surrounding pmap. Returns: The sum of the `tensor` replicas on each participating devices.
github-repos
def add_arguments(self, parser):
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-d', '--downgrade', action='store_true', help='downgrade the J-Link firmware')
    group.add_argument('-u', '--upgrade', action='store_true', help='upgrade the J-Link firmware')
    return self.add_common_arguments(parser, False)
Adds the arguments for the firmware command. Args: self (FirmwareCommand): the ``FirmwareCommand`` instance parser (argparse.ArgumentParser): parser to add the commands to Returns: ``None``
juraj-google-style
def find_log_dir(log_dir=None):
    if log_dir:
        dirs = [log_dir]
    elif FLAGS['log_dir'].value:
        dirs = [FLAGS['log_dir'].value]
    else:
        dirs = ['/tmp/', './']
    for d in dirs:
        if os.path.isdir(d) and os.access(d, os.W_OK):
            return d
    _absl_logger.fatal("Can't find a writable directory for logs, tried %s", dirs)
Returns the most suitable directory to put log files into. Args: log_dir: str|None, if specified, the logfile(s) will be created in that directory. Otherwise if the --log_dir command-line flag is provided, the logfile will be created in that directory. Otherwise the logfile will be created in a standard location.
juraj-google-style
def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
    axis = self._get_axis_number(axis) if axis is not None else 0
    data = self._validate_dtypes_min_max(axis, numeric_only)
    return data._reduce_dimension(
        data._query_compiler.min(
            axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
        )
    )
Perform min across the DataFrame. Args: axis (int): The axis to take the min on. skipna (bool): True to skip NA values, false otherwise. Returns: The min of the DataFrame.
juraj-google-style
def atomic_write_string_to_file(filename, contents, overwrite):
    temp_pathname = ((tf.compat.as_bytes(filename) + tf.compat.as_bytes('.tmp')) + tf.compat.as_bytes(uuid.uuid4().hex))
    with tf_v1.gfile.GFile(temp_pathname, mode='w') as f:
        f.write(contents)
    try:
        tf_v1.gfile.Rename(temp_pathname, filename, overwrite)
    except tf.errors.OpError:
        tf_v1.gfile.Remove(temp_pathname)
        raise
Writes to `filename` atomically. This means that when `filename` appears in the filesystem, it will contain all of `contents`. With write_string_to_file, it is possible for the file to appear in the filesystem with `contents` only partially written. Accomplished by writing to a temp file and then renaming it. Args: filename: string, pathname for a file contents: string, contents that need to be written to the file overwrite: boolean, if false it's an error for `filename` to be occupied by an existing file.
codesearchnet
def push_file(self, source, dest_dir):
    local_dest = ((dest_dir + '/') + os.path.basename(source))
    if (os.path.dirname(source) != dest_dir):
        try:
            shutil.copyfile(source, local_dest)
            os.chmod(local_dest, 511)
        except OSError as e:
            raise FileCopyException(e, self.hostname)
    return local_dest
If the source file's dirpath is the same as dest_dir, a copy is not necessary, and nothing is done. Else a copy is made. Args: - source (string) : Path to the source file - dest_dir (string) : Path to the directory to which the file is to be copied Returns: - destination_path (String) : Absolute path of the destination file Raises: - FileCopyException : If file copy failed.
codesearchnet
def add_subassistants_to(cls, parser, assistant_tuple, level, alias=None):
    name = (alias or assistant_tuple[0].name)
    p = parser.add_parser(name, description=assistant_tuple[0].description, argument_default=argparse.SUPPRESS)
    for arg in assistant_tuple[0].args:
        arg.add_argument_to(p)
    if (len(assistant_tuple[1]) > 0):
        subparsers = cls._add_subparsers_required(p, dest=settings.SUBASSISTANT_N_STRING.format(level), title=cls.subparsers_str, description=cls.subparsers_desc)
        for subas_tuple in sorted(assistant_tuple[1], key=(lambda x: x[0].name)):
            cls.add_subassistants_to(subparsers, subas_tuple, (level + 1))
    elif (level == 1):
        subparsers = cls._add_subparsers_required(p, dest=settings.SUBASSISTANT_N_STRING.format(level), title=cls.subparsers_str, description=devassistant_argparse.ArgumentParser.no_assistants_msg)
Adds assistant from given part of assistant tree and all its subassistants to a given argument parser. Args: parser: instance of devassistant_argparse.ArgumentParser assistant_tuple: part of assistant tree (see generate_argument_parser doc) level: level of subassistants that given assistant is at
codesearchnet
def get_schema_node(self, path: SchemaPath) -> Optional[SchemaNode]:
    return self.schema.get_schema_descendant(self.schema_data.path2route(path))
Return the schema node addressed by a schema path. Args: path: Schema path. Returns: Schema node if found in the schema, or ``None``. Raises: InvalidSchemaPath: If the schema path is invalid.
codesearchnet
def _create(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs):
    kwargs = UserAgent._environment_variables(**kwargs)
    if ('user_agent' in kwargs):
        user_agent = kwargs['user_agent']
        del kwargs['user_agent']
    prefix = kwargs.get('prefix')
    if prefix:
        del kwargs['prefix']
    else:
        prefix = ('HDXPythonUtilities/%s' % get_utils_version())
    if (not user_agent):
        ua = cls._load(prefix, user_agent_config_yaml, user_agent_lookup)
    else:
        ua = cls._construct(kwargs, prefix, user_agent)
    return ua
Get full user agent string Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: Full user agent string
codesearchnet
def connect(self):
    if self.is_connected():
        raise tornado.gen.Return(True)
    cb1 = self._read_callback
    cb2 = self._close_callback
    self.__callback_queue = collections.deque()
    self._reply_list = []
    self.__reader = hiredis.Reader(replyError=ClientError)
    kwargs = self.connection_kwargs
    self.__connection = Connection(cb1, cb2, **kwargs)
    connection_status = (yield self.__connection.connect())
    if (connection_status is not True):
        raise tornado.gen.Return(False)
    if (self.password is not None):
        authentication_status = (yield self._call('AUTH', self.password))
        if (authentication_status != b'OK'):
            LOG.warning('impossible to connect: bad password')
            self.__connection.disconnect()
            raise tornado.gen.Return(False)
    if (self.db != 0):
        db_status = (yield self._call('SELECT', self.db))
        if (db_status != b'OK'):
            LOG.warning("can't select db %s", self.db)
            raise tornado.gen.Return(False)
    raise tornado.gen.Return(True)
Connects the client object to redis. It's safe to use this method even if you are already connected. Note: this method is useless with autoconnect mode (default). Returns: a Future object with True as result if the connection was ok.
codesearchnet
def get_regularization_loss(scope=None, name='total_regularization_loss'):
    losses = get_regularization_losses(scope)
    if losses:
        return math_ops.add_n(losses, name=name)
    else:
        return constant_op.constant(0.0)
Gets the total regularization loss. Args: scope: An optional scope name for filtering the losses to return. name: The name of the returned tensor. Returns: A scalar regularization loss.
github-repos
def restore_saveables(self, tensor_saveables: Dict[str, saveable_object.SaveableObject], python_positions: List[restore_lib.CheckpointPosition], registered_savers: Optional[Dict[str, Dict[str, base.Trackable]]]=None, reader: py_checkpoint_reader.NewCheckpointReader=None) -> Optional[List[ops.Operation]]:
    del registered_savers
    restore_ops = []
    if python_positions:
        if reader is None:
            reader = py_checkpoint_reader.NewCheckpointReader(self.save_path_string)
        for position in python_positions:
            key = position.object_proto.attributes[0].checkpoint_key
            position.trackable.deserialize(reader.get_tensor(key))
    if tensor_saveables:
        validated_saveables = saveable_object_util.validate_and_slice_inputs(tensor_saveables)
        validated_names = set((saveable.name for saveable in validated_saveables))
        if set(tensor_saveables.keys()) != validated_names:
            raise AssertionError('Saveable keys changed when validating. Got back %s, was expecting %s' % (tensor_saveables.keys(), validated_names))
        new_restore_ops = _DSaver(self._mesh, validated_saveables).restore(self.save_path_tensor, self.options)
        if not context.executing_eagerly():
            for name, restore_op in sorted(new_restore_ops.items()):
                restore_ops.append(restore_op)
                assert name not in self.restore_ops_by_name
                self.restore_ops_by_name[name] = restore_op
    return restore_ops
Run or build restore operations for SaveableObjects. Args: tensor_saveables: `SaveableObject`s which correspond to Tensors. python_positions: `CheckpointPosition`s which correspond to `PythonState` Trackables bound to the checkpoint. registered_savers: a dict mapping saver names-> object name -> Trackable. This argument is not implemented for DTensorCheckpoint. reader: A CheckpointReader. Creates one lazily if None. Returns: When graph building, a list of restore operations, either cached or newly created, to restore `tensor_saveables`.
github-repos
def set_targets(x, delta=10):
    data = []
    for (row, _) in x.iterrows():
        if (row == (x.shape[0] - 1)):
            break
        curr_close = x.close[row]
        next_close = x.close[(row + 1)]
        high_close = (next_close + (delta / 2))
        low_close = (next_close - (delta / 2))
        if (curr_close < low_close):
            target = TARGET_CODES['bearish']
        elif (curr_close > high_close):
            target = TARGET_CODES['bullish']
        else:
            target = TARGET_CODES['neutral']
        data.append(target)
    return pd.Series(data=data, dtype=np.int32, name='target')
Sets target market trend for a date Args: x: Pandas DataFrame of market features delta: Positive number defining a price buffer between what is classified as a bullish/bearish market for the training set. delta is equivalent to the total size of the neutral price zone. delta / 2 is equivalent to either the positive or negative threshold of the neutral price zone. Returns: Pandas Series of numpy int8 market trend targets
codesearchnet
def scan(self, func=operator.add):
    if self.closed():
        raise ValueError('Attempt to call scan() on a closed Queryable.')
    if (not is_callable(func)):
        raise TypeError('scan() parameter func={0} is not callable'.format(repr(func)))
    return self._create(self._generate_scan_result(func))
An inclusive prefix sum which returns the cumulative application of the supplied function up to an including the current element. Args: func: An optional binary function which is commutative - that is, the order of the arguments is unimportant. Defaults to a summing operator. Returns: A Queryable such that the nth element is the sum of the first n elements of the source sequence. Raises: ValueError: If the Queryable has been closed. TypeError: If func is not callable.
codesearchnet
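The inclusive prefix sum described by scan above is also available directly in the standard library; a quick illustration:

import itertools
import operator

print(list(itertools.accumulate([1, 2, 3, 4], operator.add)))  # [1, 3, 6, 10]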
def all_elements_equal(value):
    if is_scalar(value):
        return True
    return np.array((value == value.flatten()[0])).all()
Checks if all elements in the given value are equal to each other. If the input is a single value the result is trivial. If not, we compare all the values to see if they are exactly the same. Args: value (ndarray or number): a numpy array or a single number. Returns: bool: true if all elements are equal to each other, false otherwise
codesearchnet
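A standalone sketch of the same check as all_elements_equal above, without the library's is_scalar helper (hypothetical function name):

import numpy as np

def elements_all_equal(value):
    arr = np.asarray(value)
    return bool((arr == arr.flatten()[0]).all()) if arr.size else True

print(elements_all_equal(3.0))                  # True
print(elements_all_equal(np.ones((2, 3))))      # True
print(elements_all_equal(np.array([1, 2, 1])))  # False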
def _wait_for_any_event(events, timeout_s):
    def any_event_set():
        return any((event.is_set() for event in events))
    result = timeouts.loop_until_timeout_or_true(timeout_s, any_event_set, sleep_s=_WAIT_FOR_ANY_EVENT_POLL_S)
    return (result or any_event_set())
Wait for any in a list of threading.Event's to be set. Args: events: List of threading.Event's. timeout_s: Max duration in seconds to wait before returning. Returns: True if at least one event was set before the timeout expired, else False.
codesearchnet
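A standard-library-only sketch of the same polling idea as _wait_for_any_event above, without the openhtf timeouts helper (hypothetical names):

import threading
import time

def wait_for_any(events, timeout_s, poll_s=0.1):
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        if any(e.is_set() for e in events):
            return True
        time.sleep(poll_s)
    return any(e.is_set() for e in events)

done = threading.Event()
threading.Timer(0.2, done.set).start()
print(wait_for_any([threading.Event(), done], timeout_s=1.0))  # True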
def _ParseGUIDTable(self, parser_mediator, cache, database, esedb_table, values_map, event_data_class):
    if (cache is None):
        raise ValueError('Missing cache value.')
    if (database is None):
        raise ValueError('Missing database value.')
    if (esedb_table is None):
        raise ValueError('Missing table value.')
    identifier_mappings = self._GetIdentifierMappings(parser_mediator, cache, database)
    for esedb_record in esedb_table.records:
        if parser_mediator.abort:
            break
        record_values = self._GetRecordValues(parser_mediator, esedb_table.name, esedb_record, value_mappings=self._GUID_TABLE_VALUE_MAPPINGS)
        event_data = event_data_class()
        for (attribute_name, column_name) in values_map.items():
            record_value = record_values.get(column_name, None)
            if (attribute_name in ('application', 'user_identifier')):
                record_value = identifier_mappings.get(record_value, record_value)
            setattr(event_data, attribute_name, record_value)
        timestamp = record_values.get('TimeStamp')
        if timestamp:
            date_time = dfdatetime_ole_automation_date.OLEAutomationDate(timestamp=timestamp)
            timestamp_description = definitions.TIME_DESCRIPTION_SAMPLE
        else:
            date_time = dfdatetime_semantic_time.SemanticTime('Not set')
            timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
        event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
        parser_mediator.ProduceEventWithEventData(event, event_data)
        timestamp = record_values.get('ConnectStartTime')
        if timestamp:
            date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_FIRST_CONNECTED)
            parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a table with a GUID as name. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache (ESEDBCache): cache, which contains information about the identifiers stored in the SruDbIdMapTable table. database (pyesedb.file): ESE database. esedb_table (pyesedb.table): table. values_map (dict[str, str]): mapping of table columns to event data attribute names. event_data_class (type): event data class. Raises: ValueError: if the cache, database or table value is missing.
codesearchnet
def diff(self, periods=1, axis=0): axis = self._get_axis_number(axis) return self.__constructor__( query_compiler=self._query_compiler.diff(periods=periods, axis=axis) )
Finds the difference between elements on the axis requested Args: periods: Periods to shift for forming difference axis: Take difference over rows or columns Returns: DataFrame with the diff applied
juraj-google-style
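The Modin method above mirrors the pandas API, so a plain pandas call illustrates the expected behaviour; the sample frame is an assumption.

import pandas as pd

df = pd.DataFrame({'a': [1, 3, 6, 10], 'b': [2, 2, 4, 8]})
print(df.diff())            # row-wise differences (axis=0)
print(df.diff(periods=2))   # difference against the value two rows back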
def _ParseEventData(self, variable_length_section): event_data = WinJobEventData() event_data.application = ( variable_length_section.application_name.rstrip('\x00')) event_data.comment = variable_length_section.comment.rstrip('\x00') event_data.parameters = ( variable_length_section.parameters.rstrip('\x00')) event_data.username = variable_length_section.author.rstrip('\x00') event_data.working_directory = ( variable_length_section.working_directory.rstrip('\x00')) return event_data
Parses the event data from a variable-length data section. Args: variable_length_section (job_variable_length_data_section): a Windows Scheduled Task job variable-length data section. Returns: WinJobEventData: event data of the job file.
juraj-google-style
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
    shutdown_value = registry_key.GetValueByName('ShutdownTime')
    if not shutdown_value:
        return

    try:
        date_time = self._ParseFiletime(shutdown_value.data)
    except errors.ParseError as exception:
        parser_mediator.ProduceExtractionWarning(
            'unable to determine shutdown timestamp with error: {0!s}'.format(exception))
        return

    if not date_time:
        date_time = dfdatetime_semantic_time.SemanticTime('Not set')

    event_data = ShutdownWindowsRegistryEventData()
    event_data.key_path = registry_key.path
    event_data.offset = shutdown_value.offset
    event_data.value_name = shutdown_value.name

    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_SHUTDOWN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts events from a ShutdownTime Windows Registry value. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codesearchnet
def _load_metadata_files(self):
    metadata_paths = file_io.get_matching_files(
        os.path.join(self._dump_root, '*%s' % self._METADATA_SUFFIX))
    if not metadata_paths:
        raise ValueError(
            'Cannot find any tfdbg metadata file in directory: %s' % self._dump_root)
    wall_times = []
    run_ids = []
    tensorflow_versions = []
    file_versions = []
    for metadata_path in metadata_paths:
        reader = tf_record.tf_record_random_reader(metadata_path)
        try:
            record = reader.read(0)[0]
            debug_event = debug_event_pb2.DebugEvent.FromString(record)
            wall_times.append(debug_event.wall_time)
            run_ids.append(debug_event.debug_metadata.tfdbg_run_id)
            tensorflow_versions.append(debug_event.debug_metadata.tensorflow_version)
            file_versions.append(debug_event.debug_metadata.file_version)
        except Exception as e:
            raise errors.DataLossError(
                None, None,
                'Error reading tfdbg metadata from paths %s' % metadata_paths) from e
        finally:
            reader.close()
    self._starting_wall_time = wall_times[0]
    self._tfdbg_run_id = run_ids[0]
    self._tensorflow_version = tensorflow_versions[0]
    self._file_version = file_versions[0]
    if len(metadata_paths) == 1:
        return metadata_paths
    num_no_id = len([run_id for run_id in run_ids if not run_id])
    if num_no_id:
        paths_without_run_id = [
            metadata_path for metadata_path, run_id in zip(metadata_paths, run_ids)
            if not run_id]
        raise ValueError(
            'Found %d tfdbg metadata files and %d of them do not have tfdbg run ids. '
            'The metadata files without run ids are: %s'
            % (len(run_ids), num_no_id, paths_without_run_id))
    elif len(set(run_ids)) != 1:
        raise ValueError(
            'Unexpected: Found multiple (%d) tfdbg2 runs in directory %s'
            % (len(set(run_ids)), self._dump_root))
    paths_and_timestamps = sorted(zip(metadata_paths, wall_times), key=lambda t: t[1])
    self._starting_wall_time = paths_and_timestamps[0][1]
    return [path[0] for path in paths_and_timestamps]
Load and parse metadata files in the dump root. Check that all metadata files have a common tfdbg_run_id, and raise a ValueError if their tfdbg_run_ids differ. Returns: A list of metadata file paths in ascending order of their starting wall_time timestamp.
github-repos
def map_across_blocks(self, map_func): preprocessed_map_func = self.preprocess_func(map_func) new_partitions = np.array( [ [part.apply(preprocessed_map_func) for part in row_of_parts] for row_of_parts in self.partitions ] ) return self.__constructor__(new_partitions)
Applies `map_func` to every partition. Args: map_func: The function to apply. Returns: A new BaseFrameManager object, the type of object that called this.
juraj-google-style
def cmRecall(cm, average=True): cm = cm.type(torch.float64) recall = cm.diag() / (cm.sum(dim=1) + 1e-15) if average: return recall.mean() return recall
Calculates recall using :class:`~ignite.metrics.ConfusionMatrix` metric. Args: cm (ConfusionMatrix): instance of confusion matrix metric average (bool, optional): if True metric value is averaged over all classes Returns: MetricsLambda
juraj-google-style
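A small illustration of the per-class recall arithmetic computed above on a hand-made confusion matrix tensor; the ignite ConfusionMatrix metric itself is not needed for the calculation, and the matrix values are made up.

import torch

# Rows are true classes, columns are predicted classes.
cm = torch.tensor([[5., 1.], [2., 8.]]).type(torch.float64)
recall = cm.diag() / (cm.sum(dim=1) + 1e-15)
print(recall)         # per-class recall: [0.8333, 0.8000]
print(recall.mean())  # averaged recall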
def SmartBroadcastGradientArgs(x, y, grad=None):
    del grad
    x_shape = array_ops.shape(x)
    y_shape = array_ops.shape(y)
    if (not context.executing_eagerly() and isinstance(x, tensor.Tensor)
            and isinstance(y, tensor.Tensor)):
        x_axes, y_axes = _InferGradientReductionAxes(x.shape, y.shape)
    else:
        x_axes, y_axes = (None, None)
    if x_axes is None or y_axes is None:
        x_axes, y_axes = gen_array_ops.broadcast_gradient_args(x_shape, y_shape)
        x_must_reduce = True
        y_must_reduce = True
    else:
        x_must_reduce = x_axes or x.shape.rank < y.shape.rank
        y_must_reduce = y_axes or y.shape.rank < x.shape.rank
    return ((x_shape, x_axes, x_must_reduce), (y_shape, y_axes, y_must_reduce))
Version of `BroadcastGradientArgs` optimized for partially-known shapes. Args: x: The first argument of a broadcasting binary op. y: The second argument of a broadcasting binary op. grad: Deprecated. Returns: A pair of triples, one per argument with * Shape of the argument (tensor); * Reduction axes for the argument (list or tensor); * Boolean indicating whether the reduction must be applied.
github-repos
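A plain-Python sketch of the reduction-axis logic the helper above computes: when a binary op broadcasts its inputs, the gradient for each input must be summed over the axes where that input's dimension was 1 (or missing). This is a simplified illustration of the idea, not the TensorFlow op, and the shapes are illustrative assumptions.

def broadcast_reduction_axes(shape_x, shape_y):
    # Align shapes from the right and record the axes where each input was broadcast.
    rank = max(len(shape_x), len(shape_y))
    padded_x = (1,) * (rank - len(shape_x)) + tuple(shape_x)
    padded_y = (1,) * (rank - len(shape_y)) + tuple(shape_y)
    x_axes = [i for i in range(rank) if padded_x[i] == 1 and padded_y[i] > 1]
    y_axes = [i for i in range(rank) if padded_y[i] == 1 and padded_x[i] > 1]
    return x_axes, y_axes

print(broadcast_reduction_axes((3, 1), (1, 4)))  # ([1], [0])
print(broadcast_reduction_axes((5,), (2, 5)))    # ([0], [])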
def _remove_outliers_from_hist(hist: Hist, outliers_start_index: int,
                               outliers_removal_axis: OutliersRemovalAxis) -> None:
    if outliers_start_index > 0:
        x = ctypes.c_int(0)
        y = ctypes.c_int(0)
        z = ctypes.c_int(0)
        outliers_removal_axis_values: Dict[OutliersRemovalAxis, ctypes.c_int] = {
            projectors.TH1AxisType.x_axis: x,
            projectors.TH1AxisType.y_axis: y,
            projectors.TH1AxisType.z_axis: z,
        }
        for index in range(0, hist.GetNcells()):
            hist.GetBinXYZ(index, x, y, z)
            if hist.GetBinContent(index) < hist.GetBinError(index):
                logger.warning(
                    f'Bin content < error. Name: {hist.GetName()}, '
                    f'Bin content: {hist.GetBinContent(index)}, '
                    f'Bin error: {hist.GetBinError(index)}, '
                    f'index: {index}, ({x.value}, {y.value})')
            if outliers_removal_axis_values[outliers_removal_axis].value >= outliers_start_index:
                hist.SetBinContent(index, 0)
                hist.SetBinError(index, 0)
    else:
        logger.info(f'Hist {hist.GetName()} did not have any outliers to cut')
Remove outliers from a given histogram. Args: hist: Histogram to check for outliers. outliers_start_index: Index in the truth axis where outliers begin. outliers_removal_axis: Axis along which outliers removal will be performed. Usually the particle-level axis. Returns: None. The histogram is modified in place.
codesearchnet
def site_occupation_statistics(self):
    if self.time == 0.0:
        return None
    occupation_stats = {label: 0.0 for label in self.site_labels}
    for site in self.sites:
        occupation_stats[site.label] += site.time_occupied
    for label in self.site_labels:
        occupation_stats[label] /= self.time
    return occupation_stats
Average site occupation for each site type Args: None Returns: (Dict(Str:Float)): Dictionary of occupation statistics, e.g.:: { 'A' : 2.5, 'B' : 25.3 }
codesearchnet
def combine(a1, a2):
    if not isinstance(a1, list):
        a1 = [a1]
    if not isinstance(a2, list):
        a2 = [a2]
    return a1 + a2
Combine two arguments into a single flat list. It is used when you are not sure whether the arguments are lists but want to combine them into one flat list. Args: a1: list or any other object a2: list or any other object Returns: list: a flat list containing a1 and a2
codesearchnet
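A quick usage sketch assuming the combine function above is in scope; the inputs are illustrative.

print(combine([1, 2], [3, 4]))   # [1, 2, 3, 4]
print(combine('a', ['b', 'c']))  # ['a', 'b', 'c']
print(combine(1, 2))             # [1, 2]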
def get_common_properties(root): properties = {} for elem in root.iterfind('commonProperties/property'): name = elem.attrib['name'] if name == 'initial composition': properties['composition'] = {'species': [], 'kind': None} for child in elem.iter('component'): spec = {} spec['species-name'] = child.find('speciesLink').attrib['preferredKey'] units = child.find('amount').attrib['units'] try: spec['InChI'] = child.find('speciesLink').attrib['InChI'] except KeyError: warn('Missing InChI for species ' + spec['species-name']) pass if units in ['mole fraction', 'mass fraction', 'mole percent']: spec['amount'] = [float(child.find('amount').text)] elif units == 'percent': warn('Assuming percent in composition means mole percent') spec['amount'] = [float(child.find('amount').text)] units = 'mole percent' elif units == 'ppm': warn('Assuming molar ppm in composition and converting to mole fraction') spec['amount'] = [float(child.find('amount').text) * 1.e-6] units = 'mole fraction' elif units == 'ppb': warn('Assuming molar ppb in composition and converting to mole fraction') spec['amount'] = [float(child.find('amount').text) * 1.e-9] units = 'mole fraction' else: raise KeywordError('Composition units need to be one of: mole fraction, ' 'mass fraction, mole percent, percent, ppm, or ppb.' ) properties['composition']['species'].append(spec) if properties['composition']['kind'] is None: properties['composition']['kind'] = units elif properties['composition']['kind'] != units: raise KeywordError('composition units ' + units + ' not consistent with ' + properties['composition']['kind'] ) elif name in datagroup_properties: field = name.replace(' ', '-') units = elem.attrib['units'] if units == 'Torr': units = 'torr' quantity = 1.0 * unit_registry(units) try: quantity.to(property_units[field]) except pint.DimensionalityError: raise KeywordError('units incompatible for property ' + name) properties[field] = [' '.join([elem.find('value').text, units])] else: raise KeywordError('Property ' + name + ' not supported as common property') return properties
Read common properties from root of ReSpecTh XML file. Args: root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file Returns: properties (`dict`): Dictionary with common properties
juraj-google-style
def convert_flatten(params, w_name, scope_name, inputs, layers, weights, names): print('Converting flatten ...') if names == 'short': tf_name = 'R' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) reshape = keras.layers.Reshape([-1], name=tf_name) layers[scope_name] = reshape(layers[inputs[0]])
Convert flatten (implemented in Keras as a reshape to a flat vector). Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def __message_to_schema(self, message_type):
    name = self.__normalized_name(message_type)
    schema = {'id': name, 'type': 'object'}
    if message_type.__doc__:
        schema['description'] = message_type.__doc__
    properties = {}
    for field in message_type.all_fields():
        descriptor = {}
        type_info = {}
        if type(field) == messages.MessageField:
            field_type = field.type().__class__
            type_info['$ref'] = self.add_message(field_type)
            if field_type.__doc__:
                descriptor['description'] = field_type.__doc__
        else:
            schema_type = self.__FIELD_TO_SCHEMA_TYPE_MAP.get(
                type(field), self.__DEFAULT_SCHEMA_TYPE)
            if isinstance(schema_type, dict):
                variant_map = schema_type
                variant = getattr(field, 'variant', None)
                if variant in variant_map:
                    schema_type = variant_map[variant]
                else:
                    schema_type = variant_map[None]
            type_info['type'] = schema_type[0]
            if schema_type[1]:
                type_info['format'] = schema_type[1]
            if type(field) == messages.EnumField:
                sorted_enums = sorted(
                    [enum_info for enum_info in field.type],
                    key=lambda enum_info: enum_info.number)
                type_info['enum'] = [enum_info.name for enum_info in sorted_enums]
        if field.required:
            descriptor['required'] = True
        if field.default:
            if type(field) == messages.EnumField:
                descriptor['default'] = str(field.default)
            else:
                descriptor['default'] = field.default
        if field.repeated:
            descriptor['items'] = type_info
            descriptor['type'] = 'array'
        else:
            descriptor.update(type_info)
        properties[field.name] = descriptor
    schema['properties'] = properties
    return schema
Parse a single message into JSON Schema. Will recursively descend the message structure and also parse other messages referenced via MessageFields. Args: message_type: protorpc.messages.Message class to parse. Returns: An object representation of the schema.
codesearchnet
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0): tstream = BytearrayStream() self.revocation_code.write(tstream, kmip_version=kmip_version) if self.revocation_message is not None: self.revocation_message.write(tstream, kmip_version=kmip_version) self.length = tstream.length() super(RevocationReason, self).write(ostream, kmip_version=kmip_version) ostream.write(tstream.buffer)
Write the data encoding the RevocationReason object to a stream. Args: ostream (Stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def start_reporter(redis_address, stdout_file=None, stderr_file=None,
                   redis_password=None):
    reporter_filepath = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'reporter.py')
    command = [
        sys.executable, '-u', reporter_filepath,
        '--redis-address={}'.format(redis_address)
    ]
    if redis_password:
        command += ['--redis-password', redis_password]
    try:
        import psutil
    except ImportError:
        logger.warning(
            "Failed to start the reporter. The reporter requires "
            "'pip install psutil'.")
        return None
    process_info = start_ray_process(
        command, ray_constants.PROCESS_TYPE_REPORTER,
        stdout_file=stdout_file, stderr_file=stderr_file)
    return process_info
Start a reporter process. Args: redis_address (str): The address of the Redis instance. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. Returns: ProcessInfo for the process that was started.
codesearchnet
def _MergeIdenticalCaseInsensitive(self, a, b): if a.lower() != b.lower(): raise MergeError("values must be the same (case insensitive) " "('%s' vs '%s')" % (transitfeed.EncodeUnicode(a), transitfeed.EncodeUnicode(b))) return b
Tries to merge two strings. The strings are required to be the same, ignoring case. The second string is always used as the merged value. Args: a: The first string. b: The second string. Returns: The merged string. This is equal to the second string. Raises: MergeError: The strings were not the same ignoring case.
juraj-google-style
def add(self, other): if isinstance(other, SeriesWeld): other = other.expr return SeriesWeld( grizzly_impl.element_wise_op( self.expr, other, "+", self.weld_type ), self.weld_type, self.df, self.column_name )
Element-wise addition of another series or scalar to this SeriesWeld. Args: other (SeriesWeld or scalar): Right-hand operand of the addition. Returns: SeriesWeld: A new lazily evaluated series representing the element-wise sum.
juraj-google-style
def get_feature_variable_boolean(self, feature_key, variable_key, user_id, attributes=None):
    variable_type = entities.Variable.Type.BOOLEAN
    return self._get_feature_variable_for_type(
        feature_key, variable_key, variable_type, user_id, attributes)
Returns value for a certain boolean variable attached to a feature flag. Args: feature_key: Key of the feature whose variable's value is being accessed. variable_key: Key of the variable whose value is to be accessed. user_id: ID for user. attributes: Dict representing user attributes. Returns: Boolean value of the variable. None if: - Feature key is invalid. - Variable key is invalid. - Mismatch with type of variable.
codesearchnet
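A hedged usage sketch of the SDK call above; the datafile path, feature key, variable key, user id, and attributes are all hypothetical placeholders.

from optimizely import optimizely

# Hypothetical datafile and keys -- replace with real project values.
client = optimizely.Optimizely(datafile=open('datafile.json').read())
value = client.get_feature_variable_boolean(
    'checkout_flow',          # feature key (hypothetical)
    'show_express_option',    # boolean variable key (hypothetical)
    'user-123',
    attributes={'plan': 'premium'})
print(value)  # True, False, or None if a key is invalid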
def center_of_mass(self, time):
    if self.start_time <= time <= self.end_time:
        diff = time - self.start_time
        valid = np.flatnonzero(self.masks[diff] != 0)
        if valid.size > 0:
            com_x = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(
                self.timesteps[diff].ravel()[valid] * self.x[diff].ravel()[valid])
            com_y = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(
                self.timesteps[diff].ravel()[valid] * self.y[diff].ravel()[valid])
        else:
            com_x = np.mean(self.x[diff])
            com_y = np.mean(self.y[diff])
    else:
        com_x = None
        com_y = None
    return com_x, com_y
Calculate the center of mass at a given timestep. Args: time: Time at which the center of mass calculation is performed Returns: The x- and y-coordinates of the center of mass.
juraj-google-style
def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):
    file = __os.path.join(file_location, file_name)
    csv_write = open(file, 'a')
    writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\n')
    # Write a header row that maps each field name to itself.
    headers = dict((n, n) for n in field_names_tuple)
    writer.writerow(headers)
    # Each value of the dictionary becomes one CSV row.
    for dict_key in orig_dict:
        writer.writerow(orig_dict[dict_key])
    csv_write.close()
    return file_name
Function to export a dictionary to a CSV file Args: orig_dict: The dictionary of rows you want exported file_name: The name of the exported file field_names_tuple: The field names in a tuple file_location: The directory for the file, derived using the os module Returns: str: The name of the exported CSV file
juraj-google-style
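A self-contained sketch of the same DictWriter pattern with plain imports; the row data and output path are illustrative.

import csv
import os
import tempfile

rows = {'r1': {'name': 'alice', 'age': 30}, 'r2': {'name': 'bob', 'age': 25}}
fields = ('name', 'age')
out_path = os.path.join(tempfile.gettempdir(), 'example.csv')

with open(out_path, 'a', newline='') as handle:
    writer = csv.DictWriter(handle, fieldnames=fields, lineterminator='\n')
    writer.writerow(dict((n, n) for n in fields))  # header row
    for row in rows.values():
        writer.writerow(row)
print(out_path)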
def controlled_by(self, *control_qubits: Qid) -> 'Gate':
    from cirq.ops import ControlledGate
    return ControlledGate(
        self, control_qubits,
        len(control_qubits) if control_qubits is not None else 1)
Returns a controlled version of this gate. Args: control_qubits: Optional qubits to control the gate by.
juraj-google-style
def ls(root='.', abspaths=False, recursive=False):
    def _expand_subdirs(file):
        if isdir(path(root, file)):
            return [file] + [path(file, x) for x in ls(path(root, file), recursive=True)]
        else:
            return [file]

    if isfile(root):
        return [abspath(root)] if abspaths else [basename(root)]
    elif abspaths:
        relpaths = ls(root, recursive=recursive, abspaths=False)
        base = abspath(root)
        return [path(base, relpath) for relpath in relpaths]
    elif recursive:
        paths = ls(root, abspaths=abspaths, recursive=False)
        return labtypes.flatten([_expand_subdirs(file) for file in paths])
    else:
        return list(sorted(os.listdir(root)))
Return a list of files in directory. Directory listings are sorted alphabetically. If the named directory is a file, return its path. Examples: >>> fs.ls("foo") ["a", "b", "c"] >>> fs.ls("foo/a") ["foo/a"] >>> fs.ls("foo", abspaths=True) ["/home/test/foo/a", "/home/test/foo/b", "/home/test/foo/c"] >>> fs.ls("foo", recursive=True) ["a", "b", "b/d", "b/d/e", "c"] Arguments: root (str): Path to directory. Can be relative or absolute. abspaths (bool, optional): Return absolute paths if true. recursive (bool, optional): Recursively list subdirectories if true. Returns: list of str: A list of paths. Raises: OSError: If root directory does not exist.
codesearchnet
def _add_genetic_models(self, variant_obj, info_dict): genetic_models_entry = info_dict.get('GeneticModels') if genetic_models_entry: genetic_models = [] for family_annotation in genetic_models_entry.split(','): for genetic_model in family_annotation.split(':')[-1].split('|'): genetic_models.append(genetic_model) logger.debug("Updating genetic models to: {0}".format( ', '.join(genetic_models))) variant_obj.genetic_models = genetic_models
Add the genetic models found Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary
juraj-google-style
def get(self, block_id):
    pool = current_app.config['bigchain_pool']
    with pool() as bigchain:
        block = bigchain.get_block(block_id=block_id)
    if not block:
        return make_error(404)
    return block
API endpoint to get details about a block. Args: block_id (str): the id of the block. Returns: A JSON string containing the data about the block.
codesearchnet
def __init__(self, uri='http:
    try:
        self.graph_db = neo4j.GraphDatabaseService(uri)
        version = self.graph_db.neo4j_version
        print '\t- Neo4j GraphDB connected: %s %s' % (str(uri), version)
    except packages.httpstream.http.SocketError:
        print '\t- Neo4j connection failed! Is your Neo4j server running? $ neo4j start'
        raise RuntimeError('Could not connect to Neo4j')
Initialization for NeoDB indexer. Args: uri: The uri to connect NeoDB. Raises: RuntimeError: When connection to NeoDB failed.
juraj-google-style
def project(self, project, entity=None):
    query = gql()
    return self.gql(query, variable_values={
        'entity': entity, 'project': project})['model']
Retrieve project Args: project (str): The project to get details for entity (str, optional): The entity to scope this project to. Returns: [{"id","name","repo","dockerImage","description"}]
juraj-google-style
def format_params_diff(parameter_diff): params_output = '\n'.join([line for v in parameter_diff for line in v.changes()]) return % params_output
Handles the formatting of differences in parameters. Args: parameter_diff (list): A list of DictValues detailing the differences between two dicts returned by :func:`stacker.actions.diff.diff_dictionaries` Returns: string: A formatted string that represents a parameter diff
juraj-google-style
def imwrite(img, file_path, params=None, auto_mkdir=True): if auto_mkdir: dir_name = osp.abspath(osp.dirname(file_path)) mkdir_or_exist(dir_name) return cv2.imwrite(file_path, img, params)
Write image to file Args: img (ndarray): Image array to be written. file_path (str): Image file path. params (None or list): Same as opencv's :func:`imwrite` interface. auto_mkdir (bool): If the parent folder of `file_path` does not exist, whether to create it automatically. Returns: bool: Successful or not.
juraj-google-style
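A short usage sketch assuming the imwrite helper above (plus OpenCV and NumPy) is importable; the image content and output path are made up.

import numpy as np

# A dummy 64x64 grayscale gradient image.
img = np.tile(np.arange(64, dtype=np.uint8), (64, 1))
# The parent directory is created automatically because auto_mkdir defaults to True.
ok = imwrite(img, '/tmp/demo_output/gradient.png')
print(ok)  # True on success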
def loop_until_timeout_or_not_none(timeout_s, function, sleep_s=1): return loop_until_timeout_or_valid( timeout_s, function, lambda x: x is not None, sleep_s)
Loops until the specified function returns non-None or until a timeout. Args: timeout_s: The number of seconds to wait until a timeout condition is reached. As a convenience, this accepts None to mean never timeout. Can also be passed a PolledTimeout object instead of an integer. function: The function to call each iteration. sleep_s: The number of seconds to wait after calling the function. Returns: Whatever the function returned last.
juraj-google-style
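A standalone sketch of the same polling idea without the PolledTimeout helpers; the deadline handling and the polled function (which fakes a result appearing after one second) are assumptions.

import time

def loop_until_timeout_or_not_none(timeout_s, function, sleep_s=1):
    # Simplified stand-in: loop until function() returns non-None or time runs out.
    deadline = time.monotonic() + timeout_s
    while True:
        result = function()
        if result is not None or time.monotonic() >= deadline:
            return result
        time.sleep(sleep_s)

start = time.monotonic()
print(loop_until_timeout_or_not_none(
    5, lambda: 'ready' if time.monotonic() - start > 1 else None, sleep_s=0.2))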
def update(self, other, **kwargs): assert isinstance( other, type(self) ), "Must have the same DataManager subclass to perform this operation" def update_builder(df, other, **kwargs): df = df.copy() df.update(other, **kwargs) return df return self._inter_df_op_handler(update_builder, other, **kwargs)
Uses other manager to update corresponding values in this manager. Args: other: The other manager. Returns: New DataManager with updated data and index.
juraj-google-style
def compare_files(path1, path2):
    diff = difflib.ndiff(open(path1).readlines(), open(path2).readlines())
    return [x for x in diff if x[0] in ['-', '+', '?']]
Returns the delta between two files in difflib's -, ?, + format, excluding lines that are the same Args: path1 (str): Path to the first file path2 (str): Path to the second file Returns: List[str]: Delta between the two files
juraj-google-style
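The heart of the helper above is difflib.ndiff; a self-contained sketch on in-memory lines shows the -, +, ? markers it keeps. The sample lines are illustrative.

import difflib

old = ['alpha\n', 'beta\n', 'gamma\n']
new = ['alpha\n', 'betas\n', 'gamma\n']
delta = [x for x in difflib.ndiff(old, new) if x[0] in ['-', '+', '?']]
print(delta)  # the removed line, the added line, and a '?' hint line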
def load_data(path, dense=False):
    catalog = {'.csv': load_csv, '.sps': load_svmlight_file, '.h5': load_hdf5}
    ext = os.path.splitext(path)[1]
    func = catalog[ext]
    X, y = func(path)
    if dense and sparse.issparse(X):
        X = X.todense()
    return X, y
Load data from a CSV, LibSVM or HDF5 file based on the file extension. Args: path (str): A path to the CSV, LibSVM or HDF5 format file containing data. dense (boolean): An optional variable indicating if the return matrix should be dense. By default, it is false. Returns: Data matrix X and target vector y
codesearchnet
def validate_string(string, options=None):
    output.info("Performing JSON schema validation on input string: " + string)
    stream = io.StringIO(string)
    return validate(stream, options)
Validate the input `string` according to the options passed in. If any exceptions are raised during validation, no further validation will take place. Args: string: The string containing the JSON to be validated. options: An instance of ``ValidationOptions``. Returns: An ObjectValidationResults instance, or a list of such.
juraj-google-style