code: string (lengths 20 – 4.93k)
docstring: string (lengths 33 – 1.27k)
source: string (3 classes)
def create_symlink(self, file_path, link_target, create_missing_dirs=True): if (not self._is_link_supported()): raise OSError('Symbolic links are not supported on Windows before Python 3.2') file_path = self.make_string_path(file_path) link_target = self.make_string_path(link_target) file_path = self.normcase(file_path) if self.ends_with_path_separator(file_path): if self.exists(file_path): self.raise_os_error(errno.EEXIST, file_path) if self.exists(link_target): if (not self.is_windows_fs): self.raise_os_error(errno.ENOENT, file_path) else: if self.is_windows_fs: self.raise_os_error(errno.EINVAL, link_target) if (not self.exists(self._path_without_trailing_separators(file_path), check_link=True)): self.raise_os_error(errno.ENOENT, link_target) if self.is_macos: if self.exists(file_path, check_link=True): self.remove_object(file_path) else: self.raise_os_error(errno.EEXIST, link_target) if (not self.islink(file_path)): file_path = self.resolve_path(file_path) link_target = make_string_path(link_target) return self.create_file_internally(file_path, st_mode=(S_IFLNK | PERM_DEF), contents=link_target, create_missing_dirs=create_missing_dirs, raw_io=True)
Create the specified symlink, pointed at the specified link target. Args: file_path: path to the symlink to create link_target: the target of the symlink create_missing_dirs: If `True`, any missing parent directories of file_path will be created Returns: The newly created FakeFile object. Raises: OSError: if the symlink could not be created (see :py:meth:`create_file`). OSError: if on Windows before Python 3.2.
codesearchnet
def _parse_resource(self, uri: str, json_obj: Dict[str, Any]) -> Optional[_T]: json_parser = _json_parser.JsonParser(self.handler, self.resource_time_zone) resource_type = json_obj.get('resourceType') if resource_type is None: raise ValueError(f'JSON for URI {uri} does not have a resource type.') if resource_type == 'Bundle': json_value = _find_resource_in_bundle(uri, json_obj) if json_value is None: return None else: target = self.proto_cls() json_parser.merge_value(json_value, target) return target else: target = self.proto_cls() json_parser.merge_value(json_obj, target) return target
Parses a protocol buffer for the given JSON object. Args: uri: The URI of the resource to parse. json_obj: The JSON object to parse into a proto. Returns: The protocol buffer for the resource or `None` if it can not be found.
github-repos
def get_subdomain(url): if url not in URLHelper.__cache: URLHelper.__cache[url] = urlparse(url) return ".".join(URLHelper.__cache[url].netloc.split(".")[:-2])
Get the subdomain of the given URL. Args: url (str): The URL to get the subdomain from. Returns: str: The subdomain(s)
juraj-google-style
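A minimal standalone sketch of the same netloc-splitting logic, assuming only the standard library; the example URL is made up.

from urllib.parse import urlparse

# Everything left of the registered domain (last two labels) is treated as the subdomain.
netloc = urlparse("https://login.mail.example.com/inbox").netloc
subdomain = ".".join(netloc.split(".")[:-2])  # -> "login.mail"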
def macro_tpm_sbs(self, state_by_state_micro_tpm): validate.tpm(state_by_state_micro_tpm, check_independence=False) mapping = self.make_mapping() num_macro_states = 2 ** len(self.macro_indices) macro_tpm = np.zeros((num_macro_states, num_macro_states)) micro_states = range(2 ** len(self.micro_indices)) micro_state_transitions = itertools.product(micro_states, repeat=2) for previous_state, current_state in micro_state_transitions: macro_tpm[mapping[previous_state], mapping[current_state]] += ( state_by_state_micro_tpm[previous_state, current_state]) return np.array([distribution.normalize(row) for row in macro_tpm])
Create a state-by-state coarse-grained macro TPM. Args: state_by_state_micro_tpm (np.ndarray): The state-by-state TPM of the micro-system. Returns: np.ndarray: The state-by-state TPM of the macro-system.
juraj-google-style
def get_dataset(self, name): url = (self.url() + '/resource/dataset/{}'.format(name)) req = self.remote_utils.get_url(url) if (req.status_code != 200): raise RemoteDataNotFoundError('Could not find {}'.format(req.text)) else: return req.json()
Returns info regarding a particular dataset. Arguments: name (str): Dataset name Returns: dict: Dataset information
codesearchnet
def _process_tensorlike(inputs): def _convert_numpy_and_scipy(x): if isinstance(x, np.ndarray): dtype = None if issubclass(x.dtype.type, np.floating): dtype = backend.floatx() return tensor_conversion.convert_to_tensor_v2_with_dispatch(x, dtype=dtype) elif _is_scipy_sparse(x): return _scipy_sparse_to_sparse_tensor(x) return x inputs = nest.map_structure(_convert_numpy_and_scipy, inputs) return nest.list_to_tuple(inputs)
Process tensor-like inputs. This function: (1) Converts `Numpy` arrays to `Tensor`s. (2) Converts `Scipy` sparse matrices to `SparseTensor`s. (3) Converts `list`s to `tuple`s (for `tf.data` support). Args: inputs: Structure of `Tensor`s, `NumPy` arrays, or tensor-like. Returns: Structure of `Tensor`s or tensor-like.
github-repos
def convert_gather(params, w_name, scope_name, inputs, layers, weights, names): print('Converting embedding ...') if names == 'short': tf_name = 'EMBD' + random_string(4) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) weights_name = '{0}.weight'.format(w_name) W = weights[weights_name].numpy() input_channels, output_channels = W.shape keras_weights = [W] dense = keras.layers.Embedding( input_channels, weights=keras_weights, output_dim=output_channels, name=tf_name ) layers[scope_name] = dense(layers[inputs[1]])
Convert gather (embedding) layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def aggregate_field(self, field, combine_fn, dest): return _GroupAndAggregate(self, ()).aggregate_field(field, combine_fn, dest)
Returns a grouping operation that also aggregates grouped values. Args: field: indicates the field to be aggregated combine_fn: indicates the aggregation function to be used dest: indicates the name that will be used for the aggregate in the output May be called repeatedly to aggregate multiple fields, e.g. GroupBy('key') .aggregate_field('some_attr', sum, 'sum_attr') .aggregate_field(lambda v: ..., MeanCombineFn, 'mean')
github-repos
def uniform_full_int(self, shape, dtype=dtypes.uint64, name=None): dtype = dtypes.as_dtype(dtype) with ops.name_scope(name, 'stateful_uniform_full_int', [shape]) as name: shape = _shape_tensor(shape) return self._uniform_full_int(shape=shape, dtype=dtype, name=name)
Uniform distribution on an integer type's entire range. This method is the same as setting `minval` and `maxval` to `None` in the `uniform` method. Args: shape: the shape of the output. dtype: (optional) the integer type, default to uint64. name: (optional) the name of the node. Returns: A tensor of random numbers of the required shape.
github-repos
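A hedged usage sketch: the stateful generator API shown here (tf.random.Generator) is an assumption about where this method is exposed, and the shape is illustrative.

import tensorflow as tf

g = tf.random.Generator.from_seed(1)
# Draws uint64 samples over the type's entire range (no minval/maxval clipping).
samples = g.uniform_full_int([2, 3], dtype=tf.uint64)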
def connection_made(self, transport): self.transport = transport self.transport.sendto(self.message) self.transport.close()
Create connection, use to send message and close. Args: transport (asyncio.DatagramTransport): Transport used for sending.
juraj-google-style
def _get_bradcrack_data(bravais): json_file = pkg_resources.resource_filename(__name__, 'bradcrack.json') with open(json_file, 'r') as f: bradcrack_data = load_json(f) return bradcrack_data[bravais]
r"""Read Bradley--Cracknell k-points path from data file Args: bravais (str): Lattice code including orientation e.g. 'trig_p_c' Returns: dict: kpoint path and special point locations, formatted as e.g.:: {'kpoints': {'\Gamma': [0., 0., 0.], 'X': [0., 0.5, 0.], ...}, 'path': [['\Gamma', 'X', ..., 'P'], ['H', 'N', ...]]}
juraj-google-style
def replace_tensors_by_numpy_ndarrays(repr_ds: RepresentativeDataset, sess: session.Session) -> RepresentativeDataset: new_repr_ds = [] for sample in repr_ds: new_sample = {} for input_key, input_data in sample.items(): if isinstance(input_data, core.Tensor): input_data = input_data.eval(session=sess) new_sample[input_key] = input_data new_repr_ds.append(new_sample) return new_repr_ds
Replaces tf.Tensors in samples by their evaluated numpy arrays. Note: This should be run in graph mode (default in TF1) only. Args: repr_ds: Representative dataset to replace the tf.Tensors with their evaluated values. `repr_ds` is iterated through, so it may not be reusable (e.g. if it is a generator object). sess: Session instance used to evaluate tf.Tensors. Returns: The new representative dataset where each tf.Tensor is replaced by its evaluated numpy ndarrays.
github-repos
def _BuildScanTreeNode(self, path_filter_table, ignore_list): paths_list = list(path_filter_table.paths) ignore_list = list(ignore_list) similarity_weights = _PathSegmentWeights() occurrence_weights = _PathSegmentWeights() value_weights = _PathSegmentWeights() for path_segment_index in path_filter_table.path_segments_per_index.keys(): if (not path_filter_table.path_segments_per_index[path_segment_index]): continue similarity_weights.AddIndex(path_segment_index) occurrence_weights.AddIndex(path_segment_index) value_weights.AddIndex(path_segment_index) path_segments = path_filter_table.GetPathSegments(path_segment_index) number_of_path_segments = len(path_segments.keys()) if (number_of_path_segments > 1): occurrence_weights.SetWeight(path_segment_index, number_of_path_segments) for paths_per_segment_list in path_segments.values(): path_segment_weight = len(paths_per_segment_list) if (path_segment_weight > 1): similarity_weights.AddWeight(path_segment_index, path_segment_weight) path_segment_index = self._GetMostSignificantPathSegmentIndex(paths_list, similarity_weights, occurrence_weights, value_weights) ignore_list.append(path_segment_index) if (path_segment_index < 0): raise ValueError('Invalid path segment index value out of bounds.') scan_tree_node = PathFilterScanTreeNode(path_segment_index) path_segments = path_filter_table.GetPathSegments(path_segment_index) for (path_segment, paths_per_segment_list) in path_segments.items(): if (not paths_per_segment_list): raise ValueError('Invalid number of paths value out of bounds.') if (len(paths_per_segment_list) == 1): for path in paths_per_segment_list: scan_tree_node.AddPathSegment(path_segment, path) else: sub_path_filter_table = _PathFilterTable(paths_per_segment_list, ignore_list, path_segment_separator=self._path_segment_separator) scan_sub_node = self._BuildScanTreeNode(sub_path_filter_table, ignore_list) scan_tree_node.AddPathSegment(path_segment, scan_sub_node) for path in paths_per_segment_list: paths_list.remove(path) number_of_paths = len(paths_list) if (number_of_paths == 1): scan_tree_node.SetDefaultValue(paths_list[0]) elif (number_of_paths > 1): path_filter_table = _PathFilterTable(paths_list, ignore_list, path_segment_separator=self._path_segment_separator) scan_sub_node = self._BuildScanTreeNode(path_filter_table, ignore_list) scan_tree_node.SetDefaultValue(scan_sub_node) return scan_tree_node
Builds a scan tree node. Args: path_filter_table: a path filter table object (instance of _PathFilterTable). ignore_list: a list of path segment indexes to ignore, where 0 is the index of the first path segment relative from the root. Returns: A scan tree node (instance of PathFilterScanTreeNode). Raises: ValueError: if the path segment index value or the number of paths segments value is out of bounds.
codesearchnet
def closest_distance(item_a, time_a, item_b, time_b, max_value): return (np.minimum(item_a.closest_distance(time_a, item_b, time_b), max_value) / float(max_value))
Euclidean distance between the pixels in item_a and item_b closest to each other. Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
codesearchnet
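A worked example of the scaling above: max_value acts as both a cap and a normaliser, so a 30-unit closest distance with max_value=100 maps to 0.3, and anything at or beyond 100 saturates at 1.0.

import numpy as np

# Same formula as in closest_distance, with the object distance replaced by a plain number.
np.minimum(30.0, 100.0) / 100.0    # -> 0.3
np.minimum(250.0, 100.0) / 100.0   # -> 1.0 (capped)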
def predict_next_action(self, state_key, next_action_list): if self.q_df is not None: next_action_q_df = self.q_df[self.q_df.state_key == state_key] next_action_q_df = next_action_q_df[next_action_q_df.action_key.isin(next_action_list)] if next_action_q_df.shape[0] == 0: return random.choice(next_action_list) else: if next_action_q_df.shape[0] == 1: max_q_action = next_action_q_df["action_key"].values[0] else: next_action_q_df = next_action_q_df.sort_values(by=["q_value"], ascending=False) max_q_action = next_action_q_df.iloc[0, :]["action_key"] return max_q_action else: return random.choice(next_action_list)
Predict next action by Q-Learning. Args: state_key: The key of state in `self.t+1`. next_action_list: The possible action in `self.t+1`. Returns: The key of action.
juraj-google-style
def member_of(self, group): if isinstance(group, Group): group = group.name return self.groups.filter(name=group).exists()
Returns whether a user is a member of a certain group. Args: group The name of a group (string) or a group object Returns: Boolean
juraj-google-style
def maybe_copy_file_to_directory(source_filepath, target_directory): if not tf.gfile.Exists(target_directory): tf.logging.info("Creating directory %s" % target_directory) os.mkdir(target_directory) target_filepath = os.path.join(target_directory, os.path.basename(source_filepath)) if not tf.gfile.Exists(target_filepath): tf.logging.info("Copying %s to %s" % (source_filepath, target_filepath)) tf.gfile.Copy(source_filepath, target_filepath) statinfo = os.stat(target_filepath) tf.logging.info("Successfully copied %s, %s bytes." % (target_filepath, statinfo.st_size)) else: tf.logging.info("Not copying, file already found: %s" % target_filepath) return target_filepath
Copy a file to a directory if it is not already there. Returns the target filepath. Args: source_filepath: a string target_directory: a string Returns: a string
juraj-google-style
def from_path(cls, path, suffix=''): def _get_filepath(filename): name_pattern = (((filename + suffix) + '*') if (filename != 'POTCAR') else (filename + '*')) paths = glob.glob(os.path.join(path, name_pattern)) fpath = None if (len(paths) >= 1): paths.sort(reverse=True) warning_msg = (('Multiple files detected, using %s' % os.path.basename(paths[0])) if (len(paths) > 1) else None) fpath = paths[0] else: warning_msg = ('Could not find %s' % filename) if (filename in ['AECCAR0', 'AECCAR2']): warning_msg += ', cannot calculate charge transfer.' elif (filename == 'POTCAR'): warning_msg += ', interpret Bader results with caution.' if warning_msg: warnings.warn(warning_msg) return fpath chgcar_filename = _get_filepath('CHGCAR') if (chgcar_filename is None): raise IOError('Could not find CHGCAR!') potcar_filename = _get_filepath('POTCAR') aeccar0 = _get_filepath('AECCAR0') aeccar2 = _get_filepath('AECCAR2') if (aeccar0 and aeccar2): chgref = (Chgcar.from_file(aeccar0) + Chgcar.from_file(aeccar2)) chgref_filename = 'CHGREF' chgref.write_file(chgref_filename) else: chgref_filename = None return cls(chgcar_filename, potcar_filename=potcar_filename, chgref_filename=chgref_filename)
Convenient constructor that takes in the path name of VASP run to perform Bader analysis. Args: path (str): Name of directory where VASP output files are stored. suffix (str): specific suffix to look for (e.g. '.relax1' for 'CHGCAR.relax1.gz').
codesearchnet
def _get_file_names(file_pattern, shuffle): if isinstance(file_pattern, list): if not file_pattern: raise ValueError('Argument `file_pattern` should not be empty.') file_names = [] for entry in file_pattern: file_names.extend(gfile.Glob(entry)) else: file_names = list(gfile.Glob(file_pattern)) if not file_names: raise ValueError(f'No files match `file_pattern` {file_pattern}.') if not shuffle: file_names = sorted(file_names) return file_names
Parse list of file names from pattern, optionally shuffled. Args: file_pattern: File glob pattern, or list of glob patterns. shuffle: Whether to shuffle the order of file names. Returns: List of file names matching `file_pattern`. Raises: ValueError: If `file_pattern` is empty, or pattern matches no files.
github-repos
def normalize(inputs, epsilon=1e-08, scope='ln'): with tf.variable_scope(scope): inputs_shape = inputs.get_shape() params_shape = inputs_shape[(- 1):] (mean, variance) = tf.nn.moments(inputs, [(- 1)], keep_dims=True) beta = tf.Variable(tf.zeros(params_shape)) gamma = tf.Variable(tf.ones(params_shape)) normalized = ((inputs - mean) / ((variance + epsilon) ** 0.5)) outputs = ((gamma * normalized) + beta) return outputs
Applies layer normalization. Args: inputs: A tensor with 2 or more dimensions, where the first dimension has `batch_size`. epsilon: A small floating-point number added to the variance to prevent division by zero. scope: Optional scope for `variable_scope`. Returns: A tensor with the same shape and data dtype as `inputs`.
codesearchnet
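A minimal NumPy sketch of the same layer-norm arithmetic (mean and variance over the last axis, then scale and shift), with gamma=1 and beta=0 for clarity.

import numpy as np

x = np.array([[1.0, 2.0, 3.0]])
mean = x.mean(axis=-1, keepdims=True)
var = x.var(axis=-1, keepdims=True)
# Matches (inputs - mean) / (variance + epsilon) ** 0.5 from the function above.
normalized = (x - mean) / (var + 1e-8) ** 0.5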
def version(self, api_version=True): url = self._url('/version', versioned_api=api_version) return self._result(self._get(url), json=True)
Returns version information from the server. Similar to the ``docker version`` command. Returns: (dict): The server version information Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def latlong_to_locator(latitude, longitude): if ((longitude >= 180) or (longitude <= (- 180))): raise ValueError if ((latitude >= 90) or (latitude <= (- 90))): raise ValueError longitude += 180 latitude += 90 locator = chr((ord('A') + int((longitude / 20)))) locator += chr((ord('A') + int((latitude / 10)))) locator += chr((ord('0') + int(((longitude % 20) / 2)))) locator += chr((ord('0') + int((latitude % 10)))) locator += chr((ord('A') + int(((longitude - (int((longitude / 2)) * 2)) / (2 / 24))))) locator += chr((ord('A') + int(((latitude - (int((latitude / 1)) * 1)) / (1 / 24))))) return locator
Converts WGS84 coordinates into the corresponding Maidenhead Locator Args: latitude (float): Latitude longitude (float): Longitude Returns: string: Maidenhead locator Raises: ValueError: When called with wrong or invalid input args TypeError: When args are non float values Example: The following example converts latitude and longitude into the Maidenhead locator >>> from pyhamtools.locator import latlong_to_locator >>> latitude = 48.5208333 >>> longitude = 9.375 >>> latlong_to_locator(latitude, longitude) 'JN48QM' Note: Latitude (negative = South, positive = North) Longitude (negative = West, positive = East)
codesearchnet
def set_status(self, status: Status, increment_try_count: bool=True, filename: str=None): url = self.url_record.url assert (not self._try_count_incremented), (url, status) if increment_try_count: self._try_count_incremented = True _logger.debug(__('Marking URL {0} status {1}.', url, status)) url_result = URLResult() url_result.filename = filename self.app_session.factory['URLTable'].check_in(url, status, increment_try_count=increment_try_count, url_result=url_result) self._processed = True
Mark the item with the given status. Args: status: a value from :class:`Status`. increment_try_count: if True, increment the ``try_count`` value filename: optional filename of the saved document, recorded on the URL result
codesearchnet
def Environ(variable, default): precondition.AssertType(variable, Text) value = os.environ.get(variable, default) if (value is None): return default if PY2: value = value.decode('utf-8') return value
A wrapper for `os.environ.get` that works the same way in both Pythons. Args: variable: A name of the variable to get the value of. default: A default value to return in case no value for the given variable is set. Returns: An environment value of the given variable.
codesearchnet
def template_instance(self): ofs = self.offset() if ((self.unpack_byte(0) & 15) == 15): ofs += 4 return TemplateInstanceNode(self._buf, ofs, self._chunk, self)
Parse the template instance node. This is used to compute the location of the template definition structure. Returns: TemplateInstanceNode: the template instance.
codesearchnet
def next(self): if (self._mode != 'r'): raise UnsupportedOperation("not available in 'w' mode") self._n += 1 if (self._n > self._nb_markers): raise StopIteration() return (self._bim.index[(self._n - 1)], self._read_current_marker())
Returns the next marker. Returns: tuple: The marker name as a string and its genotypes as a :py:class:`numpy.ndarray`.
codesearchnet
def cdnode(self, astr_path): l_absPath = [] (b_valid, l_absPath) = self.b_pathInTree(astr_path) if b_valid: self.l_cwd = l_absPath[:] self.snode_current = self.snode_root self.sbranch_current = self.sbranch_root for node in l_absPath[1:]: self.snode_current = self.snode_current.d_nodes[node] self.sbranch_current.dict_branch = self.snode_current.snode_parent.d_nodes return {'status': True, 'path': self.l_cwd} return {'status': False, 'path': []}
Change the working node to astr_path. The path is converted to a list, split on '/'. Performing a 'cd' updates all parent and derived nodes relative to the new location. Args: astr_path (string): The path to cd to. Returns: {"status": True/False, "path": l_cwd -- the path as a list}
codesearchnet
def delete_vnet(access_token, subscription_id, resource_group, name): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/virtualNetworks/', name, '?api-version=', NETWORK_API]) return do_delete(endpoint, access_token)
Delete a virtual network. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. name (str): Name of the VNet. Returns: HTTP response. VNet JSON body.
codesearchnet
def next_moments_operating_on(self, qubits: Iterable[ops.Qid], start_moment_index: int=0) -> Dict[(ops.Qid, int)]: next_moments = {} for q in qubits: next_moment = self.next_moment_operating_on([q], start_moment_index) next_moments[q] = (len(self._moments) if (next_moment is None) else next_moment) return next_moments
Finds the index of the next moment that touches each qubit. Args: qubits: The qubits to find the next moments acting on. start_moment_index: The starting point of the search. Returns: The index of the next moment that touches each qubit. If there is no such moment, the next moment is specified as the number of moments in the circuit. Equivalently, can be characterized as one plus the index of the last moment after start_moment_index (inclusive) that does *not* act on a given qubit.
codesearchnet
def rename(self, container, name): url = self._url('/containers/{0}/rename', container) params = {'name': name} res = self._post(url, params=params) self._raise_for_status(res)
Rename a container. Similar to the ``docker rename`` command. Args: container (str): ID of the container to rename name (str): New name for the container Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
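A hedged usage sketch against the low-level docker-py client; the container ID is hypothetical.

import docker

client = docker.APIClient()
# Renames the container; raises docker.errors.APIError if the server rejects the request.
client.rename("1a2b3c4d", name="renamed-container")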
def find_faces(self, image, draw_box=False): frame_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) faces = self.cascade.detectMultiScale(frame_gray, scaleFactor=1.3, minNeighbors=5, minSize=(50, 50), flags=0) if draw_box: for (x, y, w, h) in faces: cv2.rectangle(image, (x, y), ((x + w), (y + h)), (0, 255, 0), 2) return faces
Uses a haarcascade to detect faces inside an image. Args: image: The image. draw_box: If True, the image will be marked with a rectangle. Returns: The faces as returned by OpenCV's detectMultiScale method for cascades.
codesearchnet
def plot_thermodynamic_properties(self, tmin, tmax, ntemp, ylim=None, **kwargs): temperatures = np.linspace(tmin, tmax, ntemp) mol = ('' if self.structure else '-c') fig = self._plot_thermo(self.dos.cv, temperatures, ylabel='Thermodynamic properties', ylim=ylim, label='$C_v$ (J/K/mol{})'.format(mol), **kwargs) self._plot_thermo(self.dos.entropy, temperatures, ylim=ylim, ax=fig.axes[0], label='$S$ (J/K/mol{})'.format(mol), **kwargs) self._plot_thermo(self.dos.internal_energy, temperatures, ylim=ylim, ax=fig.axes[0], factor=0.001, label='$\\Delta E$ (kJ/K/mol{})'.format(mol), **kwargs) self._plot_thermo(self.dos.helmholtz_free_energy, temperatures, ylim=ylim, ax=fig.axes[0], factor=0.001, label='$\\Delta F$ (kJ/K/mol{})'.format(mol), **kwargs) fig.axes[0].legend(loc='best') return fig
Plots all the thermodynamic properties in a temperature range. Args: tmin: minimum temperature tmax: maximum temperature ntemp: number of steps ylim: tuple specifying the y-axis limits. kwargs: kwargs passed to the matplotlib function 'plot'. Returns: matplotlib figure
codesearchnet
def get_item(self, name, bootstrap=False): for item in self._get_items(bootstrap): if (item.name == name): return item return None
Get a particular item in the specification. Args: name (str): The name of the item to retrieve. bootstrap (bool): Only search bootstrap items Returns (YapconfItem): A YapconfItem if it is found, None otherwise.
codesearchnet
def gradient_summaries(grad_vars, groups=None, scope='gradients'): groups = (groups or {'all': '.*'}) grouped = collections.defaultdict(list) for (grad, var) in grad_vars: if (grad is None): continue for (name, pattern) in groups.items(): if re.match(pattern, var.name): name = re.sub(pattern, name, var.name) grouped[name].append(grad) for name in groups: if (name not in grouped): tf.logging.warn("No variables matching '{}' group.".format(name)) summaries = [] for (name, grads) in grouped.items(): grads = [tf.reshape(grad, [(- 1)]) for grad in grads] grads = tf.concat(grads, 0) summaries.append(tf.summary.histogram(((scope + '/') + name), grads)) return tf.summary.merge(summaries)
Create histogram summaries of the gradient. Summaries can be grouped via regexes matching variables names. Args: grad_vars: List of (gradient, variable) tuples as returned by optimizers. groups: Mapping of name to regex for grouping summaries. scope: Name scope for this operation. Returns: Summary tensor.
codesearchnet
def process_messages(self, max_messages=10000): subscribe_clients = [self.primary_subscribe_client] for subscribe_client in subscribe_clients: for _ in range(max_messages): message = subscribe_client.get_message() if message is None: break channel = message["channel"] data = message["data"] if channel == ray.gcs_utils.XRAY_HEARTBEAT_BATCH_CHANNEL: message_handler = self.xray_heartbeat_batch_handler elif channel == ray.gcs_utils.XRAY_DRIVER_CHANNEL: message_handler = self.xray_driver_removed_handler else: raise Exception("This code should be unreachable.") message_handler(channel, data)
Process all messages ready in the subscription channels. This reads messages from the subscription channels and calls the appropriate handlers until there are no messages left. Args: max_messages: The maximum number of messages to process before returning.
juraj-google-style
def signHostCsr(self, xcsr, signas, outp=None, sans=None): pkey = xcsr.get_pubkey() name = xcsr.get_subject().CN return self.genHostCert(name, csr=pkey, signas=signas, outp=outp, sans=sans)
Signs a host CSR with a CA keypair. Args: xcsr (OpenSSL.crypto.X509Req): The certificate signing request. signas (str): The CA keypair name to sign the CSR with. outp (synapse.lib.output.Output): The output buffer. sans (list): List of subject alternative names. Examples: Sign a host key with the CA "myca": cdir.signHostCsr(mycsr, 'myca') Returns: ((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the public key and certificate objects.
codesearchnet
def cctop_check_status(jobid): status = 'http: status_text = requests.post(status) return status_text.text
Check the status of a CCTOP job ID. Args: jobid (str): Job ID obtained when job was submitted Returns: str: 'Finished' if the job is finished and results ready to be downloaded, 'Running' if still in progress, 'Invalid' for any errors.
codesearchnet
def _add_task(cls, worker_task, mapreduce_spec, queue_name): if not _run_task_hook(mapreduce_spec.get_hooks(), "enqueue_worker_task", worker_task, queue_name): try: worker_task.add(queue_name) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError), e: logging.warning("Task %r already exists. %s: %s", worker_task.name, e.__class__, e)
Schedule slice scanning by adding it to the task queue. Args: worker_task: a model.HugeTask task for slice. This is NOT a taskqueue task. mapreduce_spec: an instance of model.MapreduceSpec. queue_name: Optional queue to run on; uses the current queue of execution or the default queue if unspecified.
juraj-google-style
def write_files(dos, pdos, prefix=None, directory=None, zero_to_efermi=True): if (len(dos.densities) == 1): sdata = [[Spin.up, 1, '']] else: sdata = [[Spin.up, 1, '(up)'], [Spin.down, (- 1), '(down)']] header = ['energy'] eners = ((dos.energies - dos.efermi) if zero_to_efermi else dos.energies) tdos_data = [eners] for (spin, sign, label) in sdata: header.append('dos{}'.format(label)) tdos_data.append((dos.densities[spin] * sign)) tdos_data = np.stack(tdos_data, axis=1) filename = ('{}_total_dos.dat'.format(prefix) if prefix else 'total_dos.dat') if directory: filename = os.path.join(directory, filename) np.savetxt(filename, tdos_data, header=' '.join(header)) spin = len(dos.densities) for (el, el_pdos) in pdos.items(): header = ['energy'] pdos_data = [eners] for orb in sort_orbitals(el_pdos): for (spin, sign, label) in sdata: header.append('{}{}'.format(orb, label)) pdos_data.append((el_pdos[orb].densities[spin] * sign)) pdos_data = np.stack(pdos_data, axis=1) if prefix: filename = '{}_{}_dos.dat'.format(prefix, el) else: filename = '{}_dos.dat'.format(el) if directory: filename = os.path.join(directory, filename) np.savetxt(filename, pdos_data, header=' '.join(header))
Write the density of states data to disk. Args: dos (:obj:`~pymatgen.electronic_structure.dos.Dos` or \ :obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The total density of states. pdos (dict): The projected density of states. Formatted as a :obj:`dict` of :obj:`dict` mapping the elements and their orbitals to :obj:`~pymatgen.electronic_structure.dos.Dos` objects. For example:: { 'Bi': {'s': Dos, 'p': Dos}, 'S': {'s': Dos} } prefix (:obj:`str`, optional): A prefix for file names. directory (:obj:`str`, optional): The directory in which to save files. zero_to_efermi (:obj:`bool`, optional): Normalise the energy such that the Fermi level is set as 0 eV.
codesearchnet
def has_apical_dendrite(neuron, min_number=1, treefun=_read_neurite_type): types = [treefun(n) for n in neuron.neurites] return CheckResult(types.count(NeuriteType.apical_dendrite) >= min_number)
Check if a neuron has apical dendrites Arguments: neuron(Neuron): The neuron object to test min_number: minimum number of apical dendrites required treefun: Optional function to calculate the tree type of neuron's neurites Returns: CheckResult with result
juraj-google-style
def cursor_event(self, x, y, dx, dy): self.sys_camera.rot_state(x, y)
The standard mouse movement event method. Can be overridden to add new functionality. By default this feeds the system camera with new values. Args: x: The current mouse x position y: The current mouse y position dx: Delta x position (x position difference from the previous event) dy: Delta y position (y position difference from the previous event)
codesearchnet
def _get_default_retry_params(): default = getattr(_thread_local_settings, 'default_retry_params', None) if ((default is None) or (not default.belong_to_current_request())): return RetryParams() else: return copy.copy(default)
Get default RetryParams for current request and current thread. Returns: A new instance of the default RetryParams.
codesearchnet
def all_reduce_ring(x, parallelism, maybe_reduce=True, use_bfloat16=True): if (parallelism.n == 1): return x if maybe_reduce: original_parallelism = parallelism (parallelism, x) = reduce_by_device(parallelism, x, tf.add_n) if (parallelism.n == 1): y = x else: x_flat = parallelism(tf.reshape, x, ([[(- 1)]] * parallelism.n)) x_split = parallelism(common_layers.approximate_split, x_flat, parallelism.n, 0) def _step(source_replica, target_replica, x_split, op='plus_eq'): 'Helper function - one step of summing or copying.\n\n If op == "plus_eq", then adds source_replica into target_replica\n If op == "copy", then copies source_replica onto target_replica\n\n These operations happen for all shards. The replica numbers are offset\n by the shard numbers to keep all physical links busy.\n\n Args:\n source_replica: an integer\n target_replica: an integer\n x_split: a list of lists of tensors\n op: a string\n ' for shard in range(parallelism.n): source_device = ((shard + source_replica) % parallelism.n) target_device = ((shard + target_replica) % parallelism.n) source = x_split[source_device][shard] if use_bfloat16: with tf.device(parallelism.devices[source_device]): source = tf.to_bfloat16(source) with tf.device(parallelism.devices[target_device]): source = tf.to_float(source) if (op == 'plus_eq'): x_split[target_device][shard] += source else: assert (op == 'copy') x_split[target_device][shard] = tf.identity(source) center = (parallelism.n // 2) for i in reversed(range(center, (parallelism.n - 1))): _step((i + 1), i, x_split, op='plus_eq') for i in range(center): _step(i, (i + 1), x_split, op='plus_eq') for i in range(center, (parallelism.n - 1)): _step(i, (i + 1), x_split, op='copy') for i in reversed(range(center)): _step((i + 1), i, x_split, op='copy') x_concat = parallelism(tf.concat, x_split, 0) y = parallelism(common_layers.reshape_like_all_dims, x_concat, x) if maybe_reduce: y = expand_by_device(original_parallelism, parallelism, y) return y
Compute the sum of all Tensors and put the result everywhere. Assumes that the devices are connected in a ring. Args: x: a list of Tensors with length parallelism.n parallelism: a expert_utils.Parallelism object. maybe_reduce: a boolean - first reduce per device. use_bfloat16: a boolean - saves bandwidth but loses precision Returns: a list of Tensors with length parallelism.n
codesearchnet
def run(self, env: env_tools.PreparedEnv, verbose: bool, previous_failures: Set['Check']) -> CheckResult: if previous_failures.intersection(self.dependencies): print(shell_tools.highlight(('Skipped ' + self.command_line_switch()), shell_tools.YELLOW)) return CheckResult(self, False, 'Skipped due to dependency failing.', None) print(shell_tools.highlight(('Running ' + self.command_line_switch()), shell_tools.GREEN)) try: (success, message) = self.perform_check(env, verbose=verbose) result = CheckResult(self, success, message, None) except Exception as ex: result = CheckResult(self, False, 'Unexpected error.', ex) print(shell_tools.highlight(('Finished ' + self.command_line_switch()), (shell_tools.GREEN if result.success else shell_tools.RED))) if verbose: print(result) return result
Evaluates this check. Args: env: The prepared python environment to run the check in. verbose: When set, more progress output is produced. previous_failures: Checks that have already run and failed. Returns: A CheckResult instance.
codesearchnet
def tag(self, name, action='ADD', params=None): if not name: self._tcex.handle_error(925, ['name', 'tag', 'name', 'name', name]) if not self.can_update(): self._tcex.handle_error(910, [self.type]) if action in ['GET', 'ADD', 'DELETE']: return self.tc_requests.tag( self.api_type, self.api_sub_type, self.unique_id, name, action=action, owner=self.owner, params=params, ) self._tcex.handle_error(925, ['action', 'tag', 'action', 'action', action]) return None
Adds a tag to an Indicator/Group/Victim/Security Label. Args: name: The name of the tag. action: The tag action ('GET', 'ADD', or 'DELETE'); defaults to 'ADD'. params: Optional query parameters passed with the request.
juraj-google-style
def connect_with(self, wire_char): if len([qbit for qbit in self.qubit_layer if qbit is not None]) == 1: return for label, affected_bits in self.connections: if not affected_bits: continue affected_bits[0].connect(wire_char, ['bot']) for affected_bit in affected_bits[1:-1]: affected_bit.connect(wire_char, ['bot', 'top']) affected_bits[-1].connect(wire_char, ['top'], label) if label: for affected_bit in affected_bits: affected_bit.right_fill = len(label) + len(affected_bit.mid)
Connects the elements in the layer using wire_char. Args: wire_char (char): For example '║' or '│'.
juraj-google-style
def step(self, action, blocking=True): promise = self.call('step', action) if blocking: return promise() else: return promise
Step the environment. Args: action: The action to apply to the environment. blocking: Whether to wait for the result. Returns: Transition tuple when blocking, otherwise callable that returns the transition tuple.
juraj-google-style
def random_string(): numpy_state = np.random.get_state() np.random.seed(None) random_id = np.random.bytes(ray_constants.ID_SIZE) np.random.set_state(numpy_state) return random_id
Generate a random string to use as an ID. Note that users may seed numpy, which could cause this function to generate duplicate IDs. Therefore, we need to seed numpy ourselves, but we can't interfere with the state of the user's random number generator, so we extract the state of the random number generator and reset it after we are done. TODO(rkn): If we want to later guarantee that these are generated in a deterministic manner, then we will need to make some changes here. Returns: A random byte string of length ray_constants.ID_SIZE.
codesearchnet
def verify_task_in_task_graph(task_link, graph_defn, level=logging.CRITICAL): ignore_keys = ('created', 'deadline', 'expires', 'dependencies', 'schedulerId') errors = [] runtime_defn = deepcopy(task_link.task) bad_deps = (set(runtime_defn['dependencies']) - set(graph_defn['task']['dependencies'])) bad_deps = (bad_deps - {task_link.decision_task_id}) if bad_deps: errors.append("{} {} dependencies don't line up!\n{}".format(task_link.name, task_link.task_id, bad_deps)) runtime_defn['payload'] = _take_expires_out_from_artifacts_in_payload(runtime_defn['payload']) graph_defn['task']['payload'] = _take_expires_out_from_artifacts_in_payload(graph_defn['task']['payload']) for (key, value) in graph_defn['task'].items(): if (key in ignore_keys): continue if (value != runtime_defn[key]): errors.append('{} {} {} differs!\n graph: {}\n task: {}'.format(task_link.name, task_link.task_id, key, format_json(value), format_json(runtime_defn[key]))) raise_on_errors(errors, level=level)
Verify a given task_link's task against a given graph task definition. This is a helper function for ``verify_link_in_task_graph``; this is split out so we can call it multiple times when we fuzzy match. Args: task_link (LinkOfTrust): the link to try to match graph_defn (dict): the task definition from the task-graph.json to match ``task_link`` against level (int, optional): the logging level to use on errors. Defaults to logging.CRITICAL Raises: CoTError: on failure
codesearchnet
def _validate_state_root(self, state_root): if (self._state_root_regex.fullmatch(state_root) is None): LOGGER.debug('Invalid state root: %s', state_root) raise _ResponseFailed(self._status.INVALID_ROOT)
Validates a state root, raising a ResponseFailed error if invalid. Args: state_root (str): The state_root to validate Raises: ResponseFailed: The state_root was invalid, and a status of INVALID_ROOT will be sent with the response.
codesearchnet
def _get_node_dependencies(self, proto): dependencies = {ref.local_name: ref.node_id for ref in proto.dependencies} kind = proto.WhichOneof('kind') if kind == 'function': concrete_functions = proto.function.concrete_functions for fn_name in concrete_functions: for bound_input in self._proto.concrete_functions[fn_name].bound_inputs: dependencies[bound_input] = bound_input elif kind == 'bare_concrete_function': fn_name = proto.bare_concrete_function.concrete_function_name for bound_input in self._proto.concrete_functions[fn_name].bound_inputs: dependencies[bound_input] = bound_input elif kind == 'resource': for child in proto.children: if child.local_name == '_create_resource': dependencies['_create_resource'] = child.node_id return dependencies
Returns a dictionary of all dependencies of an object. Args: proto: A SavedObject proto. Returns: Dict mapping string dependency name *or* int node id to the node id. The int node id key is used for mapping function captures.
github-repos
def mnist_generator(tmp_dir, training, how_many, start_from=0): _get_mnist(tmp_dir) d = _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME l = _MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from)
Image generator for MNIST. Args: tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many images and labels to generate. start_from: from which image to start. Returns: An instance of image_generator that produces MNIST images.
juraj-google-style
def _get_function_inputs(f, src_kwargs): if hasattr(f, '_func'): f = f._func try: argspec = inspect.getfullargspec(f) except AttributeError: argspec = inspect.getargspec(f) fkwargs = {k: v for (k, v) in six.iteritems(src_kwargs) if (k in argspec.args)} return fkwargs
Filters inputs to be compatible with function `f`'s signature. Args: f: Function according to whose input signature we filter arguments. src_kwargs: Keyword arguments to filter according to `f`. Returns: kwargs: Dict of key-value pairs in `src_kwargs` which exist in `f`'s signature.
codesearchnet
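An illustrative call showing the filtering behaviour: keyword arguments that do not appear in the target function's signature are simply dropped.

def add(a, b):
    return a + b

# Only 'a' and 'b' appear in add's argspec, so 'c' is filtered out.
_get_function_inputs(add, {"a": 1, "b": 2, "c": 3})  # -> {"a": 1, "b": 2}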
def to_pil_image(self, image, rescale=None): self._ensure_format_supported(image) if is_torch_tensor(image): image = image.numpy() if isinstance(image, np.ndarray): if rescale is None: rescale = isinstance(image.flat[0], np.floating) if image.ndim == 3 and image.shape[0] in [1, 3]: image = image.transpose(1, 2, 0) if rescale: image = image * 255 image = image.astype(np.uint8) return PIL.Image.fromarray(image) return image
Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if needed. Args: image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`): The image to convert to the PIL Image format. rescale (`bool`, *optional*): Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default to `True` if the image type is a floating type, `False` otherwise.
github-repos
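A hedged usage sketch; image_processor is an assumed name for the object this method belongs to, and the array values are made up.

import numpy as np

# A float CHW array is transposed to HWC, rescaled to 0-255 and wrapped as a PIL image.
arr = np.random.rand(3, 32, 32).astype(np.float32)
pil_img = image_processor.to_pil_image(arr)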
def index_filename_rel_other_index(self, other: str) -> str: return relpath(self.index_filename, start=dirname(other))
Returns the filename of this index, relative to the director of another index. (For inserting a reference to this index into ``other``.) Args: other: the other index Returns: relative filename of our index
juraj-google-style
def delete_device(self, auth_body, device_id): content = { "auth": auth_body } return self._send("DELETE", "/devices/%s" % device_id, content=content)
Deletes the given device, and invalidates any access token associated with it. NOTE: This endpoint uses the User-Interactive Authentication API. Args: auth_body (dict): Authentication params. device_id (str): The device ID of the device to delete.
juraj-google-style
def __init__(self, columns: list[str], hub_url: str, **kwargs): super().__init__(columns=columns, **kwargs) self.model_uri = hub_url
Embedding config for tensorflow hub models. This config can be used with MLTransform to embed image data. Models are loaded using the RunInference PTransform with the help of a ModelHandler. Args: columns: The columns containing the images to be embedded. hub_url: The url of the tensorflow hub model. min_batch_size: The minimum batch size to be used for inference. max_batch_size: The maximum batch size to be used for inference. large_model: Whether to share the model across processes.
github-repos
def set_default_backend(self, backend_name): if (backend_name not in BACKENDS): raise ValueError(f"Unknown backend '{backend_name}'.") self._default_backend = backend_name
Set the default backend of this circuit. This setting is only applied for this circuit. If you want to change the default backend of all gates, use `BlueqatGlobalSetting.set_default_backend()`. After set the default backend by this method, global setting is ignored even if `BlueqatGlobalSetting.set_default_backend()` is called. If you want to use global default setting, call this method with backend_name=None. Args: backend_name (str or None): new default backend name. If None is given, global setting is applied. Raises: ValueError: If `backend_name` is not registered backend.
codesearchnet
def _accept(random_sample: float, cost_diff: float, temp: float) -> Tuple[(bool, float)]: exponent = ((- cost_diff) / temp) if (exponent >= 0.0): return (True, 1.0) else: probability = math.exp(exponent) return ((probability > random_sample), probability)
Calculates probability and draws if solution should be accepted. Based on exp(-Delta*E/T) formula. Args: random_sample: Uniformly distributed random number in the range [0, 1). cost_diff: Cost difference between new and previous solutions. temp: Current temperature. Returns: Tuple of boolean and float, with boolean equal to True if solution is accepted, and False otherwise. The float value is acceptance probability.
codesearchnet
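A worked example of the Metropolis-style rule above: a cost increase of 0.5 at temperature 1.0 gives acceptance probability exp(-0.5) ≈ 0.607, so the move is accepted whenever the uniform sample falls below that value.

import math

accepted, probability = _accept(random_sample=0.3, cost_diff=0.5, temp=1.0)
# probability == math.exp(-0.5) ≈ 0.607; accepted is True because 0.607 > 0.3.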
def locked_get(self): credential = self._backend.locked_get(self._key) if (credential is not None): credential.set_store(self) return credential
Retrieves the current credentials from the store. Returns: An instance of :class:`oauth2client.client.Credentials` or `None`.
codesearchnet
def target_encode_plus(self, answer: str, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Optional[Union[bool, str]]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) return self._target_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
Prepare a answer string for the model. Args: answer `str`: Corresponding answer supervision to the queries for training the model.
github-repos
def software_breakpoint(self): software_types = [enums.JLinkBreakpoint.SW_RAM, enums.JLinkBreakpoint.SW_FLASH, enums.JLinkBreakpoint.SW] return any(((self.Type & stype) for stype in software_types))
Returns whether this is a software breakpoint. Args: self (JLinkBreakpointInfo): the ``JLinkBreakpointInfo`` instance Returns: ``True`` if the breakpoint is a software breakpoint, otherwise ``False``.
codesearchnet
def get_program_by_title(self, program_title): all_programs = self._load_data(self.PROGRAMS_ENDPOINT, default=[]) matching_programs = [program for program in all_programs if (program.get('title') == program_title)] if (len(matching_programs) > 1): raise MultipleProgramMatchError(len(matching_programs)) elif (len(matching_programs) == 1): return matching_programs[0] else: return None
Return single program by name, or None if not found. Arguments: program_title(string): Program title as seen by students and in Course Catalog Admin Returns: dict: Program data provided by Course Catalog API
codesearchnet
def to_element(self): if (not self.protocol_info): raise DIDLMetadataError('Could not create Element for this resource: protocolInfo not set (required).') root = XML.Element('res') root.attrib['protocolInfo'] = self.protocol_info if (self.import_uri is not None): root.attrib['importUri'] = self.import_uri if (self.size is not None): root.attrib['size'] = str(self.size) if (self.duration is not None): root.attrib['duration'] = self.duration if (self.bitrate is not None): root.attrib['bitrate'] = str(self.bitrate) if (self.sample_frequency is not None): root.attrib['sampleFrequency'] = str(self.sample_frequency) if (self.bits_per_sample is not None): root.attrib['bitsPerSample'] = str(self.bits_per_sample) if (self.nr_audio_channels is not None): root.attrib['nrAudioChannels'] = str(self.nr_audio_channels) if (self.resolution is not None): root.attrib['resolution'] = self.resolution if (self.color_depth is not None): root.attrib['colorDepth'] = str(self.color_depth) if (self.protection is not None): root.attrib['protection'] = self.protection root.text = self.uri return root
Return an ElementTree Element based on this resource. Returns: ~xml.etree.ElementTree.Element: an Element.
codesearchnet
def _findSourceLine(self, annotated_source, line_number): index = None for i, line in enumerate(annotated_source.lines): if line.startswith('L%d ' % line_number): index = i break return index
Find line of given line number in annotated source. Args: annotated_source: (debugger_cli_common.RichTextLines) the annotated source line_number: (int) 1-based line number Returns: (int) If line_number is found, 0-based line index in annotated_source.lines. Otherwise, None.
github-repos
def loadfn(fname): if ((fnmatch(fname, '*POSCAR*') or fnmatch(fname, '*CONTCAR*') or ('.cif' in fname.lower())) or fnmatch(fname, '*.vasp')): return Structure.from_file(fname) elif fnmatch(fname, '*vasprun*'): from pymatgen.io.vasp import Vasprun return Vasprun(fname) elif fnmatch(fname, '*.json*'): from monty.serialization import loadfn return loadfn(fname)
Convenience method to perform quick loading of data from a filename. The type of object returned depends on the file type. Args: fname (string): A filename. Note that fname is matched using unix-style patterns, i.e., fnmatch. Returns: (Structure) if *POSCAR*/*CONTCAR*/*.cif/*.vasp, (Vasprun) if *vasprun*, (obj) if *json* (passthrough to monty.serialization.loadfn).
codesearchnet
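Illustrative calls showing how the file-name pattern decides the return type; the file names are hypothetical.

structure = loadfn("POSCAR")        # matched by *POSCAR* -> Structure
run = loadfn("vasprun.xml.gz")      # matched by *vasprun* -> Vasprun
data = loadfn("entries.json.gz")    # matched by *.json* -> monty.serialization.loadfn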
def create_extended_model(model, db_penalty=None, ex_penalty=None, tp_penalty=None, penalties=None): model_extended = model.create_metabolic_model() extra_compartment = model.extracellular_compartment compartment_ids = set((c.id for c in model.compartments)) if (len(compartment_ids) > 0): logger.info('Using all database reactions in compartments: {}...'.format(', '.join(('{}'.format(c) for c in compartment_ids)))) db_added = add_all_database_reactions(model_extended, compartment_ids) else: logger.warning('No compartments specified in the model; database reactions will not be used! Add compartment specification to model to include database reactions for those compartments.') db_added = set() logger.info('Using artificial exchange reactions for compartment: {}...'.format(extra_compartment)) ex_added = add_all_exchange_reactions(model_extended, extra_compartment, allow_duplicates=True) boundaries = model.compartment_boundaries if (len(boundaries) > 0): logger.info('Using artificial transport reactions for the compartment boundaries: {}...'.format('; '.join(('{}<->{}'.format(c1, c2) for (c1, c2) in boundaries)))) tp_added = add_all_transport_reactions(model_extended, boundaries, allow_duplicates=True) else: logger.warning('No compartment boundaries specified in the model; artificial transport reactions will not be used!') tp_added = set() weights = {} if (db_penalty is not None): weights.update(((rxnid, db_penalty) for rxnid in db_added)) if (tp_penalty is not None): weights.update(((rxnid, tp_penalty) for rxnid in tp_added)) if (ex_penalty is not None): weights.update(((rxnid, ex_penalty) for rxnid in ex_added)) if (penalties is not None): for (rxnid, penalty) in iteritems(penalties): weights[rxnid] = penalty return (model_extended, weights)
Create an extended model for gap-filling. Create a :class:`psamm.metabolicmodel.MetabolicModel` with all reactions added (the reaction database in the model is taken to be the universal database) and also with artificial exchange and transport reactions added. Return the extended :class:`psamm.metabolicmodel.MetabolicModel` and a weight dictionary for added reactions in that model. Args: model: :class:`psamm.datasource.native.NativeModel`. db_penalty: penalty score for database reactions, default is `None`. ex_penalty: penalty score for exchange reactions, default is `None`. tp_penalty: penalty score for transport reactions, default is `None`. penalties: a dictionary of penalty scores for database reactions.
codesearchnet
def query(self, coords): gal = coords l = gal.l.deg b = gal.b.deg scalar_input = not hasattr(l, '__len__') if scalar_input: l = np.array([l]) b = np.array([b]) ebv = np.empty(l.shape, dtype='f8') ebv[:] = np.nan idx = (b >= 65.) & (b <= 90.) ebv[idx] = self._lb2ebv_northcap(l[idx], b[idx]) idx = (b <= -65.) & (b >= -90.) ebv[idx] = self._lb2ebv_southcap(l[idx], b[idx]) idx = (b < 65.) & (b >= 10.) ebv[idx] = self._lb2ebv_midnorth(l[idx], b[idx]) idx = (b > -65.) & (b <= -10.) ebv[idx] = self._lb2ebv_midsouth(l[idx], b[idx]) if scalar_input: ebv = ebv[0] return ebv
Returns E(B-V) at the specified location(s) on the sky. Args: coords (`astropy.coordinates.SkyCoord`): The coordinates to query. Returns: A float array of reddening, in units of E(B-V), at the given coordinates. The shape of the output is the same as the shape of the coordinates stored by `coords`.
juraj-google-style
def tabulate_filetypes_rest(attrnames=None, header=None, flag_wrap_description=True, description_width=40, flag_leaf=True): infos = get_filetypes_info(editor_quote='``', flag_leaf=flag_leaf) (rows, header) = filetypes_info_to_rows_header(infos, attrnames, header, flag_wrap_description, description_width) ret = a99.rest_table(rows, header) return ret
Generates a reST multirow table Args: attrnames: list of attribute names (keys of FILE_TYPE_INFO_ATTRS). Defaults to all attributes header: list of strings containing headers. If not passed, uses default names flag_wrap_description: whether to wrap the description text description_width: width to wrap the description text (effective only if flag_wrap_description is True) flag_leaf: returns only classes that do not have subclasses ("leaf" nodes as in a class tree graph)
codesearchnet
def swo_set_host_buffer_size(self, buf_size): buf = ctypes.c_uint32(buf_size) res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.SET_BUFFERSIZE_HOST, ctypes.byref(buf)) if (res < 0): raise errors.JLinkException(res) return None
Sets the size of the buffer used by the host to collect SWO data. Args: self (JLink): the ``JLink`` instance buf_size (int): the new size of the host buffer Returns: ``None`` Raises: JLinkException: on error
codesearchnet
def peek(init, exposes, debug=False): def _peek(store, container, _stack=None): args = [store.peek(objname, container, _stack=_stack) for objname in exposes] if debug: print(args) return init(*args) return _peek
Default deserializer factory. Arguments: init (callable): type constructor. exposes (iterable): attributes to be peeked and passed to `init`. Returns: callable: deserializer (`peek` routine).
codesearchnet
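A small usage sketch of the deserializer factory: Point is a made-up type whose constructor takes the two exposed attributes.

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

# Builds a _peek(store, container, _stack=None) routine that reads 'x' and 'y'
# from the store and passes them to Point(...).
point_peek = peek(Point, ('x', 'y'))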
def authenticate(self, email=None, password=None, source=None): from gdata.service import BadAuthentication Api.yt_service.email = (email if email else settings.YOUTUBE_AUTH_EMAIL) Api.yt_service.password = (password if password else settings.YOUTUBE_AUTH_PASSWORD) Api.yt_service.source = (source if source else settings.YOUTUBE_CLIENT_ID) try: Api.yt_service.ProgrammaticLogin() self.authenticated = True except BadAuthentication: raise ApiError(_('Incorrect username or password'))
Authenticates the user and sets the GData Auth token. All params (email, password and source) are optional; if not set, the values from settings are used, and if no settings are found, AttributeError is raised. Source is the app id. Raises: gdata.service.exceptions.BadAuthentication
codesearchnet
def export_mt_variants(variants, sample_id):
    document_lines = []
    for variant in variants:
        line = []
        position = variant.get('position')
        change = '>'.join([variant.get('reference'), variant.get('alternative')])
        line.append(position)
        line.append(change)
        line.append(str(position) + change)
        genes = []
        prot_effect = []
        for gene in variant.get('genes'):
            genes.append(gene.get('hgnc_symbol', ''))
            for transcript in gene.get('transcripts'):
                if transcript.get('is_canonical') and transcript.get('protein_sequence_name'):
                    prot_effect.append(urllib.parse.unquote(transcript.get('protein_sequence_name')))
        line.append(','.join(prot_effect))
        line.append(','.join(genes))
        ref_ad = ''
        alt_ad = ''
        for sample in variant['samples']:
            if sample.get('sample_id') == sample_id:
                ref_ad = sample['allele_depths'][0]
                alt_ad = sample['allele_depths'][1]
        line.append(ref_ad)
        line.append(alt_ad)
        document_lines.append(line)
    return document_lines

Export mitochondrial variants for a case to create a MT excel report

Args:
    variants(list): all MT variants for a case, sorted by position
    sample_id(str): the id of a sample within the case

Returns:
    document_lines(list): list of lines to include in the document
juraj-google-style
def step(self, actions):
    observations, raw_rewards, dones, infos = self._step(actions)
    raw_rewards = raw_rewards.astype(np.float32)
    processed_rewards = self.process_rewards(raw_rewards)
    processed_observations = self.process_observations(observations)
    self.trajectories.step(processed_observations, raw_rewards, processed_rewards, dones, actions)
    return processed_observations, processed_rewards, dones, infos

Takes a step in all environments.

Subclasses should override _step to do the actual stepping if something other than
the default implementation is desired.

Args:
    actions: Batch of actions.

Returns:
    (processed_observations, processed_rewards, dones, infos).
juraj-google-style
def set_local_interface(self, value=None, default=False, disable=False):
    return self._configure_mlag('local-interface', value, default, disable)

Configures the mlag local-interface value

Args:
    value (str): The value to configure the local-interface
    default (bool): Configures the local-interface using the default keyword
    disable (bool): Negates the local-interface using the no keyword

Returns:
    bool: Returns True if the commands complete successfully
codesearchnet
def getSlicesForText(self, body, getFingerprint=None, startIndex=0, maxResults=10):
    return self._text.getSlicesForText(self._retina, body, getFingerprint, startIndex, maxResults)

Get a list of slices of the text

Args:
    body, str: The text to be evaluated (required)
    getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
    startIndex, int: The start-index for pagination (optional)
    maxResults, int: Max results per page (optional)

Returns:
    list of Text

Raises:
    CorticalioException: if the request was not successful
juraj-google-style
def __call__(self, parser, namespace, value, option_string=None, **kwargs):
    handle = copen(value, mode=self.mode, **self.kwargs)
    setattr(namespace, self.dest, handle)

Detects and opens compressed files

Args:
    parser (ArgumentParser): parser used to generate values
    namespace (Namespace): namespace to set values for
    value (str): actual value specified by user
    option_string (str): argument flag used to call this function
    **kwargs (various): optional arguments later passed to the compression algorithm
juraj-google-style
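Usage sketch (illustrative only): the ``Open`` class name below is an assumption for the argparse Action subclass that owns this ``__call__``; it is not confirmed by the snippet above, and the file name is a placeholder.

import argparse

parser = argparse.ArgumentParser()
# The hypothetical Action subclass transparently decompresses the file via copen().
parser.add_argument('input', action=Open, mode='r')
args = parser.parse_args(['reads.fastq.gz'])
# args.input is now an open, possibly decompressed, file handle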
def send(self, message):
    body = {
        'notificationType': self._notification_type,
        'priority': self._priority,
        'isOrganization': self._is_organization,
        'message': message,
    }
    if self._recipients:
        body['recipients'] = self._recipients
    self._tcex.log.debug('notification body: {}'.format(json.dumps(body)))
    resource = self._tcex.resource('Notification')
    resource.http_method = 'POST'
    resource.body = json.dumps(body)
    results = resource.request()
    if results.get('response').status_code == 200:
        response = results.get('response').json()
    elif results.get('response').status_code == 400:
        err = 'Failed to send notification ({})'.format(results.get('response').text)
        self._tcex.log.error(err)
        response = results.get('response').json()
    else:
        err = 'Failed to send notification ({})'.format(results.get('response').text)
        self._tcex.log.error(err)
        raise RuntimeError(err)
    return response

Send our message

Args:
    message (str): The message to be sent.

Returns:
    requests.models.Response: The response from the request.
codesearchnet
def from_filename(filename, require=None):
    with io.open(filename, 'r', encoding='utf-8') as json_file:
        data = json.load(json_file)
    return data, from_dict(data, require=require)

Reads a Google service account JSON file and returns its parsed info.

Args:
    filename (str): The path to the service account .json file.
    require (Sequence[str]): List of keys required to be present in the info.

Returns:
    Tuple[Mapping[str, str], google.auth.crypt.Signer]: The verified info and a signer instance.
juraj-google-style
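Usage sketch (illustrative, not from the original source): the key file path and required keys below are placeholders.

# Load a service account key file and obtain its parsed info plus a signer.
info, signer = from_filename('service-account.json', require=['client_email', 'token_uri'])
print(info['client_email'])   # e.g. the service account's email address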
def add_showcases(self, showcases, showcases_to_check=None):
    if showcases_to_check is None:
        showcases_to_check = self.get_showcases()
    allshowcasesadded = True
    for showcase in showcases:
        if not self.add_showcase(showcase, showcases_to_check=showcases_to_check):
            allshowcasesadded = False
    return allshowcasesadded

Add dataset to multiple showcases

Args:
    showcases (List[Union[Showcase,Dict,str]]): A list of either showcase ids or showcase metadata from Showcase objects or dictionaries
    showcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset.

Returns:
    bool: True if all showcases added or False if any already present
codesearchnet
def _any(objs, query):
    for obj in objs:
        if isinstance(obj, Document):
            if _any(obj.roots, query):
                return True
        elif any(query(ref) for ref in obj.references()):
            return True
    else:
        return False

Whether any of a collection of objects satisfies a given query predicate

Args:
    objs (seq[Model or Document]) :
    query (callable)

Returns:
    True, if ``query(obj)`` is True for some object in ``objs``, else False
codesearchnet
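Usage sketch (illustrative only): ``objs`` is assumed to be a sequence of Bokeh models or Documents; the model name is a placeholder.

# Check whether anything in `objs` references a model named "my_plot".
found = _any(objs, lambda ref: ref.name == 'my_plot')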
def get_first_model_with_rest_name(cls, rest_name):
    models = cls.get_models_with_rest_name(rest_name)
    if len(models) > 0:
        return models[0]
    return None

Get the first model corresponding to a rest_name

Args:
    rest_name: the rest name

Returns:
    the first matching model, or None if no model matches
codesearchnet
def nuc_v(msg):
    tc = typecode(msg)
    if tc != 19:
        raise RuntimeError("%s: Not an airborne velocity message, expecting TC = 19" % msg)
    msgbin = common.hex2bin(msg)
    NUCv = common.bin2int(msgbin[42:45])
    try:
        HVE = uncertainty.NUCv[NUCv]['HVE']
        VVE = uncertainty.NUCv[NUCv]['VVE']
    except KeyError:
        HVE, VVE = uncertainty.NA, uncertainty.NA
    return HVE, VVE

Calculate NUCv, Navigation Uncertainty Category - Velocity (ADS-B version 1)

Args:
    msg (string): 28 bytes hexadecimal message string

Returns:
    int or string: 95% Horizontal Velocity Error
    int or string: 95% Vertical Velocity Error
juraj-google-style
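Usage sketch (illustrative only): the hex string below is a placeholder and must be replaced with a real TC=19 airborne velocity message, otherwise the function raises RuntimeError.

msg = '8D485020994409940838175B284F'   # placeholder: substitute a real velocity message
HVE, VVE = nuc_v(msg)                  # 95% horizontal / vertical velocity error bounds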
def run(self, dag):
    self.layout = self.layout or self.property_set['layout']
    if self.layout is None:
        raise TranspilerError('EnlargeWithAncilla requires property_set["layout"] or "layout" parameter to run')
    layout_virtual_qubits = self.layout.get_virtual_bits().keys()
    new_qregs = set(virtual_qubit[0] for virtual_qubit in layout_virtual_qubits
                    if virtual_qubit not in dag.wires)
    for qreg in new_qregs:
        dag.add_qreg(qreg)
    return dag

Extends dag with virtual qubits that are in layout but not in the circuit yet.

Args:
    dag (DAGCircuit): DAG to extend.

Returns:
    DAGCircuit: An extended DAG.

Raises:
    TranspilerError: If there is no layout in the property set and none was set at init time.
codesearchnet
def FromString(val):
    if isinstance(val, bytes):
        val = val.decode('utf-8')
    try:
        return ContractParameterType[val]
    except Exception:
        pass
    try:
        if isinstance(val, (bytearray, bytes)):
            int_val = int.from_bytes(val, 'little')
        else:
            int_val = int.from_bytes(binascii.unhexlify(val), 'little')
    except (binascii.Error, TypeError):
        int_val = int(val)
    return ContractParameterType(int_val)

Create a ContractParameterType object from a str

Args:
    val (str): the value to be converted to a ContractParameterType. val can be hex encoded (b'07'), int (7), string int ("7"), or string literal ("String")

Returns:
    ContractParameterType
codesearchnet
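Usage sketch (illustrative only): this assumes the enum defines a member named ``Signature`` and that ``FromString`` is exposed as a static/class-level helper, which the snippet above does not confirm.

ContractParameterType.FromString('Signature')   # string literal form
ContractParameterType.FromString('07')          # hex encoded form
ContractParameterType.FromString(7)             # plain int form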
def _create_typed_object_meta(get_fset):
    def _get_fget(attr, private_attr, type_):
        """Create a property getter method for an attribute.

        Args:
            attr: The name of the attribute that will be retrieved.
            private_attr: The name of the attribute that will store any data
                related to the attribute.
            type_: The annotated type defining what values can be stored in
                the attribute.

        Returns:
            A function that takes self and retrieves the private attribute
            from self.
        """
        def _fget(self):
            """Get attribute from self without revealing the private name."""
            try:
                return getattr(self, private_attr)
            except AttributeError:
                raise AttributeError("'{}' object has no attribute '{}'".format(
                    _get_type_name(type_), attr))
        return _fget

    class _AnnotatedObjectMeta(type):
        """A metaclass that reads annotations from a class definition."""

        def __new__(mcs, name, bases, attrs, **kwargs):
            """Create class objs that replaces annotated attrs with properties.

            Args:
                mcs: The class object being created.
                name: The name of the class to create.
                bases: The list of all base classes for the new class.
                attrs: The list of all attributes for the new class from the
                    definition.

            Returns:
                A new class instance with the expected base classes and
                attributes, but with annotated, public, non-constant,
                non-method attributes replaced by property objects that
                validate against the annotated type.
            """
            annotations = attrs.get('__annotations__', {})
            use_comment_type_hints = not annotations and attrs.get('__module__') != __name__
            if use_comment_type_hints:
                frame_source = _get_class_frame_source(name)
                annotations = get_type_hints(*frame_source)
            names = list(attrs) + list(annotations)
            typed_attrs = {}
            for attr in names:
                typed_attrs[attr] = attrs.get(attr)
                if _is_propertyable(names, attrs, annotations, attr):
                    private_attr = '__{}'.format(attr)
                    if attr in attrs:
                        typed_attrs[private_attr] = attrs[attr]
                    type_ = (Optional[annotations[attr]]
                             if not use_comment_type_hints and attr in attrs and attrs[attr] is None
                             else annotations[attr])
                    typed_attrs[attr] = property(_get_fget(attr, private_attr, type_),
                                                 get_fset(attr, private_attr, type_))
            properties = [attr for attr in annotations
                          if _is_propertyable(names, attrs, annotations, attr)]
            typed_attrs['_tp__typed_properties'] = properties
            typed_attrs['_tp__required_typed_properties'] = [
                attr for attr in properties
                if (attr not in attrs or (attrs[attr] is None and use_comment_type_hints))
                and NoneType not in getattr(annotations[attr], '__args__', ())
            ]
            return super(_AnnotatedObjectMeta, mcs).__new__(mcs, name, bases, typed_attrs, **kwargs)

    return _AnnotatedObjectMeta

Create a metaclass for typed objects.

Args:
    get_fset: A function that takes three parameters: the name of an attribute, the
        name of the private attribute that holds the property data, and a type. This
        function must return an object method that accepts a value.

Returns:
    A metaclass that reads annotations from a class definition and creates properties
    for annotated, public, non-constant, non-method attributes that will guarantee the
    type of the stored value matches the annotation.
codesearchnet
def convert_to_tensors(self, inputs, tensor_type: Optional[Union[str, TensorType]] = None,
                       prepend_batch_axis: bool = False):
    if not isinstance(tensor_type, TensorType):
        tensor_type = TensorType(tensor_type)
    if tensor_type == TensorType.TENSORFLOW:
        if not is_tf_available():
            raise ImportError('Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.')
        import tensorflow as tf
        as_tensor = tf.constant
        is_tensor = tf.is_tensor
    elif tensor_type == TensorType.PYTORCH:
        if not is_torch_available():
            raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')
        import torch
        as_tensor = torch.tensor
        is_tensor = torch.is_tensor
    elif tensor_type == TensorType.JAX:
        if not is_flax_available():
            raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.')
        import jax.numpy as jnp
        as_tensor = jnp.array
        is_tensor = _is_jax
    else:
        as_tensor = np.asarray
        is_tensor = _is_numpy
    try:
        if prepend_batch_axis:
            inputs = [inputs]
        if not is_tensor(inputs):
            inputs = as_tensor(inputs)
    except:
        raise ValueError("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' 'truncation=True' to have batched tensors with the same length.")
    return inputs

Convert the inner content to tensors.

Args:
    tensor_type (`str` or [`~utils.TensorType`], *optional*):
        The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If unset, no modification is done.
    prepend_batch_axis (`int`, *optional*, defaults to `False`):
        Whether or not to add the batch dimension during the conversion.
github-repos
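Usage sketch (illustrative only): ``processor`` stands in for whatever object exposes this method; the token ids are arbitrary and the 'pt'/'np' strings assume the usual TensorType values.

ids = [[101, 2023, 102], [101, 2003, 102]]
pt_batch = processor.convert_to_tensors(ids, tensor_type='pt')   # PyTorch tensors
np_batch = processor.convert_to_tensors(ids, tensor_type='np')   # NumPy arrays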
def get_vocab(self, vocab_name, **kwargs):
    vocab_dict = self.__get_vocab_dict__(vocab_name, **kwargs)
    filepaths = list(set([os.path.join(self.cache_dir, vocab_dict['filename']),
                          os.path.join(self.vocab_dir, vocab_dict['filename'])]))
    for path in filepaths:
        if os.path.exists(path):
            with open(path, 'rb') as f_obj:
                vocab_dict.update({"name": vocab_name,
                                   "data": f_obj.read(),
                                   "modified": os.path.getmtime(path)})
            return vocab_dict
    download_locs = make_list(vocab_dict.get('download', []))
    for loc in download_locs:
        loc_web = urllib.request.urlopen(loc)
        urllib.request.urlretrieve(loc, filepaths[0])
        with open(filepaths[0], 'rb') as f_obj:
            vocab_dict.update({"name": vocab_name,
                               "data": f_obj.read(),
                               "modified": os.path.getmtime(filepaths[0])})
        return vocab_dict

Returns data stream of an rdf vocabulary

Args:
    vocab_name: the name or uri of the vocab to return
juraj-google-style
def meta_features_path(self, path):
    return os.path.join(path, app.config['XCESSIV_META_FEATURES_FOLDER'], str(self.id)) + '.npy'

Returns path for meta-features

Args:
    path (str): Absolute/local path of xcessiv folder
codesearchnet
def normalize_genotypes(genotypes):
    genotypes = genotypes.genotypes
    return (genotypes - np.nanmean(genotypes)) / np.nanstd(genotypes)

Normalize the genotypes.

Args:
    genotypes (Genotypes): The genotypes to normalize.

Returns:
    numpy.array: The normalized genotypes.
juraj-google-style
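Worked example of the same standardization on a plain array (illustrative values; missing genotypes encoded as NaN are ignored by the nan-aware statistics):

import numpy as np

g = np.array([0.0, 1.0, 2.0, np.nan, 1.0])
z = (g - np.nanmean(g)) / np.nanstd(g)   # zero mean, unit variance over observed calls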
def __init__(self, value, opaque_type, name='Opaque Object'):
    super(OpaqueObject, self).__init__()
    self._object_type = enums.ObjectType.OPAQUE_DATA
    self.value = value
    self.opaque_type = opaque_type
    self.names.append(name)
    self._digest = None
    self._revocation_reason = None
    self._destroy_date = None
    self._compromise_occurrence_date = None
    self._compromise_date = None
    self.validate()

Create a OpaqueObject.

Args:
    value(bytes): The bytes representing opaque data.
    opaque_type(OpaqueDataType): An enumeration defining the type of the opaque value.
    name(string): The string name of the opaque object.
juraj-google-style
def _RDFClass(cls, table):
    rdf_cls_name = 'OsqueryTable{}'.format(hash(table.query))
    try:
        return cls._rdf_cls_cache[rdf_cls_name]
    except KeyError:
        pass
    rdf_cls = compatibility.MakeType(rdf_cls_name, (rdf_structs.RDFProtoStruct,), {})
    rdf_cls.AddDescriptor(rdf_structs.ProtoEmbedded(name='metadata', field_number=1,
                                                    nested=ExportedMetadata))
    rdf_cls.AddDescriptor(rdf_structs.ProtoString(name='__query__', field_number=2))
    for idx, column in enumerate(table.header.columns):
        if column.name == 'metadata':
            name = '__metadata__'
        else:
            name = column.name
        descriptor = rdf_structs.ProtoString(name=name, field_number=idx + 3)
        rdf_cls.AddDescriptor(descriptor)
    cls._rdf_cls_cache[rdf_cls_name] = rdf_cls
    return rdf_cls

Creates a dynamic RDF proto struct class for given osquery table.

The fields of the proto will correspond to the columns of the table.

Args:
    table: An osquery table for which the class is about to be generated.

Returns:
    A class object corresponding to the given table.
codesearchnet
def getbalance(self, user_id="", as_decimal=True):
    balance = unicode(self.rpc.call("getbalance", user_id))
    self.logger.debug("\"" + user_id + "\"", self.coin, "balance:", balance)
    if as_decimal:
        return Decimal(balance)
    else:
        return balance

Calculate the total balance in all addresses belonging to this user.

Args:
    user_id (str): this user's unique identifier
    as_decimal (bool): balance is returned as a Decimal if True (default) or a string if False

Returns:
    str or Decimal: this account's total coin balance
juraj-google-style
def _inject(self, value, settings):
    assert isinstance(value, string_types), 'Expected str; got {0.__class__}'.format(value)
    begin, end = '{{', '}}'
    if begin not in value:
        return value, False
    new_value = value
    begin_pos, end_pos = 0, None
    len_begin, len_end = len(begin), len(end)
    len_value = len(new_value)
    while begin_pos < len_value:
        begin_pos = new_value.find(begin, begin_pos)
        if begin_pos == -1:
            break
        before = new_value[:begin_pos]
        begin_pos += len_begin
        end_pos = new_value.find(end, begin_pos)
        if end_pos == -1:
            raise ValueError('Unmatched {begin}...{end} in {value}'.format(**locals()))
        name = new_value[begin_pos:end_pos]
        name = name.strip()
        if not name:
            raise ValueError('Empty name in {value}'.format(**locals()))
        after_pos = end_pos + len_end
        try:
            after = new_value[after_pos:]
        except IndexError:
            after = ''
        try:
            injection_value = settings.get_dotted(name)
        except KeyError:
            raise KeyError('{name} not found in {settings}'.format(**locals()))
        if not isinstance(injection_value, string_types):
            injection_value = self.strategy.encode_value(injection_value)
        new_value = ''.join((before, injection_value, after))
        begin_pos = len(before) + len(injection_value)
        len_value = len(new_value)
    return new_value, (new_value != value)

Inject ``settings`` into ``value``.

Go through ``value`` looking for ``{{NAME}}`` groups and replace each group with the
value of the named item from ``settings``.

Args:
    value (str): The value to inject settings into
    settings: An object that provides the dotted access interface

Returns:
    (str, bool): The new value and whether the new value is different from the original value
juraj-google-style
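Behaviour sketch (illustrative only): ``DottedSettings`` below is a hypothetical stand-in for any object exposing ``get_dotted()``, and ``processor`` stands in for the object that owns ``_inject``; the connection string is a placeholder.

class DottedSettings:
    """Minimal stand-in providing the dotted access interface."""
    def __init__(self, data):
        self.data = data
    def get_dotted(self, name):
        return self.data[name]

settings = DottedSettings({'db.host': 'localhost', 'db.port': '5432'})
# processor._inject('postgres://{{db.host}}:{{db.port}}/app', settings)
# roughly -> ('postgres://localhost:5432/app', True); non-str values pass through strategy.encode_value()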
def intent(self, user: str = None, token: Optional[str] = None) -> "IntentAPI":
    if self.is_real_user:
        raise ValueError("Can't get child intent of real user")
    if token:
        return IntentAPI(user, self.real_user(user, token), self.bot_intent(), self.state_store,
                         self.intent_log)
    return IntentAPI(user, self.user(user), self.bot_intent(), self.state_store, self.intent_log)

Get the intent API for a specific user.

Args:
    user: The Matrix ID of the user whose intent API to get.
    token: The access token to use for that user (optional).

Returns:
    The IntentAPI for the given user.
juraj-google-style
def __get_default_value_from_element(self, element):
    if element.name == 'select':
        options = element.find_all('option')
        is_multiple = element.has_attr('multiple')
        selected_options = [option for option in options if option.has_attr('selected')]
        if not selected_options and options:
            selected_options = [options[0]]
        selected_values = []
        if is_multiple:
            for option in selected_options:
                value = option['value'] if option.has_attr('value') else option.string
                selected_values.append(value)
            return selected_values
        elif len(selected_options) >= 1:
            if selected_options[0].has_attr('value'):
                return selected_options[0]['value']
            else:
                return selected_options[0].string
        return ''
    if element.name == 'textarea':
        return element.string if element.string is not None else ''
    if element.name == 'input' and element.has_attr('type'):
        if element['type'] in ('checkbox', 'radio'):
            if not element.has_attr('checked'):
                return False
            if element.has_attr('value'):
                return element['value']
            else:
                return 'on'
    if element.has_attr('value'):
        return element['value']
    return ''

Get the default value of a form element

Args:
    element (obj): The soup element.

Returns:
    str: The default value
codesearchnet
class RMSprop(Optimizer):

    def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0.0, **kwargs):
        super(RMSprop, self).__init__(**kwargs)
        with backend.name_scope(self.__class__.__name__):
            self.lr = backend.variable(lr, name='lr')
            self.rho = backend.variable(rho, name='rho')
            self.decay = backend.variable(decay, name='decay')
            self.iterations = backend.variable(0, dtype='int64', name='iterations')
        if epsilon is None:
            epsilon = backend.epsilon()
        self.epsilon = epsilon
        self.initial_decay = decay

    def _create_all_weights(self, params):
        accumulators = [backend.zeros(backend.int_shape(p), dtype=backend.dtype(p)) for p in params]
        self.weights = accumulators
        return accumulators

    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        accumulators = self._create_all_weights(params)
        self.updates = [state_ops.assign_add(self.iterations, 1)]
        lr = self.lr
        if self.initial_decay > 0:
            lr = lr * (1.0 / (1.0 + self.decay * math_ops.cast(self.iterations, backend.dtype(self.decay))))
        for p, g, a in zip(params, grads, accumulators):
            new_a = self.rho * a + (1.0 - self.rho) * math_ops.square(g)
            self.updates.append(state_ops.assign(a, new_a))
            new_p = p - lr * g / (backend.sqrt(new_a) + self.epsilon)
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)
            self.updates.append(state_ops.assign(p, new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(backend.get_value(self.lr)),
                  'rho': float(backend.get_value(self.rho)),
                  'decay': float(backend.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(RMSprop, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

RMSProp optimizer.

It is recommended to leave the parameters of this optimizer at their default values
(except the learning rate, which can be freely tuned).

Args:
    lr: float >= 0. Learning rate.
    rho: float >= 0.
    epsilon: float >= 0. Fuzz factor. If `None`, defaults to `backend.epsilon()`.
    decay: float >= 0. Learning rate decay over each update.
github-repos
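Usage sketch (illustrative only): this wires the legacy-style class shown above into a Keras model; the layer size and loss are arbitrary, and in current TensorFlow you would normally reach for tf.keras.optimizers.RMSprop instead.

model = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer=RMSprop(lr=0.001, rho=0.9), loss='categorical_crossentropy')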
def handle_message_registered(self, msg_data, host):
    response = None
    if msg_data['method'] == 'EVENT':
        logger.debug('<%s> <euuid:%s> Event message received' % (msg_data['cuuid'], msg_data['euuid']))
        response = self.event(msg_data['cuuid'], host, msg_data['euuid'], msg_data['event_data'],
                              msg_data['timestamp'], msg_data['priority'])
    elif msg_data['method'] == 'OK EVENT':
        logger.debug('<%s> <euuid:%s> Event confirmation message received' % (msg_data['cuuid'], msg_data['euuid']))
        try:
            del self.event_uuids[msg_data['euuid']]
        except KeyError:
            logger.warning('<%s> <euuid:%s> Euuid does not exist in event buffer. Key was removed before we could process it.' % (msg_data['cuuid'], msg_data['euuid']))
    elif msg_data['method'] == 'OK NOTIFY':
        logger.debug('<%s> <euuid:%s> Ok notify received' % (msg_data['cuuid'], msg_data['euuid']))
        try:
            del self.event_uuids[msg_data['euuid']]
        except KeyError:
            logger.warning('<%s> <euuid:%s> Euuid does not exist in event buffer. Key was removed before we could process it.' % (msg_data['cuuid'], msg_data['euuid']))
    return response

Processes messages that have been delivered by a registered client.

Args:
    msg_data (dict): The unserialized packet data delivered from the listener. It is processed based on the packet's method.
    host (tuple): The (address, host) tuple of the source message.

Returns:
    A response that will be sent back to the client via the listener.
codesearchnet
def __init__(self, **kwargs):
    try:
        arguments = Adapter(Schema(ApplicationOptions.SCHEMA).validate(kwargs))
        self.definition = arguments.definition
        self.matrix_tags = [entry for entry in arguments.matrix_tags.split(',') if len(entry) > 0]
        self.tags = [entry for entry in arguments.tags.split(',') if len(entry) > 0]
        self.validate_only = arguments.validate_only
        self.dry_run = arguments.dry_run
        self.event_logging = arguments.event_logging
        self.logging_config = arguments.logging_config
        self.debug = arguments.debug
        self.strict = arguments.strict
        self.report = arguments.report
        self.temporary_scripts_path = arguments.temporary_scripts_path
    except SchemaError as exception:
        logging.getLogger(__name__).error(exception)
        raise RuntimeError(str(exception))

Initializing and validating fields.

Args:
    kwargs (dict): application command line options.
juraj-google-style
def get(self, id=None, **kwargs):
    server_data = self.gitlab.http_get(self.path, **kwargs)
    if server_data is None:
        return None
    return self._obj_cls(self, server_data)

Retrieve a single object.

Args:
    **kwargs: Extra options to send to the server (e.g. sudo)

Returns:
    object: The generated RESTObject

Raises:
    GitlabAuthenticationError: If authentication is not correct
    GitlabGetError: If the server cannot perform the request
juraj-google-style
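Usage sketch (illustrative only): this assumes a python-gitlab style manager built on the mixin that owns ``get``; the URL, token, and manager path below are placeholders.

# gl = gitlab.Gitlab('https://gitlab.example.com', private_token='...')  # placeholder credentials
# status = gl.user.status.get()   # a GetWithoutId-style manager returns a single RESTObject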