Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def shape4d(a, data_format='NHWC'): s2d = shape2d(a) if (get_data_format(data_format, False) == 'NHWC'): return (([1] + s2d) + [1]) else: return ([1, 1] + s2d)
Ensure a 4D shape, to use with 4D symbolic functions. Args: a: an int or tuple/list of length 2 Returns: list: of length 4. if ``a`` is an int, return ``[1, a, a, 1]`` or ``[1, 1, a, a]`` depending on data_format.
codesearchnet
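A usage sketch for the record above; it assumes tensorpack's shape2d helper (not shown here), which turns an int into a pair, and the outputs follow from the definition and docstring:
shape4d(3)                            # -> [1, 3, 3, 1] (NHWC default)
shape4d((5, 7), data_format='NCHW')   # -> [1, 1, 5, 7]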
def _make_train_step_fn(model, mode, strategy, output_labels): def _step_fn(ctx, inputs): if isinstance(inputs, (tuple, list)) and len(inputs) == 2: inputs, targets = inputs else: targets = None if isinstance(inputs, dict): inputs = [inputs[input_name] for input_name in model._feed_input_names] _build_model(strategy, model, mode, inputs, targets) grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args = strategy.extended.call_for_each_replica(_per_replica_execution_function, args=(dist_utils.get_distributed_model(model, mode), mode)) all_inputs, all_outputs, all_updates, all_session_args = dist_utils.unwrap_values(strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args) combined_fn = backend.function(all_inputs, all_outputs, updates=all_updates, name='distributed_' + str(mode) + '_function', **all_session_args) for label, output in zip(output_labels, combined_fn.outputs): if label == 'loss': reduce_op = ds_reduce_util.ReduceOp.SUM else: reduce_op = ds_reduce_util.ReduceOp.MEAN ctx.set_last_step_output(label, output, reduce_op) return combined_fn.updates_op return _step_fn
Create step fn. Args: model: a Keras Model instance. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. strategy: a `tf.distribute.Strategy` instance. output_labels: the output labels for the step function. Returns: A step function to run by `tf.distribute.Strategy`.
github-repos
def setup_spline(self, spline_options=None): self.spline_options = spline_options relative_energies = (self.energies - self.energies[0]) if scipy_old_piecewisepolynomial: if self.spline_options: raise RuntimeError('Option for saddle point not available with old scipy implementation') self.spline = PiecewisePolynomial(self.r, np.array([relative_energies, (- self.forces)]).T, orders=3) elif (self.spline_options.get('saddle_point', '') == 'zero_slope'): imax = np.argmax(relative_energies) self.spline = CubicSpline(x=self.r[:(imax + 1)], y=relative_energies[:(imax + 1)], bc_type=((1, 0.0), (1, 0.0))) cspline2 = CubicSpline(x=self.r[imax:], y=relative_energies[imax:], bc_type=((1, 0.0), (1, 0.0))) self.spline.extend(c=cspline2.c, x=cspline2.x[1:]) else: self.spline = CubicSpline(x=self.r, y=relative_energies, bc_type=((1, 0.0), (1, 0.0)))
Setup of the options for the spline interpolation Args: spline_options (dict): Options for cubic spline. For example, {"saddle_point": "zero_slope"} forces the slope at the saddle to be zero.
codesearchnet
async def open_interface(self, client_id, conn_string, interface): conn_id = self._client_connection(client_id, conn_string) self._hook_open_interface(conn_string, interface, client_id) (await self.adapter.open_interface(conn_id, interface))
Open a device interface on behalf of a client. See :meth:`AbstractDeviceAdapter.open_interface`. Args: client_id (str): The client we are working for. conn_string (str): A connection string that will be passed to the underlying device adapter. interface (str): The name of the interface to open. Raises: DeviceServerError: There is an issue with your client_id such as not being connected to the device. DeviceAdapterError: The adapter had an issue opening the interface.
codesearchnet
def __process_node(self, node: yaml.Node, expected_type: Type) -> yaml.Node: logger.info('Processing node {} expecting type {}'.format(node, expected_type)) (recognized_types, message) = self.__recognizer.recognize(node, expected_type) if (len(recognized_types) != 1): raise RecognitionError(message) recognized_type = recognized_types[0] logger.debug('Savorizing node {}'.format(node)) if (recognized_type in self._registered_classes.values()): node = self.__savorize(node, recognized_type) logger.debug('Savorized, now {}'.format(node)) logger.debug('Recursing into subnodes') if is_generic_list(recognized_type): if (node.tag != 'tag:yaml.org,2002:seq'): raise RecognitionError('{}{}Expected a {} here'.format(node.start_mark, os.linesep, type_to_desc(expected_type))) for item in node.value: self.__process_node(item, generic_type_args(recognized_type)[0]) elif is_generic_dict(recognized_type): if (node.tag != 'tag:yaml.org,2002:map'): raise RecognitionError('{}{}Expected a {} here'.format(node.start_mark, os.linesep, type_to_desc(expected_type))) for (_, value_node) in node.value: self.__process_node(value_node, generic_type_args(recognized_type)[1]) elif (recognized_type in self._registered_classes.values()): if ((not issubclass(recognized_type, enum.Enum)) and (not issubclass(recognized_type, str)) and (not issubclass(recognized_type, UserString))): for (attr_name, type_, _) in class_subobjects(recognized_type): cnode = Node(node) if cnode.has_attribute(attr_name): subnode = cnode.get_attribute(attr_name) new_subnode = self.__process_node(subnode.yaml_node, type_) cnode.set_attribute(attr_name, new_subnode) else: logger.debug('Not a generic class or a user-defined class, not recursing') node.tag = self.__type_to_tag(recognized_type) logger.debug('Finished processing node {}'.format(node)) return node
Processes a node. This is the main function that implements yatiml's functionality. It figures out how to interpret this node (recognition), then applies syntactic sugar, and finally recurses to the subnodes, if any. Args: node: The node to process. expected_type: The type we expect this node to be. Returns: The transformed node, or a transformed copy.
codesearchnet
def dframe(self, dimensions=None, multi_index=False): import pandas as pd if dimensions is None: dimensions = [d.name for d in self.dimensions()] else: dimensions = [self.get_dimension(d, strict=True).name for d in dimensions] column_names = dimensions dim_vals = OrderedDict([(dim, self.dimension_values(dim)) for dim in column_names]) df = pd.DataFrame(dim_vals) if multi_index: df = df.set_index([d for d in dimensions if d in self.kdims]) return df
Convert dimension values to DataFrame. Returns a pandas dataframe of columns along each dimension, either completely flat or indexed by key dimensions. Args: dimensions: Dimensions to return as columns multi_index: Convert key dimensions to (multi-)index Returns: DataFrame of columns corresponding to each dimension
juraj-google-style
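A minimal usage sketch for the dframe method above, assuming it is the HoloViews Dimensioned.dframe method; the element and dimension names here are illustrative:
import holoviews as hv
curve = hv.Curve([(0, 0), (1, 1), (2, 4)], 'x', 'y')
df = curve.dframe()              # flat DataFrame with columns 'x' and 'y'
df_x_only = curve.dframe(['x'])  # restrict the output to selected dimensions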
def get_header(graphs, proto_fileformat='rawproto', default_ops='NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'): ops_and_kernels = get_ops_and_kernels(proto_fileformat, graphs, default_ops) if not ops_and_kernels: print('Error reading graph!') return 1 return get_header_from_ops_and_kernels(ops_and_kernels, default_ops == 'all')
Computes a header for use with tensorflow SELECTIVE_REGISTRATION. Args: graphs: a list of paths to GraphDef files to include. proto_fileformat: optional format of proto file, either 'textproto', 'rawproto' (default) or ops_list. The ops_list is a file containing the list of ops in JSON format, e.g.: "[["Transpose", "TransposeCpuOp"]]". default_ops: optional comma-separated string of operator:kernel pairs to always include implementation for. Pass 'all' to have all operators and kernels included. Default: 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'. Returns: the string of the header that should be written as ops_to_register.h.
github-repos
def import_object_from_string_code(code, object): sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest() module = imp.new_module(sha256) try: exec_(code, module.__dict__) except Exception as e: raise exceptions.UserError('User code exception', exception_message=str(e)) sys.modules[sha256] = module try: return getattr(module, object) except AttributeError: raise exceptions.UserError('{} not found in code'.format(object))
Used to import an object from arbitrary passed code. Passed in code is treated as a module and is imported and added to `sys.modules` with its SHA256 hash as key. Args: code (string): Python code to import as module object (string): Name of object to extract from imported module
codesearchnet
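A usage sketch for the loader above; the code string and object name are made up for illustration and the function is assumed importable from its module:
source = "def add(a, b):\n    return a + b\n"
add = import_object_from_string_code(source, "add")  # compiles the string as a module and returns 'add'
assert add(2, 3) == 5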
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=0.001): if ndim(x) == 4: if axis == 1 or axis == -3: tf_data_format = 'NCHW' elif axis == 3 or axis == -1: tf_data_format = 'NHWC' else: tf_data_format = None if tf_data_format == 'NHWC' or (tf_data_format == 'NCHW' and _has_nchw_support()): if ndim(mean) > 1: mean = array_ops.reshape(mean, [-1]) if ndim(var) > 1: var = array_ops.reshape(var, [-1]) if beta is None: beta = zeros_like(mean) elif ndim(beta) > 1: beta = array_ops.reshape(beta, [-1]) if gamma is None: gamma = ones_like(mean) elif ndim(gamma) > 1: gamma = array_ops.reshape(gamma, [-1]) y, _, _ = nn.fused_batch_norm(x, gamma, beta, epsilon=epsilon, mean=mean, variance=var, data_format=tf_data_format, is_training=False) return y return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
Applies batch normalization on x given mean, var, beta and gamma. I.e. returns: `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta` Args: x: Input tensor or variable. mean: Mean of batch. var: Variance of batch. beta: Tensor with which to center the input. gamma: Tensor by which to scale the input. axis: Integer, the axis that should be normalized. (typically the features axis). epsilon: Fuzz factor. Returns: A tensor.
github-repos
def limit(self, count): return self.__class__( self._parent, projection=self._projection, field_filters=self._field_filters, orders=self._orders, limit=count, offset=self._offset, start_at=self._start_at, end_at=self._end_at, )
Limit a query to return a fixed number of results. If the current query already has a limit set, this will overwrite it. Args: count (int): Maximum number of documents to return that match the query. Returns: ~.firestore_v1beta1.query.Query: A limited query. Acts as a copy of the current query, modified with the newly added "limit" filter.
juraj-google-style
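A usage sketch for the Firestore limit method above (the collection name is illustrative; this assumes the google-cloud-firestore client API, which may differ slightly between the v1beta1 era and current releases):
from google.cloud import firestore

db = firestore.Client()
query = db.collection('users').limit(10)  # returns a new Query copy; the original is unchanged
docs = query.stream()                     # iterate at most 10 matching documents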
def update_compliance(self, timeout=(- 1)): uri = '{}/compliance'.format(self.data['uri']) return self._helper.update(None, uri, timeout=timeout)
Returns logical interconnects to a consistent state. The current logical interconnect state is compared to the associated logical interconnect group. Any differences identified are corrected, bringing the logical interconnect back to a consistent state. Changes are asynchronously applied to all managed interconnects. Note that if the changes detected involve differences in the interconnect map between the logical interconnect group and the logical interconnect, the process of bringing the logical interconnect back to a consistent state might involve automatically removing existing interconnects from management and/or adding new interconnects for management. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Logical Interconnect.
codesearchnet
def dtype(self, value): for (dtype, string) in self._all: if string == value: return dtype return None
Gets the datatype for the given `value` (description). Args: value (str): A text description for any datatype. Returns: numpy.dtype: The matching datatype for the given text. None: If no match can be found, `None` will be returned.
juraj-google-style
def transform_coords(self, width, height): if self.type not in {EventType.TOUCH_DOWN, EventType.TOUCH_MOTION}: raise AttributeError(_wrong_meth.format(self.type)) x = self._libinput.libinput_event_touch_get_x_transformed( self._handle, width) y = self._libinput.libinput_event_touch_get_y_transformed( self._handle, height) return x, y
Return the current absolute coordinates of the touch event, transformed to screen coordinates. For events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`, :attr:`~libinput.constant.EventType.TOUCH_MOTION`, this method raises :exc:`AttributeError`. Args: width (int): The current output screen width. height (int): The current output screen height. Returns: (float, float): The current absolute (x, y) coordinates transformed to screen coordinates.
juraj-google-style
def _CreateLineStringForShape(self, parent, shape): coordinate_list = [(longitude, latitude) for (latitude, longitude, distance) in shape.points] return self._CreateLineString(parent, coordinate_list)
Create a KML LineString using coordinates from a shape. Args: parent: The parent ElementTree.Element instance. shape: The transitfeed.Shape instance. Returns: The LineString ElementTree.Element instance or None if coordinate_list is empty.
juraj-google-style
def pull_screenrecord(self, bit_rate: int = 5000000, time_limit: int = 180, remote: _PATH = '/sdcard/demo.mp4', local: _PATH = 'demo.mp4') -> None: self.screenrecord(bit_rate, time_limit, filename=remote) self.pull(remote, local)
Record the display of devices running Android 4.4 (API level 19) and higher, then copy the recording to your computer. Args: bit_rate: You can increase the bit rate to improve video quality, but doing so results in larger movie files. time_limit: Sets the maximum recording time, in seconds; the maximum value is 180 (3 minutes).
juraj-google-style
def add_role(user, roles): def _add_role(role): user_role = UserRole() user_role.user_id = user.user_id user_role.role_id = role.role_id db.session.add(user_role) db.session.commit() [_add_role(role) for role in roles]
Map roles for user in database Args: user (User): User to add roles to roles ([Role]): List of roles to add Returns: None
codesearchnet
def add_workflow_definitions(sbi_config: dict): registered_workflows = [] for i in range(len(sbi_config['processing_blocks'])): workflow_config = sbi_config['processing_blocks'][i]['workflow'] workflow_name = '{}:{}'.format(workflow_config['id'], workflow_config['version']) if workflow_name in registered_workflows: continue workflow_definition = dict( id=workflow_config['id'], version=workflow_config['version'], stages=[] ) key = "workflow_definitions:{}:{}".format(workflow_config['id'], workflow_config['version']) DB.save_dict(key, workflow_definition, hierarchical=False) registered_workflows.append(workflow_name)
Add any missing SBI workflow definitions as placeholders. This is a utility function used in testing and adds mock / test workflow definitions to the database for workflows defined in the specified SBI config. Args: sbi_config (dict): SBI configuration dictionary.
juraj-google-style
def indices2nodes(self, indices): if (set(indices) - set(self.node_indices)): raise ValueError("`indices` must be a subset of the Subsystem's indices.") return tuple((self._index2node[n] for n in indices))
Return |Nodes| for these indices. Args: indices (tuple[int]): The indices in question. Returns: tuple[Node]: The |Node| objects corresponding to these indices. Raises: ValueError: If requested indices are not in the subsystem.
codesearchnet
def create_or_update(cls, course_video, file_name=None, image_data=None, generated_images=None): (video_image, created) = cls.objects.get_or_create(course_video=course_video) if image_data: if ((not created) and (VideoImage.objects.filter(image=video_image.image).count() == 1)): video_image.image.delete() with closing(image_data) as image_file: file_name = '{uuid}{ext}'.format(uuid=uuid4().hex, ext=os.path.splitext(file_name)[1]) try: video_image.image.save(file_name, image_file) except Exception: logger.exception('VAL: Video Image save failed to storage for course_id [%s] and video_id [%s]', course_video.course_id, course_video.video.edx_video_id) raise else: if generated_images: video_image.generated_images = generated_images if (not video_image.image.name): file_name = generated_images[0] video_image.image.name = file_name video_image.save() return (video_image, created)
Create a VideoImage object for a CourseVideo. NOTE: If `image_data` is None then `file_name` value will be used as it is, otherwise a new file name is constructed based on uuid and extension from `file_name` value. `image_data` will be None in case of course re-run and export. `generated_images` list contains names of images auto generated by VEDA. If an image is not already set then first image name from `generated_images` list will be used. Arguments: course_video (CourseVideo): CourseVideo instance file_name (str): File name of the image image_data (InMemoryUploadedFile): Image data to be saved. generated_images (list): auto generated image names Returns: Returns a tuple of (video_image, created).
codesearchnet
def leave_module(self, node): for triple_quote in self._tokenized_triple_quotes.values(): self._check_triple_quotes(triple_quote) self._tokenized_triple_quotes = {}
Leave module and check remaining triple quotes. Args: node: the module node we are leaving.
juraj-google-style
def commit_channel(self, channel_id): payload = { "channel_id":channel_id, "stage": config.STAGE, } response = config.SESSION.post(config.finish_channel_url(), data=json.dumps(payload)) if response.status_code != 200: config.LOGGER.error("\n\nCould not activate channel: {}\n".format(response._content.decode('utf-8'))) if response.status_code == 403: config.LOGGER.error("Channel can be viewed at {}\n\n".format(config.open_channel_url(channel_id, staging=True))) sys.exit() response.raise_for_status() new_channel = json.loads(response._content.decode("utf-8")) channel_link = config.open_channel_url(new_channel['new_channel']) return channel_id, channel_link
commit_channel: commits channel to Kolibri Studio Args: channel_id (str): channel's id on Kolibri Studio Returns: channel id and link to the uploaded channel
juraj-google-style
def default_value(fieldname, datatype): if fieldname in tsdb_coded_attributes: return str(tsdb_coded_attributes[fieldname]) else: return _default_datatype_values.get(datatype, '')
Return the default value for a column. If the column name (e.g. *i-wf*) is defined to have an idiosyncratic value, that value is returned. Otherwise the default value for the column's datatype is returned. Args: fieldname: the column name (e.g. `i-wf`) datatype: the datatype of the column (e.g. `:integer`) Returns: The default value for the column. .. deprecated:: v0.7.0
juraj-google-style
def _get_cached_certs(cert_uri, cache): certs = cache.get(cert_uri, namespace=_CERT_NAMESPACE) if (certs is None): _logger.debug('Cert cache miss for %s', cert_uri) try: result = urlfetch.fetch(cert_uri) except AssertionError: return None if (result.status_code == 200): certs = json.loads(result.content) expiration_time_seconds = _get_cert_expiration_time(result.headers) if expiration_time_seconds: cache.set(cert_uri, certs, time=expiration_time_seconds, namespace=_CERT_NAMESPACE) else: _logger.error('Certs not available, HTTP request returned %d', result.status_code) return certs
Get certs from cache if present; otherwise, gets from URI and caches them. Args: cert_uri: URI from which to retrieve certs if cache is stale or empty. cache: Cache of pre-fetched certs. Returns: The retrieved certs.
codesearchnet
def _and_join(self, terms): if (len(terms) > 1): return ' AND '.join([self._or_join(t) for t in terms]) else: return self._or_join(terms[0])
Joins terms using AND operator. Args: terms (list): terms to join Examples: self._and_join(['term1']) -> 'term1' self._and_join(['term1', 'term2']) -> 'term1 AND term2' self._and_join(['term1', 'term2', 'term3']) -> 'term1 AND term2 AND term3' Returns: str
codesearchnet
def __init__(self, missing_modules: Collection[str]=()): if os.getenv('TYPESHED_HOME'): self._store = ExternalTypeshedFs(missing_file=self.MISSING_FILE) else: self._store = InternalTypeshedFs(missing_file=self.MISSING_FILE) self._missing = self._load_missing().union(missing_modules) self._stdlib_versions = self._load_stdlib_versions() self._third_party_packages = self._load_third_party_packages()
Initializer. Args: missing_modules: A collection of modules in the format 'stdlib/module_name', which will be combined with the contents of MISSING_FILE to form a set of missing modules for which pytype will not report errors.
github-repos
def NetworkFee(self): if (self._network_fee is None): input = Fixed8(0) for coin_ref in self.References.values(): if (coin_ref.AssetId == GetBlockchain().SystemCoin().Hash): input = (input + coin_ref.Value) output = Fixed8(0) for tx_output in self.outputs: if (tx_output.AssetId == GetBlockchain().SystemCoin().Hash): output = (output + tx_output.Value) self._network_fee = ((input - output) - self.SystemFee()) return self._network_fee
Get the network fee. Returns: Fixed8:
codesearchnet
def populate(projects_to_filter=None, group=None): if (projects_to_filter is None): projects_to_filter = [] import benchbuild.projects as all_projects all_projects.discover() prjs = ProjectRegistry.projects if projects_to_filter: prjs = {} for filter_project in set(projects_to_filter): try: prjs.update({x: y for (x, y) in ProjectRegistry.projects.items(prefix=filter_project)}) except KeyError: pass if group: groupkeys = set(group) prjs = {name: cls for (name, cls) in prjs.items() if (cls.GROUP in groupkeys)} return {x: prjs[x] for x in prjs if ((prjs[x].DOMAIN != 'debug') or (x in projects_to_filter))}
Populate the list of projects that belong to this experiment. Args: projects_to_filter (list(Project)): List of projects we want to assign to this experiment. We intersect the list of projects with the list of supported projects to get the list of projects that belong to this experiment. group (list(str)): In addition to the project filter, we provide a way to filter whole groups.
codesearchnet
def mesh_axis_to_cumprod(self, tensor_shape): tensor_layout = self.tensor_layout(tensor_shape) ma2ta = tensor_layout.mesh_axis_to_tensor_axis(self.ndims) ta2cumprod = tensor_shape.cumprod return [(None if (ta is None) else ta2cumprod[ta]) for ta in ma2ta]
For each mesh axis, give the product of previous tensor axes. Args: tensor_shape: Shape. Returns: list with length self.ndims where each element is an integer or None.
codesearchnet
def expand_abbreviations(txt, fields): def _expand(matchobj): s = matchobj.group('var') if (s not in fields): matches = [x for x in fields if x.startswith(s)] if (len(matches) == 1): s = matches[0] return ('{%s}' % s) return re.sub(FORMAT_VAR_REGEX, _expand, txt)
Expand abbreviations in a format string. If an abbreviation does not match a field, or matches multiple fields, it is left unchanged. Example: >>> fields = ("hey", "there", "dude") >>> expand_abbreviations("hello {d}", fields) 'hello dude' Args: txt (str): Format string. fields (list of str): Fields to expand to. Returns: Expanded string.
codesearchnet
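A usage sketch for expand_abbreviations above (it assumes FORMAT_VAR_REGEX matches {name} tokens, as the code implies); note that, per the code, the expanded token keeps its braces and is typically passed to str.format afterwards:
fields = ("hey", "there", "dude")
expand_abbreviations("hello {d}", fields)                        # -> 'hello {dude}'
expand_abbreviations("hello {d}", fields).format(dude="world")   # -> 'hello world'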
def np_dtype(dtype): if dtype == 'float' or dtype == float or dtype == np.float32 or dtype == tf.float32: return np.float32 elif dtype == np.float64 or dtype == tf.float64: return np.float64 elif dtype == np.float16 or dtype == tf.float16: return np.float16 elif dtype == 'int' or dtype == int or dtype == np.int32 or dtype == tf.int32: return np.int32 elif dtype == np.int64 or dtype == tf.int64: return np.int64 elif dtype == np.int16 or dtype == tf.int16: return np.int16 elif dtype == 'bool' or dtype == bool or dtype == np.bool_ or dtype == tf.bool: return np.bool_ else: raise TensorForceError("Error: Type conversion from type {} not supported.".format(str(dtype)))
Translates dtype specifications in configurations to numpy data types. Args: dtype: String describing a numerical type (e.g. 'float') or numerical type primitive. Returns: Numpy data type
juraj-google-style
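A usage sketch for np_dtype above; the results follow directly from the branches shown (tf here is TensorFlow as imported by TensorForce):
np_dtype('float')     # -> np.float32
np_dtype(int)         # -> np.int32
np_dtype(np.float16)  # -> np.float16
np_dtype('complex')   # raises TensorForceError: type conversion not supported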
def Matches(self, file_entry, search_depth): if (self._location_segments is None): location_match = None else: location_match = self._CheckLocation(file_entry, search_depth) if (not location_match): return (False, location_match) if (search_depth != self._number_of_location_segments): return (False, location_match) match = self._CheckFileEntryType(file_entry) if ((match is not None) and (not match)): return (False, location_match) match = self._CheckIsAllocated(file_entry) if ((match is not None) and (not match)): return (False, location_match) return (True, location_match)
Determines if the file entry matches the find specification. Args: file_entry (FileEntry): file entry. search_depth (int): number of location path segments to compare. Returns: tuple: contains: bool: True if the file entry matches the find specification, False otherwise. bool: True if the location matches, False if not or None if no location specified.
codesearchnet
def draw_arc(self, x, y, r, start, end, color): check_int_err(lib.arcRGBA(self._ptr, x, y, r, start, end, color[0], color[1], color[2], color[3]))
Draw an arc. Args: x (int): The x coordinate of the center of the arc. y (int): The y coordinate of the center of the arc. r (int): The radius of the arc. start (int): The start of the arc. end (int): The end of the arc. color (Tuple[int, int, int, int]): The color of the arc. Raises: SDLError: If an error is encountered.
juraj-google-style
def update_with_token(self, token_id: int) -> bool: if self.status != RequestStatus.DECODING: return False is_eos = token_id == self.eos_token_id and self.eos_token_id != -1 is_max_len = self.generated_len() >= self.max_new_tokens if is_eos or is_max_len: self.status = RequestStatus.FINISHED return True return False
Update the request with a newly generated token and check for completion. Args: token_id: The token ID to add to the output sequence Returns: bool: True if the request is now complete, False otherwise
github-repos
def get_proj_info(self, token): r = self.remote_utils.get_url((self.url() + '{}/info/'.format(token))) return r.json()
Return the project info for a given token. Arguments: token (str): Token to return information for Returns: JSON: representation of proj_info
codesearchnet
def from_file(cls, filename="CTRL", **kwargs): with zopen(filename, "rt") as f: contents = f.read() return LMTOCtrl.from_string(contents, **kwargs)
Creates a CTRL file object from an existing file. Args: filename: The name of the CTRL file. Defaults to 'CTRL'. Returns: An LMTOCtrl object.
juraj-google-style
def diagonalize_real_symmetric_and_sorted_diagonal_matrices(symmetric_matrix: np.ndarray, diagonal_matrix: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08, check_preconditions: bool=True) -> np.ndarray: if check_preconditions: if (np.any(np.imag(symmetric_matrix)) or (not predicates.is_hermitian(symmetric_matrix, rtol=rtol, atol=atol))): raise ValueError('symmetric_matrix must be real symmetric.') if ((not predicates.is_diagonal(diagonal_matrix, atol=atol)) or np.any(np.imag(diagonal_matrix)) or np.any(diagonal_matrix[:-1, :-1] < diagonal_matrix[1:, 1:])): raise ValueError('diagonal_matrix must be real diagonal descending.') if (not predicates.commutes(diagonal_matrix, symmetric_matrix, rtol=rtol, atol=atol)): raise ValueError('Given matrices must commute.') def similar_singular(i, j): return np.allclose(diagonal_matrix[(i, i)], diagonal_matrix[(j, j)], rtol=rtol) ranges = _contiguous_groups(diagonal_matrix.shape[0], similar_singular) p = np.zeros(symmetric_matrix.shape, dtype=np.float64) for (start, end) in ranges: block = symmetric_matrix[start:end, start:end] p[start:end, start:end] = diagonalize_real_symmetric_matrix(block, rtol=rtol, atol=atol) return p
Returns an orthogonal matrix that diagonalizes both given matrices. The given matrices must commute. Guarantees that the sorted diagonal matrix is not permuted by the diagonalization (except for nearly-equal values). Args: symmetric_matrix: A real symmetric matrix. diagonal_matrix: A real diagonal matrix with entries along the diagonal sorted into descending order. rtol: Relative numeric error threshold. atol: Absolute numeric error threshold. check_preconditions: If set, verifies that the input matrices commute and are respectively symmetric and diagonal descending. Returns: An orthogonal matrix P such that P.T @ symmetric_matrix @ P is diagonal and P.T @ diagonal_matrix @ P = diagonal_matrix (up to tolerance). Raises: ValueError: Matrices don't meet preconditions (e.g. not symmetric).
codesearchnet
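A usage sketch for the simultaneous diagonalization above, assuming it is exposed as in Cirq's cirq.linalg module; the matrices are illustrative and chosen to commute:
import numpy as np
import cirq

d = np.diag([2.0, 2.0, 1.0])                    # real, diagonal, descending
s = np.array([[1.0, 0.5, 0.0],
              [0.5, 1.0, 0.0],
              [0.0, 0.0, 3.0]])                 # real symmetric, commutes with d
p = cirq.linalg.diagonalize_real_symmetric_and_sorted_diagonal_matrices(s, d)
assert np.allclose(p.T @ d @ p, d)              # the sorted diagonal matrix is preserved
st = p.T @ s @ p
assert np.allclose(st, np.diag(np.diag(st)))    # the symmetric matrix is diagonalized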
def asin(cls, x: 'TensorFluent') -> 'TensorFluent': return cls._unary_op(x, tf.asin, tf.float32)
Returns a TensorFluent for the arcsin function. Args: x: The input fluent. Returns: A TensorFluent wrapping the arcsin function.
codesearchnet
def _all_correct_list(array): if type(array) not in _ITERABLE_TYPES: return False for item in array: if not type(item) in _ITERABLE_TYPES: return False if len(item) != 2: return False return True
Make sure that all items in `array` have the right type and size. Args: array (list): Array of python types. Returns: True/False
juraj-google-style
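A usage sketch for _all_correct_list above; _ITERABLE_TYPES is not shown in the record, but it presumably covers list and tuple:
_all_correct_list([["a", 1], ["b", 2]])   # True  - every item is a 2-element list/tuple
_all_correct_list([["a", 1], ["b"]])      # False - second item has the wrong length
_all_correct_list("not nested")           # False - the top-level type check fails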
def print_info(info_mapping): if (not info_mapping): return content_format = '{:<16} : {:<}\n' content = '\n==================== Output ====================\n' content += content_format.format('Variable', 'Value') content += content_format.format(('-' * 16), ('-' * 29)) for (key, value) in info_mapping.items(): if isinstance(value, (tuple, collections.deque)): continue elif isinstance(value, (dict, list)): value = json.dumps(value) elif (value is None): value = 'None' if is_py2: if isinstance(key, unicode): key = key.encode('utf-8') if isinstance(value, unicode): value = value.encode('utf-8') content += content_format.format(key, value) content += (('-' * 48) + '\n') logger.log_info(content)
print info in mapping. Args: info_mapping (dict): input(variables) or output mapping. Examples: >>> info_mapping = { "var_a": "hello", "var_b": "world" } >>> info_mapping = { "status_code": 500 } >>> print_info(info_mapping) ==================== Output ==================== Key : Value ---------------- : ---------------------------- var_a : hello var_b : world ------------------------------------------------
codesearchnet
def assertNumpyObjectTensorsRecursivelyEqual(self, a, b, msg): if isinstance(a, np.ndarray) and a.dtype == object: self.assertEqual(a.dtype, b.dtype, msg) self.assertEqual(a.shape, b.shape, msg) self.assertLen(a, len(b), msg) for a_val, b_val in zip(a, b): self.assertNumpyObjectTensorsRecursivelyEqual(a_val, b_val, msg) else: self.assertAllEqual(a, b, msg)
Check that two numpy arrays are equal. For arrays with dtype=object, check values recursively to see if a and b are equal. (c.f. `np.array_equal`, which checks dtype=object values using object identity.) Args: a: A numpy array. b: A numpy array. msg: Message to display if a != b.
github-repos
def load_schema(schema_path): try: with open(schema_path) as schema_file: schema = json.load(schema_file) except ValueError as e: raise SchemaInvalidError(('Invalid JSON in schema or included schema: %s\n%s' % (schema_file.name, str(e)))) return schema
Load the JSON schema at the given path as a Python object. Args: schema_path: A filename for a JSON schema. Returns: A Python object representation of the schema.
codesearchnet
def create_profile(profile_name): try: profile = Profile(profile_name=profile_name) profile.full_clean() profile.save() except ValidationError as err: raise ValCannotCreateError(err.message_dict)
Used to create Profile objects in the database. A profile needs to exist before an EncodedVideo object can be created. Args: profile_name (str): ID of the profile Raises: ValCannotCreateError: Raised if the profile name is invalid or exists
juraj-google-style
def from_http(cls, raw_body: MutableMapping, verification_token: Optional[str]=None, team_id: Optional[str]=None) -> 'Event': if (verification_token and (raw_body['token'] != verification_token)): raise exceptions.FailedVerification(raw_body['token'], raw_body['team_id']) if (team_id and (raw_body['team_id'] != team_id)): raise exceptions.FailedVerification(raw_body['token'], raw_body['team_id']) if raw_body['event']['type'].startswith('message'): return Message(raw_body['event'], metadata=raw_body) else: return Event(raw_body['event'], metadata=raw_body)
Create an event with data coming from the HTTP Event API. If the event type is a message a :class:`slack.events.Message` is returned. Args: raw_body: Decoded body of the Event API request verification_token: Slack verification token used to verify the request came from slack team_id: Verify the event is for the correct team Returns: :class:`slack.events.Event` or :class:`slack.events.Message` Raises: :class:`slack.exceptions.FailedVerification`: when `verification_token` or `team_id` does not match the incoming event's.
codesearchnet
def decrypt(key, ciphertext): key = ''.join(key) alphabet = string.ascii_letters cipher_alphabet = key.lower() + key.upper() return ciphertext.translate(str.maketrans(cipher_alphabet, alphabet))
Decrypt Simple Substitution enciphered ``ciphertext`` using ``key``. Example: >>> decrypt("PQSTUVWXYZCODEBRAKINGFHJLM", "XUOOB") HELLO Args: key (iterable): The key to use ciphertext (str): The text to decrypt Returns: Decrypted ciphertext
juraj-google-style
def shape_rb_data(raw_rb): rb_data = [] rb_data.append(np.mean(raw_rb, 0)) rb_data.append(np.std(raw_rb, 0)) return rb_data
Take the raw rb data and convert it into averages and std dev Args: raw_rb (numpy.array): m x n x l list where m is the number of seeds, n is the number of Clifford sequences and l is the number of qubits Return: numpy_array: 2 x n x l list where index 0 is the mean over seeds, 1 is the std dev over seeds
codesearchnet
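A shape-focused usage sketch for shape_rb_data above, using random data in the seeds x sequences x qubits layout the docstring describes:
import numpy as np
raw_rb = np.random.rand(5, 10, 2)   # 5 seeds, 10 Clifford sequence lengths, 2 qubits
mean, std = shape_rb_data(raw_rb)
print(mean.shape, std.shape)        # (10, 2) (10, 2)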
def is_installed(name): out = __salt__['cmd.run_all'](((FLATPAK_BINARY_NAME + ' info ') + name)) if (out['retcode'] and out['stderr']): return False else: return True
Determine if a package or runtime is installed. Args: name (str): The name of the package or the runtime. Returns: bool: True if the specified package or runtime is installed. CLI Example: .. code-block:: bash salt '*' flatpak.is_installed org.gimp.GIMP
codesearchnet
def bonds(lines, atoms): conv_stereo_table = {0: 0, 1: 1, 3: 3, 4: 3, 6: 2} results = {a: {} for a in atoms} for line in lines: bond = Bond() first = int(line[0:3]) second = int(line[3:6]) if (first > second): bond.is_lower_first = 0 order = int(line[6:9]) if (order < 4): bond.order = order bond.type = conv_stereo_table[int(line[9:12])] results[first][second] = {'bond': bond} results[second][first] = {'bond': bond} return results
Parse bond block into bond objects Returns: dict: networkx adjacency dict
codesearchnet
def CopyFromDateTimeString(self, time_string): date_time_values = self._CopyDateTimeFromString(time_string) year = date_time_values.get('year', 0) month = date_time_values.get('month', 0) day_of_month = date_time_values.get('day_of_month', 0) hours = date_time_values.get('hours', 0) minutes = date_time_values.get('minutes', 0) seconds = date_time_values.get('seconds', 0) self._normalized_timestamp = None self._number_of_seconds = self._GetNumberOfSecondsFromElements( year, month, day_of_month, hours, minutes, seconds) self._microseconds = date_time_values.get('microseconds', None) self.is_local_time = False
Copies a fake timestamp from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC.
juraj-google-style
def config_stdio(self, log_configurations: Optional[List[LogConfiguration]] = None, default_level=logging.INFO) -> None: if not log_configurations: for logger in self.loggers.values(): self._restrict_output(logger, default_level) else: for component, level in log_configurations: try: logger = self.loggers[self.root + component] except KeyError: raise ValueError("Failed to configure component. Invalid name: {}".format(component)) self._restrict_output(logger, level)
Configure the stdio `StreamHandler` levels on the specified loggers. If no log configurations are specified then the `default_level` will be applied to all handlers. Args: log_configurations: a list of (component name, log level) tuples default_level: logging level to apply when no log_configurations are specified
juraj-google-style
def add_behaviour(self, behaviour, template=None): behaviour.set_agent(self) if issubclass(type(behaviour), FSMBehaviour): for (_, state) in behaviour.get_states().items(): state.set_agent(self) behaviour.set_template(template) self.behaviours.append(behaviour) if self.is_alive(): behaviour.start()
Adds and starts a behaviour to the agent. If template is not None it is used to match new messages and deliver them to the behaviour. Args: behaviour (spade.behaviour.CyclicBehaviour): the behaviour to be started template (spade.template.Template, optional): the template to match messages with (Default value = None)
codesearchnet
def wait(self, timeout_ms=None): closed = timeouts.loop_until_timeout_or_true(timeouts.PolledTimeout.from_millis(timeout_ms), self.stream.is_closed, 0.1) if closed: if hasattr(self.stdout, 'getvalue'): return self.stdout.getvalue() return True return None
Block until this command has completed. Args: timeout_ms: Timeout, in milliseconds, to wait. Returns: Output of the command if it completed and self.stdout is a StringIO object or was passed in as None. Returns True if the command completed but stdout was provided (and was not a StringIO object). Returns None if the timeout expired before the command completed. Be careful to check the return value explicitly for None, as the output may be ''.
codesearchnet
def _backspaced_single_line_animation(animation_, *args, **kwargs): animation_gen = animation_(*args, **kwargs) (yield next(animation_gen)) (yield from util.concatechain(util.BACKSPACE_GEN(kwargs['width']), animation_gen))
Turn an animation into an automatically backspaced animation. Args: animation: A function that returns a generator that yields strings for animation frames. args: Arguments for the animation function. kwargs: Keyword arguments for the animation function. Returns: the animation generator, with backspaces applied to each but the first frame.
codesearchnet
def rms_forward(hidden_states, variance_epsilon=1e-06): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + variance_epsilon) return hidden_states.to(input_dtype)
Calculates simple RMSNorm with no learnable weights. `MambaRMSNorm` will leverage this in order to multiply the final result with the RMSNorm weight Args: hidden_states (`torch.Tensor`): Hidden states to normalize variance_epsilon (`float`): The eps value to add in the square root scaling factor
github-repos
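A usage sketch for rms_forward above, using plain PyTorch; the tensor sizes are arbitrary:
import torch
hidden = torch.randn(2, 5, 64).half()           # half precision to show the dtype round-trip
normed = rms_forward(hidden, variance_epsilon=1e-6)
print(normed.dtype, normed.shape)               # torch.float16 torch.Size([2, 5, 64])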
def _find_max_under_constraint(self, constrained, dependent, predicate): feasible = ops.nonzero(predicate(constrained, self.value)) feasible_exists = ops.greater(ops.size(feasible), 0) max_dependent = ops.max(ops.take(dependent, feasible), initial=0) return ops.where(feasible_exists, max_dependent, 0.0)
Returns the maximum of dependent_statistic that satisfies the constraint. Args: constrained: Over these values the constraint is specified. A rank-1 tensor. dependent: From these values the maximum that satisfies the constraint is selected. Values in this tensor and in `constrained` are linked by having the same threshold at each position, hence this tensor must have the same shape. predicate: A binary boolean functor to be applied to arguments `constrained` and `self.value`, e.g. `ops.greater`. Returns: maximal dependent value, if no value satisfies the constraint 0.0.
github-repos
def __init__(self, graph, run_metadata): self._graph = graph self._run_metadata = run_metadata self._string_table = StringTable() self._functions = Functions(self._string_table) self._locations = Locations(self._functions)
Constructor. Args: graph: A `Graph` instance. run_metadata: A list of `RunMetadata` objects.
github-repos
def parse_yaml_file(self, yaml_file: Union[str, os.PathLike], allow_extra_keys: bool=False) -> tuple[DataClass, ...]: outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys) return tuple(outputs)
Alternative helper method that does not use `argparse` at all, instead loading a yaml file and populating the dataclass types. Args: yaml_file (`str` or `os.PathLike`): File name of the yaml file to parse allow_extra_keys (`bool`, *optional*, defaults to `False`): Defaults to False. If False, will raise an exception if the yaml file contains keys that are not parsed. Returns: Tuple consisting of: - the dataclass instances in the same order as they were passed to the initializer.
github-repos
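A usage sketch for parse_yaml_file above, assuming the standard HfArgumentParser pattern from transformers; the dataclass and file name are illustrative:
from dataclasses import dataclass
from transformers import HfArgumentParser

@dataclass
class TrainConfig:
    learning_rate: float = 5e-5
    num_epochs: int = 3

parser = HfArgumentParser(TrainConfig)
(config,) = parser.parse_yaml_file("train_config.yaml")  # yaml keys map onto dataclass fields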
def strip_strings(model): model.description = None for subgraph in model.subgraphs: subgraph.name = None for tensor in subgraph.tensors: tensor.name = None model.signatureDefs = None
Strips all nonessential strings from the model to reduce model size. We remove the following strings: (find strings by searching ":string" in the tensorflow lite flatbuffer schema) 1. Model description 2. SubGraph name 3. Tensor names We retain OperatorCode custom_code and Metadata name. Args: model: The model from which to remove nonessential strings.
github-repos
def __item_descriptor(self, config): descriptor = { 'kind': 'discovery#directoryItem', 'icons': { 'x16': 'https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png', 'x32': 'https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png', }, 'preferred': True, } description = config.get('description') root_url = config.get('root') name = config.get('name') version = config.get('api_version') relative_path = '/apis/{0}/{1}/rest'.format(name, version) if description: descriptor['description'] = description descriptor['name'] = name descriptor['version'] = version descriptor['discoveryLink'] = '.{0}'.format(relative_path) root_url_port = urlparse.urlparse(root_url).port original_path = self.__request.reconstruct_full_url( port_override=root_url_port) descriptor['discoveryRestUrl'] = '{0}/{1}/{2}/rest'.format( original_path, name, version) if name and version: descriptor['id'] = '{0}:{1}'.format(name, version) return descriptor
Builds an item descriptor for a service configuration. Args: config: A dictionary containing the service configuration to describe. Returns: A dictionary that describes the service configuration.
juraj-google-style
def circuit_to_image(circ: Circuit, qubits: Qubits=None) -> PIL.Image: latex = circuit_to_latex(circ, qubits) img = render_latex(latex) return img
Create an image of a quantum circuit. A convenience function that calls circuit_to_latex() and render_latex(). Args: circ: A quantum Circuit qubits: Optional qubit list to specify qubit order Returns: A PIL Image (Use img.show() to display) Raises: NotImplementedError: For unsupported gates. OSError: If an external dependency is not installed.
codesearchnet
def mach60(msg): d = hex2bin(data(msg)) if d[23] == '0': return None mach = bin2int(d[24:34]) * 2.048 / 512.0 return round(mach, 3)
Aircraft MACH number Args: msg (String): 28 bytes hexadecimal message (BDS60) string Returns: float: MACH number
juraj-google-style
def output_file_for(window, shard, pane): filename = '%s/LOG-%s-%s-%03d-%s' % (output_path, window.max_timestamp(), shard, pane.index, pane.timing) if output_path else None return OutputFile(window.max_timestamp(), shard, pane.index, pane.timing, filename)
Returns: an OutputFile object constructed with pane, window and shard.
github-repos
def on_modified(self, event): self._logger.debug('Detected modify event on watched path: %s', event.src_path) self._process_event(event)
Function called every time a file is modified. Args: event: Event to process.
codesearchnet
def __init__(self, default: typing.Optional[int]=MISSING_VALUE, min_value: typing.Optional[int]=None, max_value: typing.Optional[int]=None, is_noneable: bool=False, frozen: bool=False): super().__init__(int, default, min_value, max_value, is_noneable, frozen)
Constructor. Args: default: (Optional) default value for this spec. min_value: (Optional) minimum value of acceptable values. max_value: (Optional) maximum value of acceptable values. is_noneable: If True, None is acceptable. frozen: If True, values other than the default value are not acceptable.
github-repos
def recipe_dt(config, auth_read, auth_write, bucket, paths, days, hours, dataset): dt(config, {'auth': auth_read, 'from': {'bucket': bucket, 'paths': paths, 'days': days, 'hours': hours}, 'to': {'auth': auth_write, 'dataset': dataset}})
Move data from a DT bucket into a BigQuery table. Args: auth_read (authentication) - Credentials used for reading data. auth_write (authentication) - Credentials used for writing data. bucket (string) - Name of bucket where DT files are stored. paths (string_list) - List of prefixes to pull specific DT files. days (integer) - Number of days back to synchronize. hours (integer) - Number of hours back to synchronize. dataset (string) - Existing dataset in BigQuery.
github-repos
def _FormatValue(self, value, level=0): def FormatDictItem(key_value): key, value = key_value return (self._FormatValue(key, level + 1) + ': ' + self._FormatValue(value, level + 1)) def LimitedEnumerate(items, formatter, level=0): count = 0 limit = self.max_sublist_items if level > 0 else self.max_list_items for item in items: if count == limit: yield '...' break yield formatter(item) count += 1 def FormatList(items, formatter, level=0): return ', '.join(LimitedEnumerate(items, formatter, level=level)) if isinstance(value, _PRIMITIVE_TYPES): return _TrimString(repr(value), self.max_value_len) if isinstance(value, _DATE_TYPES): return str(value) if level > self.max_depth: return str(type(value)) if isinstance(value, dict): return '{' + FormatList(six.iteritems(value), FormatDictItem) + '}' if isinstance(value, _VECTOR_TYPES): return _ListTypeFormatString(value).format(FormatList( value, lambda item: self._FormatValue(item, level + 1), level=level)) if isinstance(value, types.FunctionType): return 'function ' + value.__name__ if hasattr(value, '__dict__') and value.__dict__: return self._FormatValue(value.__dict__, level) return str(type(value))
Pretty-prints an object for a logger. This function is very similar to the standard pprint. The main difference is that it enforces limits to make sure we never produce an extremely long string or take too much time. Args: value: Python object to print. level: current recursion level. Returns: Formatted string.
juraj-google-style
def softplus(x, scale=1.0, name=None): if (scale == 1): return tf.nn.softplus(x) else: with tf.name_scope(name, 'softplus', [x]): scale = tf.convert_to_tensor(scale, dtype=x.dtype.base_dtype) return (tf.nn.softplus((x * scale)) / scale)
Computes softplus with a scale factor to sharpen of the hinge. This is an alternate non-linearity to relu. It has a similar shape, but it has a smooth transition from the linear part to 0. Args: x: A tensor. scale: A float that sharpens the curve. name: Optional name. Returns: y = log(1 + exp(scale * x)) / scale
codesearchnet
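A numeric sketch of the scaled softplus above, checking the formula y = log(1 + exp(scale * x)) / scale with NumPy rather than TensorFlow:
import numpy as np
x = np.linspace(-3.0, 3.0, 7)
for scale in (1.0, 4.0):
    y = np.log1p(np.exp(scale * x)) / scale
    print(scale, np.round(y, 3))   # a larger scale gives a sharper hinge around 0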
def read_binary(self, key, b64decode=True, decode=False): data = None if (key is not None): data = self.db.read(key.strip()) if (data is not None): data = json.loads(data) if b64decode: data = base64.b64decode(data) if decode: try: data = data.decode('utf-8') except UnicodeDecodeError: data = data.decode('latin-1') else: self.tcex.log.warning(u'The key field was None.') return data
Read method of CRUD operation for binary data. Args: key (string): The variable to read from the DB. b64decode (bool): If true the data will be base64 decoded. decode (bool): If true the data will be decoded to a String. Returns: (bytes|string): Results retrieved from DB.
codesearchnet
def initialize_tpu_system_impl(cluster_resolver, tpu_cluster_resolver_cls): if tpu_cluster_resolver_cls is None or not issubclass(tpu_cluster_resolver_cls, cluster_resolver_lib.ClusterResolver) or (not hasattr(tpu_cluster_resolver_cls, 'tpu_hardware_feature')): raise TypeError('tpu_cluster_resolver_cls is not tf.distribute.cluster_resolver.TPUClusterResolver.') logging.info('Deallocate tpu buffers before initializing tpu system.') context.context()._clear_caches() context.context().clear_kernel_cache() gc.collect() job = None if cluster_resolver is None: if context.executing_eagerly(): curr_device = device.DeviceSpec.from_string(context.context().device_name) if curr_device.job is not None: job = '{}/replica:0/task:0'.format(curr_device.job) cluster_resolver = tpu_cluster_resolver_cls('') assert isinstance(cluster_resolver, tpu_cluster_resolver_cls) tpu_name = compat.as_text(cluster_resolver._tpu) if tpu_name in _INITIALIZED_TPU_SYSTEMS: logging.warning('TPU system %s has already been initialized. Reinitializing the TPU can cause previously created variables on TPU to be lost.', tpu_name) logging.info('Initializing the TPU system: %s', tpu_name) if tpu_name not in _LOCAL_MASTERS: job = '{}/replica:0/task:0'.format(cluster_resolver.get_job_name()) if context.executing_eagerly(): @def_function.function(autograph=False) def _tpu_init_fn(): return tpu.initialize_system(job=job, compilation_failure_closes_chips=False, tpu_cancellation_closes_chips=False) run_eagerly = def_function.functions_run_eagerly() if run_eagerly: logging.warning('It looks like tf.function behavior was disabled, perhaps using tf.config.run_functions_eagerly. tf.tpu.experimental.initialize_tpu_system requires tf.function to work. This primitive will override the disable.') def_function.run_functions_eagerly(False) try: with ops.device(tpu._tpu_system_device_name(job)): output = _tpu_init_fn() context.async_wait() except errors.InvalidArgumentError as e: raise errors.NotFoundError(None, None, 'TPUs not found in the cluster. Failed in initialization: ' + str(e)) finally: if run_eagerly is not None: def_function.run_functions_eagerly(run_eagerly) context.context()._initialize_logical_devices() serialized_topology = output.numpy() elif not ops.executing_eagerly_outside_functions(): master = cluster_resolver.master() cluster_spec = cluster_resolver.cluster_spec() session_config = config_pb2.ConfigProto(allow_soft_placement=True) if cluster_spec: session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def()) with ops.Graph().as_default(): with session_lib.Session(config=session_config, target=master) as sess: serialized_topology = sess.run(tpu.initialize_system()) else: with ops.device(tpu._tpu_system_device_name(job)): serialized_topology = tpu.initialize_system(job=job, compilation_failure_closes_chips=False) return serialized_topology logging.info('Finished initializing TPU system.') tpu_topology = topology.Topology(serialized=serialized_topology) cluster_resolver.set_tpu_topology(serialized_topology) _INITIALIZED_TPU_SYSTEMS[tpu_name] = tpu_topology _tpu_worker_address.get_cell('address').set(cluster_resolver.get_master()) return tpu_topology
Implementation for tpu.experimental.initialize_tpu_system. Kept separate to avoid tpu_oss code duplication. Initialize the TPU devices. Args: cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver, which provides information about the TPU cluster. tpu_cluster_resolver_cls: a reference to tf.distribute.cluster_resolver.TPUClusterResolver so that an instance of it can be initialized if cluster_resolver is None. Returns: The tf.tpu.Topology object for the topology of the TPU cluster. If called inside tf.function, it returns the serialized topology object instead. Raises: RuntimeError: If running inside a tf.function. NotFoundError: If no TPU devices found in eager mode. TypeError: If tpu_cluster_resolver_cls is not tf.distribute.cluster_resolver.TPUClusterResolver.
github-repos
def _flatten_subsection(subsection, _type, offset, parent): for row in subsection: if (row in ('Low', 'Generated', 'High')): continue elif isinstance(row[0], StringType): if (len(row) in (4, 5)): if (len(row) == 5): assert (row[4][0] == 'S'), ('Only known usage of a fifth member is Sn, found: %s' % row[4][0]) (yield (float(row[0]), float(row[1]), float(row[2]), (float(row[3]) / 2.0), _type, offset, parent)) parent = offset offset += 1 elif isinstance(row[0], list): split_parent = (offset - 1) start_offset = 0 slices = [] start = 0 for (i, value) in enumerate(row): if (value == '|'): slices.append(slice((start + start_offset), i)) start = (i + 1) slices.append(slice((start + start_offset), len(row))) for split_slice in slices: for _row in _flatten_subsection(row[split_slice], _type, offset, split_parent): offset += 1 (yield _row)
Flatten a subsection from its nested version Args: subsection: Nested subsection as produced by _parse_section, except one level in _type: type of section, ie: AXON, etc parent: first element has this as it's parent offset: position in the final array of the first element Returns: Generator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID]
codesearchnet
def plot(self, figsize=None, rotation=45): (fig, ax) = plt.subplots(figsize=figsize) plt.imshow(self._cm, interpolation='nearest', cmap=plt.cm.Blues, aspect='auto') plt.title('Confusion matrix') plt.colorbar() tick_marks = np.arange(len(self._labels)) plt.xticks(tick_marks, self._labels, rotation=rotation) plt.yticks(tick_marks, self._labels) if isinstance(self._cm, list): thresh = (max(max(self._cm)) / 2.0) for (i, j) in itertools.product(range(len(self._labels)), range(len(self._labels))): plt.text(j, i, self._cm[i][j], horizontalalignment='center', color=('white' if (self._cm[i][j] > thresh) else 'black')) else: thresh = (self._cm.max() / 2.0) for (i, j) in itertools.product(range(len(self._labels)), range(len(self._labels))): plt.text(j, i, self._cm[(i, j)], horizontalalignment='center', color=('white' if (self._cm[(i, j)] > thresh) else 'black')) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label')
Plot the confusion matrix. Args: figsize: tuple (x, y) of ints. Sets the size of the figure rotation: the rotation angle of the labels on the x-axis.
codesearchnet
def authenticate(self, auth_token, auth_info, service_name): try: jwt_claims = self.get_jwt_claims(auth_token) except Exception as error: raise suppliers.UnauthenticatedException(u'Cannot decode the auth token', error) _check_jwt_claims(jwt_claims) user_info = UserInfo(jwt_claims) issuer = user_info.issuer if (issuer not in self._issuers_to_provider_ids): raise suppliers.UnauthenticatedException((u'Unknown issuer: ' + issuer)) provider_id = self._issuers_to_provider_ids[issuer] if (not auth_info.is_provider_allowed(provider_id)): raise suppliers.UnauthenticatedException((u'The requested method does not allow provider id: ' + provider_id)) audiences = user_info.audiences has_service_name = (service_name in audiences) allowed_audiences = auth_info.get_allowed_audiences(provider_id) intersected_audiences = set(allowed_audiences).intersection(audiences) if ((not has_service_name) and (not intersected_audiences)): raise suppliers.UnauthenticatedException(u'Audiences not allowed') return user_info
Authenticates the current auth token. Args: auth_token: the auth token. auth_info: the auth configurations of the API method being called. service_name: the name of this service. Returns: A constructed UserInfo object representing the identity of the caller. Raises: UnauthenticatedException: When * the issuer is not allowed; * the audiences are not allowed; * the auth token has already expired.
codesearchnet
def insert(self, fields, typecast=False): return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast})
Inserts a record >>> record = {'Name': 'John'} >>> airtable.insert(record) Args: fields(``dict``): Fields to insert. Must be dictionary with Column names as Key. typecast(``boolean``): Automatic data conversion from string values. Returns: record (``dict``): Inserted record
juraj-google-style
def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor]=None): pixel_values = pixel_values.type(self.visual.dtype) image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw) split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist() image_embeds = torch.split(image_embeds, split_sizes) return image_embeds
Encodes images into continuous embeddings that can be forwarded to the language model. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images. image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM.
github-repos
def _AddExtractionProcessStatusTableRow(self, process_status, table_view): used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory) sources = '' if ((process_status.number_of_produced_sources is not None) and (process_status.number_of_produced_sources_delta is not None)): sources = '{0:d} ({1:d})'.format(process_status.number_of_produced_sources, process_status.number_of_produced_sources_delta) events = '' if ((process_status.number_of_produced_events is not None) and (process_status.number_of_produced_events_delta is not None)): events = '{0:d} ({1:d})'.format(process_status.number_of_produced_events, process_status.number_of_produced_events_delta) table_view.AddRow([process_status.identifier, process_status.pid, process_status.status, used_memory, sources, events, process_status.display_name])
Adds an extraction process status table row. Args: process_status (ProcessStatus): processing status. table_view (CLITabularTableView): table view.
codesearchnet
def __init__(self, points, joggle=False):
    self.points = points
    dim = [len(i) for i in self.points]
    if max(dim) != min(dim):
        raise ValueError("Input points must all have the same dimension!")
    self.dim = dim[0]
    if joggle:
        options = "i QJ"
    else:
        options = "i Qt"
    output = qdelaunay(options, points)
    output.pop(0)
    self.vertices = [[int(i) for i in row.strip().split()] for row in output]
Initializes a DelaunayTri from points. Args: points ([[float]]): All the points as a sequence of sequences. e.g., [[-0.5, -0.5], [-0.5, 0.5], [0.5, -0.5], [0.5, 0.5]] joggle (bool): Use the qhull option to joggle inputs until a simplicial result is obtained instead of merging facets.
juraj-google-style
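A short sketch of how the constructor above might be used, assuming it is the DelaunayTri class shipped with pyhull; the import path is an assumption.

from pyhull.delaunay import DelaunayTri  # assumed import path

# Four corners of a unit square; all points must share the same dimension.
points = [[-0.5, -0.5], [-0.5, 0.5], [0.5, -0.5], [0.5, 0.5]]
tri = DelaunayTri(points, joggle=False)

# Each entry of tri.vertices lists the point indices forming one simplex.
print(tri.dim)       # 2
print(tri.vertices)  # e.g. [[2, 1, 0], [2, 1, 3]]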
def render_trees(trees, path_composer):
    trees = list(trees)

    def create_pub_cache(trees):
        """
        Create uuid -> DBPublication cache from all uuid's linked from `trees`.

        Args:
            trees (list): List of :class:`.Tree`.

        Returns:
            dict: {uuid: DBPublication}
        """
        sub_pubs_uuids = sum((x.collect_publications() for x in trees), [])
        uuid_mapping = {uuid: search_pubs_by_uuid(uuid) for uuid in set(sub_pubs_uuids)}
        return {uuid: pub[0] for (uuid, pub) in uuid_mapping.iteritems() if pub}

    pub_cache = create_pub_cache(trees)

    def render_tree(tree, ind=1):
        """
        Render the tree into HTML using :attr:`TREE_TEMPLATE`. Private trees
        are ignored.

        Args:
            tree (obj): :class:`.Tree` instance.
            ind (int, default 1): Indentation. This function is called
                recursively.

        Returns:
            str: Rendered string.
        """
        if (not tree.is_public):
            return ''
        rendered_tree = SimpleTemplate(TREE_TEMPLATE).render(
            tree=tree, render_tree=render_tree, ind=ind,
            path_composer=path_composer, pub_cache=pub_cache)
        ind_txt = (ind * ' ')
        return (ind_txt + ('\n' + ind_txt).join(rendered_tree.splitlines()))

    parent = tree_handler().get_parent(trees[0])
    link_up = (path_composer(parent) if parent else None)
    return SimpleTemplate(TREES_TEMPLATE).render(
        trees=trees, render_tree=render_tree, link_up=link_up)
Render list of `trees` to HTML. Args: trees (list): List of :class:`.Tree`. path_composer (fn reference): Function used to compose paths from UUID. Look at :func:`.compose_tree_path` from :mod:`.web_tools`. Returns: str: HTML representation of trees.
codesearchnet
def _AddSerializedEvent(self, event):
    identifier = identifiers.SQLTableIdentifier(
        self._CONTAINER_TYPE_EVENT,
        self._serialized_event_heap.number_of_events + 1)
    event.SetIdentifier(identifier)

    serialized_data = self._SerializeAttributeContainer(event)
    self._serialized_event_heap.PushEvent(event.timestamp, serialized_data)

    if self._serialized_event_heap.data_size > self._maximum_buffer_size:
        self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT)
Adds a serialized event. Args: event (EventObject): event. Raises: IOError: if the event cannot be serialized. OSError: if the event cannot be serialized.
juraj-google-style
def add_comment(self, line):
    if not isinstance(self.last_item, Comment):
        comment = Comment(self._structure)
        self._structure.append(comment)
    self.last_item.add_line(line)
    return self
Add a Comment object to the section. Used mainly during initial parsing. Args: line (str): one line in the comment
juraj-google-style
def start(self) -> asyncio.Future:
    if (os.name != 'nt'):
        signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
        for s in signals:
            self._event_loop.add_signal_handler(s, self.stop)

    future = asyncio.ensure_future(self._connect_and_read(), loop=self._event_loop)

    if (self.run_async or self._event_loop.is_running()):
        return future

    return self._event_loop.run_until_complete(future)
Starts an RTM Session with Slack. Makes an authenticated call to Slack's RTM API to retrieve a websocket URL and then connects to the message server. As events stream in we run any associated callbacks stored on the client. If 'auto_reconnect' is specified we retrieve a new url and reconnect any time the connection is lost unintentionally or an exception is thrown. Raises: SlackApiError: Unable to retrieve RTM URL from Slack.
codesearchnet
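A hedged usage sketch, assuming the start method above belongs to slack.RTMClient from the slackclient 2.x package; the token below is a placeholder.

import slack  # assumed: slackclient v2, where slack.RTMClient exposes start()

@slack.RTMClient.run_on(event='message')
def echo_text(**payload):
    # Called for every incoming message event while the RTM session is open.
    print(payload['data'].get('text', ''))

# Placeholder token -- start() blocks and streams events unless run_async=True.
rtm_client = slack.RTMClient(token='xoxb-not-a-real-token')
rtm_client.start()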
def bottom(self, features):
    if (not self._problem_hparams):
        log_warn('Without a Problem, T2TModel.bottom is a passthrough.')
        return features

    transformed_features = collections.OrderedDict()
    all_previous_modalities = []
    target_modality = _create_target_modality(self._problem_hparams.modality)

    for (feature_name, modality) in sorted(six.iteritems(self._problem_hparams.modality)):
        if (feature_name not in features):
            tf.logging.warning(('Missing feature %s - ignoring.' % feature_name))
            continue
        vocab_size = self._problem_hparams.vocab_size[feature_name]
        if ((vocab_size is not None) and hasattr(self._hparams, 'vocab_divisor')):
            vocab_size += ((- vocab_size) % self._hparams.vocab_divisor)
        modality_name = self._hparams.name.get(
            feature_name, modalities.get_name(modality))(self._hparams, vocab_size)
        if (feature_name in target_modality):
            if (len(target_modality) > 1):
                variable_scope_name = ('%s/%s' % (modality_name, feature_name))
            else:
                variable_scope_name = modality_name
            bottom = self._hparams.bottom.get(
                feature_name, modalities.get_targets_bottom(modality))
            with tf.variable_scope(variable_scope_name) as vs:
                self._add_variable_scope(variable_scope_name, vs)
                log_info("Transforming feature '%s' with %s.targets_bottom",
                         feature_name, modality_name)
                transformed_features[feature_name] = bottom(
                    features[feature_name], self._hparams, vocab_size)
        else:
            bottom = self._hparams.bottom.get(feature_name, modalities.get_bottom(modality))
            do_reuse = (modality_name in all_previous_modalities)
            with tf.variable_scope(modality_name, reuse=do_reuse) as vs:
                self._add_variable_scope(modality_name, vs)
                log_info("Transforming feature '%s' with %s.bottom",
                         feature_name, modality_name)
                transformed_features[feature_name] = bottom(
                    features[feature_name], self._hparams, vocab_size)
            all_previous_modalities.append(modality_name)

    for key in features:
        if (key not in transformed_features):
            transformed_features[key] = features[key]
        else:
            transformed_features[(key + '_raw')] = features[key]

    return transformed_features
Transforms features to feed into body. Args: features: dict of str to Tensor. Typically it is the preprocessed data batch after Problem's preprocess_example(). Returns: transformed_features: dict of same key-value pairs as features. The value Tensors are newly transformed.
codesearchnet
def to_event(self, event_type, field_name=None, depth=None):
    if self.ion_event is None:
        value = self
        if isinstance(self, IonPyNull):
            value = None
        self.ion_event = IonEvent(
            event_type, ion_type=self.ion_type, value=value, field_name=field_name,
            annotations=self.ion_annotations, depth=depth)
    return self.ion_event
Constructs an IonEvent from this _IonNature value. Args: event_type (IonEventType): The type of the resulting event. field_name (Optional[text]): The field name associated with this value, if any. depth (Optional[int]): The depth of this value. Returns: An IonEvent with the properties from this value.
juraj-google-style
def convert2wavenumber(rsr):
    retv = {}
    for chname in rsr.keys():
        retv[chname] = {}
        for det in rsr[chname].keys():
            retv[chname][det] = {}
            if ('wavenumber' in rsr[chname][det].keys()):
                retv[chname][det] = rsr[chname][det].copy()
                LOG.debug('RSR data already in wavenumber space. No conversion needed.')
                continue
            for sat in rsr[chname][det].keys():
                if (sat == 'wavelength'):
                    wnum = (1.0 / (0.0001 * rsr[chname][det][sat]))
                    retv[chname][det]['wavenumber'] = wnum[::(- 1)]
                elif (sat == 'response'):
                    if (type(rsr[chname][det][sat]) is dict):
                        retv[chname][det][sat] = {}
                        for name in rsr[chname][det][sat].keys():
                            resp = rsr[chname][det][sat][name]
                            retv[chname][det][sat][name] = resp[::(- 1)]
                    else:
                        resp = rsr[chname][det][sat]
                        retv[chname][det][sat] = resp[::(- 1)]

    unit = 'cm-1'
    si_scale = 100.0
    return (retv, {'unit': unit, 'si_scale': si_scale})
Take an rsr data set with all channels and detectors for an instrument, each with a set of wavelengths and normalised responses, and convert to wavenumbers and responses :rsr: Relative Spectral Response function (all bands) Returns: :retv: Relative Spectral Responses in wavenumber space :info: Dictionary with scale (to convert to SI units) and unit
codesearchnet
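The wavelength branch above applies wavenumber = 1 / (1e-4 * wavelength), converting micrometres to cm-1 and then reversing the arrays so they stay aligned. A small self-contained check of that arithmetic:

import numpy as np

# Wavelengths in micrometres, as expected by the 'wavelength' branch above.
wavelengths_um = np.array([3.7, 10.8, 12.0])

# 1 / (1e-4 * lambda) gives wavenumbers in cm-1; the [::-1] reversal keeps
# the result in ascending wavenumber order, matching the conversion above.
wavenumbers = 1.0 / (1e-4 * wavelengths_um)
print(wavenumbers[::-1])  # ~[833.3, 925.9, 2702.7]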
def copy_modified_gene(self, modified_gene, ignore_model_attributes=True):
    ignore = ['_model', '_reaction', '_functional', 'model', 'reaction', 'functional']
    for attr in filter(
            (lambda a: ((not a.startswith('__'))
                        and (not isinstance(getattr(type(self), a, None), property))
                        and (not callable(getattr(self, a))))),
            dir(modified_gene)):
        if ((attr not in ignore) and ignore_model_attributes):
            setattr(self, attr, getattr(modified_gene, attr))
Copy attributes of a Gene object over to this Gene, given that the modified gene has the same ID. Args: modified_gene (Gene, GenePro): Gene with modified attributes that you want to copy over. ignore_model_attributes (bool): If you want to ignore copying over attributes related to metabolic models.
codesearchnet
def get_month_description(self):
    return self.get_segment_description(
        self._expression_parts[4],
        '',
        (lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime('%B')),
        (lambda s: _(', every {0} months').format(s)),
        (lambda s: _(', {0} through {1}')),
        (lambda s: _(', only in {0}')))
Generates a description for only the MONTH portion of the expression Returns: The MONTH description
codesearchnet
def model_loader(gem_file_path, gem_file_type):
    if ((gem_file_type.lower() == 'xml') or (gem_file_type.lower() == 'sbml')):
        model = read_sbml_model(gem_file_path)
    elif (gem_file_type.lower() == 'mat'):
        model = load_matlab_model(gem_file_path)
    elif (gem_file_type.lower() == 'json'):
        model = load_json_model(gem_file_path)
    else:
        raise ValueError('File type must be "sbml", "xml", "mat", or "json".')
    return model
Consolidated function to load a GEM using COBRApy. Specify the file type being loaded. Args: gem_file_path (str): Path to model file gem_file_type (str): GEM model type - ``sbml`` (or ``xml``), ``mat``, or ``json`` format Returns: COBRApy Model object.
codesearchnet
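A usage sketch for model_loader, assuming the COBRApy loaders it calls (read_sbml_model, load_matlab_model, load_json_model) are in scope; the model path below is hypothetical.

# Hypothetical file path -- replace with a real genome-scale model file.
model = model_loader('models/iJO1366.xml', 'sbml')

# The returned object is a cobra.Model, so the usual COBRApy accessors apply.
print(len(model.reactions), len(model.metabolites), len(model.genes))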
def convert_to_dimension(d):
    if d is None:
        return None
    if isinstance(d, Dimension):
        if not isinstance(d.name, str) or not isinstance(d.size, int):
            raise ValueError("Bad dimension %s" % (d,))
        return d
    name, size = d
    if isinstance(name, str) and isinstance(size, int):
        return Dimension(name, size)
    else:
        raise ValueError("could not convert %s to Dimension" % (d,))
Converts input to a Dimension. Args: d: Dimension, tuple (string, int), or None. Returns: Dimension or None. Raises: ValueError: If d cannot be converted to a Dimension.
juraj-google-style
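A small sketch of the accepted inputs, assuming this is the Mesh TensorFlow helper and that Dimension is the named-tuple type from the same module; the mtf import path is an assumption.

import mesh_tensorflow as mtf  # assumed package exposing convert_to_dimension

# A (name, size) tuple is converted to a Dimension; None passes through unchanged.
batch = mtf.convert_to_dimension(("batch", 32))
print(batch)                           # Dimension(name='batch', size=32)
print(mtf.convert_to_dimension(None))  # None

# A malformed pair such as ("batch", "32") raises ValueError.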
async def updateCronJob(self, iden, query):
    cron = self.cell.agenda.appts.get(iden)
    if (cron is None):
        raise s_exc.NoSuchIden()
    self._trig_auth_check(cron.useriden)
    (await self.cell.agenda.mod(iden, query))
Change an existing cron job's query Args: iden (bytes): The iden of the cron job to be changed
codesearchnet
def WriteSessionCompletion(self, aborted=False):
    self._RaiseIfNotWritable()

    if (self._storage_type != definitions.STORAGE_TYPE_SESSION):
        raise IOError('Unsupported storage type.')

    self._session.aborted = aborted
    session_completion = self._session.CreateSessionCompletion()
    self._storage_file.WriteSessionCompletion(session_completion)
Writes session completion information. Args: aborted (Optional[bool]): True if the session was aborted. Raises: IOError: if the storage type is not supported or when the storage writer is closed. OSError: if the storage type is not supported or when the storage writer is closed.
codesearchnet
def delete(self):
    self.manager.session.delete(self._uri)
    self.manager._name_uri_cache.delete(
        self.properties.get(self.manager._name_prop, None))
Delete this NIC. Authorization requirements: * Object-access permission to the Partition containing this NIC. * Task permission to the "Partition Details" task. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
codesearchnet
def MakeSelfExtractingZip(self, payload_data, output_path):
    context = self.context + ["Client Context"]

    src_zip = zipfile.ZipFile(io.BytesIO(payload_data), mode="r")
    zip_data = io.BytesIO()
    output_zip = zipfile.ZipFile(
        zip_data, mode="w", compression=zipfile.ZIP_DEFLATED)

    config_file_name = config.CONFIG.Get(
        "ClientBuilder.config_filename", context=context)
    for template_file in src_zip.namelist():
        if template_file != config_file_name:
            CopyFileInZip(src_zip, template_file, output_zip)

    client_config_content = self.GetClientConfig(context)
    output_zip.writestr(
        config_file_name,
        client_config_content.encode("utf-8"),
        compress_type=zipfile.ZIP_STORED)

    output_zip.comment = b"$AUTORUN$>%s" % config.CONFIG.Get(
        "ClientBuilder.autorun_command_line", context=context).encode("utf-8")
    output_zip.close()

    utils.EnsureDirExists(os.path.dirname(output_path))
    with open(output_path, "wb") as fd:
        stub_data = io.BytesIO()
        unzipsfx_stub = config.CONFIG.Get(
            "ClientBuilder.unzipsfx_stub", context=context)
        stub_raw = open(unzipsfx_stub, "rb").read()

        if b"level=\"requireAdministrator" not in stub_raw:
            raise RuntimeError("Bad unzip binary in use. Not compiled with the"
                               "requireAdministrator manifest option.")

        stub_data.write(stub_raw)
        SetPeSubsystem(
            stub_data,
            console=config.CONFIG.Get("ClientBuilder.console", context=context))

        end_of_file = zip_data.tell() + stub_data.tell()
        offset_to_rsrc = stub_data.getvalue().find(b".rsrc")
        stub_data.seek(offset_to_rsrc + 20)
        start_of_rsrc_section = struct.unpack("<I", stub_data.read(4))[0]
        stub_data.seek(offset_to_rsrc + 16)
        stub_data.write(struct.pack("<I", end_of_file - start_of_rsrc_section))

        out_data = io.BytesIO()
        out_data.write(stub_data.getvalue())
        out_data.write(zip_data.getvalue())
        fd.write(out_data.getvalue())

    if self.signer:
        self.signer.SignFile(output_path)

    logging.info("Deployable binary generated at %s", output_path)
    return output_path
Repack the installer into the payload. Args: payload_data: data payload for zip file output_path: filename for the zip output Raises: RuntimeError: if the ClientBuilder.unzipsfx_stub doesn't require admin. Returns: output_path: filename string of zip output file
juraj-google-style
def do_get(self, uri):
    self.validate_resource_uri(uri)
    return self._connection.get(uri)
Helps to make GET requests Args: uri: URI of the resource Returns: The resource data
juraj-google-style
def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG):
    if df.empty:
        return {
            "x": [],
            "y": [],
            "mode": mode
        }

    _data = []
    for x, y in xypairs:
        if (x in df.columns) and (y in df.columns):
            _data.append(
                {
                    "x": df[x].values.tolist(),
                    "y": df[y].values.tolist(),
                    "mode": mode
                }
            )

    return {
        "data": _data,
        "layout": layout,
        "config": config
    }
Basic line plot. Converts a dataframe to JSON chart data for a line plot. Args: df (pandas.DataFrame): input dataframe xypairs (list): list of tuples containing column names mode (str): plotly.js mode (e.g. lines) layout (dict): layout parameters config (dict): config parameters
juraj-google-style
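A usage sketch for line_plot, assuming the function and its module-level _BASE_CONFIG default are in scope.

import pandas as pd

df = pd.DataFrame({
    "year": [2015, 2016, 2017, 2018],
    "sales": [10, 12, 9, 15],
})

# One (x, y) column pair drawn as a line trace; pairs missing from df are skipped.
fig = line_plot(df, xypairs=[("year", "sales")], mode="lines",
                layout={"title": "Sales by year"})
print(fig["data"][0]["x"])  # [2015, 2016, 2017, 2018]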
def use(self, middleware, path=None):
    self.log.info(' Using middleware {}', middleware)
    if (path is None):
        path = MiddlewareChain.ROOT_PATTERN
    self.add(HTTPMethod.ALL, path, middleware)
    return self
Call the provided middleware upon requests matching the path. If path is not provided or None, all requests will match. Args: middleware (callable): Callable with the signature ``(res, req) -> None`` path (Optional[str or regex]): a specific path the request must match for the middleware to be called. Returns: This router
codesearchnet
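A sketch of registering middleware with use, assuming a router instance exposing this method and a callable with the (res, req) signature described in the docstring above; the handler name and paths are illustrative.

def log_requests(res, req):
    # Runs for every matching request; no return value is expected.
    print("incoming request")

router.use(log_requests)               # no path: matches all requests
router.use(log_requests, path='/api')  # only requests matching '/api'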
def temp_shell_task(cls, inp, mpi_procs=1, workdir=None, manager=None):
    import tempfile
    workdir = (tempfile.mkdtemp() if (workdir is None) else workdir)
    if (manager is None):
        manager = TaskManager.from_user_config()
    task = cls.from_input(inp, workdir=workdir,
                          manager=manager.to_shell_manager(mpi_procs=mpi_procs))
    task.set_name('temp_shell_task')
    return task
Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc. Mainly used for invoking Abinit to get important parameters needed to prepare the real task. Args: mpi_procs: Number of MPI processes to use.
codesearchnet
def _prompt_split_image(self, num_patches):
    # The right-hand side was truncated in the source; the per-tile patch count is
    # presumably the squared ratio of image size to patch size.
    img_patches_per_tile = (self.img_size // self.patch_size) ** 2
    img_string = f'{self.start_of_img_token}'
    if num_patches > 1:
        for idx in range(1, num_patches):
            img_string += f'{self.tile_token}_{idx}' + f'{self.img_patch_token}' * img_patches_per_tile
    img_string += f'{self.tile_global_token}' + f'{self.img_patch_token}' * img_patches_per_tile
    img_string += f'{self.end_of_img_token}'
    return img_string
Create a structured string representation of image tokens Args: num_patches: Number of patches in the image Returns: String with appropriate image tokens
github-repos
def flag_last(o):
    it = o.__iter__()
    try:
        e = next(it)
    except StopIteration:
        return
    while True:
        try:
            nxt = next(it)
            yield (False, e)
            e = nxt
        except StopIteration:
            yield (True, e)
            break
Flags the last loop of an iterator. Consumes an iterator, buffers one instance so it can look ahead. Returns True on last iteration. Args: * o: An iterator instance. Returns: * A tuple of (True/False, iteration). Returns True, next on StopIteration.
github-repos
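flag_last is self-contained, so a fully runnable example is straightforward:

# Prints a trailing marker only on the final element.
for is_last, word in flag_last(["alpha", "beta", "gamma"]):
    suffix = " (last)" if is_last else ""
    print(word + suffix)

# Output:
# alpha
# beta
# gamma (last)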
def get_vmss_vm(access_token, subscription_id, resource_group, vmss_name, instance_id):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', resource_group,
                        '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
                        '/virtualMachines/', str(instance_id),
                        '?api-version=', COMP_API])
    return do_get(endpoint, access_token)
Get individual VMSS VM details. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. instance_id (int): VM ID of the scale set VM. Returns: HTTP response. JSON body of VMSS VM model view.
codesearchnet
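A hedged usage sketch for get_vmss_vm; the token, subscription id, resource group, and scale-set names below are placeholders, and a real call needs a valid Azure bearer token and existing resources.

# Placeholder values -- substitute real identifiers before running.
access_token = '<valid-azure-bearer-token>'
response = get_vmss_vm(access_token,
                       'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',  # subscription id
                       'my-resource-group', 'my-vmss', instance_id=0)

# The JSON body follows the VMSS VM model view.
print(response['properties']['provisioningState'])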
def chat_meMessage(self, *, channel: str, text: str, **kwargs) -> SlackResponse:
    kwargs.update({'channel': channel, 'text': text})
    return self.api_call('chat.meMessage', json=kwargs)
Share a me message into a channel. Args: channel (str): The channel id. e.g. 'C1234567890' text (str): The message you'd like to share. e.g. 'Hello world'
codesearchnet
def writeCmdMsg(self, msg):
    ekm_log("(writeCmdMsg | " + self.getContext() + ") " + msg)
    self.m_command_msg = msg
Internal method to set the command result string. Args: msg (str): Message built during command.
juraj-google-style
def generate(cache_fn):
    if not os.path.exists(cache_fn):
        print >> sys.stderr, "Can't access `%s`!" % cache_fn
        sys.exit(1)

    with SqliteDict(cache_fn) as db:
        for item in _pick_keywords(db):
            yield item
Go through `cache_fn` and filter keywords. Store them in `keyword_list.json`. Args: cache_fn (str): Path to the file with cache. Returns: list: List of :class:`KeywordInfo` objects.
juraj-google-style