code: string (lengths 20 to 4.93k)
docstring: string (lengths 33 to 1.27k)
source: string (3 classes)
def highlight(__text: str, *, lexer: str = 'diff', formatter: str = 'terminal') -> str: if sys.stdout.isatty(): lexer = get_lexer_by_name(lexer) formatter = get_formatter_by_name(formatter) __text = pyg_highlight(__text, lexer, formatter) return __text
Return text highlighted using ``pygments``. Returns text untouched if colour output is not enabled. See also: :pypi:`Pygments` Args: __text: Text to highlight lexer: Pygments lexer to use formatter: Pygments formatter to use Returns: Syntax highlighted output, when possible
juraj-google-style
def transpose(self, permutation: Optional[List[int]] = None) -> 'TensorFluent': if permutation == []: return self t = tf.transpose(self.tensor, permutation) if permutation != [] else self.tensor scope = self.scope.as_list() batch = self.batch return TensorFluent(t, scope, batch=batch)
Returns a TensorFluent for the transpose operation with given `permutation`. Args: permutation: The output's shape permutation. Returns: A TensorFluent wrapping the transpose operation.
juraj-google-style
def restore_state(self, state): super(ReferenceController, self).restore_state(state) state_name = state.get('state_name') state_version = state.get('state_version') if state_name != self.STATE_NAME or state_version != self.STATE_VERSION: raise ArgumentError("Invalid emulated device state name or version", found=(state_name, state_version), expected=(self.STATE_NAME, self.STATE_VERSION)) self.app_info = state.get('app_info', (0, "0.0")) self.os_info = state.get('os_info', (0, "0.0")) self.sensor_log.prepare_for_restore() self.remote_bridge.restore(state.get('remote_bridge', {})) self.tile_manager.restore(state.get('tile_manager', {})) self.config_database.restore(state.get('config_database', {})) self.sensor_log.restore(state.get('sensor_log', {}))
Restore the current state of this emulated object. Args: state (dict): A previously dumped state produced by dump_state.
juraj-google-style
def logistic(x: Union[(float, np.ndarray)], k: float, theta: float) -> Optional[float]: if ((x is None) or (k is None) or (theta is None)): return None return (1 / (1 + np.exp(((- k) * (x - theta)))))
r""" Standard logistic function. .. math:: y = \frac {1} {1 + e^{-k (x - \theta)}} Args: x: :math:`x` k: :math:`k` theta: :math:`\theta` Returns: :math:`y`
codesearchnet
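As a quick illustration of the logistic helper above, here is a self-contained sketch (numpy is the only dependency; the printed values are the expected results):

import numpy as np

def logistic(x, k, theta):
    # y = 1 / (1 + exp(-k * (x - theta)))
    return 1.0 / (1.0 + np.exp(-k * (x - theta)))

# At x == theta the curve sits exactly at its midpoint, 0.5.
print(logistic(2.0, k=1.0, theta=2.0))  # 0.5
# A larger k makes the transition around theta steeper.
print(logistic(3.0, k=4.0, theta=2.0))  # ~0.982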
def CanonicalPathToLocalPath(path): path = path.replace('/\\', '\\') path = path.replace('/', '\\') m = re.match('\\\\([a-zA-Z]):(.*)$', path) if m: path = ('%s:\\%s' % (m.group(1), m.group(2).lstrip('\\'))) return path
r"""Converts the canonical paths as used by GRR to OS specific paths. Due to the inconsistencies between handling paths in windows we need to convert a path to an OS specific version prior to using it. This function should be called just before any OS specific functions. Canonical paths on windows have: - / instead of \. - Begin with /X:// where X is the drive letter. Args: path: A canonical path specification. Returns: A windows specific path.
codesearchnet
def retrieve_reviewers(self, product): if not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) return list(self.graph.predecessors(product))
Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who reviewed the product. Raises: TypeError: when the given product isn't an instance of the product class specified when this graph was constructed.
juraj-google-style
def titles(self, unique=False): if unique: return tools.uniqued((title for (_, title) in self.iterfiles())) return [title for (_, title) in self.iterfiles()]
Return a list of all available spreadsheet titles. Args: unique (bool): drop duplicates Returns: list: list of title/name strings
codesearchnet
def has_types(self, types, all_=True): func = all if all_ else any return func([self.get_stim(t) for t in listify(types)])
Check whether the current component list matches the Stim types in the types argument. Args: types (Stim, list): a Stim class or iterable of Stim classes. all_ (bool): if True, all input types must match; if False, at least one input type must match. Returns: True if the passed types match the component list (all of them when `all_` is True, at least one otherwise), otherwise False.
juraj-google-style
def make_new_node(self, distance, angle): return Node((((cos((- angle)) * distance) + self.pos[0]), ((sin((- angle)) * distance) + self.pos[1])))
Make a new node from an existing one. This method creates a new node from the given distance and angle. The position of the new node is calculated with: x2 = cos(-angle)*distance+x1 y2 = sin(-angle)*distance+y1 Args: distance (float): The distance from the original node to the new node. angle (rad): The angle between the old and new node, relative to the horizontal. Returns: object: The node with the calculated position.
codesearchnet
def validate_field_value_type(value_type, in_mapping_key=False, allow_forward_references=False): if isinstance(value_type, str) or type_annotations.is_forward_ref(value_type): if allow_forward_references: return else: raise TypeError(f'Unresolved forward reference {value_type!r}') if value_type in (int, float, str, bytes, bool, None, _NoneType, dtypes.DType): return elif value_type in (tensor.Tensor, tensor_shape.TensorShape) or (isinstance(value_type, type) and _issubclass(value_type, composite_tensor.CompositeTensor)): if in_mapping_key: raise TypeError(f'Mapping had a key {value_type.__name__!r} with type {type(value_type).__name__!r}') elif type_annotations.is_generic_tuple(value_type) or type_annotations.is_generic_union(value_type): type_args = type_annotations.get_generic_type_args(value_type) if len(type_args) == 2 and type_args[1] is Ellipsis and type_annotations.is_generic_tuple(value_type): validate_field_value_type(type_args[0], in_mapping_key, allow_forward_references) else: for arg in type_annotations.get_generic_type_args(value_type): validate_field_value_type(arg, in_mapping_key, allow_forward_references) elif type_annotations.is_generic_mapping(value_type): key_type, value_type = type_annotations.get_generic_type_args(value_type) validate_field_value_type(key_type, True, allow_forward_references) validate_field_value_type(value_type, in_mapping_key, allow_forward_references) elif isinstance(value_type, type): raise TypeError(f'Unsupported type annotation {value_type.__name__!r}') else: raise TypeError(f'Unsupported type annotation {value_type!r}')
Checks that `value_type` contains only supported type annotations. Args: value_type: The type annotation to check. in_mapping_key: True if `value_type` is nested in the key of a mapping. allow_forward_references: If false, then raise an exception if a `value_type` contains a forward reference (i.e., a string literal). Raises: TypeError: If `value_type` contains an unsupported type annotation.
github-repos
def CallState(self, messages=None, next_state='', client_id=None, request_data=None, start_time=None): if (messages is None): messages = [] if (not next_state): raise ValueError("next_state can't be empty.") request_state = rdf_flow_runner.RequestState(id=random.UInt32(), session_id=self.context.session_id, client_id=client_id, next_state=next_state) if request_data: request_state.data = rdf_protodict.Dict().FromDict(request_data) self.QueueRequest(request_state, timestamp=start_time) if ((not messages) or (not isinstance(messages[(- 1)], rdf_flows.GrrStatus))): messages.append(rdf_flows.GrrStatus()) for (i, payload) in enumerate(messages): if isinstance(payload, rdfvalue.RDFValue): msg = rdf_flows.GrrMessage(session_id=self.session_id, request_id=request_state.id, response_id=(1 + i), auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED, payload=payload, type=rdf_flows.GrrMessage.Type.MESSAGE) if isinstance(payload, rdf_flows.GrrStatus): msg.type = rdf_flows.GrrMessage.Type.STATUS else: raise flow_runner.FlowRunnerError(('Bad message %s of type %s.' % (payload, type(payload)))) self.QueueResponse(msg, timestamp=start_time) self.QueueNotification(session_id=self.session_id, timestamp=start_time)
This method is used to asynchronously schedule a new hunt state. The state will be invoked at a later time and will receive all the messages we send. Args: messages: A list of rdfvalues to send. If the last one is not a GrrStatus, we append an OK Status. next_state: The state in this hunt to be invoked with the responses. client_id: ClientURN to use in scheduled requests. request_data: Any dict provided here will be available in the RequestState protobuf. The Responses object maintains a reference to this protobuf for use in the execution of the state method. (so you can access this data by responses.request). start_time: Schedule the state at this time. This delays notification and messages for processing into the future. Raises: ValueError: on arguments error.
codesearchnet
def remove_plugin(self, f): if f.endswith('.py'): plugin_name = os.path.splitext(os.path.basename(f))[0] print('- %s %sREMOVED' % (plugin_name, color.Red)) print('\t%sNote: still in memory, restart Workbench to remove...%s' % (color.Yellow, color.Normal))
Removing a deleted plugin. Args: f: the filepath for the plugin.
juraj-google-style
def message_to_extension(msg: message.Message, extension_cls: Type[_T]) -> _T: extension = extension_cls() add_message_to_extension(msg, extension) return extension
Converts an Extension profile into a generic Extension type. Args: msg: The Message to convert. extension_cls: The type of FHIR Extension to convert to. Returns: An instance of extension_cls.
github-repos
def action_scope(self, action_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: return dict(zip(self.rddl.domain.action_fluent_ordering, action_fluents))
Returns a partial scope with current action-fluents. Args: action_fluents (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from action fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
juraj-google-style
def lcm(*numbers): n = 1 for i in numbers: n = (i * n) // math.gcd(i, n) return n
Return lowest common multiple of a sequence of numbers. Args: \*numbers: Sequence of numbers. Returns: (int) Lowest common multiple of numbers.
juraj-google-style
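A minimal standalone illustration of the reduce-by-gcd rule that lcm above relies on (lcm(a, b) == a * b // gcd(a, b), folded across the sequence):

import math
from functools import reduce

def lcm(*numbers):
    # Fold the pairwise lcm rule across the whole sequence.
    return reduce(lambda a, b: a * b // math.gcd(a, b), numbers, 1)

print(lcm(4, 6))      # 12, not the plain product 24
print(lcm(3, 5, 10))  # 30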
def _get_cert_expiration_time(headers): cache_control = headers.get('Cache-Control', '') for entry in cache_control.split(','): match = _MAX_AGE_REGEX.match(entry) if match: cache_time_seconds = int(match.group(1)) break else: return 0 age = headers.get('Age') if (age is not None): try: age = int(age) except ValueError: age = 0 cache_time_seconds -= age return max(0, cache_time_seconds)
Get the expiration time for a cert, given the response headers. Get expiration time from the headers in the result. If we can't get a time from the headers, this returns 0, indicating that the cert shouldn't be cached. Args: headers: A dict containing the response headers from the request to get certs. Returns: An integer with the number of seconds the cert should be cached. This value is guaranteed to be >= 0.
codesearchnet
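The _MAX_AGE_REGEX used above is not shown in the row; the sketch below uses a plausible stand-in for it (an assumption, not the project's definition) to show the max-age / Age arithmetic:

import re

_MAX_AGE_REGEX = re.compile(r'\s*max-age\s*=\s*(\d+)\s*')  # assumed pattern

def cert_cache_seconds(headers):
    # Cache-Control max-age minus the Age header, floored at zero.
    for entry in headers.get('Cache-Control', '').split(','):
        match = _MAX_AGE_REGEX.match(entry)
        if match:
            max_age = int(match.group(1))
            break
    else:
        return 0
    try:
        age = int(headers.get('Age', 0))
    except ValueError:
        age = 0
    return max(0, max_age - age)

print(cert_cache_seconds({'Cache-Control': 'public, max-age=3600', 'Age': '600'}))  # 3000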
def ProduceAnalysisReport(self, plugin): analysis_report = plugin.CompileReport(self) if not analysis_report: return analysis_report.time_compiled = timelib.Timestamp.GetNow() plugin_name = getattr(analysis_report, 'plugin_name', plugin.plugin_name) if plugin_name: analysis_report.plugin_name = plugin_name if self._event_filter_expression: analysis_report.filter_string = self._event_filter_expression self._storage_writer.AddAnalysisReport(analysis_report) self.number_of_produced_analysis_reports += 1 self.number_of_produced_event_tags = ( self._storage_writer.number_of_event_tags) self.last_activity_timestamp = time.time()
Produces an analysis report. Args: plugin (AnalysisPlugin): plugin.
juraj-google-style
def direct_normal_illuminance(self, value=999999.0): if value is not None: try: value = float(value) except ValueError: raise ValueError( 'value {} need to be of type float ' 'for field `direct_normal_illuminance`'.format(value)) if value < 0.0: raise ValueError('value need to be greater or equal 0.0 ' 'for field `direct_normal_illuminance`') self._direct_normal_illuminance = value
Corresponds to IDD Field `direct_normal_illuminance` will be missing if >= 999900 Args: value (float): value for IDD Field `direct_normal_illuminance` Unit: lux value >= 0.0 Missing value: 999999.0 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def hash32(data: Any, seed=0) -> int: with MultiTimerContext(timer, TIMING_HASH): c_data = to_str(data) if mmh3: return mmh3.hash(c_data, seed=seed) py_data = to_bytes(c_data) py_unsigned = murmur3_x86_32(py_data, seed=seed) return twos_comp_to_signed(py_unsigned, n_bits=32)
Non-cryptographic, deterministic, fast hash. Args: data: data to hash seed: seed Returns: signed 32-bit integer
juraj-google-style
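The twos_comp_to_signed helper referenced above is not included in the row; a plausible sketch of that conversion (an assumption about its behaviour) is:

def twos_comp_to_signed(value: int, n_bits: int = 32) -> int:
    # Reinterpret an unsigned n-bit integer as its two's-complement signed value.
    if value >= 1 << (n_bits - 1):
        value -= 1 << n_bits
    return value

print(twos_comp_to_signed(0x00000001))  # 1
print(twos_comp_to_signed(0xFFFFFFFF))  # -1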
def apply_actions(self, actions): modified = [] for a in actions: if "dict" in a: k = a["dict"] modified.append(k) self.vi[k] = self.modify_object(a["action"], self.vi[k]) elif "file" in a: self.modify(a["action"], a["file"]) else: raise ValueError("Unrecognized format: {}".format(a)) for f in modified: self.vi[f].write_file(f)
Applies a list of actions to the Vasp Input Set and rewrites modified files. Args: actions [dict]: A list of actions of the form {'file': filename, 'action': moddermodification} or {'dict': vaspinput_key, 'action': moddermodification}
juraj-google-style
def fail_api(channel): gui = ui_embed.UI( channel, "Couldn't get stats off RLTrackerNetwork.", "Maybe the API changed, please tell Infraxion.", modulename=modulename, colour=0x0088FF ) return gui
Creates an embed UI for when the API call didn't work Args: channel (discord.Channel): The Discord channel to bind the embed to Returns: ui (ui_embed.UI): The embed UI object
juraj-google-style
def process_alias_export_namespace(namespace): namespace.export_path = os.path.abspath(namespace.export_path) if os.path.isfile(namespace.export_path): raise CLIError(FILE_ALREADY_EXISTS_ERROR.format(namespace.export_path)) export_path_dir = os.path.dirname(namespace.export_path) if (not os.path.isdir(export_path_dir)): os.makedirs(export_path_dir) if os.path.isdir(namespace.export_path): namespace.export_path = os.path.join(namespace.export_path, ALIAS_FILE_NAME)
Validate input arguments when the user invokes 'az alias export'. Args: namespace: argparse namespace object.
codesearchnet
def GetEntries(self, parser_mediator, match=None, **unused_kwargs): format_version = match.get('WebHistoryFileVersion', None) if (format_version != 1): parser_mediator.ProduceExtractionWarning('unsupported Safari history version: {0!s}'.format(format_version)) return if ('WebHistoryDates' not in match): return for history_entry in match.get('WebHistoryDates', {}): last_visited_date = history_entry.get('lastVisitedDate', None) if (last_visited_date is None): parser_mediator.ProduceExtractionWarning('missing last visited date') continue try: timestamp = float(last_visited_date) except (TypeError, ValueError): parser_mediator.ProduceExtractionWarning('unable to convert last visited date {0:s}'.format(last_visited_date)) continue display_title = history_entry.get('displayTitle', None) event_data = SafariHistoryEventData() if (display_title != event_data.title): event_data.display_title = display_title event_data.title = history_entry.get('title', None) event_data.url = history_entry.get('', None) event_data.visit_count = history_entry.get('visitCount', None) event_data.was_http_non_get = history_entry.get('lastVisitWasHTTPNonGet', None) timestamp = int(timestamp) date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts Safari history items. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
codesearchnet
def rmtree(self, exclude_wildcard=''): if (not exclude_wildcard): shutil.rmtree(self.workdir) else: w = WildCard(exclude_wildcard) for (dirpath, dirnames, filenames) in os.walk(self.workdir): for fname in filenames: path = os.path.join(dirpath, fname) if (not w.match(fname)): os.remove(path)
Remove all files and directories in the working directory. Args: exclude_wildcard: Optional string with regular expressions separated by `|`. Files matching one of the regular expressions will be preserved. Example: exclude_wildcard="*.nc|*.txt" preserves all the files whose extension is in ["nc", "txt"].
codesearchnet
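The WildCard helper used above is not shown; a rough standalone sketch of the '|'-separated exclude-pattern idea, using fnmatch as a stand-in (an assumption about WildCard's behaviour):

import fnmatch

def matches_any(fname, exclude_wildcard):
    # True if fname matches one of the '|'-separated patterns.
    return any(fnmatch.fnmatch(fname, pat) for pat in exclude_wildcard.split('|'))

print(matches_any('results.nc', '*.nc|*.txt'))   # True  -> file would be preserved
print(matches_any('scratch.tmp', '*.nc|*.txt'))  # False -> file would be removed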
def upload(self, file_path, uri=None, timeout=-1): if not uri: uri = self._uri upload_file_name = os.path.basename(file_path) task, entity = self._connection.post_multipart_with_response_handling(uri, file_path, upload_file_name) if not task: return entity return self._task_monitor.wait_for_task(task, timeout)
Makes a multipart request. Args: file_path: File to upload. uri: A specific URI (optional). timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Response body.
juraj-google-style
def _uses_buffer_offset(model: schema_fb.ModelT) -> bool: if not model.metadata: return False return any(map(lambda metadata: metadata.name.decode('utf-8') == 'buffer_location', model.metadata))
Determines whether the model is using an offset buffer. Args: model: A TFLite model. Returns: True iff the model is using offset buffers. Offset buffers are enabled by the flag `_experimental_use_buffer_offset`.
github-repos
def extract_attribute_grid(self, model_grid, potential=False, future=False): if potential: var_name = model_grid.variable + "-potential" timesteps = np.arange(self.start_time - 1, self.end_time) elif future: var_name = model_grid.variable + "-future" timesteps = np.arange(self.start_time + 1, self.end_time + 2) else: var_name = model_grid.variable timesteps = np.arange(self.start_time, self.end_time + 1) self.attributes[var_name] = [] for ti, t in enumerate(timesteps): self.attributes[var_name].append( model_grid.data[t - model_grid.start_hour, self.i[ti], self.j[ti]])
Extracts the data from a ModelOutput or ModelGrid object within the bounding box region of the STObject. Args: model_grid: A ModelGrid or ModelOutput Object potential: Extracts from the time before instead of the same time as the object future: Extracts from the time after instead of the same time as the object
juraj-google-style
def easeInOutQuad(n): _checkRange(n) if n < 0.5: return 2 * n**2 else: n = n * 2 - 1 return -0.5 * (n*(n-2) - 1)
A quadratic tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
juraj-google-style
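Sampling the tween above at a few points shows the accelerate-then-decelerate shape; this standalone copy drops the range check for brevity:

def ease_in_out_quad(n: float) -> float:
    # Quadratic ease-in on the first half, mirrored ease-out on the second.
    if n < 0.5:
        return 2 * n ** 2
    n = n * 2 - 1
    return -0.5 * (n * (n - 2) - 1)

for t in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(t, ease_in_out_quad(t))  # 0.0, 0.125, 0.5, 0.875, 1.0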
def __init__(self, location=None, **kwargs): if not location: raise ValueError('Missing location value.') parent = None if 'parent' in kwargs: parent = kwargs['parent'] del kwargs['parent'] if parent: raise ValueError('Parent value set.') location = os.path.abspath(location) super(OSPathSpec, self).__init__(location=location, parent=parent, **kwargs)
Initializes a path specification. Note that the operating system path specification cannot have a parent. Args: location (Optional[str]): operating specific location string e.g. /opt/dfvfs or C:\\Opt\\dfvfs. Raises: ValueError: when location is not set or parent is set.
juraj-google-style
def dequeue(self) -> Tuple[(int, TItem)]: if (self._len == 0): raise ValueError('BucketPriorityQueue is empty.') while (self._buckets and (not self._buckets[0])): self._buckets.pop(0) self._offset += 1 item = self._buckets[0].pop(0) priority = self._offset self._len -= 1 if (self._drop_set is not None): self._drop_set.remove((priority, item)) return (priority, item)
Removes and returns an item from the priority queue. Returns: A tuple whose first element is the priority of the dequeued item and whose second element is the dequeued item. Raises: ValueError: The queue is empty.
codesearchnet
def register_peer(self, connection_id, endpoint): with self._lock: if len(self._peers) < self._maximum_peer_connectivity: self._peers[connection_id] = endpoint self._topology.set_connection_status(connection_id, PeerStatus.PEER) LOGGER.debug("Added connection_id %s with endpoint %s, " "connected identities are now %s", connection_id, endpoint, self._peers) else: raise PeeringException( "At maximum configured number of peers: {} " "Rejecting peering request from {}.".format( self._maximum_peer_connectivity, endpoint)) public_key = self.peer_to_public_key(connection_id) if public_key: self._consensus_notifier.notify_peer_connected(public_key)
Registers a connected connection_id. Args: connection_id (str): A unique identifier which identifies a connection on the network server socket. endpoint (str): The publicly reachable endpoint of the new peer
juraj-google-style
def __init__(self, unique_identifier=None, attributes=None): super(GetAttributesResponsePayload, self).__init__( enums.Tags.RESPONSE_PAYLOAD) self._unique_identifier = None self._attributes = list() self.unique_identifier = unique_identifier self.attributes = attributes
Construct a GetAttributes response payload. Args: unique_identifier (string): The ID of the managed object with which the retrieved attributes should be associated. Optional, defaults to None. attributes (list): A list of attribute structures associated with the managed object. Optional, defaults to None.
juraj-google-style
def all_prod(tensors): return _apply_all_reduce('prod', tensors)
Returns a list of tensors with the all-reduce product across `tensors`. The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to multiply; must be assigned to GPU devices. Returns: List of tensors, each with the product of the input tensors, where tensor i has the same device as `tensors[i]`.
github-repos
def crop_and_resize(image, boxes, box_ind, crop_size, pad_border=True): assert isinstance(crop_size, int), crop_size boxes = tf.stop_gradient(boxes) if pad_border: image = tf.pad(image, [[0, 0], [0, 0], [1, 1], [1, 1]], mode='SYMMETRIC') boxes = (boxes + 1) @under_name_scope() def transform_fpcoor_for_tf(boxes, image_shape, crop_shape): '\n The way tf.image.crop_and_resize works (with normalized box):\n Initial point (the value of output[0]): x0_box * (W_img - 1)\n Spacing: w_box * (W_img - 1) / (W_crop - 1)\n Use the above grid to bilinear sample.\n\n However, what we want is (with fpcoor box):\n Spacing: w_box / W_crop\n Initial point: x0_box + spacing/2 - 0.5\n (-0.5 because bilinear sample (in my definition) assumes floating point coordinate\n (0.0, 0.0) is the same as pixel value (0, 0))\n\n This function transform fpcoor boxes to a format to be used by tf.image.crop_and_resize\n\n Returns:\n y1x1y2x2\n ' (x0, y0, x1, y1) = tf.split(boxes, 4, axis=1) spacing_w = ((x1 - x0) / tf.cast(crop_shape[1], tf.float32)) spacing_h = ((y1 - y0) / tf.cast(crop_shape[0], tf.float32)) imshape = [tf.cast((image_shape[0] - 1), tf.float32), tf.cast((image_shape[1] - 1), tf.float32)] nx0 = (((x0 + (spacing_w / 2)) - 0.5) / imshape[1]) ny0 = (((y0 + (spacing_h / 2)) - 0.5) / imshape[0]) nw = ((spacing_w * tf.cast((crop_shape[1] - 1), tf.float32)) / imshape[1]) nh = ((spacing_h * tf.cast((crop_shape[0] - 1), tf.float32)) / imshape[0]) return tf.concat([ny0, nx0, (ny0 + nh), (nx0 + nw)], axis=1) image_shape = tf.shape(image)[2:] boxes = transform_fpcoor_for_tf(boxes, image_shape, [crop_size, crop_size]) image = tf.transpose(image, [0, 2, 3, 1]) ret = tf.image.crop_and_resize(image, boxes, tf.cast(box_ind, tf.int32), crop_size=[crop_size, crop_size]) ret = tf.transpose(ret, [0, 3, 1, 2]) return ret
Aligned version of tf.image.crop_and_resize, following our definition of floating point boxes. Args: image: NCHW boxes: nx4, x1y1x2y2 box_ind: (n,) crop_size (int): Returns: n,C,size,size
codesearchnet
def is_control(input, model_file=None, model_proto=None, name=None): return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(input, model_file=model_file, model_proto=model_proto, name=name, piece_type=1)
Returns true if input id is control piece. Args: input: An arbitrary tensor of int32. model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A tensor of bool with the same shape as input.
codesearchnet
def upload(self, params={}): if self.upload_token is not None: status = self.check() if status['status'] != 4: return self.commit() else: self.new_slice() while self.slice_task_id != 0: self.upload_slice() return self.commit() else: self.create(self.prepare_video_params(**params)) self.create_file() self.new_slice() while self.slice_task_id != 0: self.upload_slice() return self.commit()
Start uploading the file until the upload is complete or an error occurs. This is the main method to use if you do not care about the state of the process. Args: params: a dict object describing video info, e.g. title, tags, description, category. For all video params see the doc of prepare_video_params. Returns: the video_id if the upload succeeds
juraj-google-style
def validate_and_decode(jwt_bu64, cert_obj): try: return jwt.decode( jwt_bu64.strip(), cert_obj.public_key(), algorithms=['RS256'], verify=True ) except jwt.InvalidTokenError as e: raise JwtException('Signature is invalid. error="{}"'.format(str(e)))
Validate the JWT and return as a dict. - JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and returns it as a dict. Args: jwt_bu64: bytes The JWT encoded using a URL safe flavor of Base64. cert_obj: cryptography.Certificate Public certificate used for signing the JWT (typically the CN cert). Raises: JwtException: If validation fails. Returns: dict: Values embedded in the JWT.
juraj-google-style
def _MergeEventTag(self, storage_writer, attribute_container): if (attribute_container.CONTAINER_TYPE != 'event_tag'): return event_identifier = attribute_container.GetEventIdentifier() if (not event_identifier): return stored_event_tag = self._event_tag_index.GetEventTagByIdentifier(storage_writer, event_identifier) if stored_event_tag: attribute_container.AddComment(stored_event_tag.comment) attribute_container.AddLabels(stored_event_tag.labels) self._event_tag_index.SetEventTag(attribute_container)
Merges an event tag with the last stored event tag. If there is an existing event tag, the provided event tag is updated with the contents of the existing one, after which the event tag index is updated. Args: storage_writer (StorageWriter): storage writer. attribute_container (AttributeContainer): container.
codesearchnet
def GetContract(self, script_hash): if (script_hash.ToBytes() in self._contracts.keys()): return self._contracts[script_hash.ToBytes()] return None
Get contract for specified script_hash. Args: script_hash (UInt160): a bytearray (len 20). Returns: Contract: if a contract was found matching the provided script hash, otherwise None
codesearchnet
def _populate_quantization_options_default_values(quantization_options: _QuantizationOptions) -> None: if quantization_options.op_set == quant_opts_pb2.OpSet.OP_SET_UNSPECIFIED: quantization_options.op_set = quant_opts_pb2.OpSet.XLA if not quantization_options.tags: quantization_options.tags.append(tag_constants.SERVING) if not quantization_options.signature_keys: quantization_options.signature_keys.append(signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY) if not quantization_options.HasField('freeze_all_variables'): quantization_options.freeze_all_variables = True if quantization_options.enable_legacy_weight_only: raise ValueError('Legacy weight-only is deprecated. Use weight-only quantization method.') if quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_UNSPECIFIED: logging.debug('"preset_method" for QuantizationMethod is not specified.Static range quantization is used by default.') quantization_options.quantization_method.preset_method = _PresetMethod.METHOD_STATIC_RANGE_INT8 if quantization_options.min_num_elements_for_weights == 0: quantization_options.min_num_elements_for_weights = _DYNAMIC_RANGE_DEFAULT_MIN_NUM_ELEMENTS_FOR_WEIGHTS logging.warning('QuantizationOptions.min_num_elements_for_weights is not set (0). Setting to the default value: %d.', _DYNAMIC_RANGE_DEFAULT_MIN_NUM_ELEMENTS_FOR_WEIGHTS) if not quantization_options.HasField('enable_per_channel_quantization'): quantization_options.enable_per_channel_quantization = False if quantization_options.enable_per_channel_quantization and (not ((quantization_options.op_set == quant_opts_pb2.OpSet.UNIFORM_QUANTIZED or quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8) or (quantization_options.op_set in (quant_opts_pb2.OpSet.XLA, quant_opts_pb2.OpSet.STABLEHLO) and quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_INT8))): raise ValueError('Currently, per-channel quantization is supported for Uniform Quantized opset, weight only quantization, or XLA/StableHLO opset with static range quantization.') if quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8 and (quantization_options.op_set == quant_opts_pb2.OpSet.UNIFORM_QUANTIZED or quantization_options.op_set == quant_opts_pb2.OpSet.TF): raise ValueError('TF/Uniform quantized opset does not support weight-only.') if quantization_options.op_set == quant_opts_pb2.OpSet.STABLEHLO and (quantization_options.quantization_method.preset_method != _PresetMethod.METHOD_STATIC_RANGE_INT8 and quantization_options.quantization_method.preset_method != _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8): raise ValueError('StableHLO quantized opset currently only supports static range quantization and weight-only quantizationvia TF Quantizer.') logging.debug('Setting `force_graph_mode_calibration = True` to ensure the calibration mode is executed properly.') quantization_options.force_graph_mode_calibration = True if quantization_options.HasField('debugger_config'): if not quantization_options.debugger_config.log_dir_path: quantization_options.debugger_config.log_dir_path = '/tmp/dumps' if quantization_options.debugger_config.debugger_type == stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_UNSPECIFIED: raise ValueError('Debugger is enabled but debugger type was not specified.') if quantization_options.debugger_config.debugger_type == 
stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_WHOLE_MODEL and (not quantization_options.debugger_config.unquantized_dump_model_path): raise ValueError('Debugger type whole model verify was used but unquantized_dump_model_path was not specified.') _populate_quantization_component_spec(quantization_options.quantization_method) _populate_unitwise_quantization_specs(quantization_options) if quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_INT8: _populate_calibration_options(quantization_options)
Populates default values for QuantizationOptions. Populates unspecified or unset fields of QuantizationOptions with the default values. * If `op_set` is unspecified, it defaults to `OpSet.XLA`. * If `freeze_all_variables` is not set, it defaults to `True`. * Check if configurations are set correctly: - Per-channel quantization is supported for Uniform Quantized opset only. Args: quantization_options: An instance of QuantizationOptions.
github-repos
def connect_container_to_network(self, container, net_id, ipv4_address=None, ipv6_address=None, aliases=None, links=None, link_local_ips=None): data = {'Container': container, 'EndpointConfig': self.create_endpoint_config(aliases=aliases, links=links, ipv4_address=ipv4_address, ipv6_address=ipv6_address, link_local_ips=link_local_ips)} url = self._url('/networks/{0}/connect', net_id) res = self._post_json(url, data=data) self._raise_for_status(res)
Connect a container to a network. Args: container (str): container-id/name to be connected to the network net_id (str): network id aliases (:py:class:`list`): A list of aliases for this endpoint. Names in that list can be used within the network to reach the container. Defaults to ``None``. links (:py:class:`list`): A list of links for this endpoint. Containers declared in this list will be linked to this container. Defaults to ``None``. ipv4_address (str): The IP address of this container on the network, using the IPv4 protocol. Defaults to ``None``. ipv6_address (str): The IP address of this container on the network, using the IPv6 protocol. Defaults to ``None``. link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6) addresses.
codesearchnet
def _block_qargs_to_indices(self, block_qargs, global_index_map): block_indices = [global_index_map[q] for q in block_qargs] ordered_block_indices = sorted(block_indices) block_positions = {q: ordered_block_indices.index(global_index_map[q]) for q in block_qargs} return block_positions
Map each qubit in block_qargs to its wire position among the block's wires. Args: block_qargs (list): list of qubits that a block acts on global_index_map (dict): mapping from each qubit in the circuit to its wire position within that circuit Returns: dict: mapping from qarg to position in block
juraj-google-style
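A small standalone illustration of the qubit-to-block-position mapping described above, with plain strings standing in for qubit objects:

def block_qargs_to_indices(block_qargs, global_index_map):
    # Position of each block qubit among the block's wires, ordered by global wire index.
    block_indices = [global_index_map[q] for q in block_qargs]
    ordered = sorted(block_indices)
    return {q: ordered.index(global_index_map[q]) for q in block_qargs}

global_map = {'q0': 0, 'q1': 1, 'q2': 2, 'q3': 3}
print(block_qargs_to_indices(['q3', 'q1'], global_map))  # {'q3': 1, 'q1': 0}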
def get_action(self, action_id): return Action.get_object( api_token=self.token, action_id=action_id )
Returns a specific Action by its ID. Args: action_id (int): id of action
juraj-google-style
def ContainsKey(self, public_key): return self.ContainsKeyHash(Crypto.ToScriptHash(public_key.encode_point(True), unhex=True))
Test if the wallet contains the supplied public key. Args: public_key (ecdsa.Curve.point): a public key to test for its existence, e.g. KeyPair.PublicKey Returns: bool: True if it exists, False otherwise.
codesearchnet
def TrimVariableTable(self, new_size): def ProcessBufferFull(variables): for variable in variables: var_index = variable.get('varTableIndex') if ((var_index is not None) and (var_index >= new_size)): variable['varTableIndex'] = 0 members = variable.get('members') if (members is not None): ProcessBufferFull(members) del self._var_table[new_size:] ProcessBufferFull(self.breakpoint['evaluatedExpressions']) for stack_frame in self.breakpoint['stackFrames']: ProcessBufferFull(stack_frame['arguments']) ProcessBufferFull(stack_frame['locals']) ProcessBufferFull(self._var_table)
Trims the variable table in the formatted breakpoint message. Removes trailing entries in variables table. Then scans the entire breakpoint message and replaces references to the trimmed variables to point to var_index of 0 ("buffer full"). Args: new_size: desired size of variables table.
codesearchnet
def _set_state_variables(self, updates): if not self.built: raise RuntimeError('_set_state_variables() must be called after build().') with ops.init_scope(): for var_name, value in updates.items(): self.state_variables[var_name].assign(value)
Directly update the internal state of this Layer. This method expects a string-keyed dict of {state_variable_name: state}. The precise nature of the state, and the names associated, are described by the subclasses of CombinerPreprocessingLayer. Args: updates: A string keyed dict of weights to update. Raises: RuntimeError: if 'build()' was not called before 'set_processing_state'.
github-repos
def find_input(self, stream): for (i, input_x) in enumerate(self.inputs): if input_x[0].matches(stream): return i
Find the input that responds to this stream. Args: stream (DataStream): The stream to find Returns: (index, None): The index if found or None
codesearchnet
def _generate_composite(self, comp_node, keepables): if comp_node.name in self.datasets: return compositor, prereqs, optional_prereqs = comp_node.data try: prereq_datasets = self._get_prereq_datasets( comp_node.name, prereqs, keepables, ) except KeyError: return optional_datasets = self._get_prereq_datasets( comp_node.name, optional_prereqs, keepables, skip=True ) try: composite = compositor(prereq_datasets, optional_datasets=optional_datasets, **self.attrs) cid = DatasetID.from_dict(composite.attrs) self.datasets[cid] = composite if comp_node.name in self.wishlist: self.wishlist.remove(comp_node.name) self.wishlist.add(cid) comp_node.name = cid except IncompatibleAreas: LOG.debug("Delaying generation of %s because of incompatible areas", str(compositor.id)) preservable_datasets = set(self.datasets.keys()) prereq_ids = set(p.name for p in prereqs) opt_prereq_ids = set(p.name for p in optional_prereqs) keepables |= preservable_datasets & (prereq_ids | opt_prereq_ids) keepables.add(comp_node.name) return
Collect all composite prereqs and create the specified composite. Args: comp_node (Node): Composite Node to generate a Dataset for keepables (set): `set` to update if any datasets are needed when generation is continued later. This can happen if generation is delayed to incompatible areas which would require resampling first.
juraj-google-style
def _get_spec(self) -> dict: if self.spec: return self.spec self.spec = requests.get(self.SPEC_URL.format(self.version)).json() return self.spec
Fetches the OpenAPI spec from the server. If the spec has already been fetched, the cached version is returned instead. Args: None Returns: OpenAPI spec data
codesearchnet
def _Open(self, path_spec, mode='rb'): if not path_spec.HasParent(): raise errors.PathSpecError( 'Unsupported path specification without parent.') file_object = resolver.Resolver.OpenFileObject( path_spec.parent, resolver_context=self._resolver_context) try: tsk_image_object = tsk_image.TSKFileSystemImage(file_object) tsk_file_system = pytsk3.FS_Info(tsk_image_object) except: file_object.close() raise self._file_object = file_object self._tsk_file_system = tsk_file_system
Opens the file system object defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file system object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
juraj-google-style
def transfer_project(self, to_namespace, **kwargs): path = '/projects/%s/transfer' % (self.id,) self.manager.gitlab.http_put(path, post_data={"namespace": to_namespace}, **kwargs)
Transfer a project to the given namespace ID Args: to_namespace (str): ID or path of the namespace to transfer the project to **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTransferProjectError: If the project could not be transferred
juraj-google-style
def plot_labels(ax, label_fontsize=14, xlabel=None, xlabel_arg=None, ylabel=None, ylabel_arg=None, zlabel=None, zlabel_arg=None): xlabel = xlabel if xlabel is not None else ax.get_xlabel() or 'X' ylabel = ylabel if ylabel is not None else ax.get_ylabel() or 'Y' xlabel_arg = dict_if_none(xlabel_arg) ylabel_arg = dict_if_none(ylabel_arg) ax.set_xlabel(xlabel, fontsize=label_fontsize, **xlabel_arg) ax.set_ylabel(ylabel, fontsize=label_fontsize, **ylabel_arg) if hasattr(ax, 'zaxis'): zlabel = zlabel if zlabel is not None else ax.get_zlabel() or 'Z' zlabel_arg = dict_if_none(zlabel_arg) ax.set_zlabel(zlabel, fontsize=label_fontsize, **zlabel_arg)
Sets the label options of a matplotlib plot Args: ax: matplotlib axes label_fontsize(int): Size of the labels' font xlabel(str): The xlabel for the figure xlabel_arg(dict): Passed into matplotlib as xlabel arguments ylabel(str): The ylabel for the figure ylabel_arg(dict): Passed into matplotlib as ylabel arguments zlabel(str): The zlabel for the figure zlabel_arg(dict): Passed into matplotlib as zlabel arguments
juraj-google-style
def table_exists(client, table_reference): from google.cloud.exceptions import NotFound try: client.get_table(table_reference) return True except NotFound: return False
Return if a table exists. Args: client (google.cloud.bigquery.client.Client): A client to connect to the BigQuery API. table_reference (google.cloud.bigquery.table.TableReference): A reference to the table to look for. Returns: bool: ``True`` if the table exists, ``False`` otherwise.
codesearchnet
def from_file(cls, jss, filename): tree = ElementTree.parse(filename) root = tree.getroot() return cls(jss, root)
Create a new JSSObject from an external XML file. Args: jss: A JSS object. filename: String path to an XML file.
codesearchnet
def date_range(start, end, boo): earliest = datetime.strptime(start.replace('-', ' '), '%Y %m %d') latest = datetime.strptime(end.replace('-', ' '), '%Y %m %d') num_days = (latest - earliest).days + 1 all_days = [latest - timedelta(days=x) for x in range(num_days)] all_days.reverse() output = [] if boo: for d in all_days: output.append(int(str(d).replace('-', '')[:8])) else: for d in all_days: output.append(str(d)[:10]) return output
Return list of dates within a specified range, inclusive. Args: start: earliest date to include, String ("2015-11-25") end: latest date to include, String ("2015-12-01") boo: if true, output list contains Numbers (20151230); if false, list contains Strings ("2015-12-30") Returns: list of either Numbers or Strings
juraj-google-style
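A compact re-sketch of the date_range behaviour above (same inclusive range and the same two output formats), runnable on its own:

from datetime import datetime, timedelta

def date_range(start, end, as_numbers):
    # Inclusive list of days between two "YYYY-MM-DD" strings.
    earliest = datetime.strptime(start, '%Y-%m-%d')
    latest = datetime.strptime(end, '%Y-%m-%d')
    days = [earliest + timedelta(days=i) for i in range((latest - earliest).days + 1)]
    if as_numbers:
        return [int(d.strftime('%Y%m%d')) for d in days]
    return [d.strftime('%Y-%m-%d') for d in days]

print(date_range('2015-11-29', '2015-12-01', False))  # ['2015-11-29', '2015-11-30', '2015-12-01']
print(date_range('2015-11-29', '2015-12-01', True))   # [20151129, 20151130, 20151201]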
def exists(self, vars_list: List[str]) -> 'TensorFluent': return self._aggregation_op(tf.reduce_any, self, vars_list)
Returns the TensorFluent for the exists aggregation function. Args: vars_list: The list of variables to be aggregated over. Returns: A TensorFluent wrapping the exists aggregation function.
codesearchnet
def assertProtoEquals(self, expected_message_maybe_ascii, validate_message, msg=None, relative_tolerance=None): if isinstance(expected_message_maybe_ascii, type(validate_message)): expected_message = expected_message_maybe_ascii self._AssertProtoEquals(expected_message, validate_message, msg=msg, relative_tolerance=relative_tolerance) elif isinstance(expected_message_maybe_ascii, (str, bytes)): expected_message = type(validate_message)() text_format.Merge(expected_message_maybe_ascii, expected_message, descriptor_pool=descriptor_pool.Default()) self._AssertProtoEquals(expected_message, validate_message, msg=msg, relative_tolerance=relative_tolerance) else: assert False, "Can't compare protos of type %s and %s." % (type(expected_message_maybe_ascii), type(validate_message))
Asserts that message is same as parsed expected_message_ascii. Creates another prototype of message, reads the ascii message into it and then compares them using self._AssertProtoEqual(). Args: expected_message_maybe_ascii: proto message in original or ascii form. validate_message: the message to validate. msg: Optional message to report on failure. relative_tolerance: float. The allowable difference between the two values being compared is determined by multiplying the relative tolerance by the maximum of the two values. If this is not provided, then all floats are compared using string comparison.
github-repos
def get(self, url, headers=None, params=None): merged_headers = self._merge_headers(headers) if ('Accept' not in merged_headers): merged_headers['Accept'] = MEDIA_TYPE_TAXII_V20 accept = merged_headers['Accept'] resp = self.session.get(url, headers=merged_headers, params=params) resp.raise_for_status() content_type = resp.headers['Content-Type'] if (not self.valid_content_type(content_type=content_type, accept=accept)): msg = "Unexpected Response. Got Content-Type: '{}' for Accept: '{}'" raise TAXIIServiceException(msg.format(content_type, accept)) return _to_json(resp)
Perform an HTTP GET, using the saved requests.Session and auth info. If "Accept" isn't one of the given headers, a default TAXII mime type is used. Regardless, the response type is checked against the accept header value, and an exception is raised if they don't match. Args: url (str): URL to retrieve headers (dict): Any other headers to be added to the request. params: dictionary or bytes to be sent in the query string for the request. (optional)
codesearchnet
def filter_by(cls, **kwargs): limit = kwargs.pop('limit', None) reverse = kwargs.pop('reverse', False) q = cls.query.filter_by(**kwargs) if reverse: q = q.order_by(cls.id.desc()) if limit: q = q.limit(limit) return q
Same as SQLAlchemy's filter_by. Additionally this accepts two special keyword arguments `limit` and `reverse` for limiting the results and reversing the order respectively. Args: **kwargs: filter parameters Examples: >>> user = User.filter_by(email="new@x.com") >>> shipments = Shipment.filter_by(country="India", limit=3, reverse=True)
codesearchnet
def get_loggable_url(url): loggable_url = url or "" for secret_string in ("bewit=", "AWSAccessKeyId=", "access_token="): parts = loggable_url.split(secret_string) loggable_url = parts[0] if loggable_url != url: loggable_url = "{}<snip>".format(loggable_url) return loggable_url
Strip out secrets from taskcluster urls. Args: url (str): the url to strip Returns: str: the loggable url
juraj-google-style
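The secret-stripping behaviour above in use; the function is copied (lightly condensed) so the example runs on its own:

def get_loggable_url(url):
    # Drop everything from the first known secret query parameter onward.
    loggable_url = url or ""
    for secret_string in ("bewit=", "AWSAccessKeyId=", "access_token="):
        loggable_url = loggable_url.split(secret_string)[0]
    if loggable_url != url:
        loggable_url = "{}<snip>".format(loggable_url)
    return loggable_url

print(get_loggable_url("https://queue.example.com/artifact?bewit=abc123"))
# https://queue.example.com/artifact?<snip>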
def get_entry_by_material_id(self, material_id, compatible_only=True, inc_structure=None, property_data=None, conventional_unit_cell=False): data = self.get_entries(material_id, compatible_only=compatible_only, inc_structure=inc_structure, property_data=property_data, conventional_unit_cell=conventional_unit_cell) return data[0]
Get a ComputedEntry corresponding to a material_id. Args: material_id (str): Materials Project material_id (a string, e.g., mp-1234). compatible_only (bool): Whether to return only "compatible" entries. Compatible entries are entries that have been processed using the MaterialsProjectCompatibility class, which performs adjustments to allow mixing of GGA and GGA+U calculations for more accurate phase diagrams and reaction energies. inc_structure (str): If None, entries returned are ComputedEntries. If inc_structure="final", ComputedStructureEntries with final structures are returned. Otherwise, ComputedStructureEntries with initial structures are returned. property_data (list): Specify additional properties to include in entry.data. If None, no data. Should be a subset of supported_properties. conventional_unit_cell (bool): Whether to get the standard conventional unit cell Returns: ComputedEntry or ComputedStructureEntry object.
codesearchnet
def update(self, id=None, new_data={}, **kwargs): if id is None: path = self.path else: path = '%s/%s' % (self.path, id) self._check_missing_update_attrs(new_data) files = {} types = getattr(self, '_types', {}) if types: new_data = new_data.copy() for attr_name, type_cls in types.items(): if attr_name in new_data.keys(): type_obj = type_cls(new_data[attr_name]) if issubclass(type_cls, g_types.FileAttribute): k = type_obj.get_file_name(attr_name) files[attr_name] = (k, new_data.pop(attr_name)) else: new_data[attr_name] = type_obj.get_for_api() http_method = self._get_update_method() return http_method(path, post_data=new_data, files=files, **kwargs)
Update an object on the server. Args: id: ID of the object to update (can be None if not required) new_data: the update data for the object **kwargs: Extra options to send to the server (e.g. sudo) Returns: dict: The new object data (*not* a RESTObject) Raises: GitlabAuthenticationError: If authentication is not correct GitlabUpdateError: If the server cannot perform the request
juraj-google-style
def _StopAnalysisProcesses(self, abort=False): logger.debug('Stopping analysis processes.') self._StopMonitoringProcesses() if abort: self._AbortTerminate() if (not self._use_zeromq): logger.debug('Emptying queues.') for event_queue in self._event_queues.values(): event_queue.Empty() for event_queue in self._event_queues.values(): event_queue.PushItem(plaso_queue.QueueAbort(), block=False) self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT) for event_queue in self._event_queues.values(): event_queue.Close(abort=abort) if abort: self._AbortKill() else: self._AbortTerminate() self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT) for event_queue in self._event_queues.values(): event_queue.Close(abort=True)
Stops the analysis processes. Args: abort (bool): True to indicate that the stop is issued on abort.
codesearchnet
def _get_scatter_keys(client, query, num_splits): scatter_point_query = _create_scatter_query(query, num_splits) client_query = scatter_point_query._to_client_query(client) client_key_splits = [client_entity.key for client_entity in client_query.fetch(client=client, limit=scatter_point_query.limit)] client_key_splits.sort(key=client_key_sort_key) return client_key_splits
Gets a list of split keys given a desired number of splits. This list will contain multiple split keys for each split. Only a single split key will be chosen as the split point, however providing multiple keys allows for more uniform sharding. Args: client: the client to datastore containing the data. query: the user query. num_splits: the number of desired splits. Returns: A list of scatter keys returned by Datastore.
github-repos
def _FormatDataToken(self, token_data): format_string = bsmtoken.BSM_TOKEN_DATA_PRINT.get( token_data.data_format, 'UNKNOWN') if token_data.data_format == 4: data = bytes(bytearray(token_data.data)).split(b'\x00')[0] data = data.decode('utf-8') else: data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data]) return { 'format': format_string, 'data': data}
Formats a data token as a dictionary of values. Args: token_data (bsm_token_data_data): AUT_DATA token data. Returns: dict[str, str]: token values.
juraj-google-style
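The fallback branch above hex-encodes the raw token bytes; the same formatting in isolation:

data = bytes([0xDE, 0xAD, 0xBE, 0xEF])
print(''.join('{0:02x}'.format(byte) for byte in data))  # 'deadbeef'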
def StringEscape(self, string, match, **unused_kwargs): if match.group(1) in '\'"rnbt': self.string += string.decode('unicode_escape') else: self.string += string
Escape backslashes found inside a string quote. Backslashes followed by anything other than ['"rnbt] will just be included in the string. Args: string: The string that matched. match: the match object (instance of re.MatchObject). Where match.group(1) contains the escaped code.
juraj-google-style
def rules(cls, attr=None): try: if attr is None: attr = cls._rules_attr() return getattr(cls, attr).keys() except TypeError: return ()
Iterable of rule names used by :meth:`create` Args: attr (None or str): Name of the class attribute to which to get the names. If None, one of ``'_rules'``, ``'_binary_rules'`` is automatically chosen
juraj-google-style
def changes(self, **kwargs): path = self._get_id_path('changes') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the changes for a specific movie id. Changes are grouped by key, and ordered by date in descending order. By default, only the last 24 hours of changes are returned. The maximum number of days that can be returned in a single request is 14. The language is present on fields that are translatable. Args: start_date: (optional) Expected format is 'YYYY-MM-DD'. end_date: (optional) Expected format is 'YYYY-MM-DD'. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def close_session(self, commit=True): if self._session is not None: if commit: self._session.commit() self._session.close() self._session = None
Commit and close the DB session associated with this task (no error is raised if None is open) Args: commit (bool): commit session before closing (default=True)
juraj-google-style
def has_completed_result_block_format(self, error_message): extras = self._get_extras() if _InstrumentationResultSignals.PASS in extras: return True elif _InstrumentationResultSignals.FAIL in extras: return False else: raise signals.TestError(details=error_message, extras=extras)
Checks the instrumentation result block for a signal indicating normal completion. Args: error_message: string, the error message to give if the instrumentation run did not complete successfully. Returns: A boolean indicating whether or not the instrumentation run passed or failed overall. Raises: signals.TestError: Error raised if the instrumentation run did not complete because of a crash or some other issue.
github-repos
def get_el(el): tag_name = el.elt.tagName.lower() if tag_name in {"input", "textarea", "select"}: return el.value else: raise ValueError( "Getter for %s (%s) not implemented!" % (tag_name, el.id) )
Get the value of the given `el` tag element. Automatically chooses the proper method to get the `value` based on the type of `el`. Args: el (obj): Element reference to the input you want to read. Returns: str: Value of the object.
juraj-google-style
def inner_horizontal_border(self): return u'{lm}{lv}{hz}{rv}'.format(lm=(' ' * self.margins.left), lv=self.border_style.outer_vertical_inner_right, rv=self.border_style.outer_vertical_inner_left, hz=self.inner_horizontals())
The complete inner horizontal border section, including the left and right border verticals. Returns: str: The complete inner horizontal border.
codesearchnet
def _ValidateFleetspeakServiceConfig(self, config_path): with open(config_path, 'rb') as f: pool = descriptor_pool.DescriptorPool() pool.AddDescriptor(fs_config_pb2.Config.DESCRIPTOR) parsed_config = text_format.Parse(f.read(), fs_system_pb2.ClientServiceConfig(), descriptor_pool=pool) if (parsed_config.factory != 'Daemon'): raise BuildError('Fleetspeak config does not have the expected factory type.') daemon_cfg = fs_config_pb2.Config() parsed_config.config.Unpack(daemon_cfg) if (not daemon_cfg.argv): raise BuildError('Fleetspeak daemon service config does not specify command line args.')
Validates a Fleetspeak service config. Checks that the given file is a valid TextFormat representation of a Fleetspeak service config proto. Args: config_path: Path to the config file. Raises: BuildError: If the config is not valid.
codesearchnet
def restore(self): self.read_checkpoint_manager.restore_or_initialize()
Restore the training state from the backed up checkpoint file. Returns: True if the training state is successfully restored. False if the training state doesn't need to be restored, or error occurred so it can't.
github-repos
def write_if_allowed(filename: str, content: str, overwrite: bool = False, mock: bool = False) -> None: if not overwrite and exists(filename): fail("File exists, not overwriting: {!r}".format(filename)) directory = dirname(filename) if not mock: mkdir_p(directory) log.info("Writing to {!r}", filename) if mock: log.warning("Skipping writes as in mock mode") else: with open(filename, "wt") as outfile: outfile.write(content)
Writes the contents to a file, if permitted. Args: filename: filename to write content: contents to write overwrite: permit overwrites? mock: pretend to write, but don't Raises: RuntimeError: if file exists but overwriting not permitted
juraj-google-style
def save_scatter_table(self, fn, description=""): data = { "description": description, "time": datetime.now(), "psd_scatter": (self.num_points, self.D_max, self._psd_D, self._S_table, self._Z_table, self._angular_table, self._m_table, self.geometries), "version": tmatrix_aux.VERSION } with open(fn, 'wb') as f: pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
Save the scattering lookup tables. Save the state of the scattering lookup tables to a file. This can be loaded later with load_scatter_table. Other variables will not be saved, but this does not matter because the results of the computations are based only on the contents of the table. Args: fn: The name of the scattering table file. description (optional): A description of the table.
juraj-google-style
def get_charge_transfer(self, atom_index): if self.potcar is None: raise ValueError("POTCAR must be supplied in order to calculate " "charge transfer!") potcar_indices = [] for i, v in enumerate(self.natoms): potcar_indices += [i] * v nelect = self.potcar[potcar_indices[atom_index]].nelectrons return self.data[atom_index]["charge"] - nelect
Returns the charge transferred for a particular atom. Requires POTCAR to be supplied. Args: atom_index: Index of atom. Returns: Charge transfer associated with atom from the Bader analysis. Given by final charge on atom - nelectrons in POTCAR for associated atom.
juraj-google-style
def text_colour_for_hex(hexx, percent=50, dark='#000000', light='#ffffff'): return light if hex_is_dark(hexx, percent=percent) else dark
Function to decide what text colour to use for a given hex background colour. Args: hexx (str): A hexadecimal colour, starting with '#'. percent (int): Brightness threshold percentage. Returns: str: The light colour if the given colour is dark (its brightness is below the given percent), otherwise the dark colour.
juraj-google-style
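hex_is_dark is not included in the row above; the sketch below pairs the function with a rough stand-in for that brightness test (the threshold formula and the default colours are assumptions):

def hex_is_dark(hexx, percent=50):
    # Perceived brightness of an '#rrggbb' colour against a percentage threshold.
    r, g, b = (int(hexx[i:i + 2], 16) for i in (1, 3, 5))
    brightness = (0.299 * r + 0.587 * g + 0.114 * b) / 255 * 100
    return brightness < percent

def text_colour_for_hex(hexx, percent=50, dark='#000000', light='#ffffff'):
    return light if hex_is_dark(hexx, percent=percent) else dark

print(text_colour_for_hex('#203040'))  # '#ffffff' (dark background, light text)
print(text_colour_for_hex('#f0e0d0'))  # '#000000' (light background, dark text)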
def type_spec_from_value(element, use_fallback=True): spec = type_spec._type_spec_from_value(element) if spec is not None: return spec if isinstance(element, collections_abc.Mapping): if isinstance(element, collections.defaultdict): ctor = lambda items: type(element)(element.default_factory, items) else: ctor = type(element) return ctor([(k, type_spec_from_value(v)) for k, v in element.items()]) if isinstance(element, tuple): if hasattr(element, '_fields') and isinstance(element._fields, collections_abc.Sequence) and all((isinstance(f, str) for f in element._fields)): if isinstance(element, wrapt.ObjectProxy): element_type = type(element.__wrapped__) else: element_type = type(element) return element_type(*[type_spec_from_value(v) for v in element]) return tuple([type_spec_from_value(v) for v in element]) if hasattr(element.__class__, '__attrs_attrs__'): attrs = getattr(element.__class__, '__attrs_attrs__') return type(element)(*[type_spec_from_value(getattr(element, a.name)) for a in attrs]) if isinstance(element, CustomNestProtocol): metadata, children = element.__tf_flatten__() return element.__tf_unflatten__(metadata, type_spec_from_value(children)) if use_fallback: try: tensor = ops.convert_to_tensor(element) spec = type_spec_from_value(tensor) if spec is not None: return spec except (ValueError, TypeError) as e: logging.vlog(3, 'Failed to convert %r to tensor: %s' % (type(element).__name__, e)) raise TypeError('Could not build a `TypeSpec` for {} with type {}'.format(element, type(element).__name__))
Creates a type specification for the given value. Args: element: The element to create the type specification for. use_fallback: Whether to fall back to converting the element to a tensor in order to compute its `TypeSpec`. Returns: A nested structure of `TypeSpec`s that represents the type specification of `element`. Raises: TypeError: If a `TypeSpec` cannot be built for `element`, because its type is not supported.
github-repos
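A small usage sketch for type_spec_from_value above, assuming a TensorFlow environment in which this internal helper is reachable (it backs the element-spec inference used by tf.data).

import tensorflow as tf

element = {"image": tf.zeros([2, 3]), "label": 7}
spec = type_spec_from_value(element)
# spec["image"] is a tf.TensorSpec of shape (2, 3) and dtype float32; the plain Python
# int has no registered TypeSpec, so it falls back to tensor conversion and becomes a
# scalar int32 TensorSpec.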
def _trigger(self, obj, old, value, hint=None, setter=None): if hasattr(obj, 'trigger'): obj.trigger(self.name, old, value, hint, setter)
Unconditionally send a change event notification for the property. Args: obj (HasProps) : The object the property is being set on. old (obj) : The previous value of the property. value (obj) : The new value of the property. hint (event hint or None, optional) : An optional update event hint, e.g. ``ColumnStreamedEvent`` (default: None). Update event hints are usually used when better update performance can be obtained by special-casing in some way (e.g. streaming or patching column data sources). setter (ClientSession or ServerSession or None, optional) : This is used to prevent "boomerang" updates to Bokeh apps (default: None). In the context of a Bokeh server application, incoming updates to properties will be annotated with the session that is doing the updating. This value is propagated through any subsequent change notifications that the update triggers. The session can compare the event setter to itself, and suppress any updates that originate from itself. Returns: None
codesearchnet
def sqlInsert(def_buf, raw_a, raw_b): count = 0 qry_str = "INSERT INTO Meter_Reads ( \n\t" for fld in def_buf: if count > 0: qry_str += ", \n\t" qry_str = qry_str + fld count += 1 qry_str += (",\n\t" + Field.Time_Stamp + ", \n\t" + "Raw_A,\n\t" + "Raw_B\n) \n" + "VALUES( \n\t") count = 0 for fld in def_buf: if count > 0: qry_str += ", \n\t" fld_type = def_buf[fld][MeterData.TypeValue] fld_str_content = def_buf[fld][MeterData.StringValue] delim = "" if (fld_type == FieldType.Hex) or \ (fld_type == FieldType.String) or \ (fld_type == FieldType.PowerFactor): delim = "'" qry_str = qry_str + delim + fld_str_content + delim count += 1 time_val = int(time.time() * 1000) qry_str = (qry_str + ",\n\t" + str(time_val) + ",\n\t'" + binascii.b2a_hex(raw_a) + "'" + ",\n\t'" + binascii.b2a_hex(raw_b) + "'\n);") ekm_log(qry_str, 4) return qry_str
Reasonably portable SQL INSERT built from the combined read buffer. Args: def_buf (SerialBlock): Database-only serial block of all fields. raw_a (str): Raw A read as a hex string. raw_b (str): Raw B read (empty if it does not exist) as a hex string. Returns: str: SQL INSERT statement for the passed read buffer.
juraj-google-style
def modify_dict(data, key, value, create_if_missing=False): data_copy = copy.deepcopy(data) key_copy = copy.deepcopy(key) delver = data_copy current_key = key_copy last_key = "Root" while len(current_key) > 1: if current_key[0] not in delver: raise KeyError("ModifyJsonStep Key Couldn't find Subkey {} in {}.".format(current_key[0], last_key)) if len(current_key) > 2 and not isinstance(delver[current_key[0]], dict): raise ValueError("ModifyJsonStep The Value of {} is a {}, not a dict".format(current_key[0], type(delver[current_key[0]]))) last_key = current_key[0] delver = delver[current_key[0]] current_key.pop(0) if current_key[0] not in delver and not create_if_missing: raise KeyError("ModifyJsonStep Key Couldn't find Subkey {} in {}.".format(current_key[0], last_key)) delver[current_key[0]] = value return data_copy
Change (or add) a JSON key/value pair. Args: data (dict): The original data; it will not be modified. key (list): A list of keys and subkeys specifying the key to change (the list may contain a single key). value (str): The value to set for the above key. create_if_missing (bool): Set to True to create the key if the last key in the list is not found; otherwise the function raises a KeyError. Returns: (dict): the modified copy of the dict
juraj-google-style
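A quick usage sketch for modify_dict above; the key path is a list of nested keys, and create_if_missing governs whether a missing leaf may be added. The settings shown are illustrative.

settings = {"server": {"port": 8080, "tls": {"enabled": False}}}
# Flip a nested flag; the input dict is untouched because the function deep-copies it.
updated = modify_dict(settings, ["server", "tls", "enabled"], True)
# Adding a brand-new leaf requires create_if_missing=True; otherwise a KeyError is raised.
updated = modify_dict(updated, ["server", "tls", "cert"], "/etc/ssl/cert.pem", create_if_missing=True)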
def plot_wigner_seitz(lattice, ax=None, **kwargs): (ax, fig, plt) = get_ax3d_fig_plt(ax) if ('color' not in kwargs): kwargs['color'] = 'k' if ('linewidth' not in kwargs): kwargs['linewidth'] = 1 bz = lattice.get_wigner_seitz_cell() (ax, fig, plt) = get_ax3d_fig_plt(ax) for iface in range(len(bz)): for line in itertools.combinations(bz[iface], 2): for jface in range(len(bz)): if ((iface < jface) and any((np.all((line[0] == x)) for x in bz[jface])) and any((np.all((line[1] == x)) for x in bz[jface]))): ax.plot(*zip(line[0], line[1]), **kwargs) return (fig, ax)
Adds the skeleton of the Wigner-Seitz cell of the lattice to a matplotlib Axes Args: lattice: Lattice object ax: matplotlib :class:`Axes` or None if a new figure should be created. kwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to black and linewidth to 1. Returns: matplotlib figure and matplotlib ax
codesearchnet
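A usage sketch for plot_wigner_seitz above, assuming pymatgen and matplotlib are installed; a cubic lattice yields the familiar cubic Wigner-Seitz cell. The import path and lattice constant are assumptions.

from pymatgen.core import Lattice   # older pymatgen versions expose Lattice at the top level

lattice = Lattice.cubic(4.2)
fig, ax = plot_wigner_seitz(lattice, color="b", linewidth=2)
fig.savefig("wigner_seitz.png")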
def is_unlikely_link(text): if ((text[:1] in ',;+:') or (text[(- 1):] in '.,;+:')): return True if re.search('[\\\\$()\'"[\\]{}|<>`]', text): return True if ((text[:1] == '.') and (not text.startswith('./')) and (not text.startswith('../'))): return True if (text in ('/', '#')): return True if (('#' in text)): return True if (text in MIMETYPES): return True (tag_1, dummy, tag_2) = text.partition('.') if ((tag_1 in HTML_TAGS) and (tag_2 != 'html')): return True if FIRST_PART_TLD_PATTERN.match(text): return True
Return whether the text is likely to cause false positives. This function assumes that leading/trailing whitespace has already been removed. Returns: bool
codesearchnet
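A few illustrative calls to is_unlikely_link above, assuming the module-level MIMETYPES, HTML_TAGS and FIRST_PART_TLD_PATTERN constants are in scope; note the function falls through and returns None (falsy) when no rule matches, which the docstring treats as False.

is_unlikely_link("page.html")      # falsy: looks like an ordinary relative link
is_unlikely_link(".hidden")        # True: leading dot without './' or '../'
is_unlikely_link("var x = 'a';")   # True: trailing ';' plus quote characters
is_unlikely_link("text/html")      # True, assuming 'text/html' is listed in MIMETYPES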
def inference(self, limit=1000): route_list = [] memory_list = [] state_key = self.__start_point_tuple x, y = state_key end_x, end_y = self.__end_point_tuple for i in range(limit): q_df = self.q_df[self.q_df.state_key == state_key] if len(memory_list): q_df = q_df[~q_df.action_key.isin(memory_list)] if q_df.shape[0] > 1: q_df = q_df.sort_values(by=["q_value"], ascending=False) action_key = q_df.iloc[0, :]["action_key"] q_value = q_df.iloc[0, :]["q_value"] elif q_df.shape[0] == 1: action_key = q_df.action_key.values[0] q_value = q_df.q_value.values[0] else: action_key_list = self.extract_possible_actions(state_key) action_key_list = [v for v in action_key_list if v not in memory_list] q_value = 0.0 if len(action_key_list): action_key = random.choice(action_key_list) _q_df = q_df[q_df.action_key == action_key] if _q_df.shape[0]: q_value = _q_df.q_value.values[0] state_key = self.update_state( state_key=state_key, action_key=action_key ) x, y = state_key route_list.append((x, y, q_value)) memory_list.append(state_key) if self.check_the_end_flag(state_key) is True: break return route_list
Infer a route. Args: limit: the maximum number of inference steps. Returns: [(x_1, y_1, q_value_1), (x_2, y_2, q_value_2), ...]
juraj-google-style
def import_tf_tensor(self, x, tf_x): return self.LaidOutTensor(self.make_slices(tf_x, x.shape))
Import a tf.Tensor, producing a LaidOutTensor. Args: x: a Tensor tf_x: a tf.Tensor Returns: a LaidOutTensor
codesearchnet
def create(self, key, value): data = None if (key is not None): key = key.strip() self.tcex.log.debug(u'create variable {}'.format(key)) parsed_key = self.parse_variable(key.strip()) variable_type = parsed_key['type'] if (variable_type in self.read_data_types): data = self.create_data_types[variable_type](key, value) else: data = self.create_raw(key, value) return data
Create method of CRUD operation for working with KeyValue DB. This method will automatically determine the variable type and call the appropriate method to write the data. If a non standard type is provided the data will be written as RAW data. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. Returns: (string): Result string of DB write.
codesearchnet
def load_file_system_library(library_filename): py_tf.TF_LoadLibrary(library_filename)
Loads a TensorFlow plugin, containing file system implementation. Pass `library_filename` to a platform-specific mechanism for dynamically loading a library. The rules for determining the exact location of the library are platform-specific and are not documented here. Args: library_filename: Path to the plugin. Relative or absolute filesystem path to a dynamic library file. Returns: None. Raises: RuntimeError: when unable to load the library.
github-repos
def get_filetypes_info(editor_quote="`", flag_leaf=True): NONE_REPL = "" import f311 data = [] for attr in f311.classes_file(flag_leaf): description = a99.get_obj_doc0(attr) def_ = NONE_REPL if attr.default_filename is None else attr.default_filename ee = attr.editors if ee is None: ee = NONE_REPL else: ee = ", ".join(["{0}{1}{0}".format(editor_quote, x, editor_quote) for x in ee]) data.append({"description": description, "default_filename": def_, "classname": attr.__name__, "editors": ee, "class": attr, "txtbin": "text" if attr.flag_txt else "binary"}) data.sort(key=lambda x: x["description"]) return data
Reports available data types. Args: editor_quote: character used to enclose the name of each editor script. flag_leaf: see tabulate_filetypes_rest() Returns: list: one dict per file type, with its description, default filename, class name, editors and text/binary flag
juraj-google-style
def _sia(cache_key, subsystem): log.info('Calculating big-phi data for %s...', subsystem) if (not subsystem): log.info('Subsystem %s is empty; returning null SIA immediately.', subsystem) return _null_sia(subsystem) if (not connectivity.is_strong(subsystem.cm, subsystem.node_indices)): log.info('%s is not strongly connected; returning null SIA immediately.', subsystem) return _null_sia(subsystem) if (len(subsystem.cut_indices) == 1): if (not subsystem.cm[subsystem.node_indices][subsystem.node_indices]): log.info('Single micro nodes %s without selfloops cannot have phi; returning null SIA immediately.', subsystem) return _null_sia(subsystem) elif (not config.SINGLE_MICRO_NODES_WITH_SELFLOOPS_HAVE_PHI): log.info('Single micro nodes %s with selfloops cannot have phi; returning null SIA immediately.', subsystem) return _null_sia(subsystem) log.debug('Finding unpartitioned CauseEffectStructure...') unpartitioned_ces = _ces(subsystem) if (not unpartitioned_ces): log.info('Empty unpartitioned CauseEffectStructure; returning null SIA immediately.') return _null_sia(subsystem) log.debug('Found unpartitioned CauseEffectStructure.') if (len(subsystem.cut_indices) == 1): cuts = [Cut(subsystem.cut_indices, subsystem.cut_indices, subsystem.cut_node_labels)] else: cuts = sia_bipartitions(subsystem.cut_indices, subsystem.cut_node_labels) engine = ComputeSystemIrreducibility(cuts, subsystem, unpartitioned_ces) result = engine.run(config.PARALLEL_CUT_EVALUATION) if config.CLEAR_SUBSYSTEM_CACHES_AFTER_COMPUTING_SIA: log.debug('Clearing subsystem caches.') subsystem.clear_caches() log.info('Finished calculating big-phi data for %s.', subsystem) return result
Return the minimal information partition of a subsystem. Args: subsystem (Subsystem): The candidate set of nodes. Returns: SystemIrreducibilityAnalysis: A nested structure containing all the data from the intermediate calculations. The top level contains the basic irreducibility information for the given subsystem.
codesearchnet
def format_var_name(variable, var_list): z_index = None if variable in var_list: var_name = variable elif variable.ljust(6, "_") in var_list: var_name = variable.ljust(6, "_") elif any([variable in v_sub.split("_") for v_sub in var_list]): var_name = var_list[[variable in v_sub.split("_") for v_sub in var_list].index(True)] z_index = var_name.split("_").index(variable) else: raise KeyError("{0} not found in {1}".format(variable, var_list)) return var_name, z_index
Searches var list for variable name, checks other variable name format options. Args: variable (str): Variable being loaded var_list (list): List of variables in file. Returns: Name of variable in file containing relevant data, and index of variable z-level if multiple variables contained in same array in file.
juraj-google-style
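A small usage sketch for format_var_name above, showing the three lookup paths it supports; the variable names are made up for illustration.

format_var_name("UP_HELI_MAX", ["REFL_1KM_AGL", "UP_HELI_MAX"])   # exact match -> ("UP_HELI_MAX", None)
format_var_name("cqg", ["cqg___", "other"])                       # underscore-padded match -> ("cqg___", None)
format_var_name("HELI", ["REFL_1KM_AGL", "UP_HELI_MAX"])          # sub-variable match -> ("UP_HELI_MAX", 1)
# A variable not found in any of the three forms raises KeyError.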
def __init__(self, file_path, expected_checksum, sleep_secs=None): if sleep_secs is not None: if isinstance(sleep_secs, int): self.sleep_secs = sleep_secs else: raise ValueError('Sleep seconds, if received, must be int. But received: %r, %s' % (sleep_secs, type(sleep_secs))) else: self.sleep_secs = None self.file_path = file_path self.expected_checksum = expected_checksum
Initialize a FileChecksumMatcher object. Args: file_path : A string that is the full path of the output file. This path can contain globs. expected_checksum : A hash string computed from the expected result. sleep_secs : Number of seconds to wait before verification starts. Extra time is given to make sure output files are ready on the FS.
github-repos
def flatten(l): for el in l: if isinstance(el, Iterable) and not isinstance( el, (str, bytes)) and not isinstance(el, dict): yield from flatten(el) else: yield el
Flatten a multi-dimensional list and return an iterable. Note that dict and str will not be expanded; instead, they are kept as single elements. Args: l (list): The list to be flattened Returns: An iterable over the flattened list. To get a list instead, use ``list(flatten(l))``
juraj-google-style
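A usage sketch for flatten above; strings, bytes and dicts are treated as atoms rather than expanded.

nested = [1, [2, [3, 4]], "ab", {"k": 5}, (6, 7)]
list(flatten(nested))   # -> [1, 2, 3, 4, 'ab', {'k': 5}, 6, 7]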
def _get_edge_sentences(G: AnalysisGraph, source: str, target: str) -> List[str]: return chain.from_iterable([[repr(e.text) for e in s.evidence] for s in G.edges[(source, target)]['InfluenceStatements']])
Return the sentences that led to the construction of a specified edge. Args: G: The analysis graph. source: The source of the edge. target: The target of the edge.
codesearchnet
def console_get_height(con: tcod.console.Console) -> int: return int(lib.TCOD_console_get_height(_console(con)))
Return the height of a console. Args: con (Console): Any Console instance. Returns: int: The height of a Console. .. deprecated:: 2.0 Use `Console.height` instead.
juraj-google-style
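A brief sketch for console_get_height above, assuming python-tcod is installed; the deprecated helper and the preferred attribute give the same value.

import tcod

console = tcod.console.Console(80, 50)   # width=80, height=50
console_get_height(console)              # -> 50, same as console.height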
def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, decoder_position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, decoder_head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: Optional[TFBaseModelOutput]=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: bool=False) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]: if labels is not None: labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels) use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True) lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns:
github-repos
def future(self, request_iterator, timeout=None, metadata=None, credentials=None): return _utils.wrap_future_call(self._inner.future(_utils.WrappedAsyncIterator(request_iterator, self._loop), timeout, metadata, credentials), self._loop, self._executor)
Asynchronously invokes the underlying RPC on the client. Args: request_iterator: An ASYNC iterator that yields request values for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. If None, the timeout is considered infinite. metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. Returns: An object that is both a Call for the RPC and a Future. In the event of RPC completion, the return Call-Future's result value will be the response message of the RPC. Should the event terminate with non-OK status, the returned Call-Future's exception value will be an RpcError.
codesearchnet
def __init__(self, target, converter_target_spec=None, converter_allow_custom_ops=None, raise_exception=False): functools.update_wrapper(self, target) self._func = target self._obj_func = None self._verified = False self._log_messages = [] self._raise_exception = raise_exception self._converter_target_spec = converter_target_spec self._converter_allow_custom_ops = converter_allow_custom_ops
Initialize the decorator object. Description of the object variables: - _func : decorated function. - _obj_func : for a class method, this object is used to provide the `self` instance as the first argument. - _verified : whether the compatibility has been checked or not. Args: target: decorated function. converter_target_spec : target_spec parameter of the TFLite converter. converter_allow_custom_ops : allow_custom_ops parameter of the TFLite converter. raise_exception : whether to raise an exception on compatibility issues; users need to use get_compatibility_log() to check the details.
github-repos
def get_and_check_project(valid_vcs_rules, source_url): project_path = match_url_regex(valid_vcs_rules, source_url, match_url_path_callback) if project_path is None: raise ValueError("Unknown repo for source url {}!".format(source_url)) project = project_path.split('/')[-1] return project
Given vcs rules and a source_url, return the project. The project is the repo name at the end of the path: `releases/mozilla-beta` is the path; `mozilla-beta` is the project. Args: valid_vcs_rules (tuple of frozendicts): the valid vcs rules, per ``match_url_regex``. source_url (str): the source url to find the project for. Raises: ValueError: on failure to find the project. Returns: str: the project.
juraj-google-style
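A usage sketch for get_and_check_project above; valid_vcs_rules is the tuple of rule frozendicts from the scriptworker configuration (its exact shape is not reproduced here), and the project is simply the last component of the matched repo path. The URL is illustrative.

project = get_and_check_project(
    valid_vcs_rules,   # rules from the scriptworker config, matched via match_url_regex
    "https://hg.mozilla.org/releases/mozilla-beta",
)
# -> "mozilla-beta"; an unmatched source URL raises ValueError.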
def format_as_single_line(self, prefix=None, divider=' | ', enabled_item_attrs=None, disabled_item_attrs=None): if enabled_item_attrs is not None and (not isinstance(enabled_item_attrs, list)): enabled_item_attrs = [enabled_item_attrs] if disabled_item_attrs is not None and (not isinstance(disabled_item_attrs, list)): disabled_item_attrs = [disabled_item_attrs] menu_line = prefix if prefix is not None else '' attr_segs = [] for item in self._items: menu_line += item.caption item_name_begin = len(menu_line) - len(item.caption) if item.is_enabled(): final_attrs = [item] if enabled_item_attrs: final_attrs.extend(enabled_item_attrs) attr_segs.append((item_name_begin, len(menu_line), final_attrs)) elif disabled_item_attrs: attr_segs.append((item_name_begin, len(menu_line), disabled_item_attrs)) menu_line += divider return RichTextLines(menu_line, font_attr_segs={0: attr_segs})
Format the menu as a single-line RichTextLines object. Args: prefix: (str) String added to the beginning of the line. divider: (str) The dividing string between the menu items. enabled_item_attrs: (list or str) Attributes applied to each enabled menu item, e.g., ["bold", "underline"]. disabled_item_attrs: (list or str) Attributes applied to each disabled menu item, e.g., ["red"]. Returns: (RichTextLines) A single-line output representing the menu, with font_attr_segs marking the individual menu items.
github-repos