code: string, lengths 20 to 4.93k
docstring: string, lengths 33 to 1.27k
source: string, 3 classes
def __init__(self, pipeline: 'Pipeline', tag: Optional[str]=None, element_type: Optional[Union[type, 'typehints.TypeConstraint']]=None, windowing: Optional['Windowing']=None, is_bounded=True): self.pipeline = pipeline self.tag = tag self.element_type = element_type self.producer: Optional[AppliedPTransform] = None self.is_bounded = is_bounded if windowing: self._windowing = windowing self.requires_deterministic_key_coder = None
Initializes a PValue with all arguments hidden behind keyword arguments. Args: pipeline: Pipeline object for this PValue. tag: Tag of this PValue. element_type: The type of this PValue.
github-repos
def serialize_array(array, domain=(0, 1), fmt='png', quality=70): normalized = _normalize_array(array, domain=domain) return _serialize_normalized_array(normalized, fmt=fmt, quality=quality)
Given an arbitrary rank-3 NumPy array, returns the byte representation of the encoded image. Args: array: NumPy array of dtype uint8 and range 0 to 255 domain: expected range of values in array, see `_normalize_array()` fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer
codesearchnet
def get_2d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False): grid_h = np.arange(grid_size, dtype=np.float32) grid_w = np.arange(grid_size, dtype=np.float32) grid = np.meshgrid(grid_w, grid_h) grid = np.stack(grid, axis=0) grid = grid.reshape([2, 1, grid_size, grid_size]) pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) if add_cls_token: pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) return pos_embed
Create 2D sin/cos positional embeddings. Args: embed_dim (`int`): Embedding dimension. grid_size (`int`): The grid height and width. add_cls_token (`bool`, *optional*, defaults to `False`): Whether or not to add a classification (CLS) token. Returns: `torch.FloatTensor` of shape `(grid_size*grid_size, embed_dim)` or `(1+grid_size*grid_size, embed_dim)`: the position embeddings (with or without classification token)
github-repos
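A minimal usage sketch for get_2d_sincos_pos_embed above, assuming the function and its get_2d_sincos_pos_embed_from_grid helper are in scope; the shapes follow directly from the code:

pos = get_2d_sincos_pos_embed(embed_dim=64, grid_size=4)
print(pos.shape)      # (16, 64): one sin/cos row per grid cell
pos_cls = get_2d_sincos_pos_embed(embed_dim=64, grid_size=4, add_cls_token=True)
print(pos_cls.shape)  # (17, 64): an all-zero row is prepended for the CLS token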
def update(self, resource, timeout=(- 1)): return self._client.update(resource, timeout=timeout, default_values=self.DEFAULT_VALUES, uri=self.URI)
Updates a User. Args: resource (dict): Object to update. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion. Returns: dict: Updated resource.
codesearchnet
def _applyInter(finter0, finter1, conflict="ignore"): OPTIONS = ["error", "ignore", "me", "other"] assert conflict in OPTIONS, "Invalid value in `conflict`." min_int = -2**63 inter0 = tuple([f.getValue() if f else min_int for f in finter0]) inter1 = tuple([f.getValue() if f else min_int for f in finter1]) le00 = inter0[0] <= inter1[0] le01 = inter1[1] == min_int or inter0[0] <= inter1[1] le11 = inter1[1] == min_int or (inter0[1] != min_int and inter0[1] <= inter1[1]) ge00 = not le00 or inter0[0] == inter1[0] ge10 = inter0[1] == min_int or inter0[1] >= inter1[0] if le00 and ge10 and le11: return finter1[0], finter0[1] elif le00 and ge10 and not le11: return finter1 elif ge00 and le01 and le11: return finter0 elif ge00 and le01 and not le11: return finter0[0], finter1[1] elif conflict == "me": return finter0 elif conflict == "other": return finter1 elif conflict == "error": raise Exception("Disjoint intervals!") return None
Return the restriction of the first interval by the second. Args: - finter0, finter1 (tuple of Feature): intervals - conflict (str): if a property doesn't have compatible values/constraints, do: - ``"error"``: raise exception. - ``"ignore"``: return None. - ``"me"``: return finter0. - ``"other"``: return finter1. Return (tuple of Feature): the resulting interval
juraj-google-style
def GetExcludePatternsForDir(dirname): ignore_patterns = [] yapfignore_file = os.path.join(dirname, '.yapfignore') if os.path.exists(yapfignore_file): ignore_patterns += _GetExcludePatternsFromYapfIgnore(yapfignore_file) pyproject_toml_file = os.path.join(dirname, 'pyproject.toml') if os.path.exists(pyproject_toml_file): ignore_patterns += _GetExcludePatternsFromPyprojectToml(pyproject_toml_file) return ignore_patterns
Return patterns of files to exclude from ignorefile in a given directory. Looks for .yapfignore in the directory dirname. Arguments: dirname: (unicode) The name of the directory. Returns: A List of file patterns to exclude if ignore file is found, otherwise empty List.
github-repos
def metadata_path(self, m_path): if not m_path: self.metadata_dir = None self.metadata_file = None else: if not op.exists(m_path): raise OSError('{}: file does not exist!'.format(m_path)) if not op.dirname(m_path): self.metadata_dir = '.' else: self.metadata_dir = op.dirname(m_path) self.metadata_file = op.basename(m_path) tmp_sr = SeqIO.read(self.metadata_path, 'uniprot-xml') parsed = parse_uniprot_xml_metadata(tmp_sr) self.update(parsed, overwrite=True)
Provide pointers to the paths of the metadata file Args: m_path: Path to metadata file
juraj-google-style
def serialize(layer): return serialization_lib.serialize_keras_object(layer)
Returns the layer configuration as a Python dict. Args: layer: A `keras.layers.Layer` instance to serialize. Returns: Python dict which contains the configuration of the layer.
github-repos
def __init__(self, path): self.path = os.path.join(path, app.config['XCESSIV_NOTEBOOK_NAME'])
Initialize context manager Args: path (str, unicode): Path to project folder
juraj-google-style
def __init__(self, substream_name: str='realtime', video_mode: VideoMode=VideoMode.CAMERA): self._video_mode = video_mode self._substream_name = substream_name
Initializes the processor. Args: substream_name: The name of the substream to use for the generated images. video_mode: The video mode to use for the video. Can be CAMERA or SCREEN.
github-repos
def image(cam): yield marv.set_header(title=cam.topic) msg = yield marv.pull(cam) if msg is None: return pytype = get_message_type(cam) rosmsg = pytype() rosmsg.deserialize(msg.data) name = '{}.jpg'.format(cam.topic.replace('/', ':')[1:]) imgfile = yield marv.make_file(name) img = imgmsg_to_cv2(rosmsg, "rgb8") cv2.imwrite(imgfile.path, img, (cv2.IMWRITE_JPEG_QUALITY, 60)) yield marv.push(imgfile)
Extract first image of input stream to jpg file. Args: cam: Input stream of raw rosbag messages. Returns: File instance for first image of input stream.
juraj-google-style
def CopyAFF4ToLocal(aff4_urn, target_dir, token=None, overwrite=False): try: fd = aff4.FACTORY.Open(aff4_urn, token=token) filepath = os.path.join(target_dir, fd.urn.Path()[1:]) if isinstance(fd, standard.VFSDirectory): try: os.makedirs(filepath) except OSError: pass return None elif isinstance(fd, aff4.AFF4Stream): if not os.path.isfile(filepath): try: os.makedirs(os.path.dirname(filepath)) except OSError: pass DownloadFile(fd, filepath) elif (os.stat(filepath)[stat.ST_SIZE] != fd.Get(fd.Schema.SIZE) or overwrite): DownloadFile(fd, filepath) else: logging.info("File %s exists, skipping", filepath) return filepath else: raise ValueError("Opened urn is neither a downloaded file nor a " "directory: %s" % aff4_urn) except IOError as e: logging.exception("Failed to read %s due to %s", aff4_urn, e) raise
Copy an AFF4 object that supports a read interface to local filesystem. Args: aff4_urn: URN of thing to copy. target_dir: Directory to copy the file to. token: Auth token. overwrite: If True overwrite the file if it exists. Returns: If aff4_urn points to a file, returns path to the downloaded file. Otherwise returns None. By default file will only be overwritten if file size differs.
juraj-google-style
def unpack_x_y_sample_weight(data): if isinstance(data, list): data = tuple(data) if not isinstance(data, tuple): return (data, None, None) elif len(data) == 1: return (data[0], None, None) elif len(data) == 2: return (data[0], data[1], None) elif len(data) == 3: return (data[0], data[1], data[2]) error_msg = f'Data is expected to be in format `x`, `(x,)`, `(x, y)`, or `(x, y, sample_weight)`, found: {data}' raise ValueError(error_msg)
Unpacks user-provided data tuple. This is a convenience utility to be used when overriding `Model.train_step`, `Model.test_step`, or `Model.predict_step`. This utility makes it easy to support data of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`. Example: >>> features_batch = ops.ones((10, 5)) >>> labels_batch = ops.zeros((10, 5)) >>> data = (features_batch, labels_batch) >>> # `y` and `sample_weight` will default to `None` if not provided. >>> x, y, sample_weight = unpack_x_y_sample_weight(data) >>> sample_weight is None True Args: data: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`. Returns: The unpacked tuple, with `None`s for `y` and `sample_weight` if they are not provided.
github-repos
def _write_reqs(amend: bool = False, stage: bool = False): LOGGER.info('writing requirements') base_cmd = 'pipenv lock -r' _write_reqs_file(f'{base_cmd}', 'requirements.txt') _write_reqs_file(f'{base_cmd} -d', 'requirements-dev.txt') files_to_add = ['Pipfile', 'requirements.txt', 'requirements-dev.txt'] if amend: CTX.repo.amend_commit(append_to_msg='update requirements [auto]', files_to_add=files_to_add) elif stage: CTX.repo.stage_subset(*files_to_add)
Writes the requirement files Args: amend: amend last commit with changes stage: stage changes
juraj-google-style
def generate_filename(self, file_type, time_identifier=None, extension_name=None): time_str = time_identifier if time_identifier is None: time_str = mobly_logger.get_log_file_timestamp() elif isinstance(time_identifier, runtime_test_info.RuntimeTestInfo): time_str = time_identifier.signature filename_tokens = [file_type] if self.debug_tag != self.serial: filename_tokens.append(self.debug_tag) filename_tokens.extend([self.serial, self.model, time_str]) filename_str = ','.join(filename_tokens) if extension_name is not None: filename_str = '%s.%s' % (filename_str, extension_name) filename_str = mobly_logger.sanitize_filename(filename_str) self.log.debug('Generated filename: %s', filename_str) return filename_str
Generates a name for an output file related to this device. The name follows the pattern: {file type},{debug_tag},{serial},{model},{time identifier}.{ext} "debug_tag" is only added if it's different from the serial. "ext" is added if specified by user. Args: file_type: string, type of this file, like "logcat" etc. time_identifier: string or RuntimeTestInfo. If a `RuntimeTestInfo` is passed in, the `signature` of the test case will be used. If a string is passed in, the string itself will be used. Otherwise the current timestamp will be used. extension_name: string, the extension name of the file. Returns: String, the filename generated.
github-repos
def of_type_function(function: _evaluation.OfTypeFunction, operand_result: Optional[_sql_data_types.IdentifierSelect], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select: if operand_result is None: raise ValueError('ofType() cannot be called without an operand.') if len(params_result) != 1: raise ValueError('ofType must have a data type parameter.') sql_alias = 'ofType_' attribute = function.base_type_str return_type = _sql_data_types.get_standard_sql_data_type(function.return_type) return dataclasses.replace(operand_result, select_part=operand_result.select_part.dot(attribute, return_type, sql_alias=sql_alias))
Generates Spark SQL representing the FHIRPath ofType() function. Returns the resource of the given type, typically used in choice types. Args: function: The FHIRPath AST `OfTypeFunction` node operand_result: The expression which is being evaluated params_result: The parameter passed in to function Returns: A compiled Spark SQL expression. Raises: ValueError: When the function is called without an operand, or the length of params_result is not one.
github-repos
def quote(src_string, return_expr=False): node = parse_string(src_string) body = node.body if len(body) == 1: if isinstance(body[0], gast.Expr) and not return_expr: out = body[0].value else: out = body[0] else: out = node return out
Go from source code to AST nodes. This function returns a tree without enclosing `Module` or `Expr` nodes. Args: src_string: The source code to parse. return_expr: Whether or not to return a containing expression. This can be set to `True` if the result is to be part of a series of statements. Returns: An AST of the given source code.
juraj-google-style
def _get_commands(dist): py_files = (f for f in setuptools.findall() if (os.path.splitext(f)[1].lower() == '.py')) pkg_files = (f for f in py_files if (_get_package_name(f) in dist.packages)) commands = {} for file_name in pkg_files: with open(file_name) as py_file: module = typing.cast(ast.Module, ast.parse(py_file.read())) module_name = _get_module_name(file_name) _append_commands(commands, module_name, _get_module_commands(module)) _append_commands(commands, module_name, _get_class_commands(module)) _append_commands(commands, module_name, _get_function_commands(module)) return commands
Find all commands belonging to the given distribution. Args: dist: The Distribution to search for docopt-compatible docstrings that can be used to generate command entry points. Returns: A dictionary containing a mapping of primary commands to sets of subcommands.
codesearchnet
def unlock_kinetis_abort_clear(): flags = registers.AbortRegisterFlags() flags.STKCMPCLR = 1 flags.STKERRCLR = 1 flags.WDERRCLR = 1 flags.ORUNERRCLR = 1 return flags.value
Returns the abort register clear code. Returns: The abort register clear code.
codesearchnet
def __init__(self, recommender, repeat=True, maxlen=None, debug=False): self.rec = recommender self.feature_rec = issubclass(recommender.__class__, FeatureRecommenderMixin) self.repeat = repeat self.item_buffer = deque(maxlen=maxlen) self.debug = debug
Set/initialize parameters. Args: recommender (Recommender): Instance of a recommender which has been initialized. repeat (boolean): Choose whether the same item can be repeatedly interacted with by the same user. maxlen (int): Size of an item buffer which stores most recently observed items.
juraj-google-style
def event_vars(self): if (self._event_vars is None): self._event_vars = list(self.iter_event_vars()) return self._event_vars
The service's eventable variables. Returns: list(tuple): A list of (variable name, data type) tuples.
codesearchnet
def get(self): config = self.config if (not config): return None response = dict() response.update(self._parse_source_interface(config)) response.update(self._parse_servers(config)) return response
Returns the current NTP configuration The Ntp resource returns the following: * source_interface (str): The interface port that specifies NTP server * servers (list): A list of the NTP servers that have been assigned to the node. Each entry in the list is a key/value pair of the name of the server as the key and None or 'prefer' as the value if the server is preferred. Returns: A Python dictionary object of key/value pairs that represents the current NTP configuration of the node:: { "source_interface": 'Loopback0', 'servers': [ { '1.1.1.1': None }, { '1.1.1.2': 'prefer' }, { '1.1.1.3': 'prefer' }, { '1.1.1.4': None }, ] }
codesearchnet
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
github-repos
def np_auc(self, predictions, labels, weights): if weights is None: weights = np.ones(np.size(predictions)) is_positive = labels > 0 num_positives = np.sum(weights[is_positive]) num_negatives = np.sum(weights[~is_positive]) inds = np.argsort(-predictions) sorted_labels = labels[inds] sorted_weights = weights[inds] is_positive = sorted_labels > 0 tp = np.cumsum(sorted_weights * is_positive) / num_positives return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
Computes the AUC explicitly using Numpy. Args: predictions: an ndarray with shape [N]. labels: an ndarray with shape [N]. weights: an ndarray with shape [N]. Returns: the area under the ROC curve.
github-repos
def __init__(self, parser): self.parser = parser args = getattr(self, 'args', ()) for arg in args: flags = arg[0] if not isinstance(flags, tuple): flags = (flags,) self.parser.add_argument(*flags, **arg[1])
Initialize the subcommand with its parser Args: parser (Parser) : an Argparse ``Parser`` instance to configure with the args for this subcommand. This method will automatically add all the arguments described in ``self.args``. Subclasses can perform any additional customizations on ``self.parser``.
juraj-google-style
def project_group_token(self, group_tokens): projected_group_tokens = self.mlp_inter(group_tokens) projected_group_tokens = self.norm_post_tokens(projected_group_tokens) return projected_group_tokens
Args: group_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels] Returns: projected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels]
github-repos
def __init__(self, parent): super(ModuleUIFrame, self).__init__(parent, padding=8) self.columnconfigure(0, weight=1) self.rowconfigure(1, weight=1) chat = ChatFrame(self) chat.grid(column=0, row=0, sticky="W E N S")
The console tab for bethebot Args: parent: tk or ttk element
juraj-google-style
def set_float(self, option, value): if not isinstance(value, float): raise TypeError("Value must be a float") self.options[option] = value
Set a float option. Args: option (str): name of option. value (float): value of the option. Raises: TypeError: Value must be a float.
juraj-google-style
def _BreakpointEvent(self, event, frame): error_status = None if (event != native.BREAKPOINT_EVENT_HIT): error_status = _BREAKPOINT_EVENT_STATUS[event] elif (self.definition.get('action') == 'LOG'): error_status = self._collector.Log(frame) if (not error_status): return if (not self._SetCompleted()): return self.Clear() if error_status: self._CompleteBreakpoint({'status': error_status}) return collector = capture_collector.CaptureCollector(self.definition, self.data_visibility_policy) try: collector.Collect(frame) except BaseException as e: native.LogInfo(('Internal error during data capture: %s' % repr(e))) error_status = {'isError': True, 'description': {'format': ('Internal error while capturing data: %s' % repr(e))}} self._CompleteBreakpoint({'status': error_status}) return except: native.LogInfo('Unknown exception raised') error_status = {'isError': True, 'description': {'format': 'Unknown internal error'}} self._CompleteBreakpoint({'status': error_status}) return self._CompleteBreakpoint(collector.breakpoint, is_incremental=False)
Callback invoked by cdbg_native when breakpoint hits. Args: event: breakpoint event (see kIntegerConstants in native_module.cc). frame: Python stack frame of breakpoint hit or None for other events.
codesearchnet
def operation_spec(input_element_spec: Optional[pg.typing.ValueSpec]=None, output_element_spec: Optional[pg.typing.ValueSpec]=None) -> pg.typing.ValueSpec: if input_element_spec is None: input_element_spec = pg.typing.Object(pg.DNA) if output_element_spec is None: output_element_spec = pg.typing.Object(pg.DNA) return pg.typing.Callable([pg.typing.List(input_element_spec)], returns=pg.typing.List(output_element_spec))
Returns the value spec (PyGlove typing) for an evolutionary operation. We use `pg.typing.Callable` instead of `pg.typing.Object(Operation)` to make it more flexible to plugin lambdas. Args: input_element_spec: The value spec for input element. output_element_spec: The value spec for output element. Returns: A value spec for Callable[[List[DNA]], List[DNA]].
github-repos
def merge(self, options): merged = copy.deepcopy(self) if options is None: return merged if options.bytes_per_pack != 0: merged.bytes_per_pack = options.bytes_per_pack if options.timeout_seconds is not None: merged.timeout_seconds = options.timeout_seconds if options.implementation != CommunicationImplementation.AUTO: merged.implementation = options.implementation return merged
Merges with another options and returns a new one. Values specified in the `options` takes precedence if they're not the default. Args: options: a `tf.distribute.experimental.CollectiveCommunication`. Returns: A new `tf.distribute.experimental.CollectiveCommunication`.
github-repos
class SecondaryBufferedQuantileTracker(WindowedTracker, QuantileTracker): def __init__(self, master: QuantileTracker, q): assert isinstance(master, BufferedQuantileTracker), 'Cannot create secondary tracker from non-BufferedQuantileTracker' self._master = master super().__init__(self._master._window_mode) QuantileTracker.__init__(self, q) self._sorted_items = self._master._sorted_items def push(self, x): pass def get(self): return self._master._get_helper(self._master._sorted_items, self._q)
A secondary quantile tracker that shares its data with a master tracker. This tracker acts as a read-only view of the master tracker's data, providing quantile calculations without maintaining its own independent buffer. It relies on the master's sorted items for quantile estimations. Args: master: The BufferedQuantileTracker instance to share data with. q: A list of quantiles to track.
github-repos
def create_initial(self, address_values): with self._lock: for add, val in address_values: self._state[add] = _ContextFuture(address=add, result=val)
Create futures from inputs with the current value for that address at the start of that context. Args: address_values (list of tuple): The tuple is string, bytes of the address and value.
juraj-google-style
def supported_tifs(self): buf = ctypes.c_uint32() self._dll.JLINKARM_TIF_GetAvailable(ctypes.byref(buf)) return buf.value
Returns a bitmask of the supported target interfaces. Args: self (JLink): the ``JLink`` instance Returns: Bitfield specifying which target interfaces are supported.
codesearchnet
def topic_update(channel, topic_channel): if topic_channel is not None: try: channel_message = "Topic channel is now `{}`.".format(topic_channel.name) except Exception as e: logger.exception(e) channel_message = "Topic channel has been updated." else: channel_message = "Topic channel has been cleared." gui = ui_embed.UI( channel, "Topic channel updated", channel_message, modulename=modulename, colour=modulecolor_info ) return gui
Creates an embed UI for the topic update Args: channel (discord.Channel): The Discord channel to bind the embed to topic_channel: The new topic channel Returns: embed: The created embed
juraj-google-style
def output_types(self): return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), self._element_spec)
Returns the type of each component of an element of this iterator. Returns: A nested structure of `tf.DType` objects corresponding to each component of an element of this dataset.
github-repos
def inputs(self, name): self._closed() step = self._get_step(name, make_copy=False) return step.list_inputs()
List input names and types of a step in the steps library. Args: name (str): name of a step in the steps library.
juraj-google-style
def active_futures(ticker: str, dt) -> str: t_info = ticker.split() prefix, asset = ' '.join(t_info[:-1]), t_info[-1] info = const.market_info(f'{prefix[:-1]}1 {asset}') f1, f2 = f'{prefix[:-1]}1 {asset}', f'{prefix[:-1]}2 {asset}' fut_2 = fut_ticker(gen_ticker=f2, dt=dt, freq=info['freq']) fut_1 = fut_ticker(gen_ticker=f1, dt=dt, freq=info['freq']) fut_tk = bdp(tickers=[fut_1, fut_2], flds='Last_Tradeable_Dt', cache=True) if pd.Timestamp(dt).month < pd.Timestamp(fut_tk.last_tradeable_dt[0]).month: return fut_1 d1 = bdib(ticker=f1, dt=dt) d2 = bdib(ticker=f2, dt=dt) return fut_1 if d1[f1].volume.sum() > d2[f2].volume.sum() else fut_2
Active futures contract Args: ticker: futures ticker, i.e., ESA Index, Z A Index, CLA Comdty, etc. dt: date Returns: str: ticker name
juraj-google-style
def load_state(self, in_path): with open(in_path, "r") as infile: state = json.load(infile) self.restore_state(state)
Load the current state of this emulated object from a file. The file should have been produced by a previous call to save_state. Args: in_path (str): The path to the saved state dump that you wish to load.
juraj-google-style
async def remember(self, request, user_id): ticket = self._new_ticket(request, user_id) (await self.remember_ticket(request, ticket))
Called to store the userid for a request. This function creates a ticket from the request and user_id, and calls the abstract function remember_ticket() to store the ticket. Args: request: aiohttp Request object. user_id: String representing the user_id to remember
codesearchnet
def pop(self, identifier, default=None): if identifier in self.children: item = self[identifier] self.__delitem__(identifier) return item else: return default
Pop a node of the AttrTree using its path string. Args: identifier: Path string of the node to return default: Value to return if no node is found Returns: The node that was removed from the AttrTree
juraj-google-style
def _bytestringToLong(bytestring, signed=False, numberOfRegisters=2): _checkString(bytestring, 'byte string', minlength=4, maxlength=4) _checkBool(signed, description='signed parameter') _checkInt(numberOfRegisters, minvalue=2, maxvalue=2, description='number of registers') formatcode = '>' if signed: formatcode += 'l' else: formatcode += 'L' return _unpack(formatcode, bytestring)
Convert a bytestring to a long integer. Long integers (32 bits = 4 bytes) are stored in two consecutive 16-bit registers in the slave. Args: * bytestring (str): A string of length 4. * signed (bool): Whether large positive values should be interpreted as negative values. * numberOfRegisters (int): Should be 2. For error checking only. Returns: The numerical value (int). Raises: ValueError, TypeError
juraj-google-style
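A self-contained sketch of the conversion _bytestringToLong above performs, using only the standard library struct module; the byte values are illustrative:

import struct

registers = b'\x00\x00\x04\xd2'                      # two consecutive 16-bit registers
print(struct.unpack('>L', registers)[0])             # 1234, unsigned big-endian (signed=False)
print(struct.unpack('>l', b'\xff\xff\xfb\x2e')[0])   # -1234 when interpreted as signed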
def byte_swap_tflite_buffer(tflite_model, from_endiness, to_endiness): if tflite_model is None: return None model = convert_bytearray_to_object(tflite_model) byte_swap_tflite_model_obj(model, from_endiness, to_endiness) return convert_object_to_bytearray(model)
Generates a new model byte array after byte swapping its buffers field. Args: tflite_model: TFLite flatbuffer in a byte array. from_endiness: The original endianness format of the buffers in tflite_model. to_endiness: The destined endianness format of the buffers in tflite_model. Returns: TFLite flatbuffer in a byte array, after being byte swapped to to_endiness format.
github-repos
def atoms(lines): conv_charge_table = {0: 0, 1: 3, 2: 2, 3: 1, 4: 0, 5: (- 1), 6: (- 2), 7: (- 3)} results = {} for (i, line) in enumerate(lines): symbol = line[31:34].rstrip() try: atom = Atom(symbol) except KeyError: raise ValueError(symbol) xpos = float(line[0:10]) ypos = float(line[10:20]) zpos = float(line[20:30]) atom.coords = (xpos, ypos, zpos) atom.mass_diff = int(line[34:37]) old_sdf_charge = int(line[37:40]) atom.charge = conv_charge_table[old_sdf_charge] if (old_sdf_charge == 4): atom.radical = 1 results[(i + 1)] = {'atom': atom} return results
Parse atom block into atom objects Returns: dict: networkx nodes
codesearchnet
def front(self, n): new_dtypes = (self._dtype_cache if (self._dtype_cache is None) else self._dtype_cache[:n]) if self._is_transposed: result = self.__constructor__(self.data.transpose().take(0, n).transpose(), self.index, self.columns[:n], new_dtypes) result._is_transposed = True else: result = self.__constructor__(self.data.take(1, n), self.index, self.columns[:n], new_dtypes) return result
Returns the first n columns. Args: n: Integer containing the number of columns to return. Returns: DataManager containing the first n columns of the original DataManager.
codesearchnet
def set_state(self, vid, value=None, default=False, disable=False): cmds = self.command_builder('state', value=value, default=default, disable=disable) return self.configure_vlan(vid, cmds)
Configures the VLAN state EosVersion: 4.13.7M Args: vid (str): The VLAN ID to configure value (str): The value to set the vlan state to default (bool): Configures the vlan state to its default value disable (bool): Negates the vlan state Returns: True if the operation was successful otherwise False
juraj-google-style
def _ReadTimestamp(self, filename): if not os.path.exists(filename): return None try: timestamp_file = open(filename, 'r') timestamp_string = timestamp_file.read().strip() except IOError as e: self.log.warning('error opening timestamp file: %s', e) timestamp_string = None else: timestamp_file.close() self.log.debug('read timestamp %s from file %r', timestamp_string, filename) if timestamp_string is not None: try: timestamp = int(calendar.timegm(time.strptime(timestamp_string + ' UTC', '%Y-%m-%dT%H:%M:%SZ %Z'))) except ValueError as e: self.log.error('cannot parse timestamp file %r: %s', filename, e) timestamp = None else: timestamp = None now = self._GetCurrentTime() if timestamp and timestamp > now: self.log.warning('timestamp %r from %r is in the future, now is %r', timestamp_string, filename, now) if timestamp - now >= 60 * 60: self.log.info('Resetting timestamp to now.') timestamp = now return timestamp
Return a timestamp from a file. The timestamp file format is a single line, containing a string in the ISO-8601 format YYYY-MM-DDThh:mm:ssZ (i.e. UTC time). We do not support all ISO-8601 formats for reasons of convenience in the code. Timestamps internal to nss_cache deliberately do not carry milliseconds. Args: filename: A String naming the file to read from. Returns: An int with the number of seconds since epoch, or None if the timestamp file doesn't exist or has errors.
github-repos
def __getitem__(self, index): del index raise NotImplementedError
Gets batch at position `index`. Args: index: position of the batch in the PyDataset. Returns: A batch
github-repos
def Py3GetFullArgSpec(fn): try: sig = inspect._signature_from_callable(fn, skip_bound_arg=True, follow_wrapper_chains=True, sigcls=inspect.Signature) except Exception: raise TypeError('Unsupported callable.') args = [] varargs = None varkw = None kwonlyargs = [] defaults = () annotations = {} defaults = () kwdefaults = {} if sig.return_annotation is not sig.empty: annotations['return'] = sig.return_annotation for param in sig.parameters.values(): kind = param.kind name = param.name if kind is inspect._POSITIONAL_ONLY: args.append(name) elif kind is inspect._POSITIONAL_OR_KEYWORD: args.append(name) if param.default is not param.empty: defaults += (param.default,) elif kind is inspect._VAR_POSITIONAL: varargs = name elif kind is inspect._KEYWORD_ONLY: kwonlyargs.append(name) if param.default is not param.empty: kwdefaults[name] = param.default elif kind is inspect._VAR_KEYWORD: varkw = name if param.annotation is not param.empty: annotations[name] = param.annotation if not kwdefaults: kwdefaults = None if not defaults: defaults = None return inspect.FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwdefaults, annotations)
An alternative to the builtin getfullargspec. The builtin inspect.getfullargspec uses: `skip_bound_args=False, follow_wrapped_chains=False` in order to be backwards compatible. This function instead skips bound args (self) and follows wrapped chains. Args: fn: The function or class of interest. Returns: An inspect.FullArgSpec namedtuple with the full arg spec of the function.
github-repos
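A short usage sketch for Py3GetFullArgSpec above, assuming it is in scope; unlike inspect.getfullargspec it drops the bound self argument of a bound method:

class Greeter:
    def greet(self, name, punctuation='!'):
        return 'hello ' + name + punctuation

spec = Py3GetFullArgSpec(Greeter().greet)  # bound method, so 'self' is skipped
print(spec.args)       # ['name', 'punctuation']
print(spec.defaults)   # ('!',)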
def recipe_to_python(name, description, instructions, tasks, parameters={}, project=None, client_credentials=None, user_credentials=None, service_credentials=None): tasks = json_expand_queries(tasks) code = DISCLAIMER code += 'import argparse\n' code += 'import textwrap\n\n' code += 'from starthinker.util.configuration import Configuration\n' imported = set() for task in tasks: script, task = next(iter(task.items())) if script not in imported: code += 'from starthinker.task.%s.run import %s\n' % (script, script) imported.add(script) code += '\n' code += '\n' fields = json_get_fields(tasks) if fields: code += 'def recipe_%s(config, %s):\n' % (name, ', '.join([f['name'] for f in fields])) else: code += 'def recipe_%s(config):\n' % name if description or fields: code += ' \n\n' for task in tasks: script, task = next(iter(task.items())) code += ' %s(config, %s)\n\n' % (script, dict_to_python(task, indent=1)) code += '\n' code += '\n' code += 'if __name__ == "__main__":\n' code += parameters_to_argparse(description, instructions, fields) code += '\n' code += ' args = parser.parse_args()\n' code += '\n' code += ' config = Configuration(\n project=args.project,\n user=args.user,\n service=args.service,\n client=args.client,\n key=args.key,\n verbose=args.verbose\n )' code += '\n\n' if fields: code += ' recipe_%s(config, %s)\n' % (name, ', '.join(['args.%s' % f['name'] for f in fields])) else: code += ' recipe_%s(config)\n' % name return code
Converts a JSON recipe into a stand-alone Python example. Sets up multiple steps to execute recipe: 1. Install starthinker from repository 2. Get Cloud Project ID. 3. Get Client Credentials ( optional if User Credentials exist ). 4. Enter Recipe parameters if fields present. 5. Execute recipe tasks. Args: * name: (string) The name of the recipe. * description: (string) A description of the recipe. * instructions: (string) Recipe manual instructions, for example connecting datastudios. * tasks: (list) The task JSON to execute. * parameters: (dict) Values for field parameters in tasks, optional. * project: (string) The GCP project id. * client_credentials: (string) The GCP Desktop Client Credentials in JSON string. * user_credentials: (string) Not used, placeholder. * service_credentials: (string) Not used, placeholder. Returns: * (string) Rendered example source code to be written to a py file.
github-repos
def _calculate_expected_result(dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config): if config.use_gumbel_for_cells: gumbel_dist = torch.distributions.RelaxedBernoulli(temperature=config.temperature, logits=dist_per_cell.logits * config.temperature) scaled_probability_per_cell = gumbel_dist.sample() else: scaled_probability_per_cell = dist_per_cell.probs scaled_probability_per_cell = scaled_probability_per_cell / numeric_values_scale * input_mask_float count_result = torch.sum(scaled_probability_per_cell, dim=1) numeric_values_masked = torch.where(torch.isnan(numeric_values), torch.zeros_like(numeric_values), numeric_values) sum_result = torch.sum(scaled_probability_per_cell * numeric_values_masked, dim=1) avg_approximation = config.average_approximation_function if avg_approximation == AverageApproximationFunction.RATIO: average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION) elif avg_approximation == AverageApproximationFunction.FIRST_ORDER: ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1 average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell / ex, dim=1) elif avg_approximation == AverageApproximationFunction.SECOND_ORDER: ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1 pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell) var = torch.sum(pointwise_var, dim=1, keepdim=True) - pointwise_var multiplier = (var / torch.square(ex) + 1) / ex average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell * multiplier, dim=1) else: raise ValueError(f'Invalid average_approximation_function: {config.average_approximation_function}') if config.use_gumbel_for_aggregation: gumbel_dist = torch.distributions.RelaxedOneHotCategorical(config.aggregation_temperature, logits=logits_aggregation[:, 1:]) aggregation_op_only_probs = gumbel_dist.sample() else: aggregation_op_only_probs = nn.functional.softmax(logits_aggregation[:, 1:] / config.aggregation_temperature, dim=-1) all_results = torch.cat([torch.unsqueeze(sum_result, dim=1), torch.unsqueeze(average_result, dim=1), torch.unsqueeze(count_result, dim=1)], dim=1) expected_result = torch.sum(all_results * aggregation_op_only_probs, dim=1) return expected_result
Calculates the expected result given cell and aggregation probabilities. Args: dist_per_cell (`torch.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Numeric values of every token. Nan for tokens which are not numeric values. numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config ([`TapasConfig`]): Model configuration class with all the hyperparameters of the model Returns: expected_result (`torch.FloatTensor` of shape `(batch_size,)`): The expected result per example.
github-repos
def add_mutex_switch(parser, dest, arguments=set(), default=None, single_arg=False, required=False): if (default is not None): assert (default in arguments) if isinstance(arguments, set): arguments = {k: None for k in arguments} if (not single_arg): mg = parser.add_mutually_exclusive_group(required=required) for (name, help_text) in arguments.items(): kwargs = {'action': 'store_const', 'dest': dest, 'const': name, 'help': help_text} if (default == name): kwargs['default'] = name mg.add_argument('--{}'.format(name), **kwargs) return mg else: kwargs = {'dest': dest, 'type': str, 'default': default, 'help': '\n'.join(('{}: {}'.format(k, v) for (k, v) in arguments.items())), 'choices': list(arguments.keys())} return parser.add_argument('--{}'.format(dest), **kwargs)
Adds mutually exclusive switch arguments. Args: arguments: a dictionary that maps switch name to helper text. Use sets to skip help texts.
codesearchnet
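A usage sketch for add_mutex_switch above, assuming it is in scope; each dictionary key becomes a --flag that writes its own name into the shared dest:

import argparse

parser = argparse.ArgumentParser()
add_mutex_switch(parser, dest='mode',
                 arguments={'fast': 'Prefer speed.', 'exact': 'Prefer accuracy.'},
                 default='fast')
print(parser.parse_args([]).mode)           # 'fast' (the flag registered with the default)
print(parser.parse_args(['--exact']).mode)  # 'exact'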
def convert_op_hints_to_stubs(session=None, graph_def=None, write_callback=lambda graph_def, comments: None): if session is not None and graph_def is not None: raise ValueError('Provide only one of session and graph_def.') if session is not None: return _convert_op_hints_to_stubs_helper(session.graph_def, write_callback) elif graph_def is not None: return _convert_op_hints_to_stubs_helper(graph_def, write_callback) else: raise ValueError('Must specify session or graph_def as input.')
Converts a graphdef with LiteOp hints into stub operations. This is used to prepare for toco conversion of complex intrinsic usages. Note: only one of session or graph_def should be used, not both. Args: session: A TensorFlow session that contains the graph to convert. graph_def: A graph def that we should convert. write_callback: A function pointer that can be used to write intermediate steps of graph transformation (optional). Returns: A new graphdef with all ops contained in OpHints being replaced by a single op call with the right parameters. Raises: ValueError: If both session and graph_def are provided.
github-repos
def anti_commutator(A, B=None): if B: return A * B + B * A return SPre(A) + SPost(A)
If ``B != None``, return the anti-commutator :math:`\{A,B\}`, otherwise return the super-operator :math:`\{A,\cdot\}`. The super-operator :math:`\{A,\cdot\}` maps any other operator ``B`` to the anti-commutator :math:`\{A, B\} = A B + B A`. Args: A: The first operator to form all anti-commutators of. B: The second operator to form the anti-commutator of, or None. Returns: SuperOperator: The linear superoperator :math:`\{A,\cdot\}`
juraj-google-style
def update_pipeline_stage(self, stage): payload = None if type(stage) is not StreakStage: return requests.codes.bad_request, None payload = stage.to_dict(rw = True) try: uri = '/'.join([self.api_uri, self.pipelines_suffix, stage.attributes['pipelineKey'], self.stages_suffix, stage.attributes['key'] ]) except KeyError: return requests.codes.bad_request, None code, data = self._req('post', uri , json.dumps(payload)) return code, data
Updates a pipeline stage with the provided attributes. Args: stage (StreakStage): stage object carrying the required pipelineKey and key attributes plus the fields to update (e.g. name) return (status code, stage dict)
juraj-google-style
def create_new(mapreduce_id=None, gettime=datetime.datetime.now): if (not mapreduce_id): mapreduce_id = MapreduceState.new_mapreduce_id() state = MapreduceState(key_name=mapreduce_id, last_poll_time=gettime()) state.set_processed_counts([], []) return state
Create a new MapreduceState. Args: mapreduce_id: Mapreduce id as string. gettime: Used for testing.
codesearchnet
def remove_son(self, son): self._sons = [x for x in self._sons if x.node_id != son.node_id]
Remove the son node. Do nothing if the node is not a son. Args: son: the son node to remove
juraj-google-style
def __init__(self, name, common_channel_mask=True, **kwargs): self.common_channel_mask = common_channel_mask super(GenericCompositor, self).__init__(name, **kwargs)
Collect custom configuration values. Args: common_channel_mask (bool): If True, mask all the channels with a mask that combines all the invalid areas of the given data.
juraj-google-style
def __init__(self, incoming_client=False): from neo.Network.NodeLeader import NodeLeader self.leader = NodeLeader.Instance() self.nodeid = self.leader.NodeId self.remote_nodeid = random.randint(1294967200, 4294967200) self.endpoint = '' self.address = None self.buffer_in = bytearray() self.myblockrequests = set() self.bytes_in = 0 self.bytes_out = 0 self.sync_mode = MODE_CATCHUP self.host = None self.port = None self.incoming_client = incoming_client self.handshake_complete = False self.expect_verack_next = False self.start_outstanding_data_request = {HEARTBEAT_BLOCKS: 0, HEARTBEAT_HEADERS: 0} self.block_loop = None self.block_loop_deferred = None self.peer_loop = None self.peer_loop_deferred = None self.header_loop = None self.header_loop_deferred = None self.disconnect_deferred = None self.disconnecting = False logger.debug(f"{self.prefix} new node created, not yet connected")
Create an instance. The NeoNode class is the equivalent of the C# RemoteNode.cs class. It represents a single Node connected to the client. Args: incoming_client (bool): True if node is an incoming client and the handshake should be initiated.
juraj-google-style
def ensure_s3_bucket(s3_client, bucket_name, bucket_region): try: s3_client.head_bucket(Bucket=bucket_name) except botocore.exceptions.ClientError as e: if e.response['Error']['Message'] == "Not Found": logger.debug("Creating bucket %s.", bucket_name) create_args = {"Bucket": bucket_name} location_constraint = s3_bucket_location_constraint( bucket_region ) if location_constraint: create_args["CreateBucketConfiguration"] = { "LocationConstraint": location_constraint } s3_client.create_bucket(**create_args) elif e.response['Error']['Message'] == "Forbidden": logger.exception("Access denied for bucket %s. Did " + "you remember to use a globally unique name?", bucket_name) raise else: logger.exception("Error creating bucket %s. Error %s", bucket_name, e.response) raise
Ensure an s3 bucket exists, if it does not then create it. Args: s3_client (:class:`botocore.client.Client`): An s3 client used to verify and create the bucket. bucket_name (str): The bucket being checked/created. bucket_region (str, optional): The region to create the bucket in. If not provided, will be determined by s3_client's region.
juraj-google-style
def read_config(contents): file_obj = io.StringIO(contents) config = six.moves.configparser.ConfigParser() config.readfp(file_obj) return config
Reads pylintrc config into native ConfigParser object. Args: contents (str): The contents of the file containing the INI config. Returns: ConfigParser.ConfigParser: The parsed configuration.
codesearchnet
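A brief usage sketch for read_config above, assuming six is installed and the function is in scope; the INI contents are illustrative:

contents = '[MESSAGES CONTROL]\ndisable = missing-docstring\n'
config = read_config(contents)
print(config.sections())                          # ['MESSAGES CONTROL']
print(config.get('MESSAGES CONTROL', 'disable'))  # 'missing-docstring'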
def _StopExtractionProcesses(self, abort=False): logger.debug('Stopping extraction processes.') self._StopMonitoringProcesses() if abort: self._AbortTerminate() logger.debug('Emptying task queue.') self._task_queue.Empty() for _ in self._processes_per_pid: try: self._task_queue.PushItem(plaso_queue.QueueAbort(), block=False) except errors.QueueFull: logger.warning('Task queue full, unable to push abort message.') self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT) self._task_queue.Close(abort=abort) if not abort: self._AbortTerminate() self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT) self._task_queue.Close(abort=True) self._AbortKill()
Stops the extraction processes. Args: abort (bool): True to indicate the stop is issued on abort.
juraj-google-style
def execute_add(args, root_dir=None): command = ' '.join(args['command']) instruction = {'command': command, 'path': os.getcwd()} print_command_factory('add')(instruction, root_dir)
Add a new command to the daemon queue. Args: args['command'] (list(str)): The actual program call. Something like ['ls', '-a'] or ['ls -al'] root_dir (string): The path to the root directory the daemon is running in.
codesearchnet
def get_gains_losses(changes): res = {'gains': [], 'losses': []} for change in changes: if (change > 0): res['gains'].append(change) else: res['losses'].append((change * (- 1))) logger.debug('Gains: {0}'.format(res['gains'])) logger.debug('Losses: {0}'.format(res['losses'])) return res
Categorizes changes into gains and losses Args: changes: List of floats of price changes between entries in JSON. Returns: Dict of changes with keys 'gains' and 'losses'. All values are positive.
codesearchnet
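A quick usage sketch for get_gains_losses above, assuming the function and its module-level logger are in scope; the price changes are illustrative:

changes = [1.25, -0.75, 2.0, -3.5]
result = get_gains_losses(changes)
print(result)  # {'gains': [1.25, 2.0], 'losses': [0.75, 3.5]}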
def _compute_useful_frames(tb, num): defining_frame_index = _find_index_of_defining_frame(tb) innermost_excluded = min(defining_frame_index + 2 + 1, len(tb)) outermost_included = max(innermost_excluded - num, 0) return tb[outermost_included:innermost_excluded]
Return a list of frames, which form a 'useful' stack. Starting from the defining frame to the outermost one, this method computes the contiguous portion of the 'useful' stack trace and returns the selected frames. Args: tb: A list of traceback frames (as from Operation.traceback). num: total number of frames to return. Returns: A list of frames.
github-repos
def get_params(img, output_size): w, h = img.size th, tw = output_size if w == tw and h == th: return 0, 0, h, w i = random.randint(0, h - th) j = random.randint(0, w - tw) return i, j, th, tw
Get parameters for ``crop`` for a random crop. Args: img (PIL Image): Image to be cropped. output_size (tuple): Expected output size of the crop. Returns: tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
juraj-google-style
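A usage sketch for the random-crop get_params above, assuming Pillow is installed and the function is in scope:

import random
from PIL import Image

random.seed(0)
img = Image.new('RGB', (100, 80))           # PIL size is (width, height)
i, j, th, tw = get_params(img, (32, 32))    # top, left, height, width of the crop
crop = img.crop((j, i, j + tw, i + th))     # PIL crop box is (left, upper, right, lower)
print(crop.size)                            # (32, 32)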
def _perp_eigendecompose(matrix: np.ndarray, rtol: float=1e-05, atol: float=1e-08) -> Tuple[np.array, List[np.ndarray]]: (vals, cols) = np.linalg.eig(matrix) vecs = [cols[:, i] for i in range(len(cols))] for i in range(len(vecs)): vecs[i] = np.reshape(vecs[i], (len(vecs[i]), vecs[i].ndim)) n = len(vecs) groups = _group_similar(list(range(n)), (lambda k1, k2: np.allclose(vals[k1], vals[k2], rtol=rtol))) for g in groups: (q, _) = np.linalg.qr(np.hstack([vecs[i] for i in g])) for i in range(len(g)): vecs[g[i]] = q[:, i] return (vals, vecs)
An eigendecomposition that ensures eigenvectors are perpendicular. numpy.linalg.eig doesn't guarantee that eigenvectors from the same eigenspace will be perpendicular. This method uses Gram-Schmidt to recover a perpendicular set. It further checks that all eigenvectors are perpendicular and raises an ArithmeticError otherwise. Args: matrix: The matrix to decompose. rtol: Relative threshold for determining whether eigenvalues are from the same eigenspace and whether eigenvectors are perpendicular. atol: Absolute threshold for determining whether eigenvalues are from the same eigenspace and whether eigenvectors are perpendicular. Returns: The eigenvalues and column eigenvectors. The i'th eigenvalue is associated with the i'th column eigenvector. Raises: ArithmeticError: Failed to find perpendicular eigenvectors.
codesearchnet
def _find_image_bounding_boxes(filenames, image_to_bboxes): num_image_bbox = 0 bboxes = [] for f in filenames: basename = os.path.basename(f) if (basename in image_to_bboxes): bboxes.append(image_to_bboxes[basename]) num_image_bbox += 1 else: bboxes.append([]) print(('Found %d images with bboxes out of %d images' % (num_image_bbox, len(filenames)))) return bboxes
Find the bounding boxes for a given image file. Args: filenames: list of strings; each string is a path to an image file. image_to_bboxes: dictionary mapping image file names to a list of bounding boxes. This list contains 0+ bounding boxes. Returns: List of bounding boxes for each image. Note that each entry in this list might contain from 0+ entries corresponding to the number of bounding box annotations for the image.
codesearchnet
def generate_rpcs(self, address): rpc_list = [] for offset in range(2, len(self.data), 16): rpc = (address, rpcs.SET_CONFIG_VARIABLE, self.var_id, offset - 2, self.data[offset:offset + 16]) rpc_list.append(rpc) return rpc_list
Generate the RPCs needed to stream this config variable to a tile. Args: address (int): The address of the tile that we should stream to. Returns: list of tuples: A list of argument tuples for each RPC. These tuples can be passed to EmulatedDevice.rpc to actually make the RPCs.
juraj-google-style
def remove(self, keys, name=None): return self.erase(keys, name)
Removes `keys` and its associated values from the table. If a key is not present in the table, it is silently ignored. Args: keys: Keys to remove. Can be a tensor of any shape. Must match the table's key type. name: A name for the operation (optional). Returns: The created Operation. Raises: TypeError: when `keys` do not match the table data types.
github-repos
def autocov(x): acorr = autocorr(x) varx = np.var(x, ddof=1) * (len(x) - 1) / len(x) acov = acorr * varx return acov
Compute autocovariance estimates for every lag for the input array. Args: x (array-like): An array containing MCMC samples. Returns: np.ndarray: An array of the same size as the input array.
juraj-google-style
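A sketch checking the helper above against a direct NumPy identity, assuming autocov and its autocorr dependency are in scope and that autocorr returns a normalized autocorrelation with 1.0 at lag zero:

import numpy as np

np.random.seed(7)
x = np.random.randn(500)
acov = autocov(x)
print(np.isclose(acov[0], np.var(x)))  # lag-0 autocovariance equals the biased sample variance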
def gt(left: Any, right: Any) -> bool: return lt(right, left)
Returns True if a value is symbolically greater than the other value. Refer to :func:`pyglove.lt` for the definition of symbolic comparison. Args: left: The left-hand value to compare. right: The right-hand value to compare. Returns: True if the left value is symbolically greater than the right value.
github-repos
def needs_to_run(G, target, in_mem_shas, from_store, settings): force = settings["force"] sprint = settings["sprint"] if(force): sprint("Target rebuild is being forced so {} needs to run".format(target), level="verbose") return True node_dict = get_the_node_dict(G, target) if 'output' in node_dict: for output in acts.get_all_outputs(node_dict): if not os.path.isfile(output): outstr = "Output file '{}' is missing so it needs to run" sprint(outstr.format(output), level="verbose") return True if 'dependencies' not in node_dict: sprint("Target {} has no dependencies and needs to run".format(target), level="verbose") return True for dep in node_dict['dependencies']: if ('files' in in_mem_shas and dep not in in_mem_shas['files'] or 'files' not in in_mem_shas): outstr = "Dep '{}' doesn't exist in memory so it needs to run" sprint(outstr.format(dep), level="verbose") return True now_sha = in_mem_shas['files'][dep]['sha'] if ('files' in from_store and dep not in from_store['files'] or 'files' not in from_store): outst = "Dep '{}' doesn't exist in shastore so it needs to run" sprint(outst.format(dep), level="verbose") return True old_sha = from_store['files'][dep]['sha'] if now_sha != old_sha: outstr = "There's a mismatch for dep {} so it needs to run" sprint(outstr.format(dep), level="verbose") return True sprint("Target '{}' doesn't need to run".format(target), level="verbose") return False
Determines if a target needs to run. This can happen in two ways: (a) If a dependency of the target has changed (b) If an output of the target is missing Args: G: The graph we are going to build target: The name of the target in_mem_shas: The dictionary of the current shas held in memory from_store: The dictionary of the shas from the shastore settings: The settings dictionary Returns: True if the target needs to be run, False if not
juraj-google-style
async def find_person(self, query): url = self.url_builder('search/person', dict(), url_params=OrderedDict([('query', query), ('include_adult', False)])) data = (await self.get_data(url)) if (data is None): return return [Person.from_json(item, self.config['data'].get('images')) for item in data.get('results', [])]
Retrieve person data by search query. Arguments: query (:py:class:`str`): Query to search for. Returns: :py:class:`list`: Possible matches.
codesearchnet
def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult: known_args, pipeline_args = parse_known_args(argv) pipeline_options = PipelineOptions(pipeline_args) pipeline_options.view_as(SetupOptions).save_main_session = save_main_session pipeline = test_pipeline if not test_pipeline: pipeline = beam.Pipeline(options=pipeline_options) data = pipeline | read_csv(known_args.input) features = ['longitude', 'latitude', 'median_income'] housing_features = to_pcollection(data[features]) model = housing_features | beam.Map(lambda record: list(record)) | 'Train clustering model' >> OnlineClustering(OnlineKMeans, n_clusters=6, batch_size=256, cluster_args={}, checkpoints_path=known_args.checkpoints_path) _ = housing_features | beam.Map(lambda sample: np.array(sample)) | 'RunInference' >> AssignClusterLabelsInMemoryModel(model=pvalue.AsSingleton(model), model_id='kmeans', n_clusters=6, batch_size=512) | beam.Map(print) result = pipeline.run() result.wait_until_finish() return result
Args: argv: Command line arguments defined for this example. save_main_session: Used for internal testing. test_pipeline: Used for internal testing.
github-repos
def fswap(p, q): yield cirq.ISWAP(q, p), cirq.Z(p) ** 1.5 yield cirq.Z(q) ** 1.5
Decompose the Fermionic SWAP gate into two single-qubit gates and one iSWAP gate. Args: p: the id of the first qubit q: the id of the second qubit
juraj-google-style
def memoise(cls, func): @functools.wraps(func) def f(*a): for arg in a: if isinstance(arg, User): user = arg break else: raise ValueError('One position argument must be a User') func_key = (func, tuple(a)) cache = cls.get_cache(user) if (func_key not in cache): cache[func_key] = func(*a) return cache[func_key] return f
Decorator that stores the result of the decorated function in the user's results cache until the batch completes. Keyword arguments are not yet supported. Arguments: func (callable(*a)): The function whose results we want to store. The positional arguments, ``a``, are used as cache keys. Returns: callable(*a): The memoising version of ``func``.
codesearchnet
def _abort_workflow(pb: ProcessingBlock, workflow_stage_dict: dict, docker: DockerSwarmClient): _abort_flag = False if _abort_flag: for workflow_stage in pb.workflow_stages: for (service_id, _) in workflow_stage_dict[workflow_stage.id]['services'].items(): docker.delete_service(service_id) LOG.info('Deleted Service Id %s', service_id) return True return False
Abort the workflow. TODO(BMo): This function currently does nothing as the abort flag is hardcoded to False! This function is used by `execute_processing_block`. Args: pb (ProcessingBlock): Configuration database Processing block object. workflow_stage_dict (dict): Workflow stage metadata dictionary. docker (DockerClient): Docker Swarm Client object. Returns: bool, True if the stage is aborted, otherwise False.
codesearchnet
def from_row_partitions(cls, row_partitions, dtype=None): if not row_partitions: raise ValueError('row_partitions cannot be empty') inner_shape = [row_partitions[-1].nvals()] return DynamicRaggedShape(row_partitions, inner_shape, dtype=dtype)
Create a shape from row_partitions. Args: row_partitions: a nonempty list of RowPartition objects. dtype: the dtype to use, or None to use the row_partitions dtype. Returns: a DynamicRaggedShape with inner_rank==1.
github-repos
def __init__(self, state_view): self._state_view = state_view self.get_setting = lru_cache(maxsize=128)(self._get_setting)
Creates a SettingsView, given a StateView for merkle tree access. Args: state_view (:obj:`StateView`): a state view
juraj-google-style
def _to_node(self, operand: BuilderOperand) -> _evaluation.ExpressionNode: if isinstance(operand, Builder): return operand.node else: as_message = None if operand is None else self._primitive_to_message(operand) primitive_type = _fhir_path_data_types.primitive_type_from_type_code(type(operand).__name__) return _evaluation.LiteralNode(self.node.context, as_message, self._primitive_to_fhir_path(operand), primitive_type)
Returns a node from a Builder or Comparable. Args: operand: An input to the operator that is either a comparable or Builder. Returns: An ExpressionNode.
github-repos
def line(xo: int, yo: int, xd: int, yd: int, py_callback: Callable[[int, int], bool]) -> bool:
    for x, y in line_iter(xo, yo, xd, yd):
        if not py_callback(x, y):
            break
    else:
        return True
    return False
Iterate over a line using a callback function. Your callback function will take x and y parameters and return True to continue iteration or False to stop iteration and return. This function includes both the start and end points. Args: xo (int): X starting point. yo (int): Y starting point. xd (int): X destination point. yd (int): Y destination point. py_callback (Callable[[int, int], bool]): A callback which takes x and y parameters and returns bool. Returns: bool: False if the callback cancels the line iteration by returning False or None, otherwise True. .. deprecated:: 2.0 Use `line_iter` instead.
codesearchnet
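A small usage sketch, assuming the `line_iter` helper that `line` relies on is available (it comes from the same python-tcod module). The callback collects every visited point and never cancels.

points = []

def visit(x, y):
    points.append((x, y))
    return True  # keep iterating; returning False would stop early

completed = line(0, 0, 3, 1, visit)
print(completed, points)  # True, plus the points from (0, 0) through (3, 1)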
def start(self, workers=1, max_queue_size=10): if self.use_multiprocessing: self.executor_fn = self._get_executor_init(workers) else: self.executor_fn = lambda _: get_pool_class(False)(workers) self.workers = workers self.queue = queue.Queue(max_queue_size) self.stop_signal = threading.Event() self.run_thread = threading.Thread(target=self._run) self.run_thread.daemon = True self.run_thread.start()
Starts the handler's workers. Args: workers: Number of workers. max_queue_size: queue size (when full, workers could block on `put()`)
github-repos
def __init__(self, args=None, cmd=None): if args is None or cmd is None: raise errors.FormatError('Missing args or cmd value.') super(CommandSourceType, self).__init__() self.args = args self.cmd = cmd
Initializes a source type. Args: args (list[str]): arguments to the command to run. cmd (str): command to run. Raises: FormatError: when args or cmd is not set.
juraj-google-style
def ReadVarString(self, max=sys.maxsize): length = self.ReadVarInt(max) return self.unpack(str(length) + 's', length)
Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator. Args: max (int): (Optional) maximum number of bytes to read. Returns: bytes: the string data that was read.
juraj-google-style
def AddCommandLineArguments(cls, argument_group, category=None, names=None): for (helper_name, helper_class) in sorted(cls._helper_classes.items()): if ((category and (helper_class.CATEGORY != category)) or (names and (helper_name not in names))): continue helper_class.AddArguments(argument_group)
Adds command line arguments to a configuration object. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group. category (Optional[str]): category of helpers to apply to the group, such as storage, output, where None will apply the arguments to all helpers. The category can be used to add arguments to a specific group of registered helpers. names (Optional[list[str]]): names of argument helpers to apply, where None will apply the arguments to all helpers.
codesearchnet
def weight_memory_size(weights): unique_weights = {id(w): w for w in weights}.values() total_memory_size = 0 for w in unique_weights: total_memory_size += _compute_memory_size(w.shape, w.dtype) return total_memory_size / 8
Compute the memory footprint for weights based on their dtypes. Args: weights: An iterable containing the weights whose memory size should be computed. Returns: The total memory size (in bytes) of the weights.
github-repos
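A hedged sketch of the expected arithmetic. It assumes `_compute_memory_size` (not shown above) returns the size in bits — the trailing `/ 8` suggests a bits-to-bytes conversion — and uses NumPy arrays only because they expose the `shape` and `dtype` attributes the helper needs.

import numpy as np

w1 = np.zeros((1024, 1024), dtype='float32')  # 1_048_576 elements, 32 bits each
w2 = np.zeros((1024,), dtype='float32')       #     1_024 elements, 32 bits each
# Expected: (1_048_576 + 1_024) * 32 / 8 = 4_198_400 bytes (~4 MiB).
# Passing w1 twice changes nothing: weights are de-duplicated by object id.
print(weight_memory_size([w1, w2, w1]))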
def most_specific_common_supertype(self, others: Sequence[trace.TraceType]) -> Optional['TypeSpec']: if any((type(self) is not type(other) for other in others)): return None has_supertype = True def make_supertype_attribute(attribute_self, *attribute_others): nonlocal has_supertype if not has_supertype: return if isinstance(attribute_self, trace.TraceType): attribute_supertype = attribute_self.most_specific_common_supertype(attribute_others) if attribute_supertype is None: has_supertype = False return return attribute_supertype else: if not all((attribute_self == attribute_other for attribute_other in attribute_others)): has_supertype = False return return attribute_self try: serialized_supertype = nest.map_structure(make_supertype_attribute, self._serialize(), *(o._serialize() for o in others)) except (ValueError, TypeError): return None return self._deserialize(serialized_supertype) if has_supertype else None
Returns the most specific supertype TypeSpec of `self` and `others`. Implements the tf.types.experimental.func.TraceType interface. If not overridden by a subclass, the default behavior is to assume the TypeSpec is covariant upon attributes that implement TraceType and invariant upon the rest of the attributes as well as the structure and type of the TypeSpec. Args: others: A sequence of TraceTypes. Returns: The most specific common supertype TypeSpec, or `None` if no such supertype exists.
github-repos
def run(self, args): kwargs = {} kwargs['path'] = args.file[0] kwargs['addr'] = args.addr kwargs['on_progress'] = pylink.util.flash_progress_callback jlink = self.create_jlink(args) _ = jlink.flash_file(**kwargs) print('Flashed device successfully.')
Flashes the device connected to the J-Link. Args: self (FlashCommand): the ``FlashCommand`` instance args (Namespace): the arguments passed on the command-line Returns: ``None``
codesearchnet
def _UpdateCampaignDSASetting(client, campaign_id, feed_id):
    campaign_service = client.GetService('CampaignService', version='v201809')
    selector = {
        'fields': ['Id', 'Settings'],
        'predicates': [{
            'field': 'Id',
            'operator': 'EQUALS',
            'values': [campaign_id]
        }]
    }
    response = campaign_service.get(selector)
    if response['totalNumEntries']:
        campaign = response['entries'][0]
    else:
        raise ValueError('No campaign with ID "%d" exists.' % campaign_id)
    if not campaign['settings']:
        raise ValueError('This is not a DSA campaign.')
    dsa_setting = None
    campaign_settings = campaign['settings']
    for setting in campaign_settings:
        if setting['Setting.Type'] == 'DynamicSearchAdsSetting':
            dsa_setting = setting
            break
    if dsa_setting is None:
        raise ValueError('This is not a DSA campaign.')
    dsa_setting['pageFeed'] = {
        'feedIds': [feed_id]
    }
    dsa_setting['useSuppliedUrlsOnly'] = True
    operation = {
        'operand': {
            'id': campaign_id,
            'settings': campaign_settings
        },
        'operator': 'SET'
    }
    campaign_service.mutate([operation])
    print('DSA page feed for campaign ID "%d" was updated with feed ID "%d".' % (campaign_id, feed_id))
Updates the campaign DSA setting to DSA pagefeeds. Args: client: an AdWordsClient instance. campaign_id: a str Campaign ID. feed_id: a str page Feed ID. Raises: ValueError: If the given campaign is found not to be a dynamic search ad campaign.
juraj-google-style
def describe_enum_value(enum_value): enum_value_descriptor = EnumValueDescriptor() enum_value_descriptor.name = six.text_type(enum_value.name) enum_value_descriptor.number = enum_value.number return enum_value_descriptor
Build descriptor for Enum instance. Args: enum_value: Enum value to provide descriptor for. Returns: Initialized EnumValueDescriptor instance describing the Enum instance.
juraj-google-style
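A short usage sketch, assuming the enum comes from protorpc (the `six`-based style above suggests that lineage, but the exact package is an assumption).

from protorpc import messages

class Color(messages.Enum):
    RED = 1
    GREEN = 2

descriptor = describe_enum_value(Color.RED)
print(descriptor.name, descriptor.number)  # RED 1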
def configure_from_environment(self, whitelist_keys=False, whitelist=None): self._configure_from_mapping(os.environ, whitelist_keys=whitelist_keys, whitelist=whitelist) return self
Configure from the entire set of available environment variables. This is really a shorthand for grabbing ``os.environ`` and passing to :meth:`_configure_from_mapping`. As always, only uppercase keys are loaded. Keyword Args: whitelist_keys (bool): Should we whitelist the keys by only pulling those that are already present in the config? Useful for avoiding adding things like ``LESSPIPE`` to your app config. If no whitelist is provided, we use the current config keys as our whitelist. whitelist (list[str]): An explicit list of keys that should be allowed. If provided and ``whitelist_keys`` is true, we will use that as our whitelist instead of pre-existing app config keys. Returns: fleaker.base.BaseApplication: Returns itself.
codesearchnet
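Hypothetical usage; `app` stands in for a fleaker application instance, and the pre-seeded config key is only there because, per the docstring, whitelisting falls back to the keys already present in the config.

import os

os.environ['SECRET_KEY'] = 'not-a-real-secret'
app.config['SECRET_KEY'] = 'placeholder'        # pre-existing key => whitelisted
app.configure_from_environment(whitelist_keys=True)
print(app.config['SECRET_KEY'])                 # 'not-a-real-secret'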
def clean_file(self):
    data = self.cleaned_data['file']
    available_parsers = self.get_parsers()
    for parser in available_parsers:
        try:
            return parser.parse_file(data)
        except parsers.ParserError:
            pass
    raise forms.ValidationError(
        'No parser could read the file. Tried with parsers %s.'
        % ', '.join(force_text(p) for p in available_parsers))
Analyse the uploaded file, and return the parsed lines. Returns: tuple of tuples of cells content (as text).
codesearchnet
def create_monitoring_info(urn, type_urn, payload, labels=None) -> metrics_pb2.MonitoringInfo:
    try:
        return metrics_pb2.MonitoringInfo(urn=urn, type=type_urn, labels=labels or {}, payload=payload)
    except TypeError as e:
        raise RuntimeError(f'Failed to create MonitoringInfo for urn {urn} type {type_urn} labels {labels} and payload {payload}') from e
Return a MonitoringInfo for the given URN, type, payload and labels. Args: urn: The URN of the monitoring info/metric. type_urn: The URN of the type of the monitoring info/metric, e.g. beam:metrics:sum_int_64, beam:metrics:latest_int_64. payload: The payload field to use in the monitoring info. labels: The label dictionary to use in the MonitoringInfo.
github-repos
def _parse_date(dataset_date, date_format): if (date_format is None): try: return parser.parse(dataset_date) except (ValueError, OverflowError) as e: raisefrom(HDXError, 'Invalid dataset date!', e) else: try: return datetime.strptime(dataset_date, date_format) except ValueError as e: raisefrom(HDXError, 'Invalid dataset date!', e)
Parse dataset date from string using specified format. If no format is supplied, the function will guess. For unambiguous formats, this should be fine. Args: dataset_date (str): Dataset date string date_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None. Returns: datetime.datetime
codesearchnet
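A short sketch of the two code paths; in the source this is a private helper (likely a static method on a Dataset-like class), so calling it bare as below is an assumption.

print(_parse_date('2007-01-25', None))        # format guessed by dateutil: 2007-01-25 00:00:00
print(_parse_date('25/01/2007', '%d/%m/%Y'))  # parsed with the explicit format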
def parse(self, string, strict=True): if isinstance(string, bytes): errors = 'strict' if strict else 'replace' string = string.decode(self.encoding, errors=errors) if not self.raw: self.raw = string else: self.raw += string lines = unfold_lines(string).splitlines() for line in lines: if line: if ':' not in line: if strict: raise ValueError('Field missing colon.') else: continue name, value = line.split(':', 1) name = name.strip() value = value.strip() self.add(name, value)
Parse the string or bytes. Args: strict (bool): If True, errors will not be ignored Raises: :class:`ValueError` if the record is malformed.
juraj-google-style
def send(self, msg): slipDriver = sliplib.Driver() slipData = slipDriver.send(msg) res = self._serialPort.write(slipData) return res
Encodes data to slip protocol and then sends over serial port Uses the SlipLib module to convert the message data into SLIP format. The message is then sent over the serial port opened with the instance of the Faraday class used when invoking send(). Args: msg (bytes): Bytes format message to send over serial port. Returns: int: Number of bytes transmitted over the serial port.
codesearchnet
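Hypothetical usage; the Faraday constructor arguments are not shown above, so assume `faraday` was created around an already-open serial port.

payload = b'Hello Faraday'
bytes_written = faraday.send(payload)
print(bytes_written)  # bytes written to the port, including SLIP framing overhead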
def dump_migration_session_state(raw): class BlockStyle(str): pass class SessionDumper(yaml.SafeDumper): pass def str_block_formatter(dumper, data): return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|') SessionDumper.add_representer(BlockStyle, str_block_formatter) raw = deepcopy(raw) for step in raw: step['output'] = BlockStyle(step['output']) step['traceback'] = BlockStyle(step['traceback']) return yaml.dump(raw, Dumper=SessionDumper)
Serialize a migration session state to yaml using nicer formatting Args: raw: object to serialize Returns: string (of yaml) Specifically, this forces the "output" member of state step dicts (e.g. state[0]['output']) to use block formatting. For example, rather than this: - migration: [app, migration_name] output: "line 1\nline2\nline3" You get this: - migration: [app, migration_name] output: | line 1 line 2 line 3
juraj-google-style
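A small, self-contained example of the formatting this produces; the migration names are made up.

session = [{
    'migration': ['myapp', '0002_add_field'],
    'output': 'line 1\nline 2\nline 3',
    'traceback': 'Traceback (most recent call last):\n  ...',
}]
print(dump_migration_session_state(session))
# The 'output' and 'traceback' values are emitted as literal '|' blocks.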
def split_list_by_n(l, n):
    n = max(1, n)
    return [l[i:i + n] for i in range(0, len(l), n)]
Split a list into lists of size n. Args: l: List of stuff. n: Size of new lists. Returns: list: List of lists each of size n derived from l.
codesearchnet
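For example:

print(split_list_by_n([1, 2, 3, 4, 5, 6, 7], 3))  # [[1, 2, 3], [4, 5, 6], [7]]
print(split_list_by_n([1, 2, 3], 0))              # n is clamped to 1: [[1], [2], [3]]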
def logs_urlpatterns(admin_view=(lambda x: x)): return [url('^$', admin_view(LogsMenu.as_view()), name='logs'), url('^status_codes$', admin_view(LogsStatusCodes.as_view()), name='logs_status_codes'), url('^status_codes_by_date$', admin_view(LogsStatusCodesByDate.as_view()), name='logs_status_codes_by_date'), url('^most_visited_pages$', admin_view(LogsMostVisitedPages.as_view()), name='logs_most_visited_pages')]
Return the URL patterns for the logs views. Args: admin_view (callable): admin_view method from an AdminSite instance. Returns: list: the URL patterns for the logs views.
codesearchnet
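A hedged sketch of wiring these patterns into a project URLconf of the same Django vintage as the `url()` calls above.

from django.conf.urls import include, url
from django.contrib import admin

urlpatterns = [
    url(r'^admin/logs/', include(logs_urlpatterns(admin.site.admin_view))),
]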