code: string (lengths 20–4.93k)
docstring: string (lengths 33–1.27k)
source: string (3 classes)
def GetFileEntryByPathSpec(self, path_spec):
    return encrypted_stream_file_entry.EncryptedStreamFileEntry(
        self._resolver_context, self, path_spec, is_root=True,
        is_virtual=True)

Retrieves a file entry for a path specification.

Args:
    path_spec (PathSpec): a path specification.

Returns:
    EncryptedStreamFileEntry: a file entry or None if not available.
juraj-google-style
def segment(self, source, language=None):
    if language and language not in self.supported_languages:
        raise ValueError(
            'Language {} is not supported by NLAPI segmenter'.format(
                language))
    chunks = ChunkList()
    results = tinysegmenter.tokenize(source)
    seek = 0
    for word in results:
        word = word.strip()
        if not word:
            continue
        if source[seek: seek + len(word)] != word:
            assert source[seek] == ' '
            assert source[seek + 1: seek + len(word) + 1] == word
            chunks.append(Chunk.space())
            seek += 1
        dependency = None
        if word in _PARTICLES or word in _AUX_VERBS or is_hiragana(word):
            dependency = False
        chunk = Chunk(word, dependency=dependency)
        if chunk.is_punct():
            chunk.dependency = chunk.is_open_punct()
        chunks.append(chunk)
        seek += len(word)
    chunks.resolve_dependencies()
    return chunks

Returns a chunk list from the given sentence.

Args:
    source (str): Source string to segment.
    language (:obj:`str`, optional): A language code.

Returns:
    A chunk list. (:obj:`budou.chunk.ChunkList`)

Raises:
    ValueError: If :obj:`language` is given and it is not included in
        :obj:`supported_languages`.
juraj-google-style
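A minimal usage sketch for the segmenter above; the class name and construction here are assumptions standing in for whatever class defines segment(), not budou's confirmed public API:

# Hypothetical usage; `TinysegmenterSegmenter` is an assumed name for the
# class that exposes segment() above.
seg = TinysegmenterSegmenter()
chunks = seg.segment(u'今日は良い天気です。', language='ja')
for chunk in chunks:
    print(chunk.word)  # assumption: chunks expose a .word attribute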
def GreaterThanOrEqualTo(self, value):
    self._awql = self._CreateSingleValueCondition(value, '>=')
    return self._query_builder

Sets the type of the WHERE clause as "greater than or equal to".

Args:
    value: The value to be used in the WHERE condition.

Returns:
    The query builder that this WHERE builder links to.
codesearchnet
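Because the method returns the parent query builder, WHERE conditions chain fluently. A hypothetical sketch; the builder construction and the Where()/Build() names around it are assumptions:

# Assumed googleads-style report query builder.
query = (builder
         .Where('Impressions').GreaterThanOrEqualTo(1000)
         .Build())  # Where() yields the WHERE builder; >= links back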
def setValues(self, values):
    ncols = self.getNumCols()
    nindices = self.getNumIndices()
    for key, value in values.items():
        key = Utils.convToList(key)
        assert len(key) == nindices
        value = Utils.convToList(value)
        assert len(value) == ncols - nindices
        self.addRow(key + value)

Set the values of a DataFrame from a dictionary.

Args:
    values: Dictionary with the values to set.
juraj-google-style
def _GetGdbThreadMapping(self, position):
    if len(gdb.selected_inferior().threads()) == 1:
        # A single-threaded inferior maps trivially.
        return {position[1]: 1}
    thread_line_regexp = r'\s*\**\s*([0-9]+)\s+[a-zA-Z]+\s+([x0-9a-fA-F]+)\s.*'
    output = gdb.execute('info threads', to_string=True)
    matches = [re.match(thread_line_regexp, line)
               for line in output.split('\n')[1:]]
    return {int(match.group(2), 16): int(match.group(1))
            for match in matches if match}

Gets a mapping from python tid to gdb thread num.

There's no way to get the thread ident from a gdb thread. We only get the
"ID of the thread, as assigned by GDB", which is completely useless for
everything except talking to gdb. So in order to translate between these
two, we have to execute 'info threads' and parse its output. Note that this
may only work on linux, and only when python was compiled to use pthreads.
It may work elsewhere, but we won't guarantee it.

Args:
    position: array of pid, tid, framedepth specifying the requested
        position.

Returns:
    A dictionary of the form {python_tid: gdb_threadnum}.
codesearchnet
def int64_user_gauge(namespace, name, metric,
                     ptransform=None) -> metrics_pb2.MonitoringInfo:
    labels = create_labels(
        ptransform=ptransform, namespace=namespace, name=name)
    if isinstance(metric, GaugeData):
        coder = coders.VarIntCoder()
        value = metric.value
        timestamp = metric.timestamp
    else:
        raise TypeError(
            'Expected GaugeData metric type but received %s with value %s' %
            (type(metric), metric))
    payload = _encode_gauge(coder, timestamp, value)
    return create_monitoring_info(
        USER_GAUGE_URN, LATEST_INT64_TYPE, payload, labels)

Return the gauge monitoring info for the URN, metric and labels.

Args:
    namespace: User-defined namespace of gauge metric.
    name: Name of gauge metric.
    metric: The GaugeData containing the metrics.
    ptransform: The ptransform id used as a label.
github-repos
def start(self, wait=False):
    if self._status is not TaskStatus.IDLE:
        raise RuntimeError(
            "Cannot start %s in state %s" % (self, self._status))
    self._status = TaskStatus.STARTED
    STARTED_TASKS.add(self)
    self._start()
    if wait:
        self.wait()
    return self.return_values

Start a task.

This function depends on the underlying implementation of _start, which any
subclass of ``Task`` should implement.

Args:
    wait (bool): Whether or not to wait on the task to finish before
        returning from this function. Default `False`.

Raises:
    RuntimeError: If the task has already been started without a subsequent
        call to ``reset()``.
juraj-google-style
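A toy subclass sketch of the contract described above (subclasses implement _start, results land in return_values); the base class import and attribute names are taken on the docstring's word:

class SleepTask(Task):  # assumption: Task is this library's base class
    def _start(self):
        import time
        time.sleep(0.1)
        self.return_values = {'slept': 0.1}

task = SleepTask()
task.start(wait=True)  # blocks until the toy task finishes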
def get_random_transform(self, img_shape, seed=None):
    img_row_axis = self.row_axis - 1
    img_col_axis = self.col_axis - 1
    if seed is not None:
        np.random.seed(seed)
    if self.rotation_range:
        theta = np.random.uniform(-self.rotation_range, self.rotation_range)
    else:
        theta = 0
    if self.height_shift_range:
        try:
            # A 1-D array-like gives a discrete choice of shifts.
            tx = np.random.choice(self.height_shift_range)
            tx *= np.random.choice([-1, 1])
        except ValueError:
            # A scalar gives a continuous uniform range.
            tx = np.random.uniform(-self.height_shift_range,
                                   self.height_shift_range)
        if np.max(self.height_shift_range) < 1:
            tx *= img_shape[img_row_axis]
    else:
        tx = 0
    if self.width_shift_range:
        try:
            ty = np.random.choice(self.width_shift_range)
            ty *= np.random.choice([-1, 1])
        except ValueError:
            ty = np.random.uniform(-self.width_shift_range,
                                   self.width_shift_range)
        if np.max(self.width_shift_range) < 1:
            ty *= img_shape[img_col_axis]
    else:
        ty = 0
    if self.shear_range:
        shear = np.random.uniform(-self.shear_range, self.shear_range)
    else:
        shear = 0
    if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
        zx, zy = (1, 1)
    else:
        zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
    flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip
    flip_vertical = (np.random.random() < 0.5) * self.vertical_flip
    channel_shift_intensity = None
    if self.channel_shift_range != 0:
        channel_shift_intensity = np.random.uniform(
            -self.channel_shift_range, self.channel_shift_range)
    brightness = None
    if self.brightness_range is not None:
        brightness = np.random.uniform(self.brightness_range[0],
                                       self.brightness_range[1])
    transform_parameters = {'theta': theta,
                            'tx': tx,
                            'ty': ty,
                            'shear': shear,
                            'zx': zx,
                            'zy': zy,
                            'flip_horizontal': flip_horizontal,
                            'flip_vertical': flip_vertical,
                            'channel_shift_intensity': channel_shift_intensity,
                            'brightness': brightness}
    return transform_parameters

Generates random parameters for a transformation.

Args:
    img_shape: Tuple of integers. Shape of the image that is transformed.
    seed: Random seed.

Returns:
    A dictionary containing randomly chosen parameters describing the
    transformation.
github-repos
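A usage sketch via Keras' ImageDataGenerator, which exposes this method; the parameter values are arbitrary examples:

from tensorflow.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                             height_shift_range=0.1, zoom_range=0.2)
# Draw one random parameter set for a 224x224 RGB image.
params = datagen.get_random_transform(img_shape=(224, 224, 3), seed=0)
print(params['theta'], params['zx'], params['zy'])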
def remove(self, layers):
    if not isinstance(layers, list):
        layers = [layers]
    for l in layers:
        if isinstance(l, string_types):
            if l not in self.layers:
                raise ValueError("There's no image/layer named '%s' in "
                                 "the masking stack!" % l)
            self.stack.remove(l)
        else:
            l = self.stack.pop(l)
        del self.layers[l]
    self.set_mask()

Remove one or more layers from the stack of masking layers.

Args:
    layers: An int, string or list of strings and/or ints. Ints are
        interpreted as indices in the stack to remove; strings are
        interpreted as names of layers to remove. Negative ints will also
        work--i.e., remove(-1) will drop the last layer added.
juraj-google-style
def _FormatMessages(self, format_string, short_format_string, event_values):
    message_string = self._FormatMessage(format_string, event_values)
    if short_format_string:
        short_message_string = self._FormatMessage(
            short_format_string, event_values)
    else:
        short_message_string = message_string
    if len(short_message_string) > 80:
        short_message_string = '{0:s}...'.format(short_message_string[:77])
    return message_string, short_message_string

Determines the formatted message strings.

Args:
    format_string (str): message format string.
    short_format_string (str): short message format string.
    event_values (dict[str, object]): event values.

Returns:
    tuple(str, str): formatted message string and short message string.
juraj-google-style
def _plot_cwt(ts, coefs, freqs, tsize=1024, fsize=512):
    import matplotlib.style
    import matplotlib as mpl
    mpl.style.use('classic')
    import matplotlib.pyplot as plt
    from scipy import interpolate
    channels = ts.shape[1]
    fig = plt.figure()
    for i in range(channels):
        rect = (0.1, 0.85 * (channels - i - 1) / channels + 0.1,
                0.8, 0.85 / channels)
        ax = fig.add_axes(rect)
        logpowers = np.log((coefs[:, :, i] * coefs[:, :, i].conj()).real)
        tmin, tmax = ts.tspan[0], ts.tspan[-1]
        fmin, fmax = freqs[0], freqs[-1]
        tgrid, fgrid = np.mgrid[tmin:tmax:tsize * 1j, fmin:fmax:fsize * 1j]
        gd = interpolate.interpn((ts.tspan, freqs), logpowers,
                                 (tgrid, fgrid)).T
        ax.imshow(gd, cmap='gnuplot2', aspect='auto', origin='lower',
                  extent=(tmin, tmax, fmin, fmax))
        ax.set_ylabel('freq (Hz)')
    fig.axes[0].set_title(u'log(power spectral density)')
    fig.axes[channels - 1].set_xlabel('time (s)')
    fig.show()

Plot time resolved power spectral density from cwt results.

Args:
    ts: the original Timeseries
    coefs: continuous wavelet transform coefficients as calculated by cwt()
    freqs: list of frequencies (in Hz) corresponding to coefs.
    tsize, fsize: size of the plot (time axis and frequency axis, in pixels)
juraj-google-style
def init_from_adversarial_batches(self, adv_batches):
    for idx, (adv_batch_id, adv_batch_val) in enumerate(
            iteritems(adv_batches)):
        work_id = ATTACK_WORK_ID_PATTERN.format(idx)
        self.work[work_id] = {
            'claimed_worker_id': None,
            'claimed_worker_start_time': None,
            'is_completed': False,
            'error': None,
            'elapsed_time': None,
            'submission_id': adv_batch_val['submission_id'],
            'shard_id': None,
            'output_adversarial_batch_id': adv_batch_id,
        }

Initializes work pieces from adversarial batches.

Args:
    adv_batches: dict with adversarial batches, could be obtained as
        AdversarialBatches.data
juraj-google-style
def mark_causative(self, institute, case, user, link, variant):
    display_name = variant['display_name']
    LOG.info("Mark variant {0} as causative in the case {1}".format(
        display_name, case['display_name']))
    LOG.info("Adding variant to causatives in case {0}".format(
        case['display_name']))
    LOG.info("Marking case {0} as solved".format(case['display_name']))
    updated_case = self.case_collection.find_one_and_update(
        {'_id': case['_id']},
        {
            '$push': {'causatives': variant['_id']},
            '$set': {'status': 'solved'}
        },
        return_document=pymongo.ReturnDocument.AFTER
    )
    LOG.info("Creating case event for marking {0}"
             " causative".format(variant['display_name']))
    self.create_event(
        institute=institute,
        case=case,
        user=user,
        link=link,
        category='case',
        verb='mark_causative',
        variant=variant,
        subject=variant['display_name'],
    )
    LOG.info("Creating variant event for marking {0}"
             " causative".format(case['display_name']))
    self.create_event(
        institute=institute,
        case=case,
        user=user,
        link=link,
        category='variant',
        verb='mark_causative',
        variant=variant,
        subject=variant['display_name'],
    )
    return updated_case

Create an event for marking a variant causative.

Arguments:
    institute (dict): A Institute object
    case (dict): Case object
    user (dict): A User object
    link (str): The url to be used in the event
    variant (variant): A variant object

Returns:
    updated_case(dict)
juraj-google-style
def noisy_moments(self, moments: 'Iterable[cirq.Moment]',
                  system_qubits: Sequence['cirq.Qid']
                  ) -> Sequence['cirq.OP_TREE']:
    if not hasattr(self.noisy_moment, '_not_overridden'):
        result = []
        for moment in moments:
            result.append(self.noisy_moment(moment, system_qubits))
        return result
    if not hasattr(self.noisy_operation, '_not_overridden'):
        result = []
        for moment in moments:
            result.append([self.noisy_operation(op) for op in moment])
        return result
    assert False, 'Should be unreachable.'

Adds possibly stateful noise to a series of moments.

Args:
    moments: The moments to add noise to.
    system_qubits: A list of all qubits in the system.

Returns:
    A sequence of OP_TREEs, with the k'th tree corresponding to the noisy
    operations for the k'th moment.
codesearchnet
def market_open(self, session, mins) -> Session:
    if session not in self.exch:
        return SessNA
    start_time = self.exch[session][0]
    return Session(start_time, shift_time(start_time, int(mins)))

Time intervals for market open.

Args:
    session: [allday, day, am, pm, night]
    mins: minutes after open

Returns:
    Session of start_time and end_time
juraj-google-style
def sg_sum(tensor, opt):
    return tf.reduce_sum(tensor, axis=opt.axis,
                         keep_dims=opt.keep_dims, name=opt.name)

Computes the sum of elements across axis of a tensor.

See `tf.reduce_sum()` in tensorflow.

Args:
    tensor: A `Tensor` with zero-padding (automatically given by chain).
    opt:
        axis: A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

Returns:
    A `Tensor`.
juraj-google-style
def FullTransactions(self):
    is_trimmed = False
    try:
        tx = self.Transactions[0]
        if type(tx) is str:
            is_trimmed = True
    except Exception:
        pass
    if not is_trimmed:
        return self.Transactions
    # Trimmed block: replace each hash with the full transaction.
    txs = []
    for hash in self.Transactions:
        tx, height = GetBlockchain().GetTransaction(hash)
        txs.append(tx)
    self.Transactions = txs
    return self.Transactions

Get the list of full Transaction objects.

Note: Transactions can be trimmed to contain only the header and the hash.
This will get the full data if trimmed transactions are found.

Returns:
    list: of neo.Core.TX.Transaction.Transaction objects.
codesearchnet
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
    response = None
    try:
        response = requests.get(
            urls.get_imageseries(self._giid),
            headers={
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Cookie': 'vid={}'.format(self._vid)},
            params={
                "numberOfImageSeries": int(number_of_imageseries),
                "offset": int(offset),
                "fromDate": "",
                "toDate": "",
                "onlyNotViewed": "",
                "_": self._giid})
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)

Get smartcam image series.

Args:
    number_of_imageseries (int): number of image series to get
    offset (int): skip offset amount of image series
juraj-google-style
def on_graph_def(self, graph_def, device_name, wall_time):
    raise NotImplementedError(
        'on_graph_def() is not implemented in the base servicer class')

Callback for Event proto received through the gRPC stream.

This Event proto carries a GraphDef, encoded as bytes, in its graph_def
field.

Args:
    graph_def: A GraphDef object.
    device_name: Name of the device on which the graph was created.
    wall_time: An epoch timestamp (in microseconds) for the graph.

Returns:
    `None` or an `EventReply` proto to be sent back to the client. If
    `None`, an `EventReply` proto construct with the default no-arg
    constructor will be sent back to the client.
github-repos
def get_plot(self, xlim=None, ylim=None):
    plt = pretty_plot(12, 8)
    base = 0.0
    i = 0
    for key, sp in self._spectra.items():
        if not self.stack:
            plt.plot(sp.x, sp.y + self.yshift * i, color=self.colors[i],
                     label=str(key), linewidth=3)
        else:
            plt.fill_between(sp.x, base, sp.y + self.yshift * i,
                             color=self.colors[i], label=str(key),
                             linewidth=3)
            base = sp.y + base
        plt.xlabel(sp.XLABEL)
        plt.ylabel(sp.YLABEL)
        i += 1
    if xlim:
        plt.xlim(xlim)
    if ylim:
        plt.ylim(ylim)
    plt.legend()
    leg = plt.gca().get_legend()
    ltext = leg.get_texts()
    plt.setp(ltext, fontsize=30)
    plt.tight_layout()
    return plt

Get a matplotlib plot showing the DOS.

Args:
    xlim: Specifies the x-axis limits. Set to None for automatic
        determination.
    ylim: Specifies the y-axis limits.
juraj-google-style
def getEstTraitCovar(self, term_i=None):
    assert self.P > 1, 'Trait covars not defined for single trait analysis'
    if term_i is None:
        # Sum the trait covariances over all terms.
        RV = SP.zeros((self.P, self.P))
        for term_i in range(self.n_terms):
            RV += self.vd.getTerm(term_i).getTraitCovar().K()
    else:
        assert term_i < self.n_terms, 'Term index non valid'
        RV = self.vd.getTerm(term_i).getTraitCovar().K()
    return RV

Returns explicitly the estimated trait covariance matrix.

Args:
    term_i: index of the term we are interested in
codesearchnet
def is_displayed(target):
    is_displayed = getattr(target, 'is_displayed', None)
    if not is_displayed or not callable(is_displayed):
        raise TypeError(
            "Target has no attribute 'is_displayed' or not callable")
    if not is_displayed():
        raise WebDriverException('element not visible')

Assert whether the target is displayed.

Args:
    target (WebElement): WebElement Object.

Returns:
    Return True if the element is displayed or return False otherwise.
codesearchnet
def read_uint64(self, little_endian=True):
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.unpack('%sQ' % endian, 8)

Read 8 bytes as an unsigned integer value from the stream.

Args:
    little_endian (bool): specify the endianness. (Default) Little endian.

Returns:
    int: the unsigned 64-bit integer value read.
juraj-google-style
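The endianness logic above boils down to a struct format string; a self-contained illustration with the standard library, independent of the stream class:

import struct

buf = b'\x01\x00\x00\x00\x00\x00\x00\x00'
little = struct.unpack('<Q', buf)[0]  # 1
big = struct.unpack('>Q', buf)[0]     # 72057594037927936
print(little, big)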
def get_crypt_class(self):
    crypt_type = getattr(settings, 'ENCRYPTED_FIELD_MODE',
                         'DECRYPT_AND_ENCRYPT')
    if crypt_type == 'ENCRYPT':
        crypt_class_name = 'Encrypter'
    elif crypt_type == 'DECRYPT_AND_ENCRYPT':
        crypt_class_name = 'Crypter'
    else:
        raise ImproperlyConfigured(
            'ENCRYPTED_FIELD_MODE must be either DECRYPT_AND_ENCRYPT or '
            'ENCRYPT, not %s.' % crypt_type)
    return getattr(keyczar, crypt_class_name)

Get the Keyczar class to use.

The class can be customized with the ENCRYPTED_FIELD_MODE setting. By
default, this setting is DECRYPT_AND_ENCRYPT. Set this to ENCRYPT to
disable decryption. This is necessary if you are only providing public
keys to Keyczar.

Returns:
    keyczar.Encrypter if ENCRYPTED_FIELD_MODE is ENCRYPT.
    keyczar.Crypter if ENCRYPTED_FIELD_MODE is DECRYPT_AND_ENCRYPT.

Override this method to customize the type of Keyczar class returned.
codesearchnet
async def pull(
    self,
    from_image: str,
    *,
    auth: Optional[Union[MutableMapping, str, bytes]] = None,
    tag: str = None,
    repo: str = None,
    stream: bool = False
) -> Mapping:
    image = from_image
    params = {"fromImage": image}
    headers = {}
    if repo:
        params["repo"] = repo
    if tag:
        params["tag"] = tag
    if auth is not None:
        registry, has_registry_host, _ = image.partition("/")
        if not has_registry_host:
            raise ValueError(
                "Image should have registry host "
                "when auth information is provided"
            )
        headers["X-Registry-Auth"] = compose_auth_header(auth, registry)
    response = await self.docker._query(
        "images/create", "POST", params=params, headers=headers
    )
    return await json_stream_result(response, stream=stream)

Similar to `docker pull`, pull an image locally.

Args:
    from_image: name of the image to pull
    repo: repository name given to an image when it is imported
    tag: if empty, all tags for the given image are pulled
    auth: special {'auth': base64} pull private repo
juraj-google-style
def __init__(self, devices=None, cross_device_ops=None, *, mesh=None):
    self._validate_init_args(mesh, devices)
    if not mesh:
        mesh = self._build_mesh_from_device_list(devices)
    extended = dtensor_strategy_extended.DTensorStrategyExtended(
        container_strategy=self, mesh=mesh)
    super().__init__(extended)
    self._mesh = mesh
    self._devices = devices

Synchronous training across multiple replicas on one machine.

Args:
    devices: a list of device strings, such as ['/gpu:0', '/gpu:1']. If
        both `mesh` and `devices` are None, all the available GPU/TPU will
        be used. If no accelerators are found, CPU is used.
    cross_device_ops: optional, a descendant of `CrossDeviceOps`. The value
        is ignored at the moment, and support will be added later.
    mesh: optional DTensor mesh for the computation. Note that either
        `mesh` or `devices` should be provided, and not both. The mesh
        should be 1D, and will be used to split the input data among that
        dimension.
github-repos
def invert_dict(d):
    inverted = collections.defaultdict(list)
    for key, value_list in d.items():
        for val in value_list:
            inverted[val].append(key)
    return inverted

Invert a dictionary.

Converts a dictionary (mapping strings to lists of strings) to a dictionary
that maps into the other direction.

Arguments:
    d: Dictionary to be inverted

Returns:
    A dictionary n with the property that if "y in d[x]", then "x in n[y]".
github-repos
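A worked example of the inversion property, using the function defined above:

d = {'a': ['x', 'y'], 'b': ['y']}
# 'y' appears in both d['a'] and d['b'], so it maps back to ['a', 'b'].
print(dict(invert_dict(d)))  # {'x': ['a'], 'y': ['a', 'b']}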
def _parse_date(dataset_date, date_format):
    if date_format is None:
        try:
            return parser.parse(dataset_date)
        except (ValueError, OverflowError) as e:
            raisefrom(HDXError, 'Invalid dataset date!', e)
    else:
        try:
            return datetime.strptime(dataset_date, date_format)
        except ValueError as e:
            raisefrom(HDXError, 'Invalid dataset date!', e)

Parse dataset date from string using specified format. If no format is
supplied, the function will guess. For unambiguous formats, this should be
fine.

Args:
    dataset_date (str): Dataset date string
    date_format (Optional[str]): Date format. If None is given, will
        attempt to guess. Defaults to None.

Returns:
    datetime.datetime
juraj-google-style
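The two parsing paths above, shown directly with the underlying libraries: dateutil guesses the format, strptime requires one.

from datetime import datetime
from dateutil import parser

print(parser.parse('2020-02-07'))                   # format guessed
print(datetime.strptime('07/02/2020', '%d/%m/%Y'))  # format explicit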
def phase_histogram(dts, times=None, nbins=30, colormap=mpl.cm.Blues):
    if times is None:
        times = np.linspace(dts.tspan[0], dts.tspan[-1], num=4)
    elif isinstance(times, numbers.Number):
        times = np.array([times], dtype=np.float64)
    indices = distob.gather(dts.tspan.searchsorted(times))
    if indices[-1] == len(dts.tspan):
        indices[-1] -= 1
    nplots = len(indices)
    fig = plt.figure()
    n = np.zeros((nbins, nplots))
    for i in range(nplots):
        index = indices[i]
        time = dts.tspan[index]
        phases = distob.gather(dts.mod2pi()[index, 0, :])
        ax = fig.add_subplot(1, nplots, i + 1, projection='polar')
        n[:, i], bins, patches = ax.hist(phases, nbins, (-np.pi, np.pi),
                                         density=True, histtype='bar')
        ax.set_title('time = %d s' % time)
        ax.set_xticklabels(['0', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$',
                            r'$\frac{3\pi}{4}$', r'$\pi$',
                            r'$\frac{-3\pi}{4}$', r'$\frac{-\pi}{2}$',
                            r'$\frac{-\pi}{4}$'])
    # Color the bars by density, on a shared scale across subplots.
    nmin, nmax = n.min(), n.max()
    norm = mpl.colors.Normalize(1.2 * nmin - 0.2 * nmax,
                                0.6 * nmin + 0.4 * nmax, clip=True)
    for i in range(nplots):
        ax = fig.get_axes()[i]
        ax.set_ylim(0, nmax)
        for this_n, thispatch in zip(n[:, i], ax.patches):
            color = colormap(norm(this_n))
            thispatch.set_facecolor(color)
            thispatch.set_edgecolor(color)
    fig.show()

Plot a polar histogram of a phase variable's probability distribution.

Args:
    dts: DistTimeseries with axis 2 ranging over separate instances of an
        oscillator (time series values are assumed to represent an angle)
    times (float or sequence of floats): The target times at which to plot
        the distribution
    nbins (int): number of histogram bins
    colormap
juraj-google-style
def _LogForwardedIpChanges(self, configured, desired, to_add, to_remove,
                           interface):
    if not to_add and not to_remove:
        return
    self.logger.info(
        'Changing %s IPs from %s to %s by adding %s and removing %s.',
        interface, configured or None, desired or None,
        to_add or None, to_remove or None)

Log the planned IP address changes.

Args:
    configured: list, the IP address strings already configured.
    desired: list, the IP address strings that will be configured.
    to_add: list, the forwarded IP address strings to configure.
    to_remove: list, the forwarded IP address strings to delete.
    interface: string, the output device to modify.
codesearchnet
def update_pipeline_and_auto_class_table(
        table: Dict[str, Tuple[str, str]]) -> Dict[str, Tuple[str, str]]:
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f'TF_{model_mapping}',
                          f'FLAX_{model_mapping}']
        auto_classes = [auto_class, f'TF_{auto_class}', f'Flax_{auto_class}']
        for module, cls, mapping in zip(auto_modules, auto_classes,
                                        model_mappings):
            if not hasattr(module, mapping):
                continue
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            table.update(dict.fromkeys(model_names, (pipeline_tag, cls)))
    return table

Update the table mapping models to pipelines and auto classes without
removing old keys if they don't exist anymore.

Args:
    table (`Dict[str, Tuple[str, str]]`): The existing table mapping model
        names to a tuple containing the pipeline tag and the auto-class
        name with which they should be used.

Returns:
    `Dict[str, Tuple[str, str]]`: The updated table in the same format.
github-repos
def fit(self, col):
    dates = self.safe_datetime_cast(col)
    self.default_val = (dates.groupby(dates).count().index[0].timestamp() *
                        1000000000.0)

Prepare the transformer to convert data.

Args:
    col (pandas.DataFrame): Data to transform.

Returns:
    None
codesearchnet
def ParseNumericOption(self, options, name, base=10, default_value=None):
    numeric_value = getattr(options, name, None)
    if not numeric_value:
        return default_value
    try:
        return int(numeric_value, base)
    except (TypeError, ValueError):
        name = name.replace('_', ' ')
        raise errors.BadConfigOption(
            'Unsupported numeric value {0:s}: {1!s}.'.format(
                name, numeric_value))

Parses a numeric option.

If the option is not set the default value is returned.

Args:
    options (argparse.Namespace): command line arguments.
    name (str): name of the numeric option.
    base (Optional[int]): base of the numeric value.
    default_value (Optional[object]): default value.

Returns:
    int: numeric value.

Raises:
    BadConfigOption: if the options are invalid.
codesearchnet
def get_caching_key(self, user_context):
    raise NotImplementedError('subclasses must override this')

Returns a unique key to use for caching.

Subclasses must override this.

Calls made to `transform_function` with functions that have the same code
object and caching key will return a cached instance on subsequent
invocations.

Args:
    user_context: The context object which was passed to `transform`.

Returns:
    extra_locals: A hashable.
github-repos
def __convertLongToString(self, iValue):
    string = ''
    strValue = str(hex(iValue))
    string = strValue.lstrip('0x')
    string = string.rstrip('L')
    return string

Convert a long integer to a hex string, removing the '0x' prefix and the
'L' suffix.

Args:
    iValue: long integer in hex format

Returns:
    string of this long integer without "0x" and "L"
juraj-google-style
def ToName(param_type):
    items = inspect.getmembers(ContractParameterType)
    if type(param_type) is bytes:
        param_type = int.from_bytes(param_type, 'little')
    for item in items:
        name = item[0]
        val = int(item[1].value)
        if val == param_type:
            return name
    return None

Gets the name of a ContractParameterType based on its value.

Args:
    param_type (ContractParameterType): type to get the name of

Returns:
    str
juraj-google-style
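The same value-to-name reverse lookup, sketched over a toy enum (ContractParameterType itself is not reproduced here):

import enum
import inspect

class Color(enum.Enum):
    RED = 0
    GREEN = 1

def to_name(value):
    # Scan members and match on the integer value, as ToName does above.
    for name, member in inspect.getmembers(Color):
        if isinstance(member, Color) and int(member.value) == value:
            return name
    return None

print(to_name(1))  # 'GREEN'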
def _query(self, url, xpath):
    return self.session.query(CachedRequest).filter(
        CachedRequest.url == url).filter(CachedRequest.xpath == xpath)

Base query for an url and xpath.

Args:
    url (str): URL to search
    xpath (str): xpath to search (may be ``None``)
codesearchnet
def MapByteStream(self, byte_stream, byte_offset=0, context=None,
                  **unused_kwargs):
    data_type_size = self._data_type_definition.GetByteSize()
    self._CheckByteStreamSize(byte_stream, byte_offset, data_type_size)
    try:
        if self._byte_order == definitions.BYTE_ORDER_BIG_ENDIAN:
            mapped_value = uuid.UUID(
                bytes=byte_stream[byte_offset:byte_offset + 16])
        elif self._byte_order == definitions.BYTE_ORDER_LITTLE_ENDIAN:
            mapped_value = uuid.UUID(
                bytes_le=byte_stream[byte_offset:byte_offset + 16])
    except Exception as exception:
        error_string = (
            'Unable to read: {0:s} from byte stream at offset: {1:d} '
            'with error: {2!s}').format(
                self._data_type_definition.name, byte_offset, exception)
        raise errors.MappingError(error_string)
    if context:
        context.byte_size = data_type_size
    return mapped_value

Maps the data type on a byte stream.

Args:
    byte_stream (bytes): byte stream.
    byte_offset (Optional[int]): offset into the byte stream where to
        start.
    context (Optional[DataTypeMapContext]): data type map context.

Returns:
    uuid.UUID: mapped value.

Raises:
    MappingError: if the data type definition cannot be mapped on the byte
        stream.
def firmware_version(self):
    namespace = "urn:brocade.com:mgmt:brocade-firmware-ext"
    request_ver = ET.Element("show-firmware-version", xmlns=namespace)
    ver = self._callback(request_ver, handler='get')
    return ver.find('.

Returns firmware version.

Args:
    None

Returns:
    Dictionary

Raises:
    None
juraj-google-style
def preprocess_input(x, data_format=None):
    return x

A placeholder method for backward compatibility.

The preprocessing logic has been included in the efficientnet model
implementation. Users are no longer required to call this method to
normalize the input data. This method does nothing and only kept as a
placeholder to align the API surface between old and new version of model.

Args:
    x: A floating point `numpy.array` or a tensor.
    data_format: Optional data format of the image tensor/array. `None`
        means the global setting `keras.backend.image_data_format()` is
        used (unless you changed it, it uses `"channels_last"`). Defaults
        to `None`.

Returns:
    Unchanged `numpy.array` or tensor.
github-repos
def build_panel(panel_info, adapter):
    panel_name = panel_info.get('panel_id', panel_info.get('panel_name'))
    if not panel_name:
        raise KeyError('Panel has to have a id')
    panel_obj = dict(panel_name=panel_name)
    LOG.info('Building panel with name: {0}'.format(panel_name))
    try:
        institute_id = panel_info['institute']
    except KeyError as err:
        raise KeyError('Panel has to have a institute')
    if adapter.institute(institute_id) is None:
        raise IntegrityError(
            'Institute %s could not be found' % institute_id)
    panel_obj['institute'] = panel_info['institute']
    panel_obj['version'] = float(panel_info['version'])
    try:
        panel_obj['date'] = panel_info['date']
    except KeyError as err:
        raise KeyError('Panel has to have a date')
    panel_obj['display_name'] = panel_info.get('display_name',
                                               panel_obj['panel_name'])
    gene_objs = []
    fail = False
    for gene_info in panel_info.get('genes', []):
        try:
            gene_obj = build_gene(gene_info, adapter)
            gene_objs.append(gene_obj)
        except IntegrityError as err:
            LOG.warning(err)
            fail = True
    if fail:
        raise IntegrityError(
            'Some genes did not exist in database. Please see log messages.')
    panel_obj['genes'] = gene_objs
    return panel_obj

Build a gene_panel object.

Args:
    panel_info (dict): A dictionary with panel information
    adapter (scout.adapter.MongoAdapter)

Returns:
    panel_obj (dict)

gene_panel = dict(
    panel_id = str,      # required
    institute = str,     # institute_id, required
    version = float,     # required
    date = datetime,     # required
    display_name = str,  # default is panel_name
    genes = list,        # list of panel genes, sorted on panel_gene['symbol']
)
codesearchnet
def run(self, dag):
    for node in dag.threeQ_or_more_gates():
        rule = node.op.definition
        if not rule:
            raise QiskitError("Cannot unroll all 3q or more gates. "
                              "No rule to expand instruction %s." %
                              node.op.name)
        decomposition = DAGCircuit()
        decomposition.add_qreg(rule[0][1][0][0])
        for inst in rule:
            decomposition.apply_operation_back(*inst)
        # Recurse until every gate acts on at most 2 qubits.
        decomposition = self.run(decomposition)
        dag.substitute_node_with_dag(node, decomposition)
    return dag

Expand 3+ qubit gates using their decomposition rules.

Args:
    dag (DAGCircuit): input dag

Returns:
    DAGCircuit: output dag with maximum node degrees of 2

Raises:
    QiskitError: if a 3q+ gate is not decomposable
juraj-google-style
def to_proj4(self, as_dict=False, toplevel=True):
    if toplevel:
        string = "+proj=longlat %s %s +nodef" % (
            self.datum.to_proj4(), self.prime_mer.to_proj4())
    else:
        string = "%s %s" % (self.datum.to_proj4(),
                            self.prime_mer.to_proj4())
    if as_dict:
        return dict([entry.lstrip('+').split('=')
                     for entry in string.split()
                     if entry != "+no_defs"])
    else:
        return string

Returns the CS as a proj4 formatted string or dict.

Arguments:
    - **as_dict** (optional): If True, returns the proj4 string as a dict
      (defaults to False).
    - **toplevel** (optional): If True, treats this CS as the final
      toplevel CS and adds the necessary proj4 elements (defaults to True).
juraj-google-style
def right_shift_blockwise(x, query_shape, name=None):
    with tf.variable_scope(
            name, default_name='right_shift_blockwise', values=[x]):
        x_list_shape = x.get_shape().as_list()
        x_shape = common_layers.shape_list(x)
        x = tf.expand_dims(x, axis=1)
        x = pad_to_multiple_2d(x, query_shape)
        padded_x_shape = common_layers.shape_list(x)
        x_indices = gather_indices_2d(x, query_shape, query_shape)
        x_new = get_shifted_center_blocks(x, x_indices)
        output = scatter_blocks_2d(x_new, x_indices, padded_x_shape)
        output = tf.squeeze(output, axis=1)
        output = tf.slice(output, [0, 0, 0, 0],
                          [-1, x_shape[1], x_shape[2], -1])
        output.set_shape(x_list_shape)
        return output

Right shifts once in every block.

Args:
    x: a tensor of shape [batch, height, width, depth]
    query_shape: A 2d tuple of ints
    name: a string

Returns:
    output: a tensor of the same shape as x
codesearchnet
def get_service_name(*args):
    raw_services = _get_services()
    services = dict()
    for raw_service in raw_services:
        if args:
            if (raw_service['DisplayName'] in args or
                    raw_service['ServiceName'] in args or
                    raw_service['ServiceName'].lower() in args):
                services[raw_service['DisplayName']] = \
                    raw_service['ServiceName']
        else:
            services[raw_service['DisplayName']] = raw_service['ServiceName']
    return services

The Display Name is what is displayed in Windows when services.msc is
executed. Each Display Name has an associated Service Name which is the
actual name of the service. This function allows you to discover the
Service Name by returning a dictionary of Display Names and Service Names,
or filter by adding arguments of Display Names.

If no args are passed, return a dict of all services where the keys are the
service Display Names and the values are the Service Names.

If arguments are passed, create a dict of Display Names and Service Names.

Returns:
    dict: A dictionary of display names and service names

CLI Examples:

.. code-block:: bash

    salt '*' service.get_service_name
    salt '*' service.get_service_name 'Google Update Service (gupdate)' 'DHCP Client'
codesearchnet
class MaxPooling1D(keras_layers.MaxPooling1D, base.Layer):

    def __init__(self, pool_size, strides, padding='valid',
                 data_format='channels_last', name=None, **kwargs):
        if strides is None:
            raise ValueError('Argument `strides` must not be None.')
        super(MaxPooling1D, self).__init__(
            pool_size=pool_size, strides=strides, padding=padding,
            data_format=data_format, name=name, **kwargs)

Max Pooling layer for 1D inputs.

Args:
    pool_size: An integer or tuple/list of a single integer, representing
        the size of the pooling window.
    strides: An integer or tuple/list of a single integer, specifying the
        strides of the pooling operation.
    padding: A string. The padding method, either 'valid' or 'same'.
        Case-insensitive.
    data_format: A string, one of `channels_last` (default) or
        `channels_first`. The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape
        `(batch, length, channels)` while `channels_first` corresponds to
        inputs with shape `(batch, channels, length)`.
    name: A string, the name of the layer.
github-repos
def hash_stream(self, url):
    md5hash = hashlib.md5()
    try:
        for chunk in self.response.iter_content(chunk_size=10240):
            if chunk:
                md5hash.update(chunk)
        return md5hash.hexdigest()
    except Exception as e:
        raisefrom(DownloadError,
                  'Download of %s failed in retrieval of stream!' % url, e)

Stream file from url and hash it using MD5. Must call setup method first.

Args:
    url (str): URL to download

Returns:
    str: MD5 hash of file
codesearchnet
def _get_arg_parser(func, types, args_and_defaults, delimiter_chars):
    _LOG.debug("Creating ArgumentParser for '%s'", func.__name__)
    (description, arg_help) = _prepare_doc(
        func, [x for (x, _) in args_and_defaults], delimiter_chars)
    parser = argparse.ArgumentParser(description=description)
    for ((arg, default), arg_type) in zip_longest(args_and_defaults, types):
        help_msg = arg_help[arg]
        if default is NoDefault:
            arg_type = arg_type or identity_type
            if arg_type == bool:
                _LOG.debug('Adding optional flag %s.%s', func.__name__, arg)
                parser.add_argument(
                    '--%s' % arg, default=True, required=False,
                    action='store_false',
                    help='%s. Defaults to True if not specified' % help_msg)
            else:
                _LOG.debug('Adding positional argument %s.%s',
                           func.__name__, arg)
                parser.add_argument(arg, help=help_msg, type=arg_type)
        else:
            if default is None and arg_type is None:
                raise ParseThisError(
                    "To use default value of 'None' you need to specify the "
                    "type of the argument '{}' for the method '{}'".format(
                        arg, func.__name__))
            arg_type = arg_type or type(default)
            if arg_type == bool:
                action = 'store_false' if default else 'store_true'
                _LOG.debug('Adding optional flag %s.%s', func.__name__, arg)
                parser.add_argument('--%s' % arg, help=help_msg,
                                    default=default, action=action)
            else:
                _LOG.debug('Adding optional argument %s.%s',
                           func.__name__, arg)
                parser.add_argument('--%s' % arg, help=help_msg,
                                    default=default, type=arg_type)
    return parser

Return an ArgumentParser for the given function. Arguments are defined from
the function arguments and their associated defaults.

Args:
    func: function for which we want an ArgumentParser
    types: types to which the command line arguments should be converted to
    args_and_defaults: list of 2-tuples (arg_name, arg_default)
    delimiter_chars: characters used to separate the parameters from their
        help message in the docstring
codesearchnet
def _parse_state(self, config):
    value = STATE_RE.search(config).group('value')
    return dict(state=value)

_parse_state scans the provided configuration block and extracts the vlan
state value. The config block is expected to always return the vlan state
config. The return dict is intended to be merged into the response dict.

Args:
    config (str): The vlan configuration block from the nodes running
        configuration

Returns:
    dict: resource dict attribute
codesearchnet
def is_str(string):
    if sys.version_info[:2] >= (3, 0):
        return isinstance(string, str)
    return isinstance(string, basestring)

Python 2 and 3 compatible string checker.

Args:
    string (str | basestring): the string to check

Returns:
    bool: True or False
codesearchnet
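A quick check of the version-aware behaviour above: on Python 3 only str passes, while on Python 2 both str and unicode pass via basestring.

print(is_str('abc'))   # True
print(is_str(b'abc'))  # False on Python 3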
def notify(self, method_name: str, *args: Any,
           trim_log_values: Optional[bool] = None,
           validate_against_schema: Optional[bool] = None,
           **kwargs: Any) -> Response:
    return self.send(
        Notification(method_name, *args, **kwargs),
        trim_log_values=trim_log_values,
        validate_against_schema=validate_against_schema)

Send a JSON-RPC request, without expecting a response.

Args:
    method_name: The remote procedure's method name.
    args: Positional arguments passed to the remote procedure.
    kwargs: Keyword arguments passed to the remote procedure.
    trim_log_values: Abbreviate the log entries of requests and responses.
    validate_against_schema: Validate response against the JSON-RPC schema.
codesearchnet
def execute_with_cancellation(op_name, num_outputs, inputs, attrs, ctx,
                              cancellation_manager, name=None):
    device_name = ctx.device_name
    try:
        ctx.ensure_initialized()
        tensors = pywrap_tfe.TFE_Py_ExecuteCancelable(
            ctx._handle, device_name, op_name, inputs, attrs,
            cancellation_manager._impl, num_outputs)
    except core._NotOkStatusException as e:
        if name is not None:
            e.message += ' name: ' + name
        raise core._status_to_exception(e) from None
    except TypeError as e:
        keras_symbolic_tensors = [
            x for x in inputs if _is_keras_symbolic_tensor(x)]
        if keras_symbolic_tensors:
            raise core._SymbolicException(
                'Inputs to eager execution function cannot be Keras '
                'symbolic tensors, but found {}'.format(
                    keras_symbolic_tensors))
        raise e
    return tensors

Execute a TensorFlow operation.

Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code)
        to execute.
    num_outputs: The number of outputs of the operation to fetch.
        (Explicitly provided instead of being inferred for performance
        reasons).
    inputs: A list of inputs to the operation. Each entry should be a
        Tensor, or a value which can be passed to the Tensor constructor to
        create one.
    attrs: A tuple with alternating string attr names and attr values for
        this operation.
    ctx: The value of context.context().
    cancellation_manager: a `CancellationManager` object that can be used
        to cancel the operation.
    name: Customized name for the operation.

Returns:
    List of output Tensor objects. The list is empty if there are no
    outputs.

Raises:
    An exception on error.
github-repos
def make_decoder(activation, latent_size, output_shape, base_depth):
    deconv = functools.partial(
        tf.keras.layers.Conv2DTranspose, padding="SAME",
        activation=activation)
    conv = functools.partial(
        tf.keras.layers.Conv2D, padding="SAME", activation=activation)
    decoder_net = tf.keras.Sequential([
        deconv(2 * base_depth, 7, padding="VALID"),
        deconv(2 * base_depth, 5),
        deconv(2 * base_depth, 5, 2),
        deconv(base_depth, 5),
        deconv(base_depth, 5, 2),
        deconv(base_depth, 5),
        conv(output_shape[-1], 5, activation=None),
    ])

    def decoder(codes):
        original_shape = tf.shape(input=codes)
        codes = tf.reshape(codes, (-1, 1, 1, latent_size))
        logits = decoder_net(codes)
        logits = tf.reshape(
            logits,
            shape=tf.concat([original_shape[:-1], output_shape], axis=0))
        return tfd.Independent(tfd.Bernoulli(logits=logits),
                               reinterpreted_batch_ndims=len(output_shape),
                               name="image")

    return decoder

Creates the decoder function.

Args:
    activation: Activation function in hidden layers.
    latent_size: Dimensionality of the encoding.
    output_shape: The output image shape.
    base_depth: Smallest depth for a layer.

Returns:
    decoder: A `callable` mapping a `Tensor` of encodings to a
    `tfd.Distribution` instance over images.
juraj-google-style
def is_type(url: str,
            message_or_descriptor: annotation_utils.MessageOrDescriptorBase
            ) -> bool:
    return annotation_utils.get_structure_definition_url(
        message_or_descriptor) == url

Returns True if message_or_descriptor has a structure definition of url.

Args:
    url: The FHIR structure definition URL to compare against.
    message_or_descriptor: The Message or Descriptor to examine.

Returns:
    True if message_or_descriptor has a structure definition equal to url.
github-repos
def bulk_write(self, metrics):
    actions = []
    index = self.get_index()
    for metric in metrics:
        actions.append({'index': {'_index': index, '_type': self.doc_type}})
        actions.append(metric)
    try:
        self.client.bulk(actions)
    except TransportError as exc:
        logger.warning('bulk_write metrics %r failure %r', metrics, exc)

Write multiple metrics to elasticsearch in one request.

Args:
    metrics (list): data with mappings to send to elasticsearch
juraj-google-style
def get_owner_emails(self, partial_owner_match=True):
    for tag in self.tags:
        if tag.key.lower() == 'owner':
            rgx = re.compile(RGX_EMAIL_VALIDATION_PATTERN, re.I)
            if partial_owner_match:
                match = rgx.findall(tag.value)
                if match:
                    return [NotificationContact('email', email)
                            for email in match]
            else:
                match = rgx.match(tag.value)
                if match:
                    return [NotificationContact('email', email)
                            for email in match.groups()]
    return None

Return a list of email addresses associated with the instance, based on
tags.

Returns:
    List of email addresses if any, else None
codesearchnet
class CombinePerKey(PTransformWithSideInputs):

    def with_hot_key_fanout(self, fanout):
        from apache_beam.transforms.combiners import curry_combine_fn
        if fanout is None:
            return self
        else:
            return _CombinePerKeyWithHotKeyFanout(
                curry_combine_fn(self.fn, self.args, self.kwargs), fanout)

    def display_data(self):
        return {
            'combine_fn': DisplayDataItem(
                self.fn.__class__, label='Combine Function'),
            'combine_fn_dd': self.fn,
        }

    def make_fn(self, fn, has_side_inputs):
        self._fn_label = ptransform.label_from_callable(fn)
        return CombineFn.maybe_from_callable(fn, has_side_inputs)

    def default_label(self):
        return '%s(%s)' % (self.__class__.__name__, self._fn_label)

    def _process_argspec_fn(self):
        return lambda element, *args, **kwargs: None

    def expand(self, pcoll):
        args, kwargs = util.insert_values_in_args(
            self.args, self.kwargs, self.side_inputs)
        return pcoll | GroupByKey() | 'Combine' >> CombineValues(
            self.fn, *args, **kwargs)

    def default_type_hints(self):
        result = self.fn.get_type_hints()
        k = typehints.TypeVariable('K')
        if result.input_types:
            args, kwargs = result.input_types
            args = (typehints.Tuple[k, args[0]],) + args[1:]
            result = result.with_input_types(*args, **kwargs)
        else:
            result = result.with_input_types(
                typehints.Tuple[k, typehints.Any])
        if result.output_types:
            main_output_type = result.simple_output_type('')
            result = result.with_output_types(
                typehints.Tuple[k, main_output_type])
        else:
            result = result.with_output_types(
                typehints.Tuple[k, typehints.Any])
        return result

    def to_runner_api_parameter(self, context):
        if self.args or self.kwargs:
            from apache_beam.transforms.combiners import curry_combine_fn
            combine_fn = curry_combine_fn(self.fn, self.args, self.kwargs)
        else:
            combine_fn = self.fn
        return (common_urns.composites.COMBINE_PER_KEY.urn,
                _combine_payload(combine_fn, context))

    @staticmethod
    @PTransform.register_urn(common_urns.composites.COMBINE_PER_KEY.urn,
                             beam_runner_api_pb2.CombinePayload)
    def from_runner_api_parameter(unused_ptransform, combine_payload,
                                  context):
        return CombinePerKey(
            CombineFn.from_runner_api(combine_payload.combine_fn, context))

    def runner_api_requires_keyed_input(self):
        return True

A per-key Combine transform.

Identifies sets of values associated with the same key in the input
PCollection, then applies a CombineFn to condense those sets to single
values. See documentation in CombineFn for details on the specifics on how
CombineFns are applied.

Args:
    pcoll: input pcollection.
    fn: instance of CombineFn to apply to all values under the same key in
        pcoll, or a callable whose signature is
        ``f(iterable, *args, **kwargs)`` (e.g., sum, max).
    *args: arguments and side inputs, passed directly to the CombineFn.
    **kwargs: arguments and side inputs, passed directly to the CombineFn.

Returns:
    A PObject holding the result of the combine operation.
github-repos
def AddEventTags(self, event_tags):
    self._RaiseIfNotWritable()
    for event_tag in event_tags:
        self.AddEventTag(event_tag)

Adds event tags.

Args:
    event_tags (list[EventTag]): event tags.

Raises:
    IOError: when the storage file is closed or read-only or if the event
        tags cannot be serialized.
    OSError: when the storage file is closed or read-only or if the event
        tags cannot be serialized.
codesearchnet
def __init__(self, sources, stacker_cache_dir=None):
    if not stacker_cache_dir:
        stacker_cache_dir = os.path.expanduser("~/.stacker")
    package_cache_dir = os.path.join(stacker_cache_dir, 'packages')
    self.stacker_cache_dir = stacker_cache_dir
    self.package_cache_dir = package_cache_dir
    self.sources = sources
    self.configs_to_merge = []
    self.create_cache_directories()

Process a config's defined package sources.

Args:
    sources (dict): Package sources from Stacker config dictionary
    stacker_cache_dir (string): Path where remote sources will be cached.
juraj-google-style
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    local_stream = utils.BytearrayStream()
    if self._unique_identifier is not None:
        self._unique_identifier.write(
            local_stream, kmip_version=kmip_version)
    if self._key_format_type is not None:
        self._key_format_type.write(
            local_stream, kmip_version=kmip_version)
    if self._key_compression_type is not None:
        self._key_compression_type.write(
            local_stream, kmip_version=kmip_version)
    if self._key_wrapping_specification is not None:
        self._key_wrapping_specification.write(
            local_stream, kmip_version=kmip_version)
    self.length = local_stream.length()
    super(GetRequestPayload, self).write(
        output_stream, kmip_version=kmip_version)
    output_stream.write(local_stream.buffer)

Write the data encoding the Get request payload to a stream.

Args:
    output_stream (stream): A data stream in which to encode object data,
        supporting a write method; usually a BytearrayStream object.
    kmip_version (KMIPVersion): An enumeration defining the KMIP version
        with which the object will be encoded. Optional, defaults to KMIP
        1.0.
juraj-google-style
def tf_retrieve_indices(self, indices):
    states = dict()
    for name in sorted(self.states_memory):
        states[name] = tf.gather(params=self.states_memory[name],
                                 indices=indices)
    internals = dict()
    for name in sorted(self.internals_memory):
        internals[name] = tf.gather(params=self.internals_memory[name],
                                    indices=indices)
    actions = dict()
    for name in sorted(self.actions_memory):
        actions[name] = tf.gather(params=self.actions_memory[name],
                                  indices=indices)
    terminal = tf.gather(params=self.terminal_memory, indices=indices)
    reward = tf.gather(params=self.reward_memory, indices=indices)
    if self.include_next_states:
        assert util.rank(indices) == 1
        next_indices = (indices + 1) % self.capacity
        next_states = dict()
        for name in sorted(self.states_memory):
            next_states[name] = tf.gather(params=self.states_memory[name],
                                          indices=next_indices)
        next_internals = dict()
        for name in sorted(self.internals_memory):
            next_internals[name] = tf.gather(
                params=self.internals_memory[name], indices=next_indices)
        return dict(states=states, internals=internals, actions=actions,
                    terminal=terminal, reward=reward,
                    next_states=next_states, next_internals=next_internals)
    else:
        return dict(states=states, internals=internals, actions=actions,
                    terminal=terminal, reward=reward)

Fetches experiences for given indices.

Args:
    indices: Index tensor

Returns:
    Batch of experiences
codesearchnet
def get_framework(model, revision: Optional[str] = None):
    warnings.warn(
        '`get_framework` is deprecated and will be removed in v5, use '
        '`infer_framework_from_model` instead.',
        FutureWarning)
    if not is_tf_available() and not is_torch_available():
        raise RuntimeError(
            'At least one of TensorFlow 2.0 or PyTorch should be installed. '
            'To install TensorFlow 2.0, read the instructions at https:')
            # URL truncated in the source
    if isinstance(model, str):
        if is_torch_available() and not is_tf_available():
            model = AutoModel.from_pretrained(model, revision=revision)
        elif is_tf_available() and not is_torch_available():
            model = TFAutoModel.from_pretrained(model, revision=revision)
        else:
            try:
                model = AutoModel.from_pretrained(model, revision=revision)
            except OSError:
                model = TFAutoModel.from_pretrained(model, revision=revision)
    framework = infer_framework(model.__class__)
    return framework

Select framework (TensorFlow or PyTorch) to use.

Args:
    model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel`]): If both
        frameworks are installed, picks the one corresponding to the model
        passed (either a model class or the model name). If no specific
        model is provided, defaults to using PyTorch.
github-repos
def get_data(img_path):
    # Subtract the per-channel ImageNet mean, then move to CHW layout.
    mean = np.array([123.68, 116.779, 103.939])
    img = Image.open(img_path)
    img = np.array(img, dtype=np.float32)
    reshaped_mean = mean.reshape(1, 1, 3)
    img = img - reshaped_mean
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2)
    img = np.expand_dims(img, axis=0)
    return img

Get the (1, 3, h, w) np.array data for the supplied image.

Args:
    img_path (string): the input image path

Returns:
    np.array: image data in a (1, 3, h, w) shape
juraj-google-style
def normalize(self, text, normalizations=None):
    for normalization, kwargs in self._parse_normalizations(
            normalizations or self._config.normalizations):
        try:
            text = getattr(self, normalization)(text, **kwargs)
        except AttributeError as e:
            self._logger.debug('Invalid normalization: %s', e)
    return text

Normalize a given text applying all normalizations.

Normalizations to apply can be specified through a list of parameters and
will be executed in that order.

Args:
    text: The text to be processed.
    normalizations: List of normalizations to apply.

Returns:
    The text normalized.
juraj-google-style
def update_configuration(self):
    uri = '{}/configuration'.format(self.data['uri'])
    result = self._helper.update({}, uri)
    self.refresh()
    return result

Asynchronously applies or re-applies the SAS Logical Interconnect
configuration to all managed interconnects of a SAS Logical Interconnect.

Returns:
    dict: SAS Logical Interconnect.
codesearchnet
def call_fn(fn, args):
    if expand_as_args(args):
        return fn(*args)
    elif _expand_as_kwargs(args):
        return fn(**args)
    else:
        return fn(args)

Calls `fn` with `args`, possibly expanding `args`.

Use this function when calling a user-provided callable using user-provided
arguments. The expansion rules are as follows:

`fn(*args)` if `args` is a `list` or a `tuple`, but not a `namedtuple`.
`fn(**args)` if `args` is a `dict`.
`fn(args)` otherwise.

Args:
    fn: A callable that takes either `args` as an argument(s).
    args: Arguments to `fn`.

Returns:
    result: Return value of `fn`.
juraj-google-style
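The three expansion rules demonstrated with a plain function, assuming the module's expand_as_args/_expand_as_kwargs helpers implement the documented rules:

def add(a, b):
    return a + b

print(call_fn(add, (1, 2)))            # tuple -> expanded as positional args
print(call_fn(add, {'a': 1, 'b': 2}))  # dict  -> expanded as keyword args
print(call_fn(abs, -3))                # other -> passed as a single argument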
def setup(__pkg: str) -> jinja2.Environment:
    dirs = [path.join(d, 'templates')
            for d in xdg_basedir.get_data_dirs(__pkg)]
    env = jinja2.Environment(
        autoescape=jinja2.select_autoescape(['html', 'xml']),
        loader=jinja2.ChoiceLoader(
            [jinja2.FileSystemLoader(s) for s in dirs]))
    env.loader.loaders.append(jinja2.PackageLoader(__pkg, 'templates'))
    env.filters.update(FILTERS)
    return env

Configure a new Jinja environment with our filters.

Args:
    __pkg: Package name to use as base for templates searches

Returns:
    Configured Jinja environment
juraj-google-style
def allele_clusters(dists, t=0.025):
    # Use the supplied threshold rather than a hard-coded 0.025.
    clusters = fcluster(linkage(dists), t, criterion='distance')
    cluster_idx = defaultdict(list)
    for idx, cl in enumerate(clusters):
        cluster_idx[cl].append(idx)
    return cluster_idx

Flat clusters from distance matrix.

Args:
    dists (numpy.array): pdist distance matrix
    t (float): fcluster (tree cutting) distance threshold

Returns:
    dict of lists: cluster number to list of indices of distances in
    cluster
juraj-google-style
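A usage sketch with scipy: cluster a tiny condensed distance matrix and group indices by flat-cluster label, as allele_clusters does above.

from collections import defaultdict
from scipy.cluster.hierarchy import fcluster, linkage

dists = [0.01, 0.5, 0.5]  # condensed pdist for 3 observations
labels = fcluster(linkage(dists), 0.025, criterion='distance')
groups = defaultdict(list)
for idx, cl in enumerate(labels):
    groups[cl].append(idx)
print(dict(groups))  # observations 0 and 1 cluster together, 2 is alone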
def generate_states(state_count, process_matrix, process_covariance,
                    initial_state=None):
    process_matrix = np.atleast_2d(process_matrix)
    process_covariance = np.atleast_2d(process_covariance)
    state_dim = process_matrix.shape[0]
    if process_matrix.shape != (state_dim, state_dim):
        raise ValueError("Process matrix has inconsistent shape: {}".format(
            process_matrix.shape))
    if process_covariance.shape != (state_dim, state_dim):
        raise ValueError(
            "Process covariance has inconsistent shape: {}".format(
                process_covariance.shape))
    if initial_state is None:
        initial_state = np.zeros(process_matrix.shape[0])
    states = [initial_state]
    while len(states) < state_count:
        states.append(
            process_matrix.dot(states[-1]) + np.random.multivariate_normal(
                mean=np.zeros(state_dim), cov=process_covariance))
    return np.vstack(states)

Generate states by simulating a linear system with constant process matrix
and process noise covariance.

Args:
    state_count (int): Number of states to generate.
    process_matrix (array): Square array
    process_covariance (array): Square array specifying process noise
        covariance.
    initial_state (array or None): If omitted, use zero-filled vector as
        initial state.
juraj-google-style
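Driving generate_states above as a 1-D random walk: an identity process matrix plus unit process noise.

import numpy as np

states = generate_states(
    state_count=5,
    process_matrix=np.eye(1),
    process_covariance=np.eye(1))
print(states.shape)  # (5, 1): one row per simulated state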
def persist(self, status=None):
    self._persist = status if type(status) is bool else True

Enables persistent mode for the current mock.

Returns:
    self: current Mock instance.
codesearchnet
def get_dict_definition(self, dict, get_list=False):
    list_def_candidate = []
    for definition_name in self.specification['definitions'].keys():
        if self.validate_definition(definition_name, dict):
            if not get_list:
                return definition_name
            list_def_candidate.append(definition_name)
    if get_list:
        return list_def_candidate
    return None

Get the definition name of the given dict.

Args:
    dict: dict to test.
    get_list: if set to true, return a list of definition that match the
        body. if False, only return the first.

Returns:
    The definition name or None if the dict does not match any definition.
    If get_list is True, return a list of definition_name.
codesearchnet
def call(poly, args):
    args = list(args)
    if len(args) < poly.dim:
        args = args + [np.nan] * (poly.dim - len(args))
    elif len(args) > poly.dim:
        raise ValueError("too many arguments")
    # Substitute any polynomial arguments first.
    x0, x1 = [], []
    for idx, arg in enumerate(args):
        if isinstance(arg, Poly):
            poly_ = Poly({tuple(np.eye(poly.dim)[idx]): np.array(1)})
            x0.append(poly_)
            x1.append(arg)
            args[idx] = np.nan
    if x0:
        poly = call(poly, args)
        return substitute(poly, x0, x1)
    # Masked or NaN arguments keep their variable intact.
    masks = np.zeros(len(args), dtype=bool)
    for idx, arg in enumerate(args):
        if np.ma.is_masked(arg) or np.any(np.isnan(arg)):
            masks[idx] = True
            args[idx] = 0
    shape = np.array(
        args[np.argmax([np.prod(np.array(arg).shape) for arg in args])]
    ).shape
    args = np.array([np.ones(shape, dtype=int) * arg for arg in args])
    A = {}
    for key in poly.keys:
        key_ = np.array(key) * (1 - masks)
        val = np.outer(poly.A[key], np.prod((args.T ** key_).T, axis=0))
        val = np.reshape(val, poly.shape + tuple(shape))
        val = np.where(val != val, 0, val)
        mkey = tuple(np.array(key) * masks)
        if mkey not in A:
            A[mkey] = val
        else:
            A[mkey] = A[mkey] + val
    out = Poly(A, poly.dim, None, None)
    if out.keys and not np.sum(out.keys):
        out = out.A[out.keys[0]]
    elif not out.keys:
        out = np.zeros(out.shape, dtype=out.dtype)
    return out

Evaluate a polynomial along specified axes.

Args:
    poly (Poly): Input polynomial.
    args (numpy.ndarray): Argument to be evaluated. Masked values keeps the
        variable intact.

Returns:
    (Poly, numpy.ndarray): If masked values are used the Poly is returned.
    Else an numpy array matching the polynomial's shape is returned.
juraj-google-style
def __init__(self, jss, data, **kwargs): self.jss = jss if isinstance(data, basestring): super(JSSObject, self).__init__(tag=self.list_type) self._new(data, **kwargs) elif isinstance(data, ElementTree.Element): super(JSSObject, self).__init__(tag=data.tag) for child in data.getchildren(): self.append(child) else: raise TypeError("JSSObjects data argument must be of type " "xml.etree.ElemenTree.Element, or a string for the" " name.")
Initialize a new JSSObject. Args: jss: JSS object. data: xml.etree.ElementTree.Element data to use for creating the object, OR a string name to use for creating a new object (provided it has an implemented _new() method).
juraj-google-style
def scan_servos():
    servos = []
    for servo_id in range(0, 254):
        model = get_model(servo_id)
        if model:
            servos.append((servo_id, model))
    return servos
Scan for the HerkuleX servos connected. This function will scan for all the HerkuleX servos connected to the bus. Args: none Returns: list: a list of tuples of the form [(id, model)]
codesearchnet
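A usage sketch; it assumes the serial bus used by get_model has been configured elsewhere in the library.

for servo_id, model in scan_servos():
    print('found servo {} (model {})'.format(servo_id, model))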
def GetMap(self, cache_info, data): for obj in json.loads(cache_info.read()): key = obj.get('Key', '') value = obj.get('Value', '') if not value or not key: continue map_entry = self._ReadEntry(key, value) if map_entry is None: self.log.warning('Could not create entry from line %r in cache, skipping', value) continue if not data.Add(map_entry): self.log.warning('Could not add entry %r read from line %r in cache', map_entry, value) return data
Returns a map from a cache. Args: cache_info: file like object containing the cache. data: a Map to populate. Returns: A child of Map containing the cache data.
github-repos
def get_data_path(self, filename, env_prefix=None):
    if env_prefix is None:
        target_file = filename
    else:
        target_file = os.path.join(env_prefix, filename)
    if os.path.exists(os.path.join(self._data_path, target_file)):
        return os.path.join(self._data_path, target_file)
    else:
        raise DataNotFoundError(
            u("Cannot find data file: {0}").format(target_file))
Get data path. Args: filename (string) : Name of file inside of /data folder to retrieve. Kwargs: env_prefix (string) : Name of subfolder, ex: 'qa' will find files in /data/qa Returns: String - path to file. Usage:: open(WTF_DATA_MANAGER.get_data_path('testdata.csv')) Note: WTF_DATA_MANAGER is a provided global instance of DataManager
juraj-google-style
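An illustrative call via the global instance mentioned in the docstring; the subfolder and file name are examples.

path = WTF_DATA_MANAGER.get_data_path('testdata.csv', env_prefix='qa')
with open(path) as f:
    data = f.read()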
def _convert_value(value, expected_type, path, context=_ConversionContext.VALUE):
    assert isinstance(path, tuple)
    if expected_type is None:
        expected_type = _NoneType
    if expected_type is tensor.Tensor:
        return _convert_tensor(value, path, context)
    elif isinstance(expected_type, type) and _issubclass(expected_type, composite_tensor.CompositeTensor):
        return _convert_composite_tensor(value, expected_type, path, context)
    elif expected_type is tensor_shape.TensorShape:
        try:
            return tensor_shape.as_shape(value)
        except TypeError as e:
            raise TypeError(f"{''.join(path)}: expected 'tf.TensorShape', got {type(value).__name__!r}") from e
    elif expected_type is dtypes.DType:
        try:
            return dtypes.as_dtype(value)
        except TypeError as e:
            raise TypeError(f"{''.join(path)}: expected 'tf.DType', got {type(value).__name__!r}") from e
    elif expected_type in (int, float, bool, str, bytes, _NoneType):
        if not isinstance(value, expected_type):
            raise TypeError(f"{''.join(path)}: expected {expected_type.__name__!r}, got {type(value).__name__!r}")
        return value
    elif type_annotations.is_generic_tuple(expected_type):
        return _convert_tuple(value, expected_type, path, context)
    elif type_annotations.is_generic_mapping(expected_type):
        return _convert_mapping(value, expected_type, path, context)
    elif type_annotations.is_generic_union(expected_type):
        return _convert_union(value, expected_type, path, context)
    else:
        raise TypeError(f"{''.join(path)}: Unsupported type annotation {expected_type!r}")
Type-checks and converts a value. Args: value: The value to type-check. expected_type: The expected type for the value. path: Tuple of `str` naming the value (used for exception messages). context: _ConversionContext, indicates what kind of value we are converting. Returns: A copy of `value`, converted to the expected type. Raises: TypeError: If `value` can not be converted to the expected type.
github-repos
def SetPermissions(path, mode=None, uid=None, gid=None, mkdir=False):
    if mkdir and not os.path.exists(path):
        os.mkdir(path, mode or 0o777)
    elif mode:
        os.chmod(path, mode)
    if uid is not None and gid is not None:
        # Explicit None checks so uid/gid 0 (root) is still honored.
        os.chown(path, uid, gid)
    _SetSELinuxContext(path)
Set the permissions and ownership of a path. Args: path: string, the path for which owner ID and group ID needs to be setup. mode: octal string, the permissions to set on the path. uid: int, the owner ID to be set for the path. gid: int, the group ID to be set for the path. mkdir: bool, True if the directory needs to be created.
juraj-google-style
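A hedged example; the path and IDs are illustrative. Note that uid and gid must both be given for ownership to change.

# Create /tmp/example with mode 0750, owned by uid/gid 1000.
SetPermissions('/tmp/example', mode=0o750, uid=1000, gid=1000, mkdir=True)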
def ExpectingFunctionArgs(clean_lines, linenum): line = clean_lines.elided[linenum] return (Match('^\\s*MOCK_(CONST_)?METHOD\\d+(_T)?\\(', line) or ((linenum >= 2) and (Match('^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\((?:\\S+,)?\\s*$', clean_lines.elided[(linenum - 1)]) or Match('^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\(\\s*$', clean_lines.elided[(linenum - 2)]) or Search('\\bstd::m?function\\s*\\<\\s*$', clean_lines.elided[(linenum - 1)]))))
Checks whether the current line is one where arguments of function types are expected. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if the line at 'linenum' is inside something that expects arguments of function types.
codesearchnet
def get_config(self, key, default=MISSING): keyname = ('config:' + key) try: return self.kvstore.get(keyname) except KeyError: if (default is MISSING): raise ArgumentError('No config value found for key', key=key) return default
Get the value of a persistent config key from the registry. If no default is specified and the key is not found, ArgumentError is raised. Args: key (string): The key name to fetch. default (string): an optional value to be returned if key cannot be found. Returns: string: the key's value.
codesearchnet
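A short sketch; the registry instance and key names are hypothetical.

name = registry.get_config('device_name', default='unknown')  # falls back to 'unknown'
serial = registry.get_config('serial')  # raises ArgumentError if missing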
def _force_float(v):
    try:
        return float(v)
    except Exception as exc:
        # Warn before returning, so the failure is actually logged.
        logger.warning('Failed to convert {} to float with {} error. Using 0 instead.'.format(v, exc))
        return 0.0
Converts given argument to float. On failure, logs a warning and returns 0.0. Args: v (any): value to convert to float. Returns: float: converted v, or 0.0 if conversion failed.
juraj-google-style
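Behavior sketch, assuming a module-level logger is configured:

_force_float('3.14')   # -> 3.14
_force_float('oops')   # logs a warning, -> 0.0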
def __init__(self, axis=-1, validate_args=False, name="cumsum"): if not isinstance(axis, int) or axis >= 0: raise ValueError("`axis` must be a negative integer.") self._axis = axis super(Cumsum, self).__init__( is_constant_jacobian=True, forward_min_event_ndims=-axis, validate_args=validate_args, name=name)
Instantiates the `Cumsum` bijector. Args: axis: Negative Python `int` indicating the axis along which to compute the cumulative sum. Note that positive (and zero) values are not supported. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. Raises: ValueError: If `axis` is not a negative `int`.
juraj-google-style
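A construction sketch using the class as defined above:

b = Cumsum(axis=-1)   # cumulative sum over the last axis
# Cumsum(axis=0) would raise ValueError: axis must be a negative integer.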
def indicator_associations_types( self, indicator_type, api_entity=None, api_branch=None, params=None ): if params is None: params = {} if not self.can_update(): self._tcex.handle_error(910, [self.type]) target = self._tcex.ti.indicator(indicator_type) for at in self.tc_requests.indicator_associations_types( self.api_type, self.api_sub_type, self.unique_id, target, api_entity=api_entity, api_branch=api_branch, owner=self.owner, params=params, ): yield at
Gets the indicator association types from an Indicator/Group/Victim. Args: indicator_type: The indicator type of the association target. api_entity: Optional API entity. api_branch: Optional API branch. params: Optional dict of query parameters. Yields: Indicator association types for the given target.
juraj-google-style
def execute_show(args, root_dir):
    key = None
    if args.get('key'):
        key = args['key']
        status = command_factory('status')({}, root_dir=root_dir)
        if key not in status['data'] or status['data'][key]['status'] != 'running':
            print('No running process with this key, use `log` to show finished processes.')
            return
    else:
        status = command_factory('status')({}, root_dir=root_dir)
        if isinstance(status['data'], str):
            print(status['data'])
            return
        for k in sorted(status['data'].keys()):
            if status['data'][k]['status'] == 'running':
                key = k
                break
        if key is None:
            print('No running process, use `log` to show finished processes.')
            return
    config_dir = os.path.join(root_dir, '.config/pueue')
    stdoutFile = os.path.join(config_dir, 'pueue_process_{}.stdout'.format(key))
    stderrFile = os.path.join(config_dir, 'pueue_process_{}.stderr'.format(key))
    stdoutDescriptor = open(stdoutFile, 'r')
    stderrDescriptor = open(stderrFile, 'r')
    if args['watch']:
        stdscr = curses.initscr()
        curses.noecho()
        curses.cbreak()
        curses.curs_set(2)
        stdscr.keypad(True)
        stdscr.refresh()
        try:
            while True:
                stdscr.clear()
                stdoutDescriptor.seek(0)
                message = stdoutDescriptor.read()
                stdscr.addstr(0, 0, message)
                stdscr.refresh()
                time.sleep(2)
        finally:
            # Always restore the terminal, even on KeyboardInterrupt.
            curses.nocbreak()
            stdscr.keypad(False)
            curses.echo()
            curses.endwin()
    else:
        print('Stdout output:\n')
        stdoutDescriptor.seek(0)
        print(get_descriptor_output(stdoutDescriptor, key))
        print('\n\nStderr output:\n')
        stderrDescriptor.seek(0)
        print(get_descriptor_output(stderrDescriptor, key))
Print stderr and stdout of the current running process. Args: args['watch'] (bool): If True, we open a curses session and tail the output live in the console. root_dir (string): The path to the root directory the daemon is running in.
juraj-google-style
def is_macos_gfortran(f90_compiler): from numpy.distutils.fcompiler import gnu if (sys.platform != MAC_OS): return False if (not isinstance(f90_compiler, gnu.Gnu95FCompiler)): return False return True
Checks if the current build is ``gfortran`` on macOS. Args: f90_compiler (numpy.distutils.fcompiler.FCompiler): A Fortran compiler instance. Returns: bool: Only :data:`True` if * Current OS is macOS (checked via ``sys.platform``). * ``f90_compiler`` corresponds to ``gfortran``.
codesearchnet
def _read_single(parser, filepath):
    from os import path
    if path.isfile(filepath):
        with open(filepath) as configfile:
            parser.readfp(configfile)
Reads a single config file into the parser, silently failing if the file does not exist. Args: parser (ConfigParser): parser to read the file into. filepath (str): full path to the config file.
codesearchnet
def import_string_code_as_module(code): sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest() module = imp.new_module(sha256) try: exec_(code, module.__dict__) except Exception as e: raise exceptions.UserError('User code exception', exception_message=str(e)) sys.modules[sha256] = module return module
Used to run arbitrary passed code as a module Args: code (string): Python code to import as module Returns: module: Python module
codesearchnet
def cumulative_distribution(self, X): self.check_fit() return norm.cdf(X, loc=self.mean, scale=self.std)
Cumulative distribution function for gaussian distribution. Arguments: X: `np.ndarray` of shape (n, 1). Returns: np.ndarray: Cumulative density for X.
juraj-google-style
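A usage sketch, assuming a fitted instance (mean and std set) named model:

import numpy as np
X = np.array([[0.0], [1.0], [2.0]])
probs = model.cumulative_distribution(X)   # shape (3, 1) of CDF values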
def get_blob(profile, sha): resource = "/blobs/" + sha data = api.get_request(profile, resource) return prepare(data)
Fetch a blob. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of the blob to fetch. Returns: A dict with data about the blob.
juraj-google-style
def from_string(header_str):
    lines = tuple(clean_lines(header_str.split("\n"), False))
    comment1 = lines[0]
    # str.find returns -1 (truthy) when absent and 0 (falsy) at position 0,
    # so test membership explicitly.
    feffpmg = "pymatgen" in comment1
    if feffpmg:
        comment2 = ' '.join(lines[1].split()[2:])
        source = ' '.join(lines[2].split()[2:])
        basis_vec = lines[6].split(":")[-1].split()
        a = float(basis_vec[0])
        b = float(basis_vec[1])
        c = float(basis_vec[2])
        lengths = [a, b, c]
        basis_ang = lines[7].split(":")[-1].split()
        alpha = float(basis_ang[0])
        beta = float(basis_ang[1])
        gamma = float(basis_ang[2])
        angles = [alpha, beta, gamma]
        lattice = Lattice.from_lengths_and_angles(lengths, angles)
        natoms = int(lines[8].split(":")[-1].split()[0])
        atomic_symbols = []
        for i in range(9, 9 + natoms):
            atomic_symbols.append(lines[i].split()[2])
        coords = []
        for i in range(natoms):
            toks = lines[i + 9].split()
            coords.append([float(s) for s in toks[3:]])
        struct = Structure(lattice, atomic_symbols, coords, False, False, False)
        h = Header(struct, source, comment2)
        return h
    else:
        return "Header not generated by pymatgen, cannot return header object"
Reads Header string and returns Header object if the header was generated by pymatgen. Note: Checks to see if the header was generated by pymatgen; if not, it is impossible to generate a structure object, so no header object is created and a message string is returned instead. Args: header_str: pymatgen-generated feff.inp header. Returns: Header object, or a message string if the header was not generated by pymatgen.
juraj-google-style
def set_doc_ids(self, doc_ids): if isinstance(doc_ids, list): self.set_documents(dict.fromkeys(doc_ids)) else: self.set_documents({doc_ids: None})
Build xml documents from a list of document ids. Args: doc_ids -- A document id or a list of those.
juraj-google-style
def from_las3(cls, string, lexicon=None, source='LAS', dlm=',', abbreviations=False): f = (re.DOTALL | re.IGNORECASE) regex = '\\~\\w+?_Data.+?\\n(.+?)(?:\\n\\n+|\\n*\\~|\\n*$)' pattern = re.compile(regex, flags=f) text = pattern.search(string).group(1) s = re.search('\\.(.+?)\\: ?.+?source', string) if s: source = s.group(1).strip() return cls.from_descriptions(text, lexicon, source=source, dlm=dlm, abbreviations=abbreviations)
Turn LAS3 'lithology' section into a Striplog. Args: string (str): A section from an LAS3 file. lexicon (Lexicon): The language for conversion to components. source (str): A source for the data. dlm (str): The delimiter. abbreviations (bool): Whether to expand abbreviations. Returns: Striplog: The ``striplog`` object. Note: Handles multiple 'Data' sections. It would be smarter for it to handle one at a time, and to deal with parsing the multiple sections in the Well object. Does not read an actual LAS file. Use the Well object for that.
codesearchnet
def _get_napp_key(self, key, user=None, napp=None): if user is None: user = self.user if napp is None: napp = self.napp kytos_json = self._installed / user / napp / 'kytos.json' try: with kytos_json.open() as file_descriptor: meta = json.load(file_descriptor) return meta[key] except (FileNotFoundError, json.JSONDecodeError, KeyError): return ''
Return a value from kytos.json. Args: key (string): Key used to get the value within kytos.json. user (string): A username. napp (string): A NApp name. Returns: The value stored under key in kytos.json, or an empty string if the file or key cannot be read.
juraj-google-style
def _CheckPenalties(self, tree, list_of_expected): def FlattenRec(tree): if pytree_utils.NodeName(tree) in pytree_utils.NONSEMANTIC_TOKENS: return [] if isinstance(tree, pytree.Leaf): return [(tree.value, pytree_utils.GetNodeAnnotation(tree, pytree_utils.Annotation.SPLIT_PENALTY))] nodes = [] for node in tree.children: nodes += FlattenRec(node) return nodes self.assertEqual(list_of_expected, FlattenRec(tree))
Check that the tokens in the tree have the correct penalties. Args: tree: the pytree. list_of_expected: list of (name, penalty) pairs. Non-semantic tokens are filtered out from the expected values.
github-repos
def from_iterable(cls, frames, sort=False): return FrameSet(sorted(frames) if sort else frames)
Build a :class:`FrameSet` from an iterable of frames. Args: frames (collections.Iterable): an iterable object containing frames as integers sort (bool): True to sort frames before creation, default is False Returns: :class:`FrameSet`:
juraj-google-style
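An illustrative call; sorting is opt-in:

fs = FrameSet.from_iterable({5, 1, 3}, sort=True)   # frames ordered 1, 3, 5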
def get_dfa_conjecture(self):
    dfa = DFA(self.alphabet)
    for s in self.observation_table.sm_vector:
        for i in self.alphabet:
            dst = self.observation_table.equiv_classes[s + i]
            if dst is None:
                logging.debug('Conjecture attempt on non closed table.')
                return None
            obsrv = self.observation_table[s, i]
            src_id = self.observation_table.sm_vector.index(s)
            dst_id = self.observation_table.sm_vector.index(dst)
            dfa.add_arc(src_id, dst_id, i, obsrv)
    for i, s in enumerate(self.observation_table.sm_vector):
        dfa[i].final = self.observation_table[s, self.epsilon]
    return dfa
Utilize the observation table to construct a DFA conjecture. The library used for representing the automaton is the python bindings of the openFST library (pyFST). Args: None Returns: DFA: A DFA built based on a closed and consistent observation table, or None if the table is not closed.
juraj-google-style
def restore(self, x): with tf.name_scope("pad_reduce/restore"): x = tf.scatter_nd( indices=self.nonpad_ids, updates=x, shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0), ) return x
Add padding back to the given tensor. Args: x (tf.Tensor): of shape [dim_compressed,...] Returns: a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The dim is restored from the original reference tensor.
juraj-google-style
def _publish_scan_response(self, client):
    devices = self._manager.scanned_devices
    converted_devs = []
    for (uuid, info) in devices.items():
        slug = self._build_device_slug(uuid)
        message = {}
        message['uuid'] = uuid
        if (uuid in self._connections):
            message['user_connected'] = True
        elif ('user_connected' in info):
            message['user_connected'] = info['user_connected']
        else:
            message['user_connected'] = False
        message['connection_string'] = slug
        message['signal_strength'] = info['signal_strength']
        # Snapshot the device record before the notification-only fields are added.
        converted_devs.append(dict(message))
        message['type'] = 'notification'
        message['operation'] = 'advertisement'
        self.client.publish(self.topics.gateway_topic(slug, 'data/advertisement'), message)
    probe_message = {}
    probe_message['type'] = 'response'
    probe_message['client'] = client
    probe_message['success'] = True
    probe_message['devices'] = converted_devs
    self.client.publish(self.topics.status, probe_message)
Publish a scan response message The message contains all of the devices that are currently known to this agent. Connection strings for direct connections are translated to what is appropriate for this agent. Args: client (string): A unique id for the client that made this request
codesearchnet
def get_instances_with_configs(configs): results = [] for c in configs: try: serial = c.pop('serial') except KeyError: raise Error(('Required value "serial" is missing in AndroidDevice config %s.' % c)) is_required = c.get(KEY_DEVICE_REQUIRED, True) try: ad = AndroidDevice(serial) ad.load_config(c) except Exception: if is_required: raise ad.log.exception('Skipping this optional device due to error.') continue results.append(ad) return results
Create AndroidDevice instances from a list of dict configs. Each config should have the required key-value pair 'serial'. Args: configs: A list of dicts each representing the configuration of one android device. Returns: A list of AndroidDevice objects.
codesearchnet
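A minimal sketch; the serial value is hypothetical, and the device must actually be attached for construction to succeed.

devices = get_instances_with_configs([{'serial': 'ABC123'}])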
def set(self, name, permission): assert isinstance(permission, BasePermission), 'Only permission instances can be added to the set' self._permissions[name] = permission
Adds permission with the given name to the set. Permission with the same name will be overridden. Args: name: name of the permission permission: permission instance
juraj-google-style