code | docstring | source
---|---|---|
def __pipeline_image(image, options):
results = []
if 'resolutions' in options:
resolutions = options['resolutions']
for res in resolutions:
img_rs = resize(image, res)
results.append(img_rs)
if 'wmark-img' in options:
wtrmk_path = options['wmark-img']
if wtrmk_path:
if len(results) == 0:
image = watermark_image(image, wtrmk_path)
else:
for i in range(0, len(results)):
results[i] = watermark_image(
results[i], wtrmk_path)
if 'wmark-txt' in options:
wtrmk_txt = options['wmark-txt']
if wtrmk_txt:
if len(results) == 0:
image = watermark_text(image, wtrmk_txt)
else:
for i in range(0, len(results)):
results[i] = watermark_text(results[i],
wtrmk_txt)
if len(results) == 0:
results.append(image)
return results
|
Sends an image through a processing pipeline.
Applies all (relevant) provided options to a given image.
Args:
image: An instance of a PIL Image.
options: Options to apply to the image (e.g., resolutions).
Returns:
A list containing instances of PIL Images. This list will always be length
1 if no options exist that require multiple copies to be created for a single
image (e.g., resolutions).
|
juraj-google-style
|
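A minimal usage sketch for the pipeline above, assuming `__pipeline_image` is in scope together with `resize`, `watermark_image`, and `watermark_text` helpers built on Pillow (those helper names come from the calls in the function, not from a confirmed API).

```
from PIL import Image

image = Image.new("RGB", (1920, 1080))
options = {
    "resolutions": [(1280, 720), (640, 360)],  # two resized copies requested
    "wmark-txt": "sample watermark",           # text watermark applied to each copy
}
# One output image per requested resolution, each watermarked.
outputs = __pipeline_image(image, options)
assert len(outputs) == 2
```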
def get_subset_in_chemsys(self, chemsys: List[str]):
chemsys = set(chemsys)
if not chemsys.issubset(self.chemsys):
raise ValueError("%s is not a subset of %s" % (chemsys,
self.chemsys))
subset = set()
for e in self.entries:
elements = [sp.symbol for sp in e.composition.keys()]
if chemsys.issuperset(elements):
subset.add(e)
return EntrySet(subset)
|
Returns an EntrySet containing only the set of entries belonging to
a particular chemical system (in this definition, it includes all sub
systems). For example, if the entries are from the
Li-Fe-P-O system, and chemsys=["Li", "O"], only the Li, O,
and Li-O entries are returned.
Args:
chemsys: Chemical system specified as list of elements. E.g.,
["Li", "O"]
Returns:
EntrySet
|
juraj-google-style
|
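An illustrative call, assuming `entry_set` is an EntrySet built from Li-Fe-P-O computed entries as in the docstring's example; only the filtering behaviour is shown.

```
# entry_set is assumed to hold Li-Fe-P-O entries (e.g. loaded from the Materials Project).
li_o_entries = entry_set.get_subset_in_chemsys(["Li", "O"])
# Only Li, O and Li-O entries survive; Fe- or P-containing entries are dropped.
for entry in li_o_entries.entries:
    print(entry.composition.reduced_formula)
```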
def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
temporary_directory = getattr(options, 'temporary_directory', None)
if temporary_directory and not os.path.isdir(temporary_directory):
raise errors.BadConfigOption(
'No such temporary directory: {0:s}'.format(temporary_directory))
setattr(configuration_object, '_temporary_directory', temporary_directory)
|
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
|
juraj-google-style
|
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
local_buffer = utils.BytearrayStream()
if self._private_key_unique_identifier:
self._private_key_unique_identifier.write(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidField('The CreateKeyPair response payload is missing the private key unique identifier field.')
if self._public_key_unique_identifier:
self._public_key_unique_identifier.write(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidField('The CreateKeyPair response payload is missing the public key unique identifier field.')
if self._private_key_template_attribute:
self._private_key_template_attribute.write(local_buffer, kmip_version=kmip_version)
if self._public_key_template_attribute:
self._public_key_template_attribute.write(local_buffer, kmip_version=kmip_version)
self.length = local_buffer.length()
super(CreateKeyPairResponsePayload, self).write(output_buffer, kmip_version=kmip_version)
output_buffer.write(local_buffer.buffer)
|
Write the data encoding the CreateKeyPair response payload to a buffer.
Args:
output_buffer (stream): A data buffer in which to encode object
data, supporting a write method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidField: Raised if the private key unique identifier or the
public key unique identifier is not defined.
|
codesearchnet
|
def hist(self, lumping=None, summary=False, sort=True, plot=True, legend=None, ax=None):
comps = []
labels = []
entries = defaultdict(int)
for i in self:
if lumping:
k = i.primary[lumping]
elif summary:
k = i.primary.summary()
else:
k = i.primary
comps.append(i.primary)
labels.append(i.primary.summary())
entries[k] += i.thickness
if sort:
allitems = sorted(entries.items(), key=(lambda i: i[1]), reverse=True)
(ents, counts) = zip(*allitems)
else:
(ents, counts) = (tuple(entries.keys()), tuple(entries.values()))
if plot:
if (ax is None):
(fig, ax) = plt.subplots()
return_ax = False
else:
return_ax = True
ind = np.arange(len(ents))
bars = ax.bar(ind, counts, align='center')
ax.set_xticks(ind)
ax.set_xticklabels(labels)
if legend:
colours = [legend.get_colour(c) for c in comps]
for (b, c) in zip(bars, colours):
b.set_color(c)
ax.set_ylabel('Thickness [m]')
else:
bars = []
if (plot and return_ax):
return (counts, ents, ax)
return (counts, ents, bars)
|
Plots a histogram and returns the data for it.
Args:
lumping (str): If given, the bins will be lumped based on this
attribute of the primary components of the intervals
encountered.
summary (bool): If True, the summaries of the components are
returned as the bins. Otherwise, the default behaviour is to
return the Components themselves.
sort (bool): If True (default), the histogram is sorted by value,
starting with the largest.
plot (bool): If True (default), produce a bar plot.
legend (Legend): The legend with which to colour the bars.
ax (axis): An axis object, which will be returned if provided.
If you don't provide one, it will be created but not returned.
Returns:
Tuple: A tuple of tuples of entities and counts.
TODO:
Deal with numeric properties, so I can histogram 'Vp' values, say.
|
codesearchnet
|
def url_to_filename(url, index='index.html', alt_char=False):
assert isinstance(url, str), 'Expect str. Got {}.'.format(type(url))
url_split_result = urllib.parse.urlsplit(url)
filename = url_split_result.path.split('/')[-1]
if not filename:
filename = index
if url_split_result.query:
if alt_char:
query_delim = '@'
else:
query_delim = '?'
filename = '{0}{1}{2}'.format(
filename, query_delim, url_split_result.query
)
return filename
|
Return a filename from a URL.
Args:
url (str): The URL.
index (str): If a filename could not be derived from the URL path,
use index instead. For example, ``/images/`` will return
``index.html``.
alt_char (bool): If True, the character for the query delimiter
will be ``@`` instead of ``?``.
This function does not include the directories and does not sanitize
the filename.
Returns:
str
|
juraj-google-style
|
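Expected behaviour of `url_to_filename`, assuming the function above is in scope; the values follow directly from its path and query handling.

```
assert url_to_filename("http://example.com/images/") == "index.html"
assert url_to_filename("http://example.com/a/photo.jpg") == "photo.jpg"
assert url_to_filename("http://example.com/p?x=1", alt_char=True) == "p@x=1"
```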
def GetIapKey(key_id):
global _KEY_CACHE
key = _KEY_CACHE.get(key_id)
if (not key):
resp = requests.get('https:
if (resp.status_code != 200):
raise KeysCanNotBeFetchedError('Unable to fetch IAP keys: {} / {} / {}'.format(resp.status_code, resp.headers, resp.text))
_KEY_CACHE = resp.json()
key = _KEY_CACHE.get(key_id)
if (not key):
raise KeyNotFoundError('Key {!r} not found'.format(key_id))
return key
|
Retrieves a public key from the list published by Identity-Aware Proxy.
The key file is re-fetched if necessary.
Args:
key_id: Key id.
Returns:
String with a key.
Raises:
KeyNotFoundError: if the key is not found in the key file.
KeysCanNotBeFetchedError: if the key file can't be fetched.
|
codesearchnet
|
def token_accuracy(labels, outputs):
weights = tf.to_float(tf.not_equal(labels, 0))
return tf.metrics.accuracy(labels, outputs, weights=weights)
|
Compute tokenwise (elementwise) accuracy.
Args:
labels: ground-truth labels, shape=(batch, seq_length)
outputs: predicted tokens, shape=(batch, seq_length)
Returns:
Two ops, one for getting the current average accuracy and another for
updating the running average estimate.
|
juraj-google-style
|
def write(self, value, *labels):
raise NotImplementedError
|
Writes the value to the given cache.
Args:
value: An encodable (with corresponding PCoder) value
*labels: List of labels for PCollection instance
|
github-repos
|
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError(f'`len(dilation_rate)` should be {num_spatial_dims}. Received: dilation_rate={dilation_rate} of length {len(dilation_rate)}')
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError(f'all values of `dilation_rate` must be positive. Received: dilation_rate={dilation_rate}')
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError(f'`len(strides)` should be {num_spatial_dims}. Received: strides={strides} of length {len(strides)}')
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError(f'all values of `strides` must be positive. Received: strides={strides}')
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(f'`strides > 1` not supported in conjunction with `dilation_rate > 1`. Received: strides={strides} and dilation_rate={dilation_rate}')
return (strides, dilation_rate)
|
Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to `[1]*N`. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to `[1]*N`. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
|
github-repos
|
def check_url(url):
request = urllib2.Request(url)
try:
response = urlopen(request)
return True, response.code
except urllib2.HTTPError as e:
return False, e.code
|
Check if the resource at a URL is fetchable (by trying to fetch it and checking for a 200 status).
Args:
url (str): Url to check.
Returns:
A tuple of (True/False, response code)
|
juraj-google-style
|
def build_counter_list(counter_list):
counters = []
index = 0
for (obj, instance, counter_name) in counter_list:
try:
counter = Counter.build_counter(obj, instance, index, counter_name)
index += 1
counters.append(counter)
except CommandExecutionError as exc:
log.debug(exc.strerror)
continue
return counters
|
r'''
Create a list of Counter objects to be used in the pdh query
Args:
counter_list (list):
A list of tuples containing counter information. Each tuple should
contain the object, instance, and counter name. For example, to
get the ``% Processor Time`` counter for all Processors on the
system (``\Processor(*)\% Processor Time``) you would pass a tuple
like this:
```
counter_list = [('Processor', '*', '% Processor Time')]
```
If there is no ``instance`` for the counter, pass ``None``
Multiple counters can be passed like so:
```
counter_list = [('Processor', '*', '% Processor Time'),
('System', None, 'Context Switches/sec')]
```
.. note::
Invalid counters are ignored
Returns:
list: A list of Counter objects
|
codesearchnet
|
def _fillBorder(self, image, color):
height, width, depth = image.shape
if depth != color.shape[0]:
raise ValueError('Image (%d) and color (%d) depths must match.' % (depth, color.shape[0]))
image[0:height, 0, 0:depth] = color
image[0:height, width - 1, 0:depth] = color
image[0, 0:width, 0:depth] = color
image[height - 1, 0:width, 0:depth] = color
return image
|
Fill the border of the image.
Args:
image: Numpy array of shape [height, width, depth].
color: Numpy color of shape [depth] and either contents RGB/RGBA.
Returns:
image of original shape with border filled with "color".
Raises:
ValueError: Depths of image and color don't match.
|
github-repos
|
def rebin(d, n_x, n_y=None):
if d.ndim == 2:
if n_y is None:
n_y = 1
if n_x is None:
n_x = 1
d = d[:int(d.shape[0] // n_x) * n_x, :int(d.shape[1] // n_y) * n_y]
d = d.reshape((d.shape[0] // n_x, n_x, d.shape[1] // n_y, n_y))
d = d.mean(axis=3)
d = d.mean(axis=1)
elif d.ndim == 1:
d = d[:int(d.shape[0] // n_x) * n_x]
d = d.reshape((d.shape[0] // n_x, n_x))
d = d.mean(axis=1)
else:
raise RuntimeError("Only NDIM <= 2 supported")
return d
|
Rebin data by averaging bins together
Args:
d (np.array): data
n_x (int): number of bins in x dir to rebin into one
n_y (int): number of bins in y dir to rebin into one
Returns:
d: rebinned data, with each rebinned axis reduced by the corresponding factor
|
juraj-google-style
|
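A quick check of `rebin`, assuming the reconstructed body above (the `//` expressions were truncated in the dump and have been restored to match the docstring's averaging behaviour).

```
import numpy as np

d = np.arange(8, dtype=float)
print(rebin(d, 2))        # [0.5 2.5 4.5 6.5] -- adjacent pairs averaged together

m = np.arange(16, dtype=float).reshape(4, 4)
print(rebin(m, 2, 2))     # 2x2 block means of the 4x4 array
```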
def diagflat(x, k=0):
if any_symbolic_tensors((x,)):
return Diagflat(k=k).symbolic_call(x)
return backend.numpy.diagflat(x, k=k)
|
Create a two-dimensional array with the flattened input on
the k-th diagonal.
Args:
x: Input tensor to be flattened and placed on the diagonal.
k: The diagonal to place the flattened input. Defaults to `0`.
Use `k > 0` for diagonals above the main diagonal,
and `k < 0` for diagonals below the main diagonal.
Returns:
A 2-D tensor with the flattened input on the specified diagonal.
|
github-repos
|
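For intuition, the NumPy function of the same name behaves exactly as the docstring describes; this is only the NumPy counterpart, not the Keras op itself.

```
import numpy as np

print(np.diagflat([1, 2, 3]))        # 3x3 with [1, 2, 3] on the main diagonal
print(np.diagflat([1, 2, 3], k=1))   # 4x4 with [1, 2, 3] on the first superdiagonal
```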
def delete_note(self, note_id):
(note, status) = self.trash_note(note_id)
if (status == (- 1)):
return (note, status)
params = ('/i/%s' % str(note_id))
request = Request(url=(DATA_URL + params), method='DELETE')
request.add_header(self.header, self.get_token())
try:
response = urllib2.urlopen(request)
except IOError as e:
return (e, (- 1))
except HTTPError as e:
if (e.code == 401):
raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
else:
return (e, (- 1))
return ({}, 0)
|
Method to permanently delete a note
Arguments:
- note_id (string): key of the note to trash
Returns:
A tuple `(note, status)`
- note (dict): an empty dict or an error message
- status (int): 0 on success and -1 otherwise
|
codesearchnet
|
def _resize_volumes(self, x, depth_factor, height_factor, width_factor, data_format):
if data_format == 'channels_first':
output = ops.repeat(x, depth_factor, axis=2)
output = ops.repeat(output, height_factor, axis=3)
output = ops.repeat(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = ops.repeat(x, depth_factor, axis=1)
output = ops.repeat(output, height_factor, axis=2)
output = ops.repeat(output, width_factor, axis=3)
return output
else:
raise ValueError(f'Invalid data_format: {data_format}')
|
Resizes the volume contained in a 5D tensor.
Args:
x: Tensor or variable to resize.
depth_factor: Positive integer.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
Returns:
Resized tensor.
|
github-repos
|
def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size):
batch_size = array_ops.shape(boxes)[0]
new_slice = array_ops.slice(boxes, [0, inner_idx * tile_size, 0], [batch_size, tile_size, 4])
iou = _bbox_overlap(new_slice, box_slice)
box_slice_after_suppression = array_ops.expand_dims(math_ops.cast(math_ops.reduce_all(iou < iou_threshold, [1]), box_slice.dtype), 2) * box_slice
return (boxes, box_slice_after_suppression, iou_threshold, inner_idx + 1)
|
Suppress boxes between different tiles.
Args:
boxes: a tensor of shape [batch_size, num_boxes_with_padding, 4]
box_slice: a tensor of shape [batch_size, tile_size, 4]
iou_threshold: a scalar tensor
inner_idx: a scalar tensor representing the tile index of the tile
that is used to suppress box_slice
tile_size: an integer representing the number of boxes in a tile
Returns:
boxes: unchanged boxes as input
box_slice_after_suppression: box_slice after suppression
iou_threshold: unchanged
|
github-repos
|
def AddDirectory(self, path):
if self.file_system.FileEntryExistsByPath(path):
raise ValueError('Path: {0:s} already set.'.format(path))
self._AddParentDirectories(path)
self.file_system.AddFileEntry(
path, file_entry_type=definitions.FILE_ENTRY_TYPE_DIRECTORY)
|
Adds a directory to the fake file system.
Note that this function will create parent directories if needed.
Args:
path (str): path of the directory within the fake file system.
Raises:
ValueError: if the path is already set.
|
juraj-google-style
|
def sudo_remove_dirtree(dir_name):
try:
subprocess.check_output(['sudo', 'rm', '-rf', dir_name])
except subprocess.CalledProcessError as e:
raise WorkerError("Can't remove directory {0}".format(dir_name), e)
|
Removes directory tree as a superuser.
Args:
dir_name: name of the directory to remove.
This function is necessary to clean up directories created from inside a
Docker container, since they are usually written as root and thus have to be
removed as root.
|
juraj-google-style
|
def editTemplate(id, data):
conn = Qubole.agent()
return conn.put(Template.element_path(id), data)
|
Edit an existing template.
Args:
`id`: ID of the template to edit
`data`: json data to be updated
Returns:
Dictionary containing the updated details of the template.
|
codesearchnet
|
def from_config(cls, gitlab_id=None, config_files=None):
config = gitlab.config.GitlabConfigParser(gitlab_id=gitlab_id,
config_files=config_files)
return cls(config.url, private_token=config.private_token,
oauth_token=config.oauth_token,
ssl_verify=config.ssl_verify, timeout=config.timeout,
http_username=config.http_username,
http_password=config.http_password,
api_version=config.api_version,
per_page=config.per_page)
|
Create a Gitlab connection from configuration files.
Args:
gitlab_id (str): ID of the configuration section.
config_files (list[str]): List of paths to configuration files.
Returns:
(gitlab.Gitlab): A Gitlab connection.
Raises:
gitlab.config.GitlabDataError: If the configuration is not correct.
|
juraj-google-style
|
def _align_output_features_output_indices(out_features: Optional[list[str]], out_indices: Optional[Union[list[int], tuple[int]]], stage_names: list[str]):
if out_indices is None and out_features is None:
out_indices = [len(stage_names) - 1]
out_features = [stage_names[-1]]
elif out_indices is None and out_features is not None:
out_indices = [stage_names.index(layer) for layer in out_features]
elif out_features is None and out_indices is not None:
out_features = [stage_names[idx] for idx in out_indices]
return (out_features, out_indices)
|
Finds the corresponding `out_features` and `out_indices` for the given `stage_names`.
The logic is as follows:
- `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the
`out_indices`.
- `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the
`out_features`.
- `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.
- `out_indices` and `out_features` set: input `out_indices` and `out_features` are returned.
Args:
out_features (`List[str]`): The names of the features for the backbone to output.
out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
stage_names (`List[str]`): The names of the stages of the backbone.
|
github-repos
|
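Expected behaviour of the alignment helper above, assuming it is in scope; the stage names are made up for illustration.

```
stage_names = ["stem", "stage1", "stage2", "stage3"]

# Neither set: default to the last stage.
print(_align_output_features_output_indices(None, None, stage_names))
# (['stage3'], [3])

# Only out_features set: indices are looked up from the stage names.
print(_align_output_features_output_indices(["stage1", "stage3"], None, stage_names))
# (['stage1', 'stage3'], [1, 3])
```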
def train(cluster_info, cluster_meta, feed_timeout=600, qname='input'):
def _train(iter):
mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id())
try:
queue = mgr.get_queue(qname)
equeue = mgr.get_queue('error')
except (AttributeError, KeyError):
msg = "Queue '{}' not found on this node, check for exceptions on other nodes.".format(qname)
raise Exception(msg)
state = str(mgr.get('state'))
logging.info("mgr.state={0}".format(state))
terminating = state == "'terminating'"
if terminating:
logging.info("mgr is terminating, skipping partition")
count = sum(1 for item in iter)
logging.info("Skipped {0} items from partition".format(count))
else:
logging.info("Feeding partition {0} into {1} queue {2}".format(iter, qname, queue))
count = 0
for item in iter:
count += 1
queue.put(item, block=True)
joinThr = Thread(target=queue.join)
joinThr.start()
timeout = feed_timeout
while (joinThr.isAlive()):
if (not equeue.empty()):
e_str = equeue.get()
equeue.task_done()
raise Exception("exception in worker:\n" + e_str)
time.sleep(1)
timeout -= 1
if timeout <= 0:
raise Exception("Timeout while feeding partition")
logging.info("Processed {0} items in partition".format(count))
if not terminating:
state = str(mgr.get('state'))
terminating = state == "'terminating'"
if terminating:
try:
logging.info("TFSparkNode: requesting stop")
client = reservation.Client(cluster_meta['server_addr'])
client.request_stop()
client.close()
except Exception as e:
logging.debug("Error while requesting stop: {0}".format(e))
return [terminating]
return _train
|
Feeds Spark partitions into the shared multiprocessing.Queue.
Args:
:cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc)
:cluster_meta: dictionary of cluster metadata (e.g. cluster_id, reservation.Server address, etc)
:feed_timeout: number of seconds after which data feeding times out (600 sec default)
:qname: *INTERNAL_USE*
Returns:
A dataRDD.mapPartitions() function
|
juraj-google-style
|
def str(name, default=None, allow_none=False, fallback=None):
value = read(name, default, allow_none, fallback=fallback)
if value is None and allow_none:
return None
else:
return builtins.str(value).strip()
|
Get a string based environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
fallback: Fallback passed through to the underlying ``read`` call
Returns:
The environment value as a stripped string, or `None` if unset and `allow_none` is True
|
juraj-google-style
|
def GetUserById(self, local_id):
user = self.rpc_helper.GetAccountInfoById(local_id)
return GitkitUser.FromApiResponse(user)
|
Gets user info by id.
Args:
local_id: string, the user id at Gitkit server.
Returns:
GitkitUser, containing the user info.
|
codesearchnet
|
def get(self, page=0, size=10):
dash_list = r_db.zrevrange(config.DASH_ID_KEY, 0, (- 1), True)
id_list = dash_list[(page * size):((page * size) + size)]
dash_meta = []
data = []
if id_list:
dash_meta = r_db.hmget(config.DASH_META_KEY, [i[0] for i in id_list])
data = [json.loads(i) for i in dash_meta]
return build_response(dict(data=data, code=200))
|
Get dashboard meta info for page `page` with page size `size`.
Args:
page: page number.
size: size number.
Returns:
list of dicts containing the dash_id and corresponding meta info.
May be an empty list [] when page * size exceeds the total number of dashes in the db.
|
codesearchnet
|
def get_converter(in_type, out_type, *args, **kwargs):
convs = pliers.converters.__all__
out_type = listify(out_type)[::-1]
default_convs = config.get_option('default_converters')
for ot in out_type:
conv_str = '%s->%s' % (in_type.__name__, ot.__name__)
if conv_str in default_convs:
convs = list(default_convs[conv_str]) + convs
for name in convs:
cls = getattr(pliers.converters, name)
if not issubclass(cls, Converter):
continue
available = cls.available if issubclass(
cls, EnvironmentKeyMixin) else True
if cls._input_type == in_type and cls._output_type in out_type \
and available:
conv = cls(*args, **kwargs)
return conv
return None
|
Scans the list of available Converters and returns an instantiation
of the first one whose input and output types match those passed in.
Args:
in_type (type): The type of input the converter must have.
out_type (type): The type of output the converter must have.
args, kwargs: Optional positional and keyword arguments to pass onto
matching Converter's initializer.
|
juraj-google-style
|
def compute_matches(self, args: list[types.Arg], match_all_views: bool, keep_all_views: bool=False, alias_map: datatypes.UnionFind | None=None) -> list[GoodMatch]:
matches = None
has_self = args and args[0].name == 'self'
for arg in args:
match_result = self.compute_one_match(arg.value, arg.typ, arg.name, match_all_views, keep_all_views, alias_map)
if not match_result.success:
if matches:
self._error_subst = matches[0].subst
bad_param = self._get_bad_type(arg.name, arg.typ)
else:
bad_param = match_result.bad_matches[0].expected
raise error_types.MatchError(bad_param)
if keep_all_views or any((m.subst for m in match_result.good_matches)):
typ = cast(abstract.BaseValue, arg.typ)
matches = self._merge_matches(arg.name, typ, matches, match_result.good_matches, keep_all_views, has_self)
return matches if matches else [GoodMatch.default()]
|
Compute information about type parameters using one-way unification.
Given the arguments of a function call, try to find substitutions that match
them against their expected types.
Args:
args: A sequence of function arguments.
match_all_views: If True, every possible match must succeed for the
overall match to be considered a success. Otherwise, the overall match
succeeds as long as at least one possible match succeeds.
keep_all_views: If True, avoid optimizations that discard views.
alias_map: Optionally, a datatypes.UnionFind, which stores all the type
renaming information, mapping of type parameter name to its
representative.
Returns:
A sequence of GoodMatch results containing the computed substitutions.
Raises:
MatchError: if any of the arguments does not match its expected type.
|
github-repos
|
def __get_conn(self, flag_force_new=False, filename=None):
flag_open_new = flag_force_new or not self._conn_is_open()
if flag_open_new:
if filename is None:
filename = self.filename
conn = self._get_conn(filename)
self._conn = conn
else:
conn = self._conn
return conn
|
Returns a connection to the database. Tries to return the existing connection, unless
flag_force_new is True.
Args:
flag_force_new: if True, open a new connection even if one is already open
filename: database file to connect to; defaults to self.filename
Returns: sqlite3.Connection object
**Note** this is a private method because you can get a connection to any file, so it has to
be used at the right moment
|
juraj-google-style
|
def make_same_degree(nodes1, nodes2):
(_, num_nodes1) = nodes1.shape
(_, num_nodes2) = nodes2.shape
for _ in six.moves.xrange((num_nodes2 - num_nodes1)):
nodes1 = _curve_helpers.elevate_nodes(nodes1)
for _ in six.moves.xrange((num_nodes1 - num_nodes2)):
nodes2 = _curve_helpers.elevate_nodes(nodes2)
return (nodes1, nodes2)
|
Degree-elevate a curve so two curves have matching degree.
Args:
nodes1 (numpy.ndarray): Set of control points for a
B |eacute| zier curve.
nodes2 (numpy.ndarray): Set of control points for a
B |eacute| zier curve.
Returns:
Tuple[numpy.ndarray, numpy.ndarray]: The potentially degree-elevated
nodes passed in.
|
codesearchnet
|
def method_not_allowed(cls, errors=None):
if cls.expose_status:
cls.response.content_type = 'application/json'
cls.response._status_line = '405 Method Not Allowed'
return cls(405, None, errors).to_json
|
Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
|
codesearchnet
|
def events(config):
celery_app = create_app(config)
for event in event_stream(celery_app, filter_by_prefix='task'):
try:
yield create_event_model(event)
except JobEventTypeUnsupported:
pass
|
Return a generator that yields workflow events.
For every workflow event that is sent from celery this generator yields an event
object.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
Returns:
generator: A generator that returns workflow events.
|
juraj-google-style
|
def leaves(self, nodes=None, unique=True):
if nodes is None:
return super(DependencyTree, self).leaves(unique=unique)
res = list()
for child_id in nodes:
for sub_child in self._all_nodes[child_id].leaves(unique=unique):
if not unique or sub_child not in res:
res.append(sub_child)
return res
|
Get the leaves of the tree starting at this root.
Args:
nodes (iterable): limit leaves for these node names
unique: only include individual leaf nodes once
Returns:
list of leaf nodes
|
juraj-google-style
|
def random_restore(
rnd: Optional[tcod.random.Random], backup: tcod.random.Random
) -> None:
lib.TCOD_random_restore(rnd.random_c if rnd else ffi.NULL, backup.random_c)
|
Restore a random number generator from a backed up copy.
Args:
rnd (Optional[Random]): A Random instance, or None to use the default.
backup (Random): The Random instance which was used as a backup.
.. deprecated:: 8.4
You can use the standard library copy and pickle modules to save a
random state.
|
juraj-google-style
|
def _trigger(self):
self._completed.set()
for callback in self._callbacks:
callback(self)
|
Trigger all callbacks registered to this Future.
This method is called internally by the batch once the batch
completes.
|
codesearchnet
|
def select_inputs(self, address, nfees, ntokens, min_confirmations=6):
unspents = self._t.get(address, min_confirmations=min_confirmations)['unspents']
unspents = [u for u in unspents if (u not in self._spents.queue)]
if (len(unspents) == 0):
raise Exception('No spendable outputs found')
fees = [u for u in unspents if (u['amount'] == self.fee)][:nfees]
tokens = [u for u in unspents if (u['amount'] == self.token)][:ntokens]
if ((len(fees) != nfees) or (len(tokens) != ntokens)):
raise SpoolFundsError('Not enough outputs to spend. Refill your wallet')
if (self._spents.qsize() > (self.SPENTS_QUEUE_MAXSIZE - (nfees + ntokens))):
[self._spents.get() for i in range((((self._spents.qsize() + nfees) + ntokens) - self.SPENTS_QUEUE_MAXSIZE))]
[self._spents.put(fee) for fee in fees]
[self._spents.put(token) for token in tokens]
return (fees + tokens)
|
Selects the inputs for the spool transaction.
Args:
address (str): bitcoin address to select inputs for
nfees (int): number of fees
ntokens (int): number of tokens
min_confirmations (Optional[int]): minimum number of required
confirmations; defaults to 6
|
codesearchnet
|
def _FormatSourceShort(self, event):
source_short, _ = self._output_mediator.GetFormattedSources(event)
if source_short is None:
data_type = getattr(event, 'data_type', 'UNKNOWN')
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
return source_short
|
Formats the short source.
Args:
event (EventObject): event.
Returns:
str: short source field.
Raises:
NoFormatterFound: If no event formatter can be found to match the data
type in the event.
|
juraj-google-style
|
def insert_tag(tag, before, root):
if not before:
root.childs.append(tag)
tag.parent = root
return
if type(before) in [tuple, list]:
before = first(before)
if not hasattr(before, "parent"):
raise ValueError("Input must be double-linked!")
parent = before.parent
parent.childs.insert(
parent.childs.index(before),
tag
)
tag.parent = parent
|
Insert `tag` before `before` tag if present. If not, insert it into `root`.
Args:
tag (obj): HTMLElement instance.
before (obj): HTMLElement instance.
root (obj): HTMLElement instance.
|
juraj-google-style
|
def Add(self, other):
if (len(self.data) != len(other.data)):
raise RuntimeError('Can only add series of identical lengths.')
for i in range(len(self.data)):
if (self.data[i][1] != other.data[i][1]):
raise RuntimeError('Timestamp mismatch.')
if ((self.data[i][0] is None) and (other.data[i][0] is None)):
continue
self.data[i][0] = ((self.data[i][0] or 0) + (other.data[i][0] or 0))
|
Add other to self pointwise.
Requires that both self and other are of the same length, and contain
identical timestamps. Typically this means that Normalize has been called
on both with identical time parameters.
Args:
other: The sequence to add to self.
Raises:
RuntimeError: other does not contain the same timestamps as self.
|
codesearchnet
|
def top(self, **kwargs):
return self.client.api.top(self.id, **kwargs)
|
Display the running processes of the container.
Args:
ps_args (str): An optional arguments passed to ps (e.g. ``aux``)
Returns:
(str): The output of the top
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def forward(self, inputs, expert_size):
input_list = inputs.split(expert_size, dim=0)
output_list = []
for i in range(self.num_experts):
output_list.append(F.linear(input_list[i], self.weight[i]))
results = torch.cat(output_list, dim=0)
return results
|
Forward pass of the GraniteMoeParallelExperts module.
Args:
inputs (Tensor):
Input tensor.
expert_size:
Expert size information.
Returns:
Tensor: Output tensor.
|
github-repos
|
def as_allocate_quota_request(self, timer=datetime.utcnow):
if (not self.service_name):
raise ValueError(u'the service name must be set')
if (not self.operation_id):
raise ValueError(u'the operation id must be set')
if (not self.operation_name):
raise ValueError(u'the operation name must be set')
op = super(Info, self).as_operation(timer=timer)
labels = {}
if self.client_ip:
labels[_KNOWN_LABELS.SCC_CALLER_IP.label_name] = self.client_ip
if self.referer:
labels[_KNOWN_LABELS.SCC_REFERER.label_name] = self.referer
qop = sc_messages.QuotaOperation(operationId=op.operationId, methodName=op.operationName, consumerId=op.consumerId, quotaMode=sc_messages.QuotaOperation.QuotaModeValueValuesEnum.BEST_EFFORT)
qop.labels = encoding.PyValueToMessage(sc_messages.QuotaOperation.LabelsValue, labels)
quota_info = (self.quota_info if self.quota_info else {})
qop.quotaMetrics = [sc_messages.MetricValueSet(metricName=name, metricValues=[sc_messages.MetricValue(int64Value=cost)]) for (name, cost) in quota_info.items()]
allocate_quota_request = sc_messages.AllocateQuotaRequest(allocateOperation=qop)
if self.config_id:
allocate_quota_request.serviceConfigId = self.config_id
return sc_messages.ServicecontrolServicesAllocateQuotaRequest(serviceName=self.service_name, allocateQuotaRequest=allocate_quota_request)
|
Makes a `ServicecontrolServicesAllocateQuotaRequest` from this instance
Returns:
a ``ServicecontrolServicesAllocateQuotaRequest``
Raises:
ValueError: if the fields in this instance are insufficient to
create a valid ``ServicecontrolServicesAllocateQuotaRequest``
|
codesearchnet
|
def _PrintParsersCounter(self, parsers_counter, session_identifier=None):
if (not parsers_counter):
return
title = 'Events generated per parser'
if session_identifier:
title = '{0:s}: {1:s}'.format(title, session_identifier)
table_view = views.ViewsFactory.GetTableView(self._views_format_type, column_names=['Parser (plugin) name', 'Number of events'], title=title)
for (key, value) in sorted(parsers_counter.items()):
if (key == 'total'):
continue
table_view.AddRow([key, value])
table_view.AddRow(['Total', parsers_counter['total']])
table_view.Write(self._output_writer)
|
Prints the parsers counter
Args:
parsers_counter (collections.Counter): number of events per parser or
parser plugin.
session_identifier (Optional[str]): session identifier.
|
codesearchnet
|
def iso_date(d) -> str:
if isinstance(d, datetime):
return d.isoformat()
elif isinstance(d, date):
return datetime.combine(d, datetime.min.time()).isoformat()
else:
try:
datetime.strptime(d, '%Y-%m-%dT%H:%M:%S')
return d
except ValueError:
try:
datetime.strptime(d, '%Y-%m-%d')
return (d + 'T00:00:00')
except ValueError:
pass
raise ISODateError('Can not convert value to ISO format for kg')
|
Return the ISO 8601 format of a date.
Args:
d: a datetime, a date, or an ISO-formatted date string
Returns: str
|
codesearchnet
|
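Expected behaviour of `iso_date`, assuming the function and its `ISODateError` are in scope.

```
from datetime import date, datetime

print(iso_date(datetime(2020, 1, 2, 3, 4, 5)))  # 2020-01-02T03:04:05
print(iso_date(date(2020, 1, 2)))               # 2020-01-02T00:00:00
print(iso_date("2020-01-02"))                   # 2020-01-02T00:00:00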
def Convert(self, metadata, grr_message, token=None):
return self.BatchConvert([(metadata, grr_message)], token=token)
|
Converts GrrMessage into a set of RDFValues.
Args:
metadata: ExportedMetadata to be used for conversion.
grr_message: GrrMessage to be converted.
token: Security token.
Returns:
List or generator with resulting RDFValues.
|
juraj-google-style
|
def _elevate_nodes(nodes):
(dimension, num_nodes) = np.shape(nodes)
new_nodes = np.empty((dimension, (num_nodes + 1)), order='F')
multipliers = np.arange(1, num_nodes, dtype=_FLOAT64)[np.newaxis, :]
denominator = float(num_nodes)
new_nodes[:, 1:-1] = (multipliers * nodes[:, :-1]) + ((denominator - multipliers) * nodes[:, 1:])
new_nodes /= denominator
new_nodes[:, 0] = nodes[:, 0]
new_nodes[:, -1] = nodes[:, -1]
return new_nodes
|
r"""Degree-elevate a B |eacute| zier curves.
Does this by converting the current nodes :math:`v_0, \ldots, v_n`
to new nodes :math:`w_0, \ldots, w_{n + 1}` where
.. math::
\begin{align*}
w_0 &= v_0 \\
w_j &= \frac{j}{n + 1} v_{j - 1} + \frac{n + 1 - j}{n + 1} v_j \\
w_{n + 1} &= v_n
\end{align*}
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): The nodes defining a curve.
Returns:
numpy.ndarray: The nodes of the degree-elevated curve.
|
codesearchnet
|
def from_question_encoder_generator_configs(cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
|
Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and
decoder model configuration.
Returns:
[`EncoderDecoderConfig`]: An instance of a configuration object
|
github-repos
|
def _merge_beam_dim(tensor):
shape = common_layers.shape_list(tensor)
shape[0] *= shape[1]
shape.pop(1)
return tf.reshape(tensor, shape)
|
Reshapes first two dimensions in to single dimension.
Args:
tensor: Tensor to reshape of shape [A, B, ...]
Returns:
Reshaped tensor of shape [A*B, ...]
|
juraj-google-style
|
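The same collapse of the first two dimensions written in plain TensorFlow, for readers without the tensor2tensor `common_layers` dependency.

```
import tensorflow as tf

x = tf.zeros([4, 3, 7])               # [batch, beam, ...]
merged = tf.reshape(x, [4 * 3, 7])    # [batch * beam, ...]
print(merged.shape)                   # (12, 7)
```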
def _add_dependency(self, dependency, var_name=None):
if (var_name is None):
var_name = next(self.temp_var_names)
if ((dependency, var_name) not in self.dependencies):
self.dependencies.append((dependency, var_name))
return var_name
|
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
|
codesearchnet
|
def parse_criteria(criteria_string):
toks = criteria_string.split()
def parse_sym(sym):
if (sym == '*'):
return [el.symbol for el in Element]
else:
m = re.match('\\{(.*)\\}', sym)
if m:
return [s.strip() for s in m.group(1).split(',')]
else:
return [sym]
def parse_tok(t):
if re.match('\\w+-\\d+', t):
return {'task_id': t}
elif ('-' in t):
elements = [parse_sym(sym) for sym in t.split('-')]
chemsyss = []
for cs in itertools.product(*elements):
if (len(set(cs)) == len(cs)):
cs = [Element(s).symbol for s in cs]
chemsyss.append('-'.join(sorted(cs)))
return {'chemsys': {'$in': chemsyss}}
else:
all_formulas = set()
explicit_els = []
wild_card_els = []
for sym in re.findall('(\\*[\\.\\d]*|\\{.*\\}[\\.\\d]*|[A-Z][a-z]*)[\\.\\d]*', t):
if (('*' in sym) or ('{' in sym)):
wild_card_els.append(sym)
else:
m = re.match('([A-Z][a-z]*)[\\.\\d]*', sym)
explicit_els.append(m.group(1))
nelements = (len(wild_card_els) + len(set(explicit_els)))
parts = re.split('(\\*|\\{.*\\})', t)
parts = [parse_sym(s) for s in parts if (s != '')]
for f in itertools.product(*parts):
c = Composition(''.join(f))
if (len(c) == nelements):
for e in c.keys():
Element(e.symbol)
all_formulas.add(c.reduced_formula)
return {'pretty_formula': {'$in': list(all_formulas)}}
if (len(toks) == 1):
return parse_tok(toks[0])
else:
return {'$or': list(map(parse_tok, toks))}
|
Parses a powerful and simple string criteria and generates a proper
mongo syntax criteria.
Args:
criteria_string (str): A string representing a search criteria.
Also supports wild cards. E.g.,
something like "*2O" gets converted to
{'pretty_formula': {'$in': [u'B2O', u'Xe2O', u"Li2O", ...]}}
Other syntax examples:
mp-1234: Interpreted as a Materials ID.
Fe2O3 or *2O3: Interpreted as reduced formulas.
Li-Fe-O or *-Fe-O: Interpreted as chemical systems.
You can mix and match with spaces, which are interpreted as
"OR". E.g., "mp-1234 FeO" means query for all compounds with
reduced formula FeO or with materials_id mp-1234.
Returns:
A mongo query dict.
|
codesearchnet
|
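Illustrative inputs and the mongo criteria they produce, assuming the pymatgen `Element`/`Composition` imports used by the function are available; the outputs follow the branches in the code.

```
print(parse_criteria("mp-1234"))
# {'task_id': 'mp-1234'}

print(parse_criteria("Li-Fe-O"))
# {'chemsys': {'$in': ['Fe-Li-O']}}

print(parse_criteria("Fe2O3"))
# {'pretty_formula': {'$in': ['Fe2O3']}}
```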
def Calls(self, conditions=None):
results = set()
if (conditions is None):
conditions = [None]
for condition in conditions:
for c in self.Match(*condition):
results.update(self._registry.get(c, []))
return results
|
Find the methods that evaluate data that meets this condition.
Args:
conditions: A tuple of (artifact, os_name, cpe, label)
Returns:
A list of methods that evaluate the data.
|
codesearchnet
|
def update_fitness(objective_function, particle):
fitness = objective_function(particle.position)
best_fitness = particle.best_fitness
cmp = comparator(fitness)
if ((best_fitness is None) or cmp(fitness, best_fitness)):
best_position = particle.position
return particle._replace(fitness=fitness, best_fitness=fitness, best_position=best_position)
else:
return particle._replace(fitness=fitness)
|
Calculates and updates the fitness and best_fitness of a particle.
Fitness is calculated by calling `objective_function` on the particle's position.
Args:
objective_function: The objective (fitness) function used to evaluate the
particle's position.
particle: cipy.algorithms.pso.Particle: Particle to update the fitness
for.
Returns:
cipy.algorithms.pso.Particle: A new particle with the updated fitness.
|
codesearchnet
|
def sg_gpus():
global _gpus
if (_gpus is None):
local_device_protos = device_lib.list_local_devices()
_gpus = len([x.name for x in local_device_protos if (x.device_type == 'GPU')])
return max(_gpus, 1)
|
r""" Gets current available GPU nums
Returns:
A integer : total # of GPUs available
|
codesearchnet
|
def with_inverse(points, noise):
n_points = len(points) // 2
break_point = n_points
points_part = copy.deepcopy(points)
points_part = list(reversed(points_part))
part = kalman_filter(points_part, noise)
total = kalman_filter(points, noise)
result = (list(reversed(part))[:break_point] + total[break_point:])
result[break_point] = point_mean(part[break_point], total[break_point])
return result
|
Smooths a set of points
It smooths them twice, once in the given order and once in the reverse order.
The first half of the results will be taken from the reverse order and
the second half from the normal order.
Args:
points (:obj:`list` of :obj:`Point`)
noise (float): Expected noise, the higher it is the more the path will
be smoothed.
Returns:
:obj:`list` of :obj:`Point`
|
codesearchnet
|
def emit(self, event, *args, **kwargs):
listeners = self._listeners[event]
listeners = itertools.chain(listeners, self._once[event])
self._once[event] = []
for listener in listeners:
self._loop.call_soon(functools.partial(self._dispatch, event, listener, *args, **kwargs))
return self
|
Call each listener for the event with the given arguments.
Args:
event (str): The event to trigger listeners on.
*args: Any number of positional arguments.
**kwargs: Any number of keyword arguments.
This method passes all arguments other than the event name directly
to the listeners. If a listener raises an exception for any reason the
'listener-error', or current value of LISTENER_ERROR_EVENT, is emitted.
Listeners to this event are given the event name, listener object, and
the exception raised. If an error listener fails it does so silently.
All event listeners are fired in a deferred way so this method returns
immediately. The calling coro must yield at some point for the event
to propagate to the listeners.
|
codesearchnet
|
def repr_result(obj: Any, elements: List[str],
with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:
if with_addr:
return "<{qualname}({elements}) at {addr}>".format(
qualname=obj.__class__.__qualname__,
elements=joiner.join(elements),
addr=hex(id(obj)))
else:
return "{qualname}({elements})".format(
qualname=obj.__class__.__qualname__,
elements=joiner.join(elements))
|
Internal function to make a :func:`repr`-style representation of an object.
Args:
obj: object to display
elements: list of object ``attribute=value`` strings
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
|
juraj-google-style
|
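A usage sketch, assuming `repr_result` (and its `COMMA_SPACE` default joiner) are importable from the module above.

```
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __repr__(self):
        return repr_result(self, ["x={!r}".format(self.x), "y={!r}".format(self.y)])

print(Point(1, 2))  # Point(x=1, y=2)
```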
def endpoint_from_production_variants(self, name, production_variants, tags=None, kms_key=None, wait=True):
if (not _deployment_entity_exists((lambda : self.sagemaker_client.describe_endpoint_config(EndpointConfigName=name)))):
config_options = {'EndpointConfigName': name, 'ProductionVariants': production_variants}
if tags:
config_options['Tags'] = tags
if kms_key:
config_options['KmsKeyId'] = kms_key
self.sagemaker_client.create_endpoint_config(**config_options)
return self.create_endpoint(endpoint_name=name, config_name=name, tags=tags, wait=wait)
|
Create an SageMaker ``Endpoint`` from a list of production variants.
Args:
name (str): The name of the ``Endpoint`` to create.
production_variants (list[dict[str, str]]): The list of production variants to deploy.
tags (list[dict[str, str]]): A list of key-value pairs for tagging the endpoint (default: None).
kms_key (str): The KMS key that is used to encrypt the data on the storage volume attached
to the instance hosting the endpoint.
wait (bool): Whether to wait for the endpoint deployment to complete before returning (default: True).
Returns:
str: The name of the created ``Endpoint``.
|
codesearchnet
|
def _render_our_module_key_flags(self, module, output_lines, prefix=''):
key_flags = self.get_key_flags_for_module(module)
if key_flags:
self._render_module_flags(module, key_flags, output_lines, prefix)
|
Returns a help string for the key flags of a given module.
Args:
module: module|str, the module to render key flags for.
output_lines: [str], a list of strings. The generated help message
lines will be appended to this list.
prefix: str, a string that is prepended to each generated help line.
|
juraj-google-style
|
def generate_output_events(self, source, key, val, line='2', hr=True, show_name=False, colorize=True):
output = generate_output(line=line, short=(HR_RDAP[source][key]['_short'] if hr else key), name=(HR_RDAP[source][key]['_name'] if (hr and show_name) else None), is_parent=(False if ((val is None) or (len(val) == 0)) else True), value=('None' if ((val is None) or (len(val) == 0)) else None), colorize=colorize)
if (val is not None):
count = 0
for item in val:
try:
action = item['action']
except KeyError:
action = None
try:
timestamp = item['timestamp']
except KeyError:
timestamp = None
try:
actor = item['actor']
except KeyError:
actor = None
if (count > 0):
output += generate_output(line=str((int(line) + 1)), is_parent=True, colorize=colorize)
output += generate_output(line=str((int(line) + 1)), short=(HR_RDAP_COMMON[key]['action']['_short'] if hr else 'action'), name=(HR_RDAP_COMMON[key]['action']['_name'] if (hr and show_name) else None), value=action, colorize=colorize)
output += generate_output(line=str((int(line) + 1)), short=(HR_RDAP_COMMON[key]['timestamp']['_short'] if hr else 'timestamp'), name=(HR_RDAP_COMMON[key]['timestamp']['_name'] if (hr and show_name) else None), value=timestamp, colorize=colorize)
output += generate_output(line=str((int(line) + 1)), short=(HR_RDAP_COMMON[key]['actor']['_short'] if hr else 'actor'), name=(HR_RDAP_COMMON[key]['actor']['_name'] if (hr and show_name) else None), value=actor, colorize=colorize)
count += 1
return output
|
The function for generating CLI output RDAP events results.
Args:
source (:obj:`str`): The parent key 'network' or 'objects'
(required).
key (:obj:`str`): The event key 'events' or 'events_actor'
(required).
val (:obj:`dict`): The event dictionary (required).
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '0'.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
|
codesearchnet
|
def new_product(self, name):
n = self._product_cls(self, name, summary_cls=self._summary_cls)
self.graph.add_node(n)
self.products.append(n)
return n
|
Create a new product.
Args:
name: name of the new product.
Returns:
A new product instance.
|
codesearchnet
|
def emit(self, record):
try:
message = self.format(record)
log_record = LogRecord(
record.levelno, record.name, os.path.basename(record.pathname),
record.lineno, int(record.created * 1000), message,
)
self._test_record.add_log_record(log_record)
self._notify_update()
except Exception:
self.handleError(record)
|
Save a logging.LogRecord to our test record.
Logs carry useful metadata such as the logger name and level information.
We capture this in a structured format in the test record to enable
filtering by client applications.
Args:
record: A logging.LogRecord to record.
|
juraj-google-style
|
def is_custom_device(device_name):
return context().is_custom_device(device_name)
|
Calls TFE_IsCustomDevice.
Enables using C extensions specifying a custom device from Python. See the
experimental eager C API in tensorflow/c/eager/c_api_experimental.h for
details.
Args:
device_name: A string indicating the name to check whether it is a
registered custom device.
Returns:
A boolean.
|
github-repos
|
def read_excel(filename, dataset_class=dataset.pandas_dataset.PandasDataset, expectations_config=None, autoinspect_func=None, *args, **kwargs):
df = pd.read_excel(filename, *args, **kwargs)
if isinstance(df, dict):
for key in df:
df[key] = _convert_to_dataset_class(df[key], dataset_class, expectations_config, autoinspect_func)
else:
df = _convert_to_dataset_class(df, dataset_class, expectations_config, autoinspect_func)
return df
|
Read a file using Pandas read_excel and return a great_expectations dataset.
Args:
filename (string): path to file to read
dataset_class (Dataset class): class to which to convert resulting Pandas df
expectations_config (string): path to great_expectations config file
Returns:
great_expectations dataset or ordered dict of great_expectations datasets,
if multiple worksheets are imported
|
codesearchnet
|
def get_energy_tersoff(structure, gulp_cmd='gulp'):
gio = GulpIO()
gc = GulpCaller(gulp_cmd)
gin = gio.tersoff_input(structure)
gout = gc.run(gin)
return gio.get_energy(gout)
|
Compute the energy of a structure using Tersoff potential.
Args:
structure: pymatgen.core.structure.Structure
gulp_cmd: GULP command if not in standard place
|
juraj-google-style
|
def run(self, input_dir, output_dir, epsilon):
logging.info('Running attack %s', self.submission_id)
tmp_run_dir = self.temp_copy_extracted_submission()
cmd = ['--network=none',
'-m=24g',
'--cpus=3.75',
'-v', '{0}:/input_images:ro'.format(input_dir),
'-v', '{0}:/output_images'.format(output_dir),
'-v', '{0}:/code'.format(tmp_run_dir),
'-w', '/code',
self.container_name,
'./' + self.entry_point,
'/input_images',
'/output_images',
str(epsilon)]
elapsed_time_sec = self.run_with_time_limit(cmd)
sudo_remove_dirtree(tmp_run_dir)
return elapsed_time_sec
|
Runs attack inside Docker.
Args:
input_dir: directory with input (dataset).
output_dir: directory where output (adversarial images) should be written.
epsilon: maximum allowed size of adversarial perturbation,
should be in range [0, 255].
Returns:
how long it took to run submission in seconds
|
juraj-google-style
|
def fn(x: tuple[int, str]):
return x
|
Test function
Args:
x: The input
Returns:
The output
|
github-repos
|
def MergeMessage(self, source, destination, replace_message_field=False, replace_repeated_field=False):
tree = _FieldMaskTree(self)
tree.MergeMessage(source, destination, replace_message_field, replace_repeated_field)
|
Merges fields specified in FieldMask from source to destination.
Args:
source: Source message.
destination: The destination message to be merged into.
replace_message_field: Replace message field if True. Merge message
field if False.
replace_repeated_field: Replace repeated field if True. Append
elements of repeated field if False.
|
codesearchnet
|
def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
height, width = image_size
raw_size = None
if max_size is not None:
min_original_size = float(min((height, width)))
max_original_size = float(max((height, width)))
if max_original_size / min_original_size * size > max_size:
raw_size = max_size * min_original_size / max_original_size
size = int(round(raw_size))
if height <= width and height == size or (width <= height and width == size):
oh, ow = (height, width)
elif width < height:
ow = size
if max_size is not None and raw_size is not None:
oh = int(raw_size * height / width)
else:
oh = int(size * height / width)
else:
oh = size
if max_size is not None and raw_size is not None:
ow = int(raw_size * width / height)
else:
ow = int(size * width / height)
return (oh, ow)
|
Computes the output image size given the input image size and the desired output size.
Args:
image_size (`Tuple[int, int]`):
The input image size.
size (`int`):
The desired output size.
max_size (`int`, *optional*):
The maximum allowed output size.
|
github-repos
|
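Expected behaviour, assuming the helper above is in scope: `size` targets the shorter side, and `max_size` caps the longer side by shrinking the effective `size`.

```
print(get_size_with_aspect_ratio((480, 640), 800))
# (800, 1066): the shorter side (height) is scaled to 800

print(get_size_with_aspect_ratio((480, 640), 800, max_size=1000))
# (750, 1000): the longer side is capped at 1000, so the effective size shrinks to 750
```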
def gunzip_file(infile, outfile=None, outdir=None, delete_original=False, force_rerun_flag=False):
if (not outfile):
outfile = infile.replace('.gz', '')
if (not outdir):
outdir = ''
else:
outdir = op.dirname(infile)
outfile = op.join(outdir, op.basename(outfile))
if force_rerun(flag=force_rerun_flag, outfile=outfile):
gz = gzip.open(infile, 'rb')
decoded = gz.read()
with open(outfile, 'wb') as new_file:
new_file.write(decoded)
gz.close()
log.debug('{}: file unzipped'.format(outfile))
else:
log.debug('{}: file already unzipped'.format(outfile))
if delete_original:
os.remove(infile)
return outfile
|
Decompress a gzip file and optionally set output values.
Args:
infile: Path to .gz file
outfile: Name of output file
outdir: Path to output directory
delete_original: If original .gz file should be deleted
force_rerun_flag: If file should be decompressed if outfile already exists
Returns:
str: Path to decompressed file
|
codesearchnet
|
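A small round-trip sketch, assuming `gunzip_file` and its module-level helpers (`force_rerun`, `op`, `log`) are importable; the file name is hypothetical.

```
import gzip

with gzip.open("example.txt.gz", "wb") as handle:
    handle.write(b"hello")

out_path = gunzip_file("example.txt.gz")
with open(out_path) as handle:
    print(handle.read())  # hello
```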
def subtree(self, root_path: Union[int, str, KeyPath]) -> Optional['KeyPathSet']:
root_path = KeyPath.from_value(root_path)
if not root_path:
return self
root = self._trie
for key in root_path.keys:
if key not in root:
return None
root = root[key]
ret = KeyPathSet()
ret._trie = root
return ret
|
Returns the relative paths of the sub-tree rooted at the given path.
Args:
root_path: A KeyPath for the root of the sub-tree.
Returns:
A KeyPathSet that contains all the child paths of the given root path.
Please note that the returned value share the same trie as the current
value. So addition/removal of paths in the returned value will also
affect the current value. If there is no child path under the given root
path, None will be returned.
|
github-repos
|
def get_min_max_value(self) -> tuple[float, float]:
return (self._statistics.min_max_statistics.global_min, self._statistics.min_max_statistics.global_max)
|
Calculates the global min and max values.
Returns:
(min_value, max_value): Min and max calculated using MinMax
|
github-repos
|
def _ConvertInteger(value):
if isinstance(value, float) and not value.is_integer():
raise ParseError('Couldn\'t parse integer: {0}.'.format(value))
if isinstance(value, six.text_type) and value.find(' ') != -1:
raise ParseError('Couldn\'t parse integer: "{0}".'.format(value))
return int(value)
|
Convert an integer.
Args:
value: A scalar value to convert.
Returns:
The integer value.
Raises:
ParseError: If an integer couldn't be consumed.
|
juraj-google-style
|
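Expected behaviour of `_ConvertInteger`, assuming it and `ParseError` are in scope.

```
print(_ConvertInteger(3.0))    # 3
print(_ConvertInteger("42"))   # 42

try:
    _ConvertInteger(3.5)       # non-integral float
except ParseError as err:
    print(err)
```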
def asset(self, asset_id, asset_type, action='GET'):
if not self.can_update():
self._tcex.handle_error(910, [self.type])
if asset_type == 'PHONE':
return self.tc_requests.victim_phone_asset(
self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action
)
if asset_type == 'EMAIL':
return self.tc_requests.victim_email_asset(
self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action
)
if asset_type == 'NETWORK':
return self.tc_requests.victim_network_asset(
self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action
)
if asset_type == 'SOCIAL':
return self.tc_requests.victim_social_asset(
self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action
)
if asset_type == 'WEB':
return self.tc_requests.victim_web_asset(
self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action
)
self._tcex.handle_error(
925, ['asset_type', 'asset', 'asset_type', 'asset_type', asset_type]
)
return None
|
Gets a asset of a Victim
Valid asset_type:
+ PHONE
+ EMAIL
+ NETWORK
+ SOCIAL
+ WEB
Args:
asset_type: The type of asset (one of the values listed above).
asset_id: The ID of the asset.
action: The request action; defaults to 'GET'.
Returns:
|
juraj-google-style
|
def _embedding_lookup_for_sparse_tensor(inp: sparse_tensor.SparseTensor, weight: Optional[sparse_tensor.SparseTensor], table: tf_variables.Variable, feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:
inp_rank = inp.shape.rank
if not feature.output_shape and feature.max_sequence_length > 0 and (inp_rank is None or inp_rank == 2):
batch_size = math_ops.cast(array_ops.shape(inp)[0], dtype=dtypes.int64)
sparse_shape = array_ops_stack.stack([batch_size, feature.max_sequence_length], axis=0)
truncated_inp = sparse_ops.sparse_slice(inp, start=[0, 0], size=sparse_shape)
dense_output_shape = array_ops_stack.stack([batch_size, feature.max_sequence_length, feature.table.dim], axis=0)
return array_ops.scatter_nd(truncated_inp.indices, array_ops.gather(table.read_value(), truncated_inp.values), dense_output_shape)
else:
if feature.max_sequence_length > 0:
logging.warning('max_sequence_length setting will be ignored because the rank of the input tensor is %d which is not 2.', inp_rank)
if not feature.validate_weights_and_indices and inp_rank is not None and (inp_rank <= 2):
return embedding_ops.embedding_lookup_sparse_v2(table, inp, sp_weights=weight, combiner=feature.table.combiner)
else:
return embedding_ops.safe_embedding_lookup_sparse_v2(table, inp, sparse_weights=weight, combiner=feature.table.combiner)
|
Embedding lookup for sparse tensor based on its feature config.
Args:
inp: a single SparseTensor input.
weight: None or SparseTensor which has the same shape of the input.
table: a table variable.
feature: a feature config.
Returns:
Embedding lookup result.
|
github-repos
|
def MakeHistFromList(t, name=''):
hist = Hist(name=name)
for x in t:
    hist.Incr(x)
return hist
|
Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
name: string name for this histogram
Returns:
Hist object
|
juraj-google-style
|
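The counting behavior is the same idea as `collections.Counter`; the sketch below is only an illustration and does not reproduce the surrounding `Hist` class.
```python
from collections import Counter

values = [1, 2, 2, 3, 5]
counts = Counter(values)   # each value maps to its number of occurrences
print(counts[2])           # 2
print(counts[4])           # 0: missing values simply count as zero
```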
def kill_all_processes(self, check_alive=True, allow_graceful=False):
if (ray_constants.PROCESS_TYPE_RAYLET in self.all_processes):
self._kill_process_type(ray_constants.PROCESS_TYPE_RAYLET, check_alive=check_alive, allow_graceful=allow_graceful)
for process_type in list(self.all_processes.keys()):
self._kill_process_type(process_type, check_alive=check_alive, allow_graceful=allow_graceful)
|
Kill all of the processes.
Note that this is slower than necessary because it calls kill, wait,
kill, wait, ... instead of kill, kill, ..., wait, wait, ...
Args:
check_alive (bool): Raise an exception if any of the processes were
already dead.
allow_graceful (bool): Whether to allow the processes to terminate
gracefully before being killed.
|
codesearchnet
|
def do_reset_ids(concatenated_meta_df, data_df, concat_direction):
if concat_direction == "horiz":
assert concatenated_meta_df.index.equals(data_df.columns), (
"cids in concatenated_meta_df do not agree with cids in data_df.")
reset_ids_in_meta_df(concatenated_meta_df)
data_df.columns = pd.Index(concatenated_meta_df.index.values)
elif concat_direction == "vert":
assert concatenated_meta_df.index.equals(data_df.index), (
"rids in concatenated_meta_df do not agree with rids in data_df.")
reset_ids_in_meta_df(concatenated_meta_df)
data_df.index = pd.Index(concatenated_meta_df.index.values)
|
Reset ids in concatenated metadata and data dfs to unique integers and
save the old ids in a metadata column.
Note that the dataframes are modified in-place.
Args:
concatenated_meta_df (pandas df)
data_df (pandas df)
concat_direction (string): 'horiz' or 'vert'
Returns:
None (dfs modified in-place)
|
juraj-google-style
|
def _manual_repartition(self, axis, repartition_func, **kwargs):
func = self._prepare_method(repartition_func, **kwargs)
return self.data.manual_shuffle(axis, func)
|
This method applies all manual partitioning functions.
Args:
axis: The axis to shuffle data along.
repartition_func: The function used to repartition data.
Returns:
A `BaseFrameManager` object.
|
juraj-google-style
|
def __init__(self, source_dict, url, path, token):
self._vault_url = url
self._path = path
self._token = token
super(VaultLoader, self).__init__(source_dict)
|
Initializer.
Args:
source_dict: used to initialize the class. Use constructors to read from Vault.
url: Vault URL
path: Vault path where secrets are stored
token: Vault token (must have access to the Vault path)
|
juraj-google-style
|
def run_parallel(self, para_func):
if self.timer:
start_timer = time.time()
with mp.Pool(self.num_processors) as pool:
print('start pool with {} processors: {} total processes.\n'.format(self.num_processors, len(self.args)))
results = [pool.apply_async(para_func, arg) for arg in self.args]
out = [r.get() for r in results]
out = {key: np.concatenate([out_i[key] for out_i in out]) for key in out[0].keys()}
if self.timer:
print('SNR calculation time:', (time.time() - start_timer))
return out
|
Run parallel calculation
This will run the parallel calculation on self.num_processors.
Args:
para_func (obj): Function object to be used in parallel.
Returns:
(dict): Dictionary with parallel results.
|
codesearchnet
|
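The non-obvious step in `run_parallel` is merging the per-process result dicts into one dict of concatenated arrays; the sketch below shows just that merge on made-up data, without any multiprocessing.
```python
import numpy as np

# Each worker returns a dict of arrays; concatenate them key by key.
partial_results = [
    {'snr': np.array([1.0, 2.0]), 'mass': np.array([10.0, 20.0])},
    {'snr': np.array([3.0]),      'mass': np.array([30.0])},
]
merged = {
    key: np.concatenate([part[key] for part in partial_results])
    for key in partial_results[0]
}
print(merged['snr'])   # [1. 2. 3.]
```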
def _encode_fhir_path_builder(self, builder: expressions.Builder) -> Optional[str]:
try:
sql_expression = self._bq_interpreter.encode(builder)
except Exception as e:
self._error_reporter.report_fhir_path_error(self._abs_path_invocation(builder), str(builder), self._error_message_for_exception(e))
return None
return sql_expression
|
Returns a Standard SQL translation of the constraint `fhir_path_expression`.
If an error is encountered during encoding, the associated error reporter
will be notified, and this method will return `None`.
Args:
builder: Builder containing the information to be encoded to Standard SQL.
Returns:
A Standard SQL encoding of the constraint `fhir_path_expression` upon
successful completion. The SQL will evaluate to a single boolean
indicating whether the constraint is satisfied.
|
github-repos
|
def _read_metrics(repo, metrics, branch):
res = {}
for (out, typ, xpath) in metrics:
assert (out.scheme == 'local')
if (not typ):
typ = os.path.splitext(out.path.lower())[1].replace('.', '')
if out.use_cache:
open_fun = open
path = repo.cache.local.get(out.checksum)
else:
open_fun = repo.tree.open
path = out.path
try:
with open_fun(path) as fd:
metric = _read_metric(fd, typ=typ, xpath=xpath, rel_path=out.rel_path, branch=branch)
except IOError as e:
if (e.errno == errno.ENOENT):
logger.warning(NO_METRICS_FILE_AT_REFERENCE_WARNING.format(out.rel_path, branch))
metric = None
else:
raise
if (not metric):
continue
res[out.rel_path] = metric
return res
|
Read the content of each metric file and format it.
Args:
repo: Repo instance used to read the metric files (from cache or tree).
metrics (list): List of metric tuples (output, type, xpath).
branch (str): Branch to look up for metrics.
Returns:
A dict mapping keys with metrics path name and content.
For example:
{'metric.csv': ("value_mse deviation_mse data_set\n"
"0.421601 0.173461 train\n"
"0.67528 0.289545 testing\n"
"0.671502 0.297848 validation\n")}
|
codesearchnet
|
def check_response_code(response, expected_response_code):
if (response.status_code == expected_response_code):
pass
elif (response.status_code == RATE_LIMIT_RESPONSE_CODE):
raise RateLimitError(response)
else:
raise ApiError(response)
|
Check response code against the expected code; raise ApiError.
Checks the requests.response.status_code against the provided expected
response code (erc), and raises a ApiError if they do not match.
Args:
response(requests.response): The response object returned by a request
using the requests package.
expected_response_code(int): The expected response code (HTTP response
code).
Raises:
ApiError: If the requests.response.status_code does not match the
provided expected response code (erc).
|
codesearchnet
|
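A stand-alone sketch of the same dispatch, with stand-in exception classes and a fake response object; the value 429 for the rate-limit status code is an assumption based on the usual HTTP convention.
```python
from types import SimpleNamespace

RATE_LIMIT_RESPONSE_CODE = 429   # assumed value for this sketch

class ApiError(Exception):
    pass

class RateLimitError(ApiError):
    pass

def check_response_code(response, expected_response_code):
    if response.status_code == expected_response_code:
        return
    if response.status_code == RATE_LIMIT_RESPONSE_CODE:
        raise RateLimitError(response)
    raise ApiError(response)

check_response_code(SimpleNamespace(status_code=200), 200)   # passes silently
try:
    check_response_code(SimpleNamespace(status_code=429), 200)
except RateLimitError:
    print('rate limited')
```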
async def send_message(
self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
with async_timeout.timeout(self.timeout):
async with self.session.post(
self.endpoint, data=request, ssl=self.ssl
) as response:
response_text = await response.text()
return Response(response_text, raw=response)
|
Transport the message to the server and return the response.
Args:
request: The JSON-RPC request string.
response_expected: Whether the request expects a response.
Returns:
A Response object.
|
juraj-google-style
|
def _handle_failure_and_recovery(self, e, on_failure_fn, on_transient_failure_fn, on_recovery_fn, worker_device_name):
if on_failure_fn:
on_failure_fn(e)
with self._cluster_update_lock:
self._cluster_due_for_update_or_finish.set()
self._worker_up_cond.wait(_WORKER_MAXIMUM_RECOVERY_SEC)
if self._error_from_recovery:
try:
raise self._error_from_recovery
finally:
self._error_from_recovery = None
logging.info('Worker %s has been recovered.', worker_device_name)
if on_recovery_fn:
logging.info('Worker %s calling on_recovery_fn', worker_device_name)
with self.wait_on_failure(on_recovery_fn=on_recovery_fn, on_transient_failure_fn=on_transient_failure_fn, worker_device_name=worker_device_name):
on_recovery_fn()
|
Call failure fn, wait for cluster to recover, then call recovery fn.
Args:
e: the Exception thrown during closure execution.
on_failure_fn: an optional function to run if preemption happens.
on_transient_failure_fn: an optional function to run if transient failure
happens.
on_recovery_fn: an optional function to run when a worker is recovered
from preemption.
worker_device_name: the device name of the worker instance that is passing
through the failure.
|
github-repos
|
def _GetDaysPerMonth(self, year, month):
if month not in range(1, 13):
raise ValueError('Month value out of bounds.')
days_per_month = self._DAYS_PER_MONTH[month - 1]
if month == 2 and self._IsLeapYear(year):
days_per_month += 1
return days_per_month
|
Retrieves the number of days in a month of a specific year.
Args:
year (int): year e.g. 1970.
month (int): month, where 1 represents January.
Returns:
int: number of days in the month.
Raises:
ValueError: if the month value is out of bounds.
|
juraj-google-style
|
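The standard library's `calendar.monthrange` gives the same day counts and can serve as a quick cross-check:
```python
import calendar

# monthrange(year, month) returns (weekday of the 1st, days in the month).
print(calendar.monthrange(1970, 2)[1])   # 28: 1970 is not a leap year
print(calendar.monthrange(2000, 2)[1])   # 29: 2000 is a leap year
print(calendar.monthrange(1900, 2)[1])   # 28: century years need /400
```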
def merge_json_fhir_string_into_proto(raw_json: str, target: message.Message, *, validate: bool=True, default_timezone: str=_primitive_time_utils.SIMPLE_ZULU) -> None:
json_value = load_json(raw_json)
merge_json_fhir_object_into_proto(json_value, target, validate=validate, default_timezone=default_timezone)
|
Merges the provided raw_json string into a target Message.
Args:
raw_json: The JSON to parse and merge into target.
target: The Message instance to merge raw_json into.
validate: A Boolean value indicating if validation should be performed on
the resultant Message. Validation takes the form of ensuring that basic
checks such as cardinality guarantees, required field adherence, etc. are
met. Defaults to True.
default_timezone: A string specifying the timezone string to use for time-
like FHIR data during parsing. Defaults to 'Z' for UTC.
Raises:
fhir_errors.InvalidFhirError: In the event that validation fails after
parsing.
|
github-repos
|
def parse_cscore(infile):
cscore_dict = {}
with open(infile, 'r') as f:
for ll in f.readlines():
if ll.lower().startswith('model1'):
l = ll.split()
cscore = l[1]
tmscore_full = l[2].split('+-')
tmscore = tmscore_full[0]
tmscore_err = tmscore_full[1]
rmsd_full = l[3].split('+-')
rmsd = rmsd_full[0]
rmsd_err = rmsd_full[1]
cscore_dict['c_score'] = float(cscore)
cscore_dict['tm_score'] = float(tmscore)
cscore_dict['tm_score_err'] = float(tmscore_err)
cscore_dict['rmsd'] = float(rmsd)
cscore_dict['rmsd_err'] = float(rmsd_err)
return cscore_dict
|
Parse the cscore file to return a dictionary of scores.
Args:
infile (str): Path to cscore
Returns:
dict: Dictionary of scores
|
juraj-google-style
|
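The parser above expects a `model1` line of the form `model1 <c-score> <tm>+-<err> <rmsd>+-<err>`; the sketch below exercises the same splitting logic on a made-up line (the numbers are purely illustrative).
```python
line = 'model1 -1.25 0.57+-0.15 8.9+-4.6'   # hypothetical cscore line
fields = line.split()
tm, tm_err = fields[2].split('+-')
rmsd, rmsd_err = fields[3].split('+-')
scores = {
    'c_score': float(fields[1]),
    'tm_score': float(tm),
    'tm_score_err': float(tm_err),
    'rmsd': float(rmsd),
    'rmsd_err': float(rmsd_err),
}
print(scores)
```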
def process_attributes_of_node(attrs, node_name, class_type):
attrs['_nodes'] = {}
attrs['_linked_models'] = defaultdict(list)
attrs['_debug_linked_models'] = defaultdict(list)
attrs['_lazy_linked_models'] = defaultdict(list)
attrs['_fields'] = {}
attrs['_uniques'] = []
for (key, attr) in list(attrs.items()):
if (hasattr(attr, '__base__') and (getattr(attr.__base__, '_TYPE', '') in ['Node', 'ListNode'])):
attrs['_nodes'][key] = attrs[key]
else:
attr_type = getattr(attr, '_TYPE', '')
if (attr_type == 'Model'):
attrs[('%s_id' % key)] = ''
lnk_mdl_ins = attrs[key]
lnk = {'null': (lnk_mdl_ins.null or (class_type == 'ListNode')), 'link_source': True, 'mdl': lnk_mdl_ins.__class__, 'o2o': lnk_mdl_ins._is_one_to_one, 'm2m': (class_type == 'ListNode'), 'reverse': lnk_mdl_ins.reverse_name, 'verbose': lnk_mdl_ins.verbose_name, 'field': key, 'is_set': False}
attrs['_linked_models'][attr.__class__.__name__].append(lnk)
debug_lnk = lnk.copy()
debug_lnk['lnksrc'] = 'process_attributes_of_node'
attrs['_debug_linked_models'][attr.__class__.__name__].append(debug_lnk)
elif (attr_type == 'Field'):
attr.name = key
attrs['_fields'][key] = attr
if attr.unique:
attrs['_uniques'].append(key)
elif (attr_type == 'Link'):
attrs[('%s_id' % key)] = ''
lzy_lnk = attrs[key]
attrs['_lazy_linked_models'][key].append({'from': node_name, 'to': lzy_lnk.link_to, 'o2o': lzy_lnk.one_to_one, 'verbose': lzy_lnk.verbose_name, 'reverse': lzy_lnk.reverse_name, 'field': key})
|
Prepare the model fields, nodes and relations.
Args:
node_name (str): name of the node we are currently processing
attrs (dict): attribute dict
class_type (str): Type of class.
Can be one of these: 'ListNode', 'Model', 'Node'
|
codesearchnet
|
def serialize(self):
lines = []
for criterion in self.filter_criteria:
lines.append(criterion.name())
lines.append(criterion.serialize())
return '\n'.join(lines)
|
Return a string representing the subview with all of its filter criteria.
Returns:
str: String with subview definition.
|
codesearchnet
|
def __init__(self, channel):
self.ListEntityTypes = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.EntityTypes/ListEntityTypes',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.ListEntityTypesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.ListEntityTypesResponse.FromString,
)
self.GetEntityType = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.EntityTypes/GetEntityType',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.GetEntityTypeRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.EntityType.FromString,
)
self.CreateEntityType = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.EntityTypes/CreateEntityType',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.CreateEntityTypeRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.EntityType.FromString,
)
self.UpdateEntityType = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.EntityTypes/UpdateEntityType',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.UpdateEntityTypeRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.EntityType.FromString,
)
self.DeleteEntityType = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.EntityTypes/DeleteEntityType',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.DeleteEntityTypeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.BatchUpdateEntityTypes = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.EntityTypes/BatchUpdateEntityTypes',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchUpdateEntityTypesRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.BatchDeleteEntityTypes = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.EntityTypes/BatchDeleteEntityTypes',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchDeleteEntityTypesRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.BatchCreateEntities = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.EntityTypes/BatchCreateEntities',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchCreateEntitiesRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.BatchUpdateEntities = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.EntityTypes/BatchUpdateEntities',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchUpdateEntitiesRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.BatchDeleteEntities = channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.EntityTypes/BatchDeleteEntities',
request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchDeleteEntitiesRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def analyze(fqdn, result, argl, argd):
package = fqdn.split('.')[0]
if (package not in _methods):
_load_methods(package)
if ((_methods[package] is not None) and (fqdn in _methods[package])):
return _methods[package][fqdn](fqdn, result, *argl, **argd)
|
Analyzes the result from calling the method with the specified FQDN.
Args:
fqdn (str): full-qualified name of the method that was called.
result: result of calling the method with `fqdn`.
argl (tuple): positional arguments passed to the method call.
argd (dict): keyword arguments passed to the method call.
|
codesearchnet
|
def create_tensorboard_process(self):
port = 6006
for _ in range(100):
p = subprocess.Popen(['tensorboard', '--logdir', self.logdir, '--host', 'localhost', '--port', str(port)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.event.wait(5)
if p.poll():
port += 1
else:
return (port, p)
raise OSError('No available ports to start TensorBoard. Attempted all ports between 6006 and 6105')
|
Create a TensorBoard process.
Returns:
tuple: A tuple containing:
int: The port number.
process: The TensorBoard process.
Raises:
OSError: If no ports between 6006 and 6105 are available for starting TensorBoard.
|
codesearchnet
|
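The function above probes ports by launching TensorBoard and polling the process; another common pattern, shown here only as a sketch, is to ask the OS for a free port by binding to port 0.
```python
import socket

def find_free_port(host='localhost'):
    # Binding to port 0 lets the OS pick an unused port.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((host, 0))
        return s.getsockname()[1]

print(find_free_port())
```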
def filter_children(self, ctype: ContentType = None) -> List[SchemaNode]:
if ctype is None:
ctype = self.content_type()
return [c for c in self.children if
not isinstance(c, (RpcActionNode, NotificationNode)) and
c.content_type().value & ctype.value != 0]
|
Return receiver's children based on content type.
Args:
ctype: Content type. If None, the receiver's own content type is used.
|
juraj-google-style
|
def _child(details):
if isinstance(details, list):
return OptionsNode(details)
elif isinstance(details, dict):
if '__array__' in details:
return ArrayNode(details)
elif '__hash__' in details:
return HashNode(details)
elif '__type__' in details:
if isinstance(details['__type__'], (dict,list)):
return _child(details['__type__'])
else:
return Node(details)
else:
return Parent(details)
elif isinstance(details, basestring):
return Node(details)
else:
raise TypeError('details')
|
Child
A private function to figure out the child node type
Arguments:
details {list|dict|str} -- The structure describing a data point (list, dict, or string)
Returns:
_NodeInterface
|
juraj-google-style
|
def reformat_to_pretty_xml(doc_xml):
assert isinstance(doc_xml, str)
dom_obj = xml.dom.minidom.parseString(doc_xml)
pretty_xml = dom_obj.toprettyxml(indent=' ')
return re.sub(r'^\s*$\n', r'', pretty_xml, flags=re.MULTILINE)
|
Pretty print XML doc.
Args:
doc_xml : str
Well formed XML doc
Returns:
str: Pretty printed XML doc
|
juraj-google-style
|
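A small end-to-end demo of the same steps on an inline document, using only the standard library:
```python
import re
import xml.dom.minidom

doc_xml = '<root><item id="1">a</item><item id="2">b</item></root>'
pretty = xml.dom.minidom.parseString(doc_xml).toprettyxml(indent='  ')
pretty = re.sub(r'^\s*$\n', r'', pretty, flags=re.MULTILINE)  # drop blank lines
print(pretty)
```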
def filter_moving_count(self: EventSetOrNode, window_length: Duration) -> EventSetOrNode:
from temporian.core.operators.filter_moving_count import filter_moving_count
return filter_moving_count(self, window_length=window_length)
|
Filters out events such that no more than one output event is within
a trailing time window of `window_length`.
Filtering is applied in chronological order: An event received at time t
is filtered out if there is a non-filtered out event in
(t-window_length, t].
This operator is different from `(evset.moving_count(window_length)
== 0).filter()`. In `filter_moving_count` a filtered event does not
block following events.
Usage example:
```python
>>> a = tp.event_set(timestamps=[1, 2, 3])
>>> b = a.filter_moving_count(window_length=1.5)
>>> b
indexes: []
features: []
events:
(2 events):
timestamps: [1. 3.]
...
```
Returns:
EventSet without features with the filtered events.
|
github-repos
|
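The filtering rule can be illustrated in plain Python on a sorted list of timestamps; this mirrors the docstring example but is not the Temporian API.
```python
def keep_at_most_one_per_window(timestamps, window_length):
    # Keep an event at time t only if no already-kept event lies in
    # (t - window_length, t].
    kept = []
    for t in timestamps:
        if not kept or t - kept[-1] >= window_length:
            kept.append(t)
    return kept

print(keep_at_most_one_per_window([1, 2, 3], window_length=1.5))   # [1, 3]
```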
def _pool(inputs, initial_value, reduce_fn, pool_size, strides=None, padding='valid'):
if padding not in ('same', 'valid'):
raise ValueError(f"Invalid padding '{padding}', must be 'same' or 'valid'.")
padding = padding.upper()
return np.array(lax.reduce_window(inputs, initial_value, reduce_fn, pool_size, strides, padding))
|
Helper function to define pooling functions.
Args:
inputs: input data of rank `N+2`.
initial_value: the initial value for the reduction.
reduce_fn: a reduce function of the form `(T, T) -> T`.
pool_size: a sequence of `N` integers, representing the window size to
reduce over.
strides: a sequence of `N` integers, representing the inter-window
strides (default: `(1, ..., 1)`).
padding: either the string `same` or `valid`.
Returns:
The output of the reduction for each window slice.
|
github-repos
|
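As a concept illustration of the windowed reduction (the helper above relies on `jax.lax.reduce_window`), here is a NumPy-only sketch of 'valid' max pooling with stride 2, assuming NumPy >= 1.20 for `sliding_window_view`.
```python
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

x = np.arange(16, dtype=np.float32).reshape(4, 4)
windows = sliding_window_view(x, (2, 2))        # shape (3, 3, 2, 2)
pooled = windows[::2, ::2].max(axis=(-2, -1))   # stride 2 -> shape (2, 2)
print(pooled)   # [[ 5.  7.] [13. 15.]]
```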
def parse_gene_panel(path, institute='cust000', panel_id='test', panel_type='clinical', date=datetime.now(),
version=1.0, display_name=None, genes = None):
LOG.info("Parsing gene panel %s", panel_id)
gene_panel = {}
gene_panel['path'] = path
gene_panel['type'] = panel_type
gene_panel['date'] = date
gene_panel['panel_id'] = panel_id
gene_panel['institute'] = institute
version = version or 1.0
gene_panel['version'] = float(version)
gene_panel['display_name'] = display_name or panel_id
if not path:
panel_handle = genes
else:
panel_handle = get_file_handle(gene_panel['path'])
gene_panel['genes'] = parse_genes(gene_lines=panel_handle)
return gene_panel
|
Parse the panel info and return a gene panel
Args:
path(str): Path to panel file
institute(str): Name of institute that owns the panel
panel_id(str): Panel id
panel_type(str): Type of panel, e.g. 'clinical'
date(datetime.datetime): Date of creation
version(float): Panel version
display_name(str): Option to have a long display name
genes(iterable): Gene lines to use when no path is given
Returns:
gene_panel(dict)
|
juraj-google-style
|