code | docstring | source |
---|---|---|
def _buckets(data, bucket_count=None):
import tensorflow.compat.v1 as tf
if (bucket_count is None):
bucket_count = summary_v2.DEFAULT_BUCKET_COUNT
with tf.name_scope('buckets', values=[data, bucket_count]), tf.control_dependencies([tf.assert_scalar(bucket_count), tf.assert_type(bucket_count, tf.int32)]):
data = tf.reshape(data, shape=[(- 1)])
data = tf.cast(data, tf.float64)
is_empty = tf.equal(tf.size(input=data), 0)
def when_empty():
return tf.constant([], shape=(0, 3), dtype=tf.float64)
def when_nonempty():
min_ = tf.reduce_min(input_tensor=data)
max_ = tf.reduce_max(input_tensor=data)
range_ = (max_ - min_)
is_singular = tf.equal(range_, 0)
def when_nonsingular():
bucket_width = (range_ / tf.cast(bucket_count, tf.float64))
offsets = (data - min_)
bucket_indices = tf.cast(tf.floor((offsets / bucket_width)), dtype=tf.int32)
clamped_indices = tf.minimum(bucket_indices, (bucket_count - 1))
one_hots = tf.one_hot(clamped_indices, depth=bucket_count)
bucket_counts = tf.cast(tf.reduce_sum(input_tensor=one_hots, axis=0), dtype=tf.float64)
edges = tf.linspace(min_, max_, (bucket_count + 1))
left_edges = edges[:(- 1)]
right_edges = edges[1:]
return tf.transpose(a=tf.stack([left_edges, right_edges, bucket_counts]))
def when_singular():
center = min_
bucket_starts = tf.stack([(center - 0.5)])
bucket_ends = tf.stack([(center + 0.5)])
bucket_counts = tf.stack([tf.cast(tf.size(input=data), tf.float64)])
return tf.transpose(a=tf.stack([bucket_starts, bucket_ends, bucket_counts]))
return tf.cond(is_singular, when_singular, when_nonsingular)
return tf.cond(is_empty, when_empty, when_nonempty)
|
Create a TensorFlow op to group data into histogram buckets.
Arguments:
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int` or scalar `int32` `Tensor`.
Returns:
A `Tensor` of shape `[k, 3]` and type `float64`. The `i`th row is
a triple `[left_edge, right_edge, count]` for a single bucket.
The value of `k` is either `bucket_count` or `1` or `0`.
|
codesearchnet
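A minimal NumPy sketch of the same bucketing scheme (illustration only, not the TF op above; it assumes non-empty, non-singular data):
import numpy as np

def buckets_np(data, bucket_count=30):
    # Flatten, cast, and compute equal-width bucket edges the same way the TF graph does.
    data = np.asarray(data, dtype=np.float64).reshape(-1)
    min_, max_ = data.min(), data.max()
    bucket_width = (max_ - min_) / bucket_count
    indices = np.minimum((data - min_) // bucket_width, bucket_count - 1).astype(int)
    counts = np.bincount(indices, minlength=bucket_count).astype(np.float64)
    edges = np.linspace(min_, max_, bucket_count + 1)
    return np.stack([edges[:-1], edges[1:], counts], axis=1)  # shape [bucket_count, 3]

print(buckets_np([0.1, 0.2, 0.9, 1.0], bucket_count=4))
# each row is [left_edge, right_edge, count]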
|
def line_on_device(
device: 'cirq.google.XmonDevice',
length: int,
method: LinePlacementStrategy = greedy.GreedySequenceSearchStrategy()
) -> GridQubitLineTuple:
return method.place_line(device, length)
|
Searches for a linear sequence of qubits on the device.
Args:
device: Google Xmon device instance.
length: Desired number of qubits making up the line.
method: Line placement method. Defaults to
cirq.greedy.GreedySequenceSearchStrategy.
Returns:
Line sequences search results.
|
juraj-google-style
|
def ResolveSubjectDestination(subject, regexes):
components = Components(subject)
if not components:
return "aff4", ""
path = utils.JoinPath(*[ConvertStringToFilename(x) for x in components])
for route in regexes:
m = route.match(path)
if m:
value = m.group("path")
if value:
base = os.path.basename(value)
dirname = os.path.dirname(value)
return base, dirname
return "aff4", ""
|
Returns the directory/filename where the subject will be stored.
Args:
subject: The subject.
regexes: The list of regular expressions by priority.
Returns:
File name and directory.
|
juraj-google-style
|
def resize_annotation(self, annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float=0.5, interpolation: 'F.InterpolationMode'=None):
interpolation = interpolation if interpolation is not None else F.InterpolationMode.NEAREST
ratio_height, ratio_width = [target / orig for target, orig in zip(target_size, orig_size)]
new_annotation = {}
new_annotation['size'] = target_size
for key, value in annotation.items():
if key == 'boxes':
boxes = value
scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32, device=boxes.device)
new_annotation['boxes'] = scaled_boxes
elif key == 'area':
area = value
scaled_area = area * (ratio_width * ratio_height)
new_annotation['area'] = scaled_area
elif key == 'masks':
masks = value[:, None]
masks = [F.resize(mask, target_size, interpolation=interpolation) for mask in masks]
masks = torch.stack(masks).to(torch.float32)
masks = masks[:, 0] > threshold
new_annotation['masks'] = masks
elif key == 'size':
new_annotation['size'] = target_size
else:
new_annotation[key] = value
return new_annotation
|
Resizes an annotation to a target size.
Args:
annotation (`Dict[str, Any]`):
The annotation dictionary.
orig_size (`Tuple[int, int]`):
The original size of the input image.
target_size (`Tuple[int, int]`):
The target size of the image, as returned by the preprocessing `resize` step.
threshold (`float`, *optional*, defaults to 0.5):
The threshold used to binarize the segmentation masks.
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.NEAREST`):
The resampling filter to use when resizing the masks.
|
github-repos
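A small torch sketch of just the box-rescaling step above, with hypothetical sizes (the full method also rescales areas and resizes masks):
import torch

orig_size, target_size = (480, 640), (240, 320)  # (height, width), hypothetical values
ratio_height, ratio_width = [t / o for t, o in zip(target_size, orig_size)]

boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])  # (x_min, y_min, x_max, y_max)
scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
print(scaled_boxes)  # tensor([[  5.,  10.,  55., 110.]])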
|
def range(self, location, distance):
return (segment.range(location, distance) for segment in self)
|
Test whether locations are within a given range of ``location``.
Args:
location (Point): Location to test range against
distance (float): Distance to test location is within
Returns:
list of list of Point: Groups of points in range per segment
|
juraj-google-style
|
def start_apppool(name):
ps_cmd = ['Start-WebAppPool', "'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
return (cmd_ret['retcode'] == 0)
|
Start an IIS application pool.
.. versionadded:: 2017.7.0
Args:
name (str): The name of the App Pool to start.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.start_apppool name='MyTestPool'
|
codesearchnet
|
def __init__(self, shape=None, dtype=dtypes.float32):
self._shape = tensor_shape.as_shape(shape)
self._dtype = dtypes.as_dtype(dtype)
|
Constructs a type specification for a `tf.sparse.SparseTensor`.
Args:
shape: The dense shape of the `SparseTensor`, or `None` to allow any dense
shape.
dtype: `tf.DType` of values in the `SparseTensor`.
|
github-repos
|
def parse_request() -> Dict[(str, str)]:
in_lines = sys.stdin.readlines()
LOGGER.debug('Received request "%s"', in_lines)
request = {}
for line in in_lines:
if (not line.strip()):
continue
parts = line.split('=', 1)
assert (len(parts) == 2)
request[parts[0].strip()] = parts[1].strip()
return request
|
Parse the request of the git credential API from stdin.
Returns:
A dictionary with all key-value pairs of the request
|
codesearchnet
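A self-contained sketch of the same key=value parsing, run on an in-memory sample of the git credential request format rather than real stdin:
import io

sample = io.StringIO("protocol=https\nhost=example.com\nusername=alice\n\n")
request = {}
for line in sample.readlines():
    if not line.strip():
        continue
    key, value = line.split('=', 1)
    request[key.strip()] = value.strip()
print(request)  # {'protocol': 'https', 'host': 'example.com', 'username': 'alice'}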
|
def write_markdown_to_file(self, f):
print("---", file=f)
print("---", file=f)
print("<!-- This file is machine generated: DO NOT EDIT! -->", file=f)
print("", file=f)
print("
if self._prefix:
print(self._prefix, file=f)
print("[TOC]", file=f)
print("", file=f)
if self._module is not None:
self._write_module_markdown_to_file(f, self._module)
|
Prints this library to file `f`.
Args:
f: File to write to.
Returns:
Dictionary of documented members.
|
juraj-google-style
|
def register_model(self, *fields, **kw):
index = PonyWhooshIndex(pw=self)
index._kw = kw
index._fields = fields
def inner(model):
index._name = model._table_
if not index._name:
index._name = model.__name__
self._entities[index._name] = model
index._schema_attrs = {}
index._primary_key_is_composite = model._pk_is_composite_
index._primary_key = [f.name for f in model._pk_attrs_]
index._primary_key_type = 'list'
type_attribute = {}
for field in model._attrs_:
if field.is_relation:
continue
assert hasattr(field, "name") and hasattr(field, "py_type")
fname = field.name
if hasattr(field.name, "__name__"):
fname = field.name.__name__
stored = kw.get("stored", False)
if fname in index._primary_key:
kw["stored"] = True
ftype = field.py_type.__name__
if ftype in ['date', 'datetime', 'datetime.date']:
kw["stored"] = stored
continue
fwhoosh = whoosh.fields.TEXT(**kw)
if field == model._pk_:
index._primary_key_type = ftype
fwhoosh = whoosh.fields.ID(stored=True, unique=True)
if fname in index._fields:
if not field.is_string:
if ftype in ['int', 'float']:
fwhoosh = whoosh.fields.NUMERIC(**kw)
elif ftype == 'bool':
fwhoosh = whoosh.fields.BOOLEAN(stored=True)
type_attribute[fname] = ftype
index._schema_attrs[fname] = fwhoosh
kw["stored"] = stored
index._schema = whoosh.fields.Schema(**index._schema_attrs)
self.register_index(index)
def _middle_save_(obj, status):
writer = index._whoosh.writer(timeout=self.writer_timeout)
dict_obj = obj.to_dict()
def dumps(v):
if sys.version_info[0] < 3:
if isinstance(v, int):
return unicode(v)
if isinstance(v, float):
return '%.9f' % v
return unicode(v)
else:
if isinstance(v, int):
return str(v)
if isinstance(v, float):
return int(float(v))
return str(v)
attrs = {}
if sys.version_info[0] < 3:
for k, v in dict_obj.iteritems():
if k in index._schema_attrs.keys():
attrs[k] = dumps(v)
else:
for k, v in dict_obj.items():
if k in list(index._schema_attrs.keys()):
attrs[k] = dumps(v)
if status == 'inserted':
writer.add_document(**attrs)
elif status == 'updated':
writer.update_document(**attrs)
elif status in set(['marked_to_delete', 'deleted', 'cancelled']):
writer.delete_by_term(primary, attrs[primary])
writer.commit()
return obj._after_save_
index._model = model
model._after_save_ = _middle_save_
model._pw_index_ = index
model.search = model._pw_index_.search
return model
return inner
|
Registers a single model for fulltext search. This basically creates
a simple PonyWhoosh.Index for the model and calls self.register_index on it.
Args:
*fields: all the fields indexed from the model.
**kw: The options for each field, sortedby, stored ...
|
juraj-google-style
|
def select(self, index_or_name: Union[int, str, List[str]]) -> Union[int, str]:
selected_name = index_or_name if isinstance(index_or_name, str) else None
index = -1
if isinstance(index_or_name, list):
for name in index_or_name:
index = self.indexof(name)
if index != -1:
selected_name = name
break
else:
index = self.indexof(index_or_name)
if index == -1:
raise ValueError(f'Tab not found: {index_or_name!r}')
self._sync_members(selected=index)
self._run_javascript(f"\n const tabButtons = document.querySelectorAll('...')")  # the JavaScript snippet was truncated in the source; it selects the tab buttons and applies the new selection
return selected_name or index
|
Selects a tab identified by an index or name.
Args:
index_or_name: The index or name of the tab to select. If a list of names
is provided, the first name in the list that is found will be selected.
Returns:
The index (if the index was provided) or name of the selected tab.
|
github-repos
|
def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import patches
import numpy as np
if feature.geometry.type not in ('Polygon', 'MultiPolygon'):
raise ValueError("Cannot handle feature of type " + feature.geometry.type)
dpi = 100
fig = plt.figure(frameon=False, dpi=dpi, )
fig.set_size_inches(shape[1] / float(dpi), shape[0] / float(dpi))
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
ax.set_xlim([0, shape[1]])
ax.set_ylim([0, shape[0]])
fig.add_axes(ax)
if feature.geometry.type == 'Polygon':
coords = [feature.geometry.coordinates]
else:
coords = feature.geometry.coordinates
for poly_coords in coords:
for i, outline in enumerate(poly_coords):
value = 0. if i == 0 else 1.
outline = np.array(outline)
xs = lon_idx(outline[:, 0])
ys = lat_idx(outline[:, 1])
poly = patches.Polygon(list(zip(xs, ys)),
facecolor=(value, value, value),
edgecolor='none',
antialiased=True)
ax.add_patch(poly)
fig.canvas.draw()
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))[:, :, 0]
assert data.shape[0] == shape[0]
assert data.shape[1] == shape[1]
data = 1. - data.astype(float) / 255.
data = data[::-1, :]
plt.close('all')
return data
|
Convert a GeoJSON polygon feature to a numpy array
Args:
feature (pygeoj.Feature): polygon feature to draw
shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in
lat_idx (func): function converting a latitude to the (fractional) row index in the map
lon_idx (func): function converting a longitude to the (fractional) column index in the map
Returns:
np.array: mask, background is zero, foreground is one
|
juraj-google-style
|
def set_headline(self, level, message, timestamp=None, now_reference=None):
if self.headline is not None and self.headline.message == message:
self.headline.created = monotonic()
self.headline.count += 1
return
msg_object = ServiceMessage(level, message, self._last_message_id, timestamp, now_reference)
self.headline = msg_object
self._last_message_id += 1
|
Set the persistent headline message for this service.
Args:
level (int): The level of the message (info, warning, error)
message (string): The message contents
timestamp (float): An optional monotonic value in seconds for when the message was created
now_reference (float): If timestamp is not relative to monotonic() as called from this
module then this should be now() as seen by whoever created the timestamp.
|
juraj-google-style
|
def set_description(self, vrf_name, description=None, default=False,
disable=False):
cmds = self.command_builder('description', value=description,
default=default, disable=disable)
return self.configure_vrf(vrf_name, cmds)
|
Configures the VRF description
Args:
vrf_name (str): The VRF name to configure
description (str): The string to set the VRF description to
default (bool): Configures the VRF description to its default value
disable (bool): Negates the VRF description
Returns:
True if the operation was successful otherwise False
|
juraj-google-style
|
def expected_error(self, expected: str) -> str:
if self.finished:
return 'Expected {} but found end of source'.format(expected)
else:
return 'Expected {} but found {} at index {}'.format(expected, self.next_token(), self.position)
|
Generate a basic error to include the current state.
A parser can supply only a representation of what it is expecting to
this method and the reader will provide the context, including the index
to the error.
Args:
expected: A representation of what the parser is currently expecting
Returns:
A full error message
|
juraj-google-style
|
def find_modules(module_path):
if module_path.is_file():
if module_path.suffix == '.py':
yield module_path
elif module_path.is_dir():
pyfiles = glob.glob('{}*.py'.format(module_path), recursive=True)
yield from (Path(pyfile) for pyfile in pyfiles)
|
Find all modules in the module (possibly package) represented by `module_path`.
Args:
module_path: A pathlib.Path to a Python package or module.
Returns: An iterable of paths to Python modules (i.e. *.py files).
|
juraj-google-style
|
def find_matching_model_files(check_all: bool=False):
module_diff_files = None
if not check_all:
module_diff_files = set()
repo = Repo(PATH_TO_REPO)
for modified_file_diff in repo.index.diff(None):
if modified_file_diff.a_path.startswith('src/transformers'):
module_diff_files.add(os.path.join(PATH_TO_REPO, modified_file_diff.a_path))
for modified_file_diff in repo.index.diff(repo.refs.main.commit):
if modified_file_diff.a_path.startswith('src/transformers'):
module_diff_files.add(os.path.join(PATH_TO_REPO, modified_file_diff.a_path))
if len(module_diff_files) == 0:
return None
modeling_glob_pattern = os.path.join(PATH_TO_TRANSFORMERS, 'models/**/modeling_**')
potential_files = glob.glob(modeling_glob_pattern)
image_processing_glob_pattern = os.path.join(PATH_TO_TRANSFORMERS, 'models/**/image_processing_*_fast.py')
potential_files += glob.glob(image_processing_glob_pattern)
exclude_substrings = ['modeling_tf_', 'modeling_flax_']
matching_files = []
for file_path in potential_files:
if os.path.isfile(file_path):
filename = os.path.basename(file_path)
is_excluded = any((exclude in filename for exclude in exclude_substrings))
if not is_excluded:
matching_files.append(file_path)
if not check_all:
matching_files = sorted([file for file in matching_files if file in module_diff_files])
print(' Checking auto_docstrings in the following files:' + '\n - ' + '\n - '.join(matching_files))
return matching_files
|
Find all model files in the transformers repo that should be checked for @auto_docstring,
excluding files with certain substrings.
Returns:
List of file paths.
|
github-repos
|
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: Optional[torch.Tensor]=None, output_attentions: bool=False):
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if self.training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
object_queries (`torch.FloatTensor`, *optional*): object queries, to be added to hidden_states.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
|
github-repos
|
def __init__(self, coder=coders.BytesCoder(), compression_type=CompressionTypes.AUTO, with_filename=False):
super().__init__()
source_from_file = partial(_create_tfrecordio_source, compression_type=compression_type, coder=coder)
self._read_all_files = ReadAllFiles(splittable=False, compression_type=compression_type, desired_bundle_size=0, min_bundle_size=0, source_from_file=source_from_file, with_filename=with_filename)
|
Initialize the ``ReadAllFromTFRecord`` transform.
Args:
coder: Coder used to decode each record.
compression_type: Used to handle compressed input files. Default value
is CompressionTypes.AUTO, in which case the file_path's extension will
be used to detect the compression.
with_filename: If True, returns a Key Value with the key being the file
name and the value being the actual data. If False, it only returns
the data.
|
github-repos
|
def _set_textarea(el, value):
if isinstance(value, dict):
el.text = value['val']
elif (type(value) in [list, tuple]):
el.text = '\n\n'.join((('-- %s --\n%s' % (item['source'], item['val'])) for item in value))
else:
el.text = value
|
Set content of given textarea element `el` to `value`.
Args:
el (obj): Reference to textarea element you wish to set.
value (obj/list): Value to which the `el` will be set.
|
codesearchnet
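A standalone sketch of the list branch above, showing how multiple sourced values are joined into the textarea text (hypothetical sample data):
value = [
    {"source": "catalogue A", "val": "Prague"},
    {"source": "catalogue B", "val": "Praha"},
]
text = '\n\n'.join('-- %s --\n%s' % (item['source'], item['val']) for item in value)
print(text)
# -- catalogue A --
# Prague
#
# -- catalogue B --
# Praha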
|
def get_input(self, name, ds):
columns = self.inputs.get(name)
df = ds.get_dataframe()
for column in columns:
if (column not in df.columns):
df[column] = self.defaults.get(column)
return df[columns]
|
Retrieves the content of an input given a DataSource. The input acts like a filter over the outputs of the DataSource.
Args:
name (str): The name of the input.
ds (openflow.DataSource): The DataSource that will feed the data.
Returns:
pandas.DataFrame: The content of the input.
|
codesearchnet
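A pandas sketch of the column filtering and default filling performed above, using hypothetical columns and defaults instead of a real DataSource:
import pandas as pd

columns = ['a', 'b']                    # columns declared for this input
defaults = {'b': 0}                     # per-column default values
df = pd.DataFrame({'a': [1, 2]})        # stand-in for ds.get_dataframe()
for column in columns:
    if column not in df.columns:
        df[column] = defaults.get(column)
print(df[columns])
#    a  b
# 0  1  0
# 1  2  0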
|
def zeros(shape, dtype=None):
return backend.numpy.zeros(shape, dtype=dtype)
|
Return a new tensor of given shape and type, filled with zeros.
Args:
shape: Shape of the new tensor.
dtype: Desired data type of the tensor.
Returns:
Tensor of zeros with the given shape and dtype.
|
github-repos
|
def try_storage(self, identifier, req, resp, resource, uri_kwargs):
if identifier is None:
user = None
elif self.user_storage is not None:
user = self.user_storage.get_user(
self, identifier, req, resp, resource, uri_kwargs
)
elif self.user_storage is None and not self.only_with_storage:
user = {
'identified_with': self,
'identifier': identifier
}
else:
user = None
return user
|
Try to find user in configured user storage object.
Args:
identifier: User identifier.
Returns:
user object.
|
juraj-google-style
|
def __init__(self, value=None):
super(QueryFunction, self).__init__(
QueryFunctionEnum, value, Tags.QUERY_FUNCTION)
|
Construct a QueryFunction object.
Args:
value (QueryFunction enum): A QueryFunction enumeration value,
(e.g., QueryFunction.QUERY_OPERATIONS). Optional, default to
None.
|
juraj-google-style
|
def compare_modules(file_, imports):
modules = parse_requirements(file_)
imports = [imports[i]["name"] for i in range(len(imports))]
modules = [modules[i]["name"] for i in range(len(modules))]
modules_not_imported = set(modules) - set(imports)
return modules_not_imported
|
Compare modules in a file to imported modules in a project.
Args:
file_ (str): File to parse for modules to be compared.
imports (tuple): Modules being imported in the project.
Returns:
set: The modules not imported in the project, but which do exist in the
specified file.
|
juraj-google-style
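A sketch of the underlying set difference, using hypothetical structures shaped like what parse_requirements and the import scanner return (so no requirements file is needed):
modules = [{'name': 'requests'}, {'name': 'flask'}, {'name': 'pandas'}]  # from the requirements file
imports = [{'name': 'requests'}, {'name': 'flask'}]                      # imported in the project
not_imported = set(m['name'] for m in modules) - set(i['name'] for i in imports)
print(not_imported)  # {'pandas'}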
|
def load_text_file(self, filename, encoding='utf-8', tokenizer=None):
with load_file(filename, encoding=encoding) as data:
self.load_text(data, tokenizer)
|
Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string
|
codesearchnet
|
def _ReadFileEntry(self, file_object, file_offset):
if self.file_format == 'bin-big-endian':
data_type_map = self._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY
file_entry_data_size = self._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY_SIZE
elif self.file_format == 'bin-little-endian':
data_type_map = self._CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY
file_entry_data_size = self._CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY_SIZE
elif self.file_format == 'odc':
data_type_map = self._CPIO_PORTABLE_ASCII_FILE_ENTRY
file_entry_data_size = self._CPIO_PORTABLE_ASCII_FILE_ENTRY_SIZE
elif self.file_format in ('crc', 'newc'):
data_type_map = self._CPIO_NEW_ASCII_FILE_ENTRY
file_entry_data_size = self._CPIO_NEW_ASCII_FILE_ENTRY_SIZE
file_entry = self._ReadStructure(
file_object, file_offset, file_entry_data_size, data_type_map,
'file entry')
file_offset += file_entry_data_size
if self.file_format in ('bin-big-endian', 'bin-little-endian'):
file_entry.modification_time = (
(file_entry.modification_time.upper << 16) |
file_entry.modification_time.lower)
file_entry.file_size = (
(file_entry.file_size.upper << 16) | file_entry.file_size.lower)
if self.file_format == 'odc':
for attribute_name in self._CPIO_ATTRIBUTE_NAMES_ODC:
value = getattr(file_entry, attribute_name, None)
try:
value = int(value, 8)
except ValueError:
raise errors.FileFormatError(
'Unable to convert attribute: {0:s} into an integer'.format(
attribute_name))
value = setattr(file_entry, attribute_name, value)
elif self.file_format in ('crc', 'newc'):
for attribute_name in self._CPIO_ATTRIBUTE_NAMES_CRC:
value = getattr(file_entry, attribute_name, None)
try:
value = int(value, 16)
except ValueError:
raise errors.FileFormatError(
'Unable to convert attribute: {0:s} into an integer'.format(
attribute_name))
value = setattr(file_entry, attribute_name, value)
path_data = file_object.read(file_entry.path_size)
file_offset += file_entry.path_size
path = path_data.decode('ascii')
path, _, _ = path.partition('\x00')
if self.file_format in ('bin-big-endian', 'bin-little-endian'):
padding_size = file_offset % 2
if padding_size > 0:
padding_size = 2 - padding_size
elif self.file_format == 'odc':
padding_size = 0
elif self.file_format in ('crc', 'newc'):
padding_size = file_offset % 4
if padding_size > 0:
padding_size = 4 - padding_size
file_offset += padding_size
archive_file_entry = CPIOArchiveFileEntry()
archive_file_entry.data_offset = file_offset
archive_file_entry.data_size = file_entry.file_size
archive_file_entry.group_identifier = file_entry.group_identifier
archive_file_entry.inode_number = file_entry.inode_number
archive_file_entry.modification_time = file_entry.modification_time
archive_file_entry.path = path
archive_file_entry.mode = file_entry.mode
archive_file_entry.size = (
file_entry_data_size + file_entry.path_size + padding_size +
file_entry.file_size)
archive_file_entry.user_identifier = file_entry.user_identifier
file_offset += file_entry.file_size
if self.file_format in ('bin-big-endian', 'bin-little-endian'):
padding_size = file_offset % 2
if padding_size > 0:
padding_size = 2 - padding_size
elif self.file_format == 'odc':
padding_size = 0
elif self.file_format in ('crc', 'newc'):
padding_size = file_offset % 4
if padding_size > 0:
padding_size = 4 - padding_size
if padding_size > 0:
archive_file_entry.size += padding_size
return archive_file_entry
|
Reads a file entry.
Args:
file_object (FileIO): file-like object.
file_offset (int): offset of the data relative from the start of
the file-like object.
Returns:
CPIOArchiveFileEntry: a file entry.
Raises:
FileFormatError: if the file entry cannot be read.
|
juraj-google-style
|
def random_masking(self, sequence: tf.Tensor, noise: tf.Tensor | None=None):
batch_size, seq_length, dim = shape_list(sequence)
len_keep = int(seq_length * (1 - self.config.mask_ratio))
if noise is None:
noise = tf.random.uniform(shape=(batch_size, seq_length), minval=0.0, maxval=1.0)
ids_shuffle = tf.argsort(noise, axis=1)
ids_restore = tf.argsort(ids_shuffle, axis=1)
ids_keep = ids_shuffle[:, :len_keep]
sequence_unmasked = tf.gather(sequence, axis=1, batch_dims=1, indices=ids_keep)
mask_keep = tf.zeros((batch_size, len_keep))
mask_remove = tf.ones((batch_size, seq_length - len_keep))
mask = tf.concat([mask_keep, mask_remove], axis=-1)
mask = tf.gather(mask, axis=1, batch_dims=1, indices=ids_restore)
return (sequence_unmasked, mask, ids_restore)
|
Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random
noise.
Args:
sequence (`tf.Tensor` of shape `(batch_size, sequence_length, dim)`)
noise (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*) which is
mainly used for testing purposes to control randomness and maintain the reproducibility
|
github-repos
|
def _EvaluateExpression(frame, expression):
try:
code = compile(expression, '<watched_expression>', 'eval')
except (TypeError, ValueError) as e:
return (False, {'isError': True, 'refersTo': 'VARIABLE_NAME', 'description': {'format': 'Invalid expression', 'parameters': [str(e)]}})
except SyntaxError as e:
return (False, {'isError': True, 'refersTo': 'VARIABLE_NAME', 'description': {'format': 'Expression could not be compiled: $0', 'parameters': [e.msg]}})
try:
return (True, native.CallImmutable(frame, code))
except BaseException as e:
return (False, {'isError': True, 'refersTo': 'VARIABLE_VALUE', 'description': {'format': 'Exception occurred: $0', 'parameters': [str(e)]}})
|
Compiles and evaluates watched expression.
Args:
frame: evaluation context.
expression: watched expression to compile and evaluate.
Returns:
(False, status) on error or (True, value) on success.
|
codesearchnet
|
def _maxSizeCheck(cls, obj):
fail = False
size = 0
if isinstance(obj, numbers.Number):
if (obj > constants.MAX_FRAME_SIZE):
fail = True
size = obj
elif hasattr(obj, '__len__'):
size = len(obj)
fail = (size > constants.MAX_FRAME_SIZE)
if fail:
raise MaxSizeException(('Frame size %s > %s (MAX_FRAME_SIZE)' % (size, constants.MAX_FRAME_SIZE)))
|
Raise a MaxSizeException if ``obj`` exceeds MAX_FRAME_SIZE
Args:
obj (numbers.Number or collection):
Raises:
:class:`fileseq.exceptions.MaxSizeException`:
|
codesearchnet
|
def AddStopTimeObject(self, stoptime, schedule=None, problems=None):
if schedule is None:
schedule = self._schedule
if schedule is None:
warnings.warn("No longer supported. _schedule attribute is used to get "
"stop_times table", DeprecationWarning)
if problems is None:
problems = schedule.problem_reporter
new_secs = stoptime.GetTimeSecs()
cursor = schedule._connection.cursor()
cursor.execute("SELECT max(stop_sequence), max(arrival_secs), "
"max(departure_secs) FROM stop_times WHERE trip_id=?",
(self.trip_id,))
row = cursor.fetchone()
if row[0] is None:
stoptime.stop_sequence = 1
if new_secs == None:
problems.OtherProblem(
'No time for first StopTime of trip_id "%s"' % (self.trip_id,))
else:
stoptime.stop_sequence = row[0] + 1
prev_secs = max(row[1], row[2])
if new_secs != None and new_secs < prev_secs:
problems.OtherProblem(
'out of order stop time for stop_id=%s trip_id=%s %s < %s' %
(util.EncodeUnicode(stoptime.stop_id),
util.EncodeUnicode(self.trip_id),
util.FormatSecondsSinceMidnight(new_secs),
util.FormatSecondsSinceMidnight(prev_secs)))
self._AddStopTimeObjectUnordered(stoptime, schedule)
|
Add a StopTime object to the end of this trip.
Args:
stoptime: A StopTime object. Should not be reused in multiple trips.
schedule: Schedule object containing this trip which must be
passed to Trip.__init__ or here
problems: ProblemReporter object for validating the StopTime in its new
home
Returns:
None
|
juraj-google-style
|
def _kl_beta_beta(d1, d2, name=None):
def delta(fn, is_property=True):
fn1 = getattr(d1, fn)
fn2 = getattr(d2, fn)
return fn2 - fn1 if is_property else fn2() - fn1()
with ops.name_scope(name, 'kl_beta_beta', values=[d1.concentration1, d1.concentration0, d1.total_concentration, d2.concentration1, d2.concentration0, d2.total_concentration]):
return delta('_log_normalization', is_property=False) - math_ops.digamma(d1.concentration1) * delta('concentration1') - math_ops.digamma(d1.concentration0) * delta('concentration0') + math_ops.digamma(d1.total_concentration) * delta('total_concentration')
|
Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.
Args:
d1: instance of a Beta distribution object.
d2: instance of a Beta distribution object.
name: (optional) Name to use for created operations.
default is "kl_beta_beta".
Returns:
Batchwise KL(d1 || d2)
|
github-repos
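For reference, with d1 = Beta(alpha_1, beta_1) and d2 = Beta(alpha_2, beta_2) (concentration1 = alpha, concentration0 = beta), the expression above evaluates the standard closed form, where B is the Beta function and psi the digamma function:
\mathrm{KL}(d_1 \,\|\, d_2) = \ln B(\alpha_2, \beta_2) - \ln B(\alpha_1, \beta_1) - (\alpha_2 - \alpha_1)\,\psi(\alpha_1) - (\beta_2 - \beta_1)\,\psi(\beta_1) + (\alpha_2 + \beta_2 - \alpha_1 - \beta_1)\,\psi(\alpha_1 + \beta_1)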
|
def init(name, *args):
matcher = get(name)
if (not matcher):
raise ValueError('Cannot find matcher: {}'.format(name))
return matcher(*args)
|
Initializes a matcher instance passing variadic arguments to
its constructor. Acts as a delegator proxy.
Arguments:
name (str): matcher class name or alias to execute.
*args (mixed): variadic argument
Returns:
matcher: matcher instance.
Raises:
ValueError: if matcher was not found.
|
codesearchnet
|
def LogHttpAdminUIAccess(self, request, response):
event_id = self.GetNewEventId()
api_method = response.headers.get('X-API-Method', 'unknown')
api_reason = response.headers.get('X-GRR-Reason', 'none')
log_msg = ('%s API call [%s] by %s (reason: %s): %s [%d]' % (event_id, api_method, request.user, api_reason, request.full_path, response.status_code))
logging.info(log_msg)
if (response.headers.get('X-No-Log') != 'True'):
if data_store.RelationalDBEnabled():
entry = rdf_objects.APIAuditEntry.FromHttpRequestResponse(request, response)
data_store.REL_DB.WriteAPIAuditEntry(entry)
|
Log an http based api call.
Args:
request: A WSGI request object.
response: A WSGI response object.
|
codesearchnet
|
def apply_to_structure(self, structure):
def_struct = structure.copy()
old_latt = def_struct.lattice.matrix
new_latt = np.transpose(np.dot(self, np.transpose(old_latt)))
def_struct.lattice = Lattice(new_latt)
return def_struct
|
Apply the deformation gradient to a structure.
Args:
structure (Structure object): the structure object to
be modified by the deformation
|
codesearchnet
|
def _load_hdf5(self, filename, parent_level='CellpyData'):
if (not os.path.isfile(filename)):
self.logger.info(f'file does not exist: {filename}')
raise IOError
store = pd.HDFStore(filename)
required_keys = ['dfdata', 'dfsummary', 'info']
required_keys = [((('/' + parent_level) + '/') + _) for _ in required_keys]
for key in required_keys:
if (key not in store.keys()):
self.logger.info(f'This hdf-file is not good enough - at least one key is missing: {key}')
raise Exception(f'OH MY GOD! At least one crucial key is missing {key}!')
self.logger.debug(f'Keys in current hdf5-file: {store.keys()}')
data = DataSet()
if (parent_level != 'CellpyData'):
self.logger.debug('Using non-default parent label for the hdf-store: {}'.format(parent_level))
infotable = store.select((parent_level + '/info'))
try:
data.cellpy_file_version = self._extract_from_dict(infotable, 'cellpy_file_version')
except Exception as e:
data.cellpy_file_version = 0
warnings.warn(f'Unhandled exception raised: {e}')
if (data.cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION):
raise WrongFileVersion
if (data.cellpy_file_version > CELLPY_FILE_VERSION):
raise WrongFileVersion
data.dfsummary = store.select((parent_level + '/dfsummary'))
data.dfdata = store.select((parent_level + '/dfdata'))
try:
data.step_table = store.select((parent_level + '/step_table'))
except Exception as e:
self.logger.debug('could not get step_table from cellpy-file')
data.step_table = pd.DataFrame()
warnings.warn(f'Unhandled exception raised: {e}')
try:
fidtable = store.select((parent_level + '/fidtable'))
fidtable_selected = True
except Exception as e:
self.logger.debug('could not get fid-table from cellpy-file')
fidtable = []
warnings.warn('no fidtable - you should update your hdf5-file')
fidtable_selected = False
self.logger.debug(' h5')
newtests = []
data = self._load_infotable(data, infotable, filename)
if fidtable_selected:
(data.raw_data_files, data.raw_data_files_length) = self._convert2fid_list(fidtable)
else:
data.raw_data_files = None
data.raw_data_files_length = None
newtests.append(data)
store.close()
return newtests
|
Load a cellpy-file.
Args:
filename (str): Name of the cellpy file.
parent_level (str) (optional): name of the parent level
(defaults to "CellpyData")
Returns:
loaded datasets (DataSet-object)
|
codesearchnet
|
def user_activity_stats_by_date(self, username, date, grouped=None):
request_url = '{}/api/0/user/{}/activity/{}'.format(self.instance, username, date)
payload = {}
if (username is not None):
payload['username'] = username
if (date is not None):
payload['date'] = date
if (grouped is not None):
payload['grouped'] = grouped
return_value = self._call_api(request_url, params=payload)
return return_value['activities']
|
Retrieve activity information about a specific user on the specified date.
Params:
username (string): filters the username of the user whose activity you are interested in.
date (string): filters by the date of interest, best provided in ISO format: YYYY-MM-DD
grouped (boolean): filters whether or not to group the commits
Returns:
list: A list of activities done by a given user on some particular
date for all the projects for given Pagure instance.
|
codesearchnet
|
def __init__(self, datastore, serializer=None):
super(SerializerShimDatastore, self).__init__(datastore)
if serializer:
self.serializer = serializer
test = { 'value': repr(self) }
errstr = 'Serializer error: serialized value does not match original'
assert self.serializer.loads(self.serializer.dumps(test)) == test, errstr
|
Initializes internals and tests the serializer.
Args:
datastore: a child datastore for the ShimDatastore superclass.
serializer: a serializer object (responds to loads and dumps).
|
juraj-google-style
|
def power(x1, x2):
if any_symbolic_tensors((x1, x2)):
return Power().symbolic_call(x1, x2)
return backend.numpy.power(x1, x2)
|
First tensor elements raised to powers from second tensor, element-wise.
Args:
x1: The bases.
x2: The exponents.
Returns:
Output tensor, the bases in `x1` raised to the exponents in `x2`.
|
github-repos
|
def _ParseIdentifierMappingRecord(self, parser_mediator, table_name, esedb_record):
record_values = self._GetRecordValues(parser_mediator, table_name, esedb_record)
identifier = record_values.get('IdIndex', None)
if (identifier is None):
parser_mediator.ProduceExtractionWarning('IdIndex value missing from table: SruDbIdMapTable')
return (None, None)
identifier_type = record_values.get('IdType', None)
if (identifier_type not in self._SUPPORTED_IDENTIFIER_TYPES):
parser_mediator.ProduceExtractionWarning('unsupported IdType value: {0!s} in table: SruDbIdMapTable'.format(identifier_type))
return (None, None)
mapped_value = record_values.get('IdBlob', None)
if (mapped_value is None):
parser_mediator.ProduceExtractionWarning('IdBlob value missing from table: SruDbIdMapTable')
return (None, None)
if (identifier_type == 3):
try:
fwnt_identifier = pyfwnt.security_identifier()
fwnt_identifier.copy_from_byte_stream(mapped_value)
mapped_value = fwnt_identifier.get_string()
except IOError:
parser_mediator.ProduceExtractionWarning('unable to decode IdBlob value as Windows NT security identifier')
return (None, None)
else:
try:
mapped_value = mapped_value.decode('utf-16le').rstrip('\x00')
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning('unable to decode IdBlob value as UTF-16 little-endian string')
return (None, None)
return (identifier, mapped_value)
|
Extracts an identifier mapping from a SruDbIdMapTable record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
table_name (str): name of the table the record is stored in.
esedb_record (pyesedb.record): record.
Returns:
tuple[int, str]: numeric identifier and its string representation or
None, None if no identifier mapping can be retrieved from the record.
|
codesearchnet
|
def get_processors(processor_cat, prop_defs, data_attr=None):
processor_defs = prop_defs.get(processor_cat,[])
processor_list = []
for processor in processor_defs:
proc_class = PropertyProcessor[processor['rdf_type'][0]]
processor_list.append(proc_class(processor.get('kds_params', [{}]),
data_attr))
return processor_list
|
Reads the property definitions and adds the applicable processors for the property
Args:
processor_cat (str): The category of processors to retrieve
prop_defs: property definitions as defined by the rdf definitions
data_attr: the attr to manipulate during processing.
Returns:
list: a list of processors
|
juraj-google-style
|
def get_string(strings: Sequence[str],
prefix: str,
ignoreleadingcolon: bool = False,
precedingline: str = "") -> Optional[str]:
s = get_what_follows(strings, prefix, precedingline=precedingline)
if ignoreleadingcolon:
f = s.find(":")
if f != -1:
s = s[f+1:].strip()
if len(s) == 0:
return None
return s
|
Find a string as per :func:`get_what_follows`.
Args:
strings: see :func:`get_what_follows`
prefix: see :func:`get_what_follows`
ignoreleadingcolon: if ``True``, restrict the result to what comes
after its first colon (and whitespace-strip that)
precedingline: see :func:`get_what_follows`
Returns:
the line fragment
|
juraj-google-style
|
def usage(shorthelp=False, writeto_stdout=False, detailed_error=None,
exitcode=None):
if writeto_stdout:
stdfile = sys.stdout
else:
stdfile = sys.stderr
doc = sys.modules['__main__'].__doc__
if not doc:
doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
doc = flags.text_wrap(doc, indent=' ', firstline_indent='')
else:
num_specifiers = doc.count('%') - 2 * doc.count('%%')
try:
doc %= (sys.argv[0],) * num_specifiers
except (OverflowError, TypeError, ValueError):
pass
if shorthelp:
flag_str = FLAGS.main_module_help()
else:
flag_str = FLAGS.get_help()
try:
stdfile.write(doc)
if flag_str:
stdfile.write('\nflags:\n')
stdfile.write(flag_str)
stdfile.write('\n')
if detailed_error is not None:
stdfile.write('\n%s\n' % detailed_error)
except IOError as e:
if e.errno != errno.EPIPE:
raise
if exitcode is not None:
sys.exit(exitcode)
|
Writes __main__'s docstring to stderr with some help text.
Args:
shorthelp: bool, if True, prints only flags from the main module,
rather than all flags.
writeto_stdout: bool, if True, writes help message to stdout,
rather than to stderr.
detailed_error: str, additional detail about why usage info was presented.
exitcode: optional integer, if set, exits with this status code after
writing help.
|
juraj-google-style
|
def GetObject(self, identifier):
cache_value = self._values.get(identifier, None)
if not cache_value:
return None
return cache_value.vfs_object
|
Retrieves a cached object based on the identifier.
This method ignores the cache value reference count.
Args:
identifier (str): VFS object identifier.
Returns:
object: cached VFS object or None if not cached.
|
juraj-google-style
|
def get_priority(priority):
if isinstance(priority, int):
if priority < 0 or priority > 100:
raise ValueError('priority must be between 0 and 100')
return priority
elif isinstance(priority, Priority):
return priority.value
elif isinstance(priority, str):
return Priority[priority.upper()].value
else:
raise TypeError('priority must be an integer or Priority enum value')
|
Get priority value.
Args:
priority (int or str or :obj:`Priority`): Priority.
Returns:
int: The priority value.
|
juraj-google-style
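A hedged usage sketch, assuming get_priority above is importable and that its Priority enum defines a member named HIGH (the exact enum values are an assumption):
print(get_priority(42))       # -> 42
print(get_priority('high'))   # -> Priority['HIGH'].value
try:
    get_priority(250)
except ValueError as err:
    print(err)                # priority must be between 0 and 100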
|
def _on_trace(self, sequence, topic, message):
try:
conn_key = self._find_connection(topic)
conn_id = self.conns.get_connection_id(conn_key)
except ArgumentError:
self._logger.warn('Dropping trace message that does not correspond with a known connection, topic=%s', topic)
return
try:
tracing = messages.TracingNotification.verify(message)
self._trigger_callback('on_trace', conn_id, tracing['trace'])
except Exception:
self._logger.exception('Error processing trace conn_id=%d', conn_id)
|
Process a trace received from a device.
Args:
sequence (int): The sequence number of the packet received
topic (string): The topic this message was received on
message (dict): The message itself
|
codesearchnet
|
def _build_colocation_attr_map(input_map, absolute_import_scope):
colocation_attr_map = collections.defaultdict(_ConsistentValue)
used_outputs_of_imported_ops = collections.defaultdict(set)
for imported_tensor_name, mapped_tensor in input_map.items():
imported_tensor_name = absolute_import_scope + "/" + imported_tensor_name
imported_op_name, imported_index = _split_tensor_name(imported_tensor_name)
key = tf.compat.as_bytes("loc:@" + imported_op_name)
colocation_attr_map[key].Set(
mapped_tensor.op.colocation_groups(),
{"reason": "input '%s' is substituted by '%s'" % (
imported_tensor_name, mapped_tensor.name)})
used_outputs_of_imported_ops[imported_op_name].add(imported_index)
for imported_op_name, used_outputs in used_outputs_of_imported_ops.items():
imported_op = tf_v1.get_default_graph().get_operation_by_name(
imported_op_name)
unused_outputs = set(range(len(imported_op.outputs))) - used_outputs
if not unused_outputs: continue
key = tf.compat.as_bytes("loc:@" + imported_op_name)
if imported_op.colocation_groups() != [key]:
raise ValueError(
"Internal error: tensors from op '%s' are partially remapped in "
"import but op.colocation_groups=%s cannot be captured in a "
"simple rewrite rule." %
(imported_op_name, imported_op.colocation_groups()))
colocation_attr_map[key].Set(
[key],
{"reason": "tensor '%s:%s' is not substituted by inputs" % (
imported_op_name,
",".join(str(i) for i in sorted(unused_outputs)))})
return colocation_attr_map
|
Returns a dict mapping from pre-import to post-import colocation attrs.
Args:
input_map: as for fix_colocation_after_import.
absolute_import_scope: as for fix_colocation_after_import.
Returns:
A dict that maps bytes `"loc:@" + absolute_import_scope + "/foo"`
to _ConsistentValues set to the lists of bytes `["loc:@...", ...]`
according to the rewriting scheme of fix_colocation_after_import.
In case of an inconsistent rewriting, _ConsistentValue.has_error is true.
|
juraj-google-style
|
def parse_GDS(filepath):
dataset_lines = []
subsets = {}
database = None
dataset_name = None
with utils.smart_open(filepath) as soft:
groupper = groupby(soft, (lambda x: x.startswith('^')))
for (is_new_entry, group) in groupper:
if is_new_entry:
(entry_type, entry_name) = __parse_entry(next(group))
logger.debug(('%s: %s' % (entry_type.upper(), entry_name)))
if (entry_type == 'SUBSET'):
(is_data, data_group) = next(groupper)
message = 'The key is not False, probably there is an error in the SOFT file'
assert (not is_data), message
subset_metadata = parse_metadata(data_group)
subsets[entry_name] = GDSSubset(name=entry_name, metadata=subset_metadata)
elif (entry_type == 'DATABASE'):
(is_data, data_group) = next(groupper)
message = 'The key is not False, probably there is an error in the SOFT file'
assert (not is_data), message
database_metadata = parse_metadata(data_group)
database = GEODatabase(name=entry_name, metadata=database_metadata)
elif (entry_type == 'DATASET'):
(is_data, data_group) = next(groupper)
dataset_name = entry_name
for line in data_group:
dataset_lines.append(line.rstrip())
else:
logger.error(('Cannot recognize type %s' % entry_type))
metadata = parse_metadata(dataset_lines)
columns = parse_GDS_columns(dataset_lines, subsets)
table = parse_table_data(dataset_lines)
return GDS(name=dataset_name, metadata=metadata, columns=columns, table=table, subsets=subsets, database=database)
|
Parse GDS SOFT file.
Args:
filepath (:obj:`str`): Path to GDS SOFT file.
Returns:
:obj:`GEOparse.GDS`: A GDS object.
|
codesearchnet
|
def random_shift(image, wsr=0.1, hsr=0.1):
height, width, _ = common_layers.shape_list(image)
width_range, height_range = wsr*width, hsr*height
height_translations = tf.random_uniform((1,), -height_range, height_range)
width_translations = tf.random_uniform((1,), -width_range, width_range)
translations = tf.concat((height_translations, width_translations), axis=0)
return tf.contrib.image.translate(image, translations=translations)
|
Apply random horizontal and vertical shift to images.
This is the default data-augmentation strategy used on CIFAR in Glow.
Args:
image: a 3-D Tensor
wsr: Width shift range, as a float fraction of the width.
hsr: Height shift range, as a float fraction of the height.
Returns:
images: images translated by the provided wsr and hsr.
|
juraj-google-style
|
def get_2d_local_memory(x, query_shape, memory_flange):
(_, height, width, depth_x) = common_layers.shape_list(x)
x_center_blocks = _extract_blocks(x, query_shape[0], query_shape[1])
paddings = [[0, 0], [memory_flange[0], memory_flange[0]],
[memory_flange[1], memory_flange[1]], [0, 0]]
padded_x = tf.pad(x, paddings)
padded_x.set_shape([None, height+2*memory_flange[0],
width+2*memory_flange[1], depth_x])
x_outer_memory_blocks = _extract_blocks(padded_x,
memory_flange[0], memory_flange[1])
x_left_blocks, x_right_blocks = _get_left_right_blocks(
x_outer_memory_blocks)
t_hw_block = lambda x: tf.transpose(x, [0, 2, 1, 4, 3, 5])
x_top_center_blocks, x_bottom_center_blocks = (
map(t_hw_block, _get_left_right_blocks(
t_hw_block(x_outer_memory_blocks))))
x_left_corner_blocks, x_right_corner_blocks = _split_along_width(
x_outer_memory_blocks)
t_hw = lambda x: tf.transpose(x, [0, 2, 1, 3, 4, 5])
x_top_left_corner_blocks, x_bottom_left_corner_blocks = (
map(t_hw, _split_along_width(t_hw(x_left_corner_blocks))))
x_top_right_corner_blocks, x_bottom_right_corner_blocks = (
map(t_hw, _split_along_width(t_hw(x_right_corner_blocks))))
x_top_memory = tf.concat(
[x_top_left_corner_blocks,
x_top_center_blocks,
x_top_right_corner_blocks], axis=4)
x_middle_memory = tf.concat(
[x_left_blocks, x_center_blocks, x_right_blocks], axis=4)
x_bottom_memory = tf.concat(
[x_bottom_left_corner_blocks,
x_bottom_center_blocks,
x_bottom_right_corner_blocks], axis=4)
x = tf.concat([x_top_memory, x_middle_memory, x_bottom_memory], axis=3)
return x
|
Stitches together the local 2d memory blocks.
Args:
x: a [batch, height, width, depth tensor]
query_shape: 2-d integer list of query shape
memory_flange: 2-d integer list of memory flanges
Returns:
x: A [batch, num_h_blocks, num_w_blocks,
query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]]
tensor.
|
juraj-google-style
|
def get_ops_from_nodedef(node_def):
if not node_def.device:
node_def.device = '/cpu:0'
kernel_class = _pywrap_kernel_registry.TryFindKernelClass(node_def.SerializeToString())
op = str(node_def.op)
if kernel_class or op in OPS_WITHOUT_KERNEL_ALLOWLIST:
return (op, str(kernel_class.decode('utf-8')) if kernel_class else None)
else:
tf_logging.warning('Warning: no kernel found for op %s', op)
return None
|
Gets the op and kernel needed from the given NodeDef.
Args:
node_def: TF NodeDef to get op/kernel information.
Returns:
A tuple of (op_name, kernel_name). If the op is not in the allowlist of ops
without kernel and there is no kernel found, then return None.
|
github-repos
|
def __init__(self, filename, compression_type=None):
self._filename = ops.convert_to_tensor(filename, dtypes.string, name='filename')
self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string)
|
Initializes a `TFRecordWriter`.
Args:
filename: a string path indicating where to write the TFRecord data.
compression_type: (Optional.) a string indicating what type of compression
to use when writing the file. See `tf.io.TFRecordCompressionType` for
what types of compression are available. Defaults to `None`.
|
github-repos
|
def read_full(fileobj, size):
if size < 0:
raise ValueError("size must not be negative")
data = fileobj.read(size)
if len(data) != size:
raise IOError
return data
|
Like fileobj.read but raises IOError if not all requested data is
returned.
If you want to distinguish IOError and the EOS case, better handle
the error yourself instead of using this.
Args:
fileobj (fileobj)
size (int): amount of bytes to read
Raises:
IOError: In case read fails or not enough data is read
|
juraj-google-style
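A quick usage sketch with an in-memory file, assuming read_full above is in scope:
import io

buf = io.BytesIO(b'abcdef')
print(read_full(buf, 4))   # b'abcd'
try:
    read_full(buf, 10)     # only 2 bytes remain, so this raises
except IOError:
    print('short read')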
|
def stat(self, path=None, client_kwargs=None, header=None):
stat = OrderedDict((
("st_mode", 0), ("st_ino", 0), ("st_dev", 0), ("st_nlink", 0),
("st_uid", 0), ("st_gid", 0), ("st_size", 0), ("st_atime", 0),
("st_mtime", 0), ("st_ctime", 0)))
header = self.head(path, client_kwargs, header)
for key, method in (
('st_size', self._getsize_from_header),
('st_ctime', self._getctime_from_header),
('st_mtime', self._getmtime_from_header),):
try:
stat[key] = int(method(header))
except UnsupportedOperation:
continue
if self.islink(path=path, header=header):
stat['st_mode'] = S_IFLNK
elif ((not path or path[-1] == '/' or self.is_locator(path)) and not
stat['st_size']):
stat['st_mode'] = S_IFDIR
else:
stat['st_mode'] = S_IFREG
sub = self._CHAR_FILTER.sub
for key, value in tuple(header.items()):
stat['st_' + sub('', key.lower())] = value
stat_result = namedtuple('stat_result', tuple(stat))
stat_result.__name__ = 'os.stat_result'
stat_result.__module__ = 'pycosio'
return stat_result(**stat)
|
Get the status of an object.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
os.stat_result: Stat result object
|
juraj-google-style
|
def mtr_tr_dense(sz):
n = (2 ** sz)
hparams = mtf_bitransformer_base()
hparams.d_model = 1024
hparams.max_length = 256
hparams.batch_size = 128
hparams.d_ff = int((4096 * n))
hparams.d_kv = 128
hparams.encoder_num_heads = int((8 * n))
hparams.decoder_num_heads = int((8 * n))
hparams.learning_rate_decay_steps = 51400
hparams.layout = 'batch:batch;vocab:model;d_ff:model;heads:model'
hparams.mesh_shape = 'batch:32'
hparams.label_smoothing = 0.1
hparams.layer_prepostprocess_dropout = 0.1
hparams.attention_dropout = 0.1
hparams.relu_dropout = 0.1
return hparams
|
Series of machine translation models.
All models are trained on sequences of 256 tokens.
You can use the dataset translate_enfr_wmt32k_packed.
154000 steps = 3 epochs.
Args:
sz: an integer
Returns:
a hparams
|
codesearchnet
|
def insert(self, keys, values, name=None):
with ops.name_scope(name, '%s_lookup_table_insert' % self.name, [self.resource_handle, keys, values]):
keys = ops.convert_to_tensor(keys, self._key_dtype, name='keys')
values = ops.convert_to_tensor(values, self._value_dtype, name='values')
with ops.colocate_with(self.resource_handle):
op = gen_lookup_ops.lookup_table_insert_v2(self.resource_handle, keys, values)
return op
|
Associates `keys` with `values`.
Args:
keys: Keys to insert. Can be a tensor of any shape. Must match the table's
key type.
values: Values to be associated with keys. Must be a tensor of the same
shape as `keys` and match the table's value type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` or `values` doesn't match the table data
types.
|
github-repos
|
def delete_node(self, node_name):
graph = self.graph
if node_name not in graph:
raise KeyError('node %s does not exist' % node_name)
graph.pop(node_name)
for node, edges in graph.items():
if node_name in edges:
edges.remove(node_name)
|
Deletes this node and all edges referencing it.
Args:
node_name (str): The name of the node to delete.
Raises:
KeyError: Raised if the node does not exist in the graph.
|
juraj-google-style
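A standalone sketch of the same bookkeeping, assuming the graph is a plain dict mapping node names to sets of edges:
graph = {'a': {'b'}, 'b': {'a', 'c'}, 'c': set()}
graph.pop('b')                      # drop the node itself
for node, edges in graph.items():   # then drop edges that referenced it
    if 'b' in edges:
        edges.remove('b')
print(graph)  # {'a': set(), 'c': set()}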
|
def default_local_init_op():
return control_flow_ops.group(variables.local_variables_initializer(), lookup_ops.tables_initializer(), resources.initialize_resources(resources.local_resources()))
|
Returns an op that groups the default local init ops.
This op is used during session initialization when a Scaffold is
initialized without specifying the local_init_op arg. It includes
`tf.compat.v1.local_variables_initializer`,
`tf.compat.v1.tables_initializer`, and also
initializes local session resources.
Returns:
The default Scaffold local init op.
|
github-repos
|
def IsEquivalent(self, other):
if (self.name and other.name):
return (self.name == other.name)
if self.name:
(self_family, self_version_tuple) = self._FAMILY_AND_VERSION_PER_NAME.get(self.name, self._DEFAULT_FAMILY_AND_VERSION)
return ((self_family == other.family) and (self_version_tuple == other.version_tuple))
if (self.family and self.version):
if other.name:
(other_family, other_version_tuple) = self._FAMILY_AND_VERSION_PER_NAME.get(other.name, self._DEFAULT_FAMILY_AND_VERSION)
else:
other_family = other.family
other_version_tuple = other.version_tuple
return ((self.family == other_family) and (self.version_tuple == other_version_tuple))
if self.family:
if other.name:
(other_family, _) = self._FAMILY_AND_VERSION_PER_NAME.get(other.name, self._DEFAULT_FAMILY_AND_VERSION)
else:
other_family = other.family
return (self.family == other_family)
return False
|
Determines if 2 operating system artifacts are equivalent.
This function compares the operating systems based in order of:
* name derived from product
* family and version
* family
Args:
other (OperatingSystemArtifact): operating system artifact attribute
container to compare with.
Returns:
bool: True if the operating systems are considered equivalent, False if
the most specific criteria do not match, or no criteria are available.
|
codesearchnet
|
def load_values(self, dictionary, as_defaults=False, flat=False):
if flat:
separator = self.settings.str_path_separator
flat_dictionary = dictionary
dictionary = collections.OrderedDict()
for (k, v) in flat_dictionary.items():
k_parts = k.split(separator)
c = dictionary
for (i, kp) in enumerate(k_parts):
if (i >= (len(k_parts) - 1)):
c[kp] = v
else:
if (kp not in c):
c[kp] = collections.OrderedDict()
c = c[kp]
for (name, value) in dictionary.items():
if (name not in self):
if as_defaults:
if isinstance(value, dict):
self[name] = self.create_section()
self[name].load_values(value, as_defaults=as_defaults)
else:
self[name] = self.create_item(name, default=value)
else:
pass
continue
resolution = self._get_item_or_section(name, handle_not_found=False)
if is_config_item(resolution):
if as_defaults:
resolution.default = value
else:
resolution.value = value
else:
resolution.load_values(value, as_defaults=as_defaults)
|
Import config values from a dictionary.
When ``as_defaults`` is set to ``True``, the values
imported will be set as defaults. This can be used to
declare the sections and items of configuration.
Values of sections and items in ``dictionary`` can be
dictionaries as well as instances of :class:`.Item` and
:class:`.Config`.
Args:
dictionary:
as_defaults: if ``True``, the imported values will be set as defaults.
|
codesearchnet
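A standalone sketch of the flat-key expansion performed above, assuming '.' as str_path_separator:
import collections

flat = {'db.host': 'localhost', 'db.port': 5432}
nested = collections.OrderedDict()
for k, v in flat.items():
    k_parts = k.split('.')
    c = nested
    for i, kp in enumerate(k_parts):
        if i >= len(k_parts) - 1:
            c[kp] = v
        else:
            c = c.setdefault(kp, collections.OrderedDict())
print(nested)  # nested OrderedDicts: {'db': {'host': 'localhost', 'port': 5432}}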
|
def length(self, rows=None):
rows = (tf.range(self._capacity) if (rows is None) else rows)
return tf.gather(self._length, rows)
|
Tensor holding the current length of episodes.
Args:
rows: Episodes to select length from, defaults to all.
Returns:
Batch tensor of sequence lengths.
|
codesearchnet
|
def update(self, config_dict: dict[str, Any]):
for key, value in config_dict.items():
setattr(self, key, value)
|
Updates attributes of this class with attributes from `config_dict`.
Args:
config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.
|
github-repos
|
def return_selected_form_items(form_info):
selected_keys = []
selected_names = []
for chosen in form_info:
if chosen['choice']:
selected_keys.append(chosen['key'])
selected_names.append(chosen['name'])
return selected_keys, selected_names
|
Returns the chosen keys and names from a given form.
Args:
form_info: serialized list of dict form data
Returns:
selected_keys(list): Chosen keys list
selected_names(list): Chosen channels' or subscribers' names.
|
juraj-google-style
|
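A hedged usage sketch; the input shape below is inferred from the code, and it assumes return_selected_form_items above is in scope:

form_info = [
    {"choice": True, "key": "ch-1", "name": "Alerts"},
    {"choice": False, "key": "ch-2", "name": "News"},
    {"choice": True, "key": "ch-3", "name": "Weekly digest"},
]
keys, names = return_selected_form_items(form_info)
# keys  == ["ch-1", "ch-3"]
# names == ["Alerts", "Weekly digest"]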
def random_name(num_surnames=2):
a = []
if random.random() < _PROB_PREF:
a.append(_prefixes[random.randint(0, len(_prefixes) - 1)])
a.append(_forenames[random.randint(0, len(_forenames) - 1)])
for i in range(num_surnames):
a.append(_surnames[random.randint(0, len(_surnames) - 1)])
if random.random() < _PROB_SUFF:
a.append(_suffixes[random.randint(0, len(_suffixes) - 1)])
return " ".join(a)
|
Returns a random person name
Arguments:
num_surnames -- number of surnames
|
juraj-google-style
|
def ring_position(self):
if (self.type != EventType.TABLET_PAD_RING):
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_ring_position(self._handle)
|
The current position of the ring, in degrees
counterclockwise from the northern-most point of the ring in
the tablet's current logical orientation.
If the source is
:attr:`~libinput.constant.TabletPadRingAxisSource.FINGER`,
libinput sends a terminating event with a ring value of -1 when
the finger is lifted from the ring. A caller may use this information
to e.g. determine if kinetic scrolling should be triggered.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_RING`, this property
raises :exc:`AttributeError`.
Returns:
float: The current value of the axis, or -1 if the finger was
lifted.
Raises:
AttributeError
|
codesearchnet
|
def agent_heartbeat(self, agent_id, metrics, run_states):
mutation = gql('\n mutation Heartbeat(\n $id: ID!,\n $metrics: JSONString,\n $runState: JSONString\n ) {\n agentHeartbeat(input: {\n id: $id,\n metrics: $metrics,\n runState: $runState\n }) {\n agent {\n id\n }\n commands\n }\n }\n ')
try:
response = self.gql(mutation, variable_values={'id': agent_id, 'metrics': json.dumps(metrics), 'runState': json.dumps(run_states)})
except Exception as e:
message = ast.literal_eval(e.args[0])['message']
logger.error('Error communicating with W&B: %s', message)
return []
else:
return json.loads(response['agentHeartbeat']['commands'])
|
Notify server about agent state, receive commands.
Args:
agent_id (str): agent_id
metrics (dict): system metrics
run_states (dict): run_id: state mapping
Returns:
List of commands to execute.
|
codesearchnet
|
def flip(x, axis=None):
if any_symbolic_tensors((x,)):
return Flip(axis=axis).symbolic_call(x)
return backend.numpy.flip(x, axis=axis)
|
Reverse the order of elements in the tensor along the given axis.
The shape of the tensor is preserved, but the elements are reordered.
Args:
x: Input tensor.
axis: Axis or axes along which to flip the tensor. The default,
`axis=None`, will flip over all of the axes of the input tensor.
Returns:
Output tensor with entries of `axis` reversed.
|
github-repos
|
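The reordering semantics mirror numpy.flip; the NumPy lines below are an illustrative analogy, not the Keras backend call itself:

import numpy as np

x = np.array([[1, 2, 3],
              [4, 5, 6]])
np.flip(x, axis=None)  # reverse every axis -> [[6, 5, 4], [3, 2, 1]]
np.flip(x, axis=0)     # reverse rows       -> [[4, 5, 6], [1, 2, 3]]
np.flip(x, axis=1)     # reverse columns    -> [[3, 2, 1], [6, 5, 4]]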
def get_parent(self, tree, alt=None):
parent = self.parent_db.get(tree.path)
if (not parent):
return alt
return list(parent)[0]
|
Get parent for given `tree` or `alt` if not found.
Args:
tree (obj): :class:`.Tree` instance, which is already stored in DB.
alt (obj, default None): Alternative value returned when `tree` is
not found.
Returns:
obj: :class:`.Tree` parent to given `tree`.
|
codesearchnet
|
def _render_fluent_timestep(self, fluent_type: str, fluents: Sequence[Tuple[(str, np.array)]], fluent_variables: Sequence[Tuple[(str, List[str])]]) -> None:
for (fluent_pair, variable_list) in zip(fluents, fluent_variables):
(name, fluent) = fluent_pair
(_, variables) = variable_list
print(name)
fluent = fluent.flatten()
for (variable, value) in zip(variables, fluent):
print('- {}: {} = {}'.format(fluent_type, variable, value))
print()
|
Prints `fluents` of given `fluent_type` as list of instantiated variables
with corresponding values.
Args:
fluent_type (str): Fluent type.
fluents (Sequence[Tuple[str, np.array]]): List of pairs (fluent_name, fluent_values).
fluent_variables (Sequence[Tuple[str, List[str]]]): List of pairs (fluent_name, args).
|
codesearchnet
|
def handler(self, direction, verb, priority=10):
def parent_fn(func):
@functools.wraps(func)
def child_fn(msg):
func(msg)
self.register_event(direction, verb, child_fn, priority=priority)
return child_fn
return parent_fn
|
Register this function as an event handler.
Args:
direction (str): ``in``, ``out``, ``both``, ``raw``.
verb (str): Event name.
priority (int): Handler priority (lower priority executes first).
Example:
These handlers print out a pretty raw log::
reactor = girc.Reactor()
@reactor.handler('in', 'raw', priority=1)
def handle_raw_in(event):
print(event['server'].name, ' ->', escape(event['data']))
@reactor.handler('out', 'raw', priority=1)
def handle_raw_out(event):
print(event['server'].name, '<- ', escape(event['data']))
|
codesearchnet
|
def from_file(cls, path):
with open(path, 'r', errors='replace') as f:
return cls(f.read())
|
Create a text from a file.
Args:
path (str): The file path.
|
codesearchnet
|
def start(self, host, nornir):
self.host = host
self.nornir = nornir
try:
logger.debug("Host %r: running task %r", self.host.name, self.name)
r = self.task(self, **self.params)
if not isinstance(r, Result):
r = Result(host=host, result=r)
except NornirSubTaskError as e:
tb = traceback.format_exc()
logger.error(
"Host %r: task %r failed with traceback:\n%s",
self.host.name,
self.name,
tb,
)
r = Result(host, exception=e, result=str(e), failed=True)
except Exception as e:
tb = traceback.format_exc()
logger.error(
"Host %r: task %r failed with traceback:\n%s",
self.host.name,
self.name,
tb,
)
r = Result(host, exception=e, result=tb, failed=True)
r.name = self.name
r.severity_level = logging.ERROR if r.failed else self.severity_level
self.results.insert(0, r)
return self.results
|
Run the task for the given host.
Arguments:
host (:obj:`nornir.core.inventory.Host`): Host we are operating with. Populated right
before calling the ``task``
nornir(:obj:`nornir.core.Nornir`): Populated right before calling
the ``task``
Returns:
host (:obj:`nornir.core.task.MultiResult`): Results of the task and its subtasks
|
juraj-google-style
|
def query(starttime, endtime, output=None, *filenames):
if (not output):
output = ((((filenames[0].replace('.pcap', '') + starttime.isoformat()) + '-') + endtime.isoformat()) + '.pcap')
else:
output = output
with open(output, 'w') as outfile:
for filename in filenames:
log.info(('pcap.query: processing %s...' % filename))
with open(filename, 'r') as stream:
for (header, packet) in stream:
if (packet is not None):
if ((header.timestamp >= starttime) and (header.timestamp <= endtime)):
outfile.write(packet, header=header)
|
Given a time range and input file, query creates a new file with only
that subset of data. If no outfile name is given, the new file name is the
old file name with the time range appended.
Args:
starttime:
The datetime of the beginning time range to be extracted from the files.
endtime:
The datetime of the end of the time range to be extracted from the files.
output:
Optional: The output file name. Defaults to
[first filename in filenames][starttime]-[endtime].pcap
filenames:
A tuple of one or more file names to extract data from.
|
codesearchnet
|
def check_interactive_docker_worker(link):
errors = []
log.info('Checking for {} {} interactive docker-worker'.format(link.name, link.task_id))
try:
if link.task['payload']['features'].get('interactive'):
errors.append('{} is interactive: task.payload.features.interactive!'.format(link.name))
if link.task['payload']['env'].get('TASKCLUSTER_INTERACTIVE'):
errors.append('{} is interactive: task.payload.env.TASKCLUSTER_INTERACTIVE!'.format(link.name))
except KeyError:
errors.append('check_interactive_docker_worker: {} task definition is malformed!'.format(link.name))
return errors
|
Given a task, make sure the task was not defined as interactive.
* ``task.payload.features.interactive`` must be absent or False.
* ``task.payload.env.TASKCLUSTER_INTERACTIVE`` must be absent or False.
Args:
link (LinkOfTrust): the task link we're checking.
Returns:
list: the list of errors. Success is an empty list.
|
codesearchnet
|
def _expand_json(self, j):
decompressed_json = copy.copy(j)
decompressed_json.pop('blob', None)
compressed_data = base64.b64decode(j['blob'])
original_json = zlib.decompress(compressed_data).decode('utf-8')
decompressed_json['users'] = json.loads(original_json)
return decompressed_json
|
Decompress the BLOB portion of the usernotes.
Arguments:
j: the JSON returned from the wiki page (dict)
Returns a Dict with the 'blob' key removed and a 'users' key added
|
juraj-google-style
|
def _assert_same_graph(original_item, item) -> None:
original_graph = getattr(original_item, 'graph', None)
graph = getattr(item, 'graph', None)
if original_graph and graph and (original_graph is not graph):
raise ValueError('%s must be from the same graph as %s (graphs are %s and %s).' % (item, original_item, graph, original_graph))
|
Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
|
github-repos
|
def list_vmss(access_token, subscription_id, resource_group):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets',
'?api-version=', COMP_API])
return do_get_next(endpoint, access_token)
|
List VM Scale Sets in a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
Returns:
HTTP response. JSON body of a list of scale set model views.
|
juraj-google-style
|
def get_user(self, identified_with, identifier, req, resp, resource, uri_kwargs):
stored_value = self.kv_store.get(self._get_storage_key(identified_with, identifier))
if (stored_value is not None):
user = self.serialization.loads(stored_value.decode())
else:
user = None
return user
|
Get user object for given identifier.
Args:
identified_with (object): authentication middleware used
to identify the user.
identifier: middleware-specific user identifier (string or tuple
in case of all built in authentication middleware classes).
Returns:
dict: user object stored in Redis if it exists, otherwise ``None``
|
codesearchnet
|
def export_as_file(self, file_path, cv_source):
if os.path.exists(file_path):
raise exceptions.UserError('{} already exists'.format(file_path))
with open(file_path, 'wb') as f:
f.write(self.export_as_code(cv_source).encode('utf8'))
|
Export the ensemble as a single Python file and saves it to `file_path`.
This is EXPERIMENTAL as putting different modules together would probably wreak havoc
especially on modules that make heavy use of global variables.
Args:
file_path (str, unicode): Absolute/local path of place to save file in
cv_source (str, unicode): String containing actual code for base learner
cross-validation used to generate secondary meta-features.
|
codesearchnet
|
def add_topic(self, topic):
if (topic in self._topics):
return Future().success(set(self._topics))
self._topics.add(topic)
return self.cluster.request_update()
|
Add a topic to the list of topics tracked via metadata.
Arguments:
topic (str): topic to track
Returns:
Future: resolves after metadata request/response
|
codesearchnet
|
def forward(self, hidden_states: torch.Tensor, prev_group_token: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:
if self.with_group_token:
group_token = self.group_token.expand(hidden_states.size(0), -1, -1)
if self.group_projector is not None:
group_token = group_token + self.group_projector(prev_group_token)
else:
group_token = None
x = hidden_states
cat_x = self.concat_x(x, group_token)
for layer in self.layers:
layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None)
cat_x = layer_out[0]
x, group_token = self.split_x(cat_x)
attention = None
if self.downsample is not None:
x, attention = self.downsample(x, group_token)
outputs = (x, group_token)
if output_attentions:
outputs = outputs + (attention,)
return outputs
|
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
prev_group_token (`torch.FloatTensor`, *optional*): group tokens from the previous stage;
when present they are projected and added to this stage's group tokens.
output_attentions (`bool`, *optional*):
Whether or not to return the grouping tensors of Grouping block.
|
github-repos
|
def get_tensor_layout(self, path):
raise NotImplementedError()
|
Retrieve the `TensorLayout` for the intermediate tensor.
Args:
path: a string path for the corresponding tensor.
Returns:
The `TensorLayout` for the intermediate tensor, which can be used
by `backend.relayout()` to reshard the tensor. Could also return
None.
|
github-repos
|
def __getitem__(self, id):
if not isinstance(id, int):
raise TypeError(id)
return self._map[id]
|
Return the worksheet with the given id.
Args:
id: numeric id of the worksheet
Returns:
WorkSheet: contained worksheet object
Raises:
TypeError: if ``id`` is not an ``int``
KeyError: if the spreadsheet has no worksheet with the given ``id``
|
juraj-google-style
|
def set_distribution(value):
global_state.set_global_attribute(GLOBAL_ATTRIBUTE_NAME, value)
|
Set the distribution as the global distribution setting.
Args:
value: a `Distribution` instance.
|
github-repos
|
def _ExtractExtensionInstallEvents(self, settings_dict, parser_mediator):
for (extension_id, extension) in sorted(settings_dict.items()):
install_time = extension.get('install_time', None)
if (not install_time):
parser_mediator.ProduceExtractionWarning('installation time missing for extension ID {0:s}'.format(extension_id))
continue
try:
install_time = int(install_time, 10)
except ValueError:
parser_mediator.ProduceExtractionWarning('unable to convert installation time for extension ID {0:s}'.format(extension_id))
continue
manifest = extension.get('manifest', None)
if (not manifest):
parser_mediator.ProduceExtractionWarning('manifest missing for extension ID {0:s}'.format(extension_id))
continue
event_data = ChromeExtensionInstallationEventData()
event_data.extension_id = extension_id
event_data.extension_name = manifest.get('name', None)
event_data.path = extension.get('path', None)
date_time = dfdatetime_webkit_time.WebKitTime(timestamp=install_time)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extract extension installation events.
Args:
settings_dict (dict[str: object]): settings data from a Preferences file.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
|
codesearchnet
|
def node_name(self):
return self._node_name
|
Name of the node from which the tensor value was dumped.
Returns:
(`str`) name of the node watched by the debug op.
|
github-repos
|
def modify_site(name, sourcepath=None, apppool=None, preload=None):
site_path = 'IIS:\\Sites\\{0}'.format(name)
current_sites = list_sites()
if (name not in current_sites):
log.debug("Site '%s' not defined.", name)
return False
ps_cmd = list()
if sourcepath:
ps_cmd.extend(['Set-ItemProperty', '-Path', "'{0}'".format(site_path), '-Name', 'PhysicalPath', '-Value', "'{0}'".format(sourcepath)])
if apppool:
if (apppool in list_apppools()):
log.debug('Utilizing pre-existing application pool: %s', apppool)
else:
log.debug('Application pool will be created: %s', apppool)
create_apppool(apppool)
if ps_cmd:
ps_cmd.append(';')
ps_cmd.extend(['Set-ItemProperty', '-Path', "'{0}'".format(site_path), '-Name', 'ApplicationPool', '-Value', "'{0}'".format(apppool)])
if preload:
ps_cmd.extend(['Set-ItemProperty', '-Path', "'{0}'".format(site_path), '-Name', 'applicationDefaults.preloadEnabled', '-Value', '{0};'.format(preload)])
cmd_ret = _srvmgr(ps_cmd)
if (cmd_ret['retcode'] != 0):
msg = 'Unable to modify site: {0}\nError: {1}'.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
log.debug('Site modified successfully: %s', name)
return True
|
Modify a basic website in IIS.
.. versionadded:: 2017.7.0
Args:
name (str): The IIS site name.
sourcepath (str): The physical path of the IIS site.
apppool (str): The name of the IIS application pool.
preload (bool): Whether preloading should be enabled
Returns:
bool: True if successful, otherwise False.
.. note::
If an application pool is specified, and that application pool does not
already exist, it will be created.
CLI Example:
.. code-block:: bash
salt '*' win_iis.modify_site name='My Test Site' sourcepath='c:\\new_path' apppool='NewTestPool' preload=True
|
codesearchnet
|
def aggregate_global_cache(self, global_tt_summary_cache):
agg_fn_map = self._parameters.get_signature_to_agg_fn_map()
signature_idx_map = self._signature_types()
aggregation_result = []
for signature, idx in sorted(signature_idx_map.items(), key=operator.itemgetter(1)):
if signature not in agg_fn_map:
raise RuntimeError('No aggregation function is defined for signature %s.' % signature)
signature_tensor = global_tt_summary_cache[:, :, idx]
agg_fn = agg_fn_map[signature]
agg_tensor = agg_fn(signature_tensor, axis=0)
aggregation_result.append(agg_tensor)
merged_signatures = array_ops_stack.stack(aggregation_result)
transposed_signatures = array_ops.transpose(merged_signatures)
return array_ops.expand_dims(transposed_signatures, axis=0)
|
Merges the given caches on tpu.
Args:
global_tt_summary_cache: The global tensor tracer summary cache tensor
with shape (num_cores, num_traced_tensors, num_traced_signatures). First
dimension corresponds to core_id, where global_tpu_cache_tensor[i]
correspond to the local cache from core-i.
Returns:
An aggregated tf.Tensor.
Raises:
RuntimeError: if there is no aggregate function defined for a signature.
|
github-repos
|
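A NumPy sketch of the shape bookkeeping described above (an illustrative assumption, not the TensorFlow implementation): reduce each signature over the core axis, stack, transpose, and add a leading batch dimension.

import numpy as np

num_cores, num_tensors, num_signatures = 4, 3, 2
cache = np.random.rand(num_cores, num_tensors, num_signatures)
agg_fns = [np.max, np.mean]  # hypothetical: one aggregation function per signature

aggregated = [agg_fns[idx](cache[:, :, idx], axis=0) for idx in range(num_signatures)]
merged = np.stack(aggregated)        # (num_signatures, num_tensors)
result = np.transpose(merged)[None]  # (1, num_tensors, num_signatures)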
def _log_every_n_to_logger(n, logger, level, message, *args):
logger = (logger or logging.getLogger())
def _gen():
while True:
for _ in range(n):
(yield False)
logger.log(level, message, *args)
(yield True)
gen = _gen()
return (lambda : six.next(gen))
|
Logs the given message every n calls to a logger.
Args:
n: Number of calls before logging.
logger: The logger to which to log.
level: The logging level (e.g. logging.INFO).
message: A message to log
*args: Any format args for the message.
Returns:
A method that logs and returns True every n calls.
|
codesearchnet
|
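A hedged usage sketch (assumes _log_every_n_to_logger above is in scope and that `six` is installed):

import logging

logging.basicConfig(level=logging.INFO)
heartbeat = _log_every_n_to_logger(100, None, logging.INFO, "still processing...")
for _ in range(1000):
    heartbeat()  # emits the message roughly once per 100 calls; returns True on those calls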
def _qInstallMessageHandler(handler):
def messageOutputHandler(*args):
if (len(args) == 3):
(msgType, logContext, msg) = args
elif (len(args) == 2):
(msgType, msg) = args
logContext = None
else:
raise TypeError('handler expected 2 or 3 arguments, got {0}'.format(len(args)))
if isinstance(msg, bytes):
msg = msg.decode()
handler(msgType, logContext, msg)
passObject = (messageOutputHandler if handler else handler)
if (Qt.IsPySide or Qt.IsPyQt4):
return Qt._QtCore.qInstallMsgHandler(passObject)
elif (Qt.IsPySide2 or Qt.IsPyQt5):
return Qt._QtCore.qInstallMessageHandler(passObject)
|
Install a message handler that works in all bindings
Args:
handler: A function that takes 3 arguments, or None
|
codesearchnet
|
def log_get(recipe_id=[], timezone='America/Los_Angeles', days=1):
body = {'resourceNames': ['projects/%s' % UI_PROJECT], 'filter': ' logName="projects/%s/logs/StarThinker" AND labels.version="%s" AND labels.layer="JOB" ' % (UI_PROJECT, LOG_VERSION), 'orderBy': 'timestamp desc', 'pageSize': 1000}
if recipe_id:
if isinstance(recipe_id, str):
recipe_id = [recipe_id]
body['filter'] += ' AND ( %s )' % ' OR '.join(('operation.id="%s"' % r for r in recipe_id))
for entry in API_StackDriver(Configuration(service=UI_SERVICE, project=UI_PROJECT), 'service', iterate=True).entries().list(body=body).execute():
yield entry
|
Returns the last actionable job run for a specific recipe or for all recipes.
Pulls status entries from StackDriver in reverse order. A single recipe may
be run multiple times for multiple tasks at different hours, so do not
assume a JOB_END means a recipe is complete. The only way to ensure a recipe
is complete is to compare all tasks run against all tasks in the recipe
(not done by the log code).
Args:
recipe_id (string or list): Optional. If provided, returns records only for the given job(s).
timezone (string): The local timezone to cast all record times into.
Returns:
(iterator): Each log entry.
|
github-repos
|
def __init__(self, parser, *, pytype_single_args=None, overrides=None):
self._parser = parser
self._overrides = overrides or []
self.pytype_single_args = pytype_single_args or {}
|
Initialize a parser.
Args:
parser: An argparse.ArgumentParser or compatible object
pytype_single_args: Args passed to pytype
overrides: Pytype args that the tool overrides (will be put into the tool
args, with the corresponding pytype opts getting their default values)
|
github-repos
|
def optimizer(name):
warn_msg = ("Please update `registry.optimizer` callsite "
"(likely due to a `HParams.optimizer` value)")
if name == "SGD":
name = "sgd"
tf.logging.warning("'SGD' optimizer now keyed by 'sgd'. %s" % warn_msg)
elif name == "RMSProp":
name = "rms_prop"
tf.logging.warning(
"'RMSProp' optimizer now keyed by 'rms_prop'. %s" % warn_msg)
else:
snake_name = misc_utils.camelcase_to_snakecase(name)
if name != snake_name:
tf.logging.warning(
"optimizer names now keyed by snake_case names. %s" % warn_msg)
name = snake_name
return Registries.optimizers[name]
|
Get pre-registered optimizer keyed by name.
`name` should be snake case, though SGD -> sgd, RMSProp -> rms_prop and
UpperCamelCase -> snake_case conversions are included for legacy support.
Args:
name: name of optimizer used in registration. This should be a snake case
identifier, though others supported for legacy reasons.
Returns:
optimizer
|
juraj-google-style
|
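The snake_case normalization mentioned above follows the usual two-regex CamelCase split; the helper below is a standalone approximation (an assumption, not the actual misc_utils.camelcase_to_snakecase implementation):

import re

def camelcase_to_snakecase(name):
    s1 = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", name)
    return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", s1).lower()

# camelcase_to_snakecase("AdamW")    -> "adam_w"
# camelcase_to_snakecase("LazyAdam") -> "lazy_adam"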
def _CountClientStatisticByLabel(self, day_buckets, extract_statistic_fn):
counts = collections.defaultdict(int)
now = rdfvalue.RDFDatetime.Now()
for info in self.IterateAllClientsFullInfo(batch_size=db.MAX_COUNT):
if not info.metadata.ping:
continue
statistic_value = extract_statistic_fn(info)
for client_label in info.GetLabelsNames(owner="GRR"):
for day_bucket in day_buckets:
time_boundary = now - rdfvalue.Duration.FromDays(day_bucket)
if info.metadata.ping > time_boundary:
counts[(statistic_value, client_label, day_bucket)] += 1
return dict(counts)
|
Returns client-activity metrics for a particular statistic.
Args:
day_buckets: A set of n-day-active buckets.
extract_statistic_fn: A function that extracts the statistic's value from
a ClientFullInfo object.
|
juraj-google-style
|
def is_subgroup(self, supergroup):
warnings.warn('This is not fully functional. Only trivial subsets are tested right now. ')
return set(self.symmetry_ops).issubset(supergroup.symmetry_ops)
|
True if this group is a subgroup of the supplied group.
Args:
supergroup (SymmetryGroup): Supergroup to test.
Returns:
True if this group is a subgroup of the supplied group.
|
codesearchnet
|
def FromTimestampToLdap(self, ts):
if self.conf.get('ad'):
t = time.strftime('%Y%m%d%H%M%S.0Z', time.gmtime(ts))
else:
t = time.strftime('%Y%m%d%H%M%SZ', time.gmtime(ts))
return t
|
Transforms nss_cache internal timestamp into a LDAP timestamp.
Args:
ts: number of seconds since epoch
Returns:
LDAP format timestamp string.
|
github-repos
|
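A quick stdlib illustration of the two timestamp formats produced (no nss_cache needed; the sample values are for epoch 0):

import time

ts = 0  # 1970-01-01T00:00:00Z
time.strftime('%Y%m%d%H%M%SZ', time.gmtime(ts))    # generic LDAP:     '19700101000000Z'
time.strftime('%Y%m%d%H%M%S.0Z', time.gmtime(ts))  # Active Directory: '19700101000000.0Z'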
def insert(self, index, value):
if (value in self):
raise ValueError
index = self._fix_neg_index(index)
self._dict[value] = index
for elem in self._list[index:]:
self._dict[elem] += 1
self._list.insert(index, value)
|
Insert value at index.
Args:
index (int): Index to insert value at
value: Value to insert
Raises:
ValueError: If value already in self
IndexError: If index is out of range
|
codesearchnet
|
def check_coordinates(chromosome, pos, coordinates):
chrom_match = CHR_PATTERN.match(chromosome)
chrom = chrom_match.group(2)
if chrom != coordinates['chrom']:
return False
if (pos >= coordinates['start'] and pos <= coordinates['end']):
return True
return False
|
Check if the variant is in the interval given by the coordinates
Args:
chromosome(str): Variant chromosome
pos(int): Variant position
coordinates(dict): Dictionary with the region of interest
|
juraj-google-style
|
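A hedged illustration of the interval check; the regex below is a stand-in assumption for the module-level CHR_PATTERN (which appears to strip an optional "chr" prefix):

import re

CHR_PATTERN = re.compile(r"(chr)?(.+)", re.IGNORECASE)

def in_region(chromosome, pos, coordinates):
    chrom = CHR_PATTERN.match(chromosome).group(2)
    return chrom == coordinates["chrom"] and coordinates["start"] <= pos <= coordinates["end"]

# in_region("chr1", 1200000, {"chrom": "1", "start": 1000000, "end": 2000000}) -> True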
def get_ip_address(domain):
if ":
domain = "http:
hostname = urlparse(domain).netloc
if not hostname:
raise ValueError("Can't parse hostname!")
return socket.gethostbyname(hostname)
|
Get IP address for given `domain`. Try to do smart parsing.
Args:
domain (str): Domain or URL.
Returns:
str: IP address.
Raises:
ValueError: If can't parse the domain.
|
juraj-google-style
|
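A hedged usage sketch (assumes get_ip_address above is in scope, network access, and working DNS; the returned address is whatever the resolver gives):

get_ip_address("example.com")          # scheme is prepended automatically before parsing
get_ip_address("https://example.com")  # already has a scheme, used as-is
get_ip_address("")                     # raises ValueError: can't parse hostname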
def update(self, forecasts, observations):
if len(observations.shape) == 1:
obs_cdfs = np.zeros((observations.size, self.thresholds.size))
for o, observation in enumerate(observations):
obs_cdfs[o, self.thresholds >= observation] = 1
else:
obs_cdfs = observations
self.errors["F_2"] += np.sum(forecasts ** 2, axis=0)
self.errors["F_O"] += np.sum(forecasts * obs_cdfs, axis=0)
self.errors["O_2"] += np.sum(obs_cdfs ** 2, axis=0)
self.errors["O"] += np.sum(obs_cdfs, axis=0)
self.num_forecasts += forecasts.shape[0]
|
Update the statistics with forecasts and observations.
Args:
forecasts: The discrete cumulative distribution functions of the forecasts,
evaluated at each threshold.
observations: The observed values, or their discrete cumulative
distribution functions over the same thresholds.
|
juraj-google-style
|