code (string, lengths 20–4.93k) | docstring (string, lengths 33–1.27k) | source (3 classes) |
---|---|---|
def update_container(self, container, blkio_weight=None, cpu_period=None, cpu_quota=None, cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None, mem_reservation=None, memswap_limit=None, kernel_memory=None, restart_policy=None):
url = self._url('/containers/{0}/update', container)
data = {}
if blkio_weight:
data['BlkioWeight'] = blkio_weight
if cpu_period:
data['CpuPeriod'] = cpu_period
if cpu_shares:
data['CpuShares'] = cpu_shares
if cpu_quota:
data['CpuQuota'] = cpu_quota
if cpuset_cpus:
data['CpusetCpus'] = cpuset_cpus
if cpuset_mems:
data['CpusetMems'] = cpuset_mems
if mem_limit:
data['Memory'] = utils.parse_bytes(mem_limit)
if mem_reservation:
data['MemoryReservation'] = utils.parse_bytes(mem_reservation)
if memswap_limit:
data['MemorySwap'] = utils.parse_bytes(memswap_limit)
if kernel_memory:
data['KernelMemory'] = utils.parse_bytes(kernel_memory)
if restart_policy:
if utils.version_lt(self._version, '1.23'):
raise errors.InvalidVersion('restart policy update is not supported for API version < 1.23')
data['RestartPolicy'] = restart_policy
res = self._post_json(url, data=data)
return self._result(res, True)
|
Update resource configs of one or more containers.
Args:
container (str): The container to update
blkio_weight (int): Block IO (relative weight), between 10 and 1000
cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution
mem_limit (int or str): Memory limit
mem_reservation (int or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap
kernel_memory (int or str): Kernel memory limit
restart_policy (dict): Restart policy dictionary
Returns:
(dict): Dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
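A brief usage sketch for the method above; the container name, limits, and socket path are illustrative assumptions, not values from the source.
import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
# Raise the memory ceiling, pin to two CPUs, and set a restart policy;
# the call returns a dict that may contain a 'Warnings' key.
result = client.update_container(
    'my_container',
    mem_limit='1g',
    cpuset_cpus='0,1',
    restart_policy={'Name': 'on-failure', 'MaximumRetryCount': 3},
)
print(result.get('Warnings'))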
def __get_scope(cls, expr: Union['Expression', Tuple]) -> Set[str]:
scope = set()
for (i, atom) in enumerate(expr):
if isinstance(atom, Expression):
scope.update(cls.__get_scope(atom._expr))
elif (type(atom) in [tuple, list]):
scope.update(cls.__get_scope(atom))
elif (atom == 'pvar_expr'):
(functor, params) = expr[(i + 1)]
arity = (len(params) if (params is not None) else 0)
name = '{}/{}'.format(functor, arity)
scope.add(name)
break
return scope
|
Returns the set of fluents in the expression's scope.
Args:
expr: Expression object or nested tuple of Expressions.
Returns:
The set of fluents in the expression's scope.
|
codesearchnet
|
def _lease_owned(self, lease, current_uuid_path):
prev_uuid_path, prev_uuid = lease.metadata
with open(current_uuid_path) as f:
current_uuid = f.read()
return \
current_uuid_path == prev_uuid_path and \
prev_uuid == current_uuid
|
Checks if the given lease is owned by the prefix whose uuid is in
the given path
Note:
The prefix must also be in the same path it was in when it took the
lease
Args:
lease: Lease to check ownership of
current_uuid_path (str): Path to the uuid to check ownership of
Returns:
bool: ``True`` if the given lease is owned by the prefix,
``False`` otherwise
|
juraj-google-style
|
def links(res: requests.models.Response,
search: str = None,
pattern: str = None) -> list:
hrefs = [link.to_text() for link in find_all_links(res.text)]
if search:
hrefs = [href for href in hrefs if search in href]
if pattern:
hrefs = [href for href in hrefs if re.findall(pattern, href)]
return list(set(hrefs))
|
Get the links of the page.
Args:
res (requests.models.Response): The response of the page.
search (str, optional): Defaults to None. Keep only links containing this substring.
pattern (str, optional): Defaults to None. Keep only links matching this regex pattern.
Returns:
list: All the links of the page.
|
juraj-google-style
|
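A hedged usage sketch; it assumes the `requests` package and that this function's module already provides `re` and `find_all_links`:
import requests

res = requests.get('https://example.com')
doc_links = links(res, search='docs')        # hrefs containing the substring 'docs'
pdf_links = links(res, pattern=r'\.pdf$')    # hrefs matching a regex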
def _pycurl_post(self, url, json=None, data=None, username='', password='', headers=None, timeout=30):
headers = dict(headers) if headers else {}
response_headers = {}
curl = pycurl.Curl()
curl.setopt(curl.URL, url)
if (sys.version_info[0] >= 3):
stringbuffer = BytesIO()
else:
stringbuffer = StringIO()
curl.setopt(curl.WRITEDATA, stringbuffer)
headers['User-Agent'] = self.user_agent
if (sys.version_info[0] >= 3):
header_list = [('%s:%s' % (k, v)) for (k, v) in headers.items()]
else:
header_list = [('%s:%s' % (k, v)) for (k, v) in headers.iteritems()]
if json:
header_list.append('Content-Type:application/json')
curl.setopt(pycurl.HTTPHEADER, header_list)
raw_store = json
raw_request = (json_lib.dumps(json) if json else urlencode(data))
curl.setopt(curl.POSTFIELDS, raw_request)
if (username and password):
curl.setopt(curl.USERPWD, ('%s:%s' % (username, password)))
curl.setopt(curl.TIMEOUT, timeout)
curl.perform()
result = stringbuffer.getvalue()
status_code = curl.getinfo(curl.RESPONSE_CODE)
curl.close()
raw_request = raw_store
return (result, raw_request, status_code, response_headers)
|
This function will POST to the url endpoint using pycurl, returning
the raw result on a 200 HTTP response. Either json or data has to
be provided. If username and password are provided, basic auth will
be used.
Args:
url (str): url to send the POST
json (dict, optional): Dict of the JSON to POST
data (dict, optional): Dict, presumed flat structure
of key/value of request to place
username (str, optional): Username for basic auth. Must be provided
together with password.
password (str, optional): Password for basic auth. Must be provided
together with username.
headers (dict, optional): Key/Value pairs of headers to include
timeout (int, optional): Default 30. Timeout for the request.
Returns:
str: Raw response received
str: Raw request placed
int: HTTP status code, eg 200,404,401
dict: Key/Value pairs of the headers received.
|
codesearchnet
|
def load_pickle(file, encoding=None):
if encoding:
with open(file, 'rb') as f:
return pickle.load(f, encoding=encoding)
with open(file, 'rb') as f:
return pickle.load(f)
|
Load a pickle file.
Args:
file (str): Path to pickle file
encoding (str, optional): Encoding passed through to ``pickle.load``
(useful for loading Python 2 pickles under Python 3)
Returns:
object: Loaded object from pickle file
|
juraj-google-style
|
def delete_contexts(self, context_id_list):
for c_id in context_id_list:
if c_id in self._contexts:
del self._contexts[c_id]
|
Delete contexts from the ContextManager.
Args:
context_id_list (list): a list of context ids
Returns:
None
|
juraj-google-style
|
def _save_function_alias(saved_model_dir: str, tags: Collection[str], function_aliases: Mapping[str, str]) -> None:
loader = saved_model_loader.SavedModelLoader(saved_model_dir)
meta_graph_def = loader.get_meta_graph_def_from_tags(tags)
for function_name, function_alias in function_aliases.items():
meta_graph_def.meta_info_def.function_aliases[function_name] = function_alias
saved_model_proto_serialized = loader.saved_model.SerializeToString()
path = file_io.join(saved_model_dir, saved_model_constants.SAVED_MODEL_FILENAME_PB)
file_io.atomic_write_string_to_file(path, saved_model_proto_serialized)
|
Saves the function alias to the SavedModel.
SavedModelBuilder (TF1 saved model saver) does not support saving function
aliases, so this function loads the SavedModel proto and adds the
`function_aliases` field.
Args:
saved_model_dir: Path to the saved model directory.
tags: A collection of tags to specify the meta graph.
function_aliases: Function name -> function alias mapping.
|
github-repos
|
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
if end_pos >= 0 and matched.group(1) == 'if':
opening_linenum = end_linenum
opening_line_fragment = end_line[end_pos:]
while not Search(r'^\s*\{', opening_line_fragment):
if Search(r'^(?!\s*$)', opening_line_fragment):
return
opening_linenum += 1
if opening_linenum == len(clean_lines.elided):
return
opening_line_fragment = clean_lines.elided[opening_linenum]
opening_line = clean_lines.elided[opening_linenum]
opening_pos = opening_line_fragment.find('{')
if opening_linenum == end_linenum:
opening_pos += end_pos
(closing_line, closing_linenum, closing_pos) = CloseExpression(
clean_lines, opening_linenum, opening_pos)
if closing_pos < 0:
return
if (clean_lines.raw_lines[opening_linenum] !=
CleanseComments(clean_lines.raw_lines[opening_linenum])):
return
if closing_linenum > opening_linenum:
bodylist = list(opening_line[opening_pos+1:])
bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
body = '\n'.join(bodylist)
else:
body = opening_line[opening_pos+1:closing_pos-1]
if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
return
current_linenum = closing_linenum
current_line_fragment = closing_line[closing_pos:]
while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
if Search(r'^(?=\s*else)', current_line_fragment):
return
current_linenum += 1
if current_linenum == len(clean_lines.elided):
break
current_line_fragment = clean_lines.elided[current_linenum]
error(filename, end_linenum, 'whitespace/empty_if_body', 4,
('If statement had no body and no else clause'))
|
Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
juraj-google-style
|
def is_clockwise(vertices):
it = iterator.consecutive(cycle(vertices), 3)
clockwise = 0
counter = 0
for _ in range(len(vertices)):
p0, p1, p2 = next(it)
cross = cross_product(p1, p2, p0)
int_angle = interior_angle(p0, p2, p1)
if cross < 0:
clockwise += int_angle
counter += 2 * pi - int_angle
else:
clockwise += 2 * pi - int_angle
counter += int_angle
if round(clockwise / pi) == len(vertices) - 2:
return True
elif round(counter / pi) == len(vertices) - 2:
return False
else:
raise ValueError("the polygon is complex or overlapped")
|
Evaluate whether vertices are in clockwise order.
Args:
vertices: list of vertices (x, y) in polygon.
Returns:
True: clockwise, False: counter-clockwise
Raises:
ValueError: the polygon is complex or overlapped.
|
juraj-google-style
|
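A small worked example, assuming the module's geometry helpers (`cross_product`, `interior_angle`, `iterator.consecutive`) are available; the expected results assume a y-up coordinate system:
square_cw = [(0, 0), (0, 1), (1, 1), (1, 0)]   # traced clockwise
square_ccw = list(reversed(square_cw))         # same square, opposite winding
print(is_clockwise(square_cw))    # expected: True
print(is_clockwise(square_ccw))   # expected: False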
def make_edge_vectors(adjacency_matrix, num_edge_types, depth, name=None):
with tf.variable_scope(name, default_name='edge_vectors'):
att_adj_vectors_shape = [num_edge_types, depth]
adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix)
adj_vectors = (tf.get_variable('adj_vectors', att_adj_vectors_shape, initializer=tf.random_normal_initializer(0, (depth ** (- 0.5)))) * (depth ** 0.5))
adjacency_matrix_one_hot = tf.one_hot(adjacency_matrix, num_edge_types)
att_adj_vectors = tf.matmul(tf.reshape(tf.to_float(adjacency_matrix_one_hot), [(- 1), num_edge_types]), adj_vectors)
return tf.reshape(att_adj_vectors, [adjacency_matrix_shape[0], adjacency_matrix_shape[1], adjacency_matrix_shape[2], depth])
|
Gets edge vectors for the edge types in the adjacency matrix.
Args:
adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints.
num_edge_types: Number of different edge types
depth: Number of channels
name: a string
Returns:
A [batch, num_nodes, num_nodes, depth] vector of tensors
|
codesearchnet
|
def ms_bot_framework(self) -> dict:
card_action = {}
card_action['type'] = 'postBack'
card_action['title'] = self.name
card_action['value'] = self.callback
return card_action
|
Returns MS Bot Framework compatible state of the Button instance.
Creates MS Bot Framework CardAction (button) with postBack value return.
Returns:
control_json: MS Bot Framework representation of Button state.
|
codesearchnet
|
def _combine_named_parameters(**kwargs) -> list[OrderedDict[str, Any]]:
sort_by_key = lambda k: k[0]
combinations: list[list[tuple[str, Any]]] = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
|
Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
|
github-repos
|
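For illustration, a call with one list-valued and one scalar keyword argument expands into the cross product of the options:
combos = _combine_named_parameters(mode=['eager', 'graph'], use_gpu=True)
# [OrderedDict([('mode', 'eager'), ('use_gpu', True)]),
#  OrderedDict([('mode', 'graph'), ('use_gpu', True)])]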
def copy_foreign_keys(self, event):
event_keys = set(event._meta.fields.keys())
obj_keys = self._meta.fields.keys()
matching_keys = event_keys.intersection(obj_keys)
for key in matching_keys:
if key == 'created_by':
continue
if not isinstance(self._meta.fields[key], peewee.ForeignKeyField):
continue
setattr(event, key, getattr(self, key))
possible_key = self.__class__.__name__.lower()
if possible_key in event_keys and event.code != 'AUDIT_DELETE':
setattr(event, possible_key, self)
|
Copies possible foreign key values from the object into the Event,
skipping common keys like modified and created.
Args:
event (Event): The Event instance to copy the FKs into
obj (fleaker.db.Model): The object to pull the values from
|
juraj-google-style
|
def _draw_breakpoint_icon(self, top, painter, icon_name):
rect = QRect(0, top, self.sizeHint().width(),
self.sizeHint().height())
try:
icon = self.icons[icon_name]
except KeyError as e:
debug_print("Breakpoint icon doen't exist, {}".format(e))
else:
icon.paint(painter, rect)
|
Draw the given breakpoint pixmap.
Args:
top (int): top of the line to draw the breakpoint icon.
painter (QPainter)
icon_name (str): key of icon to draw (see: self.icons)
|
juraj-google-style
|
def MakePmfFromHist(hist, name=None):
if (name is None):
name = hist.name
d = dict(hist.GetDict())
pmf = Pmf(d, name)
pmf.Normalize()
return pmf
|
Makes a normalized PMF from a Hist object.
Args:
hist: Hist object
name: string name
Returns:
Pmf object
|
codesearchnet
|
def most_specific_common_supertype(self, others):
if not all((isinstance(other, TensorArraySpec) for other in others)):
return None
common_shape = self._element_shape.most_specific_common_supertype((other._element_shape for other in others))
if common_shape is None:
return None
if not all((self._dtype == other._dtype for other in others)):
return None
if not all((self._dynamic_size == other._dynamic_size for other in others)):
return None
infer_shape = self._infer_shape and all((other._infer_shape for other in others))
return TensorArraySpec(common_shape, self._dtype, self._dynamic_size, infer_shape)
|
Returns the most specific supertype of `self` and `others`.
Args:
others: A Sequence of `TypeSpec`.
Returns `None` if a supertype does not exist.
|
github-repos
|
def imag(self):
def im(val):
if hasattr(val, 'imag'):
return val.imag
elif hasattr(val, 'as_real_imag'):
return val.as_real_imag()[1]
elif hasattr(val, 'conjugate'):
return ((val.conjugate() - val) / (2 * I))
else:
raise NoConjugateMatrix(('Matrix entry %s has no defined conjugate' % str(val)))
return self.element_wise(im)
|
Element-wise imaginary part
Raises:
NoConjugateMatrix: if entries have no `conjugate` method and no
other way to determine the imaginary part
Note:
A mathematically equivalent way to obtain an imaginary matrix from
a complex matrix ``M`` is::
(M.conjugate() - M) / (I * 2)
with the same caveats as :attr:`real`.
|
codesearchnet
|
def get(self):
return self._diff_median_tracker.get()
|
Retrieves the current MAD value.
Returns:
float: The MAD of the values within the defined window. Returns `NaN` if
the window is empty.
|
github-repos
|
def convert_dict_to_compatible_tensor(values, targets):
result = {}
for key, value in sorted(values.items()):
result[key] = _convert_to_compatible_tensor(
value, targets[key], error_prefix="Can't convert %r" % key)
return result
|
Converts dict `values` in tensors that are compatible with `targets`.
Args:
values: A dict of objects to convert, with the same keys as `targets`.
targets: A dict returned by `parse_tensor_info_map`.
Returns:
A map with the same keys as `values` but values converted into
Tensor/SparseTensors that can be fed into `protomap`.
Raises:
TypeError: If it fails to convert.
|
juraj-google-style
|
def pad_image(self, image: 'torch.Tensor', size: SizeDict, random_padding: bool=False) -> 'torch.Tensor':
output_height, output_width = (size.height, size.width)
input_height, input_width = image.shape[-2:]
delta_width = output_width - input_width
delta_height = output_height - input_height
if random_padding:
pad_top = torch.randint(low=0, high=delta_height + 1, size=(1,)).item()
pad_left = torch.randint(low=0, high=delta_width + 1, size=(1,)).item()
else:
pad_top = delta_height
pad_left = delta_width
pad_bottom = delta_height - pad_top
pad_right = delta_width - pad_left
padding = (pad_left, pad_top, pad_right, pad_bottom)
return F.pad(image, padding)
|
Pad the image to the specified size.
Args:
image (`torch.Tensor`):
The image to be padded.
size (`Dict[str, int]`):
The size `{"height": h, "width": w}` to pad the image to.
random_padding (`bool`, *optional*, defaults to `False`):
Whether to use random padding or not.
data_format (`str` or `ChannelDimension`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
|
github-repos
|
def resolve_variables(self, provided_variables):
self.resolved_variables = {}
variable_dict = dict((var.name, var) for var in provided_variables)
for var_name, _var_def in variable_dict.items():
value = resolve_variable(
variable_dict.get(var_name),
self.name
)
if value is not None:
self.resolved_variables[var_name] = value
defined_variables = self.get_parameter_definitions()
self.resolved_variables = {}
variable_dict = dict((var.name, var) for var in provided_variables)
for var_name, _var_def in defined_variables.items():
value = resolve_variable(
variable_dict.get(var_name),
self.name
)
if value is not None:
self.resolved_variables[var_name] = value
|
Resolve the values of the blueprint variables.
This will resolve the values of the template parameters with values
from the env file, the config, and any lookups resolved. The
resolution is run twice, in case the blueprint is jinja2 templated
and requires provided variables to render.
Args:
provided_variables (list of :class:`stacker.variables.Variable`):
list of provided variables
|
juraj-google-style
|
def _Extract(
self, source_path_specs, destination_path, output_writer,
skip_duplicates=True):
output_writer.Write('Extracting file entries.\n')
path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(
source_path_specs, resolver_context=self._resolver_context)
for path_spec in path_spec_generator:
self._ExtractFileEntry(
path_spec, destination_path, output_writer,
skip_duplicates=skip_duplicates)
|
Extracts files.
Args:
source_path_specs (list[dfvfs.PathSpec]): path specifications to extract.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
|
juraj-google-style
|
def reset(self, indices, observations):
assert isinstance(indices, np.ndarray)
assert (len(indices.shape) == 1)
assert isinstance(observations, np.ndarray)
assert (indices.shape[0] == observations.shape[0])
for (index, observation) in zip(indices, observations):
trajectory = self._trajectories[index]
if (not trajectory.is_active):
trajectory.add_time_step(observation=observation)
continue
self._complete_trajectory(trajectory, index)
self._trajectories[index].add_time_step(observation=observation)
|
Resets trajectories at given indices and populates observations.
Reset can either be called right at the beginning, when there are no
time-steps, or to reset a currently active trajectory.
If resetting a currently active trajectory then we save it in
self._completed_trajectories.
Args:
indices: 1-D np.ndarray stating the indices to reset.
observations: np.ndarray of shape (indices len, obs.shape) of observations
|
codesearchnet
|
def find_yang_file(profile, filename, path):
module_dir = os.path.dirname(__file__)
full_path = os.path.join(module_dir, 'mappings', profile, path, filename)
if os.path.exists(full_path):
return full_path
else:
msg = "Couldn't find parsing file: {}".format(full_path)
logger.error(msg)
raise IOError(msg)
|
Find the necessary file for the given test case.
Args:
profile(str): which profile directory to search under
filename(str): file to find
path(str): where to find it relative to where the module is installed
|
codesearchnet
|
def _ConvertFieldValuePair(js, message):
names = []
message_descriptor = message.DESCRIPTOR
for name in js:
try:
field = message_descriptor.fields_by_camelcase_name.get(name, None)
if not field:
raise ParseError(
'Message type "{0}" has no field named "{1}".'.format(
message_descriptor.full_name, name))
if name in names:
raise ParseError(
'Message type "{0}" should not have multiple "{1}" fields.'.format(
message.DESCRIPTOR.full_name, name))
names.append(name)
if field.containing_oneof is not None:
oneof_name = field.containing_oneof.name
if oneof_name in names:
raise ParseError('Message type "{0}" should not have multiple "{1}" '
'oneof fields.'.format(
message.DESCRIPTOR.full_name, oneof_name))
names.append(oneof_name)
value = js[name]
if value is None:
message.ClearField(field.name)
continue
if _IsMapEntry(field):
message.ClearField(field.name)
_ConvertMapFieldValue(value, message, field)
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
message.ClearField(field.name)
if not isinstance(value, list):
raise ParseError('repeated field {0} must be in [] which is '
'{1}.'.format(name, value))
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
for item in value:
sub_message = getattr(message, field.name).add()
if (item is None and
sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'):
raise ParseError('null is not allowed to be used as an element'
' in a repeated field.')
_ConvertMessage(item, sub_message)
else:
for item in value:
if item is None:
raise ParseError('null is not allowed to be used as an element'
' in a repeated field.')
getattr(message, field.name).append(
_ConvertScalarFieldValue(item, field))
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
sub_message = getattr(message, field.name)
_ConvertMessage(value, sub_message)
else:
setattr(message, field.name, _ConvertScalarFieldValue(value, field))
except ParseError as e:
if field and field.containing_oneof is None:
raise ParseError('Failed to parse {0} field: {1}'.format(name, e))
else:
raise ParseError(str(e))
except ValueError as e:
raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
except TypeError as e:
raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
|
Convert field value pairs into regular message.
Args:
js: A JSON object to convert the field value pairs.
message: A regular protocol message to record the data.
Raises:
ParseError: In case of problems converting.
|
juraj-google-style
|
def resolve_workdir_path(cls, start_path=os.curdir):
if start_path == 'auto':
start_path = os.curdir
cur_path = start_path
LOGGER.debug(
'Checking if %s is a workdir',
os.path.abspath(cur_path),
)
if cls.is_workdir(cur_path):
return os.path.abspath(cur_path)
cur_path = os.path.join(start_path, '.lago')
while not cls.is_workdir(cur_path):
LOGGER.debug('%s is not a workdir', cur_path)
cur_path = os.path.normpath(
os.path.join(cur_path, '..', '..', '.lago')
)
LOGGER.debug('Checking %s for a workdir', cur_path)
if os.path.realpath(os.path.join(cur_path, '..')) == '/':
candidates = []
for path in os.listdir(os.curdir):
if os.path.isdir(path):
dirs = os.listdir(path)
if 'current' in dirs:
candidates.append(
os.path.abspath(os.path.join(os.curdir, path))
)
elif '.lago' in dirs:
candidates.append(
os.path.abspath(
os.path.join(os.curdir, path, '.lago')
)
)
candidates = filter(Workdir.is_possible_workdir, candidates)
for idx in range(len(candidates)):
if os.path.split(candidates[idx])[1] == '.lago':
candidates[idx] = os.path.dirname(candidates[idx])
msg = 'Unable to find workdir in {0}'.format(
os.path.abspath(start_path)
)
if candidates:
msg += '\nFound possible workdirs in: {0}'.format(
', '.join(candidates)
)
raise LagoUserException(msg)
return os.path.abspath(cur_path)
|
Look for an existing workdir in the given path, in a path/.lago dir,
or in a .lago dir under any of its parent directories
Args:
start_path (str): path to start the search from, if None passed, it
will use the current dir
Returns:
str: path to the found prefix
Raises:
LagoUserException: if no prefix was found
|
juraj-google-style
|
def save_image(byteio, imgfmt):
from os import path, mkdir
ptdir = '{}.{}'.format(project, task)
uuid = str(uuid4())
idir = path.join(dbdir, ptdir)
if (not path.isdir(idir)):
mkdir(idir)
ipath = path.join(idir, '{}.{}'.format(uuid, imgfmt))
with open(ipath, 'wb') as f:
f.write(byteio)
return uuid
|
Saves the specified image to disk.
Args:
byteio (bytes): image bytes to save to disk.
imgfmt (str): used as the extension of the saved file.
Returns:
str: a uuid for the saved image that can be added to the database entry.
|
codesearchnet
|
def get_service_credentials(pipeline_options):
return _Credentials.get_service_credentials(pipeline_options)
|
For internal use only; no backwards-compatibility guarantees.
Get credentials to access Azure services.
Args:
pipeline_options: Pipeline options, used in creating credentials
like managed identity credentials.
Returns:
A ``azure.identity.*Credential`` object or None if credentials
not found. Returned object is thread-safe.
|
github-repos
|
def filepath(self):
if hasattr(self, 'local_path'):
return self.local_path
if (self.scheme in ['ftp', 'http', 'https', 'globus']):
return self.filename
elif (self.scheme in ['file']):
return self.path
else:
raise Exception('Cannot return filepath for unknown scheme {}'.format(self.scheme))
|
Return the resolved filepath on the side where it is called from.
The appropriate filepath will be returned when called from within
an app running remotely as well as regular python on the client side.
Args:
- self
Returns:
- filepath (string)
|
codesearchnet
|
def rules(self):
list_of_rules = []
for main_row in self.dict_rules:
if ('rules' in main_row):
for rule_row in main_row['rules']:
if ('grants' in rule_row):
for grant_row in rule_row['grants']:
if ('group_id' in grant_row):
group_id = grant_row['group_id']
if ('name' in grant_row):
row_name = grant_row['name']
else:
row_name = None
fr = FirewallRule(main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port'], rules_grants_group_id=group_id, rules_grants_name=row_name, rules_description=grant_row['description'])
list_of_rules.append(fr)
elif ('cidr_ip' in grant_row):
fr = FirewallRule(main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port'], rules_grants_cidr_ip=grant_row['cidr_ip'], rules_description=grant_row['description'])
list_of_rules.append(fr)
else:
raise ValueError('Unsupported grant:', grant_row)
else:
fr = FirewallRule(main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port'])
list_of_rules.append(fr)
else:
fr = FirewallRule(main_row['id'], main_row['name'], main_row['description'])
list_of_rules.append(fr)
sorted_list = sorted(list_of_rules, key=(lambda fr: (str(fr.id), str(fr.name), str(fr.description), str(fr.rules_direction), str(fr.rules_ip_protocol), str(fr.rules_from_port), str(fr.rules_to_port), str(fr.rules_grants_group_id), str(fr.rules_grants_name), str(fr.rules_grants_cidr_ip))))
return sorted_list
|
Returns a sorted list of firewall rules.
Returns:
list
|
codesearchnet
|
def metadata(self, path):
try:
file_metadata = self._gcsIO()._status(path)
return FileMetadata(path, file_metadata['size'], file_metadata['updated'])
except Exception as e:
raise BeamIOError('Metadata operation failed', {path: e})
|
Fetch metadata fields of a file on the FileSystem.
Args:
path: string path of a file.
Returns:
:class:`~apache_beam.io.filesystem.FileMetadata`.
Raises:
``BeamIOError``: if path isn't a file or doesn't exist.
|
github-repos
|
def get_existing_test_names(self):
test_names = []
for name, _ in inspect.getmembers(type(self), callable):
if name.startswith('test_'):
test_names.append(name)
return test_names + list(self._generated_test_table.keys())
|
Gets the names of existing tests in the class.
A method in the class is considered a test if its name starts with
'test_*'.
Note this only gets the names of tests that already exist. If
`generate_tests` has not happened when this was called, the
generated tests won't be listed.
Returns:
A list of strings, each is a test method name.
|
github-repos
|
def save_lines(lines, filename):
with open(filename, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
|
Save an array of lines to a file.
Args:
lines: An array of strings that will be saved as individual lines.
filename: Path to the output file.
|
codesearchnet
|
def _ParseValueData(self, knowledge_base, value_data):
if not isinstance(value_data, py2to3.UNICODE_TYPE):
raise errors.PreProcessFail(
'Unsupported Windows Registry value type: {0:s} for '
'artifact: {1:s}.'.format(
type(value_data), self.ARTIFACT_DEFINITION_NAME))
if not knowledge_base.GetValue('operating_system_product'):
knowledge_base.SetValue('operating_system_product', value_data)
|
Parses Windows Registry value data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
value_data (object): Windows Registry value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
|
juraj-google-style
|
def sequential_spherical(xyz):
d_xyz = np.diff(xyz,axis=0)
r = np.linalg.norm(d_xyz,axis=1)
theta = np.arctan2(d_xyz[:,1], d_xyz[:,0])
hyp = d_xyz[:,0]**2 + d_xyz[:,1]**2
phi = np.arctan2(np.sqrt(hyp), d_xyz[:,2])
return (r,theta,phi)
|
Converts sequence of cartesian coordinates into a sequence of
line segments defined by spherical coordinates.
Args:
xyz = 2d numpy array, each row specifies a point in
cartesian coordinates (x,y,z) tracing out a
path in 3D space.
Returns:
r = lengths of each line segment (1D array)
theta = angles of line segments in XY plane (1D array)
phi = angles of line segments down from Z axis (1D array)
|
juraj-google-style
|
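A short worked example of the conversion, using a two-segment path along the x and then y axis:
import numpy as np

path = np.array([[0.0, 0.0, 0.0],
                 [1.0, 0.0, 0.0],
                 [1.0, 1.0, 0.0]])
r, theta, phi = sequential_spherical(path)
# r     -> [1., 1.]       both segments have unit length
# theta -> [0., pi/2]     first segment along +x, second along +y
# phi   -> [pi/2, pi/2]   both segments lie in the XY plane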
def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):
if topology_only:
self.points = drp(self.points, eps)
else:
self.points = spt(self.points, max_dist_error, max_speed_error)
return self
|
In-place segment simplification
See `drp` and `compression` modules
Args:
eps (float): Distance threshold for the `drp` function
max_dist_error (float): Max distance error, in meters
max_speed_error (float): Max speed error, in km/h
topology_only (bool, optional): True to only keep topology, not considering
times when simplifying. Defaults to False.
Returns:
:obj:`Segment`
|
juraj-google-style
|
def simple_lmdb_settings(path, map_size=1000000000.0, user_supplied_id=False):
def decorator(cls):
provider = (ff.UserSpecifiedIdProvider(key='_id') if user_supplied_id else ff.UuidProvider())
class Settings(ff.PersistenceSettings):
id_provider = provider
key_builder = ff.StringDelimitedKeyBuilder('|')
database = ff.LmdbDatabase(path, key_builder=key_builder, map_size=map_size)
class Model(cls, Settings):
pass
Model.__name__ = cls.__name__
Model.__module__ = cls.__module__
return Model
return decorator
|
Creates a decorator that can be used to configure sane default LMDB
persistence settings for a model
Args:
path (str): The path where the LMDB database files will be created
map_size (int): The amount of space to allot for the database
|
codesearchnet
|
def jwt_is_expired(self, access_token=None, leeway=0):
if access_token is not None:
exp = self._decode_exp(access_token)
else:
exp = self.jwt_exp
now = time()
if exp < (now - leeway):
return True
return False
|
Validate JWT access token expiration.
Args:
access_token (str): Access token to validate. Defaults to ``None``.
leeway (float): Time in seconds to adjust for local clock skew. Defaults to 0.
Returns:
bool: ``True`` if expired, otherwise ``False``.
|
juraj-google-style
|
def convert(cls, content, input_format, output_format):
assert (input_format in ('srt', 'sjson'))
assert (output_format in ('srt', 'sjson'))
content = content.decode('utf-8-sig')
if (input_format == output_format):
return content
if (input_format == 'srt'):
if (output_format == 'sjson'):
try:
srt_subs = SubRipFile.from_string(content, error_handling=SubRipFile.ERROR_RAISE)
except Error as ex:
raise TranscriptsGenerationException(text_type(ex))
return json.dumps(cls.generate_sjson_from_srt(srt_subs))
if (input_format == 'sjson'):
if (output_format == 'srt'):
return cls.generate_srt_from_sjson(json.loads(content))
|
Convert transcript `content` from `input_format` to `output_format`.
Arguments:
content: Transcript content byte-stream.
input_format: Input transcript format.
output_format: Output transcript format.
Accepted input formats: sjson, srt.
Accepted output format: srt, sjson.
Raises:
TranscriptsGenerationException: On parsing the invalid srt
content during conversion from srt to sjson.
|
codesearchnet
|
def optimize(node):
node = dead_code_elimination(node)
node = constant_folding(node)
node = assignment_propagation(node)
return node
|
Perform a series of optimization passes.
This function performs a series of optimizations (dead code elimination,
constant folding, variable folding) on the given AST.
It optimizes the code repeatedly until reaching a fixed point. The fixed
point is determined roughly by checking whether the number of lines of
generated source code changed after the latest pass.
Args:
node: The AST to optimize.
Returns:
The optimized AST.
|
juraj-google-style
|
def _add_genotype_calls(self, variant_obj, variant_line, case_obj):
variant_line = variant_line.split('\t')
if (len(variant_line) > 8):
gt_format = variant_line[8].split(':')
for individual in case_obj.individuals:
sample_id = individual.ind_id
index = individual.ind_index
gt_call = variant_line[(9 + index)].split(':')
raw_call = dict(zip(gt_format, gt_call))
genotype = Genotype(**raw_call)
variant_obj.add_individual(puzzle_genotype(sample_id=sample_id, genotype=genotype.genotype, case_id=case_obj.name, phenotype=individual.phenotype, ref_depth=genotype.ref_depth, alt_depth=genotype.alt_depth, genotype_quality=genotype.genotype_quality, depth=genotype.depth_of_coverage, supporting_evidence=genotype.supporting_evidence, pe_support=genotype.pe_support, sr_support=genotype.sr_support))
|
Add the genotype calls for the variant
Args:
variant_obj (puzzle.models.Variant)
variant_line (str): A raw VCF variant line
case_obj (puzzle.models.Case)
|
codesearchnet
|
def reset_state(self, reset_state):
if isinstance(reset_state, int):
self._pool.map(_reset_state, self._shard_num_args({'reset_state': reset_state}))
elif isinstance(reset_state, np.ndarray):
sim.validate_normalized_state(reset_state, self._num_qubits)
args = []
for kwargs in self._shard_num_args():
shard_num = kwargs['shard_num']
shard_size = (1 << kwargs['num_shard_qubits'])
start = (shard_num * shard_size)
end = (start + shard_size)
kwargs['reset_state'] = reset_state[start:end]
args.append(kwargs)
self._pool.map(_reset_state, args)
|
Reset the state to the given initial state.
Args:
reset_state: If this is an int, then this is the state to reset
the stepper to, expressed as an integer of the computational
basis. Integer to bitwise indices is little endian. Otherwise
if this is a np.ndarray this must be the correct size, be
normalized (L2 norm of 1), and have dtype of np.complex64.
Raises:
ValueError if the state is incorrectly sized or not of the correct
dtype.
|
codesearchnet
|
def assertAllCloseAccordingToType(self, a, b, rtol=1e-06, atol=1e-06, float_rtol=1e-06, float_atol=1e-06, half_rtol=0.001, half_atol=0.001, bfloat16_rtol=0.01, bfloat16_atol=0.01, msg=None):
a, b = self.evaluate_if_both_tensors(a, b)
a = self._GetNdArray(a)
b = self._GetNdArray(b)
if a.dtype == np.float32 or b.dtype == np.float32 or a.dtype == np.complex64 or (b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if a.dtype == dtypes.bfloat16.as_numpy_dtype or b.dtype == dtypes.bfloat16.as_numpy_dtype:
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
|
Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is relaxed to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
|
github-repos
|
def _cancel_http(api_request, operation_name):
path = "operations/{}:cancel".format(operation_name)
api_request(method="POST", path=path)
|
Cancel an operation using a JSON/HTTP client.
Args:
api_request (Callable): A callable used to make an API request. This
should generally be
:meth:`google.cloud._http.Connection.api_request`.
operation_name (str): The name of the operation.
|
juraj-google-style
|
def embedding_lookup(params, ids: ragged_tensor.Ragged, partition_strategy='mod', name=None, validate_indices=True, max_norm=None):
if params is None:
raise ValueError('params must be specified.')
if isinstance(params, (list, tuple)) and (not params):
raise ValueError('params should not be empty.')
if ids.dtype != dtypes.int32 and ids.dtype != dtypes.int64:
raise ValueError(f'The values contained by the inputs have type {str(ids.dtype)} and cannot be processed. All values should be indices, either of type `int32` or `int64`.')
with ops.name_scope(name, 'embedding_lookup_ragged') as name:
looked_up_ragged = ragged_functional_ops.map_flat_values(embedding_ops.embedding_lookup, params=params, ids=ids, partition_strategy=partition_strategy, max_norm=max_norm)
return looked_up_ragged
|
Look up the ragged ids in a list of embedding tensors.
Args:
params: A tensor representing the complete embedding tensor having the shape
[e1, ...eM]
ids: A 'RaggedTensor' with type 'int32' or 'int64' containing the ids
to be looked up in 'params' of shape [r0, ..rN]. Values must be in the
range '[0, params.shape[0]]'.
partition_strategy: A string specifying the partitioning strategy.
max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
than this value.
name: A name for the operation (optional)
Returns:
A ragged tensor of shape [r0, r1, ...rN, e1, ...eM].
Raises:
ValueError: When params is empty or the type of the ids is not int32 or
int64.
|
github-repos
|
def _flatten_subsection(subsection, _type, offset, parent):
for row in subsection:
if row in ('Low', 'Generated', 'High', ):
continue
elif isinstance(row[0], StringType):
if len(row) in (4, 5, ):
if len(row) == 5:
assert row[4][0] == 'S', \
'Only known usage of a fifth member is Sn, found: %s' % row[4][0]
yield (float(row[0]), float(row[1]), float(row[2]), float(row[3]) / 2.,
_type, offset, parent)
parent = offset
offset += 1
elif isinstance(row[0], list):
split_parent = offset - 1
start_offset = 0
slices = []
start = 0
for i, value in enumerate(row):
if value == '|':
slices.append(slice(start + start_offset, i))
start = i + 1
slices.append(slice(start + start_offset, len(row)))
for split_slice in slices:
for _row in _flatten_subsection(row[split_slice], _type, offset,
split_parent):
offset += 1
yield _row
|
Flatten a subsection from its nested version
Args:
subsection: Nested subsection as produced by _parse_section, except one level in
_type: type of section, ie: AXON, etc
parent: first element has this as its parent
offset: position in the final array of the first element
Returns:
Generator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID]
|
juraj-google-style
|
def tv_credits(self, **kwargs):
path = self._get_id_path('tv_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Get the TV credits for a specific person id.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
A dict respresentation of the JSON returned from the API.
|
codesearchnet
|
def _method_url(self, method_name):
return "{base_url}/api/{api}/{method}".format(
base_url=self._base_url(),
api=self.api_version,
method=method_name
)
|
Generate the URL for the requested method
Args:
method_name (str): Name of the method
Returns:
A string containing the URL of the method
|
juraj-google-style
|
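For illustration only, with a hypothetical client whose `_base_url()` returns 'https://api.example.com' and whose `api_version` is 'v2':
client._method_url('users.list')
# -> 'https://api.example.com/api/v2/users.list'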
def add_to_queue(self, queueable_item, position=0, as_next=False):
metadata = to_didl_string(queueable_item)
response = self.avTransport.AddURIToQueue([('InstanceID', 0), ('EnqueuedURI', queueable_item.resources[0].uri), ('EnqueuedURIMetaData', metadata), ('DesiredFirstTrackNumberEnqueued', position), ('EnqueueAsNext', int(as_next))])
qnumber = response['FirstTrackNumberEnqueued']
return int(qnumber)
|
Add a queueable item to the queue.
Args:
queueable_item (DidlObject or MusicServiceItem): The item to be
added to the queue
position (int): The index (1-based) at which the URI should be
added. Default is 0 (add URI at the end of the queue).
as_next (bool): Whether this URI should be played as the next
track in shuffle mode. This only works if `play_mode=SHUFFLE`.
Returns:
int: The index of the new item in the queue.
|
codesearchnet
|
def tokenize(self, vector_list):
if self.computable_distance is None:
self.computable_distance = EuclidDistance()
vector_arr = np.array(vector_list)
distance_arr = np.empty_like(vector_arr)
feature_arr = self.__dbm.get_feature_point(layer_number=0)
key_arr = np.empty(vector_arr.shape[0], dtype=int)
for i in range(vector_arr.shape[0]):
distance_arr = self.computable_distance.compute(
np.expand_dims(vector_arr[i], axis=0).repeat(feature_arr.shape[0], axis=0),
feature_arr
)
key_arr[i] = distance_arr.argmin(axis=0)
return self.token_arr[key_arr]
|
Tokenize vector.
Args:
vector_list: The list of vectors to be mapped to tokens.
Returns:
token
|
juraj-google-style
|
def _get_authorization_headers(self, context):
headers = {}
self._credentials.before_request(self._request, context.method_name, context.service_url, headers)
return list(six.iteritems(headers))
|
Gets the authorization headers for a request.
Returns:
Sequence[Tuple[str, str]]: A list of request headers (key, value)
to add to the request.
|
codesearchnet
|
def _extract_response_chunks(self, all_responses, response_chunks, api_name):
for response_chunk in response_chunks:
if not isinstance(response_chunk, list):
response_chunk = [response_chunk]
for response in response_chunk:
if not response:
continue
if self._cache:
self._cache.cache_value(api_name, response['resource'], response)
all_responses[response['resource']] = response
|
Extracts and caches the responses from the response chunks for
requests that contain multiple concatenated resources. Extracted
responses are added to the already cached responses passed in the
all_responses parameter.
Args:
all_responses: a dict of already cached responses, keyed by resource.
response_chunks: a list with response chunks.
api_name: a string name of the API.
|
juraj-google-style
|
def write(self, name, **data):
data['name'] = name
if (not ('timestamp' in data)):
data['timestamp'] = datetime.utcnow()
try:
self.client.index(index=self.get_index(), doc_type=self.doc_type, id=None, body=data)
except TransportError as exc:
logger.warning('writing metric %r failure %r', data, exc)
|
Write the metric to elasticsearch
Args:
name (str): The name of the metric to write
data (dict): Additional data to store with the metric
|
codesearchnet
|
def get_datas(callback, macs=[], run_flag=RunFlag(), bt_device=''):
log.info('Get latest data for sensors. Stop with Ctrl+C.')
log.info('MACs: %s', macs)
for new_data in RuuviTagSensor._get_ruuvitag_datas(macs, None, run_flag, bt_device):
callback(new_data)
|
Get data for all ruuvitag sensors or sensors in the MAC's list.
Args:
callback (func): callback function to be called when new data is received
macs (list): MAC addresses
run_flag (object): RunFlag object. Function executes while run_flag.running
bt_device (string): Bluetooth device id
|
juraj-google-style
|
def identifiers(config):
ids = []
if (config.klass_name == 'gen'):
for generator in os.listdir(config.generator_dir):
if (generator == '__init__.py'):
continue
(gid, ext) = os.path.splitext(generator)
if (ext == '.py' and
os.path.isfile(os.path.join(config.generator_dir, generator))):
ids.append(gid)
else:
for image_file in os.listdir(config.image_dir):
(iid, ext) = os.path.splitext(image_file)
if (ext in ['.jpg', '.png', '.tif'] and
os.path.isfile(os.path.join(config.image_dir, image_file))):
ids.append(iid)
return ids
|
Show list of identifiers for this prefix.
Handles both the case of local file based identifiers and
also image generators.
Arguments:
config - configuration object in which:
config.klass_name - 'gen' if a generator function
config.generator_dir - directory for generator code
config.image_dir - directory for images
Returns:
ids - a list of ids
|
juraj-google-style
|
def send_async(self, transaction, headers=None):
return self.transport.forward_request(method='POST', path=self.path, json=transaction, params={'mode': 'async'}, headers=headers)
|
Submit a transaction to the Federation with the mode `async`.
Args:
transaction (dict): the transaction to be sent
to the Federation node(s).
headers (dict): Optional headers to pass to the request.
Returns:
dict: The transaction sent to the Federation node(s).
|
codesearchnet
|
def migrate(self, id_or_uri, timeout=-1):
migrationInformation = {
'migrationState': 'Migrated',
'type': 'migratable-vc-domains',
'category': 'migratable-vc-domains'
}
complete_uri = self._client.build_uri(id_or_uri)
return self._client.update(migrationInformation, uri=complete_uri, timeout=timeout)
|
Initiates a migration of an enclosure specified by the ID or URI of a migration report.
Args:
id_or_uri: ID or URI of the migration report.
timeout: Timeout in seconds. Waits for task completion by default. The timeout does not abort the task in
OneView; just stops waiting for its completion.
Returns:
dict: a migration report.
|
juraj-google-style
|
def __check_no_missing_attributes(self, node: yaml.Node,
mapping: CommentedMap) -> None:
logger.debug('Checking presence of required attributes')
for name, type_, required in class_subobjects(self.class_):
if required and name not in mapping:
raise RecognitionError(('{}{}Missing attribute {} needed for'
' constructing a {}').format(
node.start_mark, os.linesep, name,
self.class_.__name__))
if name in mapping and not self.__type_matches(
mapping[name], type_):
raise RecognitionError(('{}{}Attribute {} has incorrect type'
' {}, expecting a {}').format(
node.start_mark, os.linesep, name,
type(mapping[name]), type_))
|
Checks that all required attributes are present.
Also checks that they're of the correct type.
Args:
mapping: The mapping with subobjects of this object.
Raises:
RecognitionError: if an attribute is missing or the type is incorrect.
|
juraj-google-style
|
def tomography_data(results, name, tomoset):
labels = tomography_circuit_names(tomoset, name)
circuits = tomoset['circuits']
data = []
prep = None
for (j, _) in enumerate(labels):
counts = marginal_counts(results.get_counts(labels[j]), tomoset['qubits'])
shots = sum(counts.values())
meas = circuits[j]['meas']
prep = circuits[j].get('prep', None)
meas_qubits = sorted(meas.keys())
if prep:
prep_qubits = sorted(prep.keys())
circuit = {}
for c in counts.keys():
circuit[c] = {}
circuit[c]['meas'] = [(meas[meas_qubits[k]], int(c[((- 1) - k)])) for k in range(len(meas_qubits))]
if prep:
circuit[c]['prep'] = [prep[prep_qubits[k]] for k in range(len(prep_qubits))]
data.append({'counts': counts, 'shots': shots, 'circuit': circuit})
ret = {'data': data, 'meas_basis': tomoset['meas_basis']}
if prep:
ret['prep_basis'] = tomoset['prep_basis']
return ret
|
Return a results dict for a state or process tomography experiment.
Args:
results (Result): Results from execution of process tomography
circuits on a backend.
name (string): The name of the circuit being reconstructed.
tomoset (tomography_set): the dict of tomography configurations.
Returns:
list: A list of dicts for the outcome of each process tomography
measurement circuit.
|
codesearchnet
|
def __init__(
self, name, aliases=None, description=None, maximum_value=None,
minimum_value=None, urls=None):
super(IntegerDefinition, self).__init__(
name, aliases=aliases, description=description, urls=urls)
self.format = definitions.FORMAT_SIGNED
self.maximum_value = maximum_value
self.minimum_value = minimum_value
|
Initializes an integer data type definition.
Args:
name (str): name.
aliases (Optional[list[str]]): aliases.
description (Optional[str]): description.
maximum_value (Optional[int]): maximum allowed value of the integer
data type.
minimum_value (Optional[int]): minimum allowed value of the integer
data type.
urls (Optional[list[str]]): URLs.
|
juraj-google-style
|
def get_what_follows_raw(s: str,
prefix: str,
onlyatstart: bool = True,
stripwhitespace: bool = True) -> Tuple[bool, str]:
prefixstart = s.find(prefix)
if ((prefixstart == 0 and onlyatstart) or
(prefixstart != -1 and not onlyatstart)):
resultstart = prefixstart + len(prefix)
result = s[resultstart:]
if stripwhitespace:
result = result.strip()
return True, result
return False, ""
|
Find the part of ``s`` that is after ``prefix``.
Args:
s: string to analyse
prefix: prefix to find
onlyatstart: only accept the prefix if it is right at the start of
``s``
stripwhitespace: remove whitespace from the result
Returns:
tuple: ``(found, result)``
|
juraj-google-style
|
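Two illustrative calls showing the returned tuple:
get_what_follows_raw("Total: 42 items", "Total:")
# -> (True, '42 items')
get_what_follows_raw("Subtotal: 7", "Total:")
# -> (False, '') because the prefix is not at the start and onlyatstart is True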
def FilterItem(self, launchditem):
for regex in self.blacklist_regex:
if regex.match(launchditem.get('Label', '')):
return True
return False
|
Should this job be filtered.
Args:
launchditem: job NSCFDictionary
Returns:
True if the item should be filtered (dropped)
|
codesearchnet
|
def __init__(self, fsntfs_data_stream):
super(NTFSDataStream, self).__init__()
self._fsntfs_data_stream = fsntfs_data_stream
|
Initializes the data stream object.
Args:
fsntfs_data_stream (pyfsntfs.data_stream): NTFS data stream.
|
juraj-google-style
|
def create_course_completion(self, user_id, payload):
return self._post(
urljoin(
self.enterprise_configuration.degreed_base_url,
self.global_degreed_config.completion_status_api_path
),
payload,
self.COMPLETION_PROVIDER_SCOPE
)
|
Send a completion status payload to the Degreed Completion Status endpoint
Args:
user_id: Unused.
payload: JSON encoded object (serialized from DegreedLearnerDataTransmissionAudit)
containing completion status fields per Degreed documentation.
Returns:
A tuple containing the status code and the body of the response.
Raises:
HTTPError: if we received a failure response code from Degreed
|
juraj-google-style
|
def fill_rects(self, *rects):
rect_array = ffi.new('SDL_Rect[]', len(rects))
for (i, r) in enumerate(rects):
rect_array[i] = r._ptr[0]
check_int_err(lib.SDL_RenderFillRects(self._ptr, rect_array, len(rects)))
|
Fill some number of rectangles on the current rendering target with the drawing color.
Args:
*rects (Rect): The destination rectangles.
Raises:
SDLError: If an error is encountered.
|
codesearchnet
|
def compose(self, r: Rigid) -> Rigid:
new_rot = self._rots.compose_r(r._rots)
new_trans = self._rots.apply(r._trans) + self._trans
return Rigid(new_rot, new_trans)
|
Composes the current rigid object with another.
Args:
r:
Another Rigid object
Returns:
The composition of the two transformations
|
github-repos
|
def encode_request(request_line, **headers):
lines = [request_line]
lines.extend(['%s: %s' % kv for kv in headers.items()])
return ('\r\n'.join(lines) + '\r\n\r\n').encode('utf-8')
|
Creates the data for a SSDP request.
Args:
request_line (string): The request line for the request (e.g.
``"M-SEARCH * HTTP/1.1"``).
headers (dict of string -> string): Dictionary of header name - header
value pairs to present in the request.
Returns:
bytes: The encoded request.
|
juraj-google-style
|
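A sketch of a typical SSDP discovery request built with this helper; the header values are standard SSDP conventions, not taken from the source:
data = encode_request('M-SEARCH * HTTP/1.1',
                      HOST='239.255.255.250:1900',
                      MAN='"ssdp:discover"',
                      MX='2',
                      ST='ssdp:all')
# data is a UTF-8 byte string terminated by a blank line, ready to send
# over UDP to the SSDP multicast address.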
def __init__(self, cell, device, **kwargs):
super(DeviceWrapperBase, self).__init__(cell, **kwargs)
self._device = device
|
Construct a `DeviceWrapper` for `cell` with device `device`.
Ensures the wrapped `cell` is called with `tf.device(device)`.
Args:
cell: An instance of `RNNCell`.
device: A device string or function, for passing to `tf.device`.
**kwargs: dict of keyword arguments for base layer.
|
github-repos
|
def checksum1(data, stringlength):
value_buffer = 0
for count in range(0, stringlength):
value_buffer = (value_buffer ^ data[count])
return (value_buffer & 254)
|
Calculate Checksum 1
Calculate the checksum 1 required for the herkulex data packet
Args:
data (list): the data of which checksum is to be calculated
stringlength (int): the length of the data
Returns:
int: The calculated checksum 1
|
codesearchnet
|
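A worked example with an arbitrary five-byte packet:
packet = [0x07, 0xFD, 0x07, 0x01, 0x3C]
checksum1(packet, len(packet))
# XOR of all bytes: 0x07 ^ 0xFD ^ 0x07 ^ 0x01 ^ 0x3C = 0xC0
# masked with 0xFE -> 0xC0 (192)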
def get_error_name(error):
error_type = type(error)
if (error_type.__module__ in ['__main__', 'builtins']):
return error_type.__name__
else:
return f'{error_type.__module__}.{error_type.__name__}'
|
Return canonical error name as string.
For builtin errors like ValueError or Exception, will return the bare
name, like ValueError or Exception.
For all other exceptions, will return modulename.errorname, such as
arbpackage.mod.myerror
Args:
error: Exception object.
Returns:
str. Canonical error name.
|
codesearchnet
|
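Two illustrative calls, one with a builtin error and one with an error defined in another module:
get_error_name(ValueError('bad input'))
# -> 'ValueError'
import json
try:
    json.loads('{')
except json.JSONDecodeError as err:
    print(get_error_name(err))   # -> 'json.decoder.JSONDecodeError'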
def mark_typed_list(self, name, type_object):
if (not hasattr(type_object, 'dump')):
raise ArgumentError(('The passed type object %s is missing required method: dump()' % type_object))
if (not hasattr(type_object, 'Restore')):
raise ArgumentError(('The passed type object %s is missing required method: Restore()' % type_object))
def _dump_list(obj):
if (obj is None):
return None
if (not isinstance(obj, list)):
raise DataError(('Property %s marked as list was not a list: %s' % (name, repr(obj))))
return [x.dump() for x in obj]
def _restore_list(obj):
if (obj is None):
return obj
return [type_object.Restore(x) for x in obj]
self.mark_complex(name, _dump_list, _restore_list)
|
Mark a property as containing serializable objects of a given type.
This convenience method allows you to avoid having to call
``mark_complex()`` whenever you need to serialize a list of objects.
This method requires that all members of the given list be of a single
class that contains a dump() method and a Restore() class method where
type_object.Restore(x.dump()) == x.
Args:
name (str): The name of the complex property.
type_object: The class object that will be contained inside
this list.
|
codesearchnet
|
def dq_argument(self) -> str:
def escape():
self._escape = True
return 1
self._escape = False
self.offset += 1
start = self.offset
self.dfa([{'': (lambda : 0), '"': (lambda : (- 1)), '\\': escape}, {'': (lambda : 0)}])
self._arg += (self.unescape(self.input[start:self.offset]) if self._escape else self.input[start:self.offset])
self.offset += 1
|
Parse double-quoted argument.
Raises:
EndOfInput: If past the end of input.
|
codesearchnet
|
def __call__(self, fn):
def exception(app, *args, **kwargs):
try:
return fn(app, *args, **kwargs)
except Exception as e:
app.tcex.log.error('method failure ({})'.format(e))
app.tcex.exit(1, self.msg)
return exception
|
Implement __call__ function for decorator.
Args:
fn (function): The decorated function.
Returns:
function: The custom decorator function.
|
juraj-google-style
|
def get_feature(w1: str, w2: str, w3: str, w4: str, w5: str, w6: str) -> typing.List[str]:
raw_feature = {'UW1': w1, 'UW2': w2, 'UW3': w3, 'UW4': w4, 'UW5': w5, 'UW6': w6, 'BW1': w2 + w3, 'BW2': w3 + w4, 'BW3': w4 + w5, 'TW1': w1 + w2 + w3, 'TW2': w2 + w3 + w4, 'TW3': w3 + w4 + w5, 'TW4': w4 + w5 + w6}
for key, value in list(raw_feature.items()):
if INVALID in value:
del raw_feature[key]
return [f'{item[0]}:{item[1]}' for item in raw_feature.items()]
|
Generates a feature from characters around (w1-6).
Args:
w1 (str): The character 3 characters before the break point.
w2 (str): The character 2 characters before the break point.
w3 (str): The character right before the break point.
w4 (str): The character right after the break point.
w5 (str): The character 2 characters after the break point.
w6 (str): The character 3 characters after the break point.
Returns:
The feature (list[str]).
|
github-repos
|
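A standalone sketch of the same windowed character features, assuming INVALID is a sentinel character that marks out-of-range context positions.
INVALID = '\x00'   # assumption: sentinel used for missing context characters
w1, w2, w3, w4, w5, w6 = 'B', 'u', 'n', 's', 'e', 'n'   # break point falls between w3 and w4
raw = {'UW1': w1, 'UW2': w2, 'UW3': w3, 'UW4': w4, 'UW5': w5, 'UW6': w6,
       'BW1': w2 + w3, 'BW2': w3 + w4, 'BW3': w4 + w5,
       'TW1': w1 + w2 + w3, 'TW2': w2 + w3 + w4, 'TW3': w3 + w4 + w5, 'TW4': w4 + w5 + w6}
features = [f'{key}:{value}' for key, value in raw.items() if INVALID not in value]
print(features[:4])   # ['UW1:B', 'UW2:u', 'UW3:n', 'UW4:s']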
def plugin_method(*plugin_names):
def wrapper(callable_obj):
for plugin_name in plugin_names:
if not hasattr(callable_obj, plugin_name):
setattr(callable_obj, plugin_name, True)
return callable_obj
return wrapper
|
Plugin Method decorator.
Signs a web handler function with the plugins to be applied as attributes.
Args:
plugin_names (list): A list of plugin callable names
Returns:
A wrapped handler callable.
Examples:
>>> @plugin_method('json', 'bill')
... def method():
... return "Hello!"
...
>>> print(method.json)
True
>>> print(method.bill)
True
|
juraj-google-style
|
def longestNumber(self, inp):
split = inp.split(' ')
numStart = None
numEnd = None
for i, w in enumerate(split):
if self.isValid(w):
if numStart is None:
numStart = i
numEnd = i
else:
                w = re.sub(r'(\w+)s(\b)', r'\g<1>\g<2>', w)
if w in self.__ordinals__:
if self.isValid(' '.join(split[numStart:i + 1])):
numEnd = i
break
        if numStart is None:
            return None
        description = ' '.join(split[numStart:numEnd + 1])
        return self.parse(description)
|
Extracts the longest valid numerical description from a string.
Not guaranteed to return a result even if some valid numerical
description exists (i.e., method is not particularly advanced).
Args:
inp (str): An arbitrary string, hopefully containing a number.
Returns:
The number with the longest string description in input,
or None if not found.
|
juraj-google-style
|
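A standalone look at the plural-stripping step used above, so that e.g. "fifths" can match the ordinal "fifth" before the lookup.
import re

word = 'fifths'
stripped = re.sub(r'(\w+)s(\b)', r'\g<1>\g<2>', word)
print(stripped)   # fifth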
def _ParseCachedEntryXP(self, value_data, cached_entry_offset):
try:
cached_entry = self._ReadStructureFromByteStream(
value_data[cached_entry_offset:], cached_entry_offset,
self._cached_entry_data_type_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse cached entry value with error: {0!s}'.format(
exception))
string_size = 0
for string_index in range(0, 528, 2):
if (cached_entry.path[string_index] == 0 and
cached_entry.path[string_index + 1] == 0):
break
string_size += 2
try:
path = bytearray(cached_entry.path[0:string_size]).decode('utf-16-le')
except UnicodeDecodeError:
raise errors.ParseError('Unable to decode cached entry path to string')
cached_entry_object = AppCompatCacheCachedEntry()
cached_entry_object.cached_entry_size = (
self._cached_entry_data_type_map.GetByteSize())
cached_entry_object.file_size = cached_entry.file_size
cached_entry_object.last_modification_time = (
cached_entry.last_modification_time)
cached_entry_object.last_update_time = cached_entry.last_update_time
cached_entry_object.path = path
return cached_entry_object
|
Parses a Windows XP cached entry.
Args:
value_data (bytes): value data.
cached_entry_offset (int): offset of the first cached entry data
relative to the start of the value data.
Returns:
AppCompatCacheCachedEntry: cached entry.
Raises:
ParseError: if the value data could not be parsed.
|
juraj-google-style
|
def get_signatures_from_saved_model(saved_model_path: str, signature_keys: Optional[Sequence[str]]=None, tags: Optional[Collection[str]]=None) -> Dict[str, meta_graph_pb2.SignatureDef]:
if tags is None:
tags = {tag_constants.SERVING}
loader = saved_model_loader.SavedModelLoader(saved_model_path)
meta_graphdef = loader.get_meta_graph_def_from_tags(tags)
signatures = {}
for key, signature_def in meta_graphdef.signature_def.items():
if key == saved_model_constants.INIT_OP_SIGNATURE_KEY:
continue
if signature_keys is not None and key not in signature_keys:
continue
signatures[key] = signature_def
return signatures
|
Gets a map from signature keys to their SignatureDef.
Args:
saved_model_path: Path to the saved model.
signature_keys: List of keys identifying SignatureDef to retrieve. If None,
retrieve all except the init signature.
tags: Set of tags identifying the MetaGraphDef within the SavedModel.
Returns:
A map from signature_key to its SignatureDef.
|
github-repos
|
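A hedged usage sketch of the function above; the path and signature key are placeholders, and TensorFlow plus an exported SavedModel on disk are assumed.
# Placeholder path and key; requires TensorFlow and a real SavedModel directory.
signatures = get_signatures_from_saved_model(
    '/tmp/exported_model', signature_keys=['serving_default'])
for key, signature_def in signatures.items():
    print(key, list(signature_def.inputs.keys()), list(signature_def.outputs.keys()))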
def select_embedding_from_tag(cur, embedding_tag, target_nodelist, target_edgelist):
encoded_data = {'num_nodes': len(target_nodelist), 'num_edges': len(target_edgelist), 'edges': json.dumps(target_edgelist, separators=(',', ':')), 'tag': embedding_tag}
select = '\n SELECT\n source_node,\n chain\n FROM\n embedding_component_view\n WHERE\n embedding_tag = :tag AND\n target_edges = :edges AND\n target_num_nodes = :num_nodes AND\n target_num_edges = :num_edges\n '
embedding = {v: json.loads(chain) for (v, chain) in cur.execute(select, encoded_data)}
return embedding
|
Select an embedding from the given tag and target graph.
Args:
cur (:class:`sqlite3.Cursor`):
An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.
        embedding_tag (str):
            Tag under which the embedding was stored in the cache.
target_nodelist (list):
The nodes in the target graph. Should be integer valued.
target_edgelist (list):
The edges in the target graph.
Returns:
dict: The mapping from the source graph to the target graph.
In the form {v: {s, ...}, ...} where v is a variable in the
source model and s is a variable in the target model.
|
codesearchnet
|
def mkzip(archive, items, mode="w", save_full_paths=False):
close = False
try:
if not isinstance(archive, zipfile.ZipFile):
archive = zipfile.ZipFile(archive, mode, allowZip64=True)
close = True
logger.info("mkdzip: Creating %s, from: %s", archive.filename, items)
if isinstance(items, str):
items = [items]
for item in items:
item = os.path.abspath(item)
basename = os.path.basename(item)
if os.path.isdir(item):
                for root, directories, filenames in os.walk(item):
for filename in filenames:
path = os.path.join(root, filename)
if save_full_paths:
archive_path = path.encode("utf-8")
else:
archive_path = os.path.join(
basename, path.replace(item, "").strip("\\/")
).encode("utf-8")
archive.write(path, archive_path)
elif os.path.isfile(item):
if save_full_paths:
archive_name = item.encode("utf-8")
else:
archive_name = basename.encode("utf-8")
archive.write(item, archive_name)
return True
except Exception as e:
logger.error("Error occurred during mkzip: %s" % e)
return False
finally:
if close:
archive.close()
|
Recursively zip a directory.
Args:
archive (zipfile.ZipFile or str): ZipFile object to add to, or path to the
output zip archive.
items (str or list of str): Single item or list of items (files and
directories) to be added to zipfile.
mode (str): ``w`` to create a new archive, ``a`` to append to an existing one.
save_full_paths (bool): Preserve full paths.
|
juraj-google-style
|
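A standalone sketch of the same walk-and-write pattern using only the standard zipfile module; the temporary files created here are illustrative.
import os
import tempfile
import zipfile

src = tempfile.mkdtemp()
with open(os.path.join(src, 'a.txt'), 'w') as f:
    f.write('hello')

archive_path = os.path.join(tempfile.mkdtemp(), 'out.zip')
with zipfile.ZipFile(archive_path, 'w', allowZip64=True) as zf:
    base = os.path.basename(src)
    for root, directories, filenames in os.walk(src):
        for filename in filenames:
            path = os.path.join(root, filename)
            # Store paths relative to the directory being zipped.
            zf.write(path, os.path.join(base, os.path.relpath(path, src)))

print(zipfile.ZipFile(archive_path).namelist())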
def list(self,params=None, headers=None):
path = '/creditor_bank_accounts'
response = self._perform_request('GET', path, params, headers,
retry_failures=True)
return self._resource_for(response)
|
List creditor bank accounts.
Returns a [cursor-paginated](#api-usage-cursor-pagination) list of your
creditor bank accounts.
Args:
params (dict, optional): Query string parameters.
Returns:
CreditorBankAccount
|
juraj-google-style
|
def get(path, objectType, user=None):
ret = {'Path': path,
'ACLs': []}
sidRet = _getUserSid(user)
if path and objectType:
dc = daclConstants()
objectTypeBit = dc.getObjectTypeBit(objectType)
path = dc.processPath(path, objectTypeBit)
tdacl = _get_dacl(path, objectTypeBit)
if tdacl:
for counter in range(0, tdacl.GetAceCount()):
tAce = tdacl.GetAce(counter)
if not sidRet['sid'] or (tAce[2] == sidRet['sid']):
ret['ACLs'].append(_ace_to_text(tAce, objectTypeBit))
return ret
|
Get the ACL of an object. Will filter by user if one is provided.
Args:
path: The path to the object
objectType: The type of object (FILE, DIRECTORY, REGISTRY)
user: A user name to filter by
Returns (dict): A dictionary containing the ACL
CLI Example:
.. code-block:: bash
salt 'minion-id' win_dacl.get c:\temp directory
|
juraj-google-style
|
def attribute(self, name):
return super(Map, self).attribute(self._inputs[0], name)
|
Expression for an input attribute.
An input attribute is an attribute on the input
port of the operator invocation.
Args:
name(str): Name of the attribute.
Returns:
Expression: Expression representing the input attribute.
|
juraj-google-style
|
def ParseGshadowEntry(self, line):
fields = ("name", "passwd", "administrators", "members")
if line:
rslt = dict(zip(fields, line.split(":")))
name = rslt["name"]
pw_entry = self.shadow.setdefault(name, rdf_client.PwEntry())
pw_entry.store = self.shadow_store
pw_entry.hash_type = self.GetHashType(rslt["passwd"])
members = self.gshadow_members.setdefault(name, set())
for accts in rslt["administrators"], rslt["members"]:
if accts:
members.update(accts.split(","))
|
Extract the members of each group from /etc/gshadow.
Identifies the groups in /etc/gshadow and several attributes of the group,
including how the password is crypted (if set).
gshadow files have the format group_name:passwd:admins:members
admins are both group members and can manage passwords and memberships.
Args:
line: An entry in gshadow.
|
juraj-google-style
|
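A standalone sketch of splitting one gshadow line into its four fields and collecting the members; the sample entry is made up.
line = 'wheel:!::alice,bob'   # made-up example entry
fields = ('name', 'passwd', 'administrators', 'members')
entry = dict(zip(fields, line.split(':')))
members = set()
for accts in entry['administrators'], entry['members']:
    if accts:
        members.update(accts.split(','))
print(entry['name'], members)   # wheel {'alice', 'bob'}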
def decode(self):
if (self.encoding >= self.public_key.n):
raise ValueError('Attempted to decode corrupted number')
elif (self.encoding <= self.public_key.max_int):
mantissa = self.encoding
elif (self.encoding >= (self.public_key.n - self.public_key.max_int)):
mantissa = (self.encoding - self.public_key.n)
else:
raise OverflowError('Overflow detected in decrypted number')
if (self.exponent >= 0):
return (mantissa * (self.BASE ** self.exponent))
else:
try:
return (mantissa / (self.BASE ** (- self.exponent)))
except OverflowError as e:
raise OverflowError('decoded result too large for a float') from e
|
Decode plaintext and return the result.
Returns:
an int or float: the decoded number. N.B. if the number
returned is an integer, it will not be of type float.
Raises:
OverflowError: if overflow is detected in the decrypted number.
|
codesearchnet
|
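A standalone sketch of the fixed-point decode rule, using a toy modulus; BASE = 16 and the max_int cutoff are assumptions about the surrounding encoding scheme.
BASE = 16
n = 10_007              # toy modulus (a real Paillier key is far larger)
max_int = n // 3 - 1    # assumed positive/negative cutoff

def decode(encoding, exponent):
    if encoding >= n:
        raise ValueError('Attempted to decode corrupted number')
    if encoding <= max_int:
        mantissa = encoding            # non-negative value
    elif encoding >= n - max_int:
        mantissa = encoding - n        # negative value, stored wrapped mod n
    else:
        raise OverflowError('Overflow detected in decrypted number')
    return mantissa * BASE ** exponent if exponent >= 0 else mantissa / BASE ** -exponent

print(decode(250, -2))      # 250 / 16**2 == 0.9765625
print(decode(n - 3, 0))     # -3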
def validate(cls, mapper_spec):
if mapper_spec.input_reader_class() != cls:
raise errors.BadReaderParamsError("Input reader class mismatch")
params = _get_params(mapper_spec, allowed_keys=cls._PARAMS)
if (cls.VERSION_IDS_PARAM not in params and
cls.MODULE_VERSIONS_PARAM not in params):
raise errors.BadReaderParamsError("Must specify a list of version ids or "
"module/version ids for mapper input")
if (cls.VERSION_IDS_PARAM in params and
cls.MODULE_VERSIONS_PARAM in params):
raise errors.BadReaderParamsError("Can not supply both version ids or "
"module/version ids. Use only one.")
if (cls.START_TIME_PARAM not in params or
params[cls.START_TIME_PARAM] is None):
raise errors.BadReaderParamsError("Must specify a starting time for "
"mapper input")
if cls.END_TIME_PARAM not in params or params[cls.END_TIME_PARAM] is None:
params[cls.END_TIME_PARAM] = time.time()
if params[cls.START_TIME_PARAM] >= params[cls.END_TIME_PARAM]:
raise errors.BadReaderParamsError("The starting time cannot be later "
"than or the same as the ending time.")
if cls._PROTOTYPE_REQUEST_PARAM in params:
try:
params[cls._PROTOTYPE_REQUEST_PARAM] = log_service_pb.LogReadRequest(
params[cls._PROTOTYPE_REQUEST_PARAM])
except (TypeError, ProtocolBuffer.ProtocolBufferDecodeError):
raise errors.BadReaderParamsError("The prototype request must be "
"parseable as a LogReadRequest.")
try:
logservice.fetch(**params)
  except logservice.InvalidArgumentError as e:
raise errors.BadReaderParamsError("One or more parameters are not valid "
"inputs to logservice.fetch(): %s" % e)
|
Validates the mapper's specification and all necessary parameters.
Args:
mapper_spec: The MapperSpec to be used with this InputReader.
Raises:
BadReaderParamsError: If the user fails to specify both a starting time
and an ending time, or if the starting time is later than the ending
time.
|
juraj-google-style
|
def get_servo_angle(self):
servoposition = self.get_servo_position()
if ((self.servomodel == 6) or (self.servomodel == 4)):
return scale(servoposition, 10627, 22129, (- 159.9), 159.6)
else:
return scale(servoposition, 21, 1002, (- 150), 150)
|
Gets the current angle of the servo in degrees
Args:
none
Returns:
int : the current servo angle
|
codesearchnet
|
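A standalone sketch of the linear scale() mapping the method relies on; the helper itself is assumed, and this mirrors the usual implementation.
def scale(value, in_lo, in_hi, out_lo, out_hi):
    # Linearly map value from [in_lo, in_hi] to [out_lo, out_hi].
    return (value - in_lo) * (out_hi - out_lo) / (in_hi - in_lo) + out_lo

print(scale(21, 21, 1002, -150, 150))    # -150.0 (minimum position)
print(scale(512, 21, 1002, -150, 150))   # ~0.15 degrees, near centre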
def neighborhood_probability(self, threshold, radius):
weights = disk(radius, dtype=np.uint8)
thresh_data = np.zeros(self.data.shape[1:], dtype=np.uint8)
neighbor_prob = np.zeros(self.data.shape, dtype=np.float32)
for t in np.arange(self.data.shape[0]):
thresh_data[self.data[t] >= threshold] = 1
maximized = fftconvolve(thresh_data, weights, mode="same")
maximized[maximized > 1] = 1
maximized[maximized < 1] = 0
neighbor_prob[t] = fftconvolve(maximized, weights, mode="same")
thresh_data[:] = 0
neighbor_prob[neighbor_prob < 1] = 0
neighbor_prob /= weights.sum()
return neighbor_prob
|
Calculate a probability based on the number of grid points in an area that exceed a threshold.
Args:
threshold: Intensity threshold that the data must equal or exceed.
radius: Neighborhood radius in grid points.
Returns:
Neighborhood probabilities as a numpy array with the same shape as the data.
|
juraj-google-style
|
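A standalone sketch of the same two-pass convolution on a single 2-D field, assuming scipy and scikit-image are installed; the random field and threshold are illustrative.
import numpy as np
from scipy.signal import fftconvolve
from skimage.morphology import disk

rng = np.random.default_rng(0)
field = rng.random((50, 50)) * 40         # toy forecast field
threshold, radius = 30.0, 3
weights = disk(radius, dtype=np.uint8)

exceed = (field >= threshold).astype(np.uint8)
# First pass: any point within `radius` of an exceedance becomes a hit.
maximized = fftconvolve(exceed, weights, mode='same')
maximized[maximized > 1] = 1
maximized[maximized < 1] = 0
# Second pass: the hit fraction within the neighborhood gives the probability.
prob = fftconvolve(maximized, weights, mode='same')
prob[prob < 1] = 0
prob /= weights.sum()
print(prob.max())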
def _FormatIPCPermToken(self, token_data):
return {
'user_id': token_data.user_identifier,
'group_id': token_data.group_identifier,
'creator_user_id': token_data.creator_user_identifier,
'creator_group_id': token_data.creator_group_identifier,
'access': token_data.access_mode}
|
Formats an IPC permissions token as a dictionary of values.
Args:
token_data (bsm_token_data_ipc_perm): AUT_IPC_PERM token data.
Returns:
dict[str, str]: token values.
|
juraj-google-style
|
def abort_all(reason, extras=None):
raise signals.TestAbortAll(reason, extras)
|
Abort all subsequent tests, including the ones not in this test class or
iteration.
Args:
reason: The reason to abort.
extras: An optional field for extra information to be included in
test result.
Raises:
signals.TestAbortAll: Abort all subsequent tests.
|
github-repos
|
def extend(self, elts):
elts = elts[:]
self._in_deque.append(elts)
event = self._event_for(elts)
self._event_deque.append(event)
return event
|
Adds elts to the tasks.
Args:
elts (Sequence): an iterable of elements that can be appended to the
task's bundle_field.
Returns:
Event: an event that can be used to wait on the response.
|
codesearchnet
|
def read(self, size=None):
if not self._is_open:
raise IOError('Not opened.')
if self._fsntfs_data_stream:
return self._fsntfs_data_stream.read(size=size)
return self._fsntfs_file_entry.read(size=size)
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
juraj-google-style
|
def _process_update(self, item, feed_item):
lp = self.landing_page_dao.get(feed_item, required=True)
feed_item[FieldMap.CAMPAIGN_LANDING_PAGE_ID] = lp['id']
feed_item[FieldMap.CAMPAIGN_LANDING_PAGE_NAME] = lp['name']
item['startDate'] = StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.CAMPAIGN_START_DATE, None))
item['endDate'] = StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.CAMPAIGN_END_DATE, None))
item['name'] = feed_item.get(FieldMap.CAMPAIGN_NAME, None)
item['defaultLandingPageId'] = lp['id']
|
Updates a campaign based on the values from the feed.
Args:
item: Object representing the campaign to be updated; this object is
updated directly.
feed_item: Feed item representing campaign values from the Bulkdozer feed.
|
github-repos
|
def ParseOptions(cls, options, analysis_plugin):
if not isinstance(analysis_plugin, nsrlsvr.NsrlsvrAnalysisPlugin):
raise errors.BadConfigObject(
'Analysis plugin is not an instance of NsrlsvrAnalysisPlugin')
label = cls._ParseStringOption(
options, 'nsrlsvr_label', default_value=cls._DEFAULT_LABEL)
analysis_plugin.SetLabel(label)
lookup_hash = cls._ParseStringOption(
options, 'nsrlsvr_hash', default_value=cls._DEFAULT_HASH)
analysis_plugin.SetLookupHash(lookup_hash)
host = cls._ParseStringOption(
options, 'nsrlsvr_host', default_value=cls._DEFAULT_HOST)
analysis_plugin.SetHost(host)
port = cls._ParseNumericOption(
options, 'nsrlsvr_port', default_value=cls._DEFAULT_PORT)
analysis_plugin.SetPort(port)
if not analysis_plugin.TestConnection():
raise errors.BadConfigOption(
'Unable to connect to nsrlsvr {0:s}:{1:d}'.format(host, port))
|
Parses and validates options.
Args:
options (argparse.Namespace): parser options object.
analysis_plugin (NsrlsvrAnalysisPlugin): analysis plugin to configure.
Raises:
BadConfigObject: when the analysis plugin is the wrong type.
BadConfigOption: when unable to connect to nsrlsvr instance.
|
juraj-google-style
|
def CreateSubdivision(self, parent=None, value=None):
division = {'xsi_type': 'ProductPartition', 'partitionType': 'SUBDIVISION', 'id': str(self.next_id)}
if (parent is not None):
division['parentCriterionId'] = parent['id']
division['caseValue'] = value
adgroup_criterion = {'xsi_type': 'BiddableAdGroupCriterion', 'adGroupId': self.adgroup_id, 'criterion': division}
self.CreateAddOperation(adgroup_criterion)
self.next_id -= 1
return division
|
Creates a subdivision node.
Args:
parent: The node that should be this node's parent.
value: The value being partitioned on.
Returns:
A new subdivision node.
|
codesearchnet
|
def _send_offset_fetch_request(self, partitions):
assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
assert all(map(lambda k: isinstance(k, TopicPartition), partitions))
if not partitions:
return Future().success({})
node_id = self.coordinator()
if node_id is None:
return Future().failure(Errors.GroupCoordinatorNotAvailableError)
if not self._client.ready(node_id):
log.debug("Node %s not ready -- failing offset fetch request",
node_id)
return Future().failure(Errors.NodeNotReadyError)
log.debug("Group %s fetching committed offsets for partitions: %s",
self.group_id, partitions)
topic_partitions = collections.defaultdict(set)
for tp in partitions:
topic_partitions[tp.topic].add(tp.partition)
if self.config['api_version'] >= (0, 8, 2):
request = OffsetFetchRequest[1](
self.group_id,
list(topic_partitions.items())
)
else:
request = OffsetFetchRequest[0](
self.group_id,
list(topic_partitions.items())
)
future = Future()
_f = self._client.send(node_id, request)
_f.add_callback(self._handle_offset_fetch_response, future)
_f.add_errback(self._failed_request, node_id, request, future)
return future
|
Fetch the committed offsets for a set of partitions.
This is a non-blocking call. The returned future can be polled to get
the actual offsets returned from the broker.
Arguments:
partitions (list of TopicPartition): the partitions to fetch
Returns:
Future: resolves to dict of offsets: {TopicPartition: int}
|
juraj-google-style
|
def word_error_rate(raw_predictions,
labels,
lookup=None,
weights_fn=common_layers.weights_nonzero):
def from_tokens(raw, lookup_):
gathered = tf.gather(lookup_, tf.cast(raw, tf.int32))
joined = tf.regex_replace(tf.reduce_join(gathered, axis=1), b"<EOS>.*", b"")
cleaned = tf.regex_replace(joined, b"_", b" ")
tokens = tf.string_split(cleaned, " ")
return tokens
def from_characters(raw, lookup_):
corrected = tf.bitcast(
tf.clip_by_value(tf.subtract(raw, 2), 0, 255), tf.uint8)
gathered = tf.gather(lookup_, tf.cast(corrected, tf.int32))[:, :, 0]
joined = tf.reduce_join(gathered, axis=1)
cleaned = tf.regex_replace(joined, b"\0", b"")
tokens = tf.string_split(cleaned, " ")
return tokens
if lookup is None:
lookup = tf.constant([chr(i) for i in range(256)])
convert_fn = from_characters
else:
convert_fn = from_tokens
if weights_fn is not common_layers.weights_nonzero:
raise ValueError("Only weights_nonzero can be used for this metric.")
with tf.variable_scope("word_error_rate", values=[raw_predictions, labels]):
raw_predictions = tf.squeeze(
tf.argmax(raw_predictions, axis=-1), axis=(2, 3))
labels = tf.squeeze(labels, axis=(2, 3))
reference = convert_fn(labels, lookup)
predictions = convert_fn(raw_predictions, lookup)
distance = tf.reduce_sum(
tf.edit_distance(predictions, reference, normalize=False))
reference_length = tf.cast(
tf.size(reference.values, out_type=tf.int32), dtype=tf.float32)
return distance / reference_length, reference_length
|
Calculate word error rate.
Args:
raw_predictions: The raw predictions.
labels: The actual labels.
lookup: A tf.constant mapping indices to output tokens.
weights_fn: Weighting function.
Returns:
The word error rate.
|
juraj-google-style
|
def ends_with(self, suffix):
suffix = suffix.lower()
found_words = []
res = cgaddag.gdg_ends_with(self.gdg, suffix.encode(encoding='ascii'))
tmp = res
while tmp:
word = tmp.contents.str.decode('ascii')
found_words.append(word)
tmp = tmp.contents.next
cgaddag.gdg_destroy_result(res)
return found_words
|
Find all words ending with a suffix.
Args:
suffix: A suffix to be searched for.
Returns:
A list of all words found.
|
codesearchnet
|
def load_metadata_for_topics(self, *topics, **kwargs):
if ('ignore_leadernotavailable' in kwargs):
ignore_leadernotavailable = kwargs['ignore_leadernotavailable']
else:
ignore_leadernotavailable = False
if topics:
self.reset_topic_metadata(*topics)
else:
self.reset_all_metadata()
resp = self.send_metadata_request(topics)
log.debug('Updating broker metadata: %s', resp.brokers)
log.debug('Updating topic metadata: %s', [topic for (_, topic, _) in resp.topics])
self.brokers = dict([(nodeId, BrokerMetadata(nodeId, host, port, None)) for (nodeId, host, port) in resp.brokers])
for (error, topic, partitions) in resp.topics:
if error:
error_type = kafka.errors.kafka_errors.get(error, UnknownError)
if (error_type in (UnknownTopicOrPartitionError, LeaderNotAvailableError)):
log.error('Error loading topic metadata for %s: %s (%s)', topic, error_type, error)
if (topic not in topics):
continue
elif ((error_type is LeaderNotAvailableError) and ignore_leadernotavailable):
continue
raise error_type(topic)
self.topic_partitions[topic] = {}
for (error, partition, leader, _, _) in partitions:
self.topic_partitions[topic][partition] = leader
topic_part = TopicPartition(topic, partition)
if error:
error_type = kafka.errors.kafka_errors.get(error, UnknownError)
if (error_type is LeaderNotAvailableError):
log.error('No leader for topic %s partition %d', topic, partition)
self.topics_to_brokers[topic_part] = None
continue
elif (error_type is ReplicaNotAvailableError):
log.debug('Some (non-leader) replicas not available for topic %s partition %d', topic, partition)
else:
raise error_type(topic_part)
if (leader in self.brokers):
self.topics_to_brokers[topic_part] = self.brokers[leader]
else:
self.topics_to_brokers[topic_part] = BrokerMetadata(leader, None, None, None)
|
Fetch broker and topic-partition metadata from the server.
Updates internal data: broker list, topic/partition list, and
topic/partition -> broker map. This method should be called after
receiving any error.
Note: Exceptions *will not* be raised in a full refresh (i.e. no topic
list). In this case, error codes will be logged as errors.
Partition-level errors will also not be raised here (a single partition
w/o a leader, for example).
Arguments:
*topics (optional): If a list of topics is provided,
the metadata refresh will be limited to the specified topics
only.
ignore_leadernotavailable (bool): suppress LeaderNotAvailableError
so that metadata is loaded correctly during auto-create.
Default: False.
Raises:
UnknownTopicOrPartitionError: Raised for topics that do not exist,
unless the broker is configured to auto-create topics.
LeaderNotAvailableError: Raised for topics that do not exist yet,
when the broker is configured to auto-create topics. Retry
after a short backoff (topics/partitions are initializing).
|
codesearchnet
|