code (string, lengths 20-4.93k) | docstring (string, lengths 33-1.27k) | source (string, 3 classes) |
---|---|---|
def CheckForNewlineAtEOF(filename, lines, error):
if ((len(lines) < 3) or lines[(- 2)]):
error(filename, (len(lines) - 2), 'whitespace/ending_newline', 5, 'Could not find a newline character at the end of the file.')
|
Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
|
codesearchnet
|
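A minimal, hedged usage sketch for the checker above. The error-callback signature is inferred from the call inside the function, and the cpplint-style `lines` convention (splitting a newline-terminated file on `'\n'` leaves an empty string as the second-to-last element) is an assumption.

```python
# Hedged sketch: drive CheckForNewlineAtEOF with a simple error collector.
errors = []

def collect(filename, linenum, category, confidence, message):
    # Callback signature inferred from the call inside CheckForNewlineAtEOF.
    errors.append((filename, linenum, category, confidence, message))

# Assumed convention: a trailing empty string means the file ended with a newline.
CheckForNewlineAtEOF('good.cc', ['// marker', 'int main() {}', '', '// marker'], collect)
CheckForNewlineAtEOF('bad.cc', ['// marker', 'int main() {}', '// marker'], collect)
print(errors)  # only the 'bad.cc' entry is reported
```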
def create_file_writer(logdir, max_queue=None, flush_millis=None, filename_suffix=None, name=None):
if logdir is None:
return _NoopSummaryWriter()
logdir = str(logdir)
with ops.device('cpu:0'):
if max_queue is None:
max_queue = constant_op.constant(10)
if flush_millis is None:
flush_millis = constant_op.constant(2 * 60 * 1000)
if filename_suffix is None:
filename_suffix = constant_op.constant('.v2')
if name is None:
name = 'logdir:' + logdir
resource = gen_summary_ops.summary_writer(shared_name=name)
return _LegacyResourceSummaryWriter(resource=resource, init_op_fn=functools.partial(gen_summary_ops.create_summary_file_writer, logdir=logdir, max_queue=max_queue, flush_millis=flush_millis, filename_suffix=filename_suffix))
|
Creates a summary file writer in the current context under the given name.
Args:
logdir: a string, or None. If a string, creates a summary file writer
which writes to the directory named by the string. If None, returns
a mock object which acts like a summary writer but does nothing,
useful to use as a context manager.
max_queue: the largest number of summaries to keep in a queue; will
flush once the queue gets bigger than this. Defaults to 10.
flush_millis: the largest interval between flushes. Defaults to 120,000.
filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
name: Shared name for this SummaryWriter resource stored to default
Graph. Defaults to the provided logdir prefixed with `logdir:`. Note: if a
summary writer resource with this shared name already exists, the returned
SummaryWriter wraps that resource and the other arguments have no effect.
Returns:
Either a summary writer or an empty object which can be used as a
summary writer.
|
github-repos
|
def _starts_with(field, filter_value):
valid = False
if field.startswith(filter_value):
valid = True
return valid
|
Validate that a field starts with the provided value.
Args:
field (string): The field to check.
filter_value (string): The value the field must start with.
Returns:
(boolean): Results of validation
|
codesearchnet
|
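Despite the docstring mentioning a list, the code only calls `str.startswith` on a single value; a minimal illustration:

```python
print(_starts_with('tcex-filter', 'tcex'))   # True
print(_starts_with('filter-tcex', 'tcex'))   # False
```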
def get(self, path_info):
assert (path_info['scheme'] == 'local')
path = path_info['path']
if (not os.path.exists(path)):
return None
(actual_mtime, actual_size) = get_mtime_and_size(path)
actual_inode = get_inode(path)
existing_record = self.get_state_record_for_inode(actual_inode)
if (not existing_record):
return None
(mtime, size, checksum, _) = existing_record
if self._file_metadata_changed(actual_mtime, mtime, actual_size, size):
return None
self._update_state_record_timestamp_for_inode(actual_inode)
return checksum
|
Gets the checksum for the specified path info. Checksum will be
retrieved from the state database if available.
Args:
path_info (dict): path info to get the checksum for.
Returns:
str or None: checksum for the specified path info or None if it
doesn't exist in the state database.
|
codesearchnet
|
def get_size_with_aspect_ratio(image_size: Tuple[int, int], size: int, max_size: Optional[int]=None, mod_size: int=16) -> Tuple[int, int]:
height, width = image_size
raw_size = None
if max_size is not None:
min_original_size = float(min((height, width)))
max_original_size = float(max((height, width)))
if max_original_size / min_original_size * size > max_size:
raw_size = max_size * min_original_size / max_original_size
size = int(round(raw_size))
if width < height:
ow = size
if max_size is not None and raw_size is not None:
oh = int(raw_size * height / width)
else:
oh = int(size * height / width)
elif height <= width and height == size or (width <= height and width == size):
oh, ow = (height, width)
else:
oh = size
if max_size is not None and raw_size is not None:
ow = int(raw_size * width / height)
else:
ow = int(size * width / height)
if mod_size is not None:
ow_mod = np.mod(ow, mod_size)
oh_mod = np.mod(oh, mod_size)
ow = ow - ow_mod
oh = oh - oh_mod
return (oh, ow)
|
Computes the output image size given the input image size and the desired output size, rounding each output dimension down to a multiple of `mod_size`.
Args:
image_size (`Tuple[int, int]`):
The input image size.
size (`int`):
The desired output size.
max_size (`int`, *optional*):
The maximum allowed output size.
mod_size (`int`, *optional*, defaults to 16):
The value each output dimension is rounded down to a multiple of.
|
github-repos
|
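A quick hedged example of the resizing helper, assuming `numpy` is imported as `np` (it is used inside the function for the final rounding):

```python
# A 480x640 image resized so the shorter (height) side targets 256, capped at 512;
# both output dimensions are floored to a multiple of mod_size=16.
print(get_size_with_aspect_ratio((480, 640), size=256, max_size=512))  # (256, 336)
```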
def CaptureVariablesList(self, items, depth, empty_message, limits):
v = []
for (name, value) in items:
if ((self._total_size >= self.max_size) or (len(v) >= limits.max_list_items)):
v.append({'status': {'refersTo': 'VARIABLE_VALUE', 'description': {'format': 'Only first $0 items were captured. Use in an expression to see all items.', 'parameters': [str(len(v))]}}})
break
v.append(self.CaptureNamedVariable(name, value, depth, limits))
if (not v):
return [{'status': {'refersTo': 'VARIABLE_NAME', 'description': {'format': empty_message}}}]
return v
|
Captures list of named items.
Args:
items: iterable of (name, value) tuples.
depth: nested depth of dictionaries and vectors for items.
empty_message: info status message to set if items is empty.
limits: Per-object limits for capturing variable data.
Returns:
List of formatted variable objects.
|
codesearchnet
|
def dataset_docs_str(datasets=None):
module_to_builder = make_module_to_builder_dict(datasets)
sections = sorted(list(module_to_builder.keys()))
section_tocs = []
section_docs = []
for section in sections:
builders = tf.nest.flatten(module_to_builder[section])
builders = sorted(builders, key=(lambda b: b.name))
builder_docs = [document_single_builder(builder) for builder in builders]
section_doc = SECTION_DATASETS.format(section_name=section, datasets='\n'.join(builder_docs))
section_toc = create_section_toc(section, builders)
section_docs.append(section_doc)
section_tocs.append(section_toc)
full_doc = DOC.format(toc='\n'.join(section_tocs), datasets='\n'.join(section_docs))
return full_doc
|
Create dataset documentation string for given datasets.
Args:
datasets: list of datasets for which to create documentation.
If None, then all available datasets will be used.
Returns:
string describing the datasets (in the MarkDown format).
|
codesearchnet
|
def letter_score(letter):
score_map = {1: ['a', 'e', 'i', 'o', 'u', 'l', 'n', 'r', 's', 't'], 2: ['d', 'g'], 3: ['b', 'c', 'm', 'p'], 4: ['f', 'h', 'v', 'w', 'y'], 5: ['k'], 8: ['j', 'x'], 10: ['q', 'z']}
for (score, letters) in score_map.items():
if (letter.lower() in letters):
return score
else:
raise TypeError('Invalid letter: %s' % letter)
|
Returns the Scrabble score of a letter.
Args:
letter: a single character string
Raises:
TypeError if a non-Scrabble character is supplied
|
codesearchnet
|
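Scoring a whole word is then just a sum over per-letter scores:

```python
word = 'quartz'
print(sum(letter_score(ch) for ch in word))  # 10 + 1 + 1 + 1 + 1 + 10 = 24
```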
def send_batches(self, batch_list):
if isinstance(batch_list, BaseMessage):
batch_list = batch_list.SerializeToString()
return self._post('/batches', batch_list)
|
Sends a list of batches to the validator.
Args:
batch_list (:obj:`BatchList`): the list of batches
Returns:
dict: the json result data, as a dict
|
codesearchnet
|
def scroll(self, x, y):
assert isinstance(x, _INTTYPES), "x must be an integer, got %s" % repr(x)
assert isinstance(y, _INTTYPES), "y must be an integer, got %s" % repr(x)
def getSlide(x, length):
if x > 0:
srcx = 0
length -= x
elif x < 0:
srcx = abs(x)
x = 0
length -= srcx
else:
srcx = 0
return x, length, srcx
def getCover(x, length):
cover = (0, length)
uncover = None
if x > 0:
cover = (x, length - x)
uncover = (0, x)
elif x < 0:
x = abs(x)
cover = (0, length - x)
uncover = (length - x, x)
return cover, uncover
width, height = self.get_size()
if abs(x) >= width or abs(y) >= height:
return self.clear()
coverX, uncoverX = getCover(x, width)
coverY, uncoverY = getCover(y, height)
x, width, srcx = getSlide(x, width)
y, height, srcy = getSlide(y, height)
self.blit(self, x, y, width, height, srcx, srcy)
if uncoverX:
self.draw_rect(uncoverX[0], coverY[0], uncoverX[1], coverY[1],
0x20, self._fg, self._bg)
if uncoverY:
self.draw_rect(coverX[0], uncoverY[0], coverX[1], uncoverY[1],
0x20, self._fg, self._bg)
if uncoverX and uncoverY:
self.draw_rect(uncoverX[0], uncoverY[0], uncoverX[1], uncoverY[1],
0x20, self._fg, self._bg)
|
Scroll the contents of the console in the direction of x,y.
Uncovered areas will be cleared to the default background color.
Does not move the virtual cursor.
Args:
x (int): Distance to scroll along the x-axis.
y (int): Distance to scroll along the y-axis.
Returns:
Iterator[Tuple[int, int]]: An iterator over the (x, y) coordinates
of any tile uncovered after scrolling.
.. seealso:: :any:`set_colors`
|
juraj-google-style
|
def get_course_id(self, course_uuid):
course_data = self.get('courseguide/course?uuid={uuid}'.format(uuid=(course_uuid or self.course_id)), params=None)
try:
return course_data['response']['docs'][0]['id']
except KeyError:
failure_message = 'KeyError in get_course_id - got {0}'.format(course_data)
log.exception(failure_message)
raise PyLmodUnexpectedData(failure_message)
except TypeError:
failure_message = 'TypeError in get_course_id - got {0}'.format(course_data)
log.exception(failure_message)
raise PyLmodUnexpectedData(failure_message)
|
Get course id based on uuid.
Args:
course_uuid (str): The course uuid, e.g. /project/mitxdemosite.
Raises:
PyLmodUnexpectedData: No course data was returned.
requests.RequestException: An exception occurred while connecting.
Returns:
int: numeric course id
|
codesearchnet
|
def split_data(X, y, ratio=(0.8, 0.1, 0.1)):
assert(sum(ratio) == 1 and len(ratio) == 3)
X_train, X_rest, y_train, y_rest = train_test_split(
X, y, train_size=ratio[0])
X_val, X_test, y_val, y_test = train_test_split(
    X_rest, y_rest, train_size=ratio[1] / (ratio[1] + ratio[2]))
return X_train, X_val, X_test, y_train, y_val, y_test
|
Splits data into a training, validation, and test set.
Args:
X: text data
y: data labels
ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1)
Returns:
split data: X_train, X_val, X_test, y_train, y_val, y_test
|
juraj-google-style
|
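A hedged usage sketch; it assumes `train_test_split` comes from scikit-learn and that plain numpy arrays are used for `X` and `y`.

```python
import numpy as np
from sklearn.model_selection import train_test_split  # assumed dependency

X = np.arange(100).reshape(100, 1)
y = np.arange(100) % 2
X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)
print(len(X_train), len(X_val), len(X_test))  # 80 10 10
```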
class GraniteMoeMoE(nn.Module):
def __init__(self, config: GraniteMoeConfig):
super(GraniteMoeMoE, self).__init__()
self.input_size = config.hidden_size
self.hidden_size = config.intermediate_size
self.activation = ACT2FN[config.hidden_act]
self.input_linear = GraniteMoeParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2)
self.output_linear = GraniteMoeParallelExperts(config.num_local_experts, self.hidden_size, self.input_size)
self.router = GraniteMoeTopKGating(input_size=self.input_size, num_experts=config.num_local_experts, top_k=config.num_experts_per_tok)
def forward(self, layer_input):
bsz, length, emb_size = layer_input.size()
layer_input = layer_input.reshape(-1, emb_size)
_, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input)
expert_inputs = layer_input[batch_index]
hidden_states = self.input_linear(expert_inputs, expert_size)
chunked_hidden_states = hidden_states.chunk(2, dim=-1)
hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]
expert_outputs = self.output_linear(hidden_states, expert_size)
expert_outputs = expert_outputs * batch_gates[:, None]
zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)
layer_output = zeros.index_add(0, batch_index, expert_outputs)
layer_output = layer_output.view(bsz, length, self.input_size)
return (layer_output, router_logits)
|
A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.
Args:
config:
Configuration object with model hyperparameters.
|
github-repos
|
def _process_event(self, event):
if ((not event.is_directory) and (not event.src_path.endswith(BATCH_EXTENSION))):
self._logger.info('Detected file change: %s', event.src_path)
self._batch.process_file(event.src_path)
|
Process received events.
Process events received, applying normalization for those
events referencing a new or changed file and only if it's
not the result of a previous normalization.
Args:
event: Event to process.
|
codesearchnet
|
def number_of_records_per_hour(self, value=None):
if (value is not None):
try:
value = int(value)
except ValueError:
raise ValueError('value {} need to be of type int for field `number_of_records_per_hour`'.format(value))
self._number_of_records_per_hour = value
|
Corresponds to IDD Field `number_of_records_per_hour`
Args:
value (int): value for IDD Field `number_of_records_per_hour`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def _num_relevant(labels, k):
if k < 1:
raise ValueError(f'Invalid k={k}')
with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
if isinstance(labels, sparse_tensor.SparseTensor):
return math_ops.minimum(sets.set_size(labels), k, name=scope)
num_labels = math_ops.reduce_sum(array_ops.where_v2(math_ops.greater_equal(labels, 0), array_ops.ones_like(labels), array_ops.zeros_like(labels)), axis=-1)
return math_ops.minimum(num_labels, k, name=scope)
|
Computes number of relevant values for each row in labels.
For labels with shape [D1, ... DN, num_labels], this is the minimum of
`num_labels` and `k`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels].
k: Integer, k for @k metric.
Returns:
Integer `Tensor` of shape [D1, ... DN], where each value is the number of
relevant values for that row.
Raises:
ValueError: if inputs have invalid dtypes or values.
|
github-repos
|
def _validate_query_parameters(self, query, action_spec):
processed_params = []
for (param_name, param_value) in query.items():
if (param_name in action_spec['parameters'].keys()):
processed_params.append(param_name)
if (action_spec['parameters'][param_name]['type'] == 'array'):
if (not isinstance(param_value, list)):
return False
else:
for i in param_value:
if (not self.check_type(i, action_spec['parameters'][param_name]['items']['type'])):
return False
elif (not self.check_type(param_value, action_spec['parameters'][param_name]['type'])):
return False
if (not all(((param in processed_params) for (param, spec) in action_spec['parameters'].items() if ((spec['in'] == 'query') and ('required' in spec) and spec['required'])))):
return False
return True
|
Check the query parameter for the action specification.
Args:
query: query parameter to check.
action_spec: specification of the action.
Returns:
True if the query is valid.
|
codesearchnet
|
def _get_left_right_blocks(x):
(_, x_num_outer_h_blocks, x_num_outer_w_blocks, x_memory_flange_h,
x_memory_flange_w, depth) = common_layers.shape_list(x)
x_left_right_blocks = tf.slice(x,
[0, 1, 0, 0, 0, 0],
[-1, x_num_outer_h_blocks-2, -1, -1,
-1, -1])
num_blocks_h = (x_num_outer_h_blocks-2)
x_left_right_blocks = tf.reshape(x_left_right_blocks,
[-1,
num_blocks_h,
2, x_num_outer_w_blocks,
x_memory_flange_h,
x_memory_flange_w, depth])
x_left_right_blocks = tf.transpose(x_left_right_blocks,
[0, 1, 3, 2, 4, 5, 6])
x_left_right_blocks = tf.reshape(x_left_right_blocks,
[-1, num_blocks_h,
x_num_outer_w_blocks, 2*x_memory_flange_h,
x_memory_flange_w, depth])
x_left_blocks, x_right_blocks = _split_along_width(x_left_right_blocks)
return x_left_blocks, x_right_blocks
|
Helper function. Assumes that memory_flange is half of query sizes.
This function splits the tensor of width 'n' into two halves, where the
first half gets the width indices 0, 2, 4.. and the second half gets the
width indices 3, 5, ... We also fuse two blocks along the h dimension.
Args:
x: a 6-d tensor.
Returns:
x_left_blocks, x_right_blocks: Two 6-d tensors
|
juraj-google-style
|
def _from_record(data):
if isinstance(data, dict):
return Schema._from_dict_record(data)
elif isinstance(data, list):
return Schema._from_list_record(data)
else:
raise Exception('Cannot create a schema from record %s' % str(data))
|
Infer a BigQuery table schema from a list of fields or a dictionary. The type of the elements
is used. For a list, the field names are simply 'Column1', 'Column2', etc.
Args:
data: The list of fields or dictionary.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
|
juraj-google-style
|
def create_exponential(num_finite_buckets, growth_factor, scale):
if (num_finite_buckets <= 0):
raise ValueError(_BAD_NUM_FINITE_BUCKETS)
if (growth_factor <= 1.0):
raise ValueError((_BAD_FLOAT_ARG % (u'growth factor', 1.0)))
if (scale <= 0.0):
raise ValueError((_BAD_FLOAT_ARG % (u'scale', 0.0)))
return sc_messages.Distribution(bucketCounts=([0] * (num_finite_buckets + 2)), exponentialBuckets=sc_messages.ExponentialBuckets(numFiniteBuckets=num_finite_buckets, growthFactor=growth_factor, scale=scale))
|
Creates a new instance of distribution with exponential buckets
Args:
num_finite_buckets (int): initializes number of finite buckets
growth_factor (float): initializes the growth factor
scale (float): initializes the scale
Return:
:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`
Raises:
ValueError: if the args are invalid for creating an instance
|
codesearchnet
|
def add_http_endpoint(self, url, request_handler):
self.app.router.add_route('*', url, request_handler)
|
This method provides a programmatic way of adding individual routes
to the http server.
Args:
url (str): the url to be handled by the request_handler
request_handler (nautilus.network.RequestHandler): The request handler
|
juraj-google-style
|
def _get_radius(site):
if hasattr(site.specie, 'oxi_state'):
el = site.specie.element
oxi = site.specie.oxi_state
if oxi == 0:
return CrystalNN._get_default_radius(site)
elif oxi in el.ionic_radii:
return el.ionic_radii[oxi]
elif int(math.floor(oxi)) in el.ionic_radii and \
int(math.ceil(oxi)) in el.ionic_radii:
oxi_low = el.ionic_radii[int(math.floor(oxi))]
oxi_high = el.ionic_radii[int(math.ceil(oxi))]
x = oxi - int(math.floor(oxi))
return (1 - x) * oxi_low + x * oxi_high
elif oxi > 0 and el.average_cationic_radius > 0:
return el.average_cationic_radius
elif oxi < 0 and el.average_anionic_radius > 0:
return el.average_anionic_radius
else:
warnings.warn("CrystalNN: distance cutoffs set but no oxidation "
"states specified on sites! For better results, set "
"the site oxidation states in the structure.")
return 0
|
An internal method to get the expected radius for a site with
oxidation state.
Args:
site: (Site)
Returns:
Oxidation-state dependent radius: ionic, covalent, or atomic.
Returns 0 if no oxidation state or appropriate radius is found.
|
juraj-google-style
|
def bump(component='patch', exact=None):
old_ver = current()
if exact is None:
new_ver = _bump_version(old_ver, component)
else:
new_ver = exact
write(new_ver)
return old_ver, new_ver
|
Bump the given version component.
Args:
component (str):
What part of the version should be bumped. Can be one of:
- major
- minor
- patch
exact (str):
The exact version that should be set instead of bumping the current
one.
Returns:
tuple(str, str): A tuple of old and bumped version.
|
juraj-google-style
|
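`bump()` leans on the module's own `current()`, `_bump_version()` and `write()` helpers, so the snippet below only sketches the calling convention; the version strings are hypothetical.

```python
old, new = bump('minor')        # e.g. '1.2.3' -> '1.3.0'
old, new = bump(exact='2.0.0')  # write an exact version instead of bumping
print(old, new)
```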
def _parse_hostname(self):
value = 'localhost'
match = re.search('^hostname ([^\\s]+)$', self.config, re.M)
if match:
value = match.group(1)
return dict(hostname=value)
|
Parses the global config and returns the hostname value
Returns:
dict: The configured value for hostname. The returned dict
object is intended to be merged into the resource dict
|
codesearchnet
|
def distorted_inputs(data_dir, batch_size):
filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
for i in xrange(1, 6)]
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
filename_queue = tf.train.string_input_producer(filenames)
read_input = read_cifar10(filename_queue)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
distorted_image = tf.random_crop(reshaped_image, [height, width, 3])
distorted_image = tf.image.random_flip_left_right(distorted_image)
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
float_image = tf.image.per_image_standardization(distorted_image)
float_image.set_shape([height, width, 3])
read_input.label.set_shape([1])
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
min_fraction_of_examples_in_queue)
print ('Filling queue with %d CIFAR images before starting to train. '
'This will take a few minutes.' % min_queue_examples)
return _generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples, batch_size,
shuffle=True)
|
Construct distorted input for CIFAR training using the Reader ops.
Args:
data_dir: Path to the CIFAR-10 data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
|
juraj-google-style
|
def segs(self, word):
return [m.group('all') for m in self.seg_regex.finditer(word)]
|
Returns a list of segments from a word
Args:
word (unicode): input word as Unicode IPA string
Returns:
list: list of strings corresponding to segments found in `word`
|
juraj-google-style
|
def run_iperf_client(self, server_host, extra_args=''):
out = self.adb.shell('iperf3 -c %s %s' % (server_host, extra_args))
clean_out = str(out, 'utf-8').strip().split('\n')
if 'error' in clean_out[0].lower():
return (False, clean_out)
return (True, clean_out)
|
Starts iperf client on the device.
Returns status as True if the iperf client started successfully,
along with data-flow information as results.
Args:
server_host: Address of the iperf server.
extra_args: A string representing extra arguments for iperf client,
e.g. '-i 1 -t 30'.
Returns:
status: True if the iperf client started successfully.
results: A list of strings with the data-flow information.
|
github-repos
|
def batch_workflow_status(self, batch_workflow_id):
self.logger.debug(('Get status of batch workflow: ' + batch_workflow_id))
url = ('%(base_url)s/batch_workflows/%(batch_id)s' % {'base_url': self.base_url, 'batch_id': batch_workflow_id})
r = self.gbdx_connection.get(url)
return r.json()
|
Checks GBDX batch workflow status.
Args:
batch_workflow_id (str): Batch workflow id.
Returns:
Batch Workflow status (str).
|
codesearchnet
|
def rtt_write(self, buffer_index, data):
buf_size = len(data)
buf = (ctypes.c_ubyte * buf_size)(*bytearray(data))
bytes_written = self._dll.JLINK_RTTERMINAL_Write(buffer_index, buf, buf_size)
if (bytes_written < 0):
raise errors.JLinkRTTException(bytes_written)
return bytes_written
|
Writes data to the RTT buffer.
This method will write at most len(data) bytes to the specified RTT
buffer.
Args:
self (JLink): the ``JLink`` instance
buffer_index (int): the index of the RTT buffer to write to
data (list): the list of bytes to write to the RTT buffer
Returns:
The number of bytes successfully written to the RTT buffer.
Raises:
JLinkRTTException if the underlying JLINK_RTTERMINAL_Write call fails.
|
codesearchnet
|
def clean_decodes(ids, vocab_size, eos_id=1):
ret = []
for i in ids:
if i == eos_id:
break
if i >= vocab_size:
break
ret.append(int(i))
return ret
|
Stop at EOS or padding or OOV.
Args:
ids: a list of integers
vocab_size: an integer
eos_id: EOS id
Returns:
a list of integers
|
juraj-google-style
|
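A quick illustration: decoding stops at the first EOS id or the first out-of-vocabulary id.

```python
print(clean_decodes([5, 3, 7, 1, 9], vocab_size=8))  # [5, 3, 7]  (stops at eos_id=1)
print(clean_decodes([5, 12, 3], vocab_size=8))       # [5]        (12 is out of vocab)
```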
def _is_valid_netmask(self, netmask):
mask = netmask.split('.')
if len(mask) == 4:
try:
for x in mask:
if int(x) not in self._valid_mask_octets:
return False
except ValueError:
return False
for idx, y in enumerate(mask):
if idx > 0 and y > mask[idx - 1]:
return False
return True
try:
netmask = int(netmask)
except ValueError:
return False
return 0 <= netmask <= self._max_prefixlen
|
Verify that the netmask is valid.
Args:
netmask: A string, either a prefix or dotted decimal
netmask.
Returns:
A boolean, True if the prefix represents a valid IPv4
netmask.
|
juraj-google-style
|
def _cardinality_test_combinations():
def _reduce_cases_to_combinations(result, case):
name, dataset_fn, sharding_policy, expected_result = case
return result + combinations.combine(dataset_fn=combinations.NamedObject(name, dataset_fn), sharding_policy=sharding_policy, expected_result=expected_result)
def _cases_to_combinations(cases):
return functools.reduce(_reduce_cases_to_combinations, cases, [])
def _infinite_dataset_with_hint_shard():
return dataset_ops.Dataset.range(10).shard(distribute.SHARD_HINT, distribute.SHARD_HINT).repeat()
def _empty_dataset_with_hint_shard():
return dataset_ops.Dataset.range(0).shard(distribute.SHARD_HINT, distribute.SHARD_HINT)
v2_only_cases = [('NoShardingInfinite', lambda: dataset_ops.Dataset.range(10).repeat(), data_service_ops.ShardingPolicy.OFF, dataset_ops.INFINITE), ('DynamicShardingInfinite', lambda: dataset_ops.Dataset.range(5).repeat(), data_service_ops.ShardingPolicy.DYNAMIC, dataset_ops.INFINITE), ('DataShardingInfinite', lambda: dataset_ops.Dataset.range(10).repeat(), data_service_ops.ShardingPolicy.DATA, dataset_ops.INFINITE), ('NoShardingZero', lambda: dataset_ops.Dataset.range(0), data_service_ops.ShardingPolicy.OFF, 0), ('DynamicShardingZero', lambda: dataset_ops.Dataset.range(0), data_service_ops.ShardingPolicy.DYNAMIC, 0), ('DataShardingZero', lambda: dataset_ops.Dataset.range(0), data_service_ops.ShardingPolicy.DATA, 0), ('FileOrDataShardingZero', lambda: dataset_ops.Dataset.range(0), data_service_ops.ShardingPolicy.FILE_OR_DATA, 0), ('HintShardingZero', _empty_dataset_with_hint_shard, data_service_ops.ShardingPolicy.HINT, dataset_ops.UNKNOWN)]
v1_and_v2_cases = [('Finite', lambda: dataset_ops.Dataset.range(10), data_service_ops.ShardingPolicy.OFF, dataset_ops.UNKNOWN), ('FileOrDataShardingUnknown', lambda: dataset_ops.Dataset.range(10).repeat(), data_service_ops.ShardingPolicy.FILE_OR_DATA, dataset_ops.UNKNOWN), ('HintShardingUnknown', _infinite_dataset_with_hint_shard, data_service_ops.ShardingPolicy.HINT, dataset_ops.UNKNOWN)]
v2_only_combinations = combinations.times(combinations.combine(tf_api_version=2, mode=['eager', 'graph']), _cases_to_combinations(v2_only_cases))
v1_and_v2_combinations = combinations.times(combinations.combine(tf_api_version=[1, 2], mode=['eager', 'graph']), _cases_to_combinations(v1_and_v2_cases))
return v2_only_combinations + v1_and_v2_combinations
|
Generate test combinations for data service cardinality tests.
We test only V2 combinations for the infinite and 0 cases because the `map`
transformation for compression makes the cardinality unknown in TF1.
Returns:
test combinations.
|
github-repos
|
def segs_safe(self, word):
segs = []
while word:
m = self.seg_regex.match(word)
if m:
segs.append(m.group(1))
word = word[len(m.group(1)):]
else:
segs.append(word[0])
word = word[1:]
return segs
|
Return a list of segments (as strings) from a word
Characters that are not valid segments are included in the list as
individual characters.
Args:
word (unicode): word as an IPA string
Returns:
list: list of Unicode IPA strings corresponding to segments in
`word`
|
juraj-google-style
|
def generate(organization, package, destination):
gen = ResourceGenerator(organization, package)
tmp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
try:
tmp.write(gen.conf())
finally:
tmp.close()
shutil.copy(tmp.name, os.path.join(destination, 'conf.py'))
tmp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
try:
tmp.write(gen.makefile())
finally:
tmp.close()
shutil.copy(tmp.name, os.path.join(destination, 'Makefile'))
|
Generates the Sphinx configuration and Makefile.
Args:
organization (str): the organization name.
package (str): the package to be documented.
destination (str): the destination directory.
|
juraj-google-style
|
def _classify_segment(self, address, length):
end_address = ((address + length) - 1)
(_, start_seg) = self._find_address(address)
(_, end_seg) = self._find_address(end_address)
if ((start_seg is not None) or (end_seg is not None)):
raise ArgumentError('Overlapping segments are not yet supported', address=address, length=length)
return DisjointSegment()
|
Determine how a new data segment fits into our existing world
Params:
address (int): The address we wish to classify
length (int): The length of the segment
Returns:
int: One of SparseMemoryMap.prepended
|
codesearchnet
|
def abort(self, abort_message=''):
if (self.async and self._root_pipeline_key == self._pipeline_key and
not self.try_cancel()):
return False
else:
return self._context.begin_abort(
self._root_pipeline_key, abort_message=abort_message)
|
Mark the entire pipeline up to the root as aborted.
Note this should only be called from *outside* the context of a running
pipeline. Synchronous and generator pipelines should raise the 'Abort'
exception to cause this behavior during execution.
Args:
abort_message: Optional message explaining why the abort happened.
Returns:
True if the abort signal was sent successfully; False if the pipeline
could not be aborted for any reason.
|
juraj-google-style
|
def __init__(self, dims):
if dims is None:
self._dims = None
elif isinstance(dims, compat.bytes_or_text_types):
raise TypeError(
"A string has ambiguous TensorShape, please wrap in a "
"list or convert to an int: %s" % dims
)
elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
if dims.unknown_rank:
self._dims = None
else:
self._dims = [
as_dimension(dim.size if dim.size != -1 else None)
for dim in dims.dim
]
elif isinstance(dims, TensorShape):
self._dims = dims.dims
else:
try:
dims_iter = iter(dims)
except TypeError:
self._dims = [as_dimension(dims)]
else:
self._dims = [as_dimension(d) for d in dims_iter]
self._ndims = None
|
Creates a new TensorShape with the given dimensions.
Args:
dims: A list of Dimensions, or None if the shape is unspecified.
DEPRECATED: A single integer is treated as a singleton list.
Raises:
TypeError: If dims cannot be converted to a list of dimensions.
|
juraj-google-style
|
def Update(self, other, callback):
self.conditions.update(other.conditions)
self._Register(other.conditions, callback)
|
Adds existing triggers to this set, optionally rebuilding the registry.
Used to aggregate trigger methods from Probes to Methods to Checks.
Args:
other: Another Triggers object.
callback: Registers all the updated triggers to the specified function.
|
codesearchnet
|
def _crop(image, offset_height, offset_width, crop_height, crop_width):
original_shape = tf.shape(image)
rank_assertion = tf.Assert(tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.'])
with tf.control_dependencies([rank_assertion]):
cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
size_assertion = tf.Assert(tf.logical_and(tf.greater_equal(original_shape[0], crop_height), tf.greater_equal(original_shape[1], crop_width)), ['Crop size greater than the image size.'])
offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
with tf.control_dependencies([size_assertion]):
image = tf.slice(image, offsets, cropped_shape)
return tf.reshape(image, cropped_shape)
|
Crops the given image using the provided offsets and sizes.
Note that the method doesn't assume we know the input image size but it does
assume we know the input image rank.
Args:
image: `Tensor` image of shape [height, width, channels].
offset_height: `Tensor` indicating the height offset.
offset_width: `Tensor` indicating the width offset.
crop_height: the height of the cropped image.
crop_width: the width of the cropped image.
Returns:
the cropped (and resized) image.
Raises:
InvalidArgumentError: if the rank is not 3 or if the image dimensions are
less than the crop size.
|
codesearchnet
|
def on_skip(self, record):
|
A function that is executed upon a test being skipped.
Implementation is optional.
Args:
record: records.TestResultRecord, a copy of the test record for
this test, containing all information of the test execution
including exception objects.
|
github-repos
|
def check(self, dsm, **kwargs):
logger.debug(('Entities = %s' % dsm.entities))
messages = []
code_clean = True
threshold = kwargs.pop('threshold', 1)
(rows, _) = dsm.size
for i in range(0, rows):
if (dsm.data[i][0] > threshold):
messages.append(('Number of issues (%d) in module %s > threshold (%d)' % (dsm.data[i][0], dsm.entities[i], threshold)))
code_clean = False
return (code_clean, '\n'.join(messages))
|
Check code clean.
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to check.
Returns:
bool, str: True if code clean else False, messages
|
codesearchnet
|
def _get_sorted_methods(self, methods):
if not methods:
return methods
def _sorted_methods_comparison(method_info1, method_info2):
def _score_path(path):
score = 0
parts = path.split('/')
for part in parts:
score <<= 1
if not part or part[0] != '{':
score += 1
score <<= 31 - len(parts)
return score
path_score1 = _score_path(method_info1[1].get('path', ''))
path_score2 = _score_path(method_info2[1].get('path', ''))
if path_score1 != path_score2:
return path_score2 - path_score1
path_result = cmp(method_info1[1].get('path', ''),
method_info2[1].get('path', ''))
if path_result != 0:
return path_result
method_result = cmp(method_info1[1].get('httpMethod', ''),
method_info2[1].get('httpMethod', ''))
return method_result
return sorted(methods.items(), _sorted_methods_comparison)
|
Get a copy of 'methods' sorted the way they would be on the live server.
Args:
methods: JSON configuration of an API's methods.
Returns:
The same configuration with the methods sorted based on what order
they'll be checked by the server.
|
juraj-google-style
|
def clip_to_image_size(bounding_boxes, height=None, width=None, bounding_box_format='xyxy'):
box_utils = BoundingBox()
if backend_utils.in_tf_graph():
box_utils.backend.set_backend('tensorflow')
bounding_boxes = box_utils.clip_to_image_size(bounding_boxes, height=height, width=width, bounding_box_format=bounding_box_format)
box_utils.backend.reset()
return bounding_boxes
|
Clips bounding boxes to be within the image dimensions.
Args:
bounding_boxes: A dictionary with 'boxes' shape `(N, 4)` or
`(batch, N, 4)` and 'labels' shape `(N,)` or `(batch, N,)`.
height: Image height.
width: Image width.
bounding_box_format: The format of the input bounding boxes. Defaults to
`"xyxy"`.
Returns:
Clipped bounding boxes.
Example:
```python
boxes = {"boxes": np.array([[-10, -20, 150, 160], [50, 40, 70, 80]]),
"labels": np.array([0, 1])}
clipped_boxes = keras.utils.bounding_boxes.clip_to_image_size(
boxes, height=100, width=120,
)
# Output will have boxes clipped to the image boundaries, and labels
# potentially adjusted if the clipped area becomes zero
```
|
github-repos
|
def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
if type_ is not Any:
return "{}.{}[{}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
keyfunc_name,
)
return "{}.{}[{}]".format(cls.__module__, cls.__name__, keyfunc_name)
|
Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries.
keyfunc_name: The name of keyfunc.
Returns:
A string representing the class.
|
juraj-google-style
|
def _add_jump_node(self, ast_node, guards):
node = self._add_new_node(ast_node)
self.leaves = set()
self.finally_sections[node] = guards
return node
|
Grows the graph by adding a jump node.
Jump nodes are added to the current leaf set, and the leaf set becomes
empty. If the jump node is the last in a cond section, then it may be added
back to the leaf set by a separate mechanism.
Args:
ast_node: ast.AST
guards: Tuple[ast.AST, ...], the finally sections active for this node
Returns:
Node
|
github-repos
|
def translate_index(index_name):
uuid = SEARCH_INDEX_UUIDS.get(index_name.strip().lower())
if (not uuid):
try:
index_info = globus_sdk.SearchClient().get_index(index_name).data
if (not isinstance(index_info, dict)):
raise ValueError('Multiple UUIDs possible')
uuid = index_info.get('id', index_name)
except Exception:
uuid = index_name
return uuid
|
Translate a known Globus Search index into the index UUID.
The UUID is the proper way to access indices, and will eventually be the only way.
This method will return names it cannot disambiguate.
Arguments:
index_name (str): The name of the index.
Returns:
str: The UUID of the index. If the index is not known and is not unambiguous,
this will be the ``index_name`` unchanged instead.
|
codesearchnet
|
def guass(self, mu: float, sigma: float) -> float:
return float(lib.TCOD_random_get_gaussian_double(self.random_c, mu, sigma))
|
Return a random number using Gaussian distribution.
Args:
mu (float): The median returned value.
sigma (float): The standard deviation.
Returns:
float: A random float.
|
codesearchnet
|
def ping(request, timeout=_METADATA_DEFAULT_TIMEOUT, retry_count=3):
retries = 0
while (retries < retry_count):
try:
response = request(url=_METADATA_IP_ROOT, method='GET', headers=_METADATA_HEADERS, timeout=timeout)
metadata_flavor = response.headers.get(_METADATA_FLAVOR_HEADER)
return ((response.status == http_client.OK) and (metadata_flavor == _METADATA_FLAVOR_VALUE))
except exceptions.TransportError:
_LOGGER.info('Compute Engine Metadata server unavailable on attempt %s of %s', (retries + 1), retry_count)
retries += 1
return False
|
Checks to see if the metadata server is available.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
timeout (int): How long to wait for the metadata server to respond.
retry_count (int): How many times to attempt connecting to metadata
server using above timeout.
Returns:
bool: True if the metadata server is reachable, False otherwise.
|
codesearchnet
|
def _generate_malformed_query(data):
if isinstance(data, six.text_type):
query_str = data.replace(':', ' ')
else:
query_str = ' '.join([word.strip(':') for word in data.children])
return {'simple_query_string': {'fields': ['_all'], 'query': query_str}}
|
Generates a query on the ``_all`` field with all the query content.
Args:
data (six.text_type or list): The query in the format of ``six.text_type`` (when used from the parsing driver)
or ``list`` when used from within the ES visitor.
|
codesearchnet
|
def _handle_captcha(captcha_data, message=''):
from tempfile import NamedTemporaryFile
tmpf = NamedTemporaryFile(suffix='.png')
tmpf.write(captcha_data)
tmpf.flush()
captcha_text = input('Please take a look at the captcha image "%s" and provide the code:' % tmpf.name)
tmpf.close()
return captcha_text
|
Called when a captcha must be solved
Writes the image to a temporary file and asks the user to enter the code.
Args:
captcha_data: Bytestring of the PNG captcha image.
message: Optional. A message from Steam service.
Returns:
A string containing the solved captcha code.
|
juraj-google-style
|
def info(self, user_id):
resp = self._rtm_client.get('v1/user.info?user_id={}'.format(user_id))
if resp.is_fail():
raise RTMServiceError('Failed to get user information', resp)
return resp.data['result']
|
Gets user information by user id
Args:
user_id(int): the id of user
Returns:
User
Throws:
RTMServiceError when request failed
|
juraj-google-style
|
def __init__(self, options, queue_item):
self.options = options
self.queue_item = queue_item
|
Construct the HTMLSoupLinkScraper instance.
Args:
options (:class:`nyawc.Options`): The settings/options object.
queue_item (:class:`nyawc.QueueItem`): The queue item containing the response to scrape.
|
juraj-google-style
|
def clean(self, value):
if value is None and self._optional:
return None
if not isinstance(value, dict):
raise ValueError('value')
return {str(self._key.clean(k)):self._node.clean(v) for k,v in iteritems(value)}
|
Clean
Makes sure both the key and value are properly stored in their correct
representation
Arguments:
value {mixed} -- The value to clean
Raises:
ValueError
Returns:
mixed
|
juraj-google-style
|
def calculate_weights(correlation_matrix, min_wt):
np.fill_diagonal(correlation_matrix.values, np.nan)
correlation_matrix = correlation_matrix.clip(lower=0)
raw_weights = correlation_matrix.mean(axis=1)
raw_weights = raw_weights.clip(lower=min_wt)
weights = raw_weights / sum(raw_weights)
return raw_weights.round(rounding_precision), weights.round(rounding_precision)
|
Calculate a weight for each profile based on its correlation to other
replicates. Negative correlations are clipped to 0, and weights are clipped
to be min_wt at the least.
Args:
correlation_matrix (pandas df): Correlations between all replicates
min_wt (float): Minimum raw weight when calculating weighted average
Returns:
raw weights (pandas series): Mean correlation to other replicates
weights (pandas series): raw_weights normalized such that they add to 1
|
juraj-google-style
|
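A hedged sketch of the weighting helper; it assumes numpy and pandas are imported by the surrounding module and that `rounding_precision` is a module-level constant (e.g. 4) in the original source.

```python
import pandas as pd

# Hypothetical 3-replicate correlation matrix.
corr = pd.DataFrame([[1.0, 0.9, -0.2],
                     [0.9, 1.0, 0.3],
                     [-0.2, 0.3, 1.0]],
                    index=['r1', 'r2', 'r3'], columns=['r1', 'r2', 'r3'])
raw_weights, weights = calculate_weights(corr, min_wt=0.01)
print(raw_weights.tolist())  # [0.45, 0.6, 0.15]
print(weights.tolist())      # [0.375, 0.5, 0.125] -- sums to 1
```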
def allow_inbound_connection(self):
LOGGER.debug('Determining whether inbound connection should be allowed. num connections: %s max %s', len(self._connections), self._max_incoming_connections)
return (self._max_incoming_connections >= len(self._connections))
|
Determines if an additional incoming network connection
should be permitted.
Returns:
bool
|
codesearchnet
|
def shannon_entropy(time_series):
if not isinstance(time_series, str):
time_series = list(time_series)
data_set = list(set(time_series))
freq_list = []
for entry in data_set:
counter = 0.
for i in time_series:
if i == entry:
counter += 1
freq_list.append(float(counter) / len(time_series))
ent = 0.0
for freq in freq_list:
ent += freq * np.log2(freq)
ent = -ent
return ent
|
Return the Shannon Entropy of the sample data.
Args:
time_series: Vector or string of the sample data
Returns:
The Shannon Entropy as float value
|
juraj-google-style
|
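Two quick checks of the entropy helper (numpy is assumed imported as `np` by the surrounding module):

```python
print(shannon_entropy([1, 1, 2, 2]))  # 1.0  (two equally likely symbols)
print(shannon_entropy('aab'))         # ~0.918 bits
```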
def bytes_to_readable_str(num_bytes, include_b=False):
if (num_bytes is None):
return str(num_bytes)
if (num_bytes < 1024):
result = ('%d' % num_bytes)
elif (num_bytes < 1048576):
result = ('%.2fk' % (num_bytes / float((1 << 10))))
elif (num_bytes < 1073741824):
result = ('%.2fM' % (num_bytes / float((1 << 20))))
else:
result = ('%.2fG' % (num_bytes / float((1 << 30))))
if include_b:
result += 'B'
return result
|
Generate a human-readable string representing number of bytes.
The units B, kB, MB and GB are used.
Args:
num_bytes: (`int` or None) Number of bytes.
include_b: (`bool`) Include the letter B at the end of the unit.
Returns:
(`str`) A string representing the number of bytes in a human-readable way,
including a unit at the end.
|
codesearchnet
|
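Example outputs of the formatter above:

```python
print(bytes_to_readable_str(500))                    # 500
print(bytes_to_readable_str(2048, include_b=True))   # 2.00kB
print(bytes_to_readable_str(3 * 1024 ** 3))          # 3.00G
```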
def write_temp_bird_conf(dummy_ip_prefix, config_file, variable_name, prefixes):
log = logging.getLogger(PROGRAM_NAME)
comment = '
tm_file = os.path.join(os.path.dirname(config_file), str(time.time()))
log.debug('going to write to %s', tm_file)
try:
with open(tm_file, 'w') as tmpf:
tmpf.write('
tmpf.write('{c}\n'.format(c=comment))
tmpf.write('define {n} =\n'.format(n=variable_name))
tmpf.write('{s}[\n'.format(s=(4 * ' ')))
tmpf.write(',\n'.join([((' ' * 8) + n) for n in prefixes]))
tmpf.write('\n{s}];\n'.format(s=(4 * ' ')))
except OSError as error:
log.critical('failed to write temporary file %s: %s. This is a FATAL error, thus exiting main program', tm_file, error)
sys.exit(1)
else:
return tm_file
|
Write in a temporary file the list of IP-Prefixes.
A failure to create and write the temporary file will exit main program.
Arguments:
dummy_ip_prefix (str): The dummy IP prefix, which must be always
config_file (str): The file name of bird configuration
variable_name (str): The name of the variable set in bird configuration
prefixes (list): The list of IP-Prefixes to write
Returns:
The filename of the temporary file
|
codesearchnet
|
def _resolve_attribute_match(self, match):
if match.group(1) == 'cluster':
return str(self.cluster_id)
return self.get(match.group(1), match.group(0))
|
Replaces a reference to an attribute with the value of the attribute.
Args:
match (re.match object): A match object containing a match to a reference to an attribute.
|
juraj-google-style
|
def render_secrets(config_path, secret_path):
with open(secret_path, 'r') as s_fh:
secret_ini = anyconfig.load(s_fh, ac_parser='ini')
with open(config_path, 'r') as c_fh:
raw_cfg = c_fh.read()
rendered_cfg = anytemplate.renders(raw_cfg, secret_ini, at_engine='jinja2')
p_config = ProsperConfig(config_path)
local_config = configparser.ConfigParser()
local_config.optionxform = str
local_config.read_string(rendered_cfg)
p_config.local_config = local_config
return p_config
|
Combine a jinja template with a secret .ini file.
Args:
config_path (str): path to .cfg file with jinja templating
secret_path (str): path to .ini-like secrets file
Returns:
ProsperConfig: rendered configuration object
|
codesearchnet
|
def flatten(dictionary, separator='.', prefix=''):
new_dict = {}
for key, value in dictionary.items():
new_key = prefix + separator + key if prefix else key
if isinstance(value, collections.MutableMapping):
new_dict.update(flatten(value, separator, new_key))
elif isinstance(value, list):
new_value = []
for item in value:
if isinstance(item, collections.MutableMapping):
new_value.append(flatten(item, separator, new_key))
else:
new_value.append(item)
new_dict[new_key] = new_value
else:
new_dict[new_key] = value
return new_dict
|
Flatten the dictionary; keys are separated by `separator`.
Arguments:
dictionary {dict} -- The dictionary to be flattened.
Keyword Arguments:
separator {str} -- The separator to use (default is '.'). Keys that
collide after flattening will overwrite each other.
prefix {str} -- Used for recursive calls.
Returns:
dict -- The flattened dictionary.
|
juraj-google-style
|
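A small example of the flattener; dictionaries nested inside lists are flattened in place while the list itself is kept. (On Python 3.10+ `MutableMapping` must be imported from `collections.abc`.)

```python
nested = {'a': {'b': 1, 'c': {'d': 2}}, 'e': [{'f': 3}, 4]}
print(flatten(nested))
# {'a.b': 1, 'a.c.d': 2, 'e': [{'e.f': 3}, 4]}
```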
def get_mpkg_ids(mpkg):
mpkg = _quote(mpkg)
package_infos = []
base_path = os.path.dirname(mpkg)
cmd = 'find {0} -name *.pkg'.format(base_path)
out = __salt__['cmd.run'](cmd, python_shell=True)
pkg_files = out.split('\n')
for p in pkg_files:
package_infos.extend(get_pkg_id(p))
return package_infos
|
Attempt to get the package IDs from a mounted .mpkg file
Args:
mpkg (str): The location of the mounted mpkg file
Returns:
list: List of package IDs
CLI Example:
.. code-block:: bash
salt '*' macpackage.get_mpkg_ids /dev/disk2
|
juraj-google-style
|
def _join_lines(lines):
if not lines:
return None
started = False
group_texts = []
group_lines = []
for line in lines:
stripped_line = line.strip()
if stripped_line:
started = True
group_lines.append(stripped_line)
elif started:
group_text = ' '.join(group_lines)
group_texts.append(group_text)
group_lines = []
if group_lines:
group_text = ' '.join(group_lines)
group_texts.append(group_text)
return '\n\n'.join(group_texts)
|
Joins lines with the appropriate connective whitespace.
This puts a single space between consecutive lines, unless there's a blank
line, in which case a full blank line is included.
Args:
lines: A list of lines to join.
Returns:
A string, the lines joined together.
|
github-repos
|
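For instance, consecutive non-blank lines are merged into one paragraph and blank lines start a new one:

```python
lines = ['This is the first', 'paragraph of text.', '', 'And this is the', 'second one.']
print(_join_lines(lines))
# This is the first paragraph of text.
#
# And this is the second one.
```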
def _makedirs(self, path):
try:
oldmask = os.umask(0)
os.makedirs(path, self._conf['dmode'])
os.umask(oldmask)
except OSError as e:
if(e.errno == errno.EACCES):
raise Exception('not sufficient permissions to write on fsdb folder: "{0}"'.format(path))
elif(e.errno == errno.EEXIST):
fstat = os.stat(path)
if not stat.S_ISDIR(fstat.st_mode):
raise Exception('fsdb folder already exists but it is not a regular folder: "{0}"'.format(path))
elif not os.access(path, os.R_OK and os.W_OK):
raise Exception('not sufficient permissions to write on fsdb folder: "{0}"'.format(path))
else:
raise e
|
Make folders recursively for the given path and
check read and write permission on the path
Args:
path -- path to the leaf folder
|
juraj-google-style
|
def plot_job_history(jobs, interval='year'):
def get_date(job):
'Returns a datetime object from a IBMQJob instance.\n\n Args:\n job (IBMQJob): A job.\n\n Returns:\n dt: A datetime object.\n '
return datetime.datetime.strptime(job.creation_date(), '%Y-%m-%dT%H:%M:%S.%fZ')
current_time = datetime.datetime.now()
if (interval == 'year'):
bins = [(current_time - datetime.timedelta(days=((k * 365) / 12))) for k in range(12)]
elif (interval == 'month'):
bins = [(current_time - datetime.timedelta(days=k)) for k in range(30)]
elif (interval == 'week'):
bins = [(current_time - datetime.timedelta(days=k)) for k in range(7)]
binned_jobs = ([0] * len(bins))
if (interval == 'year'):
for job in jobs:
for (ind, dat) in enumerate(bins):
date = get_date(job)
if (date.month == dat.month):
binned_jobs[ind] += 1
break
else:
continue
else:
for job in jobs:
for (ind, dat) in enumerate(bins):
date = get_date(job)
if ((date.day == dat.day) and (date.month == dat.month)):
binned_jobs[ind] += 1
break
else:
continue
nz_bins = []
nz_idx = []
for (ind, val) in enumerate(binned_jobs):
if (val != 0):
nz_idx.append(ind)
nz_bins.append(val)
total_jobs = sum(binned_jobs)
colors = ['
if (interval == 'year'):
labels = ['{}-{}'.format(str(bins[b].year)[2:], bins[b].month) for b in nz_idx]
else:
labels = ['{}-{}'.format(bins[b].month, bins[b].day) for b in nz_idx]
(fig, ax) = plt.subplots(1, 1, figsize=(5, 5))
ax.pie(nz_bins[::(- 1)], labels=labels, colors=colors, textprops={'fontsize': 14}, rotatelabels=True, counterclock=False)
ax.add_artist(Circle((0, 0), 0.7, color='white', zorder=1))
ax.text(0, 0, total_jobs, horizontalalignment='center', verticalalignment='center', fontsize=26)
fig.tight_layout()
return fig
|
Plots the job history of the user from the given list of jobs.
Args:
jobs (list): A list of jobs with type IBMQjob.
interval (str): Interval over which to examine.
Returns:
fig: A Matplotlib figure instance.
|
codesearchnet
|
def edges(self, tail_head_iter):
edge = self._edge_plain
quote = self._quote_edge
lines = (edge % (quote(t), quote(h)) for t, h in tail_head_iter)
self.body.extend(lines)
|
Create a bunch of edges.
Args:
tail_head_iter: Iterable of ``(tail_name, head_name)`` pairs.
|
juraj-google-style
|
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
if not self._inbound_nodes:
raise RuntimeError('The layer has never been called and thus has no defined ' + attr_name + '.')
if not len(self._inbound_nodes) > node_index:
raise ValueError('Asked to get ' + attr_name + ' at node ' + str(node_index) + ', but the layer has only ' + str(len(self._inbound_nodes)) + ' inbound nodes.')
values = getattr(self._inbound_nodes[node_index], attr)
if isinstance(values, list) and len(values) == 1:
return values[0]
else:
return values
|
Private utility to retrieve an attribute (e.g. inputs) from a node.
This is used to implement the methods:
- get_input_shape_at
- get_output_shape_at
- get_input_at
etc...
Args:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
attr_name: Human-readable attribute name, for error messages.
Returns:
The layer's attribute `attr` at the node of index `node_index`.
Raises:
RuntimeError: If the layer has no inbound nodes, or if called in Eager
mode.
ValueError: If the index provided does not match any node.
|
github-repos
|
def add_paths_argument(cls, group, argname, dest=None, help_=None):
prefixed = ('%s-%s' % (cls.argument_prefix, argname))
if (dest is None):
dest = prefixed.replace('-', '_')
final_dest = dest[(len(cls.argument_prefix) + 1):]
else:
final_dest = dest
dest = ('%s_%s' % (cls.argument_prefix, dest))
group.add_argument(('--%s' % prefixed), action='store', nargs='+', dest=dest, help=help_)
cls.paths_arguments[dest] = final_dest
|
Subclasses may call this to expose a paths argument.
Args:
group: arparse.ArgumentGroup, the extension argument group
argname: str, the name of the argument, will be namespaced.
dest: str, similar to the `dest` argument of
`argparse.ArgumentParser.add_argument`, will be namespaced.
help_: str, similar to the `help` argument of
`argparse.ArgumentParser.add_argument`.
|
codesearchnet
|
def prepare_words_list(wanted_words):
return [SILENCE_LABEL, UNKNOWN_WORD_LABEL] + wanted_words
|
Prepends common tokens to the custom word list.
Args:
wanted_words: List of strings containing the custom words.
Returns:
List with the standard silence and unknown tokens added.
|
github-repos
|
def assignees(self, assignee=None, resource_id=None):
if (resource_id is not None):
self.resource_id(resource_id)
self._request_uri = '{}/assignees'.format(self._request_uri)
if (assignee is not None):
self._request_uri = '{}/{}'.format(self._request_uri, assignee)
|
Add an assignee to a Task
GET: /v2/tasks/{uniqueId}/assignees
GET: /v2/tasks/{uniqueId}/assignees/{assigneeId}
POST: /v2/tasks/{uniqueId}/assignees/{assigneeId}
DELETE: /v2/tasks/{uniqueId}/assignees/{assigneeId}
Args:
assignee (Optional [string]): The assignee name.
resource_id (Optional [string]): The task ID.
|
codesearchnet
|
def __init__(
self, name, data_type_definition, aliases=None, data_type=None,
description=None, urls=None):
super(StringDefinition, self).__init__(
name, data_type_definition, aliases=aliases, data_type=data_type,
description=description, urls=urls)
self.encoding = 'ascii'
|
Initializes a string data type definition.
Args:
name (str): name.
data_type_definition (DataTypeDefinition): string element data type
definition.
aliases (Optional[list[str]]): aliases.
data_type (Optional[str]): name of the string element data type.
description (Optional[str]): description.
urls (Optional[list[str]]): URLs.
|
juraj-google-style
|
def get_root_dir_with_all_resources():
script_dir = get_data_files_path()
directories = [script_dir]
data_files_dir = ''
while True:
candidate_dir = directories[-1]
current_directory = _os.path.basename(candidate_dir)
if '.runfiles' in current_directory:
if len(directories) > 1:
data_files_dir = directories[-2]
break
else:
new_candidate_dir = _os.path.dirname(candidate_dir)
if new_candidate_dir == candidate_dir:
break
else:
directories.append(new_candidate_dir)
return data_files_dir or script_dir
|
Get a root directory containing all the data attributes in the build rule.
Returns:
The path to the specified file present in the data attribute of py_test
or py_binary. Falls back to returning the same as get_data_files_path if it
fails to detect a bazel runfiles directory.
|
github-repos
|
def _send(self, line):
if (not line.endswith('\r\n')):
if line.endswith('\n'):
logger.debug('Fixing bare LF before sending data to socket')
line = (line[0:(- 1)] + '\r\n')
else:
logger.debug('Fixing missing CRLF before sending data to socket')
line = (line + '\r\n')
logger.debug(('Client sent: ' + line.rstrip()))
self._socket.send(line)
|
Write a line of data to the server.
Args:
line -- A single line of data to write to the socket.
|
codesearchnet
|
def filter_embeddings(embeddings, vocab, dim):
if (not isinstance(embeddings, dict)):
return
_embeddings = np.zeros([len(vocab), dim])
for word in vocab:
if (word in embeddings):
word_idx = vocab[word]
_embeddings[word_idx] = embeddings[word]
return _embeddings
|
Loads word vectors in numpy array.
Args:
embeddings (dict): a dictionary of numpy arrays.
vocab (dict): word_index lookup table.
dim (int): dimension of the word vectors.
Returns:
numpy array: an array of word embeddings.
|
codesearchnet
|
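A hedged example of filtering pretrained vectors down to a vocabulary; rows for out-of-vocabulary words stay zero.

```python
import numpy as np

embeddings = {'hello': np.ones(3), 'world': np.full(3, 2.0)}
vocab = {'hello': 0, 'world': 1, '<unk>': 2}
print(filter_embeddings(embeddings, vocab, dim=3))
# [[1. 1. 1.]
#  [2. 2. 2.]
#  [0. 0. 0.]]
```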
def run_feature_selection(self, df_data, target, idx=0, **kwargs):
list_features = list(df_data.columns.values)
list_features.remove(target)
df_target = pd.DataFrame(df_data[target], columns=[target])
df_features = df_data[list_features]
return self.predict_features(df_features, df_target, idx=idx, **kwargs)
|
Run feature selection for one node: wrapper around
``self.predict_features``.
Args:
df_data (pandas.DataFrame): All the observational data
target (str): Name of the target variable
idx (int): (optional) For printing purposes
Returns:
list: scores of each feature relatively to the target
|
juraj-google-style
|
def push_doc(self, document):
msg = self._protocol.create('PUSH-DOC', document)
reply = self._send_message_wait_for_reply(msg)
if (reply is None):
raise RuntimeError('Connection to server was lost')
elif (reply.header['msgtype'] == 'ERROR'):
raise RuntimeError(('Failed to push document: ' + reply.content['text']))
else:
return reply
|
Push a document to the server, overwriting any existing server-side doc.
Args:
document : (Document)
A Document to push to the server
Returns:
The server reply
|
codesearchnet
|
def _postprocess_non_flat_outputs(outputs: Any, need_spmd_partitioning: bool) -> Tuple[List[Optional[core_types.Tensor]], List[ops.Operation], List[Any]]:
flat_outputs = nest.flatten(outputs, expand_composites=True)
for i, o in enumerate(flat_outputs):
if o is None:
flat_outputs[i] = None
continue
if isinstance(o, ops.Operation):
raise ValueError(f'tpu.rewrite does not support Operation as return value in non-flat output structure. You can set returned Operations as control dependencies of returned Tensors so Operations are triggered when Tensors are evaluated. Operation found: "{o.name}"')
try:
o = ops.convert_to_tensor(o)
except Exception as e:
raise ValueError(f'TPU function return values must all either be Operations or convertible to Tensors. Got error: "{e}"')
if need_spmd_partitioning:
o = array_ops.identity(o)
o.op._set_attr('_tpu_output_identity', attr_value_pb2.AttrValue(b=True))
flat_outputs[i] = array_ops.identity(o)
else:
with ops.device(o.device if o.device else core(0)):
o = array_ops.identity(o)
o.op._set_attr('_tpu_output_identity', attr_value_pb2.AttrValue(b=True))
flat_outputs[i] = array_ops.identity(o)
return (flat_outputs, [], outputs)
|
Validates non-flat outputs, adds back device assignments and other attrs.
Args:
outputs: Output from `computation` inside `tpu.rewrite`.
need_spmd_partitioning: Whether XLA SPMD partitioning is needed.
Returns:
- Tensors extracted from outputs.
- An empty Operations list because Operations are not allowed in non-flat
outputs.
- A pack template for use with nest.pack_sequence_as to pack the tensors.
|
github-repos
|
def pull_doc(self, document):
msg = self._protocol.create('PULL-DOC-REQ')
reply = self._send_message_wait_for_reply(msg)
if (reply is None):
raise RuntimeError('Connection to server was lost')
elif (reply.header['msgtype'] == 'ERROR'):
raise RuntimeError(('Failed to pull document: ' + reply.content['text']))
else:
reply.push_to_document(document)
|
Pull a document from the server, overwriting the passed-in document
Args:
document : (Document)
The document to overwrite with server content.
Returns:
None
|
codesearchnet
|
def reorder(
miz_file_path: typing.Union[str, Path],
target_dir: typing.Union[str, Path],
skip_options_file: bool,
):
miz_file_path = elib.path.ensure_file(miz_file_path)
target_dir_path = elib.path.ensure_dir(target_dir, must_exist=False)
LOGGER.debug('re-ordering miz file: %s', miz_file_path)
LOGGER.debug('destination folder: %s', target_dir)
LOGGER.debug('%s option file', "skipping" if skip_options_file else "including")
if not target_dir_path.exists():
LOGGER.debug('creating directory %s', target_dir_path)
target_dir_path.mkdir(exist_ok=True)
with Miz(miz_file_path, overwrite=True) as miz_:
def mirror_dir(src: Path, dst: Path):
LOGGER.debug('mirroring: %s -> %s', src, dst)
LOGGER.debug('comparing directories')
diff_ = dircmp(str(src), str(dst), ignore)
diff_list = diff_.left_only + diff_.diff_files
LOGGER.debug('differences: %s', diff_list)
for __diff in diff_list:
source = Path(diff_.left, __diff)
target = Path(diff_.right, __diff)
LOGGER.debug('looking at: %s', __diff)
if source.is_dir():
LOGGER.debug('isdir: %s', __diff)
if not target.exists():
LOGGER.debug('creating: %s', __diff)
target.mkdir()
mirror_dir(source, target)
else:
LOGGER.debug('copying: %s', __diff)
shutil.copy2(str(source), diff_.right)
for sub in diff_.subdirs.values():
mirror_dir(Path(sub.left), Path(sub.right))
miz_._encode()
if skip_options_file:
ignore = ['options']
else:
ignore = []
mirror_dir(Path(miz_.temp_dir), target_dir_path)
|
Re-orders a miz file into a folder (flattened)
Args:
miz_file_path: source miz file
target_dir: folder to flatten the content into
skip_options_file: do not re-order option file
|
juraj-google-style
|
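A self-contained sketch of the `mirror_dir` pattern used inside `reorder`, copying only files that are new or differ, built on the standard library `filecmp.dircmp`; the directory paths in the commented call are placeholders.
import shutil
from filecmp import dircmp
from pathlib import Path

def mirror_dir(src: Path, dst: Path, ignore=None):
    # Copy entries that exist only in src or whose files differ, then recurse into common subdirs.
    diff = dircmp(str(src), str(dst), ignore or [])
    for name in diff.left_only + diff.diff_files:
        source, target = Path(diff.left, name), Path(diff.right, name)
        if source.is_dir():
            target.mkdir(exist_ok=True)
            mirror_dir(source, target, ignore)
        else:
            shutil.copy2(str(source), diff.right)
    for sub in diff.subdirs.values():
        mirror_dir(Path(sub.left), Path(sub.right), ignore)

# mirror_dir(Path('extracted_miz'), Path('target_dir'), ignore=['options'])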
def remove_time_limit_wrapper(env):
if isinstance(env, gym.wrappers.TimeLimit):
env = env.env
env_ = env
while isinstance(env_, gym.Wrapper):
if isinstance(env_, gym.wrappers.TimeLimit):
raise ValueError('Can remove only top-level TimeLimit gym.Wrapper.')
env_ = env_.env
return env
|
Removes top level TimeLimit Wrapper.
Removes the TimeLimit Wrapper from the top level if it exists; throws an error if any
other TimeLimit Wrapper is present in the stack.
Args:
env: environment
Returns:
the env with removed time limit wrapper.
|
codesearchnet
|
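A hedged usage sketch for `remove_time_limit_wrapper` above, assuming the function is in scope and the classic `gym` API is available; the wrapper is built explicitly so the example does not depend on what `gym.make` wraps by default.
import gym

base = gym.make('CartPole-v1').unwrapped              # raw env without wrappers
wrapped = gym.wrappers.TimeLimit(base, max_episode_steps=200)
unwrapped = remove_time_limit_wrapper(wrapped)
print(unwrapped is base)                               # True: the top-level TimeLimit was stripped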
def format_config(sensor_graph):
cmdfile = CommandFile('Config Variables', '1.0')
for slot in sorted(sensor_graph.config_database, key=(lambda x: x.encode())):
for (conf_var, conf_def) in sorted(sensor_graph.config_database[slot].items()):
(conf_type, conf_val) = conf_def
if (conf_type == 'binary'):
conf_val = ('hex:' + hexlify(conf_val))
cmdfile.add('set_variable', slot, conf_var, conf_type, conf_val)
return cmdfile.dump()
|
Extract the config variables from this sensor graph in ASCII format.
Args:
sensor_graph (SensorGraph): the sensor graph that we want to format
Returns:
str: The ascii output lines concatenated as a single string
|
codesearchnet
|
def add_keyed(self, value, key, date=None, return_value=False):
return self.add(value, date, return_value, key)
|
Add keyed metrics data to collection.
Args:
value (str): The value of the metric.
key (str): The key value for keyed metrics.
date (str, optional): The optional date of the metric.
return_value (bool, default:False): Tell the API to return the updated metric value.
Return:
dict: If return_value is True a dict with the current value for the time period
is returned.
|
juraj-google-style
|
def _GetAttributeNames(self, data_type_definition):
if not data_type_definition:
raise errors.FormatError('Missing data type definition')
attribute_names = []
for member_definition in data_type_definition.members:
attribute_names.append(member_definition.name)
return attribute_names
|
Determines the attribute (or field) names of the members.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Returns:
list[str]: attribute names.
Raises:
FormatError: if the attribute names cannot be determined from the data
type definition.
|
juraj-google-style
|
def applyFeatures(self, new_features, conflict='error', missing='error'):
OPTIONS = ['error', 'ignore', 'me', 'other']
assert (missing in OPTIONS), 'Invalid value in `missing`.'
assert (conflict in OPTIONS), 'Invalid value in `conflict`.'
self0 = self.clone()
if isinstance(new_features, Features):
new_features = new_features.features
for f in new_features:
self0.addFeature(f, conflict=conflict, missing=missing)
self.props = self0.props
return self
|
Apply the constraints of the features passed to this instance.
.. warning::
Only Feature instances are considered, that is, SoftFeatures will
not be considered.
Args:
- new_features(Features): features to apply
- conflict(str): if a property doesn't have compatible values/constraints, do:
- ``"error"``: raise exception.
- ``"ignore"``: nothing.
- ``"me"``: preserve the original value.
- ``"other"``: set like the passed feature.
- missing(str): if a property is missing in some side, do:
- ``"error"``: raise exception.
- ``"ignore"``: nothing.
- ``"me"``: preserve the original value.
- ``"other"``: set like the passed feature.
|
codesearchnet
|
def quad_genz_keister_18(order):
order = sorted(GENZ_KEISTER_18.keys())[order]
abscissas, weights = GENZ_KEISTER_18[order]
abscissas = numpy.array(abscissas)
weights = numpy.array(weights)
weights /= numpy.sum(weights)
abscissas *= numpy.sqrt(2)
return abscissas, weights
|
Hermite Genz-Keister 18 rule.
Args:
order (int):
The quadrature order. Must be in the interval (0, 8).
Returns:
(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
Abscissas and weights
Examples:
>>> abscissas, weights = quad_genz_keister_18(1)
>>> print(numpy.around(abscissas, 4))
[-1.7321 0. 1.7321]
>>> print(numpy.around(weights, 4))
[0.1667 0.6667 0.1667]
|
juraj-google-style
|
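An illustrative check of the returned rule, assuming `quad_genz_keister_18` and its `GENZ_KEISTER_18` table are in scope: because the nodes are scaled by sqrt(2) and the weights are normalized, the rule integrates against the standard normal density, so the second moment should come out close to 1.
import numpy

abscissas, weights = quad_genz_keister_18(1)
print(numpy.around(numpy.sum(weights * abscissas**2), 4))  # expected: approximately 1.0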
def _parse_config(self, requires_cfg=True):
if (len(self.config_paths) > 0):
try:
self._find_config()
except BisonError:
if (not requires_cfg):
return
raise
try:
with open(self.config_file, 'r') as f:
parsed = self._fmt_to_parser[self.config_format](f)
except Exception as e:
raise BisonError('Failed to parse config file: {}'.format(self.config_file)) from e
self._full_config = None
self._config = parsed
|
Parse the configuration file, if one is configured, and add it to
the `Bison` state.
Args:
requires_cfg (bool): Specify whether or not parsing should fail
if a config file is not found. (default: True)
|
codesearchnet
|
def _is_default_hook(default_hook, hook):
if (not hasattr(default_hook, '__call__')):
raise TypeError('Default hooks for ndb.model.Model must be callable')
if (not hasattr(hook, '__call__')):
raise TypeError('Hooks must be callable')
return (default_hook.im_func is hook.im_func)
|
Checks whether a specific hook is in its default state.
Args:
cls: A ndb.model.Model class.
default_hook: Callable specified by ndb internally (do not override).
hook: The hook defined by a model class using _post_*_hook.
Raises:
TypeError if either the default hook or the tested hook are not callable.
|
codesearchnet
|
def CreateDefaultPartition(client, ad_group_id):
ad_group_criterion_service = client.GetService('AdGroupCriterionService', version='v201809')
operations = [{'operator': 'ADD', 'operand': {'xsi_type': 'BiddableAdGroupCriterion', 'adGroupId': ad_group_id, 'criterion': {'xsi_type': 'ProductPartition', 'partitionType': 'UNIT'}, 'biddingStrategyConfiguration': {'bids': [{'xsi_type': 'CpcBid', 'bid': {'microAmount': 500000}}]}}}]
ad_group_criterion = ad_group_criterion_service.mutate(operations)['value'][0]
print(('Ad group criterion with ID "%d" in ad group with ID "%d" was added.' % (ad_group_criterion['criterion']['id'], ad_group_criterion['adGroupId'])))
|
Creates a default partition.
Args:
client: an AdWordsClient instance.
ad_group_id: an integer ID for an ad group.
|
codesearchnet
|
def __init__(self, queue_property=None, length=None):
super().__init__()
self.queue_property = queue_property
self.length = length
|
Create a QueuePropHeader with the optional parameters below.
Args:
queue_property (~pyof.v0x04.common.queue.QueueProperties):
The queue property.
length (int): Length of property, including this header.
|
juraj-google-style
|
def darken(self, amount):
hsl = self.to_hsl()
hsl.l = self.clamp(hsl.l - amount)
return self.from_hsl(hsl)
|
Darken (reduce the luminance) of this color.
Args:
amount (float) :
Amount to reduce the luminance by (clamped above zero)
Returns:
Color
|
juraj-google-style
|
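A standalone sketch of the same darken-by-lightness idea using only the standard library `colorsys` (HLS rather than the class's HSL helpers); the `darken_rgb` helper is hypothetical.
import colorsys

def darken_rgb(r, g, b, amount):
    # Convert to HLS, reduce lightness, clamp at zero, convert back; channels are floats in [0, 1].
    h, l, s = colorsys.rgb_to_hls(r, g, b)
    l = max(0.0, l - amount)
    return colorsys.hls_to_rgb(h, l, s)

print(darken_rgb(0.8, 0.4, 0.2, 0.2))  # a noticeably darker orange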
def from_verb(cls, verb):
pattern = r'^(?P<meta>[A-Z]+)(?P<version>\d+)(?P<action>[A-Z]+)(?P<arg1>\d+)?(\/(?P<arg2>\d+))?$'
try:
verb = verb.decode()
except AttributeError:
pass
match = re.match(pattern, verb)
if not match:
raise SpoolverbError('Invalid spoolverb: {}'.format(verb))
data = match.groupdict()
meta = data['meta']
version = data['version']
action = data['action']
if action == 'EDITIONS':
num_editions = data['arg1']
return cls(meta=meta, version=version, action=action, num_editions=int(num_editions))
elif action == 'LOAN':
try:
edition_num = int(data['arg1'])
except TypeError:
edition_num = 0
loan_start = data['arg2'][:6]
loan_end = data['arg2'][6:]
return cls(meta=meta, version=version, action=action, edition_num=int(edition_num),
loan_start=loan_start, loan_end=loan_end)
elif action in ['FUEL', 'PIECE', 'CONSIGNEDREGISTRATION']:
return cls(meta=meta, version=version, action=action)
else:
edition_num = data['arg1']
return cls(meta=meta, version=version, action=action, edition_num=int(edition_num))
|
Constructs a :class:`Spoolverb` instance from the string
representation of the given verb.
Args:
verb (str): representation of the verb e.g.:
``'ASCRIBESPOOL01LOAN12/150526150528'``. Can also be in
binary format (:obj:`bytes`): ``b'ASCRIBESPOOL01PIECE'``.
Returns:
:class:`Spoolverb` instance.
|
juraj-google-style
|
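A standalone illustration of the regular expression used by `from_verb`, applied to the loan example from the docstring; only `re` is needed.
import re

pattern = r'^(?P<meta>[A-Z]+)(?P<version>\d+)(?P<action>[A-Z]+)(?P<arg1>\d+)?(\/(?P<arg2>\d+))?$'
match = re.match(pattern, 'ASCRIBESPOOL01LOAN12/150526150528')
data = match.groupdict()
print(data['meta'], data['version'], data['action'], data['arg1'])
# ASCRIBESPOOL 01 LOAN 12
print(data['arg2'][:6], data['arg2'][6:])  # loan_start / loan_end: 150526 150528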
def matches(self, desc):
return ((self.metric_name == desc.name) and (self.kind == desc.metricKind) and (self.value_type == desc.valueType))
|
Determines if a given metric descriptor matches this enum instance
Args:
desc (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricDescriptor`): the
instance to test
Return:
`True` if desc is supported, otherwise `False`
|
codesearchnet
|
def _step(time, output_ta_t, prev_output, *states):
current_input = tuple((ta.read(time) for ta in input_ta))
current_input = tf.nest.pack_sequence_as(inputs, current_input)
mask_t = masking_fn(time)
output, new_states = step_function(current_input, tuple(states) + tuple(constants))
flat_output = tf.nest.flatten(output)
flat_mask_output = flat_zero_output if zero_output_for_mask else tf.nest.flatten(prev_output)
flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output)
flat_state = tf.nest.flatten(states)
flat_new_state = tf.nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if isinstance(new_state, tf.Tensor):
new_state.set_shape(state.shape)
flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state)
new_states = tf.nest.pack_sequence_as(new_states, flat_final_state)
ta_index_to_write = time if return_all_outputs else 0
output_ta_t = tuple((ta.write(ta_index_to_write, out) for ta, out in zip(output_ta_t, flat_new_output)))
return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states)
|
RNN step function.
Args:
time: Current timestep value.
output_ta_t: TensorArray.
prev_output: tuple of outputs from time - 1.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
|
github-repos
|
def validate(self, *args, **kwargs):
return super(ParameterValidator, self)._validate(*args, **kwargs)
|
Validate a parameter dict against a parameter schema from an ocrd-tool.json
Args:
obj (dict):
schema (dict):
|
codesearchnet
|
def generate_sitemap(self, path='sitemap.xml', https=False):
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap)
|
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
|
codesearchnet
|
def sample_point(input_features: torch.Tensor, point_coordinates: torch.Tensor, add_dim=False, **kwargs) -> torch.Tensor:
if point_coordinates.dim() == 3:
add_dim = True
point_coordinates = point_coordinates.unsqueeze(2)
point_features = torch.nn.functional.grid_sample(input_features, 2.0 * point_coordinates - 1.0, **kwargs)
if add_dim:
point_features = point_features.squeeze(3)
return point_features
|
A wrapper around `torch.nn.functional.grid_sample` to support 3D point_coordinates tensors.
Args:
input_features (`torch.Tensor` of shape (batch_size, channels, height, width)):
A tensor that contains features map on a height * width grid
point_coordinates (`torch.Tensor` of shape (batch_size, num_points, 2) or (batch_size, grid_height, grid_width, 2)):
A tensor that contains [0, 1] * [0, 1] normalized point coordinates
add_dim (`bool`):
boolean value to keep track of added dimension
Returns:
point_features (`torch.Tensor` of shape (batch_size, channels, num_points) or (batch_size, channels, height_grid, width_grid)):
A tensor that contains features for points in `point_coordinates`.
|
github-repos
|
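A usage sketch for `sample_point` above, assuming the function is in scope and PyTorch is installed; the shapes follow the docstring, and `align_corners` is passed through to `grid_sample` via **kwargs.
import torch

features = torch.randn(2, 16, 32, 32)   # (batch, channels, height, width)
points = torch.rand(2, 50, 2)           # 50 normalized (x, y) points per image in [0, 1]
sampled = sample_point(features, points, align_corners=False)
print(sampled.shape)                     # torch.Size([2, 16, 50])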
def match_main(self, text, pattern, loc):
if text == None or pattern == None:
raise ValueError("Null inputs. (match_main)")
loc = max(0, min(loc, len(text)))
if text == pattern:
return 0
elif not text:
return -1
elif text[loc:loc + len(pattern)] == pattern:
return loc
else:
match = self.match_bitap(text, pattern, loc)
return match
|
Locate the best instance of 'pattern' in 'text' near 'loc'.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1.
|
juraj-google-style
|
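A hedged usage sketch assuming the `diff-match-patch` package, whose `diff_match_patch` class provides this `match_main` method; the sample strings are arbitrary.
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
text = 'The quick brown fox jumps over the lazy dog.'
print(dmp.match_main(text, 'brown fox', 10))   # exact hit at loc 10 -> 10
print(dmp.match_main(text, 'browm fpx', 10))   # falls back to match_bitap, typically still 10
print(dmp.match_main(text, 'zebra', 0))        # no plausible match -> -1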
def find_signature(self, signature_id=None, signer_email_address=None):
if self.signatures:
for signature in self.signatures:
if ((signature.signature_id == signature_id) or (signature.signer_email_address == signer_email_address)):
return signature
|
Return a signature for the given parameters
Args:
signature_id (str): Id of the signature to retrieve.
signer_email_address (str): Email address of the associated signer for the signature to retrieve.
Returns:
A Signature object or None
|
codesearchnet
|
def version(self):
cmd = b'version\r\n'
results = self._misc_cmd([cmd], b'version', False)
(before, _, after) = results[0].partition(b' ')
if (before != b'VERSION'):
raise MemcacheUnknownError(('Received unexpected response: %s' % results[0]))
return after
|
The memcached "version" command.
Returns:
A string of the memcached version.
|
codesearchnet
|
def validate_variable_type(var_name, var_type, value):
if isinstance(var_type, CFNType):
value = CFNParameter(name=var_name, value=value)
elif isinstance(var_type, TroposphereType):
try:
value = var_type.create(value)
except Exception as exc:
name = '{}.create'.format(var_type.resource_name)
raise ValidatorError(var_name, name, value, exc)
elif (not isinstance(value, var_type)):
raise ValueError(('Value for variable %s must be of type %s. Actual type: %s.' % (var_name, var_type, type(value))))
return value
|
Ensures the value is the correct variable type.
Args:
var_name (str): The name of the defined variable on a blueprint.
var_type (type): The type that the value should be.
value (obj): The object representing the value provided for the
variable
Returns:
object: Returns the appropriate value object. If the original value
was of CFNType, the returned value will be wrapped in CFNParameter.
Raises:
ValueError: If the `value` isn't of `var_type` and can't be cast as
that type, this is raised.
|
codesearchnet
|
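A hedged usage sketch exercising only the plain-type branch of `validate_variable_type`, assuming the function and its stacker imports are in scope; `CFNType`/`TroposphereType` values are not shown here.
validate_variable_type('bucket_count', int, 3)        # returns 3 unchanged
try:
    validate_variable_type('bucket_count', int, 'three')
except ValueError as exc:
    print(exc)  # Value for variable bucket_count must be of type <class 'int'>. ...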