code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (string, 3 distinct values) |
---|---|---|
def split(self, n):
new_range_filters = []
name = self.start[0]
prop_cls = self.prop.__class__
if prop_cls in _DISCRETE_PROPERTY_SPLIT_FUNCTIONS:
splitpoints = _DISCRETE_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
self.start[2], self.end[2], n,
self.start[1] == ">=", self.end[1] == "<=")
start_filter = (name, ">=", splitpoints[0])
for p in splitpoints[1:]:
end_filter = (name, "<", p)
new_range_filters.append([start_filter, end_filter])
start_filter = (name, ">=", p)
else:
splitpoints = _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
self.start[2], self.end[2], n)
start_filter = self.start
for p in splitpoints:
end_filter = (name, "<", p)
new_range_filters.append([start_filter, end_filter])
start_filter = (name, ">=", p)
new_range_filters.append([start_filter, self.end])
for f in new_range_filters:
f.extend(self._equality_filters)
return [self.__class__(f, self.model_class_path) for f in new_range_filters]
|
Evenly split this range into contiguous, non overlapping subranges.
Args:
n: number of splits.
Returns:
a list of contiguous, non overlapping sub PropertyRanges. May be fewer than
n when there are not enough subranges.
|
juraj-google-style
|
def is_valid(value, valid_values):
valid = False
if isinstance(valid_values, type) and type(value) is valid_values:
valid = True
elif isinstance(valid_values, type) and valid_values == float and type(value) == int:
valid = True
elif isinstance(value, dict) and isinstance(valid_values, dict):
assert set(value.keys()) & set(valid_values.keys()) == set(value.keys())
for k, v in value.items():
valid = Parameter.is_valid(v, valid_values[k])
if not valid:
break
elif isinstance(value, dict) and valid_values == Parameter:
valid = True
elif isinstance(valid_values, list) and value in valid_values:
valid = True
return valid
|
Check if the value is valid.
Args:
value: value to be tested
valid_values: allowed valid values (type or list of values)
Returns:
bool: True if the value is valid, False otherwise.
|
juraj-google-style
|
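A minimal usage sketch for the `is_valid` entry above (values are illustrative; assumes the surrounding `Parameter` class implied by the recursive `Parameter.is_valid` call):

# Usage sketch for Parameter.is_valid -- values are illustrative.
Parameter.is_valid(3, int)                      # True: type matches exactly
Parameter.is_valid(3, float)                    # True: ints pass where floats are expected
Parameter.is_valid('b', ['a', 'b', 'c'])        # True: value is one of the allowed values
Parameter.is_valid('d', ['a', 'b', 'c'])        # False: not in the list of allowed values
Parameter.is_valid({'gain': 2}, {'gain': int, 'offset': float})   # True: every present key validates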
def delete_case(self, case):
mongo_case = self.case(case)
if (not mongo_case):
raise CaseError('Tried to delete case {0} but could not find case'.format(case.get('case_id')))
LOG.info('Removing case {0} from database'.format(mongo_case.get('case_id')))
self.db.case.delete_one({'_id': mongo_case['_id']})
return
|
Delete a case from the database.
Args:
case (dict): A case dictionary
|
codesearchnet
|
def preprocess_histories(self, max_coarse_history: int, semantic_to_coarse_ratio: int, batch_size: int, semantic_generation_config: int, codebook_size: int, history_prompt: Optional[Dict[str, torch.Tensor]]=None):
if history_prompt is not None:
x_semantic_history = torch.repeat_interleave(history_prompt['semantic_prompt'][None], batch_size, dim=0)
x_coarse_history = history_prompt['coarse_prompt'].clone()
if codebook_size is not None:
for n in range(1, x_coarse_history.shape[0]):
x_coarse_history[n, :] += codebook_size * n
x_coarse_history = torch.transpose(x_coarse_history, 0, 1).reshape(-1)
x_coarse_history = x_coarse_history + semantic_generation_config.semantic_vocab_size
x_coarse_history = torch.repeat_interleave(x_coarse_history[None], batch_size, dim=0)
max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio))
n_semantic_hist_provided = min([max_semantic_history, x_semantic_history.shape[1] - x_semantic_history.shape[1] % 2, int(np.floor(x_coarse_history.shape[1] / semantic_to_coarse_ratio))])
n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio))
x_semantic_history = x_semantic_history[:, -n_semantic_hist_provided:].int()
x_coarse_history = x_coarse_history[:, -n_coarse_hist_provided:].int()
x_coarse_history = x_coarse_history[:, :-2]
else:
x_semantic_history = torch.tensor([[]] * batch_size, dtype=torch.int, device=self.device)
x_coarse_history = torch.tensor([[]] * batch_size, dtype=torch.int, device=self.device)
return (x_semantic_history, x_coarse_history)
|
Preprocess the optional `Bark` speaker prompts before `self.generate`.
Args:
max_coarse_history (`int`):
Maximum size of coarse tokens used.
semantic_to_coarse_ratio (`int`):
Ratio of semantic to coarse frequency
batch_size (`int`):
Batch size, i.e the number of samples.
semantic_generation_config (`BarkSemanticGenerationConfig`):
Generation config indicating how to generate the semantic tokens.
codebook_size (`int`):
Codebook channel size, i.e. the size of the output vocabulary per codebook channel.
history_prompt (`Optional[Dict[str,torch.Tensor]]`):
Optional `Bark` speaker prompt.
Returns:
`tuple(torch.FloatTensor)`:
- **x_semantic_history** (`torch.FloatTensor`) -- Processed semantic speaker prompt.
- **x_coarse_history** (`torch.FloatTensor`) -- Processed coarse speaker prompt.
|
github-repos
|
def get_subassistants(self):
if (not hasattr(self, '_subassistants')):
self._subassistants = []
if ('get_subassistant_classes' in vars(type(self))):
for a in self.get_subassistant_classes():
self._subassistants.append(a())
return self._subassistants
|
Return list of instantiated subassistants.
Usually, this does not need to be overridden in subclasses; you should just override
get_subassistant_classes.
Returns:
list of instantiated subassistants
|
codesearchnet
|
def method(cache_name, key_prefix=None):
def decorator(func):
if (func.__name__ in ['cause_repertoire', 'effect_repertoire'] and
not config.CACHE_REPERTOIRES):
return func
@wraps(func)
def wrapper(obj, *args, **kwargs):
cache = getattr(obj, cache_name)
key = cache.key(*args, _prefix=key_prefix, **kwargs)
value = cache.get(key)
if value is None:
value = func(obj, *args, **kwargs)
cache.set(key, value)
return value
return wrapper
return decorator
|
Caching decorator for object-level method caches.
Cache key generation is delegated to the cache.
Args:
cache_name (str): The name of the (already-instantiated) cache
on the decorated object which should be used to store results
of this method.
*key_prefix: A constant to use as part of the cache key in addition
to the method arguments.
|
juraj-google-style
|
def diff_commonSuffix(self, text1, text2):
if not text1 or not text2 or text1[-1] != text2[-1]:
return 0
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerend = 0
while pointermin < pointermid:
if (text1[-pointermid:len(text1) - pointerend] ==
text2[-pointermid:len(text2) - pointerend]):
pointermin = pointermid
pointerend = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
|
Determine the common suffix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the end of each string.
|
juraj-google-style
|
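A short usage sketch for the `diff_commonSuffix` entry above (assuming the standard diff-match-patch Python package, which the `self` parameter and method name suggest):

from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
dmp.diff_commonSuffix('fruit loops', 'absolute loops')   # -> 6, i.e. ' loops'
dmp.diff_commonSuffix('alpha', 'omega')                   # -> 1, i.e. 'a'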
def PrintExtractionSummary(self, processing_status):
if not processing_status:
self._output_writer.Write(
'WARNING: missing processing status information.\n')
elif not processing_status.aborted:
if processing_status.error_path_specs:
self._output_writer.Write('Processing completed with errors.\n')
else:
self._output_writer.Write('Processing completed.\n')
number_of_warnings = (
processing_status.foreman_status.number_of_produced_warnings)
if number_of_warnings:
output_text = '\n'.join([
'',
('Number of warnings generated while extracting events: '
'{0:d}.').format(number_of_warnings),
'',
'Use pinfo to inspect warnings in more detail.',
''])
self._output_writer.Write(output_text)
if processing_status.error_path_specs:
output_text = '\n'.join([
'',
'Path specifications that could not be processed:',
''])
self._output_writer.Write(output_text)
for path_spec in processing_status.error_path_specs:
self._output_writer.Write(path_spec.comparable)
self._output_writer.Write('\n')
self._output_writer.Write('\n')
|
Prints a summary of the extraction.
Args:
processing_status (ProcessingStatus): processing status.
|
juraj-google-style
|
def WaitUntilValid(self, timeout=None):
return utils.Poll(
generator=self.Get,
condition=lambda f: f.data.is_valid,
timeout=timeout)
|
Wait until the approval is valid (i.e. - approved).
Args:
timeout: timeout in seconds. None means default timeout (1 hour).
0 means no timeout (wait forever).
Returns:
Operation object with refreshed target_file.
Raises:
PollTimeoutError: if timeout is reached.
|
juraj-google-style
|
def __init__(self, parent=None):
super(CSVImportDialog, self).__init__(parent)
self._modal = True
self._windowTitle = 'Import CSV'
self._encodingKey = None
self._filename = None
self._delimiter = None
self._header = None
self._initUI()
|
Constructs the object with the given parent.
Args:
parent (QObject, optional): Causes the objected to be owned
by `parent` instead of Qt. Defaults to `None`.
|
juraj-google-style
|
def initialize_dual(neural_net_params_object, init_dual_file=None, random_init_variance=0.01, init_nu=200.0):
lambda_pos = []
lambda_neg = []
lambda_quad = []
lambda_lu = []
if (init_dual_file is None):
for i in range(0, (neural_net_params_object.num_hidden_layers + 1)):
initializer = np.random.uniform(0, random_init_variance, size=(neural_net_params_object.sizes[i], 1)).astype(np.float32)
lambda_pos.append(tf.get_variable(('lambda_pos_' + str(i)), initializer=initializer, dtype=tf.float32))
initializer = np.random.uniform(0, random_init_variance, size=(neural_net_params_object.sizes[i], 1)).astype(np.float32)
lambda_neg.append(tf.get_variable(('lambda_neg_' + str(i)), initializer=initializer, dtype=tf.float32))
initializer = np.random.uniform(0, random_init_variance, size=(neural_net_params_object.sizes[i], 1)).astype(np.float32)
lambda_quad.append(tf.get_variable(('lambda_quad_' + str(i)), initializer=initializer, dtype=tf.float32))
initializer = np.random.uniform(0, random_init_variance, size=(neural_net_params_object.sizes[i], 1)).astype(np.float32)
lambda_lu.append(tf.get_variable(('lambda_lu_' + str(i)), initializer=initializer, dtype=tf.float32))
nu = tf.get_variable('nu', initializer=init_nu)
else:
dual_var_init_val = np.load(init_dual_file).item()
for i in range(0, (neural_net_params_object.num_hidden_layers + 1)):
lambda_pos.append(tf.get_variable(('lambda_pos_' + str(i)), initializer=dual_var_init_val['lambda_pos'][i], dtype=tf.float32))
lambda_neg.append(tf.get_variable(('lambda_neg_' + str(i)), initializer=dual_var_init_val['lambda_neg'][i], dtype=tf.float32))
lambda_quad.append(tf.get_variable(('lambda_quad_' + str(i)), initializer=dual_var_init_val['lambda_quad'][i], dtype=tf.float32))
lambda_lu.append(tf.get_variable(('lambda_lu_' + str(i)), initializer=dual_var_init_val['lambda_lu'][i], dtype=tf.float32))
nu = tf.get_variable('nu', initializer=(1.0 * dual_var_init_val['nu']))
dual_var = {'lambda_pos': lambda_pos, 'lambda_neg': lambda_neg, 'lambda_quad': lambda_quad, 'lambda_lu': lambda_lu, 'nu': nu}
return dual_var
|
Function to initialize the dual variables of the class.
Args:
neural_net_params_object: Object with the neural net weights, biases
and types
init_dual_file: Path to file containing dual variables, if the path
is empty, perform random initialization
Expects numpy dictionary with
lambda_pos_0, lambda_pos_1, ..
lambda_neg_0, lambda_neg_1, ..
lambda_quad_0, lambda_quad_1, ..
lambda_lu_0, lambda_lu_1, ..
random_init_variance: variance for random initialization
init_nu: Value to initialize nu variable with
Returns:
dual_var: dual variables initialized appropriately.
|
codesearchnet
|
def generate_page(self, path, template, **kwargs):
directory = None
if kwargs.get('page'):
directory = kwargs['page'].dir
path = self._get_dist_path(path, directory=directory)
if (not path.endswith('.html')):
path = (path + '.html')
if (not os.path.isdir(os.path.dirname(path))):
os.makedirs(os.path.dirname(path))
html = self._get_template(template).render(**kwargs)
with open(path, 'w+') as file:
file.write(html)
|
Generate the HTML for a single page. You usually don't need to call this
method manually; it is used by a lot of other, more end-user-friendly
methods.
Args:
path (str): Where to place the page relative to the root URL. Usually
something like "index", "about-me", "projects/example", etc.
template (str): Which jinja template to use to render the page.
**kwargs: Kwargs will be passed on to the jinja template. Also, if
the `page` kwarg is passed, its directory attribute will be
prepended to the path.
|
codesearchnet
|
def RemoveConnectedPeer(self, peer):
if peer in self.Peers:
self.Peers.remove(peer)
|
Remove a connected peer from the known peers list.
Args:
peer (NeoNode): instance.
|
juraj-google-style
|
def parse_saved_model_with_debug_info(export_dir):
saved_model = parse_saved_model(export_dir)
debug_info_path = file_io.join(path_helpers.get_debug_dir(export_dir), constants.DEBUG_INFO_FILENAME_PB)
debug_info = graph_debug_info_pb2.GraphDebugInfo()
if file_io.file_exists(debug_info_path):
with file_io.FileIO(debug_info_path, 'rb') as debug_file:
try:
debug_info.ParseFromString(debug_file.read())
except message.DecodeError as e:
raise IOError(f'Cannot parse file {debug_info_path}: {e}.')
return (saved_model, debug_info)
|
Reads the savedmodel as well as the graph debug info.
Args:
export_dir: Directory containing the SavedModel and GraphDebugInfo files.
Returns:
`SavedModel` and `GraphDebugInfo` protocol buffers.
Raises:
IOError: If the saved model file does not exist, or cannot be successfully
parsed. Missing graph debug info file is fine.
|
github-repos
|
def get_chip(self, coordinates, catid, chip_type='PAN', chip_format='TIF', filename='chip.tif'):
def t2s1(t):
return str(t).strip('(,)').replace(',', '')
def t2s2(t):
return str(t).strip('(,)').replace(' ', '')
if (len(coordinates) != 4):
print('Wrong coordinate entry')
return False
(W, S, E, N) = coordinates
box = ((W, S), (W, N), (E, N), (E, S), (W, S))
box_wkt = (('POLYGON ((' + ','.join([t2s1(corner) for corner in box])) + '))')
results = self.get_images_by_catid_and_aoi(catid=catid, aoi_wkt=box_wkt)
description = self.describe_images(results)
(pan_id, ms_id, num_bands) = (None, None, 0)
for (catid, images) in description.items():
for (partnum, part) in images['parts'].items():
if ('PAN' in part.keys()):
pan_id = part['PAN']['id']
bucket = part['PAN']['bucket']
if ('WORLDVIEW_8_BAND' in part.keys()):
ms_id = part['WORLDVIEW_8_BAND']['id']
num_bands = 8
bucket = part['WORLDVIEW_8_BAND']['bucket']
elif ('RGBN' in part.keys()):
ms_id = part['RGBN']['id']
num_bands = 4
bucket = part['RGBN']['bucket']
band_str = ''
if (chip_type == 'PAN'):
band_str = (pan_id + '?bands=0')
elif (chip_type == 'MS'):
band_str = (ms_id + '?')
elif (chip_type == 'PS'):
if (num_bands == 8):
band_str = ((ms_id + '?bands=4,2,1&panId=') + pan_id)
elif (num_bands == 4):
band_str = ((ms_id + '?bands=0,1,2&panId=') + pan_id)
location_str = '&upperLeft={}&lowerRight={}'.format(t2s2((W, N)), t2s2((E, S)))
service_url = (('https:
url = ((service_url + band_str) + location_str)
url += ((('&format=' + chip_format) + '&token=') + self.gbdx_connection.access_token)
r = requests.get(url)
if (r.status_code == 200):
with open(filename, 'wb') as f:
f.write(r.content)
return True
else:
print('Cannot download chip')
return False
|
Downloads a native resolution, orthorectified chip in tif format
from a user-specified catalog id.
Args:
coordinates (list): Rectangle coordinates in order West, South, East, North.
West and East are longitudes, North and South are latitudes.
The maximum chip size is (2048 pix)x(2048 pix)
catid (str): The image catalog id.
chip_type (str): 'PAN' (panchromatic), 'MS' (multispectral), 'PS' (pansharpened).
'MS' is 4 or 8 bands depending on sensor.
chip_format (str): 'TIF' or 'PNG'
filename (str): Where to save chip.
Returns:
True if chip is successfully downloaded; else False.
|
codesearchnet
|
def Parse(filename, global_env):
parser = StlParser(filename=filename, global_env=global_env)
with open(filename) as data:
return parser.parse(data.read())
|
Parse a state transition spec of |filename| and fill |global_env|.
Args:
filename: A state transition spec file.
global_env: Dictionary to store global STL state. It has one field:
global_env['modules']: Dictionary of stl.module.Module by name.
|
github-repos
|
def merge(self, other_roc):
if ((other_roc.thresholds.size == self.thresholds.size) and np.all((other_roc.thresholds == self.thresholds))):
self.contingency_tables += other_roc.contingency_tables
else:
print('Input table thresholds do not match.')
|
Ingest the values of another DistributedROC object into this one and update the statistics inplace.
Args:
other_roc: another DistributedROC object.
|
codesearchnet
|
def _CanMergeLineIntoIfStatement(lines, limit):
if len(lines[1].tokens) == 1 and lines[1].last.is_multiline_string:
return True
if lines[0].lineno != lines[1].lineno:
return False
if lines[1].last.total_length >= limit:
return False
return style.Get('JOIN_MULTIPLE_LINES')
|
Determine if we can merge a short if-then statement into one line.
Two lines of an if-then statement can be merged if they were that way in the
original source, fit on the line without going over the column limit, and are
considered "simple" statements --- typically statements like 'pass',
'continue', and 'break'.
Arguments:
lines: (list of LogicalLine) The lines we are wanting to merge.
limit: (int) The amount of space remaining on the line.
Returns:
True if the lines can be merged, False otherwise.
|
github-repos
|
def run_gpu_only(func: _F) -> _F:
if tf_inspect.isclass(func):
raise ValueError('`run_gpu_only` only supports test methods.')
def decorated(self: 'TensorFlowTestCase', *args, **kwargs):
if not is_gpu_available():
self.skipTest('Test requires GPU')
return func(self, *args, **kwargs)
return decorated
|
Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated.
Returns:
Returns a function that will conditionally skip the decorated test method.
|
github-repos
|
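A usage sketch for the `run_gpu_only` entry above, applied as a decorator on a TensorFlow-style test method (the test class name and body are illustrative):

# Sketch: the decorated test is skipped automatically when no GPU is present.
class MyOpTest(TensorFlowTestCase):

  @run_gpu_only
  def test_runs_only_on_gpu(self):
    ...  # GPU-dependent assertions go here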
def download_mmcif_header(pdb_id, outdir='', force_rerun=False):
pdb_id = pdb_id.lower()
file_type = 'cif'
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
download_link = 'http:
urlretrieve(download_link, outfile)
log.debug('{}: saved header file'.format(outfile))
else:
log.debug('{}: header file already saved'.format(outfile))
return outfile
|
Download a mmCIF header file from the RCSB PDB by ID.
Args:
pdb_id: PDB ID
outdir: Optional output directory, default is current working directory
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
|
juraj-google-style
|
def refresh(self, id_or_uri, timeout=-1):
uri = self._client.build_uri(id_or_uri) + "/refresh"
return self._client.update_with_zero_body(uri, timeout=timeout)
|
The Refresh action reclaims the top-of-rack switches in a logical switch.
Args:
id_or_uri:
Can be either the Logical Switch ID or URI
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: The Logical Switch
|
juraj-google-style
|
def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True):
img = _imsave_before(img, channel_first, auto_scale)
if auto_scale:
img = upscale_pixel_intensity(img, as_uint16)
img = check_type_and_cast_if_necessary(img, as_uint16)
bitdepth = (8 if (img.dtype == np.uint8) else 16)
grayscale = (True if ((len(img.shape) == 2) or ((len(img.shape) == 3) and (img.shape[(- 1)] == 1))) else False)
writer = png.Writer(img.shape[1], img.shape[0], greyscale=grayscale, bitdepth=bitdepth)
writer.write(open(path, 'wb'), img.reshape(img.shape[0], (- 1)))
|
Save image by pypng module.
Args:
path (str): output filename
img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default.
channel_first:
This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).
Default value is False, which means the img shape is (height, width, channel)
as_uint16 (bool):
If True, save image as uint16.
auto_scale (bool):
Whether to upscale pixel values or not.
If you want to save a float image, this argument must be True.
In pypng backend, all below are supported.
- float ([0, 1]) to uint8 ([0, 255]) (if img.dtype==float and upscale==True and as_uint16==False)
- float to uint16 ([0, 65535]) (if img.dtype==float and upscale==True and as_uint16==True)
- uint8 to uint16 are supported (if img.dtype==np.uint8 and upscale==True and as_uint16==True)
|
codesearchnet
|
def split(input_layer, split_dim=0, num_splits=2):
shape = input_layer.shape
_check_split_dims(num_splits, split_dim, shape)
splits = tf.split(value=input_layer, num_or_size_splits=num_splits, axis=split_dim)
return input_layer.with_sequence(splits)
|
Splits this Tensor along the split_dim into num_splits equal chunks.
Examples:
* `[1, 2, 3, 4] -> [1, 2], [3, 4]`
* `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [2, 2]], [[3, 3], [4, 4]]`
Args:
input_layer: The chainable object, supplied.
split_dim: The dimension to split along. Defaults to batch.
num_splits: The number of splits.
Returns:
A list of PrettyTensors.
Raises:
ValueError: If split_dim is out of range or isn't divided evenly by
num_splits.
|
codesearchnet
|
def send(self, **kwargs):
assert len(kwargs) == 1, "Must make a single request."
res = self.send_req(sc_pb.Request(**kwargs))
return getattr(res, list(kwargs.keys())[0])
|
Create and send a specific request, and return the response.
For example: send(ping=sc_pb.RequestPing()) => sc_pb.ResponsePing
Args:
**kwargs: A single kwarg with the name and value to fill in to Request.
Returns:
The Response corresponding to your request.
|
juraj-google-style
|
def epsilon():
return _EPSILON
|
Returns the value of the fuzz factor used in numeric expressions.
Returns:
A float.
Example:
>>> tf.keras.backend.epsilon()
1e-07
|
github-repos
|
def _get_filters(nodes, context):
filters = []
for node in nodes:
for filter_block in sql_context_helpers.get_filters(node, context):
filter_sql_expression = _transform_filter_to_sql(filter_block, node, context)
filters.append(filter_sql_expression)
return filters
|
Get filters to apply to a list of SqlNodes.
Args:
nodes: List[SqlNode], the SqlNodes to get filters for.
context: CompilationContext, global compilation state and metadata.
Returns:
List[Expression], list of SQLAlchemy expressions.
|
juraj-google-style
|
def get_executor():
return context().executor
|
Get the Executor of the current thread.
Returns:
The Executor of the current thread.
|
github-repos
|
def join(self, other):
if self.contains(other):
return True
if other.contains(self):
self.x = other.x
self.y = other.y
self.width = other.width
self.height = other.height
return True
if not self.intersects(other, edges=True):
return False
if self.left == other.left and self.width == other.width:
y_min = min(self.bottom, other.bottom)
y_max = max(self.top, other.top)
self.y = y_min
self.height = y_max-y_min
return True
if self.bottom == other.bottom and self.height == other.height:
x_min = min(self.left, other.left)
x_max = max(self.right, other.right)
self.x = x_min
self.width = x_max-x_min
return True
return False
|
Try to join a rectangle to this one, if the result is also a rectangle.
When the operation is successful, this rectangle is modified to the union.
Arguments:
other (Rectangle): Rectangle to join
Returns:
bool: True when successfully joined, False otherwise
|
juraj-google-style
|
def _maybe_select_class_id(labels, predictions_idx, selected_id=None):
if selected_id is None:
return (labels, predictions_idx)
return (_select_class_id(labels, selected_id), _select_class_id(predictions_idx, selected_id))
|
If class ID is specified, filter all other classes.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]
where N >= 1. Commonly, N=1 and `predictions_idx` has shape
[batch size, k].
selected_id: Int id to select.
Returns:
Tuple of `labels` and `predictions_idx`, possibly with classes removed.
|
github-repos
|
def add(self, coro, *args, **kw):
if asyncio.iscoroutinefunction(coro):
coro = coro(*args, **kw)
if not asyncio.iscoroutine(coro):
raise TypeError('paco: coro must be a coroutine object')
index = max(len(self.pool), 0)
task = Task(index, coro)
self.pool.append(task)
return coro
|
Adds a new coroutine function with optional variadic arguments.
Arguments:
coro (coroutine function): coroutine to execute.
*args (mixed): optional variadic arguments
Raises:
TypeError: if the coro object is not a valid coroutine
Returns:
future: coroutine wrapped future
|
juraj-google-style
|
def get(self, node_id):
return \
self._nodes[_node.Root.ID].get(node_id) or \
self._nodes[_node.Root.ID].get(self._sid_map.get(node_id))
|
Get a note with the given ID.
Args:
node_id (str): The note ID.
Returns:
gkeepapi.node.TopLevelNode: The Note or None if not found.
|
juraj-google-style
|
def run_resume_status(self, entity, project_name, name):
query = gql('\n query Model($project: String!, $entity: String, $name: String!) {\n model(name: $project, entityName: $entity) {\n id\n name\n entity {\n id\n name\n }\n\n bucket(name: $name, missingOk: true) {\n id\n name\n logLineCount\n historyLineCount\n eventsLineCount\n historyTail\n eventsTail\n }\n }\n }\n ')
response = self.gql(query, variable_values={'entity': entity, 'project': project_name, 'name': name})
if (('model' not in response) or ('bucket' not in response['model'])):
return None
project = response['model']
self.set_setting('project', project_name)
if ('entity' in project):
self.set_setting('entity', project['entity']['name'])
return project['bucket']
|
Check if a run exists and get resume information.
Args:
entity (str, optional): The entity to scope this project to.
project_name (str): The project to download, (can include bucket)
name (str): The run to download
|
codesearchnet
|
def create_checklist(self, checklist_json):
return trolly.checklist.Checklist(trello_client=self, checklist_id=checklist_json['id'], name=checklist_json['name'], data=checklist_json)
|
Create a Checklist object from JSON object
Returns:
Checklist: The checklist from the given `checklist_json`.
|
codesearchnet
|
def reverse_axis(self, axis_to_reverse):
if (axis_to_reverse.lower() == 'x'):
self.general.reverse_x_axis = True
if (axis_to_reverse.lower() == 'y'):
self.general.reverse_y_axis = True
if ((axis_to_reverse.lower() != 'x') and (axis_to_reverse.lower() != 'y')):
raise ValueError('Axis for reversing needs to be either x or y.')
return
|
Reverse an axis in all figure plots.
This will reverse the tick marks on an axis for each plot in the figure.
It can be overridden in SinglePlot class.
Args:
axis_to_reverse (str): Axis to reverse. Supports `x` and `y`.
Raises:
ValueError: The string representing the axis to reverse is not `x` or `y`.
|
codesearchnet
|
def create_unbroadcast_axis(shape, broadcast_shape):
return tuple(((- (1 + i)) for i in range(len(broadcast_shape)) if ((i >= len(shape)) or (broadcast_shape[(- (1 + i))] > shape[(- (1 + i))]))))
|
Creates the reduction axis for unbroadcasting.
Args:
shape: A list. The shape after the unbroadcast operation.
broadcast_shape: A list. The original shape the array being unbroadcast
had.
Returns:
A list. The axes along which the array needs to be reduced. These axes will
be distributed evenly into the original shape.
|
codesearchnet
|
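A worked example for the `create_unbroadcast_axis` entry above: reducing over the returned axes takes an array of `broadcast_shape` back down to `shape` (numpy is used only to illustrate the reduction):

import numpy as np

# Unbroadcasting a (2, 3) array back to shape (3,): only the leading axis is extra.
create_unbroadcast_axis((3,), (2, 3))      # -> (-2,)
np.ones((2, 3)).sum(axis=(-2,)).shape      # -> (3,)

# Unbroadcasting (2, 3) back to (1, 3): the size-1 axis was broadcast, so it is reduced too.
create_unbroadcast_axis((1, 3), (2, 3))    # -> (-2,)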
def array_to_base64_png(array):
array = np.array(array, dtype=np.float32)
if (len(array.shape) != 2):
raise ValueError(('Expected rank-2 array; received rank-%d array.' % len(array.shape)))
if (not np.size(array)):
raise ValueError(('Cannot encode an empty array (size: %s) as image.' % (array.shape,)))
is_infinity = np.isinf(array)
is_positive = (array > 0.0)
is_positive_infinity = np.logical_and(is_infinity, is_positive)
is_negative_infinity = np.logical_and(is_infinity, np.logical_not(is_positive))
is_nan = np.isnan(array)
finite_indices = np.where(np.logical_and(np.logical_not(is_infinity), np.logical_not(is_nan)))
if np.size(finite_indices):
minval = np.min(array[finite_indices])
maxval = np.max(array[finite_indices])
scaled = np.array((((array - minval) / (maxval - minval)) * 255), dtype=np.uint8)
rgb = np.repeat(np.expand_dims(scaled, (- 1)), IMAGE_COLOR_CHANNELS, axis=(- 1))
else:
rgb = np.zeros((array.shape + (IMAGE_COLOR_CHANNELS,)), dtype=np.uint8)
rgb[is_positive_infinity] = POSITIVE_INFINITY_RGB
rgb[is_negative_infinity] = NEGATIVE_INFINITY_RGB
rgb[is_nan] = NAN_RGB
image_encoded = base64.b64encode(encoder.encode_png(rgb))
return image_encoded
|
Convert an array into a base64-encoded PNG image.
Args:
array: A 2D np.ndarray or nested list of items.
Returns:
A base64-encoded string of the image. The image is grayscale if the array is
2D. The image is RGB color if the image is 3D with last dimension equal to
3.
Raises:
ValueError: If the input `array` is not rank-2, or if the rank-2 `array` is
empty.
|
codesearchnet
|
def stats(self, container, decode=None, stream=True):
url = self._url('/containers/{0}/stats', container)
if stream:
return self._stream_helper(self._get(url, stream=True), decode=decode)
else:
if decode:
raise errors.InvalidArgument('decode is only available in conjunction with stream=True')
return self._result(self._get(url, params={'stream': False}), json=True)
|
Stream statistics for a specific container. Similar to the
``docker stats`` command.
Args:
container (str): The container to stream statistics from
decode (bool): If set to true, stream will be decoded into dicts
on the fly. Only applicable if ``stream`` is True.
False by default.
stream (bool): If set to false, only the current stats will be
returned instead of a stream. True by default.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
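A usage sketch for the `stats` entry above via docker-py's low-level `APIClient` (the container name is illustrative):

import docker

cli = docker.APIClient()
# Streaming: iterate over decoded stat dicts as they arrive.
for stat in cli.stats('my-container', decode=True, stream=True):
    print(stat['read'])
    break
# One-shot: a single snapshot instead of a stream.
snapshot = cli.stats('my-container', stream=False)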
def clear(self, rows=None):
rows = (tf.range(self._capacity) if (rows is None) else rows)
assert (rows.shape.ndims == 1)
return tf.scatter_update(self._length, rows, tf.zeros_like(rows))
|
Reset episodes in the memory.
Internally, this only sets their lengths to zero. The memory entries will
be overridden by future calls to append() or replace().
Args:
rows: Episodes to clear, defaults to all.
Returns:
Operation.
|
codesearchnet
|
def compare_names(first, second):
first = name_to_vector(first)
second = name_to_vector(second)
zipped = list(zip(first, second))
if (not zipped):
return 0
similarity_factor = 0
for (fitem, _) in zipped:
if (fitem in second):
similarity_factor += 1
return ((float(similarity_factor) / len(zipped)) * 100)
|
Compare two names in a complicated, but more error-prone way.
The algorithm uses vector comparison.
Example:
>>> compare_names("Franta Putšálek", "ing. Franta Putšálek")
100.0
>>> compare_names("F. Putšálek", "ing. Franta Putšálek")
50.0
Args:
first (str): First name as string.
second (str): Second name as string.
Returns:
float: Percentage of the similarity.
|
codesearchnet
|
def __init__(self, initial_learning_rate, decay_steps, decay_rate, staircase=False, name=None):
super(InverseTimeDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
|
Applies inverse time decay to the initial learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'InverseTimeDecay'.
|
github-repos
|
def compute_values(edge_compatibility, v):
all_edge_values = tf.matmul(tf.to_float(edge_compatibility), v)
output = tf.reduce_sum(all_edge_values, axis=1)
return output
|
Compute values. If edge compatibilities is just adjacency, we get ggnn.
Args:
edge_compatibility: A tensor of shape [batch, num_transforms, length, depth]
v: A tensor of shape [batch, num_transforms, length, depth]
Returns:
output: A [batch, length, depth] tensor
|
juraj-google-style
|
def save(**kwargs):
ret = {'comment': [], 'result': True}
beacons = list_(return_yaml=False, include_pillar=False, **kwargs)
sfn = os.path.join(os.path.dirname(__opts__['conf_file']), os.path.dirname(__opts__['default_include']), 'beacons.conf')
if beacons:
tmp = {'beacons': beacons}
yaml_out = salt.utils.yaml.safe_dump(tmp, default_flow_style=False)
else:
yaml_out = ''
try:
with salt.utils.files.fopen(sfn, 'w+') as fp_:
fp_.write(yaml_out)
ret['comment'] = 'Beacons saved to {0}.'.format(sfn)
except (IOError, OSError):
ret['comment'] = 'Unable to write to beacons file at {0}. Check permissions.'.format(sfn)
ret['result'] = False
return ret
|
Save all configured beacons to the minion config.
Returns:
dict: Boolean and status message on success or failure of save.
CLI Example:
.. code-block:: bash
salt '*' beacons.save
|
codesearchnet
|
def formatted(self, func):
other = EscapedString.__new__(EscapedString)
other.strings = []
for (is_literal, value) in self.strings:
if (not is_literal):
value = func(value)
other.strings.append((is_literal, value))
return other
|
Return the string with non-literal parts formatted.
Args:
func (callable): Callable that translates a string into a
formatted string.
Returns:
`EscapedString` object.
|
codesearchnet
|
def __init__(self, options=None):
super(ExportConverter, self).__init__()
self.options = options or ExportOptions()
|
Constructor.
Args:
options: ExportOptions value, which contains settings that may or may
not affect this converter's behavior.
|
juraj-google-style
|
def get(self, **params):
if self._use_cache:
r = requests.get(self.url, params=params)
else:
with requests_cache.disabled():
r = requests.get(self.url, params=params)
r.raise_for_status()
return r
|
Performs get request to the biomart service.
Args:
**params (dict of str: any): Arbitrary keyword arguments, which
are added as parameters to the get request to biomart.
Returns:
requests.models.Response: Response from biomart for the request.
|
codesearchnet
|
def hwvtep_add_rbridgeid(self, **kwargs):
name = kwargs.pop('name')
id = kwargs.pop('rb_range')
ip_args = dict(name=name, rb_add=id)
method_name = 'overlay_gateway_attach_rbridge_id_rb_add'
method_class = self._brocade_tunnels
gw_attr = getattr(method_class, method_name)
config = gw_attr(**ip_args)
output = self._callback(config)
return output
|
Add a range of rbridge-ids
Args:
name (str): gateway-name
rb_range (str): rbridge-id range
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
None
|
codesearchnet
|
def run_change_point_analysis(test_config_container: TestConfigContainer, big_query_metrics_fetcher: MetricsFetcher, change_point_config: ChangePointConfig=ChangePointConfig(), save_alert_metadata: bool=False):
logging.info('Running change point analysis for test ID :%s on metric: % s' % (test_config_container.test_id, test_config_container.metric_name))
test_name = test_config_container.test_name
min_runs_between_change_points = change_point_config.min_runs_between_change_points
num_runs_in_change_point_window = change_point_config.num_runs_in_change_point_window
metric_container = big_query_metrics_fetcher.fetch_metric_data(test_config=test_config_container)
metric_container.sort_by_timestamp()
metric_values = metric_container.values
timestamps = metric_container.timestamps
change_point_index = find_latest_change_point_index(metric_values=metric_values)
if not change_point_index:
logging.info('Change point is not detected for the test ID %s' % test_config_container.test_id)
return False
latest_change_point_run = len(timestamps) - 1 - change_point_index
if not is_change_point_in_valid_window(num_runs_in_change_point_window, latest_change_point_run):
logging.info('Performance regression/improvement found for the test ID: %s. on metric %s. Since the change point run %s lies outside the num_runs_in_change_point_window distance: %s, alert is not raised.' % (test_config_container.test_id, test_config_container.metric_name, latest_change_point_run + 1, num_runs_in_change_point_window))
return False
is_valid_change_point = True
last_reported_issue_number = None
issue_metadata_table_name = f'{test_config_container.metrics_table}_{test_config_container.metric_name}'
if test_config_container.test_name:
issue_metadata_table_name = f'{issue_metadata_table_name}_{test_config_container.test_name}'
existing_issue_data = get_existing_issues_data(table_name=issue_metadata_table_name)
if existing_issue_data is not None:
existing_issue_timestamps = existing_issue_data[constants._CHANGE_POINT_TIMESTAMP_LABEL].tolist()
last_reported_issue_number = existing_issue_data[constants._ISSUE_NUMBER].tolist()[0]
if not isinstance(last_reported_issue_number, int):
last_reported_issue_number = last_reported_issue_number.item()
is_valid_change_point = is_sibling_change_point(previous_change_point_timestamps=existing_issue_timestamps, change_point_index=change_point_index, timestamps=timestamps, min_runs_between_change_points=min_runs_between_change_points, test_id=test_config_container.test_id)
if is_valid_change_point and save_alert_metadata:
issue_number, issue_url = create_performance_alert(test_config_container=test_config_container, metric_container=metric_container, change_point_index=change_point_index, existing_issue_number=last_reported_issue_number)
issue_metadata = GitHubIssueMetaData(issue_timestamp=pd.Timestamp(datetime.now().replace(tzinfo=timezone.utc)), test_id=test_config_container.test_id.replace('.', '_'), test_name=test_name or uuid.uuid4().hex, metric_name=test_config_container.metric_name, change_point=metric_values[change_point_index], issue_number=issue_number, issue_url=issue_url, change_point_timestamp=timestamps[change_point_index])
publish_issue_metadata_to_big_query(issue_metadata=issue_metadata, table_name=issue_metadata_table_name, project=test_config_container.project)
return is_valid_change_point
|
Args:
test_config_container: TestConfigContainer containing test metadata for
fetching data and running change point analysis.
big_query_metrics_fetcher: BigQuery metrics fetcher used to fetch data for
change point analysis.
change_point_config: ChangePointConfig containing parameters to run
change point analysis.
save_alert_metadata: bool indicating if issue metadata
should be published to BigQuery table.
Returns:
bool indicating if a change point is observed and alerted on GitHub.
|
github-repos
|
def GetFeeds(client):
feed_service = client.GetService('FeedService', 'v201809')
feeds = []
more_pages = True
selector = {'fields': ['Id', 'Name', 'Attributes'], 'predicates': [{'field': 'Origin', 'operator': 'EQUALS', 'values': ['USER']}, {'field': 'FeedStatus', 'operator': 'EQUALS', 'values': ['ENABLED']}], 'paging': {'startIndex': 0, 'numberResults': PAGE_SIZE}}
while more_pages:
page = feed_service.get(selector)
if ('entries' in page):
feeds.extend(page['entries'])
selector['paging']['startIndex'] += PAGE_SIZE
more_pages = (selector['paging']['startIndex'] < int(page['totalNumEntries']))
return feeds
|
Returns a list of all enabled Feeds.
Args:
client: an AdWordsClient instance.
Returns:
A list containing all enabled Feeds.
|
codesearchnet
|
def national_significant_number(numobj):
national_number = U_EMPTY_STRING
if numobj.italian_leading_zero:
num_zeros = numobj.number_of_leading_zeros
if num_zeros is None:
num_zeros = 1
if num_zeros > 0:
national_number = U_ZERO * num_zeros
national_number += str(numobj.national_number)
return national_number
|
Gets the national significant number of a phone number.
Note that a national significant number doesn't contain a national prefix
or any formatting.
Arguments:
numobj -- The PhoneNumber object for which the national significant number
is needed.
Returns the national significant number of the PhoneNumber object passed
in.
|
juraj-google-style
|
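A worked example for the `national_significant_number` entry above; it only needs an object exposing the three attributes read in the body, so a simple namespace stands in for a PhoneNumber here, and `U_EMPTY_STRING`/`U_ZERO` are assumed to be the module constants `''` and `'0'`:

from types import SimpleNamespace

# Italian-style number that keeps its leading zero.
numobj = SimpleNamespace(italian_leading_zero=True,
                         number_of_leading_zeros=None,
                         national_number=236618300)
national_significant_number(numobj)   # -> '0236618300'

# No leading zero: just the national number as a string.
numobj = SimpleNamespace(italian_leading_zero=False,
                         number_of_leading_zeros=None,
                         national_number=2083661177)
national_significant_number(numobj)   # -> '2083661177'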
def get_interpolated_value(self, energy, integrated=False):
inter = {}
for spin in self.cohp:
if (not integrated):
inter[spin] = get_linear_interpolated_value(self.energies, self.cohp[spin], energy)
elif (self.icohp is not None):
inter[spin] = get_linear_interpolated_value(self.energies, self.icohp[spin], energy)
else:
raise ValueError('ICOHP is empty.')
return inter
|
Returns the COHP for a particular energy.
Args:
energy: Energy to return the COHP value for.
|
codesearchnet
|
def GetMessage(self, log_source, lcid, message_identifier):
event_log_provider_key = self._GetEventLogProviderKey(log_source)
if (not event_log_provider_key):
return None
generator = self._GetMessageFileKeys(event_log_provider_key)
if (not generator):
return None
message_string = None
for message_file_key in generator:
message_string = self._GetMessage(message_file_key, lcid, message_identifier)
if message_string:
break
if (self._string_format == 'wrc'):
message_string = self._ReformatMessageString(message_string)
return message_string
|
Retrieves a specific message for a specific Event Log source.
Args:
log_source (str): Event Log source.
lcid (int): language code identifier (LCID).
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
|
codesearchnet
|
def _GetSocket(self):
try:
return socket.create_connection((self._host, self._port), self._SOCKET_TIMEOUT)
except socket.error as exception:
logger.error('Unable to connect to nsrlsvr with error: {0!s}.'.format(exception))
|
Establishes a connection to an nsrlsvr instance.
Returns:
socket._socketobject: socket connected to an nsrlsvr instance or None if
a connection cannot be established.
|
codesearchnet
|
def create_database_view(self, view: views.View, view_name: str) -> None:
view_sql = f'CREATE OR REPLACE VIEW {self._view_dataset}.{view_name} AS\n{self.to_sql(view)}'
self._engine.execute(view_sql).fetchall()
|
Creates a Spark view with the given name in the runner's view_dataset.
Args:
view: the FHIR view that defines the contents of the database view.
view_name: the view name passed to the CREATE OR REPLACE VIEW statement.
|
github-repos
|
def to_dataframe(self, view: views.View, limit: Optional[int]=None) -> pandas.DataFrame:
df = pandas.read_sql_query(sql=self.to_sql(view, limit=limit), con=self._engine.raw_connection())
return runner_utils.clean_dataframe(df, view.get_select_columns_to_return_type())
|
Returns a Pandas dataframe of the results.
Args:
view: the view that defines the query to run.
limit: optional limit of the number of items to return.
Returns:
pandas.DataFrame: dataframe of the view contents.
Raises:
ValueError propagated from the Spark client if pandas is not installed.
|
github-repos
|
def diff_linesToChars(self, text1, text2):
lineArray = []
lineHash = {}
lineArray.append('')
def diff_linesToCharsMunge(text):
'Split a text into an array of strings. Reduce the texts to a string\n of hashes where each Unicode character represents one line.\n Modifies linearray and linehash through being a closure.\n\n Args:\n text: String to encode.\n\n Returns:\n Encoded string.\n '
chars = []
lineStart = 0
lineEnd = (- 1)
while (lineEnd < (len(text) - 1)):
lineEnd = text.find('\n', lineStart)
if (lineEnd == (- 1)):
lineEnd = (len(text) - 1)
line = text[lineStart:(lineEnd + 1)]
if (line in lineHash):
chars.append(chr(lineHash[line]))
else:
if (len(lineArray) == maxLines):
line = text[lineStart:]
lineEnd = len(text)
lineArray.append(line)
lineHash[line] = (len(lineArray) - 1)
chars.append(chr((len(lineArray) - 1)))
lineStart = (lineEnd + 1)
return ''.join(chars)
maxLines = 666666
chars1 = diff_linesToCharsMunge(text1)
maxLines = 1114111
chars2 = diff_linesToCharsMunge(text2)
return (chars1, chars2, lineArray)
|
Split two texts into an array of strings. Reduce the texts to a string
of hashes where each Unicode character represents one line.
Args:
text1: First string.
text2: Second string.
Returns:
Three element tuple, containing the encoded text1, the encoded text2 and
the array of unique strings. The zeroth element of the array of unique
strings is intentionally blank.
|
codesearchnet
|
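A worked example for the `diff_linesToChars` entry above (again assuming an instance of the diff-match-patch class, here called `dmp`):

# Each distinct line is mapped to one character; index 0 of the line array is blank.
dmp.diff_linesToChars('alpha\nbeta\n', 'alpha\ngamma\n')
# -> ('\x01\x02', '\x01\x03', ['', 'alpha\n', 'beta\n', 'gamma\n'])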
def add(self, aspect, ifpresent='error'):
if isinstance(aspect, contextualize):
self.contextualize.update(aspect)
return True
classification = [(network, self.networks), (system, self.systems), (ansible, self.ansible_hosts), (deploy, self.deploys), (configure, self.configures)]
aspect_list = [l for (t, l) in classification if isinstance(aspect, t)]
assert (len(aspect_list) == 1), 'Unexpected aspect for RADL.'
aspect_list = aspect_list[0]
old_aspect = [a for a in aspect_list if (a.getId() == aspect.getId())]
if old_aspect:
if (ifpresent == 'error'):
raise Exception('Aspect with the same id was found.')
elif (ifpresent == 'replace'):
for (i, elem) in enumerate(aspect_list):
if (elem.getId() == old_aspect[0].getId()):
del aspect_list[i]
break
aspect_list.append(aspect)
return True
elif (ifpresent == 'ignore'):
return False
else:
raise ValueError
else:
aspect_list.append(aspect)
return True
|
Add a network, ansible_host, system, deploy, configure or contextualize.
Args:
- aspect(network, system, deploy, configure or contextualize): thing to add.
- ifpresent(str): if it has been defined, do:
- ``"ignore"``: not add the aspect.
- ``"replace"``: replace by the old defined.
- ``"error"``: raise an error.
Return(bool): True if aspect was added.
|
codesearchnet
|
def get_airport_stats(self, iata, page=1, limit=100):
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_airport_stats(url)
|
Retrieve the performance statistics at an airport
Given the IATA code of an airport, this method returns the performance statistics for the airport.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_stats('HYD')
f.get_airport_stats('HYD',page=1,limit=10)
|
codesearchnet
|
def get(self, language: str=None, default: str=None) -> str:
language = (language or settings.LANGUAGE_CODE)
value = super().get(language, default)
return (value if (value is not None) else default)
|
Gets the underlying value in the specified or
primary language.
Arguments:
language:
The language to get the value in.
Returns:
The value in the current language, or
the primary language in case no language
was specified.
|
codesearchnet
|
def add_dos(self, label, dos):
densities = dos.get_smeared_densities(self.sigma) if self.sigma \
else dos.densities
self._doses[label] = {'frequencies': dos.frequencies, 'densities': densities}
|
Adds a dos for plotting.
Args:
label:
label for the DOS. Must be unique.
dos:
PhononDos object
|
juraj-google-style
|
def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None, flag_values=_flagvalues.FLAGS, **args):
parser = _argument_parser.IntegerParser(lower_bound, upper_bound)
serializer = _argument_parser.ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
_register_bounds_validator_if_needed(parser, name, flag_values=flag_values)
|
Registers a flag whose value must be an integer.
If lower_bound or upper_bound is set, then this flag must be
within the given range.
Args:
name: str, the flag name.
default: int|str|None, the default value of the flag.
help: str, the help message.
lower_bound: int, min value of the flag.
upper_bound: int, max value of the flag.
flag_values: FlagValues, the FlagValues instance with which the flag will
be registered. This should almost never need to be overridden.
**args: dict, the extra keyword args that are passed to DEFINE.
|
codesearchnet
|
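A usage sketch for the `DEFINE_integer` entry above, written against the public absl.flags API it mirrors (the flag name and bounds are illustrative):

from absl import flags

# Registers --num_retries; values below 0 are rejected at parse time.
flags.DEFINE_integer('num_retries', 3, 'How many times to retry the request.',
                     lower_bound=0)
FLAGS = flags.FLAGS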
def set_fig_size(self, width, height=None):
self.figure.figure_width = width
self.figure.figure_height = height
return
|
Set the figure size in inches.
Sets the figure size with a call to fig.set_size_inches.
Default in code is 8 inches for each.
Args:
width (float): Dimensions for figure width in inches.
height (float, optional): Dimensions for figure height in inches. Default is None.
|
codesearchnet
|
def id_pools_vmac_ranges(self):
if (not self.__id_pools_vmac_ranges):
self.__id_pools_vmac_ranges = IdPoolsRanges('vmac', self.__connection)
return self.__id_pools_vmac_ranges
|
Gets the IdPoolsRanges API Client for VMAC Ranges.
Returns:
IdPoolsRanges:
|
codesearchnet
|
def plot_summaries(self, show=False, save=True, figure_type=None):
if not figure_type:
figure_type = self.default_figure_type
if not figure_type in self.default_figure_types:
logger.debug("unknown figure type selected")
figure_type = self.default_figure_type
color_list, symbol_list = self._create_colors_markers_list()
summary_df = self.summary_df
selected_summaries = self.selected_summaries
batch_dir = self.batch_dir
batch_name = self.name
fig, ax = plot_summary_figure(self.info_df, summary_df, color_list,
symbol_list, selected_summaries,
batch_dir, batch_name, show=show,
save=save, figure_type=figure_type)
self.figure[figure_type] = fig
self.axes[figure_type] = ax
|
Plot summary graphs.
Args:
show: shows the figure if True.
save: saves the figure if True.
figure_type: optional, figure type to create.
|
juraj-google-style
|
def get_record_schema_from_dict_table_schema(schema_name: str, table_schema: Dict[str, Any], namespace: str='apache_beam.io.gcp.bigquery') -> Dict[str, Any]:
avro_fields = [table_field_to_avro_field(field, '.'.join((namespace, schema_name))) for field in table_schema['fields']]
return {'type': 'record', 'name': schema_name, 'fields': avro_fields, 'doc': 'Translated Avro Schema for {}'.format(schema_name), 'namespace': namespace}
|
Convert a table schema into an Avro schema.
Args:
schema_name (str): The name of the record.
table_schema (Dict[str, Any]): A BigQuery table schema in dict form.
namespace (str): The namespace of the Avro schema.
Returns:
Dict[str, Any]: The schema as an Avro RecordSchema.
|
github-repos
|
def get_action(self, action_id):
return Action.get_object(api_token=self.token, action_id=action_id)
|
Returns a specific Action by its ID.
Args:
action_id (int): id of action
|
codesearchnet
|
def collect_filtered_models(discard, *input_values):
ids = set([])
collected = []
queued = []
def queue_one(obj):
if ((obj.id not in ids) and (not (callable(discard) and discard(obj)))):
queued.append(obj)
for value in input_values:
_visit_value_and_its_immediate_references(value, queue_one)
while queued:
obj = queued.pop(0)
if (obj.id not in ids):
ids.add(obj.id)
collected.append(obj)
_visit_immediate_value_references(obj, queue_one)
return collected
|
Collect a duplicate-free list of all other Bokeh models referred to by
this model, or by any of its references, etc, unless filtered-out by the
provided callable.
Iterate over ``input_values`` and descend through their structure
collecting all nested ``Models`` on the go.
Args:
discard (Callable[[Model], bool])
a callable which accepts a *Model* instance as its single argument
and returns a boolean stating whether to discard the instance. The
latter means that the instance will not be added to collected
models nor will its references be explored.
*input_values (Model)
Bokeh models to collect other models from
Returns:
None
|
codesearchnet
|
def load_with_classes(filename, classes):
ok = False
for class_ in classes:
obj = class_()
try:
obj.load(filename)
ok = True
except FileNotFoundError:
raise
except Exception as e:
if a99.logging_level == logging.DEBUG:
a99.get_python_logger().exception("Error trying with class \"{0!s}\"".format(
class_.__name__))
pass
if ok:
break
if ok:
return obj
return None
|
Attempts to load file by trial-and-error using a given list of classes.
Arguments:
filename -- full path to file
classes -- list of classes having a load() method
Returns: DataFile object if loaded successfully, or None if not.
Note: it will stop at the first successful load.
Attention: this is not good if there is a bug in any of the file readers,
because *all exceptions will be silenced!*
|
juraj-google-style
|
def clown_strike_ioc(self, ioc):
r = requests.get('http:
self._output(r.text)
|
Performs Clown Strike lookup on an IoC.
Args:
ioc - An IoC.
|
juraj-google-style
|
def add_input(self, mutable_accumulator, element, *args, **kwargs):
raise NotImplementedError(str(self))
|
Return result of folding element into accumulator.
CombineFn implementors must override add_input.
Args:
mutable_accumulator: the current accumulator,
may be modified and returned for efficiency
element: the element to add, should not be mutated
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
|
github-repos
|
def needs_reboot():
with salt.utils.winapi.Com():
obj_sys = win32com.client.Dispatch('Microsoft.Update.SystemInfo')
return salt.utils.data.is_true(obj_sys.RebootRequired)
|
Determines if the system needs to be rebooted.
Returns:
bool: True if the system requires a reboot, False if not
CLI Examples:
.. code-block:: bash
import salt.utils.win_update
salt.utils.win_update.needs_reboot()
|
codesearchnet
|
def on_pass(self, record):
|
A function that is executed upon a test passing.
Implementation is optional.
Args:
record: records.TestResultRecord, a copy of the test record for
this test, containing all information of the test execution
including exception objects.
|
github-repos
|
def get_app_state():
if (not hasattr(g, 'app_state')):
model = get_model()
g.app_state = {'app_title': APP_TITLE, 'model_name': type(model).__name__, 'latest_ckpt_name': model.latest_ckpt_name, 'latest_ckpt_time': model.latest_ckpt_time}
return g.app_state
|
Get current status of application in context
Returns:
:obj:`dict` of application status
|
codesearchnet
|
def list_tokens(self):
url = self.url() + "/nd/resource/public/token/"
req = self.remote_utils.get_url(url)
if req.status_code != 200:
raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
else:
return req.json()
|
Lists a set of tokens that are public in Neurodata.
Arguments:
Returns:
dict: Public tokens found in Neurodata
|
juraj-google-style
|
def wrap_or_copy(cls, func, **options):
if isinstance(func, openhtf.PhaseGroup):
raise PhaseWrapError('Cannot wrap PhaseGroup <%s> as a phase.' % (
func.name or 'Unnamed'))
if isinstance(func, cls):
retval = mutablerecords.CopyRecord(func)
else:
retval = cls(func)
retval.options.update(**options)
return retval
|
Return a new PhaseDescriptor from the given function or instance.
We want to return a new copy so that you can reuse a phase with different
options, plugs, measurements, etc.
Args:
func: A phase function or PhaseDescriptor instance.
**options: Options to update on the result.
Raises:
PhaseWrapError: if func is a openhtf.PhaseGroup.
Returns:
A new PhaseDescriptor object.
|
juraj-google-style
|
def _local_var_name(splittable_dimensions, assignment):
assignment_string = []
for splittable in sorted(splittable_dimensions):
if splittable in assignment:
assignment_string.append("{}:{}".format(splittable,
assignment[splittable]))
else:
assignment_string.append("{}".format(splittable))
return "y_(" + ",".join(assignment_string) + ")"
|
Name for a local variable.
Args:
splittable_dimensions: frozenset of names of splittable dimensions.
assignment: dict from names of splittable dimensions to names of mesh
dimensions.
Returns:
A string, the variable name.
|
juraj-google-style
|
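A worked example for the `_local_var_name` entry above: splittable dimensions are listed in sorted order, and the ones present in the assignment are annotated with their mesh dimension.

_local_var_name(frozenset({'batch', 'heads'}), {'batch': 'mesh_x'})
# -> 'y_(batch:mesh_x,heads)'
_local_var_name(frozenset({'batch', 'heads'}), {})
# -> 'y_(batch,heads)'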
def project(self, term, **kwargs):
params = kwargs
baseuri = ((self._BASE_URI + 'projects/') + term)
res = self.session.get(baseuri, params=params)
self.handle_http_error(res)
return res
|
Search for a project by id.
Args:
term (str): Term to search for.
kwargs (dict): additional keywords passed into
requests.session.get params keyword.
|
codesearchnet
|
def get_test_data(train_samples, test_samples, input_shape, num_classes, random_seed=None):
if random_seed is not None:
np.random.seed(random_seed)
num_sample = train_samples + test_samples
templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
y = np.random.randint(0, num_classes, size=(num_sample,))
x = np.zeros((num_sample,) + input_shape, dtype=np.float32)
for i in range(num_sample):
x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1.0, size=input_shape)
return ((x[:train_samples], y[:train_samples]), (x[train_samples:], y[train_samples:]))
|
Generates test data to train a model on.
Args:
train_samples: Integer, how many training samples to generate.
test_samples: Integer, how many test samples to generate.
input_shape: Tuple of integers, shape of the inputs.
num_classes: Integer, number of classes for the data and targets.
random_seed: Integer, random seed used by numpy to generate data.
Returns:
A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
|
github-repos
|
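A minimal usage sketch, assuming the get_test_data helper above is in scope (it only needs NumPy); the sample counts and shapes are arbitrary:
import numpy as np

(x_train, y_train), (x_test, y_test) = get_test_data(
    train_samples=200, test_samples=50, input_shape=(8,),
    num_classes=3, random_seed=42)

print(x_train.shape, y_train.shape)  # (200, 8) (200,)
print(x_test.shape, y_test.shape)    # (50, 8) (50,)
print(np.unique(y_train))            # class labels drawn from 0..2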
def transform_wrap_with(source, left, right, name=None):
with ops.name_scope(name, 'TransformWrapWith', [source]):
source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
if isinstance(source, tf.SparseTensor):
result = tf.SparseTensor(indices=source.indices, values=ops_module.transform_wrap_with(source.values, left, right), dense_shape=source.dense_shape)
else:
result = ops_module.transform_wrap_with(source, left, right)
return result
|
Wrap source strings with "left" and "right" strings
Args:
source: `Tensor` or `SparseTensor` of any shape, the strings to wrap.
left: Scalar string to add in the beginning
right: Scalar string to add in the ending
name: A name for the operation (optional).
Returns:
`Tensor` or `SparseTensor` of the same shape as the input, with wrapped strings.
|
codesearchnet
|
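The op above is backed by a custom compiled kernel (ops_module), so it is not runnable from stock TensorFlow alone; for dense inputs the same wrapping effect can be sketched with tf.strings.join, shown here as a hedged equivalent rather than the library's own implementation:
import tensorflow as tf

source = tf.constant([["abc", "de"], ["f", ""]])

# tf.strings.join broadcasts the scalar delimiters across the string tensor.
wrapped = tf.strings.join(["<", source, ">"])
print(wrapped.numpy())
# [[b'<abc>' b'<de>']
#  [b'<f>' b'<>']]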
class AriaTextMoELayer(nn.Module):
def __init__(self, config: AriaTextConfig):
super().__init__()
self.router = nn.Linear(config.hidden_size, config.moe_num_experts, bias=False)
self.experts = AriaGroupedExpertsMLP(config)
self.shared_experts = AriaSharedExpertsMLP(config)
self.config = config
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
original_shape = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_states.size(-1))
logits = self.router(hidden_states)
top_logits, top_indices = torch.topk(logits, k=self.config.moe_topk, dim=1)
scores = nn.functional.softmax(top_logits, dim=-1)
original_dtype = top_indices.dtype
tokens_per_expert = torch.histc(top_indices.flatten().to(torch.float32), bins=self.config.moe_num_experts, min=0, max=self.config.moe_num_experts - 1).to(original_dtype)
indices = top_indices
flatten_indices = indices.view(-1)
sorted_indices = torch.argsort(flatten_indices)
permuted_tokens = hidden_states.index_select(0, sorted_indices // self.config.moe_topk)
expert_output = self.experts(permuted_tokens, tokens_per_expert)
unpermuted_tokens = torch.zeros((scores.shape[0] * self.config.moe_topk, expert_output.size(1)), dtype=expert_output.dtype, device=expert_output.device)
unpermuted_tokens.index_copy_(0, sorted_indices, expert_output)
unpermuted_tokens = unpermuted_tokens.view(-1, self.config.moe_topk, expert_output.size(1))
output = (unpermuted_tokens * scores.unsqueeze(-1)).sum(dim=1).view(original_shape)
shared_expert_output = self.shared_experts(hidden_states.view(original_shape))
return output + shared_expert_output
|
Aria Text Mixture of Experts (MoE) Layer.
This layer applies a gating mechanism to route input tokens to different experts.
Args:
config (`AriaTextConfig`):
Configuration object for the text component of the model.
|
github-repos
|
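A standalone sketch of the routing bookkeeping used above: token copies are grouped per expert by argsort-ing the flattened top-k expert indices, processed, scattered back with index_copy_, and combined with the softmax routing scores. All sizes are made up, and the expert computation is replaced by a per-expert scaling so the permutation logic is easy to follow:
import torch

num_tokens, hidden, num_experts, top_k = 5, 4, 3, 2
hidden_states = torch.randn(num_tokens, hidden)

logits = torch.randn(num_tokens, num_experts)        # stand-in for the router
top_logits, top_indices = torch.topk(logits, k=top_k, dim=1)
scores = torch.softmax(top_logits, dim=-1)           # (num_tokens, top_k)

# Permutation: sort the flattened (token, k) slots by expert id.
flat_indices = top_indices.reshape(-1)               # (num_tokens * top_k,)
sorted_positions = torch.argsort(flat_indices)
permuted_tokens = hidden_states.index_select(0, sorted_positions // top_k)

# Stand-in "experts": scale each routed copy by (expert_id + 1).
expert_output = permuted_tokens * (flat_indices[sorted_positions] + 1).unsqueeze(1).float()

# Unpermutation: scatter expert outputs back to their original (token, k) slots.
unpermuted = torch.zeros_like(expert_output)
unpermuted.index_copy_(0, sorted_positions, expert_output)
unpermuted = unpermuted.view(num_tokens, top_k, hidden)

# Weighted sum over the k routed experts gives one vector per token.
output = (unpermuted * scores.unsqueeze(-1)).sum(dim=1)
print(output.shape)  # torch.Size([5, 4])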
def configure_tpu_version(self, version, restart_type='always'):
def configure_worker(worker):
ip_address = worker['ipAddress']
url = (_VERSION_SWITCHER_ENDPOINT + '/{}?restartType={}').format(ip_address, version, restart_type)
req = urllib.request.Request(url, data=b'')
try:
urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
status_code = e.code
if status_code == 404:
raise Exception('Tensorflow version {} is not available on Cloud TPU, try a previous nightly version or refer to https:
else:
raise Exception('Failed to configure worker {}'.format(ip_address))
workers = self.network_endpoints()
with futures.ThreadPoolExecutor(max_workers=len(workers)) as executor:
results = executor.map(configure_worker, workers)
for result in results:
if result:
result.result()
|
Configure TPU software version.
Args:
version (string): Version of software to configure the TPU with.
restart_type (string): Restart behaviour when switching versions,
defaults to always restart. Options are 'always', 'ifNeeded'.
|
github-repos
|
def keep_only_update_source_in_field(field, root, head, update):
update_sources = {source.lower() for source in get_value(thaw(update), '.'.join([field, 'source']), [])}
if (len(update_sources) != 1):
return (root, head, update)
source = update_sources.pop()
if (field in root):
root = root.set(field, remove_elements_with_source(source, root[field]))
if (field in head):
head = head.set(field, remove_elements_with_source(source, head[field]))
return (root, head, update)
|
Remove elements from root and head where ``source`` matches the update.
This is useful if the update needs to overwrite all elements with the same
source.
.. note::
If the update doesn't contain exactly one source in ``field``, the
records are returned with no modifications.
Args:
field (str): the field to filter out.
root (pmap): the root record, whose ``field`` will be cleaned.
head (pmap): the head record, whose ``field`` will be cleaned.
update (pmap): the update record, from which the ``source`` is read.
Returns:
tuple: ``(root, head, update)`` with some elements filtered out from
``root`` and ``head``.
|
codesearchnet
|
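The same idea on plain dictionaries, as a hedged sketch (the real helper operates on pyrsistent maps and uses inspire-json-merger utilities): when every element of the update's field carries one single source, elements with that source are dropped from root and head. The field name and record contents below are invented:
def keep_only_update_source_plain(field, root, head, update):
    # Plain-dict analogue of the helper above, mutating in place for brevity.
    sources = {el.get("source", "").lower() for el in update.get(field, [])}
    if len(sources) != 1:
        return root, head, update
    source = sources.pop()
    for record in (root, head):
        if field in record:
            record[field] = [el for el in record[field]
                             if el.get("source", "").lower() != source]
    return root, head, update

root = {"public_notes": [{"source": "arXiv", "value": "old"},
                         {"source": "Elsevier", "value": "keep"}]}
head = {"public_notes": [{"source": "arXiv", "value": "edited"}]}
update = {"public_notes": [{"source": "arXiv", "value": "new"}]}

keep_only_update_source_plain("public_notes", root, head, update)
print(root["public_notes"])  # [{'source': 'Elsevier', 'value': 'keep'}]
print(head["public_notes"])  # []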
def convert_sigmoid(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting sigmoid ...')
if names == 'short':
tf_name = 'SIGM' + random_string(4)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
sigmoid = keras.layers.Activation('sigmoid', name=tf_name)
layers[scope_name] = sigmoid(layers[inputs[0]])
|
Convert sigmoid layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
juraj-google-style
|
def predict_signature_def(inputs, outputs):
if inputs is None or not inputs:
raise ValueError('Prediction `inputs` cannot be None or empty.')
if outputs is None or not outputs:
raise ValueError('Prediction `outputs` cannot be None or empty.')
signature_inputs = {key: utils.build_tensor_info(tensor) for key, tensor in inputs.items()}
signature_outputs = {key: utils.build_tensor_info(tensor) for key, tensor in outputs.items()}
signature_def = build_signature_def(signature_inputs, signature_outputs, signature_constants.PREDICT_METHOD_NAME)
return signature_def
|
Creates prediction signature from given inputs and outputs.
This function produces signatures intended for use with the TensorFlow Serving
Predict API (tensorflow_serving/apis/prediction_service.proto). This API
imposes no constraints on the input and output types.
Args:
inputs: dict of string to `Tensor`.
outputs: dict of string to `Tensor`.
Returns:
A prediction-flavored signature_def.
Raises:
ValueError: If inputs or outputs is `None`.
|
github-repos
|
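A hedged usage sketch through the TF1-compatible public API, which appears to expose this utility as tf.compat.v1.saved_model.predict_signature_def; graph mode is used because the signature is built from graph tensors:
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

x = tf.compat.v1.placeholder(tf.float32, shape=[None, 3], name="x")
y = tf.identity(x * 2.0, name="y")

sig = tf.compat.v1.saved_model.predict_signature_def(inputs={"x": x}, outputs={"y": y})
print(sig.method_name)            # tensorflow/serving/predict
print(sorted(sig.inputs.keys()))  # ['x']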
def template_files(path, exts=None):
if not os.path.isabs(path):
_path = os.path.join(determine_path(), path)
if not (os.path.exists(_path) and os.path.isdir(_path)):
return []
if not exts:
exts = []
files = os.listdir(_path)
files = [f for f in files if os.path.splitext(f)[-1] in exts]
files = [os.path.join(path, f) for f in files]
return files
|
Return a list of filenames found at @path.
The list of filenames can be filtered by extensions.
Arguments:
path: Existing filepath we want to list.
exts: List of extensions to filter by.
Returns:
A list of filenames found in the path.
|
juraj-google-style
|
def merge_requests(self, **kwargs):
path = '%s/%s/merge_requests' % (self.manager.path, self.get_id())
return self.manager.gitlab.http_get(path, **kwargs)
|
List the merge requests related to the commit.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the references could not be retrieved
Returns:
list: The merge requests related to the commit.
|
juraj-google-style
|
def forward(self, hidden_states: torch.Tensor, global_hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
residual = hidden_states
global_residual = global_hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
global_hidden_states = self.global_self_attn_layer_norm(global_hidden_states)
if self.stagger_blocks_this_layer:
hidden_states, attention_mask = self.pad_local_tokens(hidden_states=hidden_states, attention_mask=attention_mask, block_size=self.block_size)
hidden_states, global_hidden_states, attn_weights = self.self_attn(token_hidden_states=hidden_states, global_hidden_states=global_hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
if self.stagger_blocks_this_layer:
hidden_states = self.unpad_local_tokens(padded_hidden_states=hidden_states, block_size=self.block_size)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training)
global_hidden_states = global_residual + global_hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
global_residual = global_hidden_states
global_hidden_states = self.final_layer_norm(global_hidden_states)
global_hidden_states = self.activation_fn(self.fc1(global_hidden_states))
global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.activation_dropout, training=self.training)
global_hidden_states = self.fc2(global_hidden_states)
global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training)
global_hidden_states = global_residual + global_hidden_states
outputs = (hidden_states, global_hidden_states)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
global_hidden_states (`torch.FloatTensor`): global token hidden states
*(seq_len, num_global_tokens, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
|
github-repos
|
def _parse_schema(schema, method):
if (method and schema.get('readOnly', False)):
return _READONLY_PROPERTY
if ('allOf' in schema):
schema_ = copy.deepcopy(schema['allOf'][0])
for x in schema['allOf'][1:]:
_dict_merge(schema_, x)
return _parse_schema(schema_, method)
if ('oneOf' in schema):
return _parse_schema(schema['oneOf'][0], method)
if ('enum' in schema):
return schema['enum'][0]
schema_type = schema.get('type', 'object')
if (schema_type == 'array'):
if ('oneOf' in schema['items']):
return [_parse_schema(x, method) for x in schema['items']['oneOf']]
return [_parse_schema(schema['items'], method)]
if (schema_type == 'object'):
if (method and all((v.get('readOnly', False) for v in schema['properties'].values()))):
return _READONLY_PROPERTY
results = []
for (name, prop) in schema.get('properties', {}).items():
result = _parse_schema(prop, method)
if (result != _READONLY_PROPERTY):
results.append((name, result))
return collections.OrderedDict(results)
if ((schema_type, schema.get('format')) in _TYPE_MAPPING):
return _TYPE_MAPPING[(schema_type, schema.get('format'))]
return _TYPE_MAPPING[(schema_type, None)]
|
Convert a Schema Object to a Python object.
Args:
schema: An ``OrderedDict`` representing the schema object.
method: The HTTP method being documented; when truthy, properties
marked ``readOnly`` are omitted from the result.
Returns:
A Python object mirroring the schema, built from example values.
|
codesearchnet
|
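A hedged example of what the parser does with a small OpenAPI-style schema, assuming the surrounding module (with its _TYPE_MAPPING of placeholder values) is in scope; the property names are invented:
schema = {
    "type": "object",
    "properties": {
        "id":   {"type": "integer", "readOnly": True},
        "name": {"type": "string"},
    },
}

body_example = _parse_schema(schema, method="POST")
# 'id' is readOnly, so with a truthy method only 'name' survives:
# OrderedDict([('name', <placeholder for ('string', None) in _TYPE_MAPPING>)])
print(body_example)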
def plot_job_history(jobs, interval='year'):
def get_date(job):
return datetime.datetime.strptime(job.creation_date(),
'%Y-%m-%dT%H:%M:%S.%fZ')
current_time = datetime.datetime.now()
if interval == 'year':
bins = [(current_time - datetime.timedelta(days=k*365/12))
for k in range(12)]
elif interval == 'month':
bins = [(current_time - datetime.timedelta(days=k)) for k in range(30)]
elif interval == 'week':
bins = [(current_time - datetime.timedelta(days=k)) for k in range(7)]
binned_jobs = [0]*len(bins)
if interval == 'year':
for job in jobs:
for ind, dat in enumerate(bins):
date = get_date(job)
if date.month == dat.month:
binned_jobs[ind] += 1
break
else:
continue
else:
for job in jobs:
for ind, dat in enumerate(bins):
date = get_date(job)
if date.day == dat.day and date.month == dat.month:
binned_jobs[ind] += 1
break
else:
continue
nz_bins = []
nz_idx = []
for ind, val in enumerate(binned_jobs):
if val != 0:
nz_idx.append(ind)
nz_bins.append(val)
total_jobs = sum(binned_jobs)
colors = ['
'
if interval == 'year':
labels = ['{}-{}'.format(str(bins[b].year)[2:], bins[b].month) for b in nz_idx]
else:
labels = ['{}-{}'.format(bins[b].month, bins[b].day) for b in nz_idx]
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
ax.pie(nz_bins[::-1], labels=labels, colors=colors, textprops={'fontsize': 14},
rotatelabels=True, counterclock=False)
ax.add_artist(Circle((0, 0), 0.7, color='white', zorder=1))
ax.text(0, 0, total_jobs, horizontalalignment='center',
verticalalignment='center', fontsize=26)
fig.tight_layout()
return fig
|
Plots the job history of the user from the given list of jobs.
Args:
jobs (list): A list of jobs with type IBMQjob.
interval (str): Interval over which to examine.
Returns:
fig: A Matplotlib figure instance.
|
juraj-google-style
|
def save_statement(self, statement):
response = self.lrs.save_statement(statement)
if not response:
raise ClientError('EnterpriseXAPIClient request failed.')
|
Save xAPI statement.
Arguments:
statement (EnterpriseStatement): xAPI Statement to send to the LRS.
Raises:
ClientError: If xAPI statement fails to save.
|
juraj-google-style
|
def load_models_using_filepattern(
self, filename_pattern, model, glob_args, is_main_model=False,
encoding='utf-8', add_to_local_models=True):
if (model):
self.update_model_in_repo_based_on_filename(model)
filenames = glob.glob(filename_pattern, **glob_args)
if len(filenames) == 0:
raise IOError(
errno.ENOENT, os.strerror(errno.ENOENT), filename_pattern)
loaded_models = []
for filename in filenames:
the_metamodel = MetaModelProvider.get_metamodel(model, filename)
loaded_models.append(
self.load_model(the_metamodel, filename, is_main_model,
encoding=encoding,
add_to_local_models=add_to_local_models))
return loaded_models
|
Load all models matching a filename pattern and add them to the relevant repositories.
Args:
filename_pattern: models to be loaded
model: model holding the loaded models in its _tx_model_repository
field (may be None).
glob_args: arguments passed to the glob.glob function.
Returns:
the list of loaded models
|
juraj-google-style
|
def get_entity_details(self, entity_id):
if (not is_valid_uuid(entity_id)):
raise StorageArgumentException('Invalid UUID for entity_id: {0}'.format(entity_id))
return self._authenticated_request.to_endpoint('entity/{}/'.format(entity_id)).return_body().get()
|
Get generic entity by UUID.
Args:
entity_id (str): The UUID of the requested entity.
Returns:
A dictionary describing the entity::
{
u'collab_id': 2271,
u'created_by': u'303447',
u'created_on': u'2017-03-10T12:50:06.077891Z',
u'description': u'',
u'entity_type': u'project',
u'modified_by': u'303447',
u'modified_on': u'2017-03-10T12:50:06.077946Z',
u'name': u'2271',
u'uuid': u'3abd8742-d069-44cf-a66b-2370df74a682'
}
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
|
codesearchnet
|
def build_fhir_path_ast(input_str: str) -> Expression:
error_listener = _FhirPathErrorListener()
lexer = FhirPathLexer(antlr4.InputStream(input_str))
lexer.removeErrorListeners()
lexer.addErrorListener(error_listener)
token_stream = antlr4.CommonTokenStream(lexer)
parser = FhirPathParser(token_stream)
parser.removeErrorListeners()
parser.addErrorListener(error_listener)
cst_visitor = _FhirPathCstVisitor()
cst = parser.expression()
if error_listener.errors:
raise ValueError('\n'.join(error_listener.errors))
ast = cst_visitor.visit(cst)
return ast
|
Given a FHIRPath query, constructs an AST and returns the root node.
Args:
input_str: The FHIRPath string to translate.
Returns:
A FHIRPath `Expression` instance, representing the root AST node.
Raises:
ValueError: In the event that the provided `input_str` was syntactically
invalid FHIRPath that failed during lexing/parsing.
|
github-repos
|
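A hedged usage sketch, assuming build_fhir_path_ast and its ANTLR-generated lexer/parser are importable from the surrounding FHIRPath package (the exact import path is not shown above, so it is omitted here):
try:
    ast_root = build_fhir_path_ast("Patient.name.given.first()")
    print(type(ast_root).__name__)   # root Expression node of the AST
except ValueError as err:
    # Raised when the expression fails lexing or parsing.
    print("invalid FHIRPath:", err)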
def role(self, value):
if value == self._defaults['ai.cloud.role'] and 'ai.cloud.role' in self._values:
del self._values['ai.cloud.role']
else:
self._values['ai.cloud.role'] = value
|
The role property.
Args:
value (string): the property value.
|
juraj-google-style
|
def create_sas_locator(access_token, asset_id, accesspolicy_id):
path = '/Locators'
endpoint = ''.join([ams_rest_endpoint, path])
body = (((('{ \t\t"AccessPolicyId":"' + accesspolicy_id) + '", \t\t"AssetId":"') + asset_id) + '", \t\t"Type":1 \t}')
return do_ams_post(endpoint, path, body, access_token)
|
Create Media Service SAS Locator.
Args:
access_token (str): A valid Azure authentication token.
asset_id (str): Media Service Asset ID.
accesspolicy_id (str): Media Service Access Policy ID.
Returns:
HTTP response. JSON body.
|
codesearchnet
|
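The request body above is assembled by string concatenation; here is a sketch of the same payload built with json.dumps, which escapes values safely (field names taken from the code above, IDs below are made-up placeholders):
import json

def build_sas_locator_body(accesspolicy_id, asset_id):
    # Same payload as the concatenated string in create_sas_locator.
    return json.dumps({
        "AccessPolicyId": accesspolicy_id,
        "AssetId": asset_id,
        "Type": 1,
    })

print(build_sas_locator_body("nb:pid:UUID:example-policy", "nb:cid:UUID:example-asset"))
# {"AccessPolicyId": "nb:pid:UUID:example-policy", "AssetId": "nb:cid:UUID:example-asset", "Type": 1}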
def get_tensor_details(self, subgraph_index=0):
tensor_details = []
num_subgraphs = self._interpreter.NumSubgraphs()
if subgraph_index < 0 or subgraph_index >= num_subgraphs:
raise ValueError(f'subgraph_index is out of range: {subgraph_index} for the model, which has {num_subgraphs} subgraphs.')
for idx in range(self._interpreter.NumTensors(subgraph_index)):
try:
tensor_details.append(self._get_tensor_details(idx, subgraph_index))
except ValueError:
pass
return tensor_details
|
Gets tensor details for every tensor with valid tensor details from a subgraph.
Tensors where required information about the tensor is not found are not
added to the list. This includes temporary tensors without a name.
Args:
subgraph_index: Index of the subgraph to fetch the tensor.
Returns:
A list of dictionaries containing tensor information.
|
github-repos
|
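A hedged usage sketch through the public tf.lite.Interpreter wrapper, which exposes this method; the model path is a placeholder for any valid .tflite flatbuffer:
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="model.tflite")  # placeholder path
interpreter.allocate_tensors()

for detail in interpreter.get_tensor_details():
    print(detail["index"], detail["name"], detail["shape"], detail["dtype"])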
def create_new_tf_function(func_graph):
transform.apply_func_graph_transforms(func_graph)
func = atomic_function.from_func_graph(func_graph.name, func_graph, {})
func_graph.outer_graph._add_function_recursive(func)
return func_graph.name
|
Converts func_graph to a TF_Function and adds it to the current graph.
Args:
func_graph: FuncGraph
Returns:
The name of the new TF_Function.
|
github-repos
|
def set_timeout(self, network_timeout):
if (network_timeout == self._network_timeout):
return
self._network_timeout = network_timeout
self._disconnect()
|
Set the timeout for existing and future Clients.
Close all current connections. This will cause future operations to
create new Clients with the network_timeout passed through
socketTimeoutMS optional parameter.
Args:
network_timeout: The new value in milliseconds for the timeout.
|
codesearchnet
|
def get_dummies(self, columns, **kwargs):
cls = type(self)
if columns is None:
columns = [c for c in self.columns if not is_numeric_dtype(self.dtypes[c])]
if len(columns) == 0:
return self.copy()
elif not is_list_like(columns):
columns = [columns]
def set_columns(df, columns):
df.columns = columns
return df
set_cols = self.columns
columns_applied = self._map_across_full_axis(
1, lambda df: set_columns(df, set_cols)
)
if len(columns) == len(self.columns):
def get_dummies_builder(df):
if df is not None:
if not df.empty:
return pandas.get_dummies(df, **kwargs)
else:
return pandas.DataFrame([])
func = self._prepare_method(lambda df: get_dummies_builder(df))
new_data = columns_applied.map_across_full_axis(0, func)
untouched_data = None
else:
def get_dummies_builder(df, internal_indices=[]):
return pandas.get_dummies(
df.iloc[:, internal_indices], columns=None, **kwargs
)
numeric_indices = list(self.columns.get_indexer_for(columns))
new_data = columns_applied.apply_func_to_select_indices_along_full_axis(
0, get_dummies_builder, numeric_indices, keep_remaining=False
)
untouched_data = self.drop(columns=columns)
final_columns = self.compute_index(1, new_data, False)
if len(columns) != len(self.columns):
new_data = untouched_data.data.concat(1, new_data)
final_columns = untouched_data.columns.append(pandas.Index(final_columns))
return cls(new_data, self.index, final_columns)
|
Convert categorical variables to dummy variables for certain columns.
Args:
columns: The columns to convert.
Returns:
A new QueryCompiler.
|
juraj-google-style
|
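The query compiler above ultimately defers to pandas.get_dummies on each partition; a small standalone sketch of that building block (plain pandas, not Modin), with an invented frame:
import pandas as pd

df = pd.DataFrame({"color": ["red", "blue", "red"], "size": [1, 2, 3]})

# Only the non-numeric column is one-hot encoded, mirroring the default
# column selection in the method above.
out = pd.get_dummies(df, columns=["color"])
print(list(out.columns))  # ['size', 'color_blue', 'color_red']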
def __init__( self, sites, cell_lengths ):
self.cell_lengths = cell_lengths
self.sites = sites
self.number_of_sites = len( self.sites )
self.site_labels = set( [ site.label for site in self.sites ] )
self.site_populations = Counter( [ site.label for site in self.sites ] )
self.enforce_periodic_boundary_conditions()
self.initialise_site_lookup_table()
self.nn_energy = False
self.cn_energies = False
self.site_energies = False
self.jump_lookup_table = False
for site in self.sites:
site.p_neighbours = [ self.site_with_id( i ) for i in site.neighbours ]
self.reset()
|
Initialise a Lattice instance.
Args:
sites (List(Site)): List of sites contained in the lattice.
cell_lengths (np.array(x,y,z)): Vector of cell lengths for the simulation cell.
Returns:
None
|
juraj-google-style
|
def qry_create(options):
qry_string = filt_end = param_str = ''
filt_st = 'Filters=['
param_str_default = 'All'
if options.id:
qry_string += ("InstanceIds=['%s']" % options.id)
param_str += ("id: '%s'" % options.id)
param_str_default = ''
if options.instname:
(qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str)
filt_end = ']'
param_str_default = ''
qry_string += (filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % options.instname))
param_str += ("name: '%s'" % options.instname)
if options.inst_state:
(qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st)
qry_string += ("{'Name': 'instance-state-name','Values': ['%s']}" % options.inst_state)
param_str += ("state: '%s'" % options.inst_state)
filt_end = ']'
param_str_default = ''
qry_string += filt_end
param_str += param_str_default
debg.dprintx('\nQuery String')
debg.dprintx(qry_string, True)
debg.dprint('param_str: ', param_str)
return (qry_string, param_str)
|
Create query from the args specified and command chosen.
Creates a query string that incorporates the args in the options
object, and creates the title for the 'list' function.
Args:
options (object): contains args and data from parser
Returns:
qry_string (str): the query to be used against the aws ec2 client.
param_str (str): the title to display before the list.
|
codesearchnet
|