code (string, lengths 20-4.93k) | docstring (string, lengths 33-1.27k) | source (3 classes) |
---|---|---|
def create_model_table(self, model):
try:
return db_model_factory(self.Base, model, self.models)
except Exception as exc:
raise ModelError(
model.name,
message="failed to create in-memory table.",
orig_exc=exc,
context=self.error_context
)
|
Creates the table for the given model.
Args:
model: A StatikModel instance.
Returns:
A SQLAlchemy model instance for the table corresponding to this
particular model.
|
juraj-google-style
|
def import_class(classpath):
modname, classname = classpath.rsplit(".", 1)
module = importlib.import_module(modname)
klass = getattr(module, classname)
return klass
|
Import the class referred to by the fully qualified class path.
Args:
classpath: A full "foo.bar.MyClass" path to a class definition.
Returns:
The class referred to by the classpath.
Raises:
ImportError: If an error occurs while importing the module.
AttributeError: If the class does not exist in the imported module.
|
juraj-google-style
|
def _strip_layer_names(self, summaries, model_type):
result = set()
for s in summaries:
if '/' not in s.tag:
raise ValueError(f'tag has no layer name: {s.tag!r}')
start_from = 2 if 'subclass' in model_type else 1
new_tag = '/'.join(s.tag.split('/')[start_from:])
result.add(s._replace(tag=new_tag))
return result
|
Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
|
github-repos
|
def to_json(self, drop_null=True, camel=False, indent=None, sort_keys=False):
return json.dumps(self.to_dict(drop_null, camel), indent=indent, sort_keys=sort_keys)
|
Serialize self as JSON
Args:
drop_null: bool, default True. Remove 'empty' attributes. See
to_dict.
camel: bool, default False. Convert keys to camelCase.
indent: int, default None. See json built-in.
sort_keys: bool, default False. See json built-in.
Returns:
str: object params.
|
codesearchnet
|
def edit_distance_matrix(train_x, train_y=None):
if train_y is None:
ret = np.zeros((train_x.shape[0], train_x.shape[0]))
for x_index, x in enumerate(train_x):
for y_index, y in enumerate(train_x):
if x_index == y_index:
ret[x_index][y_index] = 0
elif x_index < y_index:
ret[x_index][y_index] = edit_distance(x, y)
else:
ret[x_index][y_index] = ret[y_index][x_index]
return ret
ret = np.zeros((train_x.shape[0], train_y.shape[0]))
for x_index, x in enumerate(train_x):
for y_index, y in enumerate(train_y):
ret[x_index][y_index] = edit_distance(x, y)
return ret
|
Calculate the edit-distance matrix.
Args:
train_x: A list of neural architectures.
train_y: An optional list of neural architectures; if None, pairwise distances within train_x are computed.
Returns:
An edit-distance matrix.
|
juraj-google-style
|
def setOption(self, name, value):
if isinstance(value, bool):
lock_and_call(
lambda: self._impl.setBoolOption(name, value),
self._lock
)
elif isinstance(value, int):
lock_and_call(
lambda: self._impl.setIntOption(name, value),
self._lock
)
elif isinstance(value, float):
lock_and_call(
lambda: self._impl.setDblOption(name, value),
self._lock
)
elif isinstance(value, basestring):
lock_and_call(
lambda: self._impl.setOption(name, value),
self._lock
)
else:
raise TypeError('option value must be bool, int, float, or str, not {}'.format(type(value).__name__))
|
Set an AMPL option to a specified value.
Args:
name: Name of the option to be set (alphanumeric without spaces).
value: The value the option must be set to.
Raises:
InvalidArgument: if the option name is not valid.
TypeError: if the value has an invalid type.
|
juraj-google-style
|
def _process_tabs(self, tabs, current_tab, group_current_tab):
for t in tabs:
t.current_tab = current_tab
t.group_current_tab = group_current_tab
tabs = list(filter((lambda t: t.tab_visible), tabs))
tabs.sort(key=(lambda t: t.weight))
return tabs
|
Process and prepare tabs.
This includes steps like updating references to the current tab,
filtering out hidden tabs, sorting tabs, etc.
Args:
tabs:
The list of tabs to process.
current_tab:
The reference to the currently loaded tab.
group_current_tab:
The reference to the active tab in the current tab group. For
parent tabs, this is different than for the current tab group.
Returns:
Processed list of tabs. Note that the method may have side effects.
|
codesearchnet
|
def merge_dicts(dicts, op=operator.add):
a = None
for b in dicts:
if (a is None):
a = b.copy()
else:
# dict views cannot be concatenated with "+" on Python 3; merge b over a, then combine shared keys with op
a = {**a, **b, **{k: op(a[k], b[k]) for k in (set(b) & set(a))}}
return a
|
Merge a list of dictionaries.
Args:
dicts (list): a list of dictionary objects
op (operator): an operator item used to merge the dictionaries. Defaults to :py:func:`operator.add`.
Returns:
dict: the merged dictionary
|
codesearchnet
|
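A brief usage sketch for `merge_dicts` above, assuming it is importable; keys present in several dictionaries are combined with `op`, which defaults to `operator.add`.

```python
counts_a = {"apples": 2, "pears": 1}
counts_b = {"apples": 3, "plums": 4}

print(merge_dicts([counts_a, counts_b]))
# {'apples': 5, 'pears': 1, 'plums': 4}

# Any binary callable works as op, e.g. keep the larger value on collisions.
print(merge_dicts([counts_a, counts_b], op=max))
# {'apples': 3, 'pears': 1, 'plums': 4}
```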
def decr(self, key, value, noreply=False):
key = self.check_key(key)
cmd = (((b'decr ' + key) + b' ') + six.text_type(value).encode('ascii'))
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'decr', noreply)
if noreply:
return None
if (results[0] == b'NOT_FOUND'):
return None
return int(results[0])
|
The memcached "decr" command.
Args:
key: str, see class docs for details.
value: int, the amount by which to decrement the value.
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns None. Otherwise returns the new
value of the key, or None if the key wasn't found.
|
codesearchnet
|
def GetPathSegmentAndSuffix(self, base_path, path):
if ((path is None) or (base_path is None) or (not path.startswith(base_path))):
return (None, None)
path_index = len(base_path)
if (base_path and (not base_path.endswith(self.PATH_SEPARATOR))):
path_index += 1
if (path_index == len(path)):
return ('', '')
(path_segment, _, suffix) = path[path_index:].partition(self.PATH_SEPARATOR)
return (path_segment, suffix)
|
Determines the path segment and suffix of the path.
None is returned if the path does not start with the base path and
an empty string if the path exactly matches the base path.
Args:
base_path (str): base path.
path (str): path.
Returns:
tuple[str, str]: path segment and suffix string.
|
codesearchnet
|
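A worked example of the segment/suffix split performed by `GetPathSegmentAndSuffix` above; `helper` is a hypothetical instance of the defining class with `PATH_SEPARATOR = '/'`.

```python
# '/usr/lib/python3/os.py' relative to base '/usr/lib' is 'python3/os.py',
# which partitions into segment 'python3' and suffix 'os.py'.
print(helper.GetPathSegmentAndSuffix('/usr/lib', '/usr/lib/python3/os.py'))
# ('python3', 'os.py')

# An exact match of the base path yields two empty strings, and a path
# outside the base path yields (None, None).
print(helper.GetPathSegmentAndSuffix('/usr/lib', '/usr/lib'))    # ('', '')
print(helper.GetPathSegmentAndSuffix('/usr/lib', '/etc/hosts'))  # (None, None)
```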
def movie_credits(self, **kwargs):
path = self._get_id_path('movie_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Get the movie credits for a specific person id.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
A dict representation of the JSON returned from the API.
|
codesearchnet
|
def deployment_plans(self):
if (not self.__deployment_plans):
self.__deployment_plans = DeploymentPlans(self.__connection)
return self.__deployment_plans
|
Gets the Deployment Plans API client.
Returns:
DeploymentPlans:
|
codesearchnet
|
def two_qubit_matrix_to_ion_operations(q0: ops.Qid,
q1: ops.Qid,
mat: np.ndarray,
atol: float = 1e-8
) -> List[ops.Operation]:
kak = linalg.kak_decomposition(mat, atol=atol)
operations = _kak_decomposition_to_operations(q0,
q1, kak, atol)
return _cleanup_operations(operations)
|
Decomposes a two-qubit operation into MS/single-qubit rotation gates.
Args:
q0: The first qubit being operated on.
q1: The other qubit being operated on.
mat: Defines the operation to apply to the pair of qubits.
atol: A limit on the amount of absolute error introduced by the
construction.
Returns:
A list of operations implementing the matrix.
|
juraj-google-style
|
def post(self, **kwargs):
if (self._url is None):
raise NoWebsiteLoadedError('request submission requires a loaded website')
data = kwargs.get('data', {})
for i in self.soup('form').select('input[name]'):
if (i.get('name') not in data):
data[i.get('name')] = i.get('value', '')
kwargs['data'] = data
response = self.session.post(self._url, **kwargs)
self._url = response.url
self._response = response
return response
|
Send a POST request to the currently loaded website's URL.
The browser will automatically fill out the form. If `data` dict has
been passed into ``kwargs``, the contained input values will override
the automatically filled out values.
Returns:
`Response` object of a successful request.
Raises:
NoWebsiteLoadedError: If no website is currently loaded.
|
codesearchnet
|
def WriteFileHash(self, path, hash_value):
string = '{0:s}\t{1:s}\n'.format(hash_value, path)
encoded_string = self._EncodeString(string)
self._file_object.write(encoded_string)
|
Writes the file path and hash to file.
Args:
path (str): path of the file.
hash_value (str): message digest hash calculated over the file data.
|
juraj-google-style
|
def FromFile(cls, path, actions_dict, resources_dict, file_format='yaml', name=None):
format_map = {'yaml': cls._process_yaml}
format_handler = format_map.get(file_format)
if (format_handler is None):
raise ArgumentError('Unknown file format or file extension', file_format=file_format, known_formats=[x for x in format_map if (format_map[x] is not None)])
recipe_info = format_handler(path)
if (name is None):
(name, _ext) = os.path.splitext(os.path.basename(path))
try:
recipe_info = RecipeSchema.verify(recipe_info)
except ValidationError as exc:
raise RecipeFileInvalid('Recipe file does not match expected schema', file=path, error_message=exc.msg, **exc.params)
description = recipe_info.get('description')
try:
resources = cls._parse_resource_declarations(recipe_info.get('resources', []), resources_dict)
defaults = cls._parse_variable_defaults(recipe_info.get('defaults', []))
steps = []
for (i, action) in enumerate(recipe_info.get('actions', [])):
action_name = action.pop('name')
if (action_name is None):
raise RecipeFileInvalid('Action is missing required name parameter', parameters=action, path=path)
action_class = actions_dict.get(action_name)
if (action_class is None):
raise UnknownRecipeActionType('Unknown step specified in recipe', action=action_name, step=(i + 1), path=path)
step_resources = cls._parse_resource_usage(action, declarations=resources)
(fixed_files, _variable_files) = cls._parse_file_usage(action_class, action)
step = RecipeStep(action_class, action, step_resources, fixed_files)
steps.append(step)
return RecipeObject(name, description, steps, resources, defaults, path)
except RecipeFileInvalid as exc:
cls._future_raise(RecipeFileInvalid, RecipeFileInvalid(exc.msg, recipe=name, **exc.params), sys.exc_info()[2])
|
Create a RecipeObject from a file.
The file should be a specially constructed yaml file that describes
the recipe as well as the actions that it performs.
Args:
path (str): The path to the recipe file that we wish to load
actions_dict (dict): A dictionary of named RecipeActionObject
types that is used to look up all of the steps listed in
the recipe file.
resources_dict (dict): A dictionary of named RecipeResource types
that is used to look up all of the shared resources listed in
the recipe file.
file_format (str): The file format of the recipe file. Currently
we only support yaml.
name (str): The name of this recipe if we created it originally from an
archive.
|
codesearchnet
|
def delete_endpoint(self, endpoint_name=None):
endpoint_name = (endpoint_name or self.best_training_job())
self.sagemaker_session.delete_endpoint(endpoint_name)
|
Delete an Amazon SageMaker endpoint.
If an endpoint name is not specified, this defaults to looking for an endpoint that
shares a name with the best training job for deletion.
Args:
endpoint_name (str): Name of the endpoint to delete
|
codesearchnet
|
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
self.certificate_type.write(tstream, kmip_version=kmip_version)
self.certificate_value.write(tstream, kmip_version=kmip_version)
self.length = tstream.length()
super(Certificate, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer)
|
Write the data encoding the Certificate object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
|
codesearchnet
|
def cluster_nodes(self, tol=0.2):
lattice = self.structure.lattice
vf_coords = self.extrema_coords
if len(vf_coords) == 0:
if self.extrema_type is None:
logger.warning(
"Please run ChargeDensityAnalyzer.get_local_extrema first!")
return
new_f_coords = []
self._update_extrema(new_f_coords, self.extrema_type)
return new_f_coords
dist_matrix = np.array(lattice.get_all_distances(vf_coords, vf_coords))
dist_matrix = (dist_matrix + dist_matrix.T) / 2
for i in range(len(dist_matrix)):
dist_matrix[i, i] = 0
condensed_m = squareform(dist_matrix)
z = linkage(condensed_m)
cn = fcluster(z, tol, criterion="distance")
merged_fcoords = []
for n in set(cn):
frac_coords = []
for i, j in enumerate(np.where(cn == n)[0]):
if i == 0:
frac_coords.append(self.extrema_coords[j])
else:
f_coords = self.extrema_coords[j]
d, image = lattice.get_distance_and_image(frac_coords[0],
f_coords)
frac_coords.append(f_coords + image)
merged_fcoords.append(np.average(frac_coords, axis=0))
merged_fcoords = [f - np.floor(f) for f in merged_fcoords]
merged_fcoords = [f * (np.abs(f - 1) > 1E-15) for f in merged_fcoords]
self._update_extrema(merged_fcoords, extrema_type=self.extrema_type)
logger.debug(
"{} vertices after combination.".format(len(self.extrema_coords)))
|
Cluster nodes that are closer together than the given distance tolerance.
Args:
tol (float): A distance tolerance. PBC is taken into account.
|
juraj-google-style
|
def value(self, new_value):
if ((self.unit != units.Undefined) and (new_value.unit != self.unit)):
raise AttributeError(('%s must be in %s' % (self.__class__, self.unit)))
self._value = new_value
|
Set the value of this measurement.
Raises:
AttributeError: if the new value isn't of the correct units.
|
codesearchnet
|
def __init__(self, filename):
self.root_values = {}
self.tree = []
if filename is not None and os.path.dirname(filename) == '':
self.original_filename = os.path.join(os.getcwd(), filename)
else:
self.original_filename = filename
if filename is not None:
self.sax_parse(filename)
|
A container for a SAX SVG light tree objects document.
This class provides functions for extracting SVG data into Path objects.
Args:
filename (str): The filename of the SVG file
|
juraj-google-style
|
def from_inputs(cls, workdir, inputs, manager=None, pickle_protocol=(- 1), task_class=ScfTask, work_class=Work, remove=False):
if (not isinstance(inputs, (list, tuple))):
inputs = [inputs]
flow = cls(workdir, manager=manager, pickle_protocol=pickle_protocol, remove=remove)
work = work_class()
for inp in inputs:
work.register(inp, task_class=task_class)
flow.register_work(work)
return flow.allocate()
|
Construct a simple flow from a list of inputs. The flow contains a single Work with
tasks whose class is given by task_class.
.. warning::
Don't use this interface if you have dependencies among the tasks.
Args:
workdir: String specifying the directory where the works will be produced.
inputs: List of inputs.
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
pickle_protocol: Pickle protocol version used for saving the status of the object.
-1 denotes the latest version supported by the python interpreter.
task_class: The class of the :class:`Task`.
work_class: The class of the :class:`Work`.
remove: attempt to remove working directory `workdir` if directory already exists.
|
codesearchnet
|
def _add_main_menu(output, node_name=None, enable_list_tensors=True, enable_node_info=True, enable_print_tensor=True, enable_list_inputs=True, enable_list_outputs=True):
menu = debugger_cli_common.Menu()
menu.append(debugger_cli_common.MenuItem('list_tensors', 'list_tensors', enabled=enable_list_tensors))
if node_name:
menu.append(debugger_cli_common.MenuItem('node_info', 'node_info -a -d -t %s' % node_name, enabled=enable_node_info))
menu.append(debugger_cli_common.MenuItem('print_tensor', 'print_tensor %s' % node_name, enabled=enable_print_tensor))
menu.append(debugger_cli_common.MenuItem('list_inputs', 'list_inputs -c -r %s' % node_name, enabled=enable_list_inputs))
menu.append(debugger_cli_common.MenuItem('list_outputs', 'list_outputs -c -r %s' % node_name, enabled=enable_list_outputs))
else:
menu.append(debugger_cli_common.MenuItem('node_info', None, enabled=False))
menu.append(debugger_cli_common.MenuItem('print_tensor', None, enabled=False))
menu.append(debugger_cli_common.MenuItem('list_inputs', None, enabled=False))
menu.append(debugger_cli_common.MenuItem('list_outputs', None, enabled=False))
menu.append(debugger_cli_common.MenuItem('run_info', 'run_info'))
menu.append(debugger_cli_common.MenuItem('help', 'help'))
output.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
|
Generate main menu for the screen output from a command.
Args:
output: (debugger_cli_common.RichTextLines) the output object to modify.
node_name: (str or None) name of the node involved (if any). If None,
the menu items node_info, list_inputs and list_outputs will be
automatically disabled, overriding the values of arguments
enable_node_info, enable_list_inputs and enable_list_outputs.
enable_list_tensors: (bool) whether the list_tensor menu item will be
enabled.
enable_node_info: (bool) whether the node_info item will be enabled.
enable_print_tensor: (bool) whether the print_tensor item will be enabled.
enable_list_inputs: (bool) whether the item list_inputs will be enabled.
enable_list_outputs: (bool) whether the item list_outputs will be enabled.
|
github-repos
|
def parse_rfc3339_utc_string(rfc3339_utc_string):
m = re.match(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}).?(\d*)Z',
rfc3339_utc_string)
if not m:
return None
groups = m.groups()
if len(groups[6]) not in (0, 3, 6, 9):
return None
g = [int(val) for val in groups[:6]]
fraction = groups[6]
if not fraction:
micros = 0
elif len(fraction) == 3:
micros = int(fraction) * 1000
elif len(fraction) == 6:
micros = int(fraction)
elif len(fraction) == 9:
micros = int(round(int(fraction) / 1000))
else:
assert False, 'Fraction length not 0, 3, 6, or 9: {}'.format(len(fraction))
try:
return datetime(g[0], g[1], g[2], g[3], g[4], g[5], micros, tzinfo=pytz.utc)
except ValueError as e:
assert False, 'Could not parse RFC3339 datestring: {} exception: {}'.format(
rfc3339_utc_string, e)
|
Converts a datestamp from RFC3339 UTC to a datetime.
Args:
rfc3339_utc_string: a datetime string in RFC3339 UTC "Zulu" format
Returns:
A datetime.
|
juraj-google-style
|
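A short usage sketch for `parse_rfc3339_utc_string` above, assuming it and its `pytz` dependency are importable.

```python
# Fractional seconds may be absent or have 3, 6, or 9 digits.
print(parse_rfc3339_utc_string("2017-08-29T18:52:35.048Z"))
# 2017-08-29 18:52:35.048000+00:00

print(parse_rfc3339_utc_string("2017-08-29T18:52:35Z"))
# 2017-08-29 18:52:35+00:00

# Anything that does not match the pattern returns None.
print(parse_rfc3339_utc_string("29/08/2017 18:52"))
# None
```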
def ParseAutofillRow(
self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = ChromeAutofillEventData()
event_data.field_name = self._GetRowValue(query_hash, row, 'name')
event_data.value = self._GetRowValue(query_hash, row, 'value')
event_data.usage_count = self._GetRowValue(query_hash, row, 'count')
event_data.query = query
timestamp = self._GetRowValue(query_hash, row, 'date_created')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if event_data.usage_count > 1:
timestamp = self._GetRowValue(query_hash, row, 'date_last_used')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_USED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses an autofill entry row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
|
juraj-google-style
|
def _save_representative_dataset(representative_dataset: repr_dataset.RepresentativeDatasetOrMapping, signature_def_map: _SignatureDefMap) -> Mapping[str, _RepresentativeDatasetFile]:
if isinstance(representative_dataset, Mapping):
if set(signature_def_map.keys()) != set(representative_dataset.keys()):
raise ValueError(f'The signature keys and the keys of representative dataset map do not match. Signature keys: {set(signature_def_map.keys())}, representative dataset map: {set(representative_dataset.keys())}.')
representative_dataset_map = representative_dataset
elif len(signature_def_map.keys()) > 1:
raise ValueError(f'Representative dataset is not a mapping (got: {type(representative_dataset)}), but there is more than one signature key provided. Please provide a map of {{signature_key -> dataset}} with more than one signature key.')
else:
representative_dataset_map = {list(signature_def_map.keys())[0]: representative_dataset}
path_map = {}
expected_input_key_map = {}
for signature_key, signature_def in signature_def_map.items():
_, path_map[signature_key] = tempfile.mkstemp(suffix='.tfrecord', prefix=signature_key)
expected_input_key_map[signature_key] = signature_def.inputs.keys()
return repr_dataset.TfRecordRepresentativeDatasetSaver(path_map=path_map, expected_input_key_map=expected_input_key_map).save(representative_dataset_map)
|
Saves the representative dataset to temporary TFRecord files.
Args:
representative_dataset: Representative dataset used for the calibration
step. Representative datasets should exist for each signature def key in
`signature_def_keys`.
signature_def_map: Signature def key -> SignatureDef mapping.
Returns:
A map from signature key to the saved representative dataset file.
|
github-repos
|
def squad_v2_f1(y_true: List[List[str]], y_predicted: List[str]) -> float:
f1_total = 0.0
for (ground_truth, prediction) in zip(y_true, y_predicted):
prediction_tokens = normalize_answer(prediction).split()
f1s = []
for gt in ground_truth:
gt_tokens = normalize_answer(gt).split()
if ((len(gt_tokens) == 0) or (len(prediction_tokens) == 0)):
f1s.append(float((gt_tokens == prediction_tokens)))
continue
common = (Counter(prediction_tokens) & Counter(gt_tokens))
num_same = sum(common.values())
if (num_same == 0):
f1s.append(0.0)
continue
precision = ((1.0 * num_same) / len(prediction_tokens))
recall = ((1.0 * num_same) / len(gt_tokens))
f1 = (((2 * precision) * recall) / (precision + recall))
f1s.append(f1)
f1_total += max(f1s)
return (((100 * f1_total) / len(y_true)) if (len(y_true) > 0) else 0)
|
Calculates the F-1 score between y_true and y_predicted.
The F-1 score uses the best-matching y_true answer,
the same as in SQuAD-v2.0.
Args:
y_true: list of correct answers (correct answers are represented by list of strings)
y_predicted: list of predicted answers
Returns:
F-1 score : float
|
codesearchnet
|
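A usage sketch for `squad_v2_f1` above, assuming it (and the SQuAD-style `normalize_answer` it relies on) is importable from the metrics module.

```python
y_true = [["the Eiffel Tower", "Eiffel Tower"], ["in 1889"]]
y_predicted = ["eiffel tower", "1890"]

# With the standard SQuAD normalization the first prediction matches a gold
# answer exactly (F-1 = 100) and the second shares no tokens with its gold
# answer (F-1 = 0), so the averaged score is 50.0.
print(squad_v2_f1(y_true, y_predicted))  # 50.0
```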
def rename_style(self, old_name, new_name):
if old_name not in self.styles:
raise KeyError("Style %r not found" % old_name)
if new_name in self.styles:
raise ValueError("There is already a style called %r" % new_name)
if not is_valid_field_content(new_name):
raise ValueError("%r is not a valid name" % new_name)
self.styles[new_name] = self.styles[old_name]
del self.styles[old_name]
for line in self:
if line.style == old_name:
line.style = new_name
|
Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken.
|
juraj-google-style
|
def forward(self, hidden_states: torch.Tensor, metadata: Optional[List[torch.LongTensor]], decode: Optional[bool]=False, get_preds: Optional[bool]=False) -> List[torch.Tensor]:
batch_size = hidden_states.shape[0]
music_tokens, *music_tokens_conds = self.encode(hidden_states, bs_chunks=batch_size)
loss, metrics = self.forward_tokens(music_tokens=music_tokens, music_tokens_conds=music_tokens_conds, metadata=metadata, get_preds=get_preds)
if decode:
dequantised_states = self.decode([music_tokens, *music_tokens_conds])
else:
dequantised_states = None
return (dequantised_states, loss, metrics)
|
Encode the hidden states using the `vqvae` encoder, and then predicts the next token in the `forward_tokens`
function. The loss is the sum of the `encoder` loss and the `decoder` loss.
Args:
hidden_states (`torch.Tensor`):
Hidden states which should be raw audio
metadata (`List[torch.LongTensor]`, *optional*):
List containing the metadata conditioning tensor with the lyric and the metadata tokens.
decode (`bool`, *optional*, defaults to `False`):
Whether or not to decode the encoded to tokens.
get_preds (`bool`, *optional*, defaults to `False`):
Whether or not to return the actual predictions of the model.
|
github-repos
|
def get_dihedral_degrees(self, indices, start_row=0):
coords = ['x', 'y', 'z']
if isinstance(indices, pd.DataFrame):
i_pos = self.loc[indices.index, coords].values
b_pos = self.loc[indices.loc[:, 'b'], coords].values
a_pos = self.loc[indices.loc[:, 'a'], coords].values
d_pos = self.loc[indices.loc[:, 'd'], coords].values
else:
indices = np.array(indices)
if (len(indices.shape) == 1):
indices = indices[None, :]
i_pos = self.loc[indices[:, 0], coords].values
b_pos = self.loc[indices[:, 1], coords].values
a_pos = self.loc[indices[:, 2], coords].values
d_pos = self.loc[indices[:, 3], coords].values
IB = (b_pos - i_pos)
BA = (a_pos - b_pos)
AD = (d_pos - a_pos)
N1 = np.cross(IB, BA, axis=1)
N2 = np.cross(BA, AD, axis=1)
(n1, n2) = [(v / np.linalg.norm(v, axis=1)[:, None]) for v in (N1, N2)]
dot_product = np.sum((n1 * n2), axis=1)
dot_product[(dot_product > 1)] = 1
dot_product[(dot_product < (- 1))] = (- 1)
dihedrals = np.degrees(np.arccos(dot_product))
where_to_modify = (np.sum((BA * np.cross(n1, n2, axis=1)), axis=1) > 0)
where_to_modify = np.nonzero(where_to_modify)[0]
length = (indices.shape[0] - start_row)
sign = np.full(length, 1, dtype='float64')
to_add = np.full(length, 0, dtype='float64')
sign[where_to_modify] = (- 1)
to_add[where_to_modify] = 360
dihedrals = (to_add + (sign * dihedrals))
return dihedrals
|
Return the dihedrals between given atoms.
Calculates the dihedral angle in degrees between the atoms with
indices ``i, b, a, d``.
The indices can be given in three ways:
* As simple list ``[i, b, a, d]``
* As list of lists: ``[[i1, b1, a1, d1], [i2, b2, a2, d2]...]``
* As :class:`pandas.DataFrame` where ``i`` is taken from the index and
``b``, ``a`` and ``d`` from the respective columns
``'b'``, ``'a'`` and ``'d'``.
Args:
indices (list): the indices ``i, b, a, d``, given in one of the three formats described above.
Returns:
:class:`numpy.ndarray`: Vector of angles in degrees.
|
codesearchnet
|
def _broadcast_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):
mean, var = nn.moments(x, reduction_axes, None, None, False)
target_shape = []
for axis in range(ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(array_ops.shape(x)[axis])
target_shape = array_ops_stack.stack(target_shape)
broadcast_mean = array_ops.reshape(mean, target_shape)
broadcast_var = array_ops.reshape(var, target_shape)
if gamma is None:
broadcast_gamma = None
else:
broadcast_gamma = array_ops.reshape(gamma, target_shape)
if beta is None:
broadcast_beta = None
else:
broadcast_beta = array_ops.reshape(beta, target_shape)
normed = nn.batch_normalization(x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon)
return (normed, mean, var)
|
Non-fused, broadcast version of `normalize_batch_in_training`.
Args:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
|
github-repos
|
def dump_credibilities(self, output):
for p in self.products:
json.dump({
"product_id": p.name,
"credibility": self.credibility(p)
}, output)
output.write("\n")
|
Dump credibilities of all products.
Args:
output: a writable object.
|
juraj-google-style
|
def conversations_invite(self, *, channel: str, users: List[str], **kwargs) -> SlackResponse:
self._validate_xoxp_token()
kwargs.update({'channel': channel, 'users': users})
return self.api_call('conversations.invite', json=kwargs)
|
Invites users to a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
users (list): A list of user ids to invite. e.g. ['U2345678901', 'U3456789012']
|
codesearchnet
|
def SetUseSSL(self, use_ssl):
self._use_ssl = use_ssl
logger.debug('Elasticsearch use_ssl: {0!s}'.format(use_ssl))
|
Sets the use of SSL.
Args:
use_ssl (bool): enforces use of ssl.
|
codesearchnet
|
def extend(self, table, keys=None):
if keys:
for k in keys:
if (k not in self._Header()):
raise IndexError("Unknown key: '%s'" % k)
extend_with = []
for column in table.header:
if (column not in self.header):
extend_with.append(column)
if (not extend_with):
return
for column in extend_with:
self.AddColumn(column)
if (not keys):
for (row1, row2) in zip(self, table):
for column in extend_with:
row1[column] = row2[column]
return
for row1 in self:
for row2 in table:
for k in keys:
if (row1[k] != row2[k]):
break
else:
for column in extend_with:
row1[column] = row2[column]
break
|
Extends all rows in the texttable.
The rows are extended with the new columns from the table.
Args:
table: A texttable, the table to extend this table by.
keys: A set, the set of columns to use as the key. If None, the
row index is used.
Raises:
IndexError: If key is not a valid column name.
|
codesearchnet
|
def ordered_dump(data, Dumper=yaml.Dumper, **kws):
class OrderedDumper(Dumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())
OrderedDumper.add_representer(OrderedDict, _dict_representer)
return yaml.dump(data, None, OrderedDumper, **kws)
|
Expand PyYAML's built-in dumper to support serializing OrderedDict. Returns
a string as the dump result of the original data structure, which may
include OrderedDict.
Args:
data: the data structure to be dumped(parsed) which is supposed to
contain OrderedDict.
Dumper: the yaml serializer to be expanded and used.
kws: extra key-value arguments to be passed to yaml.dump.
|
codesearchnet
|
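A usage sketch for `ordered_dump` above, assuming it is importable; unlike a plain `yaml.dump`, key order is preserved because the mapping is represented from `data.items()` rather than a sortable dict.

```python
from collections import OrderedDict

config = OrderedDict([
    ("zeta", 1),
    ("alpha", 2),
    ("nested", OrderedDict([("b", 3), ("a", 4)])),
])

print(ordered_dump(config, default_flow_style=False))
# zeta: 1
# alpha: 2
# nested:
#   b: 3
#   a: 4
```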
def add_snmp_community(self, **kwargs):
community = kwargs.pop('community')
callback = kwargs.pop('callback', self._callback)
config = ET.Element('config')
snmp_server = ET.SubElement(config, 'snmp-server',
xmlns=("urn:brocade.com:mgmt:"
"brocade-snmp"))
community_el = ET.SubElement(snmp_server, 'community')
community_name = ET.SubElement(community_el, 'community')
community_name.text = community
return callback(config)
|
Add SNMP Community to NOS device.
Args:
community (str): Community string to be added to device.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `community` is not defined.
|
juraj-google-style
|
async def remember(request, user_id):
auth_policy = request.get(POLICY_KEY)
if auth_policy is None:
raise RuntimeError('auth_middleware not installed')
return await auth_policy.remember(request, user_id)
|
Called to store and remember the userid for a request
Args:
request: aiohttp Request object.
user_id: String representing the user_id to remember
Raises:
RuntimeError: Middleware is not installed
|
juraj-google-style
|
def _validate_alias_command_level(alias, command):
alias_collision_table = AliasManager.build_collision_table([alias])
if not alias_collision_table:
return
command_collision_table = AliasManager.build_collision_table([command])
alias_collision_levels = alias_collision_table.get(alias.split()[0], [])
command_collision_levels = command_collision_table.get(command.split()[0], [])
if set(alias_collision_levels) & set(command_collision_levels):
raise CLIError(COMMAND_LVL_ERROR.format(alias, command))
|
Make sure that if the alias is a reserved command, the command that the alias points to
in the command tree does not conflict in levels.
e.g. 'dns' -> 'network dns' is valid because dns is a level 2 command and network dns starts at level 1.
However, 'list' -> 'show' is not valid because list and show are both reserved commands at level 2.
Args:
alias: The name of the alias.
command: The command that the alias points to.
|
juraj-google-style
|
def db_wb020(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `db_wb020`'.format(value))
self._db_wb020 = value
|
Corresponds to IDD Field `db_wb020`:
mean coincident dry-bulb temperature corresponding to the
wet-bulb temperature at 2.0% annual cumulative frequency of occurrence.
Args:
value (float): value for IDD Field `db_wb020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def area_of_a_triangle_in_cartesian_space(a, b, c):
return (0.5 * np.linalg.norm(np.cross((b - a), (c - a))))
|
Returns the area of a triangle defined by three points in Cartesian space.
Args:
a (np.array): Cartesian coordinates of point A.
b (np.array): Cartesian coordinates of point B.
c (np.array): Cartesian coordinates of point C.
Returns:
(float): the area of the triangle.
|
codesearchnet
|
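A worked example for the triangle-area helper above, assuming it is importable; a 3-4-5 right triangle in the xy-plane has area 0.5 * 3 * 4 = 6.

```python
import numpy as np

a = np.array([0.0, 0.0, 0.0])
b = np.array([3.0, 0.0, 0.0])
c = np.array([0.0, 4.0, 0.0])

# |(b - a) x (c - a)| is the parallelogram area (12 here); half of it is the triangle area.
print(area_of_a_triangle_in_cartesian_space(a, b, c))  # 6.0
```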
def start(self, attempts=5, timeout=2):
if not self.alive():
with LogTask('Create network %s' % self.name()):
net = self.libvirt_con.networkCreateXML(self._libvirt_xml())
if net is None:
raise RuntimeError(
'failed to create network, XML: %s' %
(self._libvirt_xml())
)
for _ in range(attempts):
if net.isActive():
return
LOGGER.debug(
'waiting for network %s to become active', net.name()
)
time.sleep(timeout)
raise RuntimeError(
'failed to verify network %s is active' % net.name()
)
|
Start the network, will check if the network is active ``attempts``
times, waiting ``timeout`` between each attempt.
Args:
attempts (int): number of attempts to check the network is active
timeout (int): timeout for each attempt
Returns:
None
Raises:
RuntimeError: if network creation failed, or failed to verify it is
active.
|
juraj-google-style
|
def _parse_normalization(normalization):
parsed_normalization = None
if isinstance(normalization, dict):
if len(normalization.keys()) == 1:
items = list(normalization.items())[0]
if len(items) == 2:
if items[1] and isinstance(items[1], dict):
parsed_normalization = items
else:
parsed_normalization = items[0]
elif isinstance(normalization, STR_TYPE):
parsed_normalization = normalization
return parsed_normalization
|
Parse a normalization item.
Transform dicts into a tuple containing the normalization
options. If a string is found, the actual value is used.
Args:
normalization: Normalization to parse.
Returns:
Tuple or string containing the parsed normalization.
|
juraj-google-style
|
def synctree(src, dst, onexist=None):
src = pathlib.Path(src).resolve()
dst = pathlib.Path(dst).resolve()
if (not src.is_dir()):
raise ValueError
if (dst.exists() and (not dst.is_dir())):
raise ValueError
if (onexist is None):
def onexist():
pass
_synctree(src, dst, onexist)
|
Recursively sync files at directory src to dst
This is more or less equivalent to::
cp -n -R ${src}/ ${dst}/
If a file at the same path exists in src and dst, it is NOT overwritten
in dst. Pass ``onexist`` in order to raise an error on such conditions.
Args:
src (path-like): source directory
dst (path-like): destination directory, does not need to exist
onexist (callable): function to call if file exists at destination,
takes the full path to destination file as only argument
|
codesearchnet
|
def __init__(self, message_type, message_text):
if message_type not in self.TYPES:
raise TypeError("Unknown message_type: " + message_type)
if not isinstance(message_text, six.text_type):
raise TypeError("Message text must be unicode")
self.type = message_type
self.text = message_text
|
Create a new message.
Args:
message_type (unicode): The type associated with this message. Must be included in `TYPES`.
message_text (unicode): The textual message.
|
juraj-google-style
|
def metadata(self):
if (self._metadata is None):
try:
with open(self.paths.metadata()) as metadata_fd:
self._metadata = json.load(metadata_fd)
except IOError:
self._metadata = {}
return self._metadata
|
Retrieve the metadata info for this prefix
Returns:
dict: metadata info
|
codesearchnet
|
def get_block_containing_tx(self, txid):
blocks = list(backend.query.get_block_with_transaction(self.connection, txid))
if len(blocks) > 1:
logger.critical('Transaction id %s exists in multiple blocks', txid)
return [block['height'] for block in blocks]
|
Retrieve the list of blocks (block ids) containing a
transaction with transaction id `txid`
Args:
txid (str): transaction id of the transaction to query
Returns:
Block id list (list(int))
|
juraj-google-style
|
def _read_template(template):
template = _read_content_or_path(template)
file_obj = StringIO.StringIO(template)
return ET.parse(file_obj)
|
Read XSLT template.
Args:
template (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
Returns:
obj: Required XML parsed with ``lxml.etree``.
|
codesearchnet
|
def probability_density(self, X):
self.check_fit()
covariance = self.covariance * np.identity(self.covariance.shape[0])
return stats.multivariate_normal.pdf(X, cov=covariance)
|
Compute probability density function for given copula family.
Args:
X: `numpy.ndarray` or `pandas.DataFrame`
Returns:
np.array: Probability density for the input values.
|
juraj-google-style
|
def __init__(self, *, dtype=np.complex64):
if dtype not in {np.complex64, np.complex128}:
raise ValueError(
'dtype must be complex64 or complex128 but was {}'.format(
dtype))
self._dtype = dtype
|
A sparse matrix simulator.
Args:
dtype: The `numpy.dtype` used by the simulation. One of
`numpy.complex64` or `numpy.complex128`
|
juraj-google-style
|
def datasets_insert(self, dataset_name, friendly_name=None, description=None):
url = Api._ENDPOINT + (Api._DATASETS_PATH % (dataset_name.project_id, ''))
data = {
'kind': 'bigquery#dataset',
'datasetReference': {
'projectId': dataset_name.project_id,
'datasetId': dataset_name.dataset_id
},
}
if friendly_name:
data['friendlyName'] = friendly_name
if description:
data['description'] = description
return datalab.utils.Http.request(url, data=data, credentials=self._credentials)
|
Issues a request to create a dataset.
Args:
dataset_name: the name of the dataset to create.
friendly_name: (optional) the friendly name for the dataset
description: (optional) a description for the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
|
juraj-google-style
|
def get_flux_biases(sampler, embedding, chain_strength, num_reads=1000, max_age=3600):
if (not isinstance(sampler, dimod.Sampler)):
raise TypeError('input sampler should be DWaveSampler')
system_name = sampler.properties.get('chip_id', str(sampler.__class__))
try:
with cache_connect() as cur:
fbo = get_flux_biases_from_cache(cur, embedding.values(), system_name, chain_strength=chain_strength, max_age=max_age)
return fbo
except MissingFluxBias:
pass
try:
import dwave.drivers as drivers
except ImportError:
msg = "dwave-drivers not found, cannot calculate flux biases. dwave-drivers can be installed with 'pip install dwave-drivers --extra-index-url https:...'"
raise RuntimeError(msg)
fbo = drivers.oneshot_flux_bias(sampler, embedding.values(), num_reads=num_reads, chain_strength=chain_strength)
with cache_connect() as cur:
for chain in embedding.values():
v = next(iter(chain))
flux_bias = fbo.get(v, 0.0)
insert_flux_bias(cur, chain, system_name, flux_bias, chain_strength)
return fbo
|
Get the flux bias offsets for sampler and embedding.
Args:
sampler (:obj:`.DWaveSampler`):
A D-Wave sampler.
embedding (dict[hashable, iterable]):
Mapping from a source graph to the specified sampler’s graph (the target graph). The
keys of embedding should be nodes in the source graph, the values should be an iterable
of nodes in the target graph.
chain_strength (number):
Desired chain coupling strength. This is the magnitude of couplings between qubits
in a chain.
num_reads (int, optional, default=1000):
The number of reads per system call if new flux biases need to be calculated.
max_age (int, optional, default=3600):
The maximum age (in seconds) allowed for previously calculated flux bias offsets.
Returns:
dict: A dict where the keys are the nodes in the chains and the values are the flux biases.
|
codesearchnet
|
def _flag_is_registered(self, flag_obj):
flag_dict = self._flags()
name = flag_obj.name
if flag_dict.get(name, None) == flag_obj:
return True
short_name = flag_obj.short_name
if (short_name is not None and
flag_dict.get(short_name, None) == flag_obj):
return True
return False
|
Checks whether a Flag object is registered under long name or short name.
Args:
flag_obj: Flag, the Flag instance to check for.
Returns:
bool, True iff flag_obj is registered under long name or short name.
|
juraj-google-style
|
def format_unitary(mat, decimals=None):
num_basis = len(mat)
mat_complex = np.zeros((num_basis, num_basis), dtype=complex)
for (i, vec) in enumerate(mat):
mat_complex[i] = format_statevector(vec, decimals)
return mat_complex
|
Format unitary coming from the backend to present to the Qiskit user.
Args:
mat (list[list]): a list of list of [re, im] complex numbers
decimals (int): the number of decimals in the statevector.
If None, no rounding is done.
Returns:
list[list[complex]]: a matrix of complex numbers
|
codesearchnet
|
def map_arg(**maps):
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if six.PY2:
argmap = inspect.getcallargs(func, *args, **kwargs)
else:
sig = inspect.signature(func)
argmap = sig.bind_partial(*args, **kwargs).arguments
for (k, map_func) in six.iteritems(maps):
if (k in argmap):
argmap[k] = map_func(argmap[k])
return func(**argmap)
return wrapper
return deco
|
Apply a mapping on certain argument before calling the original function.
Args:
maps (dict): {argument_name: map_func}
|
codesearchnet
|
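A usage sketch for the `map_arg` decorator above, assuming it is importable; each named argument is passed through its mapping function before the wrapped function runs.

```python
@map_arg(path=str.lower, retries=int)
def fetch(path, retries=3):
    return (path, retries)

print(fetch("HTTP://Example.COM/Data", retries="5"))  # ('http://example.com/data', 5)
print(fetch("README.md"))                             # ('readme.md', 3); unbound args keep their defaults
```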
def readSettings(self):
success = (self.readHolidayDates() and self.readMonthTariffs(ReadMonths.kWh) and self.readMonthTariffs(ReadMonths.kWhReverse) and self.readSchedules(ReadSchedules.Schedules_1_To_4) and self.readSchedules(ReadSchedules.Schedules_5_To_6))
return success
|
Recommended call to read all meter settings at once.
Returns:
bool: True if all subsequent serial calls completed with ACK.
|
codesearchnet
|
def quantize_saved_model(src_saved_model_path: str, dst_saved_model_path: str, config: qc.QuantizationConfig) -> None:
print('=== User-provided QuantizationConfig ===')
print(config)
config = qc.QuantizationConfig.FromString(pywrap_quantization.populate_default_configs(config.SerializeToString()))
config = qc.QuantizationConfig.FromString(pywrap_quantization.expand_preset_configs(config.SerializeToString()))
print('=== Updated QuantizationConfig ===')
print(config)
if not (_has_quantization_method(config.specs, 'static_range_ptq') and len(config.calibration_options.representative_datasets) == 1) and (not _has_quantization_method(config.specs, 'weight_only_ptq')):
raise ValueError('`quantize_saved_model` currently only supports static-range PTQ with a single signature or weight-only quantization.')
signature_def_map = save_model.get_signatures_from_saved_model(src_saved_model_path, signature_keys=None, tags=set(config.tf_saved_model.tags))
signature_def_map_serialized = _serialize_signature_def_map(signature_def_map)
if _has_quantization_method(config.specs, 'static_range_ptq'):
pywrap_quantization.static_range_ptq(src_saved_model_path, dst_saved_model_path, quantization_config_serialized=config.SerializeToString(), signature_keys=list(signature_def_map.keys()), signature_def_map_serialized=signature_def_map_serialized, py_function_library=py_function_lib.PyFunctionLibrary())
elif _has_quantization_method(config.specs, 'weight_only_ptq'):
pywrap_quantization.weight_only_ptq(src_saved_model_path, dst_saved_model_path, quantization_config_serialized=config.SerializeToString(), signature_keys=list(signature_def_map.keys()), signature_def_map_serialized=signature_def_map_serialized, py_function_library=py_function_lib.PyFunctionLibrary())
|
Quantizes a saved model.
Args:
src_saved_model_path: Path to the directory for the source SavedModel.
dst_saved_model_path: Path to the directory for the destination SavedModel.
config: Quantization configuration.
Raises:
ValueError: When `config` is not configured for static-range PTQ with a
single representative dataset, nor for weight-only PTQ.
|
github-repos
|
def login(self, email, password, android_id):
self._email = email
self._android_id = android_id
res = gpsoauth.perform_master_login(self._email, password, self._android_id)
if ('Token' not in res):
raise exception.LoginException(res.get('Error'), res.get('ErrorDetail'))
self._master_token = res['Token']
self.refresh()
return True
|
Authenticate to Google with the provided credentials.
Args:
email (str): The account to use.
password (str): The account password.
android_id (str): An identifier for this client.
Raises:
LoginException: If there was a problem logging in.
|
codesearchnet
|
def save_qasm(self, file_path: Union[(str, bytes, int)], header: Optional[str]=None, precision: int=10, qubit_order: ops.QubitOrderOrList=ops.QubitOrder.DEFAULT) -> None:
self._to_qasm_output(header, precision, qubit_order).save(file_path)
|
Save a QASM file equivalent to the circuit.
Args:
file_path: The location of the file where the qasm will be written.
header: A multi-line string that is placed in a comment at the top
of the QASM. Defaults to a cirq version specifier.
precision: Number of digits to use when representing numbers.
qubit_order: Determines how qubits are ordered in the QASM
register.
|
codesearchnet
|
def _ascending_sort(values, axis, return_argsort=False):
dtype = values.dtype
if dtype.is_unsigned:
offset = dtype.max
values_or_indices = _descending_sort(offset - values, axis, return_argsort)
return values_or_indices if return_argsort else offset - values_or_indices
elif dtype.is_integer:
values_or_indices = _descending_sort(-values - 1, axis, return_argsort)
return values_or_indices if return_argsort else -values_or_indices - 1
else:
values_or_indices = _descending_sort(-values, axis, return_argsort)
return values_or_indices if return_argsort else -values_or_indices
|
Sorts values in ascending order.
Args:
values: Tensor of numeric values.
axis: Index of the axis which values should be sorted along.
return_argsort: If False, return the sorted values. If True, return the
indices that would sort the values.
Returns:
The sorted values, or the indices that would sort them when `return_argsort` is True.
|
github-repos
|
def write_message(self, message, timeout):
with self._writer_lock:
self._transport.write(message.header, timeout.remaining_ms)
if timeout.has_expired():
_LOG.warning('Timed out between AdbMessage header and data, sending '
'data anyway with 10ms timeout')
timeout = timeouts.PolledTimeout.from_millis(10)
self._transport.write(message.data, timeout.remaining_ms)
|
Send the given message over this transport.
Args:
message: The AdbMessage to send.
timeout: Use this timeout for the entire write operation, it should be an
instance of timeouts.PolledTimeout.
|
juraj-google-style
|
def from_pkcs12(cls, key, email, scopes, subject=None, passphrase=PKCS12_PASSPHRASE):
key = OpenSSL.crypto.load_pkcs12(key, passphrase).get_privatekey()
return cls(key=key, email=email, scopes=scopes, subject=subject)
|
Alternate constructor intended for using .p12 files.
Args:
key (bytes) - Contents of the .p12 private key file.
email (str) - Service account email.
scopes (Union[str, collections.Iterable[str]]) -
List of permissions that the application requests.
subject (str) - The email address of the user for which
the application is requesting delegated access.
passphrase (str) - Passphrase of private key file.
Google generates .p12 files secured with fixed 'notasecret'
passphrase, so if you didn't change it it's fine to omit
this parameter.
Returns:
ServiceAccount
|
codesearchnet
|
def CallDhclient(interfaces, logger, dhclient_script=None):
logger.info('Enabling the Ethernet interfaces %s.', interfaces)
dhclient_command = ['dhclient']
if (dhclient_script and os.path.exists(dhclient_script)):
dhclient_command += ['-sf', dhclient_script]
try:
subprocess.check_call(((dhclient_command + ['-x']) + interfaces))
subprocess.check_call((dhclient_command + interfaces))
except subprocess.CalledProcessError:
logger.warning('Could not enable interfaces %s.', interfaces)
|
Configure the network interfaces using dhclient.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
dhclient_script: string, the path to a dhclient script used by dhclient.
|
codesearchnet
|
class SpQRConfig(QuantizationConfigMixin):
def __init__(self, bits: int=3, beta1: int=16, beta2: int=16, shapes: Optional[Dict[str, int]]=None, modules_to_not_convert: Optional[List[str]]=None, **kwargs):
if shapes is None:
shapes = {}
self.shapes = shapes
self.quant_method = QuantizationMethod.SPQR
self.bits = bits
self.beta1 = beta1
self.beta2 = beta2
self.modules_to_not_convert = modules_to_not_convert
self.post_init()
def post_init(self):
if not isinstance(self.bits, int):
raise TypeError('bits must be an int')
if not isinstance(self.beta1, int):
raise TypeError('beta1 must be an int')
if not isinstance(self.beta2, int):
raise TypeError('beta2 must be an int')
if self.bits != 3:
raise ValueError('SpQR currently only supports bits = 3')
if self.beta1 != 16:
raise ValueError('SpQR currently only supports beta1 = 16')
if self.beta2 != 16:
raise ValueError('SpQR currently only supports beta2 = 16')
if not isinstance(self.shapes, dict):
raise TypeError('shapes must be a dict')
|
This is a wrapper class for `spqr` parameters. Refer to the original publication for more details.
Args:
bits (`int`, *optional*, defaults to 3):
Specifies the bit count for the weights and first order zero-points and scales.
Currently only bits = 3 is supported.
beta1 (`int`, *optional*, defaults to 16):
SpQR tile width. Currently only beta1 = 16 is supported.
beta2 (`int`, *optional*, defaults to 16):
SpQR tile height. Currently only beta2 = 16 is supported.
shapes (`Optional`, *optional*):
A dictionary holding the shape of each object. We need this because it's impossible
to deduce the exact size of the parameters just from bits, beta1, beta2.
modules_to_not_convert (`Optional[List[str]]`, *optional*):
Optionally, provides a list of full paths of `nn.Linear` weight parameters that shall not be quantized.
Defaults to None.
kwargs (`Dict[str, Any]`, *optional*):
Additional parameters from which to initialize the configuration object.
|
github-repos
|
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
|
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A REALM sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
github-repos
|
def unlock_kinetis(jlink):
if not jlink.connected():
raise ValueError('No target to unlock.')
method = UNLOCK_METHODS.get(jlink.tif, None)
if method is None:
raise NotImplementedError('Unsupported target interface for unlock.')
return method(jlink)
|
Unlock for Freescale Kinetis K40 or K60 device.
Args:
jlink (JLink): an instance of a J-Link that is connected to a target.
Returns:
``True`` if the device was successfully unlocked, otherwise ``False``.
Raises:
ValueError: if the J-Link is not connected to a target.
|
juraj-google-style
|
def get(cls, session, team_id):
return cls(('/teams/%d.json' % team_id), singleton=True, session=session)
|
Return a specific team.
Args:
session (requests.sessions.Session): Authenticated session.
team_id (int): The ID of the team to get.
Returns:
helpscout.models.Person: A person singleton representing the team,
if existing. Otherwise ``None``.
|
codesearchnet
|
def icao(msg):
DF = df(msg)
if (DF in (11, 17, 18)):
addr = msg[2:8]
elif (DF in (0, 4, 5, 16, 20, 21)):
c0 = bin2int(crc(msg, encode=True))
c1 = hex2int(msg[(- 6):])
addr = ('%06X' % (c0 ^ c1))
else:
addr = None
return addr
|
Calculate the ICAO address from a Mode-S message.
For DF11, DF17 and DF18 the address is carried directly; for DF0, DF4, DF5, DF16, DF20 and DF21 it is recovered from the message CRC.
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
String: ICAO address in 6 bytes hexadecimal string
|
codesearchnet
|
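A small usage sketch for `icao` above, assuming the module's `df`, `crc`, `bin2int` and `hex2int` helpers are available; the message is a commonly used ADS-B (DF17) test frame, so the address is read directly from characters 2-8.

```python
msg = "8D406B902015A678D4D220AA4BDA"   # downlink format 17 (ADS-B extended squitter)
print(icao(msg))                       # 406B90
```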
def load_keys():
consumer_key = os.environ.get('CONSUMER_KEY')
consumer_secret = os.environ.get('CONSUMER_SECRET')
access_token = os.environ.get('ACCESS_TOKEN')
access_token_secret = os.environ.get('ACCESS_TOKEN_SECRET')
return (consumer_key, consumer_secret, access_token, access_token_secret)
|
Loads Twitter keys.
Returns:
tuple: consumer_key, consumer_secret, access_token, access_token_secret
|
codesearchnet
|
def _process_intersects_filter_directive(filter_operation_info, location, context, parameters):
filtered_field_type = filter_operation_info.field_type
filtered_field_name = filter_operation_info.field_name
argument_inferred_type = strip_non_null_from_type(filtered_field_type)
if (not isinstance(argument_inferred_type, GraphQLList)):
raise GraphQLCompilationError(u'Cannot apply "intersects" to non-list type {}'.format(filtered_field_type))
(argument_expression, non_existence_expression) = _represent_argument(location, context, parameters[0], argument_inferred_type)
filter_predicate = expressions.BinaryComposition(u'intersects', expressions.LocalField(filtered_field_name), argument_expression)
if (non_existence_expression is not None):
filter_predicate = expressions.BinaryComposition(u'||', non_existence_expression, filter_predicate)
return blocks.Filter(filter_predicate)
|
Return a Filter basic block that checks if the directive arg and the field intersect.
Args:
filter_operation_info: FilterOperationInfo object, containing the directive and field info
of the field where the filter is to be applied.
location: Location where this filter is used.
context: dict, various per-compilation data (e.g. declared tags, whether the current block
is optional, etc.). May be mutated in-place in this function!
parameters: list of 1 element, specifying the collection in which the value must exist;
if the collection is optional and missing, the check will return True
Returns:
a Filter basic block that performs the intersects check
|
codesearchnet
|
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):
if (kmip_version < enums.KMIPVersion.KMIP_1_3):
raise exceptions.VersionNotSupported('KMIP {} does not support the CapabilityInformation object.'.format(kmip_version.value))
super(CapabilityInformation, self).read(input_buffer, kmip_version=kmip_version)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.STREAMING_CAPABILITY, local_buffer):
streaming_capability = primitives.Boolean(tag=enums.Tags.STREAMING_CAPABILITY)
streaming_capability.read(local_buffer, kmip_version=kmip_version)
self._streaming_capability = streaming_capability
if self.is_tag_next(enums.Tags.ASYNCHRONOUS_CAPABILITY, local_buffer):
asynchronous_capability = primitives.Boolean(tag=enums.Tags.ASYNCHRONOUS_CAPABILITY)
asynchronous_capability.read(local_buffer, kmip_version=kmip_version)
self._asynchronous_capability = asynchronous_capability
if self.is_tag_next(enums.Tags.ATTESTATION_CAPABILITY, local_buffer):
attestation_capability = primitives.Boolean(tag=enums.Tags.ATTESTATION_CAPABILITY)
attestation_capability.read(local_buffer, kmip_version=kmip_version)
self._attestation_capability = attestation_capability
if (kmip_version >= enums.KMIPVersion.KMIP_1_4):
if self.is_tag_next(enums.Tags.BATCH_UNDO_CAPABILITY, local_buffer):
batch_undo_capability = primitives.Boolean(tag=enums.Tags.BATCH_UNDO_CAPABILITY)
batch_undo_capability.read(local_buffer, kmip_version=kmip_version)
self._batch_undo_capability = batch_undo_capability
if self.is_tag_next(enums.Tags.BATCH_CONTINUE_CAPABILITY, local_buffer):
batch_continue_capability = primitives.Boolean(tag=enums.Tags.BATCH_CONTINUE_CAPABILITY)
batch_continue_capability.read(local_buffer, kmip_version=kmip_version)
self._batch_continue_capability = batch_continue_capability
if self.is_tag_next(enums.Tags.UNWRAP_MODE, local_buffer):
unwrap_mode = primitives.Enumeration(enums.UnwrapMode, tag=enums.Tags.UNWRAP_MODE)
unwrap_mode.read(local_buffer, kmip_version=kmip_version)
self._unwrap_mode = unwrap_mode
if self.is_tag_next(enums.Tags.DESTROY_ACTION, local_buffer):
destroy_action = primitives.Enumeration(enums.DestroyAction, tag=enums.Tags.DESTROY_ACTION)
destroy_action.read(local_buffer, kmip_version=kmip_version)
self._destroy_action = destroy_action
if self.is_tag_next(enums.Tags.SHREDDING_ALGORITHM, local_buffer):
shredding_algorithm = primitives.Enumeration(enums.ShreddingAlgorithm, tag=enums.Tags.SHREDDING_ALGORITHM)
shredding_algorithm.read(local_buffer, kmip_version=kmip_version)
self._shredding_algorithm = shredding_algorithm
if self.is_tag_next(enums.Tags.RNG_MODE, local_buffer):
rng_mode = primitives.Enumeration(enums.RNGMode, tag=enums.Tags.RNG_MODE)
rng_mode.read(local_buffer, kmip_version=kmip_version)
self._rng_mode = rng_mode
self.is_oversized(local_buffer)
|
Read the data encoding the CapabilityInformation structure and decode
it into its constituent parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
        defaults to KMIP 1.3.
Raises:
VersionNotSupported: Raised when a KMIP version is provided that
does not support the CapabilityInformation structure.
|
codesearchnet
|
def from_string(cls, prjs):
def parse(v):
try:
return int(v)
except ValueError:
pass
try:
return float(v)
except ValueError:
return v
parts = [o.lstrip('+') for o in prjs.strip().split()]
items = map(
lambda kv: len(kv) == 2 and (kv[0], parse(kv[1])) or (kv[0], True),
(p.split('=') for p in parts))
return cls({k: v for k, v in items if '+'+k in PROJ4_PARAMS.keys()})
|
Turn a PROJ.4 string into a mapping of parameters. Bare parameters
like "+no_defs" are given a value of ``True``. All keys are checked
against the ``all_proj_keys`` list.
Args:
prjs (str): A PROJ4 string.
|
juraj-google-style
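The parser above checks keys against a PROJ4_PARAMS whitelist that is not shown here. A minimal standalone sketch of the same parsing rules, with a hypothetical whitelist: bare flags become ``True`` and numeric values are coerced to int or float.

def parse_proj4(prjs, allowed_keys):
    # Coerce numeric strings to int or float; leave everything else untouched.
    def parse(v):
        for cast in (int, float):
            try:
                return cast(v)
            except ValueError:
                pass
        return v

    parts = [o.lstrip('+') for o in prjs.strip().split()]
    mapping = {}
    for kv in (p.split('=', 1) for p in parts):
        mapping[kv[0]] = parse(kv[1]) if len(kv) == 2 else True
    return {k: v for k, v in mapping.items() if k in allowed_keys}

print(parse_proj4('+proj=utm +zone=33 +datum=WGS84 +no_defs',
                  {'proj', 'zone', 'datum', 'no_defs'}))
# {'proj': 'utm', 'zone': 33, 'datum': 'WGS84', 'no_defs': True}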
|
def get(url, max_backoff=32, verbose=False, **kwargs):
sleep_seconds = 1
while (sleep_seconds <= max_backoff):
try:
response = requests.get(url, **{**{'timeout': 30}, **kwargs})
if (400 <= response.status_code < 500):
return None
if (200 <= response.status_code < 400):
return response
except RequestException as e:
if verbose:
print(str(e))
time.sleep(sleep_seconds)
sleep_seconds *= 2
return None
|
Adding retries to requests.get with exponential backoff.
Args:
url (str): The URL to fetch
    max_backoff (int): The maximum number of seconds to sleep between retries.
    verbose (bool): Whether to print exceptions.
Returns:
    Response: The requests response for a successful request; `None` otherwise.
|
codesearchnet
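Usage is a drop-in for requests.get, assuming the imports the function relies on (requests, time, and RequestException from requests.exceptions) are present; the URL below is purely illustrative.

response = get('https://httpbin.org/status/200',
               max_backoff=8,
               verbose=True,
               headers={'Accept': 'application/json'})
if response is not None:
    print(response.status_code)
else:
    print('gave up after exhausting the backoff budget')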
|
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
|
Create a mask from the two sequences passed to be used in a sequence-pair classification task. LED does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
|
github-repos
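A hedged sketch of the resulting mask, reproducing the logic standalone with hypothetical special-token ids instead of a real LED tokenizer:

# Hypothetical ids standing in for tokenizer.cls_token_id and tokenizer.sep_token_id.
CLS, SEP = 0, 2

def token_type_ids(ids_0, ids_1=None):
    if ids_1 is None:
        return len([CLS] + ids_0 + [SEP]) * [0]
    return len([CLS] + ids_0 + [SEP, SEP] + ids_1 + [SEP]) * [0]

print(token_type_ids([10, 11, 12]))            # [0, 0, 0, 0, 0]
print(token_type_ids([10, 11], [20, 21, 22]))  # nine zeros for the sequence pair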
|
def _build_encryption_key_information(self, value):
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
|
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
|
juraj-google-style
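A sketch of the dictionary shape the builder expects; the two key names come straight from the lookups in the code, while the concrete values and the client variable are hypothetical.

# Hypothetical input; only 'unique_identifier' and 'cryptographic_parameters'
# are read by _build_encryption_key_information.
encryption_key_info = {
    'unique_identifier': 'b4faee10-aa2a-4446-8ad4-0881f3422959',
    'cryptographic_parameters': {
        'block_cipher_mode': 'CBC',  # passed on to _build_cryptographic_parameters
    },
}
# struct = client._build_encryption_key_information(encryption_key_info)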
|
def call(self, inputs):
del inputs
latent_code = ed.MultivariateNormalDiag(loc=tf.zeros(self.latent_size),
sample_shape=1,
name="latent_code")
state = self.lstm.zero_state(1, dtype=tf.float32)
t = 0
productions = []
stack = [self.grammar.start_symbol]
while stack:
symbol = stack.pop()
net, state = self.lstm(latent_code, state)
logits = (self.output_layer(net) +
self.grammar.mask(symbol, on_value=0., off_value=-1e9))
production = ed.OneHotCategorical(logits=logits,
name="production_" + str(t))
_, rhs = self.grammar.production_rules[tf.argmax(
input=production, axis=-1)]
for symbol in rhs:
if symbol in self.grammar.nonterminal_symbols:
stack.append(symbol)
productions.append(production)
t += 1
return tf.stack(productions, axis=1)
|
Runs the model forward to generate a sequence of productions.
Args:
inputs: Unused.
Returns:
productions: Tensor of shape [1, num_productions, num_production_rules].
Slices along the `num_productions` dimension represent one-hot vectors.
|
juraj-google-style
|
def variable(dims=1):
if dims == 1:
return Poly({(1,): 1}, dim=1, shape=())
return Poly({
tuple(indices): indices for indices in numpy.eye(dims, dtype=int)
}, dim=dims, shape=(dims,))
|
Simple constructor to create single variables to create polynomials.
Args:
dims (int):
Number of dimensions in the array.
Returns:
(Poly):
Polynomial array with unit components in each dimension.
Examples:
>>> print(variable())
q0
>>> print(variable(3))
[q0, q1, q2]
|
juraj-google-style
|
def AddItem(self, item, f=(lambda x: x)):
with self._mutex:
if ((len(self.items) < self._max_size) or (self._max_size == 0)):
self.items.append(f(item))
else:
r = self._random.randint(0, self._num_items_seen)
if (r < self._max_size):
self.items.pop(r)
self.items.append(f(item))
elif self.always_keep_last:
self.items[(- 1)] = f(item)
self._num_items_seen += 1
|
Add an item to the ReservoirBucket, replacing an old item if necessary.
The new item is guaranteed to be added to the bucket, and to be the last
element in the bucket. If the bucket has reached capacity, then an old item
will be replaced. With probability (_max_size/_num_items_seen) a random item
in the bucket will be popped out and the new item will be appended
to the end. With probability (1 - _max_size/_num_items_seen) the last item
in the bucket will be replaced, provided ``always_keep_last`` is true;
otherwise the new item is dropped.
Since the O(n) replacements occur with O(1/_num_items_seen) likelihood,
the amortized runtime is O(1).
Args:
item: The item to add to the bucket.
f: A function to transform item before addition, if it will be kept in
the reservoir.
|
codesearchnet
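A self-contained sketch of the same reservoir-sampling update rule, without the locking or the always_keep_last branch, to make the replacement probability concrete:

import random

def reservoir_sample(stream, max_size, seed=0):
    rng = random.Random(seed)
    items, seen = [], 0
    for item in stream:
        if len(items) < max_size:
            items.append(item)
        else:
            # Keep the new item with probability max_size / (seen + 1)
            # by evicting a uniformly chosen existing element.
            r = rng.randint(0, seen)
            if r < max_size:
                items.pop(r)
                items.append(item)
        seen += 1
    return items

print(reservoir_sample(range(10000), max_size=5))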
|
def product_name(self):
buf = (ctypes.c_char * self.MAX_BUF_SIZE)()
self._dll.JLINKARM_EMU_GetProductName(buf, self.MAX_BUF_SIZE)
return ctypes.string_at(buf).decode()
|
Returns the product name of the connected J-Link.
Args:
self (JLink): the ``JLink`` instance
Returns:
Product name.
|
juraj-google-style
|
def install(self, connection, partition, table_name=None, columns=None, materialize=False,
logger=None):
partition.localize()
self._add_partition(connection, partition)
fdw_table = partition.vid
view_table = '{}_v'.format(fdw_table)
if materialize:
with connection.cursor() as cursor:
view_exists = self._relation_exists(connection, view_table)
if view_exists:
logger.debug(
'Materialized view of the partition already exists.\n partition: {}, view: {}'
.format(partition.name, view_table))
else:
query = 'CREATE MATERIALIZED VIEW {} AS SELECT * FROM {};'\
.format(view_table, fdw_table)
logger.debug(
'Creating new materialized view of the partition.'
'\n partition: {}, view: {}, query: {}'
.format(partition.name, view_table, query))
cursor.execute(query)
cursor.execute('COMMIT;')
final_table = view_table if materialize else fdw_table
with connection.cursor() as cursor:
view_q = "CREATE VIEW IF NOT EXISTS {} AS SELECT * FROM {} ".format(partition.vid, final_table)
cursor.execute(view_q)
cursor.execute('COMMIT;')
return partition.vid
|
Creates an FDW-backed table or a materialized view for the given partition.
Args:
    connection: connection to PostgreSQL.
    partition (orm.Partition): partition to install.
    materialize (boolean): if True, create a read-only materialized view; if False, expose the partition as a virtual (FDW) table.
Returns:
    str: name of the created table (the partition vid).
|
juraj-google-style
|
def retryable(a_func, retry_options, **kwargs):
delay_mult = retry_options.backoff_settings.retry_delay_multiplier
max_delay_millis = retry_options.backoff_settings.max_retry_delay_millis
has_timeout_settings = _has_timeout_settings(retry_options.backoff_settings)
if has_timeout_settings:
timeout_mult = retry_options.backoff_settings.rpc_timeout_multiplier
max_timeout = (retry_options.backoff_settings.max_rpc_timeout_millis / _MILLIS_PER_SECOND)
total_timeout = (retry_options.backoff_settings.total_timeout_millis / _MILLIS_PER_SECOND)
def inner(*args):
'Equivalent to ``a_func``, but retries upon transient failure.\n\n Retrying is done through an exponential backoff algorithm configured\n by the options in ``retry``.\n '
delay = retry_options.backoff_settings.initial_retry_delay_millis
        exc = errors.RetryError('Retry total timeout exceeded before any response was received')
if has_timeout_settings:
timeout = (retry_options.backoff_settings.initial_rpc_timeout_millis / _MILLIS_PER_SECOND)
now = time.time()
deadline = (now + total_timeout)
else:
timeout = None
deadline = None
while ((deadline is None) or (now < deadline)):
try:
to_call = add_timeout_arg(a_func, timeout, **kwargs)
return to_call(*args)
except Exception as exception:
code = config.exc_to_code(exception)
if (code not in retry_options.retry_codes):
raise errors.RetryError('Exception occurred in retry method that was not classified as transient', exception)
exc = errors.RetryError('Retry total timeout exceeded with exception', exception)
to_sleep = random.uniform(0, (delay * 2))
time.sleep((to_sleep / _MILLIS_PER_SECOND))
delay = min((delay * delay_mult), max_delay_millis)
if has_timeout_settings:
now = time.time()
timeout = min((timeout * timeout_mult), max_timeout, (deadline - now))
raise exc
return inner
|
Creates a function equivalent to a_func, but that retries on certain
exceptions.
Args:
a_func (callable): A callable.
retry_options (RetryOptions): Configures the exceptions upon which the
callable should retry, and the parameters to the exponential backoff
retry algorithm.
    kwargs: Additional arguments passed through to the callable.
Returns:
Callable: A function that will retry on exception.
|
codesearchnet
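A stripped-down, framework-free sketch of the same idea (no gax RetryOptions and no RPC timeout scaling): retry a callable on a caller-supplied set of exception types with jittered exponential backoff.

import random
import time

def retry_with_backoff(func, retry_on, max_attempts=5,
                       initial_delay=0.5, delay_mult=2.0, max_delay=8.0):
    # Call ``func`` and retry on ``retry_on`` exceptions with jittered backoff.
    delay = initial_delay
    for attempt in range(max_attempts):
        try:
            return func()
        except retry_on:
            if attempt == max_attempts - 1:
                raise
            # Full jitter: sleep somewhere in [0, delay), then grow the window.
            time.sleep(random.uniform(0, delay))
            delay = min(delay * delay_mult, max_delay)

# Example: tolerate up to five connection errors before giving up.
# retry_with_backoff(lambda: flaky_rpc(), retry_on=(ConnectionError,))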
|
def sub_annotations_for_parameterized_class(self, cls: abstract.ParameterizedClass, annotations: dict[str, abstract.BaseValue]) -> dict[str, abstract.BaseValue]:
formal_type_parameters = cls.get_formal_type_parameters()
def get_type_parameter_subst(annotation: abstract.BaseValue) -> abstract.BaseValue | None:
for name in (f'{cls.full_name}.{annotation.name}', f'{cls.name}.{annotation.name}'):
if name in formal_type_parameters:
return formal_type_parameters[name]
return annotation
return {name: self._do_sub_one_annotation(self.ctx.root_node, annot, get_type_parameter_subst) for name, annot in annotations.items()}
|
Apply type parameter substitutions to a dictionary of annotations.
Args:
cls: ParameterizedClass that defines type parameter substitutions.
annotations: A dictionary of annotations to which type parameter
        substitution should be applied.
Returns:
Annotations with type parameters substituted.
|
github-repos
|
def get_cso_dataframe(self):
assert (self.jco is not None)
assert (self.pst is not None)
weights = self.pst.observation_data.loc[(self.jco.to_dataframe().index, 'weight')].copy().values
cso = (np.diag(np.sqrt(self.qhalfx.x.dot(self.qhalfx.x.T))) / float((self.pst.npar - 1)))
cso_df = pd.DataFrame.from_dict({'obnme': self.jco.to_dataframe().index, 'cso': cso})
cso_df.index = cso_df['obnme']
cso_df.drop('obnme', axis=1, inplace=True)
return cso_df
|
Get a dataframe of composite observation sensitivity, as reported by PEST in the
seo file.
Note that this formulation deviates slightly from the PEST documentation in that the
values are divided by (npar - 1) rather than by npar.
The equation is cso_j = ((Q^(1/2) J J^T Q^(1/2))^(1/2))_jj / (NPAR - 1).
Returns:
cso : pandas.DataFrame
|
codesearchnet
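A numpy-only sketch of the composite-sensitivity computation above, with a random weighted Jacobian standing in for a real pyemu qhalfx matrix:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
nobs, npar = 6, 4
qhalfx = rng.standard_normal((nobs, npar))  # stands in for Q^(1/2) * J

# cso_j = sqrt of the j-th diagonal entry of Q^(1/2) J J^T Q^(1/2), over (npar - 1)
cso = np.sqrt(np.diag(qhalfx @ qhalfx.T)) / float(npar - 1)
cso_df = pd.DataFrame({'cso': cso},
                      index=['obs_{}'.format(i) for i in range(nobs)])
print(cso_df)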
|
def get_compliance_preview(self):
uri = '{}/compliance-preview'.format(self.data['uri'])
return self._helper.do_get(uri)
|
Gets the preview of manual and automatic updates required to make the server profile
consistent with its template.
Returns:
dict: Server profile compliance preview.
|
codesearchnet
|
def save_image(figure, filename):
path = os.path.join(IMAGES_DIR, filename)
figure.savefig(path, bbox_inches='tight')
plt.close(figure)
|
Save an image to the docs images directory.
Args:
filename (str): The name of the file (not containing
directory info).
|
codesearchnet
|
def ParseCacheEntry(self, file_object, block_offset):
cache_entry_map = self._GetDataTypeMap('chrome_cache_entry')
try:
cache_entry, _ = self._ReadStructureFromFileObject(
file_object, block_offset, cache_entry_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile((
'Unable to parse cache entry at offset: 0x{0:08x} with error: '
'{1!s}').format(block_offset, exception))
cache_entry_object = CacheEntry()
cache_entry_object.hash = cache_entry.hash
cache_entry_object.next = CacheAddress(cache_entry.next_address)
cache_entry_object.rankings_node = CacheAddress(
cache_entry.rankings_node_address)
cache_entry_object.creation_time = cache_entry.creation_time
byte_array = cache_entry.key
byte_string = bytes(bytearray(byte_array))
cache_entry_object.key, _, _ = byte_string.partition(b'\x00')
try:
cache_entry_object.original_url = cache_entry_object.key.decode('ascii')
except UnicodeDecodeError as exception:
raise errors.ParseError(
'Unable to decode original URL in key with error: {0!s}'.format(
exception))
return cache_entry_object
|
Parses a cache entry.
Args:
file_object (dfvfs.FileIO): a file-like object to read from.
block_offset (int): block offset of the cache entry.
Returns:
CacheEntry: cache entry.
Raises:
ParseError: if the cache entry cannot be read.
|
juraj-google-style
|
def _on_pass(self, record):
msg = record.details
if msg:
logging.info(msg)
self.on_pass(record)
|
Proxy function to guarantee the base implementation of on_pass is
called.
Args:
record: records.TestResultRecord, a copy of the test record for
this test, containing all information of the test execution
including exception objects.
|
github-repos
|
def default(fields=None, count=5):
projection = Sampling._create_projection(fields)
return (lambda sql: ('SELECT %s FROM (%s) LIMIT %d' % (projection, sql, count)))
|
Provides a simple default sampling strategy which limits the result set by a count.
Args:
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
    A sampling function that can be applied to a query to limit the result set.
|
codesearchnet
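The returned lambda just wraps the incoming SQL; a hedged sketch of that wrapping, with the projection helper reimplemented as '*' when no fields are given (the real Sampling._create_projection is not shown here).

def default_sampling(fields=None, count=5):
    projection = '*' if not fields else ', '.join(fields)
    return lambda sql: 'SELECT %s FROM (%s) LIMIT %d' % (projection, sql, count)

sampler = default_sampling(fields=['name', 'age'], count=3)
print(sampler('SELECT * FROM people'))
# SELECT name, age FROM (SELECT * FROM people) LIMIT 3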
|
def hardware_status(self):
stat = structs.JLinkHardwareStatus()
res = self._dll.JLINKARM_GetHWStatus(ctypes.byref(stat))
if res == 1:
raise errors.JLinkException('Error in reading hardware status.')
return stat
|
Retrieves and returns the hardware status.
Args:
self (JLink): the ``JLink`` instance
Returns:
A ``JLinkHardwareStatus`` describing the J-Link hardware.
|
juraj-google-style
|
def __init__(self, tcex):
self.tcex = tcex
self._request = self.tcex.request(self.tcex.session)
self._request.content_type = 'application/json'
self._api_branch = None
self._api_branch_base = None
self._api_entity = None
self._api_uri = None
self._case_preference = 'sensitive'
self._custom = False
self._http_method = 'GET'
self._filters = []
self._filter_or = False
self._name = None
self._parsable = False
self._paginate = True
self._paginate_count = 0
self._parent = None
self._request_entity = None
self._request_uri = None
self._result_count = None
self._result_limit = 500
self._result_start = 0
self._stream = False
self._status_codes = {}
self._value_fields = []
self.owner = self.tcex.args.api_default_org
|
Initialize the Class properties.
Args:
tcex (object): Instance of TcEx.
|
juraj-google-style
|
def get_subdomain(url):
if (url not in URLHelper.__cache):
URLHelper.__cache[url] = urlparse(url)
return '.'.join(URLHelper.__cache[url].netloc.split('.')[:(- 2)])
|
Get the subdomain of the given URL.
Args:
url (str): The URL to get the subdomain from.
Returns:
str: The subdomain(s)
|
codesearchnet
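A self-contained sketch of the same slicing on the parsed netloc (without the cache, and using the Python 3 urllib.parse import):

from urllib.parse import urlparse

def subdomain(url):
    return '.'.join(urlparse(url).netloc.split('.')[:-2])

print(subdomain('https://api.staging.example.com/v1/users'))  # 'api.staging'
print(subdomain('https://example.com/'))                       # '' (no subdomain)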
|
def parse_done(self, buf: memoryview) -> Tuple[(bool, memoryview)]:
match = self._pattern.match(buf)
if (not match):
raise NotParseable(buf)
done = (match.group(1).upper() == self.continuation)
buf = buf[match.end(0):]
return (done, buf)
|
Parse the continuation line sent by the client to end the ``IDLE``
command.
Args:
    buf: The continuation line to parse.
Returns:
    A tuple of whether the ``IDLE`` command is done and the remaining buffer.
|
codesearchnet
|
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
terms = np.array(self.terms[term])[:, np.newaxis]
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
return np.exp(scores) * (len(self.tokens) / samples)
|
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
|
juraj-google-style
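A runnable sketch of the same estimate on synthetic data: hypothetical token offsets of a term drawn from two clusters, smoothed with scikit-learn's KernelDensity and rescaled by the sample spacing so the discrete curve sums to roughly 1.

import numpy as np
from sklearn.neighbors import KernelDensity

n_tokens, samples, bandwidth = 100000, 1000, 2000
rng = np.random.default_rng(0)
# Hypothetical offsets of a term, clustered near the start and the middle of the text.
offsets = np.concatenate([rng.normal(10000, 1500, 40),
                          rng.normal(60000, 3000, 60)])[:, np.newaxis]

kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(offsets)
x_axis = np.linspace(0, n_tokens, samples)[:, np.newaxis]
density = np.exp(kde.score_samples(x_axis)) * (n_tokens / samples)
print(density.shape, round(float(density.sum()), 3))  # (1000,) ~1.0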
|
def _pad_for_batching(self, pixel_values: List[torch.Tensor], image_sizes: List[List[int]]):
max_shape = (max([size[0] for size in image_sizes]), max([size[1] for size in image_sizes]))
pixel_values = [torch.nn.functional.pad(image, pad=(0, max_shape[1] - size[1], 0, max_shape[0] - size[0])) for image, size in zip(pixel_values, image_sizes)]
return torch.stack(pixel_values)
|
Pads images with zeros on the height and width dimensions so that all images in the batch share the same spatial size before stacking.
Args:
pixel_values (`List[torch.Tensor]`):
An array of pixel values of each images of shape (`batch_size`, `channels`, `height`, `width`)
image_sizes (`List[List[int]]`):
A list of sizes for each image in `pixel_values` in (height, width) format.
Returns:
    `torch.Tensor`: The padded images stacked into a single batch tensor.
|
github-repos
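A minimal sketch of the padding step with two fake 'images' of different spatial sizes; both are zero-padded on the bottom and right edges to the larger height and width before stacking:

import torch
import torch.nn.functional as F

images = [torch.ones(3, 4, 6), torch.ones(3, 5, 3)]  # (channels, height, width)
image_sizes = [[4, 6], [5, 3]]                        # (height, width) per image

max_h = max(size[0] for size in image_sizes)
max_w = max(size[1] for size in image_sizes)
padded = [F.pad(img, pad=(0, max_w - w, 0, max_h - h))  # pad = (left, right, top, bottom)
          for img, (h, w) in zip(images, image_sizes)]
batch = torch.stack(padded)
print(batch.shape)  # torch.Size([2, 3, 5, 6])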
|
def to_json(self, include_body=False):
message = {
'emailId': self.email_id,
'timestamp': isoformat(self.timestamp),
'subsystem': self.subsystem,
'subject': self.subject,
'sender': self.sender,
'recipients': self.recipients,
'uuid': self.uuid,
'messageHtml': None,
'messageText': None
}
if include_body:
message['messageHtml'] = self.message_html
message['messageText'] = self.message_text
return message
|
Exports the object to a JSON friendly dict
Args:
include_body (bool): Include the body of the message in the output
Returns:
Dict representation of object type
|
juraj-google-style
|
def export_default_probes(path, module_name = '', raise_errors = False):
raise NotImplementedError
import b26_toolkit.b26_toolkit.instruments as instruments
from pylabcontrol.core import Probe
for name, obj in inspect.getmembers(instruments):
if inspect.isclass(obj):
try:
instrument = obj()
print(('--- created ', obj.__name__, ' -- '))
for probe_name, probe_info in instrument._PROBES.items():
probe = Probe(instrument, probe_name, info = probe_info)
filename = os.path.join(path, '{:s}.b26'.format(instrument.name))
probe.save(filename)
            except Exception:
                print(('failed to create probe file for: {:s}'.format(obj.__name__)))
|
NOT IMPLEMENTED YET
Tries to instantiate all the instruments that are imported in /instruments/__init__.py
and saves the probes of each instrument that could be instantiated into a .b26 file in the folder path.
Args:
path: target path for .b26 files
|
juraj-google-style
|
def _google_section(line_info):
colon_index = line_info.remaining.find(':')
possible_title = line_info.remaining[:colon_index]
return _section_from_possible_title(possible_title)
|
Checks whether the current line is the start of a new Google-style section.
This docstring is a Google-style docstring. Google-style sections look like
this:
Section Name:
section body goes here
Args:
line_info: Information about the current line.
Returns:
A Section type if one matches, or None if no section type matches.
|
github-repos
|
def get(self, group=None, backend=None):
from .options import Store, Options
keywords = {}
groups = Options._option_groups if group is None else [group]
backend = backend if backend else Store.current_backend
for group in groups:
optsobj = Store.lookup_options(backend, self._obj, group)
keywords = dict(keywords, **optsobj.kwargs)
return Options(**keywords)
|
Returns the corresponding Options object.
Args:
group: The options group. Flattens across groups if None.
backend: Current backend if None otherwise chosen backend.
Returns:
Options object associated with the object containing the
applied option keywords.
|
juraj-google-style
|
def _as_document(self, dataset):
assert isinstance(dataset, Dataset)
doc = super(self.__class__, self)._as_document(dataset)
doc['keywords'] = doc['keywords'].replace('-', '_')
doc['doc'] = doc['doc'].replace('-', '_')
doc['title'] = doc['title'].replace('-', '_')
return doc
|
Converts dataset to document indexed by to FTS index.
Args:
dataset (orm.Dataset): dataset to convert.
Returns:
    dict whose structure matches BaseDatasetIndex._schema.
|
juraj-google-style
|
def conv_output_shape(input_shape, kernel_shape, strides, padding):
dims = range(len(kernel_shape))
output_shape = [conv_output_length(input_shape[d], kernel_shape[d], padding, strides[d]) for d in dims]
output_shape = tuple([0 if input_shape[d] == 0 else output_shape[d] for d in dims])
return output_shape
|
Return the output shape of an N-D convolution.
Forces dimensions where input is empty (size 0) to remain empty.
Args:
input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
input.
kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
receptive field.
strides: tuple of size N, strides along each spatial dimension.
padding: type of padding, string `"same"` or `"valid"`.
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
Returns:
tuple of size N: `(d_out1, ..., d_outN)`, spatial shape of the output.
|
github-repos
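The per-dimension rule mirrors the conv_output_length helper this function assumes (dilation 1): 'same' padding gives ceil(input / stride) and 'valid' gives ceil((input - kernel + 1) / stride). A small standalone sketch:

import math

def conv_output_length(input_length, kernel_size, padding, stride):
    if padding == 'same':
        return math.ceil(input_length / stride)
    if padding == 'valid':
        return math.ceil((input_length - kernel_size + 1) / stride)
    raise ValueError('unknown padding: {!r}'.format(padding))

# 32x32 input, 3x3 kernel, stride 2:
print([conv_output_length(32, 3, 'same', 2),
       conv_output_length(32, 3, 'valid', 2)])  # [16, 15]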
|