code | docstring | source |
---|---|---|
def dict_to_pyxb(rp_dict):
rp_pyxb = d1_common.types.dataoneTypes.replicationPolicy()
rp_pyxb.replicationAllowed = rp_dict['allowed']
rp_pyxb.numberReplicas = rp_dict['num']
rp_pyxb.blockedMemberNode = rp_dict['block']
rp_pyxb.preferredMemberNode = rp_dict['pref']
normalize(rp_pyxb)
return rp_pyxb
|
Convert dict to ReplicationPolicy PyXB object.
Args:
rp_dict: Native Python structure representing a Replication Policy.
Example::
{
'allowed': True,
'num': 3,
'block': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'},
'pref': {'urn:node:NODE4', 'urn:node:NODE5'},
}
Returns:
ReplicationPolicy PyXB object.
|
juraj-google-style
|
def all_label_values(self, label_list_ids=None):
values = set()
for utterance in self.utterances.values():
values = values.union(utterance.all_label_values(label_list_ids=label_list_ids))
return values
|
Return a set of all label-values occurring in this corpus.
Args:
label_list_ids (list): If not None, only labels from label-lists with an id contained in this list
are considered.
Returns:
:class:`set`: A set of distinct label-values.
|
codesearchnet
|
def console_set_default_foreground(con: tcod.console.Console, col: Tuple[int, int, int]) -> None:
lib.TCOD_console_set_default_foreground(_console(con), col)
|
Change the default foreground color for a console.
Args:
con (Console): Any Console instance.
col (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
.. deprecated:: 8.5
Use :any:`Console.default_fg` instead.
|
codesearchnet
|
def projector(state, flatten=False):
density_matrix = np.outer(state.conjugate(), state)
if flatten:
return density_matrix.flatten(order='F')
return density_matrix
|
Maps a pure state to a state matrix (density matrix).
Args:
state (ndarray): the pure state vector.
flatten (bool): if True, return the state matrix flattened column-wise.
Returns:
ndarray: state_mat(2**num, 2**num) if flatten is false
ndarray: state_mat(4**num) if flatten is true, stacked column by column
|
juraj-google-style
|
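The construction above is plain NumPy, so a short self-contained sketch (with a hypothetical Bell-state vector that is not taken from the source) shows the same outer product and column-major flatten:

import numpy as np

# Hypothetical pure state |psi> = (|00> + |11>) / sqrt(2); not from the source.
state = np.array([1, 0, 0, 1], dtype=complex) / np.sqrt(2)

# Outer product of conj(state) with state gives the 4x4 density matrix.
density_matrix = np.outer(state.conjugate(), state)
assert density_matrix.shape == (4, 4)

# Column-major (Fortran-order) flatten, matching the flatten=True branch.
flat = density_matrix.flatten(order='F')
assert flat.shape == (16,)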
def expand_dims(self, axis):
if axis <= self._distaxis:
subaxis = axis
new_distaxis = self._distaxis + 1
else:
subaxis = axis - 1
new_distaxis = self._distaxis
new_subts = [rts.expand_dims(subaxis) for rts in self._subarrays]
if axis == 0:
return distob.DistArray(new_subts, new_distaxis)
else:
axislabels = self.labels[self._distaxis]
return DistTimeseries(new_subts, new_distaxis, axislabels)
|
Insert a new axis at a given position in the array shape.
Args:
axis (int): Position (amongst axes) where new axis is to be inserted.
|
juraj-google-style
|
def example_number_for_non_geo_entity(country_calling_code):
metadata = PhoneMetadata.metadata_for_nongeo_region(country_calling_code, None)
if metadata is not None:
for desc in (metadata.mobile, metadata.toll_free, metadata.shared_cost, metadata.voip,
metadata.voicemail, metadata.uan, metadata.premium_rate):
try:
if (desc is not None and desc.example_number is not None):
return parse(_PLUS_SIGN + unicod(country_calling_code) + desc.example_number, UNKNOWN_REGION)
except NumberParseException:
pass
return None
|
Gets a valid number for the specified country calling code for a non-geographical entity.
Arguments:
country_calling_code -- The country calling code for a non-geographical entity.
Returns a valid number for the non-geographical entity. Returns None when
the metadata does not contain such information, or the country calling
code passed in does not belong to a non-geographical entity.
|
juraj-google-style
|
def _execute_adb_install(device: AndroidDevice, install_args: Iterable[str], timeout: int) -> None:
stderr_buffer = io.BytesIO()
stdout = device.adb.install(install_args, stderr=stderr_buffer, timeout=timeout)
stderr = stderr_buffer.getvalue().decode('utf-8').strip()
if not _is_apk_install_success(stdout, stderr):
adb_cmd = 'adb -s %s install %s' % (device.serial, ' '.join(install_args))
raise adb.AdbError(cmd=adb_cmd, stdout=stdout, stderr=stderr, ret_code=0)
|
Executes the adb install command.
Args:
device: AndroidDevice, Mobly's Android controller object.
install_args: list of strings, the args to be added to `adb install` cmd.
timeout: int, the number of seconds to wait before timing out.
Raises:
AdbError: installation failed.
|
github-repos
|
def _image_channel_compress_bottom(inputs, model_hparams, name='bottom'):
num_channels = 3
with tf.variable_scope(name):
inputs = tf.to_float(inputs)
hp = model_hparams
if (hp.mode != tf.estimator.ModeKeys.PREDICT):
tf.summary.image('inputs', common_layers.tpu_safe_image_summary(inputs), max_outputs=2)
inputs = common_layers.convert_rgb_to_symmetric_real(inputs)
inputs_shape = common_layers.shape_list(inputs)
inputs = tf.reshape(inputs, [(- 1), inputs_shape[1], (inputs_shape[2] * inputs_shape[3]), 1])
outputs = tf.layers.conv2d(inputs, model_hparams.hidden_size, kernel_size=(1, num_channels), padding='VALID', strides=(1, num_channels), activation=tf.nn.relu, name='conv_input')
return outputs
|
Compresses channel-wise input pixels into whole pixel representations.
Perform conversion of RGB pixel values to a real number in the range -1 to
1. This combines pixel channels to form a representation of shape
[img_len, img_len].
Args:
inputs: Tensor representing RGB pixel intensities as integers, of shape
[batch, img_len, img_len, channels].
model_hparams: HParams, model hyperparameters.
name: string, scope.
Returns:
body_input: Tensor of shape
[batch, img_len, img_len, model_hparams.hidden_size].
|
codesearchnet
|
def DeserializeFromDB(buffer):
m = StreamManager.GetStream(buffer)
reader = BinaryReader(m)
v = StorageItem()
v.Deserialize(reader)
StreamManager.ReleaseStream(m)
return v
|
Deserialize full object.
Args:
buffer (bytes, bytearray, BytesIO): data to create the stream from.
Returns:
StorageItem:
|
juraj-google-style
|
def set_step_input_context(self, context):
logger.debug('starting')
if (self.in_parameters is not None):
parameter_count = len(self.in_parameters)
if (parameter_count > 0):
logger.debug(f"Updating context with {parameter_count} 'in' parameters.")
context.update(self.in_parameters)
logger.debug('done')
|
Append step's 'in' parameters to context, if they exist.
Append the [in] dictionary to the context. This will overwrite
existing values if the same keys are already in there. I.e if
in_parameters has {'eggs': 'boiled'} and key 'eggs' already
exists in context, context['eggs'] hereafter will be 'boiled'.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
|
codesearchnet
|
def process(self, feed_item):
item = self.get(feed_item)
if item:
self._process_update(item, feed_item)
self._clean(item)
self._update(item, feed_item)
else:
new_item = self._process_new(feed_item)
self._clean(new_item)
item = self._insert(new_item, feed_item)
if self._id_field and feed_item.get(self._id_field, '').startswith('ext'):
store.map(self._entity, feed_item.get(self._id_field), item['id'])
store.set(self._entity, [feed_item[self._id_field]], item)
if self._search_field and feed_item.get(self._search_field, ''):
store.map(self._entity, feed_item.get(self._search_field), item['id'])
store.set(self._entity, [feed_item[self._search_field]], item)
if item:
feed_item[self._id_field] = item['id']
store.set(self._entity, [item['id']], item)
self._post_process(feed_item, item)
return item
|
Processes a Bulkdozer feed item.
This method identifies if the item needs to be inserted or updated, cleans
it, performs the CM operations required, and update the feed item with newly
created ids and name lookups so that the feed can be updated.
Args:
feed_item: Bulkdozer feed item to process.
Returns:
Newly created or updated CM object.
|
github-repos
|
class OfflineDetector(AnomalyDetector):
@staticmethod
def score_prediction_adapter(keyed_prediction: tuple[KeyT, PredictionResult]) -> tuple[KeyT, AnomalyPrediction]:
key, prediction = keyed_prediction
score = prediction.inference
assert isinstance(score, SupportsFloat)
return (key, AnomalyPrediction(score=float(score)))
@staticmethod
def label_prediction_adapter(keyed_prediction: tuple[KeyT, PredictionResult]) -> tuple[KeyT, AnomalyPrediction]:
key, prediction = keyed_prediction
label = prediction.inference
assert isinstance(label, SupportsInt)
return (key, AnomalyPrediction(label=int(label)))
def __init__(self, keyed_model_handler: KeyedModelHandler[Any, beam.Row, PredictionT, Any], run_inference_args: Optional[dict[str, Any]]=None, **kwargs):
super().__init__(**kwargs)
self._keyed_model_handler = keyed_model_handler
self._run_inference_args = run_inference_args or {}
self._run_inference_args['model_identifier'] = self._model_id
def learn_one(self, x: beam.Row) -> None:
raise NotImplementedError
def score_one(self, x: beam.Row) -> Optional[float]:
raise NotImplementedError
|
An offline anomaly detector that uses a provided model handler for scoring.
Args:
keyed_model_handler: The model handler to use for inference.
Requires a `KeyedModelHandler[Any, Row, PredictionT, Any]` instance.
run_inference_args: Optional arguments to pass to RunInference.
**kwargs: Additional keyword arguments to pass to the base
AnomalyDetector class.
|
github-repos
|
def get(self, key, default=None):
if key in self._nodes: return self._nodes[key]
else: return default
|
Get
Returns the node of a specific key from the parent
Arguments:
key {str} -- The key to get
default {mixed} -- Value to return if the key does not exist
Returns:
mixed
|
juraj-google-style
|
def register_extensions(self, exts, force=False):
for ext_in, ext_out in exts.items():
self.register_extension(ext_in, ext_out, force)
|
Add/register extensions.
Args:
exts (dict): Mapping of input extensions to output extensions.
force (bool): If ``force`` is set to ``True``, simply overwrite existing extensions, otherwise do nothing.
If the ``logger`` is set, log a warning about the duplicate extension if ``force == False``.
|
juraj-google-style
|
def matches(self, new, old):
if (isinstance(new, np.ndarray) or isinstance(old, np.ndarray)):
return np.array_equal(new, old)
if pd:
if (isinstance(new, pd.Series) or isinstance(old, pd.Series)):
return np.array_equal(new, old)
if (isinstance(new, pd.Index) or isinstance(old, pd.Index)):
return np.array_equal(new, old)
try:
if (isinstance(new, dict) and isinstance(old, dict)):
if (set(new.keys()) != set(old.keys())):
return False
return all((self.matches(new[k], old[k]) for k in new))
return (new == old)
except ValueError:
return False
|
Whether two parameters match values.
If either ``new`` or ``old`` is a NumPy array or Pandas Series or Index,
then the result of ``np.array_equal`` will determine if the values match.
Otherwise, the result of standard Python equality will be returned.
Returns:
True, if new and old match, False otherwise
|
codesearchnet
|
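As a rough illustration of the comparison rules described above (a simplified sketch, not the library implementation): NumPy arrays are compared element-wise with np.array_equal, dicts are compared key by key, and everything else falls back to ==.

import numpy as np

def values_match(new, old):
    # Arrays need element-wise comparison; `==` on arrays returns an array.
    if isinstance(new, np.ndarray) or isinstance(old, np.ndarray):
        return np.array_equal(new, old)
    if isinstance(new, dict) and isinstance(old, dict):
        return (set(new) == set(old)
                and all(values_match(new[k], old[k]) for k in new))
    return new == old

assert values_match(np.arange(3), np.array([0, 1, 2]))
assert not values_match({'a': 1}, {'a': 2})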
def _get_args_name_from_parser(parser):
return [action.dest for action in parser._actions if (not isinstance(action, argparse._HelpAction))]
|
Retrieve the names of the function arguments linked to the given parser.
Args:
parser: a function parser
|
codesearchnet
|
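A quick stdlib illustration of what the helper extracts; the parser built here is made up for the example:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('path')                      # positional -> dest 'path'
parser.add_argument('--verbose', action='store_true')

# Mirrors _get_args_name_from_parser: every action's dest except the help action.
names = [action.dest
         for action in parser._actions
         if not isinstance(action, argparse._HelpAction)]
assert names == ['path', 'verbose']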
def maximum(self, vars_list: List[str]) -> 'TensorFluent':
return self._aggregation_op(tf.reduce_max, self, vars_list)
|
Returns the TensorFluent for the maximum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the maximum aggregation function.
|
codesearchnet
|
def to_datetime(arg):
if isinstance(arg, datetime.datetime):
return arg
elif arg == 0:
return datetime.datetime.now()
elif isinstance(arg, str):
if arg == "now":
arg = datetime.datetime.now()
elif arg == "?":
arg = datetime.datetime(1970, 1, 1)
else:
arg = str2dt(arg)
elif isinstance(arg, datetime.date):
arg = date2datetime(arg)
elif isinstance(arg, (int, float)):
arg = ts2dt(arg)
else:
raise TypeError("Wrong type for argument 'arg': {}".format(arg.__class__.__name__))
return arg
|
Tries to convert any type of argument to datetime
Args:
arg: datetime, date, or str. If "?", will be converted to 1970-1-1.
if 0 or "now", will be converted to datetime.datetime.now()
|
juraj-google-style
|
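The dispatch above leans on project helpers (str2dt, ts2dt, date2datetime) that are not shown here; a stripped-down, stdlib-only sketch of the same idea, with the string handling simplified to ISO dates, might look like:

import datetime

def to_datetime_sketch(arg):
    # Simplified stand-in: ISO strings only; the original also accepts "?" and custom formats.
    if isinstance(arg, datetime.datetime):
        return arg
    if arg == 0 or arg == "now":
        return datetime.datetime.now()
    if isinstance(arg, str):
        return datetime.datetime.fromisoformat(arg)
    if isinstance(arg, datetime.date):
        return datetime.datetime(arg.year, arg.month, arg.day)
    if isinstance(arg, (int, float)):
        return datetime.datetime.fromtimestamp(arg)
    raise TypeError("Wrong type for argument 'arg': {}".format(type(arg).__name__))

assert to_datetime_sketch("2020-01-02").year == 2020
assert to_datetime_sketch(datetime.date(2020, 1, 2)).hour == 0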
def gt(self, other, axis="columns", level=None):
return self._binary_op("gt", other, axis=axis, level=level)
|
Checks element-wise that this is greater than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
|
juraj-google-style
|
def unparse_headers(hdrs):
return "".join([unparse_header(n, v) for n, v in hdrs.items()]) + "\r\n"
|
Serialize a dictionary of headers into a string.
Args:
hdrs: A dictionary of headers.
Returns:
The headers as a string that can be used in an NNTP POST.
|
juraj-google-style
|
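A self-contained sketch of the serialization; the unparse_header helper below is a guess at the companion function (one 'Name: value' line ending in CRLF), not code from the source:

def unparse_header(name, value):
    # Hypothetical companion helper: one header line terminated by CRLF.
    return "{0}: {1}\r\n".format(name, value)

def unparse_headers(hdrs):
    # Concatenate all header lines, then terminate the block with a blank CRLF line.
    return "".join(unparse_header(n, v) for n, v in hdrs.items()) + "\r\n"

print(repr(unparse_headers({"Subject": "test", "From": "a@example.com"})))
# 'Subject: test\r\nFrom: a@example.com\r\n\r\n'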
def _probe_characteristics(self, conn, services, timeout=5.0):
for service in services.values():
(success, result) = self._enumerate_handles(conn, service['start_handle'], service['end_handle'])
if (not success):
return (False, None)
attributes = result['attributes']
service['characteristics'] = {}
last_char = None
for (handle, attribute) in attributes.items():
if (attribute['uuid'].hex[(- 4):] == '0328'):
(success, result) = self._read_handle(conn, handle, timeout)
if (not success):
return (False, None)
value = result['data']
char = parse_characteristic_declaration(value)
service['characteristics'][char['uuid']] = char
last_char = char
elif (attribute['uuid'].hex[(- 4):] == '0229'):
if (last_char is None):
return (False, None)
(success, result) = self._read_handle(conn, handle, timeout)
if (not success):
return (False, None)
value = result['data']
assert (len(value) == 2)
(value,) = unpack('<H', value)
last_char['client_configuration'] = {'handle': handle, 'value': value}
return (True, {'services': services})
|
Probe gatt services for all associated characteristics in a BLE device
Args:
conn (int): the connection handle to probe
services (dict): a dictionary of services produced by probe_services()
timeout (float): the maximum number of seconds to spend in any single task
|
codesearchnet
|
def flush(self):
try:
termios.tcdrain(self._fd)
except termios.error as e:
raise SerialError(e.errno, ('Flushing serial port: ' + e.strerror))
|
Flush the write buffer of the serial port, blocking until all bytes
are written.
Raises:
SerialError: if an I/O or OS error occurs.
|
codesearchnet
|
def update_device_map(self, device_map: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
return device_map
|
Override this method if you want to override the existing device map with a new
one. E.g. for bitsandbytes, since `accelerate` is a hard requirement, if no device_map is
passed, the device_map is set to `"auto"`.
Args:
device_map (`Union[dict, str]`, *optional*):
The device_map that is passed through the `from_pretrained` method.
|
github-repos
|
def retry(retries=0, delay=timedelta(), conditions=[]):
delay_in_seconds = delay.total_seconds()
def decorator(function):
@wraps(function)
def wrapper(*args, **kwargs):
func = partial(function, *args, **kwargs)
return retry_loop(retries, delay_in_seconds, conditions, func)
return wrapper
return decorator
|
A decorator for making a function that retries on failure.
Args:
retries (Integral): The number of times to retry if a failure occurs.
delay (timedelta, optional, 0 seconds): A timedelta representing
the amount of time to delay between retries.
conditions (list): A list of retry conditions.
|
juraj-google-style
|
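retry_loop is not shown in the source, so the sketch below assumes a simple behaviour (call the function, retry on any exception up to `retries` times, sleeping between attempts); it illustrates the decorator pattern rather than the library's actual loop:

import time
from datetime import timedelta
from functools import partial, wraps

def retry_loop(retries, delay_in_seconds, conditions, func):
    # Assumed behaviour: retry on any exception; `conditions` is ignored in this sketch.
    for attempt in range(retries + 1):
        try:
            return func()
        except Exception:
            if attempt == retries:
                raise
            time.sleep(delay_in_seconds)

def retry(retries=0, delay=timedelta(), conditions=()):
    delay_in_seconds = delay.total_seconds()
    def decorator(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            return retry_loop(retries, delay_in_seconds, conditions,
                              partial(function, *args, **kwargs))
        return wrapper
    return decorator

@retry(retries=2, delay=timedelta(seconds=0))
def flaky():
    return "ok"

assert flaky() == "ok"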
def build_cfg(cls, node):
if (not isinstance(node, gast.FunctionDef)):
raise TypeError('input must be a function definition')
cfg = cls()
cfg.entry = Node(node.args)
cfg.head = [cfg.entry]
cfg.visit_statements(node.body)
cfg.exit = Node(None)
cfg.set_head(cfg.exit)
cfg.backlink(cfg.entry)
return cfg
|
Build a CFG for a function.
Args:
node: A function definition the body of which to analyze.
Returns:
A CFG object.
Raises:
TypeError: If the input is not a function definition.
|
codesearchnet
|
def _find_current_phase(self, global_step):
epoch_size = sum((phase.steps for phase in self._phases))
epoch = int(global_step // epoch_size)
steps_in = (global_step % epoch_size)
for phase in self._phases:
if (steps_in < phase.steps):
return (phase, epoch, steps_in)
steps_in -= phase.steps
|
Determine the current phase based on the global step.
This ensures continuing the correct phase after restoring checkpoints.
Args:
global_step: The global number of steps performed across all phases.
Returns:
Tuple of phase object, epoch number, and phase steps within the epoch.
|
codesearchnet
|
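The phase lookup is modular arithmetic over the concatenated phase lengths; a plain-Python sketch with made-up phase sizes shows the same epoch/steps_in split:

# Hypothetical steps per phase; not taken from the source.
phase_steps = [100, 50]
epoch_size = sum(phase_steps)

def find_phase(global_step):
    epoch = global_step // epoch_size      # completed passes over all phases
    steps_in = global_step % epoch_size    # position inside the current pass
    for index, steps in enumerate(phase_steps):
        if steps_in < steps:
            return index, epoch, steps_in
        steps_in -= steps

assert find_phase(30) == (0, 0, 30)    # inside the first phase
assert find_phase(120) == (1, 0, 20)   # 20 steps into the second phase
assert find_phase(175) == (0, 1, 25)   # second epoch, back in the first phase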
def decode_value(value, client):
value_type = value.WhichOneof('value_type')
if (value_type == 'null_value'):
return None
elif (value_type == 'boolean_value'):
return value.boolean_value
elif (value_type == 'integer_value'):
return value.integer_value
elif (value_type == 'double_value'):
return value.double_value
elif (value_type == 'timestamp_value'):
return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value)
elif (value_type == 'string_value'):
return value.string_value
elif (value_type == 'bytes_value'):
return value.bytes_value
elif (value_type == 'reference_value'):
return reference_value_to_document(value.reference_value, client)
elif (value_type == 'geo_point_value'):
return GeoPoint(value.geo_point_value.latitude, value.geo_point_value.longitude)
elif (value_type == 'array_value'):
return [decode_value(element, client) for element in value.array_value.values]
elif (value_type == 'map_value'):
return decode_dict(value.map_value.fields, client)
else:
raise ValueError('Unknown ``value_type``', value_type)
|
Converts a Firestore protobuf ``Value`` to a native Python value.
Args:
value (google.cloud.firestore_v1beta1.types.Value): A
Firestore protobuf to be decoded / parsed / converted.
client (~.firestore_v1beta1.client.Client): A client that has
a document factory.
Returns:
Union[NoneType, bool, int, float, datetime.datetime, \
str, bytes, dict, ~google.cloud.Firestore.GeoPoint]: A native
Python value converted from the ``value``.
Raises:
ValueError: If the ``value_type`` is unknown.
|
codesearchnet
|
def add_signature_block(src_fileobj, dest_fileobj, signing_algorithm, signature=None):
algo_id = {'sha1': 1, 'sha384': 2}[signing_algorithm]
if (not signature):
signature = make_dummy_signature(algo_id)
src_fileobj.seek(0)
mardata = mar.parse_stream(src_fileobj)
header = mardata.header
dest_fileobj.write(mar_header.build(header))
sig = dict(algorithm_id=algo_id, size=len(signature), signature=signature)
filesize = 0
sigs_offset = dest_fileobj.tell()
sigs = sigs_header.build(dict(filesize=filesize, count=1, sigs=[sig]))
dest_fileobj.write(sigs)
dest_fileobj.write(extras_header.build(mardata.additional))
data_offset = dest_fileobj.tell()
src_fileobj.seek(mardata.data_offset)
write_to_file(takeexactly(src_fileobj, mardata.data_length), dest_fileobj)
index_offset = dest_fileobj.tell()
index = mardata.index
data_offset_delta = (data_offset - mardata.data_offset)
for e in index.entries:
e.offset += data_offset_delta
dest_fileobj.write(index_header.build(index))
filesize = dest_fileobj.tell()
dest_fileobj.seek(0)
header.index_offset = index_offset
dest_fileobj.write(mar_header.build(header))
dest_fileobj.seek(sigs_offset)
sigs = sigs_header.build(dict(filesize=filesize, count=1, sigs=[sig]))
dest_fileobj.write(sigs)
|
Add a signature block to a MAR file.
Productversion and channel are preserved, but any existing signatures are overwritten.
Args:
src_fileobj (file object): The input MAR file to add a signature to
dest_fileobj (file object): File object to write new MAR file to. Must be open in w+b mode.
signing_algorithm (str): One of 'sha1', or 'sha384'
signature (bytes): Signature to write, or None to use a dummy signature
|
codesearchnet
|
def GetAPIScope(api_name):
try:
return SCOPES[api_name]
except KeyError:
raise googleads.errors.GoogleAdsValueError(('Invalid API name "%s" provided. Acceptable values are: %s' % (api_name, SCOPES.keys())))
|
Retrieves the scope for the given API name.
Args:
api_name: A string identifying the name of the API we want to retrieve a
scope for.
Returns:
A string that is the scope for the given API name.
Raises:
GoogleAdsValueError: If the given api_name is invalid; accepted values are
"adwords" and "ad_manager".
|
codesearchnet
|
def set_type(self, weather_type):
weather_type = weather_type.lower()
exists = self.has_type(weather_type)
if exists:
self.add_string_parameters(weather_type)
|
Set the weather type.
Args:
weather_type (str): The weather type.
|
juraj-google-style
|
def _FormatOpaqueToken(self, token_data):
data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])
return {'data': data}
|
Formats an opaque token as a dictionary of values.
Args:
token_data (bsm_token_data_opaque): AUT_OPAQUE token data.
Returns:
dict[str, str]: token values.
|
juraj-google-style
|
def _on_action(self, sequence, topic, message):
try:
slug = None
parts = topic.split('/')
slug = parts[(- 3)]
uuid = self._extract_device_uuid(slug)
except Exception as exc:
self._logger.warn('Error parsing slug in action handler (slug=%s, topic=%s)', slug, topic)
return
if messages.DisconnectCommand.matches(message):
self._logger.debug('Received disconnect command for device 0x%X', uuid)
key = message['key']
client = message['client']
self._loop.add_callback(self._disconnect_from_device, uuid, key, client)
elif (messages.OpenInterfaceCommand.matches(message) or messages.CloseInterfaceCommand.matches(message)):
self._logger.debug('Received %s command for device 0x%X', message['operation'], uuid)
key = message['key']
client = message['client']
oper = message['operation']
if (oper == 'open_interface'):
self._loop.add_callback(self._open_interface, client, uuid, message['interface'], key)
else:
self._loop.add_callback(self._close_interface, client, uuid, message['interface'], key)
elif messages.RPCCommand.matches(message):
rpc_msg = messages.RPCCommand.verify(message)
client = rpc_msg['client']
address = rpc_msg['address']
rpc = rpc_msg['rpc_id']
payload = rpc_msg['payload']
key = rpc_msg['key']
timeout = rpc_msg['timeout']
self._loop.add_callback(self._send_rpc, client, uuid, address, rpc, payload, timeout, key)
elif messages.ScriptCommand.matches(message):
script_msg = messages.ScriptCommand.verify(message)
key = script_msg['key']
client = script_msg['client']
script = script_msg['script']
self._loop.add_callback(self._send_script, client, uuid, script, key, (script_msg['fragment_index'], script_msg['fragment_count']))
else:
self._logger.error('Unsupported message received (topic=%s) (message=%s)', topic, str(message))
|
Process a command action that we received on behalf of a device.
Args:
sequence (int): The sequence number of the packet received
topic (string): The topic this message was received on
message (dict): The message itself
|
codesearchnet
|
def header_present(self, *names):
for name in names:
headers = {name: re.compile('(.*)')}
self.add_matcher(matcher('HeadersMatcher', headers))
|
Defines a new header matcher expectation that must be present in the
outgoing request in order to be satisfied, no matter what value it
hosts.
Header keys are case insensitive.
Arguments:
*names (str): header or headers names to match.
Returns:
self: current Mock instance.
Example::
(pook.get('server.com/api')
.header_present('content-type'))
|
codesearchnet
|
def datastore(self, domain, data_type, mapping=None):
from .tcex_datastore import TcExDataStore
return TcExDataStore(self, domain, data_type, mapping)
|
Get instance of the DataStore module.
Args:
domain (str): The domain can be either "system", "organization", or "local". When using
"organization" the data store can be accessed by any Application in the entire org,
while "local" access is restricted to the App writing the data. The "system" option
should not be used in almost all cases.
data_type (str): The data type descriptor (e.g., tc:whois:cache).
mapping (dict, optional): Elasticsearch mappings for the data type.
Returns:
object: An instance of the DataStore Class.
|
codesearchnet
|
def get_browser_controller(browser=None):
browser = settings.browser(browser)
if (browser is not None):
if (browser == 'none'):
controller = DummyWebBrowser()
else:
controller = webbrowser.get(browser)
else:
controller = webbrowser
return controller
|
Return a browser controller.
Args:
browser (str or None) : browser name, or ``None`` (default: ``None``)
If passed the string ``'none'``, a dummy web browser controller
is returned
Otherwise, use the value to select an appropriate controller using
the ``webbrowser`` standard library module. If the value is
``None``, then a system default is used.
.. note::
If the environment variable ``BOKEH_BROWSER`` is set, it will take
precedence.
Returns:
controller : a web browser controller
|
codesearchnet
|
def from_any_pb(pb_type, any_pb):
msg = pb_type()
if callable(getattr(pb_type, 'pb', None)):
msg_pb = pb_type.pb(msg)
else:
msg_pb = msg
if (not any_pb.Unpack(msg_pb)):
raise TypeError('Could not convert {} to {}'.format(any_pb.__class__.__name__, pb_type.__name__))
return msg
|
Converts an ``Any`` protobuf to the specified message type.
Args:
pb_type (type): the type of the message that any_pb stores an instance
of.
any_pb (google.protobuf.any_pb2.Any): the object to be converted.
Returns:
pb_type: An instance of the pb_type message.
Raises:
TypeError: if the message could not be converted.
|
codesearchnet
|
def mirror(self):
if (not self._definition):
return self.copy()
reverse_inst = self.copy(name=(self.name + '_mirror'))
reverse_inst.definition = []
for (inst, qargs, cargs) in reversed(self._definition):
reverse_inst._definition.append((inst.mirror(), qargs, cargs))
return reverse_inst
|
For a composite instruction, reverse the order of sub-gates.
This is done by recursively mirroring all sub-instructions.
It does not invert any gate.
Returns:
Instruction: a fresh gate with sub-gates reversed
|
codesearchnet
|
def _MaybeColocateWith(inputs):
if not inputs:
yield
else:
with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):
yield
|
A context manager for (maybe) colocating with a list of input tensors.
Args:
inputs: A list of `Tensor` or `Operation` objects.
Returns:
A context manager.
|
github-repos
|
def example_serving_receiver_fn(tf_transform_output, schema):
raw_feature_spec = taxi.get_raw_feature_spec(schema)
raw_feature_spec.pop(taxi.LABEL_KEY)
raw_input_fn = tf_estimator.export.build_parsing_serving_input_receiver_fn(raw_feature_spec, default_batch_size=None)
serving_input_receiver = raw_input_fn()
transformed_features = tf_transform_output.transform_raw_features(serving_input_receiver.features)
return tf_estimator.export.ServingInputReceiver(transformed_features, serving_input_receiver.receiver_tensors)
|
Build the serving inputs.
Args:
tf_transform_output: A TFTransformOutput.
schema: the schema of the input data.
Returns:
Tensorflow graph which parses examples, applying tf-transform to them.
|
github-repos
|
def subcomponents(self, subcomponents):
for arg in self.args:
if arg.__class__.__name__ == "Function":
subcomponents.append(arg.to_string())
if arg.function_type == "primary":
arg.subcomponents(subcomponents)
else:
subcomponents.append(arg.to_string())
return subcomponents
|
Generate subcomponents of the BEL subject or object
These subcomponents are used for matching parts of a BEL
subject or Object in the Edgestore.
Args:
subcomponents (list): Pass an empty list to start a new subcomponents request.
Returns:
List[str]: subcomponents of BEL subject or object
|
juraj-google-style
|
def get_header(vcf_file_path):
logger.info('Parsing header of file {0}'.format(vcf_file_path))
head = HeaderParser()
handle = get_vcf_handle(infile=vcf_file_path)
for line in handle:
line = line.rstrip()
if line.startswith('#'):
if line.startswith('##'):
head.parse_meta_data(line)
else:
head.parse_header_line(line)
else:
break
handle.close()
return head
|
Parse the header and return a header object
Args:
vcf_file_path(str): Path to vcf
Returns:
head: A HeaderParser object
|
codesearchnet
|
def sampler_to_iterator(dataset, sampler):
for sample in sampler:
if isinstance(sample, (list, tuple)):
yield [dataset[i] for i in sample]
else:
yield dataset[sample]
|
Given a batch sampler or sampler returns examples instead of indices
Args:
dataset (torch.utils.data.Dataset): Dataset to sample from.
sampler (torch.utils.data.sampler.Sampler): Sampler over the dataset.
Returns:
generator over dataset examples
|
juraj-google-style
|
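Since the helper only indexes into the dataset, a plain list is enough to see the behaviour; no torch objects are needed for this sketch:

def sampler_to_iterator(dataset, sampler):
    for sample in sampler:
        if isinstance(sample, (list, tuple)):
            yield [dataset[i] for i in sample]   # batch sampler: a list of indices
        else:
            yield dataset[sample]                # plain sampler: a single index

data = ['a', 'b', 'c', 'd']
assert list(sampler_to_iterator(data, [1, 3])) == ['b', 'd']
assert list(sampler_to_iterator(data, [[0, 1], [2, 3]])) == [['a', 'b'], ['c', 'd']]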
def get_readonly_field_data(field_name, instance, view=None, fun_kwargs=None):
fun_kwargs = fun_kwargs or {}
if view:
view_readonly_data = _get_view_readonly_data(field_name, view, fun_kwargs)
if view_readonly_data is not None:
return view_readonly_data
field_data = _get_model_readonly_data(field_name, instance, fun_kwargs)
if field_data is not None:
return field_data
raise FieldOrMethodDoesNotExist('Field or method with name {} not found'.format(field_name))
|
Returns field humanized value, label and widget which are used to display of instance or view readonly data.
Args:
field_name: name of the field which will be displayed
instance: model instance
view: view instance
fun_kwargs: kwargs that can be used inside method call
Returns:
field humanized value, label and widget which are used to display readonly data
|
juraj-google-style
|
def __init__(self, offset):
super(TimeZoneOffset, self).__init__()
if isinstance(offset, datetime.timedelta):
offset = total_seconds(offset) / 60
self.__offset = offset
|
Initialize a time zone offset.
Args:
offset: Integer or timedelta time zone offset, in minutes from UTC.
This can be negative.
|
juraj-google-style
|
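The constructor's timedelta handling is just a seconds-to-minutes conversion; with the stdlib method the same normalization looks like this (a sketch, not the class itself):

import datetime

def offset_in_minutes(offset):
    # Accept either an integer number of minutes or a timedelta offset from UTC.
    if isinstance(offset, datetime.timedelta):
        return offset.total_seconds() / 60
    return offset

assert offset_in_minutes(datetime.timedelta(hours=-5)) == -300
assert offset_in_minutes(90) == 90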
def get_submission_and_student(uuid, read_replica=False):
submission = get_submission(uuid, read_replica=read_replica)
cache_key = 'submissions.student_item.{}'.format(submission['student_item'])
try:
cached_student_item = cache.get(cache_key)
except Exception:
logger.exception('Error occurred while retrieving student item from the cache')
cached_student_item = None
if (cached_student_item is not None):
submission['student_item'] = cached_student_item
else:
try:
student_item_qs = StudentItem.objects
if read_replica:
student_item_qs = _use_read_replica(student_item_qs)
student_item = student_item_qs.get(id=submission['student_item'])
submission['student_item'] = StudentItemSerializer(student_item).data
cache.set(cache_key, submission['student_item'])
except Exception as ex:
err_msg = 'Could not get submission due to error: {}'.format(ex)
logger.exception(err_msg)
raise SubmissionInternalError(err_msg)
return submission
|
Retrieve a submission by its unique identifier, including the associated student item.
Args:
uuid (str): the unique identifier of the submission.
Kwargs:
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Returns:
Serialized Submission model (dict) containing a serialized StudentItem model
Raises:
SubmissionNotFoundError: Raised if the submission does not exist.
SubmissionRequestError: Raised if the search parameter is not a string.
SubmissionInternalError: Raised for unknown errors.
|
codesearchnet
|
def energies(self, samples_like, dtype=float):
samples, labels = as_samples(samples_like)
if all(v == idx for idx, v in enumerate(labels)):
ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(dtype=dtype)
else:
ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(variable_order=labels, dtype=dtype)
energies = samples.dot(ldata) + (samples[:, irow]*samples[:, icol]).dot(qdata) + offset
return np.asarray(energies, dtype=dtype)
|
Determine the energies of the given samples.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of NumPy's array_like
structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`):
The data type of the returned energies.
Returns:
:obj:`numpy.ndarray`: The energies.
|
juraj-google-style
|
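The vectorized energy formula combines a linear term, a quadratic term gathered by index pairs, and a constant offset; a small NumPy sketch with made-up coefficients reproduces the same dot-product pattern:

import numpy as np

# Hypothetical 2-variable model: linear biases, one quadratic interaction, an offset.
ldata = np.array([0.5, -1.0])    # linear bias per variable
irow = np.array([0])             # quadratic term acts on variables (0, 1)
icol = np.array([1])
qdata = np.array([2.0])          # coefficient of that interaction
offset = 0.25

samples = np.array([[1, 1], [1, -1]], dtype=float)

energies = (samples.dot(ldata)
            + (samples[:, irow] * samples[:, icol]).dot(qdata)
            + offset)
# sample [1,  1]:  0.5 - 1.0 + 2.0 + 0.25 =  1.75
# sample [1, -1]:  0.5 + 1.0 - 2.0 + 0.25 = -0.25
print(energies)  # [ 1.75 -0.25]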
def start_log_monitor(redis_address, logs_dir, stdout_file=None, stderr_file=None, redis_password=None):
log_monitor_filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'log_monitor.py')
command = [sys.executable, '-u', log_monitor_filepath, '--redis-address={}'.format(redis_address), '--logs-dir={}'.format(logs_dir)]
if redis_password:
command += ['--redis-password', redis_password]
process_info = start_ray_process(command, ray_constants.PROCESS_TYPE_LOG_MONITOR, stdout_file=stdout_file, stderr_file=stderr_file)
return process_info
|
Start a log monitor process.
Args:
redis_address (str): The address of the Redis instance.
logs_dir (str): The directory of logging files.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
Returns:
ProcessInfo for the process that was started.
|
codesearchnet
|
def attention_mask_same_segment(
query_segment, memory_segment=None, dtype=tf.float32):
memory_segment = rename_length_to_memory_length(
memory_segment or query_segment)
return mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * -1e9
|
Bias for attention where attention between segments is disallowed.
Args:
query_segment: a mtf.Tensor with shape [..., length_dim]
memory_segment: a mtf.Tensor with shape [..., memory_length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim]
|
juraj-google-style
|
def get_aws_unique_id(timeout=DEFAULT_AWS_TIMEOUT):
try:
resp = requests.get(AWS_ID_URL, timeout=timeout).json()
except requests.exceptions.ConnectTimeout:
_logger.warning('Connection timeout when determining AWS unique ID. Not using AWS unique ID.')
return None
else:
aws_id = '{0}_{1}_{2}'.format(resp['instanceId'], resp['region'], resp['accountId'])
_logger.debug('Using AWS unique ID %s.', aws_id)
return aws_id
|
Determine the current AWS unique ID
Args:
timeout (int): How long to wait for a response from AWS metadata IP
|
codesearchnet
|
def set_client_cmd(self, *args):
self.client_cmd.update(args)
self.output['client_cmd'] = list(self.client_cmd)
|
Adds given cmd(s) to ``self.output['client_cmd']``
Args:
*args: Client commands.
|
juraj-google-style
|
def _get_tensorrt_rewriter_config(conversion_params, is_dynamic_op=None, max_batch_size=None, is_v2=False, disable_non_trt_optimizers=False, use_implicit_batch=True, profile_strategy=PROFILE_STRATEGY_RANGE):
_check_conversion_params(conversion_params, is_v2=is_v2)
if is_v2 and is_dynamic_op is not None and (not is_dynamic_op):
raise ValueError('is_dynamic_op is either None or True for TF2')
if not is_v2 and is_dynamic_op is None:
raise ValueError("is_dynamic_op can't be None for TF1")
if (is_dynamic_op is None or is_dynamic_op) and max_batch_size is not None:
raise ValueError('max_batch_size has to be None for TF2 or when is_dynamic_op == True in TF1')
if is_dynamic_op is not None and (not is_dynamic_op) and (not isinstance(max_batch_size, int)):
raise ValueError('max_batch_size has to be an integer for is_dynamic_op==False in TF1')
rewriter_config_with_trt = rewriter_config_pb2.RewriterConfig()
rewriter_config_with_trt.remapping = False
rewriter_config_with_trt.experimental_disable_folding_quantization_emulation = trt_utils.is_linked_tensorrt_version_greater_equal(8, 0, 0) or trt_utils.is_loaded_tensorrt_version_greater_equal(8, 0, 0)
if not disable_non_trt_optimizers:
rewriter_config_with_trt.optimizers.extend(['pruning', 'debug_stripper', 'layout', 'dependency', 'constfold', 'common_subgraph_elimination'])
rewriter_config_with_trt.meta_optimizer_iterations = rewriter_config_pb2.RewriterConfig.ONE
optimizer = rewriter_config_with_trt.custom_optimizers.add()
if not disable_non_trt_optimizers:
rewriter_config_with_trt.custom_optimizers.add().name = 'constfold'
optimizer.name = 'TensorRTOptimizer'
optimizer.parameter_map['minimum_segment_size'].i = conversion_params.minimum_segment_size
optimizer.parameter_map['max_workspace_size_bytes'].i = conversion_params.max_workspace_size_bytes
optimizer.parameter_map['precision_mode'].s = _to_bytes(conversion_params.precision_mode)
optimizer.parameter_map['maximum_cached_engines'].i = conversion_params.maximum_cached_engines
optimizer.parameter_map['use_calibration'].b = conversion_params.use_calibration
optimizer.parameter_map['is_dynamic_op'].b = is_dynamic_op
optimizer.parameter_map['allow_build_at_runtime'].b = conversion_params.allow_build_at_runtime
if max_batch_size is not None:
optimizer.parameter_map['max_batch_size'].i = max_batch_size
optimizer.parameter_map['use_implicit_batch'].b = use_implicit_batch
if not use_implicit_batch:
optimizer.parameter_map['profile_strategy'].s = _to_bytes(profile_strategy.lower())
if disable_non_trt_optimizers:
trt_utils.disable_non_trt_optimizers_in_rewriter_config(rewriter_config_with_trt)
return rewriter_config_with_trt
|
Returns a RewriterConfig proto for TRT transformation.
Args:
conversion_params: a TrtConversionParams instance.
is_dynamic_op: whether to use dynamic engines.
max_batch_size: maximum batch size for static engines.
is_v2: whether we're getting a RewriterConfig for TF 2.0.
disable_non_trt_optimizers: Turn off all default Grappler optimizers.
use_implicit_batch: Whether to use implicit batch or explicit batch.
profile_strategy: dynamic shape optimization profile strategy.
Returns:
A RewriterConfig proto which sets a TensorRTOptimizer to run Grappler.
Raises:
TypeError: if any of the parameters are of unexpected type.
ValueError: if any of the parameters are of unexpected value.
|
github-repos
|
def create_asset_delivery_policy(access_token, ams_account, key_delivery_url):
path = '/AssetDeliveryPolicies'
endpoint = ''.join([ams_rest_endpoint, path])
body = '{ \
"Name":"AssetDeliveryPolicy", \
"AssetDeliveryProtocol":"4", \
"AssetDeliveryPolicyType":"3", \
"AssetDeliveryConfiguration":"[{ \
\\"Key\\":\\"2\\", \
\\"Value\\":\\"' + key_delivery_url + '\\"}]" \
}'
return do_ams_post(endpoint, path, body, access_token)
|
Create Media Service Asset Delivery Policy.
Args:
access_token (str): A valid Azure authentication token.
ams_account (str): Media Service Account.
key_delivery_url (str): Key delivery URL for the asset.
Returns:
HTTP response. JSON body.
|
juraj-google-style
|
def __new__(cls, input_array, vscale=None, check_rank=None):
obj = np.asarray(input_array).view(cls)
obj.rank = len(obj.shape)
if check_rank and check_rank != obj.rank:
raise ValueError("{} input must be rank {}".format(
obj.__class__.__name__, check_rank))
vshape = tuple([3] * (obj.rank % 2) + [6] * (obj.rank // 2))
obj._vscale = np.ones(vshape)
if vscale is not None:
obj._vscale = vscale
if obj._vscale.shape != vshape:
raise ValueError("Voigt scaling matrix must be the shape of the "
"voigt notation matrix or vector.")
if not all([i == 3 for i in obj.shape]):
raise ValueError("Pymatgen only supports 3-dimensional tensors, "
"and default tensor constructor uses standard "
"notation. To construct from voigt notation, use"
" {}.from_voigt".format(obj.__class__.__name__))
return obj
|
Create a Tensor object. Note that the constructor uses __new__
rather than __init__ according to the standard method of
subclassing numpy ndarrays.
Args:
input_array: (array-like with shape 3^N): array-like representing
a tensor quantity in standard (i. e. non-voigt) notation
vscale: (N x M array-like): a matrix corresponding
to the coefficients of the voigt-notation tensor
|
juraj-google-style
|
def delete_if_exists(self, **kwargs):
try:
self.get(**kwargs).blocking_delete()
return True
except ObjectDoesNotExist:
return False
|
Deletes an object if it exists in database according to given query
parameters and returns True otherwise does nothing and returns False.
Args:
**kwargs: query parameters
Returns(bool): True or False
|
juraj-google-style
|
def encode(request, data):
if (data is None):
return request
request.add_header('Content-Type', 'application/json')
request.data = json.dumps(data)
return request
|
Add request content data to request body, set Content-type header.
Should be overridden by subclasses if not using JSON encoding.
Args:
request (HTTPRequest): The request object.
data (dict, None): Data to be encoded.
Returns:
HTTPRequest: The request object.
|
codesearchnet
|
def log_prob(self, value, name='log_prob'):
return self._call_log_prob(value, name)
|
Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
|
github-repos
|
def consume(self, data):
if not self._started:
self.fire(JSONStreamer.DOC_START_EVENT)
self._started = True
self._file_like.write(data)
try:
self._parser.parse(self._file_like)
except YajlError as ye:
raise JSONStreamerException(ye.value)
|
Takes input that must be parsed
Note:
Attach all your listeners before calling this method
Args:
data (str): input json string
|
juraj-google-style
|
def get_overlaps(self, offset, length):
if ''.join([chunk.word for chunk in self])[offset] == ' ':
offset += 1
index = 0
result = ChunkList()
for chunk in self:
if offset < index + len(chunk.word) and index < offset + length:
result.append(chunk)
index += len(chunk.word)
return result
|
Returns chunks overlapped with the given range.
Args:
offset (int): Begin offset of the range.
length (int): Length of the range.
Returns:
Overlapped chunks. (:obj:`budou.chunk.ChunkList`)
|
juraj-google-style
|
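The overlap test is cumulative index arithmetic; a plain-string sketch (a list of words standing in for the ChunkList) shows which items intersect the half-open range [offset, offset + length):

def get_overlaps(words, offset, length):
    # Keep the words whose character span intersects [offset, offset + length).
    index = 0
    result = []
    for word in words:
        if offset < index + len(word) and index < offset + length:
            result.append(word)
        index += len(word)
    return result

words = ['budou', 'is', 'nice']          # spans: [0,5), [5,7), [7,11)
assert get_overlaps(words, 4, 2) == ['budou', 'is']
assert get_overlaps(words, 7, 1) == ['nice']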
def Parse(text):
precondition.AssertType(text, Text)
if compatibility.PY2:
text = text.encode('utf-8')
return yaml.safe_load(text)
|
Parses a YAML source into a Python object.
Args:
text: A YAML source to parse.
Returns:
A Python data structure corresponding to the YAML source.
|
codesearchnet
|
def insert_meta_fields_into_existing_schema(graphql_schema):
root_type_name = graphql_schema.get_query_type().name
for (type_name, type_obj) in six.iteritems(graphql_schema.get_type_map()):
if (type_name.startswith('__') or (type_name == root_type_name)):
continue
if (not isinstance(type_obj, (GraphQLObjectType, GraphQLInterfaceType))):
continue
for (meta_field_name, meta_field) in six.iteritems(EXTENDED_META_FIELD_DEFINITIONS):
if (meta_field_name in type_obj.fields):
raise AssertionError(u'Unexpectedly encountered an existing field named {} while attempting to add a meta-field of the same name. Make sure you are not attempting to add meta-fields twice.'.format(meta_field_name))
type_obj.fields[meta_field_name] = meta_field
|
Add compiler-specific meta-fields into all interfaces and types of the specified schema.
It is preferable to use the EXTENDED_META_FIELD_DEFINITIONS constant above to directly inject
the meta-fields during the initial process of building the schema, as that approach
is more robust. This function does its best to not mutate unexpected definitions, but
may break unexpectedly as the GraphQL standard is extended and the underlying
GraphQL library is updated.
Use this function at your own risk. Don't say you haven't been warned.
Properties added include:
- "_x_count", which allows filtering folds based on the number of elements they capture.
Args:
graphql_schema: GraphQLSchema object describing the schema that is going to be used with
the compiler. N.B.: MUTATED IN-PLACE in this method.
|
codesearchnet
|
def download_folder(bucket_name, prefix, target, sagemaker_session):
boto_session = sagemaker_session.boto_session
s3 = boto_session.resource('s3')
bucket = s3.Bucket(bucket_name)
prefix = prefix.lstrip('/')
objects = list(bucket.objects.filter(Prefix=prefix))
if len(objects) > 0 and objects[0].key == prefix and prefix[-1] != '/':
s3.Object(bucket_name, prefix).download_file(os.path.join(target, os.path.basename(prefix)))
return
for obj_sum in bucket.objects.filter(Prefix=prefix):
if obj_sum.key != '' and obj_sum.key[-1] == '/':
continue
obj = s3.Object(obj_sum.bucket_name, obj_sum.key)
s3_relative_path = obj_sum.key[len(prefix):].lstrip('/')
file_path = os.path.join(target, s3_relative_path)
try:
os.makedirs(os.path.dirname(file_path))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
obj.download_file(file_path)
|
Download a folder from S3 to a local path
Args:
bucket_name (str): S3 bucket name
prefix (str): S3 prefix within the bucket that will be downloaded. Can be a single file.
target (str): destination path where the downloaded items will be placed
sagemaker_session (:class:`sagemaker.session.Session`): a sagemaker session to interact with S3.
|
juraj-google-style
|
def session_new(self, **kwargs):
path = self._get_path('session_new')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Generate a session id for user based authentication.
A session id is required in order to use any of the write methods.
Args:
request_token: The token you generated for the user to approve.
The token needs to be approved before being
used here.
Returns:
A dict representation of the JSON returned from the API.
|
codesearchnet
|
def convert_to_python_type(typ):
if isinstance(typ, typehints.TypeVariable):
if id(typ) not in _type_var_cache:
new_type_variable = typing.TypeVar(typ.name)
_type_var_cache[id(typ)] = new_type_variable
_type_var_cache[id(new_type_variable)] = typ
return _type_var_cache[id(typ)]
elif not getattr(typ, '__module__', None).endswith('typehints'):
return typ
if isinstance(typ, typehints.AnyTypeConstraint):
return typing.Any
if isinstance(typ, typehints.DictConstraint):
return dict[convert_to_python_type(typ.key_type), convert_to_python_type(typ.value_type)]
if isinstance(typ, typehints.ListConstraint):
return list[convert_to_python_type(typ.inner_type)]
if isinstance(typ, typehints.IterableTypeConstraint):
return collections.abc.Iterable[convert_to_python_type(typ.inner_type)]
if isinstance(typ, typehints.UnionConstraint):
if not typ.union_types:
return typing.Any
return typing.Union[tuple(convert_to_python_types(typ.union_types))]
if isinstance(typ, typehints.SetTypeConstraint):
return set[convert_to_python_type(typ.inner_type)]
if isinstance(typ, typehints.FrozenSetTypeConstraint):
return frozenset[convert_to_python_type(typ.inner_type)]
if isinstance(typ, typehints.TupleConstraint):
return tuple[tuple(convert_to_python_types(typ.tuple_types))]
if isinstance(typ, typehints.TupleSequenceConstraint):
return tuple[convert_to_python_type(typ.inner_type), ...]
if isinstance(typ, typehints.ABCSequenceTypeConstraint):
return collections.abc.Sequence[convert_to_python_type(typ.inner_type)]
if isinstance(typ, typehints.IteratorTypeConstraint):
return collections.abc.Iterator[convert_to_python_type(typ.yielded_type)]
if isinstance(typ, typehints.MappingTypeConstraint):
return collections.abc.Mapping[convert_to_python_type(typ.key_type), convert_to_python_type(typ.value_type)]
raise ValueError('Failed to convert Beam type: %s' % typ)
|
Converts a given Beam type to a python type.
This is the reverse of convert_to_beam_type.
Args:
typ: If a typehints.TypeConstraint, the type to convert. Otherwise, typ
will be unchanged.
Returns:
Converted version of typ, or unchanged.
Raises:
ValueError: The type was malformed or could not be converted.
|
github-repos
|
def conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(input=x, filter=kernel, dilation_rate=dilation_rate, strides=strides, padding=padding, data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
|
3D convolution.
Args:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of 3 integers.
Returns:
A tensor, result of 3D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
|
github-repos
|
def watch_statuses(self, observer, batch_ids):
with self._lock:
statuses = self.get_statuses(batch_ids)
if self._has_no_pendings(statuses):
observer.notify_batches_finished(statuses)
else:
self._observers[observer] = statuses
|
Allows a component to register to be notified when a set of
batches is no longer PENDING. Expects to be able to call the
"notify_batches_finished" method on the registered component, sending
the statuses of the batches.
Args:
observer (object): Must implement "notify_batches_finished" method
batch_ids (list of str): The ids of the batches to watch
|
juraj-google-style
|
def _get_bucket(self, client_kwargs):
return _oss.Bucket(self.client, endpoint=self._endpoint, bucket_name=client_kwargs['bucket_name'])
|
Get bucket object.
Returns:
oss2.Bucket
|
codesearchnet
|
def _add(self, frame, strict):
if (not isinstance(frame, Frame)):
raise TypeError(('%r not a Frame instance' % frame))
orig_frame = frame
frame = frame._upgrade_frame()
if (frame is None):
if (not strict):
return
raise TypeError(("Can't upgrade %r frame" % type(orig_frame).__name__))
hash_key = frame.HashKey
if (strict or (hash_key not in self)):
self[hash_key] = frame
return
while True:
old_frame = self[hash_key]
new_frame = old_frame._merge_frame(frame)
new_hash = new_frame.HashKey
if (new_hash == hash_key):
self[hash_key] = new_frame
break
else:
assert (new_frame is frame)
if (new_hash not in self):
self[new_hash] = new_frame
break
hash_key = new_hash
|
Add a frame.
Args:
frame (Frame): the frame to add
strict (bool): if this should raise in case it can't be added
and frames shouldn't be merged.
|
codesearchnet
|
def create_indexes(names, settings=None):
for name in names:
index = Index(name)
try:
if (not index.exists()):
logger.debug('Creating Elasticsearch index: {0}'.format(name))
if (settings is None):
index.settings(number_of_shards=1, number_of_replicas=1)
else:
index.settings(**settings)
index.create()
except Exception as e:
raise ElasticsearchError('Elasticsearch error: {0}'.format(e.__str__()))
|
Create Elasticsearch indexes
Args:
names (list): A list of index names
settings (dict): Index settings
|
codesearchnet
|
def generate_row(fake: Faker, config: Config) -> Row:
row: Row = {}
for column in config:
row[column.bq_name] = column.value(fake)
return row
|
Generates a Row of Faker data, conforming to the config.
Args:
* fake: Faker instance
* config: List of Columns
Returns:
* Row of Faker data
|
github-repos
|
def transform(self, df):
for name, function in self.outputs:
df[name] = function(df)
|
Transforms a DataFrame in place. Computes all outputs of the DataFrame.
Args:
df (pandas.DataFrame): DataFrame to transform.
|
juraj-google-style
|
def _get_block_sizes(resnet_size):
choices = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 24, 36, 3]
}
try:
return choices[resnet_size]
except KeyError:
err = ('Could not find layers for selected Resnet size.\n'
'Size received: {}; sizes allowed: {}.'.format(
resnet_size, choices.keys()))
raise ValueError(err)
|
Retrieve the size of each block_layer in the ResNet model.
The number of block layers used for the Resnet model varies according
to the size of the model. This helper grabs the layer set we want, throwing
an error if a non-standard size has been selected.
Args:
resnet_size: The number of convolutional layers needed in the model.
Returns:
A list of block sizes to use in building the model.
Raises:
ValueError: if invalid resnet_size is received.
|
juraj-google-style
|
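The helper is a dictionary lookup that converts the KeyError into a more descriptive ValueError; a trimmed, self-contained version of the same pattern:

block_size_choices = {
    18: [2, 2, 2, 2],
    34: [3, 4, 6, 3],
    50: [3, 4, 6, 3],
}

def get_block_sizes(resnet_size):
    try:
        return block_size_choices[resnet_size]
    except KeyError:
        raise ValueError(
            'Could not find layers for selected Resnet size.\n'
            'Size received: {}; sizes allowed: {}.'.format(
                resnet_size, list(block_size_choices)))

assert get_block_sizes(50) == [3, 4, 6, 3]
try:
    get_block_sizes(42)
except ValueError as err:
    print(err)   # names the allowed sizes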
def ParseNetworkConnectivityUsage(self, parser_mediator, cache=None, database=None, table=None, **unused_kwargs):
self._ParseGUIDTable(parser_mediator, cache, database, table, self._NETWORK_CONNECTIVITY_USAGE_VALUES_MAP, SRUMNetworkConnectivityUsageEventData)
|
Parses the network connectivity usage monitor table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache, which contains information about
the identifiers stored in the SruDbIdMapTable table.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
|
codesearchnet
|
def event_type(self, event, cameo_code) -> List[str]:
key = self.event_name[event]
entry = self.mapping.get(cameo_code)
result = None
if entry:
result = entry[key]
if result is None or result == "":
return None
elif not isinstance(result, list):
result = [result]
return result
|
Look up the event type of an event
Args:
event: one of "event1", "event2" or "event3"
cameo_code: one of the cameo codes
Returns: a list of the event types or None if the event is not relevant.
|
juraj-google-style
|
def has_axon(neuron, treefun=_read_neurite_type):
return CheckResult((NeuriteType.axon in (treefun(n) for n in neuron.neurites)))
|
Check if a neuron has an axon
Arguments:
neuron(Neuron): The neuron object to test
treefun: Optional function to calculate the tree type of
neuron's neurites
Returns:
CheckResult with result
|
codesearchnet
|
def report_get(config, auth, report_id=None, name=None):
if name:
for query in API_DBM(config, auth, iterate=True).queries().list().execute():
if query['metadata']['title'] == name:
return query
else:
return API_DBM(config, auth).queries().get(queryId=report_id).execute()
|
Returns the DBM JSON definition of a report based on name or ID.
Args:
* auth: (string) Either user or service.
* report_id: (int) ID of the DBM report to fetch (pass either this or name).
* name: (string) Name of the report to fetch (pass either this or report_id).
Returns:
* JSON definition of report.
|
github-repos
|
def convert_x_www_form_urlencoded_to_dict(post_data):
if isinstance(post_data, str):
converted_dict = {}
for k_v in post_data.split("&"):
try:
key, value = k_v.split("=")
except ValueError:
raise Exception(
"Invalid x_www_form_urlencoded data format: {}".format(post_data)
)
converted_dict[key] = unquote(value)
return converted_dict
else:
return post_data
|
convert x_www_form_urlencoded data to dict
Args:
post_data (str): a=1&b=2
Returns:
dict: {"a":1, "b":2}
|
juraj-google-style
|
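For simple payloads the standard library performs the same split-and-unquote; the sketch below compares the manual approach used above with urllib.parse.parse_qsl (assumed equivalent for plain key=value pairs without '+' encoding):

from urllib.parse import parse_qsl, unquote

post_data = "a=1&b=hello%20world"

# Manual split, mirroring the function above.
manual = {k: unquote(v)
          for k, v in (pair.split("=") for pair in post_data.split("&"))}

# Standard-library equivalent for simple key=value payloads.
parsed = dict(parse_qsl(post_data))

assert manual == parsed == {"a": "1", "b": "hello world"}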
def loss_contrastive(self, contrastive_queries_logits: Tensor, text_queries: Tensor):
image_queries = contrastive_queries_logits.float()
image_queries = nn.functional.normalize(image_queries.flatten(1), dim=-1)
text_queries = nn.functional.normalize(text_queries.flatten(1), dim=-1)
logit_scale = torch.clamp(self.logit_scale.exp(), max=100)
logits_per_text = torch.matmul(text_queries, image_queries.t()) * logit_scale
logits_per_img = logits_per_text.t()
loss_img = nn.functional.cross_entropy(logits_per_img, torch.arange(len(logits_per_img), device=logits_per_text.device))
loss_text = nn.functional.cross_entropy(logits_per_text, torch.arange(len(logits_per_text), device=logits_per_text.device))
loss_contrastive = loss_img + loss_text
losses = {'loss_contrastive': loss_contrastive}
return losses
|
Compute the query-text contrastive loss.
Args:
contrastive_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
text_queries (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
Returns:
`Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
- **loss_contrastive** -- The query-text contrastive loss computed using task-guided queries
and text queries derived from input text list.
|
github-repos
|
def read_value(self, varname, path='/', cmode=None, default=NO_DEFAULT):
try:
var = self.read_variable(varname, path=path)
except self.Error:
if (default is NO_DEFAULT):
raise
return default
if (cmode is None):
try:
return (var.getValue()[0] if (not var.shape) else var[:])
except IndexError:
return (var.getValue() if (not var.shape) else var[:])
else:
assert (var.shape[(- 1)] == 2)
if (cmode == 'c'):
return (var[(..., 0)] + (1j * var[(..., 1)]))
else:
raise ValueError(('Wrong value for cmode %s' % cmode))
|
Returns the values of variable with name varname in the group specified by path.
Args:
varname: Name of the variable
path: path to the group.
cmode: if cmode=="c", a complex ndarrays is constructed and returned
(netcdf does not provide native support from complex datatype).
default: returns default if varname is not present.
self.Error is raised if default is NO_DEFAULT.
Returns:
numpy array if varname represents an array, scalar otherwise.
|
codesearchnet
|
def GetFileEntryByPathSpec(self, path_spec):
return data_range_file_entry.DataRangeFileEntry(
self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
DataRangeFileEntry: a file entry or None if not available.
|
juraj-google-style
|
def isfinite(x):
if any_symbolic_tensors((x,)):
return Isfinite().symbolic_call(x)
return backend.numpy.isfinite(x)
|
Return whether a tensor is finite, element-wise.
Real values are finite when they are not NaN, not positive infinity, and
not negative infinity. Complex values are finite when both their real
and imaginary parts are finite.
Args:
x: Input tensor.
Returns:
Output boolean tensor.
|
github-repos
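A tiny demonstration of the element-wise semantics, using plain numpy (which the eager backend call above resembles) rather than the Keras symbolic path: NaN and positive or negative infinity are not finite, ordinary floats are.

# Element-wise finiteness check with numpy, for illustration.
import numpy as np

x = np.array([1.0, np.nan, np.inf, -np.inf, 0.0])
print(np.isfinite(x))  # [ True False False False  True]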
|
def create_string_array(self, key, value):
data = None
if ((key is not None) and (value is not None)):
if isinstance(value, list):
data = self.db.create(key.strip(), json.dumps(value))
else:
data = self.db.create(key.strip(), value)
else:
self.tcex.log.warning(u'The key or value field was None.')
return data
|
Create method of CRUD operation for string array data.
Args:
key (string): The variable to write to the DB.
value (any): The data to write to the DB.
Returns:
(string): Result of DB write.
|
codesearchnet
|
def assertProtoEqual(self, a, b, check_initialized=True, normalize_numbers=False, msg=None, relative_tolerance=None):
pool = descriptor_pool.Default()
if isinstance(a, str):
a = text_format.Parse(a, b.__class__(), descriptor_pool=pool)
for pb in (a, b):
if check_initialized:
errors = pb.FindInitializationErrors()
if errors:
self.fail('Initialization errors: %s\n%s' % (errors, pb))
if normalize_numbers:
NormalizeNumberFields(pb)
if relative_tolerance is not None:
checkFloatEqAndReplace(self, expected=b, actual=a, relative_tolerance=relative_tolerance)
a_str = text_format.MessageToString(a, descriptor_pool=pool)
b_str = text_format.MessageToString(b, descriptor_pool=pool)
if len(a_str) < 2 ** 16 and len(b_str) < 2 ** 16:
self.assertMultiLineEqual(a_str, b_str, msg=msg)
else:
diff = ''.join(difflib.unified_diff(a_str.splitlines(True), b_str.splitlines(True)))
if diff:
self.fail('%s :\n%s' % (msg, diff))
|
Fails with a useful error if a and b aren't equal.
Comparison of repeated fields matches the semantics of
unittest.TestCase.assertEqual(), i.e. order and extra duplicate fields matter.
Args:
self: googletest.TestCase
a: proto2 PB instance, or text string representing one.
b: proto2 PB instance -- message.Message or subclass thereof.
check_initialized: boolean, whether to fail if either a or b isn't
initialized.
normalize_numbers: boolean, whether to normalize types and precision of
numbers before comparison.
msg: if specified, is used as the error message on failure.
    relative_tolerance: float, relative tolerance. If this is not provided,
        all floats are compared using string comparison; otherwise, floating-point
        comparisons are done using the relative tolerance provided.
|
github-repos
|
def iter_acgt_geno_marker(self, markers):
for snp, geno, s in self.iter_geno_marker(markers, return_index=True):
yield snp, self._allele_encoding[s][geno]
|
Iterates over genotypes for a list of markers (ACGT format).
Args:
markers (list): The list of markers to iterate onto.
Returns:
tuple: The name of the marker as a string, and its genotypes as a
:py:class:`numpy.ndarray` (ACGT format).
|
juraj-google-style
|
def value_report(self, address, zipcode, report_type='full', format_type='json'):
query_params = {'report_type': report_type, 'format': format_type, 'address': address, 'zipcode': zipcode}
return self._api_client.fetch_synchronous('property/value_report', query_params)
|
Call the value_report component
Value Report only supports a single address.
Args:
- address
- zipcode
Kwargs:
- report_type - "full" or "summary". Default is "full".
- format_type - "json", "pdf", "xlsx" or "all". Default is "json".
|
codesearchnet
|
def fft_mesh(self, kpoint, band, spin=0, shift=True):
    mesh = np.zeros(tuple(self.ng), dtype=complex)  # np.complex was removed from modern numpy
    tcoeffs = (self.coeffs[spin][kpoint][band] if (self.spin == 2) else self.coeffs[kpoint][band])
    for (gp, coeff) in zip(self.Gpoints[kpoint], tcoeffs):
        t = tuple((gp.astype(int) + (self.ng / 2).astype(int)))
mesh[t] = coeff
if shift:
return np.fft.ifftshift(mesh)
else:
return mesh
|
Places the coefficients of a wavefunction onto an fft mesh.
Once the mesh has been obtained, a discrete fourier transform can be
used to obtain real-space evaluation of the wavefunction. The output
of this function can be passed directly to numpy's fft function. For
example:
mesh = Wavecar('WAVECAR').fft_mesh(kpoint, band)
evals = np.fft.ifftn(mesh)
Args:
kpoint (int): the index of the kpoint where the wavefunction
will be evaluated
band (int): the index of the band where the wavefunction will be
evaluated
spin (int): the spin of the wavefunction for the desired
wavefunction (only for ISPIN = 2, default = 0)
shift (bool): determines if the zero frequency coefficient is
placed at index (0, 0, 0) or centered
Returns:
a numpy ndarray representing the 3D mesh of coefficients
|
codesearchnet
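A self-contained numpy sketch of the mesh-filling idea: each plane-wave coefficient is placed at its G-vector index offset to the mesh centre, the mesh is optionally ifftshift-ed so G=0 sits at index (0, 0, 0), and an inverse FFT yields real-space values. The grid size, G-vectors, and coefficients below are made-up stand-ins, not WAVECAR data.

# Place plane-wave coefficients on an FFT mesh and transform to real space.
import numpy as np

ng = np.array([8, 8, 8])                      # FFT grid dimensions (illustrative)
gpoints = np.array([[0, 0, 0], [1, 0, 0], [0, -1, 2]])
coeffs = np.array([1.0 + 0j, 0.5j, -0.25])

mesh = np.zeros(tuple(ng), dtype=complex)
for gp, c in zip(gpoints, coeffs):
    mesh[tuple(gp + ng // 2)] = c             # offset so G=0 sits at the centre

mesh = np.fft.ifftshift(mesh)                 # move G=0 to index (0, 0, 0)
psi = np.fft.ifftn(mesh)                      # real-space values on the grid
print(psi.shape)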
|
def embed(self, url, format='json', **opt):
if (format not in ['json', 'xml']):
raise OEmbedInvalidRequest('Format must be json or xml')
opt['format'] = format
return self._request(url, **opt)
|
Get an OEmbedResponse from one of the providers configured in this
consumer according to the resource url.
Args:
url: The url of the resource to get.
format: Desired response format.
**opt: Optional parameters to pass in the url to the provider.
Returns:
OEmbedResponse object.
|
codesearchnet
|
def _streaming_request_iterable(self, config, requests):
(yield self.types.StreamingRecognizeRequest(streaming_config=config))
for request in requests:
(yield request)
|
A generator that yields the config followed by the requests.
Args:
config (~.speech_v1.types.StreamingRecognitionConfig): The
configuration to use for the stream.
requests (Iterable[~.speech_v1.types.StreamingRecognizeRequest]):
The input objects.
Returns:
Iterable[~.speech_v1.types.StreamingRecognizeRequest]): The
correctly formatted input for
:meth:`~.speech_v1.SpeechClient.streaming_recognize`.
|
codesearchnet
|
def __init__(self, code):
message = code
self.code = None
if util.is_integer(code):
message = self.to_string(code)
self.code = code
super(JLinkException, self).__init__(message)
self.message = message
|
Generates an exception by coercing the given ``code`` to an error
string if it is a number; otherwise assumes it is the message.
Args:
self (JLinkException): the 'JLinkException' instance
code (object): message or error code
Returns:
``None``
|
juraj-google-style
|
def path_new_using_function(
w: int,
h: int,
func: Callable[[int, int, int, int, Any], float],
userData: Any = 0,
dcost: float = 1.41,
) -> tcod.path.AStar:
return tcod.path.AStar(
tcod.path._EdgeCostFunc((func, userData), (w, h)), dcost
)
|
Return a new AStar using the given callable function.
Args:
w (int): Clipping width.
h (int): Clipping height.
func (Callable[[int, int, int, int, Any], float]):
userData (Any):
dcost (float): A multiplier for the cost of diagonal movement.
Can be set to 0 to disable diagonal movement.
Returns:
AStar: A new AStar instance.
|
juraj-google-style
|
def peek_native(make):
def peek(service, container, _stack=None):
return make(service.peekNative(container))
return peek
|
Deserializer factory for types which state can be natively serialized.
Arguments:
make (callable): type constructor.
Returns:
callable: deserializer (`peek` routine)
|
juraj-google-style
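A sketch of the closure-based deserializer-factory pattern shown above. The FakeService class and its peekNative method are hypothetical stand-ins for the real service object, used only to make the example runnable.

# Closure-based deserializer factory with a stand-in service object.
def peek_native(make):
    def peek(service, container, _stack=None):
        return make(service.peekNative(container))
    return peek

class FakeService:
    def peekNative(self, container):
        return container["payload"]

peek_int = peek_native(int)                         # deserializer that builds ints
print(peek_int(FakeService(), {"payload": "42"}))   # 42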
|
def read_records(self, file_name, offset_range_tracker):
raise NotImplementedError
|
Returns a generator of records created by reading file 'file_name'.
Args:
file_name: a ``string`` that gives the name of the file to be read. Method
``FileBasedSource.open_file()`` must be used to open the file
and create a seekable file object.
    offset_range_tracker: an object of type ``OffsetRangeTracker``. This
defines the byte range of the file that should be
read. See documentation in
``iobase.BoundedSource.read()`` for more information
on reading records while complying to the range
defined by a given ``RangeTracker``.
Returns:
an iterator that gives the records read from the given file.
|
github-repos
|
def get_config_parameter_loglevel(config: ConfigParser,
section: str,
param: str,
default: int) -> int:
try:
value = config.get(section, param).lower()
if value == "debug":
return logging.DEBUG
elif value == "info":
return logging.INFO
elif value in ["warn", "warning"]:
return logging.WARN
elif value == "error":
return logging.ERROR
elif value in ["critical", "fatal"]:
return logging.CRITICAL
else:
raise ValueError
except (TypeError, ValueError, NoOptionError, AttributeError):
log.warning(
"Configuration variable {} not found or improper in section [{}]; "
"using default of {!r}", param, section, default)
return default
|
Get ``loglevel`` parameter from ``configparser`` ``.INI`` file, e.g.
mapping ``'debug'`` to ``logging.DEBUG``.
Args:
config: :class:`ConfigParser` object
section: section name within config file
param: name of parameter within section
default: default value
Returns:
parameter value, or default
|
juraj-google-style
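A minimal sketch of the same idea: read a loglevel name from an INI section and map it to a logging constant, falling back to a default. The section and option names are illustrative, and a plain dict lookup replaces the if/elif chain above.

# Map an INI loglevel string to a logging constant with a default fallback.
import logging
from configparser import ConfigParser

LEVELS = {"debug": logging.DEBUG, "info": logging.INFO, "warn": logging.WARN,
          "warning": logging.WARN, "error": logging.ERROR,
          "critical": logging.CRITICAL, "fatal": logging.CRITICAL}

config = ConfigParser()
config.read_string("[server]\nloglevel = info\n")
value = config.get("server", "loglevel", fallback="").lower()
level = LEVELS.get(value, logging.WARNING)
print(level == logging.INFO)  # True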
|
def exit(tensor, name=None):
tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True)
if isinstance(tensor, tensor_lib.Tensor):
if tensor.dtype._is_ref_dtype:
return gen_control_flow_ops.ref_exit(tensor, name)
else:
return gen_control_flow_ops._exit(tensor, name)
elif isinstance(tensor, composite_tensor.CompositeTensor):
return nest.map_structure(exit, tensor, expand_composites=True)
else:
raise TypeError(f"'tensor' must be a Tensor or CompositeTensor. Received: {type(tensor)}.")
|
Exits the current frame to its parent frame.
Exit makes its input `tensor` available to the parent frame.
Args:
tensor: The tensor to be made available to the parent frame.
name: A name for this operation (optional).
Returns:
The same tensor as `tensor`.
|
github-repos
|
def __expand_meta_datas(meta_datas, meta_datas_expanded):
if isinstance(meta_datas, dict):
meta_datas_expanded.append(meta_datas)
elif isinstance(meta_datas, list):
for meta_data in meta_datas:
__expand_meta_datas(meta_data, meta_datas_expanded)
|
expand meta_datas to one level
Args:
meta_datas (dict/list): maybe in nested format
Returns:
list: expanded list in one level
Examples:
>>> meta_datas = [
[
dict1,
dict2
],
dict3
]
>>> meta_datas_expanded = []
>>> __expand_meta_datas(meta_datas, meta_datas_expanded)
>>> print(meta_datas_expanded)
[dict1, dict2, dict3]
|
juraj-google-style
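A runnable version of the docstring example: nested lists of dicts are flattened into a single one-level list. The dict contents are placeholders.

# Flatten arbitrarily nested lists of dicts into one flat list.
def expand(meta_datas, out):
    if isinstance(meta_datas, dict):
        out.append(meta_datas)
    elif isinstance(meta_datas, list):
        for item in meta_datas:
            expand(item, out)

expanded = []
expand([[{"a": 1}, {"b": 2}], {"c": 3}], expanded)
print(expanded)  # [{'a': 1}, {'b': 2}, {'c': 3}]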
|
def DecodeMessages(self, response_comms):
cipher_verified = False
try:
cipher = self.encrypted_cipher_cache.Get(response_comms.encrypted_cipher)
stats_collector_instance.Get().IncrementCounter('grr_encrypted_cipher_cache', fields=['hits'])
cipher.VerifyReceivedHMAC(response_comms)
cipher_verified = True
source = cipher.GetSource()
remote_public_key = self._GetRemotePublicKey(source)
except KeyError:
stats_collector_instance.Get().IncrementCounter('grr_encrypted_cipher_cache', fields=['misses'])
cipher = ReceivedCipher(response_comms, self.private_key)
source = cipher.GetSource()
try:
remote_public_key = self._GetRemotePublicKey(source)
if cipher.VerifyCipherSignature(remote_public_key):
self.encrypted_cipher_cache.Put(response_comms.encrypted_cipher, cipher)
cipher_verified = True
except UnknownClientCertError:
remote_public_key = None
plain = cipher.Decrypt(response_comms.encrypted, response_comms.packet_iv)
try:
packed_message_list = rdf_flows.PackedMessageList.FromSerializedString(plain)
except rdfvalue.DecodeError as e:
raise DecryptionError(e)
message_list = self.DecompressMessageList(packed_message_list)
auth_state = self.VerifyMessageSignature(response_comms, packed_message_list, cipher, cipher_verified, response_comms.api_version, remote_public_key)
for msg in message_list.job:
msg.auth_state = auth_state
msg.source = cipher.cipher_metadata.source
return (message_list.job, cipher.cipher_metadata.source, packed_message_list.timestamp)
|
Extract and verify server message.
Args:
response_comms: A ClientCommunication rdfvalue
Returns:
list of messages and the CN where they came from.
Raises:
DecryptionError: If the message failed to decrypt properly.
|
codesearchnet
|
def _wordcount_test_preprocessor(test_spec: dict, expected: List[str], env: TestEnvironment):
all_words = []
for element in expected:
word = element.split('=')[1].split(',')[0].replace("'", '')
count = int(element.split('=')[2].replace(')', ''))
all_words += [word] * count
random.shuffle(all_words)
lines = []
while all_words:
line_length = random.randint(1, min(10, len(all_words)))
line = ' '.join((all_words.pop(random.randrange(len(all_words))) for _ in range(line_length)))
lines.append(line)
return replace_recursive(test_spec, 'ReadFromText', 'path', env.input_file('kinglear.txt', '\n'.join(lines)))
|
Preprocessor for the wordcount_minimal.yaml test.
This preprocessor generates a random input file based on the expected output
of the wordcount example. This allows the test to verify the pipeline's
correctness without relying on a fixed input file.
Args:
test_spec: The dictionary representation of the YAML pipeline specification.
expected: A list of strings representing the expected output of the
pipeline.
env: The TestEnvironment object providing utilities for creating temporary
files.
Returns:
The modified test_spec dictionary with the input file path replaced.
|
github-repos
|
class MllamaVisionEncoder(nn.Module):
def __init__(self, config: MllamaVisionConfig, num_layers=32, is_gated=False):
super().__init__()
self.config = config
self.layers = nn.ModuleList([MllamaVisionEncoderLayer(config, is_gated) for _ in range(num_layers)])
self.gradient_checkpointing = False
self.config = config
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, output_attentions)
else:
layer_outputs = encoder_layer(hidden_state=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = layer_outputs[0]
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`MllamaEncoderLayer`].
Args:
config: MllamaConfig
|
github-repos
|
def icon_description(self, **kwargs):
params = {'language': util.language_code(kwargs.get('lang'))}
result = self.make_request('icon_description', {}, **params)
if not util.check_result(result):
return False, result.get('message', 'UNKNOWN ERROR')
values = util.response_list(result, 'Data')
return True, [emtype.IconDescription(**a) for a in values]
|
Obtain a list of elements that have an associated icon.
Args:
lang (str): Language code (*es* or *en*).
Returns:
Status boolean and parsed response (list[IconDescription]), or
message string in case of error.
|
juraj-google-style
|
def convert_dicts(d, to_class=AttrDictWrapper, from_class=dict):
d_ = to_class()
    for key, value in d.items():  # iteritems() is Python 2 only
if isinstance(value, from_class):
d_[key] = convert_dicts(value, to_class=to_class,
from_class=from_class)
else:
d_[key] = value
return d_
|
Recursively convert dict and UserDict types.
Note that `d` is unchanged.
Args:
    d (dict-like): Source dict to convert.
    to_class (type): Dict-like type to convert values to, usually a UserDict
        subclass, or dict.
    from_class (type): Dict-like type to convert values from. If a tuple,
        multiple types are converted.
Returns:
Converted data as `to_class` instance.
|
juraj-google-style
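A Python 3 sketch of the same recursive conversion, using .items(); collections.UserDict stands in for the AttrDictWrapper target class used above, which is not defined here.

# Recursively convert plain dicts into a UserDict-based wrapper type.
from collections import UserDict

def convert_dicts(d, to_class=UserDict, from_class=dict):
    d_ = to_class()
    for key, value in d.items():
        d_[key] = (convert_dicts(value, to_class, from_class)
                   if isinstance(value, from_class) else value)
    return d_

result = convert_dicts({"a": 1, "nested": {"b": 2}})
print(type(result), type(result["nested"]))  # UserDict at both levels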
|
def parse_args(args):
parser = argparse.ArgumentParser(
        description="Just a Fibonacci demonstration")
parser.add_argument(
'--version',
action='version',
version='nlpia {ver}'.format(ver=__version__))
parser.add_argument(
dest="n",
help="n-th Fibonacci number",
type=int,
metavar="INT")
parser.add_argument(
'-v',
'--verbose',
dest="loglevel",
help="set loglevel to INFO",
action='store_const',
const=logging.INFO)
parser.add_argument(
'-vv',
'--very-verbose',
dest="loglevel",
help="set loglevel to DEBUG",
action='store_const',
const=logging.DEBUG)
return parser.parse_args(args)
|
Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
|
juraj-google-style
|