code (string, 20-4.93k chars) | docstring (string, 33-1.27k chars) | source (string, 3 classes)
---|---|---|
def from_dict(cls, copula_dict):
instance = cls(copula_dict['copula_type'])
instance.theta = copula_dict['theta']
instance.tau = copula_dict['tau']
return instance | Create a new instance from the given parameters.
Args:
copula_dict: `dict` with the parameters to replicate the copula.
Like the output of `Bivariate.to_dict`
Returns:
Bivariate: Instance of the copula defined on the parameters. | juraj-google-style |
def should_submit(stack):
if stack.enabled:
return True
logger.debug("Stack %s is not enabled. Skipping.", stack.name)
return False | Tests whether a stack should be submitted to CF for update/create
Args:
stack (:class:`stacker.stack.Stack`): The stack object to check.
Returns:
bool: If the stack should be submitted, return True. | juraj-google-style |
def findLabel(self, query, create=False):
if isinstance(query, six.string_types):
query = query.lower()
for label in self._labels.values():
if (isinstance(query, six.string_types) and query == label.name.lower()) or \
(isinstance(query, Pattern) and query.search(label.name)):
return label
return self.createLabel(query) if create and isinstance(query, six.string_types) else None | Find a label with the given name.
Args:
query (Union[_sre.SRE_Pattern, str]): A str or regular expression to match against the label name.
create (bool): Whether to create the label if it doesn't exist (only if name is a str).
Returns:
Union[gkeepapi.node.Label, None]: The label. | juraj-google-style |
def __eof_qubit(rho):
c = concurrence(rho)
c = 0.5 + 0.5 * np.sqrt(1 - c * c)
return shannon_entropy([c, 1 - c]) | Compute the Entanglement of Formation of a 2-qubit density matrix.
Args:
rho (array_like): (4,4) array_like, input density matrix.
Returns:
float: The entanglement of formation. | juraj-google-style |
def convert_variables_to_constants_v2(func, lower_control_flow=True, aggressive_inlining=False):
converter_data = _FunctionConverterDataInEager(func=func, lower_control_flow=lower_control_flow, aggressive_inlining=aggressive_inlining)
output_graph_def, converted_input_indices = _replace_variables_by_constants(converter_data=converter_data)
return _construct_concrete_function(func, output_graph_def, converted_input_indices) | Replaces all the variables in a graph with constants of the same values.
TensorFlow 2.0 function for converting all Variable ops into Const ops holding
the same values. This makes it possible to describe the network fully with a
single GraphDef file, and allows the removal of a lot of ops related to
loading and saving the variables. This function runs Grappler's function
inlining optimization in order to return a single subgraph.
The current implementation only works for graphs that do not contain any
control flow or embedding related ops.
Args:
func: ConcreteFunction.
lower_control_flow: Boolean indicating whether or not to lower control flow
ops such as If and While. (default True)
aggressive_inlining: Boolean indicating whether or not to do aggressive
function inlining (might be unsafe if function has stateful ops, not
properly connected to control outputs). (default False)
Returns:
ConcreteFunction containing a simplified version of the original. | github-repos |
def strip_string(self, string, *args):
res = string
for r in args:
res = re.sub(r, '', res.strip(), flags=(re.IGNORECASE | re.MULTILINE))
return res.strip() | Strips matching regular expressions from string
Keyword arguments:
string -- The given string, that will be stripped
*args -- List of regex strings, that are used in parsing
Returns:
String with *args removed from string | codesearchnet |
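A minimal usage sketch of the stripping logic above, rewritten as a free function (the original is a method taking self); the sample string and patterns are made up for illustration.
import re

def strip_string(string, *args):
    # Strip every regex in *args from the string, case-insensitive and multiline.
    res = string
    for r in args:
        res = re.sub(r, '', res.strip(), flags=re.IGNORECASE | re.MULTILINE)
    return res.strip()

print(strip_string('  Remind me to buy milk!  ', r'^remind me to', r'[!.]+$'))
# prints: buy milk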
def calculate_expiration(self, token):
if not token:
return None
now = datetime.utcnow()
time_to_live = self.config["expiration"]
if "exp" not in token:
return now + timedelta(seconds=time_to_live)
elif self.config["refresh"]:
exp = datetime.utcfromtimestamp(token["exp"])
if exp - now < timedelta(seconds=0.5 * time_to_live):
return now + timedelta(seconds=time_to_live)
return None | Calculate token expiration
return expiration if the token need to set expiration or refresh,
otherwise return None.
Args:
token (dict): a decoded token | juraj-google-style |
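A self-contained sketch of the refresh rule above, with self.config replaced by plain keyword arguments (the names expiration and refresh are assumptions); it shows that a token past the halfway point of its lifetime gets a new expiration while a fresh one does not.
import time
from datetime import datetime, timedelta

def calculate_expiration(token, expiration=3600, refresh=True):
    if not token:
        return None
    now = datetime.utcnow()
    if 'exp' not in token:
        return now + timedelta(seconds=expiration)
    elif refresh:
        exp = datetime.utcfromtimestamp(token['exp'])
        if exp - now < timedelta(seconds=0.5 * expiration):
            return now + timedelta(seconds=expiration)
    return None

fresh = {'exp': time.time() + 2 * 3600}   # expires in two hours
stale = {'exp': time.time() + 5 * 60}     # expires in five minutes
print(calculate_expiration(fresh))        # None: more than half the lifetime remains
print(calculate_expiration(stale))        # a new expiration datetime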
def _AcceptRPC(self):
request = self._ReadObject()
if (request['func'] == '__kill__'):
self.ClearBreakpoints()
self._WriteObject('__kill_ack__')
return False
if (('func' not in request) or request['func'].startswith('_')):
raise RpcException('Not a valid public API function.')
rpc_result = getattr(self, request['func'])(*request['args'])
self._WriteObject(rpc_result)
return True | Reads RPC request from stdin and processes it, writing result to stdout.
Returns:
True as long as execution is to be continued, False otherwise.
Raises:
RpcException: if no function was specified in the RPC or no such API
function exists. | codesearchnet |
def delete_with_casper_admin_save(self, pkg):
if pkg.__class__.__name__ == "Package":
package_to_delete = pkg.id
elif isinstance(pkg, int):
package_to_delete = pkg
elif isinstance(pkg, str):
package_to_delete = self.connection["jss"].Package(pkg).id
else:
raise TypeError
data_dict = {"username": self.connection["jss"].user,
"password": self.connection["jss"].password,
"deletedPackageID": package_to_delete}
self.connection["jss"].session.post(url=self.connection["delete_url"],
data=data_dict) | Delete a pkg from the distribution server.
Args:
pkg: Can be a jss.Package object, an int ID of a package, or
a filename. | juraj-google-style |
def call_requests(requests: Union[(Request, Iterable[Request])], methods: Methods, debug: bool) -> Response:
if isinstance(requests, collections.Iterable):
return BatchResponse((safe_call(r, methods, debug=debug) for r in requests))
return safe_call(requests, methods, debug=debug) | Takes a request or list of Requests and calls them.
Args:
requests: Request object, or a collection of them.
methods: The list of methods that can be called.
debug: Include more information in error responses. | codesearchnet |
def print_stats(self, reset=True):
if not self.ncalls:
return
stats = self.stats
code = self.fn.__code__
print('--- Function Profiling ---')
print('File "{}", line {}, function {}'.format(
code.co_filename,
code.co_firstlineno,
self.fn.__name__))
stats.sort_stats(*self.sort_keys)
stats.print_stats(*self.print_restrictions)
print('--------------------------')
if reset:
self.reset_stats() | Manually print profiling result.
Args:
reset (bool): If False is specified, the profiling statistics so
far is maintained. If ``True`` (default),
:obj:`~reset_stats`
is called to reset the profiling statistics. | juraj-google-style |
def update(self, other, **kwargs):
assert isinstance(other, type(self)), 'Must have the same DataManager subclass to perform this operation'
def update_builder(df, other, **kwargs):
df = df.copy()
df.update(other, **kwargs)
return df
return self._inter_df_op_handler(update_builder, other, **kwargs) | Uses other manager to update corresponding values in this manager.
Args:
other: The other manager.
Returns:
New DataManager with updated data and index. | codesearchnet |
def __init__(self, obj_to_invoke, method_name):
if not isinstance(obj_to_invoke, (DoFn, RestrictionProvider, WatermarkEstimatorProvider)):
raise ValueError("'obj_to_invoke' has to be either a 'DoFn' or a 'RestrictionProvider'. Received %r instead." % obj_to_invoke)
self.args, self.defaults = core.get_function_arguments(obj_to_invoke, method_name)
self.method_value = getattr(obj_to_invoke, method_name)
self.method_name = method_name
self.has_userstate_arguments = False
self.state_args_to_replace = {}
self.timer_args_to_replace = {}
self.timestamp_arg_name = None
self.window_arg_name = None
self.key_arg_name = None
self.restriction_provider = None
self.restriction_provider_arg_name = None
self.watermark_estimator_provider = None
self.watermark_estimator_provider_arg_name = None
self.dynamic_timer_tag_arg_name = None
if hasattr(self.method_value, 'unbounded_per_element'):
self.unbounded_per_element = True
else:
self.unbounded_per_element = False
for kw, v in zip(self.args[-len(self.defaults):], self.defaults):
if isinstance(v, core.DoFn.StateParam):
self.state_args_to_replace[kw] = v.state_spec
self.has_userstate_arguments = True
elif isinstance(v, core.DoFn.TimerParam):
self.timer_args_to_replace[kw] = v.timer_spec
self.has_userstate_arguments = True
elif core.DoFn.TimestampParam == v:
self.timestamp_arg_name = kw
elif core.DoFn.WindowParam == v:
self.window_arg_name = kw
elif core.DoFn.WindowedValueParam == v:
self.window_arg_name = kw
elif core.DoFn.KeyParam == v:
self.key_arg_name = kw
elif isinstance(v, core.DoFn.RestrictionParam):
self.restriction_provider = v.restriction_provider or obj_to_invoke
self.restriction_provider_arg_name = kw
elif isinstance(v, core.DoFn.WatermarkEstimatorParam):
self.watermark_estimator_provider = v.watermark_estimator_provider or obj_to_invoke
self.watermark_estimator_provider_arg_name = kw
elif core.DoFn.DynamicTimerTagParam == v:
self.dynamic_timer_tag_arg_name = kw
if self.watermark_estimator_provider is None:
self.watermark_estimator_provider = NoOpWatermarkEstimatorProvider() | Initiates a ``MethodWrapper``.
Args:
obj_to_invoke: the object that contains the method. Has to either be a
`DoFn` object or a `RestrictionProvider` object.
method_name: name of the method as a string. | github-repos |
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]]=None):
logits = outputs.logits
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation | Converts the output of [`DPTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.
Args:
outputs ([`DPTForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` correspond to a semantic class id. | github-repos |
def parse_tmhmm_long(tmhmm_results):
with open(tmhmm_results) as f:
lines = f.read().splitlines()
infodict = defaultdict(dict)
for l in lines:
if 'Number of predicted TMHs:' in l:
gene = l.split(' Number')[0].strip('# ')
infodict[gene]['num_tm_helices'] = int(l.split(': ')[1])
if 'WARNING' in l:
log.warning('{}: no TMHMM predictions'.format(l))
continue
if '#' not in l:
stuff = l.split()
if stuff[1] == 'TMHMM2.0':
gene = stuff[0]
region = stuff[2]
region_start = stuff[3]
region_end = stuff[4]
if 'sequence' in infodict[gene]:
tm_seq = infodict[gene]['sequence']
else:
tm_seq = ''
if region == 'outside':
info = 'O'
elif region == 'inside':
info = 'I'
elif region == 'TMhelix':
info = 'T'
else:
log.error('{}: unknown region type'.format(info))
info = '-'
for r in range(int(region_start), int(region_end) + 1):
tm_seq += info
infodict[gene]['sequence'] = tm_seq
return infodict | Parse the 'long' output format of TMHMM and return a dictionary of ``{sequence_ID: TMHMM_prediction}``.
Args:
tmhmm_results (str): Path to long format TMHMM output.
Returns:
dict: Dictionary of ``{sequence_ID: TMHMM_prediction}`` | juraj-google-style |
def _repeat(values, count):
return [[value] * value for value in np.tile(values, count)] | Produces a list of lists suitable for testing interleave.
Args:
values: for each element `x` the result contains `[x] * x`
count: determines how many times to repeat `[x] * x` in the result
Returns:
A list of lists of values suitable for testing interleave. | github-repos |
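A quick check of the helper above; the output is one [x] * x sublist per element of the tiled input (element repr may vary with the NumPy version).
import numpy as np

def _repeat(values, count):
    return [[value] * value for value in np.tile(values, count)]

print(_repeat([1, 2, 3], 2))
# [[1], [2, 2], [3, 3, 3], [1], [2, 2], [3, 3, 3]]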
def _write_except_dispatcher(self, exc, tb, handlers):
handler_labels = []
for i, except_node in enumerate(handlers):
handler_labels.append(self.block.genlabel())
if except_node.type:
with self.visit_expr(except_node.type) as type_,\
self.block.alloc_temp('bool') as is_inst:
self.writer.write_checked_call2(
is_inst, 'πg.IsInstance(πF, {}.ToObject(), {})', exc, type_.expr)
self.writer.write_tmpl(textwrap.dedent(), is_inst=is_inst.expr, label=handler_labels[-1])
else:
if i != len(handlers) - 1:
msg = "default 'except:' must be last"
raise util.ParseError(except_node, msg)
self.writer.write('goto Label{}'.format(handler_labels[-1]))
if handlers[-1].type:
self.writer.write(
'πE = πF.Raise({}.ToObject(), nil, {}.ToObject())'.format(exc, tb))
self.writer.write('continue')
return handler_labels | Outputs a Go code that jumps to the appropriate except handler.
Args:
exc: Go variable holding the current exception.
tb: Go variable holding the current exception's traceback.
handlers: A list of ast.ExceptHandler nodes.
Returns:
A list of Go labels indexes corresponding to the exception handlers.
Raises:
ParseError: Except handlers are in an invalid order. | juraj-google-style |
def display_upstream_structure(structure_dict):
graph = _create_graph(structure_dict)
plt = Image(graph.create_png())
display(plt) | Displays pipeline structure in the jupyter notebook.
Args:
structure_dict (dict): dict returned by
:func:`~steppy.base.Step.upstream_structure`. | juraj-google-style |
def find_mrms_tracks(self):
obs_objects = []
tracked_obs_objects = []
if (self.mrms_ew is not None):
self.mrms_grid.load_data()
if (len(self.mrms_grid.data) != len(self.hours)):
print('Less than 24 hours of observation data found')
return tracked_obs_objects
for (h, hour) in enumerate(self.hours):
mrms_data = np.zeros(self.mrms_grid.data[h].shape)
mrms_data[:] = np.array(self.mrms_grid.data[h])
mrms_data[(mrms_data < 0)] = 0
hour_labels = self.mrms_ew.size_filter(self.mrms_ew.label(gaussian_filter(mrms_data, self.gaussian_window)), self.size_filter)
hour_labels[(mrms_data < self.mrms_ew.min_thresh)] = 0
obj_slices = find_objects(hour_labels)
num_slices = len(obj_slices)
obs_objects.append([])
if (num_slices > 0):
for sl in obj_slices:
obs_objects[(- 1)].append(STObject(mrms_data[sl], np.where((hour_labels[sl] > 0), 1, 0), self.model_grid.x[sl], self.model_grid.y[sl], self.model_grid.i[sl], self.model_grid.j[sl], hour, hour, dx=self.model_grid.dx))
if (h > 0):
dims = obs_objects[(- 1)][(- 1)].timesteps[0].shape
obs_objects[(- 1)][(- 1)].estimate_motion(hour, self.mrms_grid.data[(h - 1)], dims[1], dims[0])
for (h, hour) in enumerate(self.hours):
past_time_objs = []
for obj in tracked_obs_objects:
if (obj.end_time == (hour - 1)):
past_time_objs.append(obj)
if (len(past_time_objs) == 0):
tracked_obs_objects.extend(obs_objects[h])
elif ((len(past_time_objs) > 0) and (len(obs_objects[h]) > 0)):
assignments = self.object_matcher.match_objects(past_time_objs, obs_objects[h], (hour - 1), hour)
unpaired = list(range(len(obs_objects[h])))
for pair in assignments:
past_time_objs[pair[0]].extend(obs_objects[h][pair[1]])
unpaired.remove(pair[1])
if (len(unpaired) > 0):
for up in unpaired:
tracked_obs_objects.append(obs_objects[h][up])
print('Tracked Obs Objects: {0:03d} Hour: {1:02d}'.format(len(tracked_obs_objects), hour))
return tracked_obs_objects | Identify objects from MRMS timesteps and link them together with object matching.
Returns:
List of STObjects containing MESH track information. | codesearchnet |
def set_parameter(self, name, value):
i = self.get_parameter_names(include_frozen=True).index(name)
v = self.get_parameter_vector(include_frozen=True)
v[i] = value
self.set_parameter_vector(v, include_frozen=True) | Set a parameter value by name
Args:
name: The name of the parameter
value (float): The new value for the parameter | juraj-google-style |
def regex_check(equation_str):
match1 = re.match(
r'^(([xy+\-*/()0-9. ]+|sin\(|cos\(|exp\(|log\()?)+$',
equation_str
)
match2 = re.match(r'^.*([xy]) *([xy]).*$', equation_str)
if match1 and not match2:
return True
raise BadInputError('Cannot parse entered equation') | A quick regular expression check to see that the input is sane
Args:
equation_str (str): String of equation to be parsed by sympify
function. Expected to be valid Python.
Raises:
BadInputError: If input does not look safe to parse as an equation. | juraj-google-style |
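A self-contained sketch of the whitelist check above; BadInputError is stood in by a local exception class, which is an assumption about the original type.
import re

class BadInputError(Exception):
    pass

def regex_check(equation_str):
    match1 = re.match(r'^(([xy+\-*/()0-9. ]+|sin\(|cos\(|exp\(|log\()?)+$', equation_str)
    match2 = re.match(r'^.*([xy]) *([xy]).*$', equation_str)
    if match1 and not match2:
        return True
    raise BadInputError('Cannot parse entered equation')

print(regex_check('sin(x) + 2*y'))     # True: only whitelisted tokens
try:
    regex_check('__import__("os")')    # rejected: characters outside the whitelist
except BadInputError as e:
    print(e)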
def process(self, batch, *args, **kwargs):
if (self.postprocessing is not None):
batch = self.postprocessing(batch)
return batch | Process a list of examples to create a batch.
Postprocess the batch with user-provided Pipeline.
Args:
batch (list(object)): A list of object from a batch of examples.
Returns:
object: Processed object given the input and custom
postprocessing Pipeline. | codesearchnet |
def state_name(self):
if (self.state == 1):
return 'New Issue'
elif (self.state == 2):
return 'Shutdown in 1 week'
elif (self.state == 3):
return 'Shutdown in 1 day'
elif (self.state == 4):
return 'Pending Shutdown'
elif (self.state == 5):
return 'Stopped, delete in 12 weeks'
elif (self.state == 6):
return 'Instance deleted'
else:
raise ValueError('Invalid state: {}'.format(self.state)) | Get a human-readable value of the state
Returns:
str: Name of the current state | codesearchnet |
def _authenticate(self):
csrf_token = self._get_csrf_token()
self._login(csrf_token)
domain_text_element = self._get_domain_text_of_authoritative_zone()
self.domain_id = self._get_domain_id(domain_text_element)
LOGGER.debug('Easyname domain ID: %s', self.domain_id)
return True | Authenticates against the Easyname website and tries to find out the domain
id.
Easyname uses a CSRF token in its login form, so two requests are
necessary to actually log in.
Returns:
bool: True if domain id was found.
Raises:
AssertionError: When a request returns unexpected or unknown data.
ValueError: When login data is wrong or the domain does not exist. | codesearchnet |
def _get_resource_hash(zone_name, record):
record_data = defaultdict(int, record)
if (type(record_data['GeoLocation']) == dict):
record_data['GeoLocation'] = ':'.join(['{}={}'.format(k, v) for (k, v) in record_data['GeoLocation'].items()])
args = [zone_name, record_data['Name'], record_data['Type'], record_data['Weight'], record_data['Region'], record_data['GeoLocation'], record_data['Failover'], record_data['HealthCheckId'], record_data['TrafficPolicyInstanceId']]
return get_resource_id('r53r', args) | Returns the last ten digits of the sha256 hash of the combined arguments. Useful for generating unique
resource IDs
Args:
zone_name (`str`): The name of the DNS Zone the record belongs to
record (`dict`): A record dict to generate the hash from
Returns:
`str` | codesearchnet |
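get_resource_id is not shown in this row; the sketch below is a hypothetical reconstruction based only on the docstring (last ten hex digits of a sha256 over the combined arguments). The join character and the prefix format are assumptions.
import hashlib

def get_resource_id(prefix, args):
    # Hypothetical: last ten hex digits of sha256 over the joined arguments.
    digest = hashlib.sha256('-'.join(str(a) for a in args).encode('utf-8')).hexdigest()
    return '{}-{}'.format(prefix, digest[-10:])

args = ['example.com.', 'www.example.com.', 'A', 0, '', '', '', '', '']
print(get_resource_id('r53r', args))   # e.g. 'r53r-<10 hex chars>'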
def create_bmi_config_file(self, filename: str = "bmi_config.txt") -> None:
s0 = self.construct_default_initial_state()
s0.to_csv(filename, index_label="variable") | Create a BMI config file to initialize the model.
Args:
filename: The filename with which the config file should be saved. | juraj-google-style |
def last_timestamp(self, event_key=None):
if (event_key is None):
timestamps = [self._trackers[key].last_timestamp for key in self._trackers]
return max((timestamp for timestamp in timestamps if (timestamp >= 0)))
else:
return self._trackers[event_key].last_timestamp | Obtain the last timestamp.
Args:
event_key: the type key of the sought events (e.g., constants.NAN_KEY). If
None, includes all event type keys.
Returns:
Last (latest) timestamp of all the events of the given type (or all
event types if event_key is None). | codesearchnet |
def page_length(self, length):
mH = length // 256
mL = length % 256
if length < 12000:
self.send(chr(27)+'('+'C'+chr(2)+chr(0)+chr(mL)+chr(mH))
else:
raise RuntimeError('Length must be less than 12000.') | Specifies page length. This command is only valid with continuous length labels.
Args:
length: The length of the page, in dots. Can't exceed 12000.
Returns:
None
Raises:
RuntimeError: Length must be less than 12000. | juraj-google-style |
def ModulePath(module_name):
module = importlib.import_module(module_name)
path = inspect.getfile(module)
if compatibility.PY2:
path = path.decode("utf-8")
if os.path.basename(path).startswith("__init__."):
path = os.path.dirname(path)
if path.endswith(".pyc"):
path = path[:-4] + ".py"
return path | Computes a path to the specified module.
Args:
module_name: A name of the module to get the path for.
Returns:
A path to the specified module.
Raises:
ImportError: If specified module cannot be imported. | juraj-google-style |
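A Python 3-only variant of ModulePath above (the PY2 decode branch dropped), showing the two interesting cases: a package resolves to its directory, a plain module to its .py file.
import importlib
import inspect
import os

def module_path(module_name):
    module = importlib.import_module(module_name)
    path = inspect.getfile(module)
    if os.path.basename(path).startswith('__init__.'):
        path = os.path.dirname(path)
    if path.endswith('.pyc'):
        path = path[:-4] + '.py'
    return path

print(module_path('json'))       # package -> directory containing __init__.py
print(module_path('json.tool'))  # plain module -> its .py file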
def of_type(self, classinfo):
if self.closed():
raise ValueError('Attempt to call of_type() on a closed Queryable.')
if (not is_type(classinfo)):
raise TypeError('of_type() parameter classinfo={0} is not a class object or a type objector a tuple of class or type objects.'.format(classinfo))
return self.where((lambda x: isinstance(x, classinfo))) | Filters elements according to whether they are of a certain type.
Note: This method uses deferred execution.
Args:
classinfo: If classinfo is neither a class object nor a type object
it may be a tuple of class or type objects, or may recursively
contain other such tuples (other sequence types are not
accepted).
Returns:
A Queryable over those elements of the source sequence for which
the predicate is True.
Raises:
ValueError: If the Queryable is closed.
TypeError: If classinfo is not a class, type, or tuple of classes,
types, and such tuples. | codesearchnet |
def __init__(self, drop_ffi_call_fn, initialized_ptr=None):
if initialized_ptr is not None:
self._ptr = initialized_ptr
else:
self._ptr = ctypes.c_void_p()
self._drop_ffi_fn = drop_ffi_call_fn | Constructs an owned pointer.
Initializing the pointer is left to the extending classes
Args:
drop_ffi_call_fn (str): the name of the FFI function to call on
drop or garbage collection.
initialized_ptr (ctypes.c_void_p:optional): a preinitialized
pointer to the native memory | juraj-google-style |
def add(app: web.Application, feature: Any, key: Hashable=None, exist_ok: bool=False):
if (FEATURES_KEY not in app):
app[FEATURES_KEY] = dict()
key = (key or type(feature))
if (key in app[FEATURES_KEY]):
if exist_ok:
return
else:
raise KeyError(f'Feature "{key}" already registered')
app[FEATURES_KEY][key] = feature | Adds a new feature to the app.
Features can either be registered as the default feature for the class,
or be given an explicit name.
Args:
app (web.Application):
The current Aiohttp application.
feature (Any):
The new feature that should be registered.
It is recommended, but not required to use a `ServiceFeature`.
key (Hashable, optional):
The key under which the feature should be registered.
Defaults to `type(feature)`.
exist_ok (bool):
If truthy, this function will do nothing if a feature was already registered for `key`.
Otherwise, an exception is raised. | codesearchnet |
def _assert_gcs_files(files):
if sys.version_info.major > 2:
string_type = (str, bytes)
else:
string_type = basestring
if isinstance(files, string_type):
files = [files]
for f in files:
if f is not None and not f.startswith('gs://'):
raise ValueError('File %s is not a gcs path' % f) | Check that files start with gs://.
Args:
files: string to file path, or list of file paths. | juraj-google-style |
def cd(new_directory, clean_up=(lambda : True)):
previous_directory = os.getcwd()
os.chdir(os.path.expanduser(new_directory))
try:
(yield)
finally:
os.chdir(previous_directory)
clean_up() | Changes into a given directory and cleans up after it is done
Args:
new_directory: The directory to change to
clean_up: A method to clean up the working directory once done | codesearchnet |
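The generator above is clearly meant to be used as a context manager; here is a runnable sketch with the @contextmanager decorator made explicit and a portable temp directory as the target.
import os
import tempfile
from contextlib import contextmanager

@contextmanager
def cd(new_directory, clean_up=lambda: True):
    previous_directory = os.getcwd()
    os.chdir(os.path.expanduser(new_directory))
    try:
        yield
    finally:
        os.chdir(previous_directory)
        clean_up()

with cd(tempfile.gettempdir()):
    print(os.getcwd())   # the temp directory
print(os.getcwd())       # back to the original directory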
async def _async_supervisor(func, animation_, step, *args, **kwargs):
with ThreadPoolExecutor(max_workers=2) as pool:
with _terminating_event() as event:
pool.submit(animate_cli, animation_, step, event)
result = (await func(*args, **kwargs))
return result | Supervisor for running an animation with an asynchronous function.
Args:
func: A function to be run alongside an animation.
animation_: An infinite generator that produces
strings for the animation.
step: Seconds between each animation frame.
*args: Arguments for func.
**kwargs: Keyword arguments for func.
Returns:
The result of func(*args, **kwargs)
Raises:
Any exception that is thrown when executing func. | codesearchnet |
def SerializeExclusiveData(self, writer):
writer.WriteByte(self.AssetType)
writer.WriteVarString(self.Name)
writer.WriteFixed8(self.Amount)
writer.WriteByte(self.Precision)
self.Owner.Serialize(writer)
writer.WriteUInt160(self.Admin) | Serialize object.
Args:
writer (neo.IO.BinaryWriter): | juraj-google-style |
def recognize(self, node: yaml.Node, expected_type: Type) -> RecResult:
logger.debug('Recognizing {} as a {}'.format(node, expected_type))
recognized_types = None
if (expected_type in [str, int, float, bool, bool_union_fix, datetime, None, type(None)]):
(recognized_types, message) = self.__recognize_scalar(node, expected_type)
elif is_generic_union(expected_type):
(recognized_types, message) = self.__recognize_union(node, expected_type)
elif is_generic_list(expected_type):
(recognized_types, message) = self.__recognize_list(node, expected_type)
elif is_generic_dict(expected_type):
(recognized_types, message) = self.__recognize_dict(node, expected_type)
elif (expected_type in self.__registered_classes.values()):
(recognized_types, message) = self.__recognize_user_classes(node, expected_type)
if (recognized_types is None):
raise RecognitionError('Could not recognize for type {}, is it registered?'.format(expected_type))
logger.debug('Recognized types {} matching {}'.format(recognized_types, expected_type))
return (recognized_types, message) | Figure out how to interpret this node.
This is not quite a type check. This function makes a list of \
all types that match the expected type and also the node, and \
returns that list. The goal here is not to test validity, but \
to determine how to process this node further.
That said, it will recognize built-in types only in case of \
an exact match.
Args:
node: The YAML node to recognize.
expected_type: The type we expect this node to be, based \
on the context provided by our type definitions.
Returns:
A list of matching types. | codesearchnet |
def __init__(self, api, endpoint=None, cls=None):
self.api = api
self.endpoint = endpoint
self._cls = cls | Creates an instance of the APIEndpoint class.
Args:
api - Gophish.client - The authenticated REST client
endpoint - str - The URL path to the resource endpoint
cls - gophish.models.Model - The Class to use when parsing results | juraj-google-style |
def most_frequent(self, k):
word_count = {w: self.word_count[w] for w in self.words[:k]}
return CountedVocabulary(word_count=word_count) | Returns a vocabulary with the most frequent `k` words.
Args:
k (integer): specifies the top k most frequent words to be returned. | codesearchnet |
def install_package(tar_url, folder, md5_url='{tar_url}.md5', on_download=(lambda : None), on_complete=(lambda : None)):
data_file = join(folder, basename(tar_url))
md5_url = md5_url.format(tar_url=tar_url)
try:
remote_md5 = download(md5_url).decode('utf-8').split(' ')[0]
except (UnicodeDecodeError, URLError):
raise ValueError(('Invalid MD5 url: ' + md5_url))
if (remote_md5 != calc_md5(data_file)):
on_download()
if isfile(data_file):
try:
with tarfile.open(data_file) as tar:
for i in reversed(list(tar)):
try:
os.remove(join(folder, i.path))
except OSError:
pass
except (OSError, EOFError):
pass
download_extract_tar(tar_url, folder, data_file)
on_complete()
if (remote_md5 != calc_md5(data_file)):
raise ValueError(('MD5 url does not match tar: ' + md5_url))
return True
return False | Install or update a tar package that has an md5
Args:
tar_url (str): URL of package to download
folder (str): Location to extract tar. Will be created if doesn't exist
md5_url (str): URL of md5 to use to check for updates
on_download (Callable): Function that gets called when downloading a new update
on_complete (Callable): Function that gets called when a new download is complete
Returns:
bool: Whether the package was updated | codesearchnet |
def _ParseFile(self, file_obj, line_parser):
lines = [l.strip() for l in utils.ReadFileBytesAsUnicode(file_obj).splitlines()]
try:
for (index, line) in enumerate(lines):
if line:
line_parser(line)
except (IndexError, KeyError) as e:
raise parser.ParseError(('Invalid file at line %d: %s' % ((index + 1), e))) | Process a file line by line.
Args:
file_obj: The file to parse.
line_parser: The parser method used to process and store line content.
Raises:
parser.ParseError if the parser is unable to process the line. | codesearchnet |
def ee_initialize(use_personal_account: bool=False, enforce_high_volume: bool=False, service_account: t.Optional[str]=None, private_key: t.Optional[str]=None, project_id: t.Optional[str]=None) -> None:
creds = get_creds(use_personal_account, service_account, private_key)
on_compute_engine = is_compute_engine()
if on_compute_engine:
if project_id is None and use_personal_account:
raise RuntimeError('Project_name should not be None!')
params = {'credentials': creds, 'opt_url': 'https://earthengine-highvolume.googleapis.com'}
if project_id:
params['project'] = project_id
ee.Initialize(**params)
elif enforce_high_volume and (not on_compute_engine):
raise RuntimeError('Must run on a compute engine VM to use the high volume earth engine api.')
else:
ee.Initialize(creds) | Initializes earth engine with the high volume API when using a compute engine VM.
Args:
use_personal_account: A flag to use personal account for ee authentication. Default: False.
enforce_high_volume: A flag to use the high volume API when using a compute engine VM. Default: False.
service_account: Service account address when using a private key for earth engine authentication.
private_key: A private key path to authenticate earth engine using private key. Default: None.
project_id: An identifier that represents the name of a project present in Earth Engine.
Raises:
RuntimeError: Earth Engine did not initialize. | github-repos |
def validate_start_end_range(range_tuple):
start, end = range_tuple
if (start and end) and (start > end):
raise ValueError(_("Start after end!"))
return range_tuple | Perform basic sanity checks on a timeframe.
Args:
range_tuple (tuple): ``(start, end)`` tuple as returned by
``complete_timeframe``.
Raises:
ValueError: If start > end.
Returns:
tuple: ``(start, end)`` tuple that passed validation.
Note:
``timeframes`` may be incomplete, especially if ``complete_timeframe(partial=True)`` has
been used to construct them. | juraj-google-style |
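A standalone sketch of the check above, with the gettext marker `_` replaced by a plain string so the snippet runs on its own.
from datetime import datetime

def validate_start_end_range(range_tuple):
    start, end = range_tuple
    if (start and end) and (start > end):
        raise ValueError('Start after end!')
    return range_tuple

ok = (datetime(2024, 1, 1), datetime(2024, 2, 1))
print(validate_start_end_range(ok))            # returned unchanged
try:
    validate_start_end_range(tuple(reversed(ok)))
except ValueError as e:
    print(e)                                   # Start after end!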
def remat(f):
return tf.recompute_grad(f) | Implementation of rematerialization.
Args:
f: The function or operation to rematerialize.
Returns:
A function wrapping f that defines a custom gradient, which
recomputes f on the backwards pass of a gradient call. | github-repos |
def expand_groups(grp):
p = re.compile('(?P<name>.+)\\[(?P<start>\\d+)-(?P<end>\\d+)\\]')
m = p.match(grp)
if (m is not None):
s = int(m.group('start'))
e = int(m.group('end'))
n = m.group('name')
return list(map((lambda x: (n + str(x))), range(s, (e + 1))))
else:
return [grp] | Expand group names.
Args:
grp (string): group names to expand
Returns:
list of groups
Examples:
* grp[1-3] will be expanded to [grp1, grp2, grp3]
* grp1 will be expanded to [grp1] | codesearchnet |
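The same logic with the map/lambda spelled as a comprehension, just to show the documented outputs.
import re

def expand_groups(grp):
    m = re.match(r'(?P<name>.+)\[(?P<start>\d+)-(?P<end>\d+)\]', grp)
    if m is not None:
        s, e, n = int(m.group('start')), int(m.group('end')), m.group('name')
        return [n + str(x) for x in range(s, e + 1)]
    return [grp]

print(expand_groups('grp[1-3]'))   # ['grp1', 'grp2', 'grp3']
print(expand_groups('grp1'))       # ['grp1']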
def __init__(self, launchdjobs):
self.launchdjobs = launchdjobs
self.blacklist_regex = [
re.compile(r"^0x[a-z0-9]+\.anonymous\..+$"),
re.compile(r"^0x[a-z0-9]+\.mach_init\.(crash_inspector|Inspector)$"),
] | Initialize.
Args:
launchdjobs: NSCFArray of NSCFDictionarys containing launchd job data from
the ServiceManagement framework. | juraj-google-style |
def graph_key_from_tag(tag, entity_index):
start_token = tag.get('start_token')
entity = tag.get('entities', [])[entity_index]
return str(start_token) + '-' + entity.get('key') + '-' + str(entity.get('confidence')) | Returns a key from a tag entity
Args:
tag (tag) : this is the tag selected to get the key from
entity_index (int) : this is the index of the tagged entity
Returns:
str : String representing the key for the given tagged entity. | juraj-google-style |
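The shape of the tag dict below is inferred purely from the attribute accesses above (start_token, entities, key, confidence), not from library documentation.
def graph_key_from_tag(tag, entity_index):
    start_token = tag.get('start_token')
    entity = tag.get('entities', [])[entity_index]
    return str(start_token) + '-' + entity.get('key') + '-' + str(entity.get('confidence'))

tag = {'start_token': 3, 'entities': [{'key': 'city', 'confidence': 0.85}]}
print(graph_key_from_tag(tag, 0))   # '3-city-0.85'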
def substitute_globals(config_dict):
constants = get_all_constants()
if type(config_dict) != dict:
return
for key in config_dict.keys():
if key in constants and type(config_dict[key]) in _ALLOWED:
globals()[key] = config_dict[key] | Set global variables to values defined in `config_dict`.
Args:
config_dict (dict): dictionary with data, which are used to set \
`globals`.
Note:
`config_dict` have to be dictionary, or it is ignored. Also all
variables, that are not already in globals, or are not types defined in
:attr:`_ALLOWED` (str, int, float) or starts with ``_`` are silently
ignored. | juraj-google-style |
def _parse_data_fields(self, fields, tag_id="tag", sub_id="code"):
for field in fields:
params = field.params
if tag_id not in params:
continue
field_repr = OrderedDict([
[self.i1_name, params.get(self.i1_name, " ")],
[self.i2_name, params.get(self.i2_name, " ")],
])
for subfield in field.find("subfield"):
if sub_id not in subfield.params:
continue
content = MARCSubrecord(
val=subfield.getContent().strip(),
i1=field_repr[self.i1_name],
i2=field_repr[self.i2_name],
other_subfields=field_repr
)
code = subfield.params[sub_id]
if code in field_repr:
field_repr[code].append(content)
else:
field_repr[code] = [content]
tag = params[tag_id]
if tag in self.datafields:
self.datafields[tag].append(field_repr)
else:
self.datafields[tag] = [field_repr] | Parse data fields.
Args:
fields (list): of HTMLElements
tag_id (str): parameter name, which holds the information, about
field name this is normally "tag", but in case of
oai_marc "id"
sub_id (str): id of parameter, which holds informations about
subfield name this is normally "code" but in case of
oai_marc "label" | juraj-google-style |
def _get_media_files(cls, packager, media_packages, media_type, extra_files):
source_files = list(extra_files)
if ((not settings.PIPELINE_ENABLED) and settings.PIPELINE_COLLECTOR_ENABLED):
default_collector.collect()
for media_package in media_packages:
package = packager.package_for(media_type, media_package)
if settings.PIPELINE_ENABLED:
source_files.append(staticfiles_storage.url(package.output_filename))
else:
source_files += packager.compile(package.paths)
return source_files | Return source or output media files for a list of packages.
This will go through the media files belonging to the provided list
of packages referenced in a Media class and return the output files
(if Pipeline is enabled) or the source files (if not enabled).
Args:
packager (pipeline.packager.Packager):
The packager responsible for media compilation for this type
of package.
media_packages (list of unicode):
The list of media packages referenced in Media to compile or
return.
extra_files (list of unicode):
The list of extra files to include in the result. This would
be the list stored in the Media class's original :py:attr:`css`
or :py:attr:`js` attributes.
Returns:
list:
The list of media files for the given packages. | codesearchnet |
def get_transcript_lengths(ensembl, transcript_ids):
transcripts = {}
for transcript_id in transcript_ids:
try:
seq = ensembl.get_protein_seq_for_transcript(transcript_id)
except ValueError:
continue
transcripts[transcript_id] = len(seq)
return transcripts | finds the protein length for ensembl transcript IDs for a gene
Args:
ensembl: EnsemblRequest object to request sequences and data
from the ensembl REST API
transcript_ids: list of transcript IDs for a single gene
Returns:
dictionary of lengths (in amino acids), indexed by transcript IDs | juraj-google-style |
def _as_serialized_graph(self, allow_stateful=None, strip_device_assignment=None, external_state_policy=options_lib.ExternalStatePolicy.WARN):
if external_state_policy:
policy = external_state_policy.value
return gen_dataset_ops.dataset_to_graph_v2(self._variant_tensor, external_state_policy=policy, strip_device_assignment=strip_device_assignment)
if strip_device_assignment:
return gen_dataset_ops.dataset_to_graph(self._variant_tensor, allow_stateful=allow_stateful, strip_device_assignment=strip_device_assignment)
return gen_dataset_ops.dataset_to_graph(self._variant_tensor, allow_stateful=allow_stateful) | Produces serialized graph representation of the dataset.
Args:
allow_stateful: If true, we allow stateful ops to be present in the graph
def. In that case, the state in these ops would be thrown away.
strip_device_assignment: If true, non-local (i.e. job and task) device
assignment is stripped from ops in the serialized graph.
external_state_policy: The ExternalStatePolicy enum that determines how we
handle input pipelines that depend on external state. By default, its
set to WARN.
Returns:
A scalar `tf.Tensor` of `tf.string` type, representing this dataset as a
serialized graph. | github-repos |
def resolve_image_as_pil(self, image_url, coords=None):
files = self.mets.find_files(url=image_url)
if files:
image_filename = self.download_file(files[0]).local_filename
else:
image_filename = self.download_url(image_url)
if image_url not in self.image_cache['pil']:
self.image_cache['pil'][image_url] = Image.open(image_filename)
pil_image = self.image_cache['pil'][image_url]
if coords is None:
return pil_image
if image_url not in self.image_cache['cv2']:
log.debug("Converting PIL to OpenCV: %s", image_url)
color_conversion = cv2.COLOR_GRAY2BGR if pil_image.mode in ('1', 'L') else cv2.COLOR_RGB2BGR
pil_as_np_array = np.array(pil_image).astype('uint8') if pil_image.mode == '1' else np.array(pil_image)
self.image_cache['cv2'][image_url] = cv2.cvtColor(pil_as_np_array, color_conversion)
cv2_image = self.image_cache['cv2'][image_url]
poly = np.array(coords, np.int32)
log.debug("Cutting region %s from %s", coords, image_url)
region_cut = cv2_image[
np.min(poly[:, 1]):np.max(poly[:, 1]),
np.min(poly[:, 0]):np.max(poly[:, 0])
]
return Image.fromarray(region_cut) | Resolve an image URL to a PIL image.
Args:
coords (list) : Coordinates of the bounding box to cut from the image
Returns:
Image or region in image as PIL.Image | juraj-google-style |
def toy_logistic_data(num_examples, input_size=2, weights_prior_stddev=5.0):
random_weights = (weights_prior_stddev * np.random.randn(input_size))
random_bias = np.random.randn()
design_matrix = ((np.random.rand(num_examples, input_size) * 2) - 1)
logits = np.reshape((np.dot(design_matrix, random_weights) + random_bias), ((- 1), 1))
p_labels = (1.0 / (1 + np.exp((- logits))))
labels = np.int32((p_labels > np.random.rand(num_examples, 1)))
return (random_weights, random_bias, np.float32(design_matrix), labels) | Generates synthetic data for binary classification.
Args:
num_examples: The number of samples to generate (scalar Python `int`).
input_size: The input space dimension (scalar Python `int`).
weights_prior_stddev: The prior standard deviation of the weight
vector. (scalar Python `float`).
Returns:
random_weights: Sampled weights as a Numpy `array` of shape
`[input_size]`.
random_bias: Sampled bias as a scalar Python `float`.
design_matrix: Points sampled uniformly from the cube `[-1,
1]^{input_size}`, as a Numpy `array` of shape `(num_examples,
input_size)`.
labels: Labels sampled from the logistic model `p(label=1) =
logistic(dot(features, random_weights) + random_bias)`, as a Numpy
`int32` `array` of shape `(num_examples, 1)`. | codesearchnet |
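A usage sketch assuming toy_logistic_data from the row above is pasted into the same module (it only needs NumPy); it checks the documented shapes and the label balance.
import numpy as np

np.random.seed(0)
weights, bias, design_matrix, labels = toy_logistic_data(num_examples=500, input_size=2)
print(design_matrix.shape, labels.shape)   # (500, 2) (500, 1)
print(float(labels.mean()))                # fraction of positive labels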
def are_you_sure(msg=''):
r
print(msg)
from utool import util_arg
from utool import util_str
override = util_arg.get_argflag(('--yes', '--y', '-y'))
if override:
print('accepting based on command line flag')
return True
valid_ans = ['yes', 'y']
valid_prompt = util_str.conj_phrase(valid_ans, 'or')
ans = input('Are you sure?\n Enter %s to accept\n' % valid_prompt)
return ans.lower() in valid_ans | Prompts user to accept or checks command line for -y
Args:
msg (str):
Returns:
bool: accept or not | juraj-google-style |
def _astimezone_ts(self, timezone):
if self.created.tzinfo is timezone:
return self
else:
nw_obj = Timestamps((None,)*4)
nw_obj.created = self.created.astimezone(timezone)
nw_obj.changed = self.changed.astimezone(timezone)
nw_obj.mft_changed = self.mft_changed.astimezone(timezone)
nw_obj.accessed = self.accessed.astimezone(timezone)
return nw_obj | Changes the time zones of all timestamps.
Receives a new timezone and applies to all timestamps, if necessary.
Args:
timezone (:obj:`tzinfo`): Time zone to be applied
Returns:
A new ``Timestamps`` object if the time zone changes, otherwise returns ``self``. | juraj-google-style |
def key_swap(d, cls, marshal):
dname = '_{}marshal_key_swap'.format(('' if marshal else 'un'))
if hasattr(cls, dname):
key_swap = getattr(cls, dname)
return {(key_swap[k] if (k in key_swap) else k): v for (k, v) in d.items()}
else:
return d | Swap the keys in a dictionary
Args:
d: dict, The dict to swap keys in
cls: class, If the class has a staticly defined
_marshal_key_swap and/or _unmarshal_key_swap dict,
the keys will be swapped.
Otherwise @d is returned
marshal: bool, True if marshalling class to JSON,
False if unmarshalling JSON to class
Returns:
dict | codesearchnet |
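A self-contained sketch: the Article class and its field names are invented for illustration; only the statically defined _marshal_key_swap / _unmarshal_key_swap convention comes from the docstring.
class Article:
    _marshal_key_swap = {'body_text': 'bodyText'}
    _unmarshal_key_swap = {'bodyText': 'body_text'}

def key_swap(d, cls, marshal):
    dname = '_{}marshal_key_swap'.format('' if marshal else 'un')
    if hasattr(cls, dname):
        swap = getattr(cls, dname)
        return {(swap[k] if k in swap else k): v for k, v in d.items()}
    return d

print(key_swap({'body_text': 'hi', 'id': 1}, Article, marshal=True))    # {'bodyText': 'hi', 'id': 1}
print(key_swap({'bodyText': 'hi', 'id': 1}, Article, marshal=False))    # {'body_text': 'hi', 'id': 1}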
def _run_conversion(self, meta_graph_def):
grappler_session_config = config_pb2.ConfigProto()
custom_rewriter_config = _get_tensorrt_rewriter_config(conversion_params=self._conversion_params._replace(allow_build_at_runtime=True), is_dynamic_op=True, max_batch_size=None, disable_non_trt_optimizers=self._test_only_disable_non_trt_optimizers, use_implicit_batch=not self._use_dynamic_shape, profile_strategy=self._profile_strategy)
grappler_session_config.graph_options.rewrite_options.CopyFrom(custom_rewriter_config)
return tf_optimizer.OptimizeGraph(grappler_session_config, meta_graph_def, graph_id=b'tf_graph') | Run Grappler's OptimizeGraph() tool to convert the graph.
Args:
meta_graph_def: the MetaGraphDef instance to run the optimizations on.
Returns:
The optimized GraphDef. | github-repos |
def Decode(data, encoding=None):
if data is None:
return None
if isinstance(data, str) or isinstance(data, bytes):
string = data
else:
string = str(data)
if isinstance(string, str):
return string
try:
return string.decode('ascii')
except UnicodeError:
pass
if encoding:
try:
return string.decode(encoding)
except UnicodeError:
pass
try:
return string.decode('utf8')
except UnicodeError:
pass
try:
return string.decode(sys.getfilesystemencoding())
except UnicodeError:
pass
try:
return string.decode(sys.getdefaultencoding())
except UnicodeError:
pass
return string.decode('iso-8859-1') | Returns string with non-ascii characters decoded to UNICODE.
UTF-8, the suggested encoding, and the usual suspects will be attempted in
order.
Args:
data: A string or object that has str() and unicode() methods that may
contain an encoding incompatible with the standard output encoding.
encoding: The suggested encoding if known.
Returns:
A text string representing the decoded byte string. | github-repos |
def transpose(a, axes=None):
if isinstance(a, np.ndarray):
return np.transpose(a, axes)
elif isinstance(a, RemoteArray):
return a.transpose(*axes)
elif isinstance(a, Remote):
return _remote_to_array(a).transpose(*axes)
elif isinstance(a, DistArray):
if axes is None:
axes = range(a.ndim - 1, -1, -1)
axes = list(axes)
if len(set(axes)) < len(axes):
raise ValueError("repeated axis in transpose")
if sorted(axes) != list(range(a.ndim)):
raise ValueError("axes don't match array")
distaxis = a._distaxis
new_distaxis = axes.index(distaxis)
new_subarrays = [ra.transpose(*axes) for ra in a._subarrays]
return DistArray(new_subarrays, new_distaxis)
else:
return np.transpose(a, axes) | Returns a view of the array with axes transposed.
For a 1-D array, this has no effect.
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted
Args:
a (array_like): Input array.
axes (list of int, optional): By default, reverse the dimensions,
otherwise permute the axes according to the values given. | juraj-google-style |
def push(self, stream, value):
raise ArgumentError("Attempting to push reading to an invalid stream walker that cannot hold data", selector=self.selector, stream=stream) | Update this stream walker with a new responsive reading.
Args:
stream (DataStream): The stream that we're pushing
value (IOTileReading): The reading tha we're pushing | juraj-google-style |
def exit_code(self, code):
if ((code is not None) and (code in [0, 1, 3])):
self._exit_code = code
else:
self.log.warning(u'Invalid exit code') | Set the App exit code.
For TC Exchange Apps there are 3 supported exit codes.
* 0 indicates a normal exit
* 1 indicates a failure during execution
* 3 indicates a partial failure
Args:
code (integer): The exit code value for the app. | codesearchnet |
def _add_function(self, func, identify_observed):
key = self.make_key(func)
if (key not in self.observers):
self.observers[key] = ObserverFunction(func, identify_observed, (key, self.observers))
return True
else:
return False | Add a function as an observer.
Args:
func: The function to register as an observer.
identify_observed: See docstring for add_observer.
Returns:
True if the function is added, otherwise False. | codesearchnet |
def getHostCertPath(self, name):
path = s_common.genpath(self.certdir, 'hosts', ('%s.crt' % name))
if (not os.path.isfile(path)):
return None
return path | Gets the path to a host certificate.
Args:
name (str): The name of the host keypair.
Examples:
Get the path to the host certificate for the host "myhost":
mypath = cdir.getHostCertPath('myhost')
Returns:
str: The path if exists. | codesearchnet |
def get_next_base26(prev=None):
if (not prev):
return 'a'
r = re.compile('^[a-z]*$')
if (not r.match(prev)):
raise ValueError('Invalid base26')
if (not prev.endswith('z')):
return (prev[:(- 1)] + chr((ord(prev[(- 1)]) + 1)))
return (get_next_base26(prev[:(- 1)]) + 'a') | Increment letter-based IDs.
Generates IDs like ['a', 'b', ..., 'z', 'aa', ab', ..., 'az', 'ba', ...]
Returns:
str: Next base-26 ID. | codesearchnet |
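Iterating the function above (assumed to be in scope) reproduces the sequence promised by the docstring; an exhausted suffix rolls over, e.g. 'z' -> 'aa' and 'az' -> 'ba'.
ids, prev = [], None
for _ in range(30):
    prev = get_next_base26(prev)
    ids.append(prev)
print(ids[:3], ids[25:30])   # ['a', 'b', 'c'] ['z', 'aa', 'ab', 'ac', 'ad']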
def process_input(self, stream, value, rpc_executor):
self.sensor_log.push(stream, value)
if stream.important:
associated_output = stream.associated_stream()
self.sensor_log.push(associated_output, value)
to_check = deque([x for x in self.roots])
while len(to_check) > 0:
node = to_check.popleft()
if node.triggered():
try:
results = node.process(rpc_executor, self.mark_streamer)
for result in results:
result.raw_time = value.raw_time
self.sensor_log.push(node.stream, result)
except:
self._logger.exception("Unhandled exception in graph node processing function for node %s", str(node))
if len(results) > 0:
to_check.extend(node.outputs) | Process an input through this sensor graph.
The tick information in value should be correct and is transfered
to all results produced by nodes acting on this tick.
Args:
stream (DataStream): The stream the input is part of
value (IOTileReading): The value to process
rpc_executor (RPCExecutor): An object capable of executing RPCs
in case we need to do that. | juraj-google-style |
def set_timing(self, timing: bool, reset: bool=False) -> None:
self._timing = timing
if reset:
self.reset() | Manually set the ``timing`` parameter, and optionally reset the timers.
Args:
timing: should we be timing?
reset: reset the timers? | codesearchnet |
def get_object_id_from_graph(access_token=None):
if (access_token is None):
access_token = get_graph_token_from_msi()
endpoint = 'https://' + GRAPH_RESOURCE_HOST + '/v1.0/me'
headers = {'Authorization': ('Bearer ' + access_token), 'Host': GRAPH_RESOURCE_HOST}
ret = requests.get(endpoint, headers=headers)
return ret.json()['id'] | Return the object ID for the Graph user who owns the access token.
Args:
access_token (str): A Microsoft Graph access token. (Not an Azure access token.)
If not provided, attempt to get it from MSI_ENDPOINT.
Returns:
An object ID string for a user or service principal. | codesearchnet |
def get_definition(self, name: YangIdentifier, kw: YangIdentifier) -> Optional['Statement']:
stmt = self.superstmt
while stmt:
res = stmt.find1(kw, name)
if res:
return res
stmt = stmt.superstmt
return None | Search ancestor statements for a definition.
Args:
name: Name of a grouping or datatype (with no prefix).
kw: ``grouping`` or ``typedef``.
Raises:
DefinitionNotFound: If the definition is not found. | codesearchnet |
def parse_panel_app_panel(panel_info, hgnc_map, institute='cust000', panel_type='clinical'):
date_format = "%Y-%m-%dT%H:%M:%S.%f"
gene_panel = {}
gene_panel['version'] = float(panel_info['version'])
gene_panel['date'] = get_date(panel_info['Created'][:-1], date_format=date_format)
gene_panel['display_name'] = panel_info['SpecificDiseaseName']
gene_panel['institute'] = institute
gene_panel['panel_type'] = panel_type
LOG.info("Parsing panel %s", gene_panel['display_name'])
gene_panel['genes'] = []
nr_low_confidence = 1
nr_genes = 0
for nr_genes, gene in enumerate(panel_info['Genes'],1):
gene_info = parse_panel_app_gene(gene, hgnc_map)
if not gene_info:
nr_low_confidence += 1
continue
gene_panel['genes'].append(gene_info)
LOG.info("Number of genes in panel %s", nr_genes)
LOG.info("Number of low confidence genes in panel %s", nr_low_confidence)
return gene_panel | Parse a PanelApp panel
Args:
panel_info(dict)
hgnc_map(dict): Map from symbol to hgnc ids
institute(str)
panel_type(str)
Returns:
gene_panel(dict) | juraj-google-style |
def find_newline(self, size=-1):
if size < 0:
return self._buffer.find('\n', self._offset)
return self._buffer.find('\n', self._offset, self._offset + size) | Search for newline char in buffer starting from current offset.
Args:
size: number of bytes to search. -1 means all.
Returns:
offset of newline char in buffer. -1 if doesn't exist. | juraj-google-style |
def _traceback_to_alignment(tb, a, b):
for idx, direction in tb:
if direction == Direction.DIAG:
yield (idx[0] - 1, idx[1] - 1)
elif direction == Direction.UP:
yield (idx[0] - 1, None)
elif direction == Direction.LEFT:
yield (None, idx[1] - 1) | Convert a traceback (i.e. as returned by `tracebacks()`) into an alignment
(i.e. as returned by `align`).
Arguments:
tb: A traceback.
a: the sequence defining the rows in the traceback matrix.
b: the sequence defining the columns in the traceback matrix.
Returns: An iterable of (index, index) tuples where either (but not both)
index can be `None`. | juraj-google-style |
def create_graph_from_data(self, data, **kwargs):
self.arguments['{SCORE}'] = self.scores[self.score]
self.arguments['{CUTOFF}'] = str(self.cutoff)
self.arguments['{VARSEL}'] = str(self.variablesel).upper()
self.arguments['{SELMETHOD}'] = self.var_selection[self.selmethod]
self.arguments['{PRUNING}'] = str(self.pruning).upper()
self.arguments['{PRUNMETHOD}'] = self.var_selection[self.prunmethod]
self.arguments['{NJOBS}'] = str(self.nb_jobs)
self.arguments['{VERBOSE}'] = str(self.verbose).upper()
results = self._run_cam(data, verbose=self.verbose)
return nx.relabel_nodes(nx.DiGraph(results),
{idx: i for idx, i in enumerate(data.columns)}) | Apply causal discovery on observational data using CAM.
Args:
data (pandas.DataFrame): DataFrame containing the data
Returns:
networkx.DiGraph: Solution given by the CAM algorithm. | juraj-google-style |
def _get_entities(self, text, language=''):
body = {'document': {'type': 'PLAIN_TEXT', 'content': text}, 'encodingType': 'UTF32'}
if language:
body['document']['language'] = language
request = self.service.documents().analyzeEntities(body=body)
response = request.execute()
result = []
for entity in response.get('entities', []):
mentions = entity.get('mentions', [])
if (not mentions):
continue
entity_text = mentions[0]['text']
offset = entity_text['beginOffset']
for word in entity_text['content'].split():
result.append({'content': word, 'beginOffset': offset})
offset += len(word)
return result | Returns the list of entities retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
List of entities. | codesearchnet |
def root(self):
node = self
while (node.package is not None):
node = node.package
return node | Property to return the root of this node.
Returns:
Package: this node's root package. | codesearchnet |
def extract_jtl_string_pairs_from_text_file(results_dict, file_path):
result_pairs = re.findall(JTL_REGEX, open(file_path).read())
for result_key, result_comment in result_pairs:
results_dict[result_key] = result_comment
return results_dict | Extracts all string pairs matching the JTL pattern from given text file.
This can be used as an "extract_func" argument in the extract_string_pairs_in_directory method.
Args:
results_dict (dict): The dict to add the the string pairs to.
file_path (str): The path of the file from which to extract the string pairs. | juraj-google-style |
def Pack(cls, obj, version):
if isinstance(obj, (datetime.datetime, datetime.date)):
return cls.AdManagerDateTimePacker(obj, version)
return obj | Pack the given object using Ad Manager-specific logic.
Args:
obj: an object to be packed for SOAP using Ad Manager-specific logic, if
applicable.
version: the version of the current API, e.g. 'v201811'
Returns:
The given object packed with Ad Manager-specific logic for SOAP,
if applicable. Otherwise, returns the given object unmodified. | juraj-google-style |
def read(self, directory, filename, session, spatial=False, spatialReferenceID=4236, replaceParamFile=None, **kwargs):
path = os.path.join(directory, filename)
filename_split = filename.split('.')
name = filename_split[0]
extension = ''
if (len(filename_split) >= 2):
extension = filename_split[(- 1)]
if os.path.isfile(path):
session.add(self)
self._read(directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile, **kwargs)
self._commit(session, self.COMMIT_ERROR_MESSAGE)
else:
session.rollback()
log.warning('Could not find file named {0}. File not read.'.format(filename)) | Generic read file into database method.
Args:
directory (str): Directory containing the file to be read.
filename (str): Name of the file which will be read (e.g.: 'example.prj').
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
Defaults to False.
spatialReferenceID (int, optional): Integer id of spatial reference system for the model. Required if
spatial is True. Defaults to srid 4236.
replaceParamFile (:class:`gsshapy.orm.ReplaceParamFile`, optional): ReplaceParamFile instance. Use this if
the file you are reading contains replacement parameters. | codesearchnet |
def seek(self, offset, whence=os.SEEK_SET):
if not self._is_open:
raise IOError('Not opened.')
if whence not in [os.SEEK_SET, os.SEEK_CUR, os.SEEK_END]:
raise IOError('Unsupported whence.')
self._file_object.seek(offset, whence) | Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed. | juraj-google-style |
def xzhdr(self, header, msgid_range=None):
args = header
if (msgid_range is not None):
args += (' ' + utils.unparse_msgid_range(msgid_range))
(code, message) = self.command('XZHDR', args)
if (code != 221):
raise NNTPReplyError(code, message)
return self.info(code, message, compressed=True) | XZHDR command.
Args:
msgid_range: A message-id as a string, or an article number as an
integer, or a tuple of specifying a range of article numbers in
the form (first, [last]) - if last is omitted then all articles
after first are included. A msgid_range of None (the default)
uses the current article. | codesearchnet |
def diff_dictionaries(old_dict, new_dict):
old_set = set(old_dict)
new_set = set(new_dict)
added_set = (new_set - old_set)
removed_set = (old_set - new_set)
common_set = (old_set & new_set)
changes = 0
output = []
for key in added_set:
changes += 1
output.append(DictValue(key, None, new_dict[key]))
for key in removed_set:
changes += 1
output.append(DictValue(key, old_dict[key], None))
for key in common_set:
output.append(DictValue(key, old_dict[key], new_dict[key]))
if (str(old_dict[key]) != str(new_dict[key])):
changes += 1
output.sort(key=attrgetter('key'))
return [changes, output] | Diffs two single dimension dictionaries
Returns the number of changes and an unordered list
expressing the common entries and changes.
Args:
old_dict(dict): old dictionary
new_dict(dict): new dictionary
Returns: list()
int: number of changed records
list: [DictValue] | codesearchnet |
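The diff boils down to three set operations plus a string comparison over the shared keys; a standalone illustration with made-up dictionaries:

old = {'name': 'web', 'size': 't2.micro', 'zone': 'us-east-1a'}
new = {'name': 'web', 'size': 't2.small', 'region': 'us-east-1'}

added = set(new) - set(old)      # {'region'}
removed = set(old) - set(new)    # {'zone'}
common = set(old) & set(new)     # {'name', 'size'}
changed = sum(1 for k in common if str(old[k]) != str(new[k]))  # 1: 'size' differs

print(added, removed, changed)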
def manual_payment(request, invoice_id):
FORM_PREFIX = 'manual_payment'
current_invoice = InvoiceController.for_id_or_404(invoice_id)
form = forms.ManualPaymentForm((request.POST or None), prefix=FORM_PREFIX)
if (request.POST and form.is_valid()):
form.instance.invoice = current_invoice.invoice
form.instance.entered_by = request.user
form.save()
current_invoice.update_status()
form = forms.ManualPaymentForm(prefix=FORM_PREFIX)
data = {'invoice': current_invoice.invoice, 'form': form}
return render(request, 'registrasion/manual_payment.html', data) | Allows staff to make manual payments or refunds on an invoice.
This form requires a login, and the logged in user needs to be staff.
Arguments:
invoice_id (castable to int): The invoice ID to be paid
Returns:
render:
Renders ``registrasion/manual_payment.html`` with the following
data::
{
"invoice": models.commerce.Invoice(),
"form": form, # A form that saves a ``ManualPayment``
# object.
} | codesearchnet |
def GetMessages(self, formatter_mediator, event):
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
read_receipt = event_values.get('read_receipt', None)
if read_receipt is not None:
event_values['read_receipt'] = (
self._READ_RECEIPT.get(read_receipt, 'UNKNOWN'))
message_type = event_values.get('message_type', None)
if message_type is not None:
event_values['message_type'] = (
self._MESSAGE_TYPE.get(message_type, 'UNKNOWN'))
return self._ConditionalFormatMessages(event_values) | Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter. | juraj-google-style |
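The formatter's key step is swapping raw integer codes for readable labels via lookup tables before the message is built; a standalone sketch of that substitution (the table contents here are invented, not plaso's):

_READ_RECEIPT = {0: 'No', 1: 'Yes'}    # hypothetical values
_MESSAGE_TYPE = {1: 'SMS', 2: 'MMS'}   # hypothetical values

event_values = {'read_receipt': 1, 'message_type': 2, 'body': 'hi'}

if event_values.get('read_receipt') is not None:
    event_values['read_receipt'] = _READ_RECEIPT.get(event_values['read_receipt'], 'UNKNOWN')
if event_values.get('message_type') is not None:
    event_values['message_type'] = _MESSAGE_TYPE.get(event_values['message_type'], 'UNKNOWN')

print(event_values)  # {'read_receipt': 'Yes', 'message_type': 'MMS', 'body': 'hi'}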
def list(self, keyword=None, arg=None):
return [x for x in self.list_gen(keyword, arg)] | LIST command.
A wrapper for all of the other list commands. The output of this command
depends on the keyword specified. The output format for each keyword can
be found in the list function that corresponds to the keyword.
Args:
keyword: Information requested.
arg: Pattern or keyword specific argument.
Note: Keywords supported by this function include ACTIVE,

ACTIVE.TIMES, DISTRIB.PATS, HEADERS, NEWSGROUPS, OVERVIEW.FMT and
EXTENSIONS.
Raises:
NotImplementedError: For unsupported keywords. | codesearchnet |
def push(self, files, run=None, entity=None, project=None, description=None, force=True, progress=False):
if (project is None):
project = self.get_project()
if (project is None):
raise CommError('No project configured.')
if (run is None):
run = self.current_run_id
(run_id, result) = self.upload_urls(project, files, run, entity, description)
responses = []
for (file_name, file_info) in result.items():
try:
normal_name = os.path.join(*file_name.split('/'))
open_file = (files[normal_name] if isinstance(files, dict) else open(normal_name, 'rb'))
except IOError:
print(('%s does not exist' % file_name))
continue
if progress:
if hasattr(progress, '__call__'):
responses.append(self.upload_file_retry(file_info['url'], open_file, progress))
else:
length = os.fstat(open_file.fileno()).st_size
with click.progressbar(file=progress, length=length, label=('Uploading file: %s' % file_name), fill_char=click.style('&', fg='green')) as bar:
responses.append(self.upload_file_retry(file_info['url'], open_file, (lambda bites, _: bar.update(bites))))
else:
responses.append(self.upload_file_retry(file_info['url'], open_file))
open_file.close()
return responses | Uploads multiple files to W&B
Args:
files (list or dict): The filenames to upload
run (str, optional): The run to upload to
entity (str, optional): The entity to scope this project to. Defaults to wandb models
project (str, optional): The name of the project to upload to. Defaults to the one in settings.
description (str, optional): The description of the changes
force (bool, optional): Whether to prevent push if git has uncommitted changes
progress (callable, or stream): If callable, will be called with (chunk_bytes,
total_bytes) as argument else if True, renders a progress bar to stream.
Returns:
The requests library response object | codesearchnet |
def from_url(url, format=None):
string = urllib2.urlopen(url).read()
if PY3 is True:
string = string.decode('utf-8')
if format:
format = format.lower().replace(" ", "_")
func = parse.__getattr__("from_%s" % format)
else:
func = parse.from_unknown_text
crs = func(string)
return crs | Returns the crs object from a string interpreted as a specified format, located at a given url site.
Arguments:
- *url*: The url where the crs string is to be read from.
- *format* (optional): Which format to parse the crs string as. One of "ogc wkt", "esri wkt", or "proj4".
If None, tries to autodetect the format for you (default).
Returns:
- CRS object. | juraj-google-style |
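The row pairs a Python 2 fetch (urllib2) with a getattr dispatch onto from_<format> parser functions; a Python 3 sketch of the dispatch half, using a stub namespace in place of the real parse module (on Python 3 the fetch would be urllib.request.urlopen(url).read().decode('utf-8')):

from types import SimpleNamespace

def dispatch_parser(parse_module, text, format=None):
    # Pick parse_module.from_<format>, else fall back to autodetection.
    if format:
        func = getattr(parse_module, 'from_%s' % format.lower().replace(' ', '_'))
    else:
        func = parse_module.from_unknown_text
    return func(text)

parse_stub = SimpleNamespace(
    from_proj4=lambda s: ('proj4', s),
    from_unknown_text=lambda s: ('autodetected', s),
)
print(dispatch_parser(parse_stub, '+proj=longlat +datum=WGS84', 'proj4'))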
def device(self, idx):
class GpuDevice(Structure):
pass
c_nvmlDevice_t = POINTER(GpuDevice)
c_index = c_uint(idx)
device = c_nvmlDevice_t()
_check_return(_NVML.get_function('nvmlDeviceGetHandleByIndex_v2')(c_index, byref(device)))
return NvidiaDevice(device) | Get a specific GPU device
Args:
idx: index of device
Returns:
NvidiaDevice: single GPU device | codesearchnet |
def console_set_default_background(con: tcod.console.Console, col: Tuple[(int, int, int)]) -> None:
lib.TCOD_console_set_default_background(_console(con), col) | Change the default background color for a console.
Args:
con (Console): Any Console instance.
col (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
.. deprecated:: 8.5
Use :any:`Console.default_bg` instead. | codesearchnet |
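A hedged usage sketch following the deprecation note, assuming python-tcod 8.5+ is installed; the console size and colour are arbitrary:

import tcod

console = tcod.console.Console(80, 50)
console.default_bg = (0, 0, 32)  # attribute form recommended over the deprecated function call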
def read_zmat(cls, inputfile, implicit_index=True):
cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
if implicit_index:
            zmat_frame = pd.read_table(inputfile, comment='#',
delim_whitespace=True,
names=cols)
zmat_frame.index = range(1, len(zmat_frame) + 1)
else:
            zmat_frame = pd.read_table(inputfile, comment='#',
delim_whitespace=True,
names=['temp_index'] + cols)
zmat_frame.set_index('temp_index', drop=True, inplace=True)
zmat_frame.index.name = None
if pd.isnull(zmat_frame.iloc[0, 1]):
zmat_values = [1.27, 127., 127.]
zmat_refs = [constants.int_label[x] for x in
['origin', 'e_z', 'e_x']]
for row, i in enumerate(zmat_frame.index[:3]):
cols = ['b', 'a', 'd']
zmat_frame.loc[:, cols] = zmat_frame.loc[:, cols].astype('O')
if row < 2:
zmat_frame.loc[i, cols[row:]] = zmat_refs[row:]
zmat_frame.loc[i, ['bond', 'angle', 'dihedral'][row:]
] = zmat_values[row:]
else:
zmat_frame.loc[i, 'd'] = zmat_refs[2]
zmat_frame.loc[i, 'dihedral'] = zmat_values[2]
elif zmat_frame.iloc[0, 1] in constants.int_label.keys():
zmat_frame = zmat_frame.replace(
{col: constants.int_label for col in ['b', 'a', 'd']})
zmat_frame = cls._cast_correct_types(zmat_frame)
try:
Zmat = cls(zmat_frame)
except InvalidReference:
raise UndefinedCoordinateSystem(
'Your zmatrix cannot be transformed to cartesian coordinates')
return Zmat | Reads a zmat file.
Lines beginning with ``#`` are ignored.
Args:
inputfile (str):
implicit_index (bool): If this option is true the first column
has to be the element symbols for the atoms.
The row number is used to determine the index.
Returns:
Zmat: | juraj-google-style |
def _construct(self, context):
with self.g.as_default():
if self._pass_through:
return self._pass_through._construct(context)
current_value = context.get(self, None)
assert (current_value is not _unspecified), 'Circular dependency'
if (current_value is not None):
return current_value
context[self] = _unspecified
method_args = self._replace_deferred(self._method_args, context)
method_kwargs = self._replace_deferred(self._method_kwargs, context)
result = self._method(*method_args, **method_kwargs)
_strip_unnecessary_contents_from_stack(result, set())
context[self] = result
return result | Constructs this by calling the deferred method.
This assumes that all unbound_vars have been specified in context and if
this layer has already been computed in this context, then the previously
constructed value will be returned.
Args:
context: A dict of UnboundVariables/_DeferredLayers to their values.
Returns:
The result of calling the given method on this layer. | codesearchnet |
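The circular-dependency guard works by parking a sentinel in the context before recursing, then replacing it with the memoised result; a stripped-down sketch of the same pattern outside TensorFlow:

class Deferred(object):
    def __init__(self, method, *args):
        self.method, self.args = method, args

_UNSPECIFIED = object()  # sentinel meaning "currently being constructed"

def construct(node, context):
    current = context.get(node, None)
    assert current is not _UNSPECIFIED, 'Circular dependency'
    if current is not None:
        return current
    context[node] = _UNSPECIFIED  # mark as in-progress so cycles trip the assert
    args = [construct(a, context) if isinstance(a, Deferred) else a for a in node.args]
    context[node] = node.method(*args)  # cache the constructed value
    return context[node]

a = Deferred(lambda: 2)
b = Deferred(lambda x: x + 3, a)
print(construct(b, {}))  # 5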
def append_transformation(self, transformation, extend_collection=False, clear_redo=True):
if (self.ncores and transformation.use_multiprocessing):
p = Pool(self.ncores)
z = map((lambda x: (x, transformation, extend_collection, clear_redo)), self.transformed_structures)
new_tstructs = p.map(_apply_transformation, z, 1)
self.transformed_structures = []
for ts in new_tstructs:
self.transformed_structures.extend(ts)
else:
new_structures = []
for x in self.transformed_structures:
new = x.append_transformation(transformation, extend_collection, clear_redo=clear_redo)
if (new is not None):
new_structures.extend(new)
self.transformed_structures.extend(new_structures) | Appends a transformation to all TransformedStructures.
Args:
transformation: Transformation to append
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
clear_redo (bool): Whether to clear the redo list. By default,
this is True, meaning any appends clears the history of
undoing. However, when using append_transformation to do a
redo, the redo list should not be cleared to allow multiple
redos.
Returns:
List of booleans corresponding to initial transformed structures
each boolean describes whether the transformation altered the
structure | codesearchnet |
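The multiprocessing branch fans each (structure, transformation, ...) tuple out to a worker pool and flattens the one-to-many results; a standalone sketch of that fan-out with plain numbers standing in for pymatgen structures:

from multiprocessing import Pool

def _apply(job):
    value, factor = job
    return [value * factor]  # stand-in for a transformation returning a list of structures

if __name__ == '__main__':
    structures = [1, 2, 3, 4]
    jobs = [(s, 10) for s in structures]
    with Pool(2) as pool:
        results = pool.map(_apply, jobs, 1)
    print([x for batch in results for x in batch])  # [10, 20, 30, 40]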
def compute_batch_size(dataset):
def get_static_batch_dim(type_spec):
try:
output_shape = type_spec._to_legacy_output_shapes()
except NotImplementedError:
return None
if not isinstance(output_shape, tensor_shape.TensorShape):
return None
if output_shape.rank is None:
return None
return output_shape.dims[0].value
batch_dims = [get_static_batch_dim(type_spec) for type_spec in nest.flatten(dataset_ops.get_structure(dataset))]
if all((d is not None for d in batch_dims)):
if all((d == batch_dims[0] for d in batch_dims)):
batch_dim = batch_dims[0]
else:
batch_dim = -1
return constant_op.constant(batch_dim, dtype=dtypes.int64, name='static_batch_size')
return ged_ops.compute_batch_size(dataset._variant_tensor) | An operation that returns the batch size of the dataset.
This op tries to infer the batch size statically by walking up the dataset
tree from the final dataset node and returning the batch size of the first
batching dataset (such as from .batch() and .padded_batch()) that it
encounters. This differs from using the `element_spec` of a dataset in that it
does not account for partial batches.
This operation may fail if it encounters contradictory batch sizes (for
example, if the dataset is created by zipping together two datasets with
different batch sizes), if there are no explicit batching transformations, or
if there are operations downstream from the batching transformation that may
modify its batch size. In these cases, it returns a -1.
Args:
dataset: A `tf.data.Dataset` object.
Returns:
A `tf.int64` Tensor representing the batch size of the dataset sans partial
batches. If this cannot be inferred statically, the value of this tensor
will be -1. | github-repos |
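A hedged usage sketch; whether this helper is publicly exported differs between TensorFlow versions, so the import path below is an assumption and may need adjusting:

import tensorflow as tf
from tensorflow.python.data.experimental.ops import distribute  # internal path; an assumption

dataset = tf.data.Dataset.range(100).batch(16)
print(int(distribute.compute_batch_size(dataset).numpy()))  # 16, or -1 if not statically inferable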
def filter_by_months(self, months):
_filt_values = []
_filt_datetimes = []
for i, d in enumerate(self.datetimes):
if d in months:
_filt_datetimes.append(d)
_filt_values.append(self._values[i])
_filt_header = self.header.duplicate()
return MonthlyCollection(_filt_header, _filt_values, _filt_datetimes) | Filter the Data Collection based on a list of months of the year (as integers).
Args:
months: A List of months of the year [1..12]
Return:
A new Data Collection with filtered data | juraj-google-style |
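The filter is a plain zip of the collection's datetimes (month integers here) against the requested months; a library-free illustration:

months_wanted = [6, 7, 8]            # June, July, August
datetimes = list(range(1, 13))       # a monthly collection holds one entry per month
values = [5.1, 6.0, 9.2, 14.8, 19.5, 23.3, 26.1, 25.7, 21.4, 15.2, 9.8, 6.3]

filtered = [(d, v) for d, v in zip(datetimes, values) if d in months_wanted]
print(filtered)  # [(6, 23.3), (7, 26.1), (8, 25.7)]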
def _add_sync_queues_and_barrier(self, name, dependencies):
self._sync_queue_counter += 1
with tf.device(self.sync_queue_devices[self._sync_queue_counter % len(self.sync_queue_devices)]):
sync_queues = [
tf.FIFOQueue(self.num_worker, [tf.bool], shapes=[[]],
shared_name='%s%s' % (name, i))
for i in range(self.num_worker)]
queue_ops = []
token = tf.constant(False)
with tf.control_dependencies(dependencies):
for i, q in enumerate(sync_queues):
if i != self.task_index:
queue_ops.append(q.enqueue(token))
queue_ops.append(
sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))
return tf.group(*queue_ops, name=name) | Adds ops to enqueue on all worker queues.
Args:
name: prefixed for the shared_name of ops.
dependencies: control dependency from ops.
Returns:
an op that should be used as control dependency before starting next step. | juraj-google-style |
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
if registry_key is None:
return
values_dict = {}
for value_name in self._VALUE_NAMES:
registry_value = registry_key.GetValueByName(value_name)
if not registry_value:
continue
value_data = registry_value.GetDataAsObject()
if value_data is None:
continue
values_dict[value_name] = value_data
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) | Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key. | juraj-google-style |
def easeInOutCirc(n):
_checkRange(n)
n = (n * 2)
if (n < 1):
return ((- 0.5) * (math.sqrt((1 - (n ** 2))) - 1))
else:
n = (n - 2)
return (0.5 * (math.sqrt((1 - (n ** 2))) + 1)) | A circular tween function that accelerates, reaches the midpoint, and then decelerates.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). | codesearchnet |
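A few sample points make the accelerate-then-decelerate shape visible; this standalone copy of the formula drops the range-check helper for brevity:

import math

def ease_in_out_circ(n):
    n *= 2
    if n < 1:
        return -0.5 * (math.sqrt(1 - n ** 2) - 1)
    n -= 2
    return 0.5 * (math.sqrt(1 - n ** 2) + 1)

for t in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(t, round(ease_in_out_circ(t), 3))
# prints 0.0, 0.067, 0.5, 0.933, 1.0 -- slow at the ends, fast through the middle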
def parse(self, line, cell, namespace=None):
if (namespace is None):
ipy = IPython.get_ipython()
namespace = ipy.user_ns
args = CommandParser.create_args(line, namespace)
sub_parsers_progs = [x.prog for x in self._get_subparsers()]
matched_progs = []
for prog in sub_parsers_progs:
match = prog.split()[1:]
for i in range(len(args)):
if (args[i:(i + len(match))] == match):
matched_progs.append(prog)
break
matched_prog = None
if matched_progs:
matched_prog = max(matched_progs, key=(lambda x: len(x.split())))
line_args = self._get_subparser_line_args(matched_prog)
if line_args:
cell_config = None
try:
(cell_config, cell) = google.datalab.utils.commands.parse_config_for_selected_keys(cell, line_args)
except:
pass
if cell_config:
google.datalab.utils.commands.replace_vars(cell_config, namespace)
for arg_name in cell_config:
arg_value = cell_config[arg_name]
if (arg_value is None):
continue
if (('--' + arg_name) in args):
raise ValueError(('config item "%s" is specified in both cell and line.' % arg_name))
if isinstance(arg_value, bool):
if arg_value:
line += (' --%s' % arg_name)
else:
line += (' --%s %s' % (arg_name, str(cell_config[arg_name])))
args = CommandParser.create_args(line, namespace)
args = vars(self.parse_args(args))
cell_config = None
cell_args = self._get_subparser_cell_args(matched_prog)
if cell_args:
try:
(cell_config, _) = google.datalab.utils.commands.parse_config_for_selected_keys(cell, cell_args)
except:
pass
if cell_config:
google.datalab.utils.commands.replace_vars(cell_config, namespace)
for arg in cell_args:
if (cell_args[arg]['required'] and ((cell_config is None) or (cell_config.get(arg, None) is None))):
raise ValueError(('Cell config "%s" is required.' % arg))
if cell_config:
args.update(cell_config)
return (args, cell) | Parses a line and cell into a dictionary of arguments, expanding variables from a namespace.
For each line parameter beginning with --, it also checks the cell content to see if it exists
there. For example, if "--config1" is a line parameter, it checks whether the cell dict contains
a "config1" item and, if so, uses the cell value. The "config1" item is then removed from
the cell content.
Args:
line: line content.
cell: cell content.
namespace: user namespace. If None, IPython's user namespace is used.
Returns:
A tuple of: 1. parsed config dict. 2. remaining cell after line parameters are extracted. | codesearchnet |
def onScreen(x, y=None):
(x, y) = _unpackXY(x, y)
x = int(x)
y = int(y)
(width, height) = platformModule._size()
return ((0 <= x < width) and (0 <= y < height)) | Returns whether the given xy coordinates are on the screen or not.
Args:
Either the arguments are two separate values, first arg for x and second
for y, or there is a single argument of a sequence with two values, the
first x and the second y.
Example: onScreen(x, y) or onScreen([x, y])
Returns:
bool: True if the xy coordinates are on the screen at its current
resolution, otherwise False. | codesearchnet |
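A hedged usage example, assuming pyautogui is installed and a display is available; the bounds check itself is just two comparisons against the screen resolution:

import pyautogui

width, height = pyautogui.size()
print(pyautogui.onScreen(0, 0))            # True: top-left corner
print(pyautogui.onScreen(width, height))   # False: one pixel past the bottom-right corner
print(pyautogui.onScreen([10, 10]))        # sequence form is also accepted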
def buckets_get(self, bucket, projection='noAcl'):
args = {'projection': projection}
url = Api._ENDPOINT + (Api._BUCKET_PATH % bucket)
return google.datalab.utils.Http.request(url, credentials=self._credentials, args=args) | Issues a request to retrieve information about a bucket.
Args:
bucket: the name of the bucket.
projection: the projection of the bucket information to retrieve.
Returns:
A parsed bucket information dictionary.
Raises:
Exception if there is an error performing the operation. | juraj-google-style |
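The call wraps a GET against the Cloud Storage JSON API; a rough equivalent with the requests library, where the token handling is an assumption and credentials are left to the caller:

import requests

def get_bucket_info(bucket, token, projection='noAcl'):
    # GCS JSON API: GET /storage/v1/b/<bucket>?projection=...
    url = 'https://www.googleapis.com/storage/v1/b/%s' % bucket
    resp = requests.get(url, params={'projection': projection},
                        headers={'Authorization': 'Bearer %s' % token})
    resp.raise_for_status()
    return resp.json()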
def add_batch_parser(subparsers, parent_parser):
parser = subparsers.add_parser(
'batch',
help='Displays information about batches and submit new batches',
description='Provides subcommands to display Batch information and '
'submit Batches to the validator via the REST API.')
grand_parsers = parser.add_subparsers(title='subcommands',
dest='subcommand')
grand_parsers.required = True
add_batch_list_parser(grand_parsers, parent_parser)
add_batch_show_parser(grand_parsers, parent_parser)
add_batch_status_parser(grand_parsers, parent_parser)
add_batch_submit_parser(grand_parsers, parent_parser) | Adds arguments parsers for the batch list, batch show and batch status
commands
Args:
subparsers: Add parsers to this subparser object
parent_parser: The parent argparse.ArgumentParser object | juraj-google-style |
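A runnable sketch of the same nested-subparser layout using argparse alone; the subcommand names mirror the row but the per-command arguments and handlers are omitted:

import argparse

parser = argparse.ArgumentParser(prog='sawtooth')
subparsers = parser.add_subparsers(title='subcommands', dest='command')
subparsers.required = True

batch_parser = subparsers.add_parser('batch', help='Displays information about batches')
batch_sub = batch_parser.add_subparsers(title='subcommands', dest='subcommand')
batch_sub.required = True
for name in ('list', 'show', 'status', 'submit'):
    batch_sub.add_parser(name)

args = parser.parse_args(['batch', 'list'])
print(args.command, args.subcommand)  # batch list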