code | docstring | source
---|---|---|
def __init__(self, file_system, mount_point, environment_variables=None):
super(FileSystemWinRegistryFileReader, self).__init__()
self._file_system = file_system
self._path_resolver = self._CreateWindowsPathResolver(
file_system, mount_point, environment_variables=environment_variables)
|
Initializes a Windows Registry file reader object.
Args:
file_system (dfvfs.FileSystem): file system.
mount_point (dfvfs.PathSpec): mount point path specification.
environment_variables (Optional[list[EnvironmentVariableArtifact]]):
environment variables.
|
juraj-google-style
|
def parse_content(self, content):
self.active_lines_unparsed = get_active_lines(content) if content is not None else []
self.active_settings = split_kv_pairs(content, use_partition=False) if content is not None else []
|
Main parsing class method which stores all interesting data from the content.
Args:
content (context.content): Parser context content
|
juraj-google-style
|
def _disc_kn(clearness_index, airmass, max_airmass=12):
kt = clearness_index
am = airmass
am = min(am, max_airmass)
kt2 = kt * kt
kt3 = kt2 * kt
if kt <= 0.6:
a = 0.512 - 1.56*kt + 2.286*kt2 - 2.222*kt3
b = 0.37 + 0.962*kt
c = -0.28 + 0.932*kt - 2.048*kt2
else:
a = -5.743 + 21.77*kt - 27.49*kt2 + 11.56*kt3
b = 41.4 - 118.5*kt + 66.05*kt2 + 31.9*kt3
c = -47.01 + 184.2*kt - 222.0*kt2 + 73.81*kt3
delta_kn = a + b * math.exp(c*am)
Knc = 0.866 - 0.122*am + 0.0121*am**2 - 0.000653*am**3 + 1.4e-05*am**4
Kn = Knc - delta_kn
return Kn, am
|
Calculate Kn for `disc`
Args:
clearness_index : numeric
airmass : numeric
max_airmass : float
airmass > max_airmass is set to max_airmass before being used
in calculating Kn.
Returns:
Kn : numeric
am : numeric
airmass used in the calculation of Kn. am <= max_airmass.
|
juraj-google-style
|
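A minimal usage sketch for `_disc_kn` above, assuming the function (and the `math` module it relies on) is in scope; the numeric inputs are illustrative only.

```python
import math  # required by _disc_kn

# A clearness index below the 0.6 branch point selects the first coefficient set.
kn, am = _disc_kn(clearness_index=0.55, airmass=3.0)
print(kn, am)

# Airmass above max_airmass is clipped before Kn is computed.
_, am_clipped = _disc_kn(0.55, airmass=40.0, max_airmass=12)
assert am_clipped == 12
```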
def crossing_times(ts, c=0.0, d=0.0):
ts = ts.squeeze()
    if ts.ndim != 1:
raise ValueError('Currently can only use on single variable timeseries')
ts = (ts - c)
tsa = ts[0:(- 1)]
tsb = ts[1:]
zc = (np.nonzero((((tsa < 0) & (tsb >= 0)) | ((tsa > 0) & (tsb <= 0))))[0] + 1)
va = ts[(zc - 1)]
vb = ts[zc]
ct = (((np.abs(vb) * ts.tspan[(zc - 1)]) + (np.abs(va) * ts.tspan[zc])) / np.abs((vb - va)))
if (ts[0] == 0.0):
zc = np.r_[(np.array([0]), zc)]
ct = np.r_[(np.array([ts.tspan[0]]), ct)]
    if (d == 0.0) or (ct.shape[0] == 0):
return ct
dc = (np.nonzero((((tsa < d) & (tsb >= d)) | ((tsa > (- d)) & (tsb <= (- d)))))[0] + 1)
splice = np.searchsorted(dc, zc)
which_zc = np.r_[(np.array([0]), (np.nonzero((splice[0:(- 1)] - splice[1:]))[0] + 1))]
return ct[which_zc]
|
For a single variable timeseries, find the times at which the
value crosses ``c`` from above or below. Can optionally set a non-zero
``d`` to impose the condition that the value must wander at least ``d``
units away from ``c`` between crossings.
If the timeseries begins (or ends) exactly at ``c``, then time zero
(or the ending time) is also included as a crossing event,
so that the boundaries of the first and last excursions are included.
If the actual crossing time falls between two time steps, linear
interpolation is used to estimate the crossing time.
Args:
ts: Timeseries (single variable)
c (float): Critical value at which to report crossings.
d (float): Optional min distance from c to be attained between crossings.
Returns:
array of float
|
codesearchnet
|
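The `Timeseries` type with its `tspan` attribute is not shown in this snippet, so below is a standalone sketch of the same sign-change detection and linear interpolation on plain NumPy arrays (illustrative only).

```python
import numpy as np

t = np.linspace(0.0, 2.0 * np.pi, 200)
x = np.sin(t)
c = 0.0

xa, xb = x[:-1] - c, x[1:] - c
# Indices where the sign changes between consecutive samples.
zc = np.nonzero(((xa < 0) & (xb >= 0)) | ((xa > 0) & (xb <= 0)))[0] + 1
va, vb = x[zc - 1] - c, x[zc] - c
# Same weighted average of the bracketing time points used in crossing_times.
ct = (np.abs(vb) * t[zc - 1] + np.abs(va) * t[zc]) / np.abs(vb - va)
print(ct)  # ~[3.14159], the downward crossing of sin at pi
```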
def is_valid(container, path):
try:
tmp_hash_path = (container.filename + '.hash')
with open(tmp_hash_path, 'r') as tmp_file:
tmp_hash = tmp_file.readline()
    except IOError:
        LOG.info('No .hash-file in the tmp-directory.')
        return False
container_hash_path = (local.path(path) / 'gentoo.tar.bz2.hash')
if container_hash_path.exists():
with open(container_hash_path, 'r') as hash_file:
container_hash = hash_file.readline()
return (container_hash == tmp_hash)
return False
|
Checks if a container exists and is unpacked.
Args:
container: container whose .hash file is compared against the stored hash.
path: The location where the container is expected.
Returns:
True if the container is valid, False if the container needs to be
unpacked or if the path does not exist yet.
|
codesearchnet
|
def _FishScript(name, commands, default_options=None):
default_options = default_options or set()
global_options, options_map, subcommands_map = _GetMaps(name, commands, default_options)
fish_source = 'function __fish_using_command\n set cmd (commandline -opc)\n for i in (seq (count $cmd) 1)\n switch $cmd[$i]\n case "-*"\n case "*"\n if [ $cmd[$i] = $argv[1] ]\n return 0\n else\n return 1\n end\n end\n end\n return 1\nend\n\nfunction __option_entered_check\n set cmd (commandline -opc)\n for i in (seq (count $cmd))\n switch $cmd[$i]\n case "-*"\n if [ $cmd[$i] = $argv[1] ]\n return 1\n end\n end\n end\n return 0\nend\n\nfunction __is_prev_global\n set cmd (commandline -opc)\n set global_options {global_options}\n set prev (count $cmd)\n\n for opt in $global_options\n if [ "--$opt" = $cmd[$prev] ]\n echo $prev\n return 0\n end\n end\n return 1\nend\n\n'
subcommand_template = "complete -c {name} -n '__fish_using_command {command}' -f -a {subcommand}\n"
flag_template = "complete -c {name} -n '__fish_using_command {command};{prev_global_check} and __option_entered_check --{option}' -l {option}\n"
prev_global_check = ' and __is_prev_global;'
for command in set(subcommands_map.keys()).union(set(options_map.keys())):
for subcommand in subcommands_map[command]:
fish_source += subcommand_template.format(name=name, command=command, subcommand=subcommand)
for option in options_map[command].union(global_options):
check_needed = command != name
fish_source += flag_template.format(name=name, command=command, prev_global_check=prev_global_check if check_needed else '', option=option.lstrip('--'))
return fish_source.format(global_options=' '.join((f'"{option}"' for option in global_options)))
|
Returns a Fish script registering a completion function for the commands.
Args:
name: The first token in the commands, also the name of the command.
commands: A list of all possible commands that tab completion can complete
to. Each command is a list or tuple of the string tokens that make up
that command.
default_options: A dict of options that can be used with any command. Use
this if there are flags that can always be appended to a command.
Returns:
A string which is the Fish script. Source the fish script to enable tab
completion in Fish.
|
github-repos
|
def search_stack_for_var(varname, verbose=util_arg.NOT_QUIET):
curr_frame = inspect.currentframe()
if verbose:
print(' * Searching parent frames for: ' + six.text_type(varname))
frame_no = 0
while curr_frame.f_back is not None:
if varname in curr_frame.f_locals.keys():
if verbose:
print(' * Found local in frame: ' + six.text_type(frame_no))
return curr_frame.f_locals[varname]
if varname in curr_frame.f_globals.keys():
if verbose:
print(' * Found global in frame: ' + six.text_type(frame_no))
return curr_frame.f_globals[varname]
frame_no += 1
curr_frame = curr_frame.f_back
if verbose:
print('... Found nothing in all ' + six.text_type(frame_no) + ' frames.')
return None
|
Finds a variable (local or global) somewhere in the stack and returns its value
Args:
varname (str): variable name
Returns:
None if varname is not found else its value
|
juraj-google-style
|
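A dependency-free sketch of the same parent-frame walk (without the `utool`/`six` helpers used above), showing how `f_locals` and `f_globals` are checked frame by frame.

```python
import inspect

def find_in_stack(varname):
    # Start at the caller's frame and walk towards the outermost frame.
    frame = inspect.currentframe().f_back
    while frame is not None:
        if varname in frame.f_locals:
            return frame.f_locals[varname]
        if varname in frame.f_globals:
            return frame.f_globals[varname]
        frame = frame.f_back
    return None

def outer():
    secret = 42          # local that only exists in this frame
    return inner()

def inner():
    return find_in_stack('secret')

print(outer())  # 42
```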
def find(self, name):
collectors = self.get_collectors()
for collector in collectors:
if (name.lower() == collector['name'].lower()):
self.collector_id = collector['id']
return collector
return {'status': 'No results found.'}
|
Returns a dict of collector's details if found.
Args:
name (str): name of the collector to search for
|
codesearchnet
|
def is_descriptor_class(desc, include_abstract=False):
return (isinstance(desc, type) and issubclass(desc, Descriptor) and (True if include_abstract else (not inspect.isabstract(desc))))
|
r"""Check calculatable descriptor class or not.
Returns:
bool
|
codesearchnet
|
def get_impacted_files_from_tiny_model_summary(diff_with_last_commit: bool=False) -> List[str]:
repo = Repo(PATH_TO_REPO)
folder = Path(repo.working_dir)
if not diff_with_last_commit:
print(f'main is at {repo.refs.main.commit}')
print(f'Current head is at {repo.head.commit}')
commits = repo.merge_base(repo.refs.main, repo.head)
for commit in commits:
print(f'Branching commit: {commit}')
else:
print(f'main is at {repo.head.commit}')
commits = repo.head.commit.parents
for commit in commits:
print(f'Parent commit: {commit}')
if not os.path.isfile(folder / 'tests/utils/tiny_model_summary.json'):
return []
files = set()
for commit in commits:
with checkout_commit(repo, commit):
with open(folder / 'tests/utils/tiny_model_summary.json', 'r', encoding='utf-8') as f:
old_content = f.read()
with open(folder / 'tests/utils/tiny_model_summary.json', 'r', encoding='utf-8') as f:
new_content = f.read()
old_content = json.loads(old_content)
new_content = json.loads(new_content)
old_keys = set(old_content.keys())
new_keys = set(new_content.keys())
keys_with_diff = old_keys.symmetric_difference(new_keys)
common_keys = old_keys.intersection(new_keys)
for key in common_keys:
if old_content[key] != new_content[key]:
keys_with_diff.add(key)
impacted_model_classes = []
for key in keys_with_diff:
if key in new_keys:
impacted_model_classes.extend(new_content[key]['model_classes'])
with open(folder / 'src/transformers/__init__.py') as fp:
lines = fp.readlines()
new_lines = []
for line in lines:
if line == '_import_structure = {\n':
new_lines.append(line)
        elif line == 'if TYPE_CHECKING:\n':
            break
elif len(new_lines) > 0:
line = re.sub('is_.+_available\\(\\)', 'True', line)
line = line.replace('OptionalDependencyNotAvailable', 'Exception')
line = line.replace('Exception()', 'Exception')
new_lines.append(line)
with tempfile.TemporaryDirectory() as tmpdirname:
with open(os.path.join(tmpdirname, 'temp_init.py'), 'w') as fp:
fp.write(''.join(new_lines))
spec = importlib.util.spec_from_file_location('temp_init', os.path.join(tmpdirname, 'temp_init.py'))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
import_structure = module._import_structure
reversed_structure = {}
for key, values in import_structure.items():
for value in values:
reversed_structure[value] = key
for model_class in impacted_model_classes:
module = reversed_structure[model_class]
framework = ''
if model_class.startswith('TF'):
framework = 'tf'
elif model_class.startswith('Flax'):
framework = 'flax'
        fn = f"modeling_{module.split('.')[-1]}.py" if framework == '' else f"modeling_{framework}_{module.split('.')[-1]}.py"
files.add(f'src.transformers.{module}.{fn}'.replace('.', os.path.sep).replace(f'{os.path.sep}py', '.py'))
return sorted(files)
|
Return a list of python modeling files that are impacted by the changes of `tiny_model_summary.json` in between:
- the current head and the main branch if `diff_with_last_commit=False` (default)
- the current head and its parent commit otherwise.
Returns:
`List[str]`: The list of Python modeling files that are impacted by the changes of `tiny_model_summary.json`.
|
github-repos
|
def __add_scraped_requests_to_queue(self, queue_item, scraped_requests):
new_queue_items = []
for scraped_request in scraped_requests:
HTTPRequestHelper.patch_with_options(scraped_request, self.__options, queue_item)
if (not HTTPRequestHelper.complies_with_scope(queue_item, scraped_request, self.__options.scope)):
continue
if self.queue.has_request(scraped_request):
continue
scraped_request.depth = (queue_item.request.depth + 1)
if (self.__options.scope.max_depth is not None):
if (scraped_request.depth > self.__options.scope.max_depth):
continue
new_queue_item = self.queue.add_request(scraped_request)
new_queue_items.append(new_queue_item)
return new_queue_items
|
Convert the scraped requests to queue items, return them and also add them to the queue.
Args:
queue_item (:class:`nyawc.QueueItem`): The request/response pair that finished.
scraped_requests list(:class:`nyawc.http.Request`): All the requests that were found during this request.
Returns:
list(:class:`nyawc.QueueItem`): The new queue items.
|
codesearchnet
|
def _parse_batch_lastlog(last_log):
    regexp = re.compile(r'(-?[0-9]\d*):\W+(.*)')
wrong_commands = list()
for line in last_log:
result = regexp.match(line)
if result is not None:
status_code = result.group(1)
command = result.group(2)
if int(status_code) < 0:
wrong_commands.append((status_code, command))
return wrong_commands
|
This static method helps read the result of the commit, command by command.
Args:
last_log(list): A list containing, line by line, the result of committing the changes.
Returns:
A list of tuples that went wrong. The tuple will contain (*status_code*, *command*)
|
juraj-google-style
|
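Illustrative call, assuming `_parse_batch_lastlog` above and the `re` module are in scope; the log lines are made up. Only entries with a negative status code are reported back.

```python
last_log = [
    '0: set interfaces ge-0/0/0 unit 0',
    '-1: set vlans blue vlan-id 100',
    'unrelated output line',        # no status code, silently skipped
]
print(_parse_batch_lastlog(last_log))
# [('-1', 'set vlans blue vlan-id 100')]
```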
def populate_ast_nsarg_orthologs(ast, species):
ortholog_namespace = "EG"
if isinstance(ast, NSArg):
if re.match(ortholog_namespace, ast.canonical):
orthologs = bel.terms.orthologs.get_orthologs(
ast.canonical, list(species.keys())
)
for species_id in species:
if species_id in orthologs:
orthologs[species_id]["species_label"] = species[species_id]
ast.orthologs = copy.deepcopy(orthologs)
if hasattr(ast, "args"):
for arg in ast.args:
populate_ast_nsarg_orthologs(arg, species)
return ast
|
Recursively collect NSArg orthologs for BEL AST
This requires bo.collect_nsarg_norms() to be run first so NSArg.canonical is available
Args:
ast: AST at recursive point in belobj
species: dictionary of species ids vs labels used for ortholog lookup
|
juraj-google-style
|
def refresh(self, updated_self):
logger.debug('refreshing binary attributes')
self.mimetype = updated_self.binary.mimetype
self.data = updated_self.binary.data
|
method to refresh binary attributes and data
Args:
updated_self (Resource): resource this binary data attaches to
Returns:
None: updates attributes
|
codesearchnet
|
def __init__(self, cipher_suites=None):
super(TLS12AuthenticationSuite, self).__init__(cipher_suites)
self._protocol = ssl.PROTOCOL_TLSv1_2
|
Create a TLS12AuthenticationSuite object.
Args:
cipher_suites (list): A list of strings representing the names of
cipher suites to use. Overrides the default set of cipher
suites. Optional, defaults to None.
|
juraj-google-style
|
def _do_logoff(self):
session_uri = '/api/sessions/this-session'
self.delete(session_uri, logon_required=False)
self._session_id = None
self._session = None
self._headers.pop('X-API-Session', None)
|
Log off, unconditionally.
Raises:
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.HTTPError`
|
codesearchnet
|
def run_and_monitor(args, pid_to_wait, std_out_filter_fn=None, cwd=None):
monitor_process = None
try:
p = subprocess.Popen(args,
cwd=cwd,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pids_to_kill = [p.pid]
script = ('import %s;%s._wait_and_kill(%s, %s)' %
(__name__, __name__, str(pid_to_wait), str(pids_to_kill)))
monitor_process = subprocess.Popen(['python', '-c', script], env=os.environ)
while p.poll() is None:
line = p.stdout.readline()
if not six.PY2:
line = line.decode()
if std_out_filter_fn is None or std_out_filter_fn(line):
sys.stdout.write(line)
finally:
if monitor_process:
monitor_process.kill()
|
Start a process, and have it depend on another specified process.
Args:
args: the args of the process to start and monitor.
pid_to_wait: the process to wait on. If the process ends, also kill the started process.
std_out_filter_fn: a filter function which takes a string content from the stdout of the
started process, and returns True if the string should be redirected to console stdout.
cwd: the current working directory for the process to start.
|
juraj-google-style
|
def process_python_objects(data, filepath=None):
def _process(value):
if isinstance(value, dict):
for (k, v) in value.items():
value[k] = _process(v)
return value
elif isfunction(value):
func = value
if hasattr(func, '_early'):
import types
fn = types.FunctionType(func.func_code, func.func_globals.copy(), name=func.func_name, argdefs=func.func_defaults, closure=func.func_closure)
fn.func_globals['this'] = EarlyThis(data)
fn.func_globals.update(get_objects())
spec = getargspec(func)
args = (spec.args or [])
if (len(args) not in (0, 1)):
raise ResourceError('@early decorated function must take zero or one args only')
if args:
value_ = fn(data)
else:
value_ = fn()
return _process(value_)
elif hasattr(func, '_late'):
return SourceCode(func=func, filepath=filepath, eval_as_function=True)
elif (func.__name__ in package_rex_keys):
return SourceCode(func=func, filepath=filepath, eval_as_function=False)
else:
return func
else:
return value
def _trim(value):
if isinstance(value, dict):
for (k, v) in value.items():
if isfunction(v):
if (v.__name__ == 'preprocess'):
pass
else:
del value[k]
elif (ismodule(v) or k.startswith('__')):
del value[k]
else:
value[k] = _trim(v)
return value
data = _process(data)
data = _trim(data)
return data
|
Replace certain values in the given package data dict.
Does things like:
* evaluates @early decorated functions, and replaces with return value;
* converts functions into `SourceCode` instances so they can be serialized
out to installed packages, and evaluated later;
* strips some values (modules, __-leading variables) that are never to be
part of installed packages.
Returns:
dict: Updated dict.
|
codesearchnet
|
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
|
Checks for horizontal spacing around parentheses.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
juraj-google-style
|
def array_to_jsbuffer(array):
if array.ndim != 1:
raise TypeError('Only 1d arrays can be converted JS TypedArray.')
if array.dtype.name not in JS_ARRAY_TYPES:
raise TypeError('Array dtype not supported by JS TypedArray.')
js_type_name = array.dtype.name.capitalize() + 'Array'
data_base64 = base64.b64encode(array.tobytes()).decode('ascii')
    # The original multi-line JS template was stripped from this snippet; this is
    # a minimal reconstruction that decodes the base64 payload into the typed array.
    code = (
        '(function() {'
        ' var data = atob("%s");'
        ' var buf = new Uint8Array(data.length);'
        ' for (var i = 0; i < data.length; ++i) { buf[i] = data.charCodeAt(i); }'
        ' return new %s(buf.buffer);'
        '})()'
    ) % (data_base64, js_type_name)
return code
|
Serialize 1d NumPy array to JS TypedArray.
Data is serialized to base64-encoded string, which is much faster
and memory-efficient than json list serialization.
Args:
array: 1d NumPy array, dtype must be one of JS_ARRAY_TYPES.
Returns:
JS code that evaluates to a TypedArray as string.
Raises:
TypeError: if array dtype or shape not supported.
|
juraj-google-style
|
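Usage sketch for `array_to_jsbuffer` above, assuming the function and its module-level `JS_ARRAY_TYPES` constant (which includes `'float32'`) are importable.

```python
import numpy as np

arr = np.arange(4, dtype=np.float32)
js_code = array_to_jsbuffer(arr)
print(js_code)  # a JS expression that evaluates to Float32Array([0, 1, 2, 3])

try:
    array_to_jsbuffer(arr.reshape(2, 2))
except TypeError as exc:
    print(exc)   # only 1d arrays are accepted
```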
def delete_group_maintainer(self, grp_name, user):
self.service.delete_group_maintainer(
grp_name, user, self.url_prefix, self.auth, self.session,
self.session_send_opts)
|
Delete the given user from the maintainers of the named group.
Both group and user must already exist for this to succeed.
Args:
grp_name (string): Name of group.
user (string): User to remove from the group's maintainers.
Raises:
requests.HTTPError on failure.
|
juraj-google-style
|
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
def _should_dropout(p):
return not isinstance(p, float) or p < 1
if _should_dropout(self._input_keep_prob):
inputs = self._dropout(inputs, 'input', self._recurrent_input_noise, self._input_keep_prob)
output, new_state = cell_call_fn(inputs, state, **kwargs)
if _should_dropout(self._state_keep_prob):
shallow_filtered_substructure = nest.get_traverse_shallow_structure(self._dropout_state_filter, new_state)
new_state = self._dropout(new_state, 'state', self._recurrent_state_noise, self._state_keep_prob, shallow_filtered_substructure)
if _should_dropout(self._output_keep_prob):
output = self._dropout(output, 'output', self._recurrent_output_noise, self._output_keep_prob)
return (output, new_state)
|
Runs the wrapped cell and applies dropout.
Args:
inputs: A tensor with wrapped cell's input.
state: A tensor or tuple of tensors with wrapped cell's state.
cell_call_fn: Wrapped cell's method to use for step computation (cell's
`__call__` or 'call' method).
**kwargs: Additional arguments.
Returns:
A pair containing:
- Output: A tensor with cell's output.
- New state: A tensor or tuple of tensors with new wrapped cell's state.
|
github-repos
|
def __init__(self, channel):
self.ListProfiles = channel.unary_unary(
"/google.cloud.talent.v4beta1.ProfileService/ListProfiles",
request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.ListProfilesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.ListProfilesResponse.FromString,
)
self.CreateProfile = channel.unary_unary(
"/google.cloud.talent.v4beta1.ProfileService/CreateProfile",
request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.CreateProfileRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__pb2.Profile.FromString,
)
self.GetProfile = channel.unary_unary(
"/google.cloud.talent.v4beta1.ProfileService/GetProfile",
request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.GetProfileRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__pb2.Profile.FromString,
)
self.UpdateProfile = channel.unary_unary(
"/google.cloud.talent.v4beta1.ProfileService/UpdateProfile",
request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.UpdateProfileRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__pb2.Profile.FromString,
)
self.DeleteProfile = channel.unary_unary(
"/google.cloud.talent.v4beta1.ProfileService/DeleteProfile",
request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.DeleteProfileRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.SearchProfiles = channel.unary_unary(
"/google.cloud.talent.v4beta1.ProfileService/SearchProfiles",
request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.SearchProfilesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.SearchProfilesResponse.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def dataset_docs_str(datasets=None):
module_to_builder = make_module_to_builder_dict(datasets)
sections = sorted(list(module_to_builder.keys()))
section_tocs = []
section_docs = []
for section in sections:
builders = tf.nest.flatten(module_to_builder[section])
builders = sorted(builders, key=lambda b: b.name)
builder_docs = [document_single_builder(builder) for builder in builders]
section_doc = SECTION_DATASETS.format(
section_name=section, datasets="\n".join(builder_docs))
section_toc = create_section_toc(section, builders)
section_docs.append(section_doc)
section_tocs.append(section_toc)
full_doc = DOC.format(toc="\n".join(section_tocs),
datasets="\n".join(section_docs))
return full_doc
|
Create dataset documentation string for given datasets.
Args:
datasets: list of datasets for which to create documentation.
If None, then all available datasets will be used.
Returns:
string describing the datasets (in the MarkDown format).
|
juraj-google-style
|
def _create_op_from_tf_operation(self, c_op, compute_device=True) -> 'Operation':
self._check_not_finalized()
ret = Operation._from_c_op(c_op=c_op, g=self)
name_key = ret.name.lower()
if name_key not in self._names_in_use:
self._names_in_use[name_key] = 1
self._create_op_helper(ret, compute_device=compute_device)
return ret
|
Creates an `Operation` in this graph from the supplied TF_Operation.
This method is like create_op() except the new Operation is constructed
using `c_op`. The returned Operation will have `c_op` as its _c_op
field. This is used to create Operation objects around TF_Operations created
indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).
This function does not call Operation._control_flow_post_processing or
Graph._control_dependencies_for_inputs (since the inputs may not be
available yet). The caller is responsible for calling these methods.
Args:
c_op: a wrapped TF_Operation
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Returns:
An `Operation` object.
|
github-repos
|
def image(request, data):
try:
width = int(request.GET.get("w", PYDENTICON_WIDTH))
except ValueError:
raise SuspiciousOperation("Identicon width must be a positive integer.")
try:
height = int(request.GET.get("h", PYDENTICON_HEIGHT))
except ValueError:
raise SuspiciousOperation("Identicon height must be a positive integer.")
output_format = request.GET.get("f", PYDENTICON_FORMAT)
try:
padding = [int(p) for p in request.GET["p"].split(",")]
except KeyError:
padding = PYDENTICON_PADDING
except ValueError:
raise SuspiciousOperation("Identicon padding must consist out of 4 positive integers separated with commas.")
if "i" in request.GET:
inverted = request.GET.get("i")
if inverted.lower() == "true":
inverted = True
elif inverted.lower() == "false":
inverted = False
else:
raise SuspiciousOperation("Inversion parameter must be a boolean (true/false).")
else:
inverted = PYDENTICON_INVERT
if not isinstance(width, int) or width <= 0:
raise SuspiciousOperation("Identicon width must be a positive integer.")
if not isinstance(height, int) or height <= 0:
raise SuspiciousOperation("Identicon height must be a positive integer.")
if not all([isinstance(p, int) and p >= 0 for p in padding]) or len(padding) != 4:
raise SuspiciousOperation("Padding must be a 4-element tuple consisting out of positive integers.")
if output_format == "png":
content_type = "image/png"
elif output_format == "ascii":
content_type = "text/plain"
else:
raise SuspiciousOperation("Unsupported identicon format requested - '%s' % output_format")
generator = Generator(PYDENTICON_ROWS, PYDENTICON_COLUMNS,
foreground = PYDENTICON_FOREGROUND, background = PYDENTICON_BACKGROUND,
digest = PYDENTICON_DIGEST)
content = generator.generate(data, width, height, padding=padding, output_format=output_format, inverted=inverted)
response = HttpResponse(content, content_type=content_type)
return response
|
Generates identicon image based on passed data.
Arguments:
data - Data which should be used for generating an identicon. This data
will be used in order to create a digest which is used for generating the
identicon. If the data passed is a hex digest already, the digest will be
used as-is.
Returns:
Identicon image in raw format.
|
juraj-google-style
|
def name(self):
return ctypes.cast(self.sName, ctypes.c_char_p).value.decode()
|
Returns the name of the device.
Args:
self (JLinkDeviceInfo): the ``JLinkDeviceInfo`` instance
Returns:
Device name.
|
juraj-google-style
|
def add_prop_descriptor_to_class(self, class_name, new_class_attrs, names_with_refs, container_names, dataspecs):
from .bases import ContainerProperty
from .dataspec import DataSpec
name = self.name
if (name in new_class_attrs):
raise RuntimeError(('Two property generators both created %s.%s' % (class_name, name)))
new_class_attrs[name] = self
if self.has_ref:
names_with_refs.add(name)
if isinstance(self, BasicPropertyDescriptor):
if isinstance(self.property, ContainerProperty):
container_names.add(name)
if isinstance(self.property, DataSpec):
dataspecs[name] = self
|
``MetaHasProps`` calls this during class creation as it iterates
over properties to add, to update its registry of new properties.
The parameters passed in are mutable and this function is expected to
update them accordingly.
Args:
class_name (str) :
name of the class this descriptor is added to
new_class_attrs(dict[str, PropertyDescriptor]) :
mapping of attribute names to PropertyDescriptor that this
function will update
names_with_refs (set[str]) :
set of all property names for properties that also have
references, that this function will update
container_names (set[str]) :
set of all property names for properties that are
container props, that this function will update
dataspecs(dict[str, PropertyDescriptor]) :
mapping of attribute names to PropertyDescriptor for DataSpec
properties that this function will update
Return:
None
|
codesearchnet
|
def get_named_tensor(self, name):
if (name in self.named_tensors):
return (True, self.named_tensors[name])
else:
return (False, None)
|
Returns a named tensor if available.
Returns:
valid: True if named tensor found, False otherwise
tensor: If valid, will be a tensor, otherwise None
|
codesearchnet
|
def encode_all_features(dataset, vocabulary):
def my_fn(features):
ret = {}
for k, v in features.items():
v = vocabulary.encode_tf(v)
v = tf.concat([tf.to_int64(v), [1]], 0)
ret[k] = v
return ret
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
|
Encode all features.
Args:
dataset: a tf.data.Dataset
vocabulary: a vocabulary.Vocabulary
Returns:
a tf.data.Dataset
|
juraj-google-style
|
def _normalize_direction(heading: int) -> int:
    while heading > 359:
        heading -= 360
    while heading < 0:
        heading += 360
return heading
|
Make sure that 0 <= heading < 360
Args:
heading: base heading
Returns: corrected heading
|
codesearchnet
|
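Quick sanity checks for the wrap-around above (with the 360-degree step applied in the corrected loops); the values are illustrative.

```python
assert _normalize_direction(365) == 5
assert _normalize_direction(-10) == 350
assert _normalize_direction(360) == 0
assert _normalize_direction(359) == 359
```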
def _FindCodeObjectsReferents(module, start_objects, visit_recorder):
def CheckIgnoreCodeObject(code_object):
        """Checks if the code object can be ignored.

        Code objects that are not implemented in the module, or are from a lambda or
        generator expression can be ignored.

        If the module was precompiled, the code object may point to .py file, while
        the module says that it originated from .pyc file. We just strip extension
        altogether to work around it.

        Args:
            code_object: code object that we want to check against module.

        Returns:
            True if the code object can be ignored, False otherwise.
        """
if (code_object.co_name in ('<lambda>', '<genexpr>')):
return True
code_object_file = os.path.splitext(code_object.co_filename)[0]
module_file = os.path.splitext(module.__file__)[0]
if (code_object_file == module_file):
return False
return True
def CheckIgnoreClass(cls):
'Returns True if the class is definitely not coming from "module".'
cls_module = sys.modules.get(cls.__module__)
if (not cls_module):
return False
return ((cls_module is not module) and (getattr(cls_module, '__file__', None) != module.__file__))
code_objects = set()
current = start_objects
for obj in current:
        visit_recorder.Record(obj)
depth = 0
while (current and (depth < _MAX_REFERENTS_BFS_DEPTH)):
new_current = []
for current_obj in current:
referents = gc.get_referents(current_obj)
if ((current_obj is not module.__dict__) and (len(referents) > _MAX_OBJECT_REFERENTS)):
continue
for obj in referents:
if (isinstance(obj, _BFS_IGNORE_TYPES) or (not visit_recorder.Record(obj))):
continue
if (isinstance(obj, types.CodeType) and CheckIgnoreCodeObject(obj)):
continue
if (isinstance(obj, six.class_types) and CheckIgnoreClass(obj)):
continue
if isinstance(obj, types.CodeType):
code_objects.add(obj)
else:
new_current.append(obj)
current = new_current
depth += 1
return code_objects
|
Looks for all the code objects referenced by objects in start_objects.
The traversal implemented by this function is a shallow one. In other words
if the reference chain is a -> b -> co1 -> c -> co2, this function will
return [co1] only.
The traversal is implemented with BFS. The maximum depth is limited to avoid
touching all the objects in the process. Each object is only visited once
using visit_recorder.
Args:
module: module in which we are looking for code objects.
start_objects: initial set of objects for the BFS traversal.
visit_recorder: instance of _VisitRecorder class to ensure each object is
visited at most once.
Returns:
List of code objects.
|
codesearchnet
|
def convert_bboxes_from_albumentations(bboxes, target_format, rows, cols, check_validity=False):
return [convert_bbox_from_albumentations(bbox, target_format, rows, cols, check_validity) for bbox in bboxes]
|
Convert a list of bounding boxes from the format used by albumentations to a format, specified
in `target_format`.
Args:
bboxes (list): List of bounding box with coordinates in the format used by albumentations
target_format (str): required format of the output bounding box. Should be 'coco' or 'pascal_voc'.
rows (int): image height
cols (int): image width
check_validity (bool): check if all boxes are valid boxes
|
codesearchnet
|
def MultiNotifyQueue(self, notifications, mutation_pool=None):
extract_queue = (lambda notification: notification.session_id.Queue())
for (queue, notifications) in iteritems(collection.Group(notifications, extract_queue)):
self._MultiNotifyQueue(queue, notifications, mutation_pool=mutation_pool)
|
This is the same as NotifyQueue but for several session_ids at once.
Args:
notifications: A list of notifications.
mutation_pool: A MutationPool object to schedule Notifications on.
Raises:
RuntimeError: An invalid session_id was passed.
|
codesearchnet
|
def automatic_control_dependencies(f):
def wrapper(*args, **kwargs):
with AutomaticControlDependencies() as a:
result = f(*args, **kwargs)
result_flat = [a.mark_as_return(t) for t in nest.flatten(result)]
return nest.pack_sequence_as(result, result_flat)
return tf_decorator.make_decorator(f, wrapper)
|
Wraps f to automatically insert control dependencies.
The inserted dependencies ensure that:
1. All stateful ops in f run when the result of f runs
2. Updates to the same resources happen in order.
Args:
f: the function to be wrapped.
Returns:
The wrapped function.
|
github-repos
|
def argv(cls, name, short_name=None, type=None, help=None):
cls.__hierarchy.append(argv.Argv(name, short_name, type, help))
|
Set command line arguments as a source
Parses the command line arguments described by the parameters.
Args:
name: the long name of the argument (foo)
short_name: the optional short name of the argument (f)
type: the optional type of the argument, defaults to bool
help: the optional help text for the argument
|
juraj-google-style
|
def _PromptUserForPartitionIdentifiers(
self, volume_system, volume_identifiers):
print_header = True
while True:
if print_header:
self._PrintTSKPartitionIdentifiersOverview(
volume_system, volume_identifiers)
print_header = False
lines = self._textwrapper.wrap(self._USER_PROMPT_TSK)
self._output_writer.Write('\n'.join(lines))
self._output_writer.Write('\n\nPartition identifiers: ')
try:
selected_volumes = self._ReadSelectedVolumes(volume_system, prefix='p')
if (selected_volumes and
not set(selected_volumes).difference(volume_identifiers)):
break
except ValueError:
pass
self._output_writer.Write('\n')
lines = self._textwrapper.wrap(
'Unsupported partition identifier(s), please try again or abort with '
'Ctrl^C.')
self._output_writer.Write('\n'.join(lines))
self._output_writer.Write('\n\n')
return selected_volumes
|
Prompts the user to provide partition identifiers.
Args:
volume_system (dfvfs.TSKVolumeSystem): volume system.
volume_identifiers (list[str]): volume identifiers including prefix.
Returns:
list[str]: selected volume identifiers including prefix or None.
|
juraj-google-style
|
def get_ethernet_settings(self):
uri = '{}/ethernetSettings'.format(self.data['uri'])
return self._helper.do_get(uri)
|
Gets the Ethernet interconnect settings for the Logical Interconnect.
Returns:
dict: Ethernet Interconnect Settings
|
codesearchnet
|
def close(self):
self._dll.JLINKARM_Close()
if (self._lock is not None):
del self._lock
self._lock = None
return None
|
Closes the open J-Link.
Args:
self (JLink): the ``JLink`` instance
Returns:
``None``
Raises:
JLinkException: if there is no connected JLink.
|
codesearchnet
|
def set_acl(self, role, users):
acl_updates = [{"user": user, "role": role} for user in users]
r = fapi.update_repository_method_acl(
self.namespace, self.name, self.snapshot_id,
acl_updates, self.api_url
)
fapi._check_response_code(r, 200)
|
Set permissions for this method.
Args:
role (str): Access level
one of "OWNER", "READER", "WRITER", or "NO ACCESS"
users (list(str)): List of users to give role to
|
juraj-google-style
|
def combine(self, x):
depth = tf.shape(x)[-1]
x *= tf.expand_dims(self._nonpadding, -1)
ret = tf.unsorted_segment_sum(
x, self._flat_indices, num_segments=self._batch * self._length)
ret = tf.reshape(ret, [self._batch, self._length, depth])
return ret
|
Return the output from the experts.
When one example goes to multiple experts, the outputs are summed.
Args:
x: a Tensor with shape [batch, num_experts, expert_capacity, depth]
Returns:
a `Tensor` with shape `[batch, length, depth]`
|
juraj-google-style
|
def are_equal(self, sp1, sp2):
for s1 in sp1.keys():
spin1 = getattr(s1, 'spin', 0)
oxi1 = getattr(s1, 'oxi_state', 0)
for s2 in sp2.keys():
spin2 = getattr(s2, 'spin', 0)
oxi2 = getattr(s2, 'oxi_state', 0)
if ((s1.symbol == s2.symbol) and (oxi1 == oxi2) and (spin2 == (- spin1))):
break
else:
return False
return True
|
True if species are exactly the same, i.e., Fe2+ == Fe2+ but not
Fe3+, and the spins are reversed, i.e., spin up maps to spin down,
and vice versa.
Args:
sp1: First species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
sp2: Second species. A dict of {specie/element: amt} as per the
definition in Site and PeriodicSite.
Returns:
Boolean indicating whether species are equal.
|
codesearchnet
|
def is_subgroup(self, supergroup):
warnings.warn("This is not fully functional. Only trivial subsets are tested right now. ")
return set(self.symmetry_ops).issubset(supergroup.symmetry_ops)
|
True if this group is a subgroup of the supplied group.
Args:
supergroup (SymmetryGroup): Supergroup to test.
Returns:
True if this group is a subgroup of the supplied group.
|
juraj-google-style
|
def eval(self, session=None):
return self._variable.eval(session=session)
|
In a session, computes and returns the value of this variable.
This is not a graph construction method, it does not add ops to the graph.
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
passed, the default session is used. See `tf.compat.v1.Session` for more
information on launching a graph and on sessions.
```python
v = tf.Variable([1, 2])
init = tf.compat.v1.global_variables_initializer()
with tf.compat.v1.Session() as sess:
sess.run(init)
# Usage passing the session explicitly.
print(v.eval(sess))
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
print(v.eval())
```
Args:
session: The session to use to evaluate this variable. If none, the
default session is used.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
|
github-repos
|
def run(self, tag=None, output=None, **kwargs):
start = datetime.datetime.now()
count = 0
if tag:
tag = Uri(tag)
xml_generator = etree.iterparse(self.source, tag=tag.etree)
else:
xml_generator = etree.iterparse(self.source)
i = 0
for (event, element) in xml_generator:
type_tags = element.findall(_RDF_TYPE_TAG)
rdf_types = [el.get(_RES_TAG) for el in type_tags if el.get(_RES_TAG)]
if (str(self.filter_val) in rdf_types):
pdb.set_trace()
count += 1
i += 1
element.clear()
print("Found '{}' items in {}".format(count, (datetime.datetime.now() - start)))
|
Runs the extractor.
Args:
-----
tag: optional tag URI to filter elements on during iterparse
output: ['filepath', None]
codesearchnet
|
def determine_git_ref(self, config):
ref_config_keys = 0
for i in ['commit', 'tag', 'branch']:
if config.get(i):
ref_config_keys += 1
if ref_config_keys > 1:
raise ImportError("Fetching remote git sources failed: "
"conflicting revisions (e.g. 'commit', 'tag', "
"'branch') specified for a package source")
if config.get('commit'):
ref = config['commit']
elif config.get('tag'):
ref = config['tag']
else:
ref = self.git_ls_remote(
config['uri'],
self.determine_git_ls_remote_ref(config)
)
if sys.version_info[0] > 2 and isinstance(ref, bytes):
return ref.decode()
return ref
|
Determine the ref to be used for 'git checkout'.
Args:
config (dict): git config dictionary
Returns:
str: A commit id or tag name
|
juraj-google-style
|
def parse(type: Type):
def decorator(parser):
EnvVar.parsers[type] = parser
return parser
return decorator
|
Register a parser for an attribute type.
Parsers will be used to parse `str` type objects from either
the commandline arguments or environment variables.
Args:
type: the type the decorated function will be responsible
for parsing a environment variable to.
|
codesearchnet
|
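A self-contained sketch of the registry pattern used by `parse`, with a stand-in `EnvVar` class since the real one is not shown in this snippet.

```python
from typing import Type

class EnvVar:
    # Stand-in for the real EnvVar: only the parser registry is modelled here.
    parsers = {}

def parse(type: Type):
    def decorator(parser):
        EnvVar.parsers[type] = parser
        return parser
    return decorator

@parse(bool)
def parse_bool(raw: str) -> bool:
    return raw.strip().lower() in ('1', 'true', 'yes', 'on')

print(EnvVar.parsers[bool]('YES'))  # True
```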
def parse_column_path(column: str) -> list:
nested_columns = []
for col in column.split('.'):
parts = PATTERN.match(col)
if parts:
column_name, key = (parts.groups()[0], parts.groups()[1])
else:
column_name, key = (col, None)
if not column_name:
raise ValueError(f'Invalid column path: {column}')
nested_columns.append((column_name, key))
return nested_columns
|
Parse the column string to extract nested fields and array indices.
Args:
column (str): The column string with potential nested fields and array
indices.
Returns:
list: A list of tuples, where each tuple contains the column name and the
key/index.
|
github-repos
|
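The module-level `PATTERN` regex used by `parse_column_path` is not shown above; the sketch below pairs the function with a plausible stand-in pattern (defined in the same module) that captures an optional bracketed key or index after a field name.

```python
import re

# Hypothetical stand-in for the missing PATTERN constant.
PATTERN = re.compile(r'^(\w+)\[(\w+)\]$')

print(parse_column_path('user.addresses[0].city'))
# [('user', None), ('addresses', '0'), ('city', None)]
```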
def __init__(self, input_dataset, target_device, source_device='/cpu:0'):
self._input_dataset = input_dataset._apply_debug_options()
self._target_device = target_device
spec = framework_device.DeviceSpec().from_string(self._target_device)
self._is_gpu_target = spec.device_type == 'GPU'
self._source_device_string = source_device
self._source_device = ops.convert_to_tensor(source_device)
wrap_ds_variant = gen_dataset_ops.wrap_dataset_variant(self._input_dataset._variant_tensor)
@def_function.function()
def _init_func():
ds_variant = gen_dataset_ops.unwrap_dataset_variant(wrap_ds_variant)
resource = gen_dataset_ops.anonymous_iterator(**self._input_dataset._flat_structure)
with ops.control_dependencies([gen_dataset_ops.make_iterator(ds_variant, resource)]):
return gen_dataset_ops.iterator_to_string_handle(resource)
init_func_concrete = _init_func.get_concrete_function()
@def_function.function()
def _remote_init_func():
return functional_ops.remote_call(target=self._source_device, args=init_func_concrete.captured_inputs, Tout=[dtypes.string], f=init_func_concrete)
self._init_func = _remote_init_func.get_concrete_function()
self._init_captured_args = self._init_func.captured_inputs
@def_function.function(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _next_func(string_handle):
with ops.device(self._source_device_string):
iterator = iterator_ops.Iterator.from_string_handle(string_handle, dataset_ops.get_legacy_output_types(self), dataset_ops.get_legacy_output_shapes(self), dataset_ops.get_legacy_output_classes(self))
return structure.to_tensor_list(self.element_spec, iterator.get_next())
next_func_concrete = _next_func.get_concrete_function()
@def_function.function(input_signature=[tensor_spec.TensorSpec([], dtypes.string)], experimental_attributes={'experimental_ints_on_device': True})
def _remote_next_func(string_handle):
return functional_ops.remote_call(target=self._source_device, args=[string_handle] + next_func_concrete.captured_inputs, Tout=self._input_dataset._flat_types, f=next_func_concrete)
self._next_func = _remote_next_func.get_concrete_function()
self._next_captured_args = self._next_func.captured_inputs
@def_function.function(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _finalize_func(string_handle):
iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(string_handle, **self._input_dataset._flat_structure)
with ops.control_dependencies([resource_variable_ops.destroy_resource_op(iterator_resource, ignore_lookup_error=True)]):
return array_ops.constant(0, dtypes.int64)
finalize_func_concrete = _finalize_func.get_concrete_function()
@def_function.function(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _remote_finalize_func(string_handle):
return functional_ops.remote_call(target=self._source_device, args=[string_handle] + finalize_func_concrete.captured_inputs, Tout=[dtypes.int64], f=finalize_func_concrete)
self._finalize_func = _remote_finalize_func.get_concrete_function()
self._finalize_captured_args = self._finalize_func.captured_inputs
g = ops.get_default_graph()
self._init_func.add_to_graph(g)
self._next_func.add_to_graph(g)
self._finalize_func.add_to_graph(g)
with ops.device(self._target_device):
variant_tensor = gen_dataset_ops.generator_dataset(self._init_captured_args, self._next_captured_args, self._finalize_captured_args, init_func=self._init_func, next_func=self._next_func, finalize_func=self._finalize_func, **self._input_dataset._flat_structure)
super(_CopyToDeviceDataset, self).__init__(input_dataset, variant_tensor)
|
Constructs a _CopyToDeviceDataset.
Args:
input_dataset: `Dataset` to be copied
target_device: The name of the device to which elements would be copied.
source_device: Device where input_dataset would be placed.
|
github-repos
|
def dot(r1, r2):
if r1.size != r2.size:
raise ValueError("Both arguments must have the same input size.")
if r1.deriv != r2.deriv:
raise ValueError("Both arguments must have the same deriv.")
return r1.x*r2.x + r1.y*r2.y + r1.z*r2.z
|
Compute the dot product
Arguments:
| ``r1``, ``r2`` -- two :class:`Vector3` objects
(Returns a Scalar)
|
juraj-google-style
|
def parse(self, values):
type_map = {}
for name, t in self._hparam_types.items():
param_type, _ = t
type_map[name] = param_type
values_map = parse_values(values, type_map)
return self.override_from_dict(values_map)
|
Override existing hyperparameter values, parsing new values from a string.
See parse_values for more detail on the allowed format for values.
Args:
values: String. Comma separated list of `name=value` pairs where 'value'
must follow the syntax described above.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values` cannot be parsed or a hyperparameter in `values`
doesn't exist.
|
juraj-google-style
|
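Illustrative use of the comma-separated `name=value` format accepted by `parse`, assuming a TF1-era `HParams` object (e.g. `tf.contrib.training.HParams`) with these hyperparameters already defined.

```python
from tensorflow.contrib.training import HParams  # TF1-only import

hparams = HParams(learning_rate=0.1, num_layers=2, activation='relu')
hparams.parse('learning_rate=0.05,num_layers=4')
print(hparams.learning_rate, hparams.num_layers)  # 0.05 4
```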
def get_checkpoint_factories_and_keys(object_names, object_map=None):
checkpoint_factory_map = object_identity.ObjectIdentityDictionary()
unmapped_registered_savers = collections.defaultdict(dict)
for trackable, object_name in object_names.items():
object_to_save = util.get_mapped_trackable(trackable, object_map)
saver_name = registration.get_registered_saver_name(object_to_save)
if saver_name:
unmapped_registered_savers[saver_name][object_name] = trackable
else:
checkpoint_factory_map[trackable] = []
for name, saveable_factory in saveable_object_util.saveable_objects_from_trackable(object_to_save).items():
key_suffix = saveable_compat.get_saveable_name(object_to_save) or name
checkpoint_key = trackable_utils.checkpoint_key(object_name, key_suffix)
if not saveable_compat.force_checkpoint_conversion_enabled():
name = key_suffix
checkpoint_factory_map[trackable].append(_CheckpointFactoryData(factory=saveable_factory, name=name, checkpoint_key=checkpoint_key))
return (checkpoint_factory_map, unmapped_registered_savers)
|
Gets a map of saveable factories and corresponding checkpoint keys.
Args:
object_names: a dictionary that maps `Trackable` objects to auto-generated
string names.
object_map: a dictionary mapping `Trackable` to copied `Trackable` objects.
The copied objects are generated from `Trackable.
_export_to_saved_model_graph()` which copies the object into another
graph. Generally only resource objects (e.g. Variables, Tables) will be
in this map.
Returns:
A tuple of (
Dictionary mapping trackable -> list of _CheckpointFactoryData,
Dictionary mapping registered saver name -> {object name -> trackable})
|
github-repos
|
def create_function(self, vpc_config):
zip_file = 'lambda-holder.zip'
with zipfile.ZipFile(zip_file, mode='w') as zipped:
zipped.writestr('index.py', 'print "Hello world"')
contents = ''
with open('lambda-holder.zip', 'rb') as openfile:
contents = openfile.read()
LOG.info('Creating lambda function: %s', self.app_name)
try:
self.lambda_client.create_function(
Environment=self.lambda_environment,
FunctionName=self.app_name,
Runtime=self.runtime,
Role=self.role_arn,
Handler=self.handler,
Code={'ZipFile': contents},
Description=self.description,
Timeout=int(self.timeout),
MemorySize=int(self.memory),
Publish=False,
VpcConfig=vpc_config,
Tags={'app_group': self.group,
'app_name': self.app_name})
except boto3.exceptions.botocore.exceptions.ClientError as error:
if 'CreateNetworkInterface' in error.response['Error']['Message']:
message = '{0} is missing "ec2:CreateNetworkInterface"'.format(self.role_arn)
LOG.critical(message)
raise SystemExit(message)
raise
LOG.info("Successfully created Lambda function and alias")
|
Create lambda function, configures lambda parameters.
We need to upload non-zero zip when creating function. Uploading
hello_world python lambda function since AWS doesn't care which
executable is in ZIP.
Args:
vpc_config (dict): Dictionary of SubnetIds and SecurityGroupsIds for using
a VPC in lambda
|
juraj-google-style
|
def submit_files(self, halt_on_error=True):
if (self.halt_on_file_error is not None):
halt_on_error = self.halt_on_file_error
upload_status = []
        for xid, content_data in list(self._files.items()):
del self._files[xid]
status = True
if (self.debug and (xid in self.saved_xids)):
self.tcex.log.debug('skipping previously saved file {}.'.format(xid))
continue
content = content_data.get('fileContent')
if callable(content):
content = content_data.get('fileContent')(xid)
if (content is None):
upload_status.append({'uploaded': False, 'xid': xid})
self.tcex.log.warning('File content was null for xid {}.'.format(xid))
continue
if (content_data.get('type') == 'Document'):
api_branch = 'documents'
elif (content_data.get('type') == 'Report'):
api_branch = 'reports'
url = '/v2/groups/{}/{}/upload'.format(api_branch, xid)
headers = {'Content-Type': 'application/octet-stream'}
params = {'owner': self._owner}
r = self.submit_file_content('POST', url, content, headers, params, halt_on_error)
if (r.status_code == 401):
self.tcex.log.info('Received 401 status code using POST. Trying PUT to update.')
r = self.submit_file_content('PUT', url, content, headers, params, halt_on_error)
self.tcex.log.debug('{} Upload URL: {}.'.format(content_data.get('type'), r.url))
if (not r.ok):
status = False
self.tcex.handle_error(585, [r.status_code, r.text], halt_on_error)
elif self.debug:
self.saved_xids.append(xid)
self.tcex.log.info('Status {} for file upload with xid {}.'.format(r.status_code, xid))
upload_status.append({'uploaded': status, 'xid': xid})
return upload_status
|
Submit Files for Documents and Reports to ThreatConnect API.
Critical Errors
* There is insufficient document storage allocated to this account.
Args:
halt_on_error (bool, default:True): If True any exception will raise an error.
Returns:
dict: The upload status for each xid.
|
codesearchnet
|
def search(cls, session, queries):
return super(Conversations, cls).search(session, queries, SearchConversation)
|
Search for a conversation given a domain.
Args:
session (requests.sessions.Session): Authenticated session.
queries (helpscout.models.Domain or iter): The queries for the
domain. If a ``Domain`` object is provided, it will simply be
returned. Otherwise, a ``Domain`` object will be generated
from the complex queries. In this case, the queries should
conform to the interface in
:func:`helpscout.domain.Domain.from_tuple`.
Returns:
RequestPaginator(output_type=helpscout.models.SearchCustomer):
SearchCustomer iterator.
|
codesearchnet
|
def __init__(self, identifier):
super(Volume, self).__init__()
self.identifier = identifier
self._attributes = {}
self._extents = []
self._is_parsed = False
|
Initializes a volume.
Args:
identifier (str): identifier of the volume within the volume system.
|
juraj-google-style
|
def AddEventSource(self, event_source):
self._RaiseIfNotWritable()
self._AddAttributeContainer(
self._CONTAINER_TYPE_EVENT_SOURCE, event_source)
|
Adds an event source.
Args:
event_source (EventSource): event source.
Raises:
IOError: when the storage file is closed or read-only.
OSError: when the storage file is closed or read-only.
|
juraj-google-style
|
def stacked_bi_rnn(units: tf.Tensor, n_hidden_list: List, cell_type='gru', seq_lengths=None, use_peepholes=False, name='RNN_layer'):
for (n, n_hidden) in enumerate(n_hidden_list):
with tf.variable_scope(((name + '_') + str(n))):
if (cell_type == 'gru'):
forward_cell = tf.nn.rnn_cell.GRUCell(n_hidden)
backward_cell = tf.nn.rnn_cell.GRUCell(n_hidden)
elif (cell_type == 'lstm'):
forward_cell = tf.nn.rnn_cell.LSTMCell(n_hidden, use_peepholes=use_peepholes)
backward_cell = tf.nn.rnn_cell.LSTMCell(n_hidden, use_peepholes=use_peepholes)
else:
raise RuntimeError('cell_type must be either gru or lstm')
((rnn_output_fw, rnn_output_bw), (fw, bw)) = tf.nn.bidirectional_dynamic_rnn(forward_cell, backward_cell, units, dtype=tf.float32, sequence_length=seq_lengths)
units = tf.concat([rnn_output_fw, rnn_output_bw], axis=2)
if (cell_type == 'gru'):
last_units = tf.concat([fw, bw], axis=1)
else:
((c_fw, h_fw), (c_bw, h_bw)) = (fw, bw)
c = tf.concat([c_fw, c_bw], axis=1)
h = tf.concat([h_fw, h_bw], axis=1)
last_units = (h, c)
return (units, last_units)
|
Stacked bi-directional recurrent neural network, GRU or LSTM
Args:
units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]
n_hidden_list: list with number of hidden units at the output of each layer
seq_lengths: length of sequences for different length sequences in batch
can be None for maximum length as a length for every sample in the batch
cell_type: 'lstm' or 'gru'
use_peepholes: whether to use peephole connections (only 'lstm' case affected)
name: what variable_scope to use for the network parameters
Returns:
units: tensor at the output of the last recurrent layer
with dimensionality [None, n_tokens, n_hidden_list[-1]]
last_units: tensor of last hidden states for GRU and tuple
of last hidden stated and last cell states for LSTM
dimensionality of cell states and hidden states are
similar and equal to [B x 2 * H], where B - batch
size and H is number of hidden units
|
codesearchnet
|
def __init__(self, job_context, shard_state):
self.job_context = job_context
self.id = shard_state.shard_id
self.number = shard_state.shard_number
self.attempt = shard_state.retries + 1
self._state = shard_state
|
Init.
The signature of __init__ is subject to change.
Read only properties:
job_context: JobContext object.
id: str. of format job_id-shard_number.
number: int. shard number. 0 indexed.
attempt: int. The current attempt at executing this shard.
Starting at 1.
Args:
job_context: map_job.JobConfig.
shard_state: model.ShardState.
|
juraj-google-style
|
def proxy_num(self, protocol=None):
http_num = len(self.proxies['http'])
https_num = len(self.proxies['https'])
if protocol == 'http':
return http_num
elif protocol == 'https':
return https_num
else:
return http_num + https_num
|
Get the number of proxies in the pool
Args:
protocol (str, optional): 'http' or 'https' or None. (default None)
Returns:
If protocol is None, return the total number of proxies, otherwise,
return the number of proxies of corresponding protocol.
|
juraj-google-style
|
def get_model_filepath(self, infodict):
u = infodict['uniprot_ac']
original_filename = '{}_{}_{}_{}'.format(infodict['from'], infodict['to'], infodict['template'], infodict['coordinate_id'])
file_path = op.join(self.metadata_dir, u[:2], u[2:4], u[4:6], 'swissmodel', '{}.pdb'.format(original_filename))
if op.exists(file_path):
return file_path
else:
log.warning('{}: no file {} found for model'.format(u, file_path))
return None
|
Get the path to the homology model using information from the index dictionary for a single model.
Example: use self.get_models(UNIPROT_ID) to get all the models, which returns a list of dictionaries.
Use one of those dictionaries as input to this function to get the filepath to the model itself.
Args:
infodict (dict): Information about a model from get_models
Returns:
str: Path to homology model
|
codesearchnet
|
def is_array_str(x: Any) -> bool:
if isinstance(x, (bytes, str)):
return True
elif is_array(x):
return is_dtype_str(x.dtype)
else:
return False
|
Returns True if the given array is a `str` array.
Note: Also returns True for scalar `str`, `bytes` values. For compatibility
with `tensor.numpy()` which returns `bytes`
Args:
x: The array to test
Returns:
True or False
|
github-repos
|
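The `is_array` and `is_dtype_str` helpers are not shown in the snippet; the lines below just illustrate the NumPy dtype kinds such a check typically distinguishes.

```python
import numpy as np

print(np.array(['a', 'b']).dtype.kind)    # 'U' -> unicode string array
print(np.array([b'a', b'b']).dtype.kind)  # 'S' -> bytes array
print(np.array([1, 2]).dtype.kind)        # 'i' -> integer, not a string array
```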
def set_authentication_profile(profile=None, deploy=False):
if not profile:
raise CommandExecutionError("Profile name option must not be none.")
ret = {}
query = {'type': 'config',
'action': 'set',
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/'
'authentication-profile',
'element': '<authentication-profile>{0}</authentication-profile>'.format(profile)}
ret.update(__proxy__['panos.call'](query))
if deploy is True:
ret.update(commit())
return ret
|
Set the authentication profile of the Palo Alto proxy minion. A commit will be required before this is processed.
CLI Example:
Args:
profile (str): The name of the authentication profile to set.
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
.. code-block:: bash
salt '*' panos.set_authentication_profile foo
salt '*' panos.set_authentication_profile foo deploy=True
|
juraj-google-style
|
def _send_request(self, url, method="get", data=None, extra_headers=None):
headers = {'Content-type': 'application/json'}
if isinstance(extra_headers, dict):
headers.update(extra_headers)
if not data or "password" not in data:
logger.debug("Sending {method} request to {url} with data {data}".format(
method=method.upper(), url=url, data=data)
)
r = self.session.request(method, url, headers=headers, data=data)
r.raise_for_status()
return r.json()
|
Performs a given request and returns a json object
Args:
url (str): URL of the request
method (str): Any of "get", "post", "delete"
data (any): Possible extra data to send with the request
extra_headers (dict): Possible extra headers to send along in the request
Returns:
dict
|
juraj-google-style
|
def assert_almost_eq(arr_test, arr_target, thresh=1E-11):
if util_arg.NO_ASSERTS:
return
import utool as ut
arr1 = np.array(arr_test)
arr2 = np.array(arr_target)
passed, error = ut.almost_eq(arr1, arr2, thresh, ret_error=True)
if not np.all(passed):
failed_xs = np.where(np.logical_not(passed))
failed_error = error.take(failed_xs)
failed_arr_test = arr1.take(failed_xs)
failed_arr_target = arr2.take(failed_xs)
msg_list = [
'FAILED ASSERT ALMOST EQUAL',
' * failed_xs = %r' % (failed_xs,),
' * failed_error = %r' % (failed_error,),
' * failed_arr_test = %r' % (failed_arr_test,),
' * failed_arr_target = %r' % (failed_arr_target,),
]
msg = '\n'.join(msg_list)
raise AssertionError(msg)
return error
|
r"""
Args:
arr_test (ndarray or list):
arr_target (ndarray or list):
thresh (scalar or ndarray or list):
|
juraj-google-style
|
def make_anchor(file_path: pathlib.Path, offset: int, width: int, context_width: int, metadata, encoding: str='utf-8', handle=None):
@contextmanager
def get_handle():
if (handle is None):
with file_path.open(mode='rt', encoding=encoding) as fp:
(yield fp)
else:
(yield handle)
with get_handle() as fp:
context = _make_context(fp, offset, width, context_width)
return Anchor(file_path=file_path, encoding=encoding, context=context, metadata=metadata)
|
Construct a new `Anchor`.
Args:
file_path: The absolute path to the target file for the anchor.
offset: The offset of the anchored text in codepoints in `file_path`'s
contents.
width: The width in codepoints of the anchored text.
context_width: The width in codepoints of context on either side of the
anchor.
metadata: The metadata to attach to the anchor. Must be json-serializable.
encoding: The encoding of the contents of `file_path`.
handle: If not `None`, this is a file-like object the contents of which
are used to calculate the context of the anchor. If `None`, then
the file indicated by `file_path` is opened instead.
Raises:
ValueError: `width` characters can't be read at `offset`.
ValueError: `file_path` is not absolute.
|
codesearchnet
|
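A self-contained sketch of the optional-handle pattern used by make_anchor above: a small inner context manager either reuses a caller-supplied file object or opens the path itself. The name read_span and its behaviour are illustrative, not part of the original module.
from contextlib import contextmanager
from pathlib import Path

def read_span(file_path: Path, offset: int, width: int, handle=None, encoding='utf-8'):
    @contextmanager
    def get_handle():
        # Reuse the caller's file object if one was given, otherwise open
        # the target path and close it when done.
        if handle is None:
            with file_path.open(mode='rt', encoding=encoding) as fp:
                yield fp
        else:
            yield handle
    with get_handle() as fp:
        fp.read(offset)       # skip `offset` codepoints
        return fp.read(width)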
def secure_channel(target, credentials, options=None, *, loop=None, executor=None,
standalone_pool_for_streaming=False):
return Channel(_grpc.secure_channel(target, credentials, options),
loop, executor, standalone_pool_for_streaming)
|
Creates a secure Channel to a server.
Args:
target: The server address.
credentials: A ChannelCredentials instance.
options: An optional list of key-value pairs (channel args in gRPC runtime)
to configure the channel.
Returns:
A Channel object.
|
juraj-google-style
|
def DeserializeExclusiveData(self, reader):
self.Nonce = reader.ReadUInt32()
self.Type = TransactionType.MinerTransaction
|
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
|
juraj-google-style
|
def AddBlob(self, blob_id, length):
if self.finalized and length > 0:
raise IOError("Can't add blobs to finalized BlobImage")
self.content_dirty = True
self.index.seek(0, 2)
self.index.write(blob_id.AsBytes())
self.size += length
if length < self.chunksize:
self.finalized = True
|
Add another blob to this image using its hash.
Once a blob is added that is smaller than the chunksize we finalize the
file, since handling adding more blobs makes the code much more complex.
Args:
blob_id: rdf_objects.BlobID object.
length: int length of blob
Raises:
IOError: if blob has been finalized.
|
juraj-google-style
|
def forward(self, encoder_hidden_states):
hidden_states = encoder_hidden_states.transpose(1, -1)
for layer in self.conv_layers:
hidden_states = layer(hidden_states)
hidden_states = self.linear(hidden_states.transpose(1, -1)).squeeze(-1)
if not self.training:
hidden_states = torch.clamp(torch.round(hidden_states.exp() - self.log_domain_offset), min=0).long()
return hidden_states
|
Args:
hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):
Batch of input sequences.
padding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*):
Batch of masks indicating padded part.
Returns:
`torch.Tensor`: Batch of predicted durations in log domain `(batch_size, max_text_length)`.
|
github-repos
|
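A tiny sketch of the inference-time conversion in the forward pass above: durations are predicted in log domain, so exp() recovers them before an offset is subtracted and the result is rounded and clamped to non-negative integers. The offset value here is assumed, for illustration only.
import torch

log_domain_offset = 1.0  # assumed value, not taken from the model above
log_durations = torch.tensor([[0.1, 1.2, 2.3]])
durations = torch.clamp(
    torch.round(log_durations.exp() - log_domain_offset), min=0).long()
print(durations)  # tensor([[0, 2, 9]])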
def order_by(self, *args):
clone = copy.deepcopy(self)
clone.adapter.ordered = True
if args:
clone.adapter.order_by(*args)
return clone
|
Applies query ordering.
Args:
*args: Field names to order by.
Defaults to ascending; prepend a hyphen (-) for descending ordering.
Returns:
Self. Queryset object.
Examples:
>>> Person.objects.order_by('-name', 'join_date')
|
juraj-google-style
|
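A minimal, self-contained sketch of the clone-and-return pattern that order_by relies on above, so chained calls never mutate the original queryset; the QuerySet class here is a stand-in, not the real adapter.
import copy

class QuerySet:
    def __init__(self):
        self.ordering = []
    def order_by(self, *fields):
        clone = copy.deepcopy(self)   # work on a copy, keep self untouched
        clone.ordering.extend(fields)
        return clone

qs = QuerySet()
qs2 = qs.order_by('-name', 'join_date')
print(qs.ordering, qs2.ordering)  # [] ['-name', 'join_date']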
def circuit_to_image(circ: Circuit,
qubits: Qubits = None) -> PIL.Image:
latex = circuit_to_latex(circ, qubits)
img = render_latex(latex)
return img
|
Create an image of a quantum circuit.
A convenience function that calls circuit_to_latex() and render_latex().
Args:
circ: A quantum Circuit
qubits: Optional qubit list to specify qubit order
Returns:
Returns: A PIL Image (Use img.show() to display)
Raises:
NotImplementedError: For unsupported gates.
OSError: If an external dependency is not installed.
|
juraj-google-style
|
def update_labels(self, node_name: str, labels: dict):
if (not self._manager):
raise RuntimeError('Only the Swarm manager node can update node details.')
node_spec = {'Availability': 'active', 'Name': node_name, 'Role': 'manager', 'Labels': labels}
node = self._client.nodes.get(node_name)
node.update(node_spec)
|
Update label of a node.
Args:
node_name (string): Name of the node.
labels (dict): Label to add to the node
|
codesearchnet
|
def ParseFileObject(self, parser_mediator, file_object):
display_name = parser_mediator.GetDisplayName()
if not zipfile.is_zipfile(file_object):
raise errors.UnableToParseFile(
'[{0:s}] unable to parse file: {1:s} with error: {2:s}'.format(
self.NAME, display_name, 'Not a Zip file.'))
try:
zip_file = zipfile.ZipFile(file_object, 'r', allowZip64=True)
self._ProcessZipFileWithPlugins(parser_mediator, zip_file)
zip_file.close()
except (zipfile.BadZipfile, struct.error) as exception:
raise errors.UnableToParseFile(
'[{0:s}] unable to parse file: {1:s} with error: {2!s}'.format(
self.NAME, display_name, exception))
|
Parses a compound ZIP file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
juraj-google-style
|
def shakespeare(chunk_size):
file_name = maybe_download('http:
'shakespear.txt')
with open(file_name) as f:
shakespeare_full = f.read()
    length = (len(shakespeare_full) // chunk_size) * chunk_size  # keep whole chunks only
if length < len(shakespeare_full):
shakespeare_full = shakespeare_full[:length]
arr = np.array([convert_to_int(c) for c in shakespeare_full])[
0:len(shakespeare_full) / chunk_size * chunk_size]
return arr.reshape((len(arr) / chunk_size, chunk_size))
|
Downloads Shakespeare, converts it into ASCII codes and chunks it.
Args:
chunk_size: The dataset is broken down so that it is shaped into batches x
chunk_size.
Returns:
A numpy array of ASCII codes shaped into batches x chunk_size.
|
juraj-google-style
|
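A small sketch of the truncate-and-reshape step used by the shakespeare helper above, applied to a toy byte string; the names and numbers are illustrative.
import numpy as np

codes = np.frombuffer(b'To be, or not to be', dtype=np.uint8)
chunk_size = 4
length = (len(codes) // chunk_size) * chunk_size   # drop the ragged tail
chunks = codes[:length].reshape(-1, chunk_size)
print(chunks.shape)  # (4, 4)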
def resize_file(fobj, diff, BUFFER_SIZE=2 ** 16):
fobj.seek(0, 2)
filesize = fobj.tell()
if diff < 0:
if filesize + diff < 0:
raise ValueError
fobj.truncate(filesize + diff)
elif diff > 0:
try:
while diff:
addsize = min(BUFFER_SIZE, diff)
fobj.write(b"\x00" * addsize)
diff -= addsize
fobj.flush()
except IOError as e:
if e.errno == errno.ENOSPC:
fobj.truncate(filesize)
raise
|
Resize a file by `diff`.
New space will be filled with zeros.
Args:
fobj (fileobj)
diff (int): amount of size to change
Raises:
IOError
|
juraj-google-style
|
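A usage sketch for resize_file above with an in-memory file object: growing pads with zero bytes, shrinking truncates.
import io

buf = io.BytesIO(b'hello')
resize_file(buf, 3)       # grow by three zero bytes
print(buf.getvalue())     # b'hello\x00\x00\x00'
resize_file(buf, -6)      # shrink back down to two bytes
print(buf.getvalue())     # b'he'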
def fn(x: int) -> int:
return x
|
Test function
Args:
x: The input
Returns:
The output
|
github-repos
|
def trace_region_count(self):
cmd = enums.JLinkTraceCommand.GET_NUM_REGIONS
data = ctypes.c_uint32(0)
res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))
if (res == 1):
raise errors.JLinkException('Failed to get trace region count.')
return data.value
|
Retrieves a count of the number of available trace regions.
Args:
self (JLink): the ``JLink`` instance.
Returns:
Count of the number of available trace regions.
|
codesearchnet
|
def clear(self, color: Tuple[(int, int, int)]) -> None:
lib.TCOD_image_clear(self.image_c, color)
|
Fill this entire Image with color.
Args:
color (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
|
codesearchnet
|
def VerifyServerPEM(self, http_object):
try:
server_pem = http_object.data
server_url = http_object.url
if (b'BEGIN CERTIFICATE' in server_pem):
server_certificate = rdf_crypto.RDFX509Cert(server_pem)
self.communicator.LoadServerCertificate(server_certificate=server_certificate, ca_certificate=self.ca_cert)
logging.info('Server PEM re-keyed.')
return True
except Exception as e:
logging.info('Unable to verify server certificate at %s: %s', server_url, e)
return False
|
Check the server PEM for validity.
This is used to determine connectivity to the server. Sometimes captive
portals return a valid HTTP status, but the data is corrupted.
Args:
http_object: The response received from the server.
Returns:
True if the response contains a valid server certificate.
|
codesearchnet
|
def from_maildir(self, codes: str) -> FrozenSet[Flag]:
flags = set()
for code in codes:
if code == ',':
break
to_sys = self._to_sys.get(code)
if to_sys is not None:
flags.add(to_sys)
else:
to_kwd = self._to_kwd.get(code)
if to_kwd is not None:
flags.add(to_kwd)
return frozenset(flags)
|
Return the set of IMAP flags that correspond to the letter codes.
Args:
codes: The letter codes to map.
|
juraj-google-style
|
def problem(problem_name, **kwargs):
spec = parse_problem_name(problem_name)
try:
return Registries.problems[spec.base_name](
was_copy=spec.was_copy, was_reversed=spec.was_reversed)
except KeyError:
return env_problem(problem_name, **kwargs)
|
Get possibly copied/reversed problem in `base_registry` or `env_registry`.
Args:
problem_name: string problem name. See `parse_problem_name`.
**kwargs: forwarded to env problem's initialize method.
Returns:
possibly reversed/copied version of base problem registered in the given
registry.
|
juraj-google-style
|
def encode(self, s):
return [(int(w) + self._num_reserved_ids) for w in s.split()]
|
Transform a human-readable string into a sequence of int ids.
The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
num_reserved_ids) are reserved.
EOS is not appended.
Args:
s: human-readable string to be converted.
Returns:
ids: list of integers
|
codesearchnet
|
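A short worked example of the id shift performed by encode above, assuming two reserved ids (e.g. PAD and EOS).
num_reserved_ids = 2   # assumed, e.g. PAD=0 and EOS=1
s = '3 7 1'
ids = [int(w) + num_reserved_ids for w in s.split()]
print(ids)  # [5, 9, 3]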
def napalm_cli(task: Task, commands: List[str]) -> Result:
device = task.host.get_connection("napalm", task.nornir.config)
result = device.cli(commands)
return Result(host=task.host, result=result)
|
Run commands on remote devices using napalm
Arguments:
commands: commands to execute
Returns:
Result object with the following attributes set:
* result (``dict``): result of the commands execution
|
juraj-google-style
|
def increment_id(cls, _id: ObjectId, inc: int) -> ObjectId:
id_number = _ObjectIdHelper.id_to_int(_id)
new_number = id_number + inc
if new_number < 0 or new_number >= 1 << 96:
            raise ValueError('invalid incremental, inc value must be within [%s, %s)' % (0 - id_number, (1 << 96) - id_number))
return _ObjectIdHelper.int_to_id(new_number)
|
Increment object_id binary value by inc value and return new object id.
Args:
_id: The `_id` to change.
inc(int): The incremental int value to be added to `_id`.
Returns:
`_id` incremented by `inc` value
|
github-repos
|
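A sketch of the 96-bit arithmetic behind increment_id above, with assumed stand-ins for the id_to_int / int_to_id helpers; it relies on pymongo's bson package.
from bson.objectid import ObjectId

def id_to_int(oid):
    return int.from_bytes(oid.binary, byteorder='big')   # 12 bytes -> int in [0, 2**96)

def int_to_id(number):
    return ObjectId(number.to_bytes(12, byteorder='big'))

oid = ObjectId('000000000000000000000005')
print(int_to_id(id_to_int(oid) + 10))  # 00000000000000000000000f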
def infer(msg, mrar=False):
df = common.df(msg)
if common.allzeros(msg):
return 'EMPTY'
if (df == 17):
tc = common.typecode(msg)
if (1 <= tc <= 4):
return 'BDS08'
if (5 <= tc <= 8):
return 'BDS06'
if (9 <= tc <= 18):
return 'BDS05'
if (tc == 19):
return 'BDS09'
if (20 <= tc <= 22):
return 'BDS05'
if (tc == 28):
return 'BDS61'
if (tc == 29):
return 'BDS62'
if (tc == 31):
return 'BDS65'
IS10 = bds10.is10(msg)
IS17 = bds17.is17(msg)
IS20 = bds20.is20(msg)
IS30 = bds30.is30(msg)
IS40 = bds40.is40(msg)
IS50 = bds50.is50(msg)
IS60 = bds60.is60(msg)
IS44 = bds44.is44(msg)
IS45 = bds45.is45(msg)
if mrar:
allbds = np.array(['BDS10', 'BDS17', 'BDS20', 'BDS30', 'BDS40', 'BDS44', 'BDS45', 'BDS50', 'BDS60'])
mask = [IS10, IS17, IS20, IS30, IS40, IS44, IS45, IS50, IS60]
else:
allbds = np.array(['BDS10', 'BDS17', 'BDS20', 'BDS30', 'BDS40', 'BDS50', 'BDS60'])
mask = [IS10, IS17, IS20, IS30, IS40, IS50, IS60]
bds = ','.join(sorted(allbds[mask]))
if (len(bds) == 0):
return None
else:
return bds
|
Estimate the most likely BDS code of a message.
Args:
msg (String): 28 bytes hexadecimal message string
mrar (bool): Also infer MRAR (BDS 44) and MHR (BDS 45). Defaults to False.
Returns:
String or None: BDS version, or possible versions, or None if nothing matches.
|
codesearchnet
|
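A condensed sketch of the DF17 typecode-to-BDS mapping used by infer above; the table follows the branches in the code, while the helper name is illustrative.
def adsb_bds(tc):
    table = [
        (range(1, 5), 'BDS08'), (range(5, 9), 'BDS06'),
        (range(9, 19), 'BDS05'), (range(19, 20), 'BDS09'),
        (range(20, 23), 'BDS05'), (range(28, 29), 'BDS61'),
        (range(29, 30), 'BDS62'), (range(31, 32), 'BDS65'),
    ]
    return next((bds for r, bds in table if tc in r), None)

print(adsb_bds(19))  # BDS09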
def wrap(access_pyxb, read_only=False):
w = AccessPolicyWrapper(access_pyxb)
yield w
if not read_only:
w.get_normalized_pyxb()
|
Work with the AccessPolicy in a SystemMetadata PyXB object.
Args:
access_pyxb : AccessPolicy PyXB object
The AccessPolicy to modify.
read_only: bool
Do not update the wrapped AccessPolicy.
When only a single AccessPolicy operation is needed, there's no need to use this
context manager. Instead, use the generated context manager wrappers.
|
juraj-google-style
|
def FileEntryExistsByPathSpec(self, path_spec):
location = getattr(path_spec, 'location', None)
if (location is None or
not location.startswith(self.LOCATION_ROOT)):
return False
if len(location) == 1:
return True
try:
self._tar_file.getmember(location[1:])
return True
except KeyError:
pass
for name in iter(self._tar_file.getnames()):
if name.startswith(location[1:]):
return True
return False
|
Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification.
Returns:
bool: True if the file entry exists.
|
juraj-google-style
|
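A standalone sketch of the lookup strategy in FileEntryExistsByPathSpec above: try an exact member lookup first, then fall back to a prefix scan so implicit directories also count as existing; the helper name is illustrative.
import io
import tarfile

def tar_path_exists(tar, location):
    name = location.lstrip('/')
    try:
        tar.getmember(name)        # exact member match
        return True
    except KeyError:
        pass
    return any(n.startswith(name) for n in tar.getnames())  # implicit directory

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w') as tar:
    info = tarfile.TarInfo('docs/readme.txt')
    data = b'hello'
    info.size = len(data)
    tar.addfile(info, io.BytesIO(data))
buf.seek(0)
with tarfile.open(fileobj=buf, mode='r') as tar:
    print(tar_path_exists(tar, '/docs'))             # True
    print(tar_path_exists(tar, '/docs/readme.txt'))  # True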
def private_map(self):
return self._private_map
|
A map from parents to symbols that should not be included at all.
This map can be edited, but it should not be edited once traversal has
begun.
Returns:
The map marking symbols to not include.
|
github-repos
|
def __init__(self, match=None, qps=None, user_qps=None, daily=None,
analytics_id=None):
_CheckType(match, basestring, 'match')
_CheckType(qps, int, 'qps')
_CheckType(user_qps, int, 'user_qps')
_CheckType(daily, int, 'daily')
_CheckType(analytics_id, basestring, 'analytics_id')
self.__match = match
self.__qps = qps
self.__user_qps = user_qps
self.__daily = daily
self.__analytics_id = analytics_id
|
Constructor for ApiFrontEndLimitRule.
Args:
match: string, the matching rule that defines this traffic segment.
qps: int, the aggregate QPS for this segment.
user_qps: int, the per-end-user QPS for this segment.
daily: int, the aggregate daily maximum for this segment.
analytics_id: string, the project ID under which traffic for this segment
will be logged.
|
juraj-google-style
|
def __init__(self, username, email, manager):
super(User, self).__init__(manager)
self.username = username
self.email = email
|
Initialize a user.
Args:
username (str): The user's username.
email (str): The user's email.
manager (:class:`saltant.models.user.UserManager`):
The manager which spawned this user instance.
|
juraj-google-style
|
def do_usufy(self, query, **kwargs):
try:
self.wrapperAPI = TwitterAPIWrapper()
results = self.wrapperAPI.get_user(query)
for r in results:
aux = {}
aux["type"]="i3visio.uri"
alias=r["value"].split(' - ')[1]
aux["value"]= self.createURL(word=alias, mode="usufy")
aux["attributes"]= []
r["attributes"].append(aux)
except Exception, e:
return super(Twitter, self).do_usufy(query, **kwargs)
|
Verifying a usufy query in this platform.
This might be redefined in any class inheriting from Platform.
Args:
-----
query: The element to be searched.
Return:
-------
A list of elements to be appended.
|
juraj-google-style
|
def CreateDataTypeMapByType(cls, data_type_definition):
data_type_map_class = cls._MAP_PER_DEFINITION.get(
data_type_definition.TYPE_INDICATOR, None)
if not data_type_map_class:
return None
return data_type_map_class(data_type_definition)
|
Creates a specific data type map by type indicator.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Returns:
DataTypeMap: data type map or None if the data type definition
is not available.
|
juraj-google-style
|
def append_item(self, item):
did_remove = self.remove_exit()
item.menu = self
self.items.append(item)
if did_remove:
self.add_exit()
|
Add an item to the end of the menu before the exit item.
Args:
item (MenuItem): The item to be added.
|
juraj-google-style
|
def persist_perf(run, session, svg_path):
from benchbuild.utils import schema as s
with open(svg_path, 'r') as svg_file:
svg_data = svg_file.read()
session.add(
s.Metadata(name="perf.flamegraph", value=svg_data, run_id=run.id))
|
Persist the flamegraph in the database.
The flamegraph exists as a SVG image on disk until we persist it in the
database.
Args:
run: The run we attach these perf measurements to.
session: The db transaction we belong to.
svg_path: The path to the SVG file we want to store.
|
juraj-google-style
|
def merge_json_fhir_object_into_proto(json_value: Dict[str, Any], target: message.Message, *, validate: bool=True, default_timezone: str=_primitive_time_utils.SIMPLE_ZULU) -> None:
parser = _json_parser.JsonParser.json_parser_with_default_timezone(_PRIMITIVE_HANDLER, default_timezone=default_timezone)
parser.merge_value(json_value, target)
if validate:
resource_validation.validate_resource(target, _PRIMITIVE_HANDLER)
|
Merges the provided json_value object into a target Message.
Args:
json_value: The parsed JSON object to merge into target.
target: The Message instance to merge raw_json into.
validate: A Boolean value indicating if validation should be performed on
the resultant Message. Validation takes the form of ensuring that basic
checks such as cardinality guarantees, required field adherence, etc. are
met. Defaults to True.
default_timezone: A string specifying the timezone string to use for time-
like FHIR data during parsing. Defaults to 'Z' for UTC.
Raises:
fhir_errors.InvalidFhirError: In the event that validation fails after
parsing.
|
github-repos
|
def __init__(self, action_meanings):
self.action_meanings = action_meanings
self._wait = True
self.action_space = None
self._last_step_tuples = None
self.action_meanings = action_meanings
self.name_to_action_num = {name: num for num, name in
enumerate(self.action_meanings)}
|
Constructor for PlayerEnv.
Args:
action_meanings: list of strings indicating action names. Can be obtain by
>>> env = gym.make("PongNoFrameskip-v4") # insert your game name
>>> env.unwrapped.get_action_meanings()
See gym AtariEnv get_action_meanings() for more details.
|
juraj-google-style
|
def run(self, args):
jlink = self.create_jlink(args)
if args.downgrade:
if (not jlink.firmware_newer()):
print('DLL firmware is not older than J-Link firmware.')
else:
jlink.invalidate_firmware()
try:
jlink.update_firmware()
except pylink.JLinkException as e:
jlink = self.create_jlink(args)
print(('Firmware Downgraded: %s' % jlink.firmware_version))
elif args.upgrade:
if (not jlink.firmware_outdated()):
print('DLL firmware is not newer than J-Link firmware.')
else:
try:
jlink.update_firmware()
except pylink.JLinkException as e:
jlink = self.create_jlink(args)
print(('Firmware Updated: %s' % jlink.firmware_version))
return None
|
Runs the firmware command.
Args:
self (FirmwareCommand): the ``FirmwareCommand`` instance
args (Namespace): arguments to parse
Returns:
``None``
|
codesearchnet
|
def extract_all(self):
(longmin, longmax, latmin, latmax) = self.Boundary()
(sample_min, sample_max) = map(int, (self.SAMPLE_FIRST_PIXEL, self.SAMPLE_LAST_PIXEL))
(line_min, line_max) = map(int, (self.LINE_FIRST_PIXEL, self.LINE_LAST_PIXEL))
X = np.array(map(self.long_id, range(sample_min, (sample_max + 1), 1)))
Y = np.array(map(self.lat_id, range(line_min, (line_max + 1), 1)))
for (i, line) in enumerate(range(int(line_min), (int(line_max) + 1))):
start = (((line - 1) * int(self.SAMPLE_LAST_PIXEL)) + sample_min)
chunk_size = int((sample_max - sample_min))
Za = self.array(chunk_size, start, self.bytesize)
if (i == 0):
Z = Za
else:
Z = np.vstack((Z, Za))
(X, Y) = np.meshgrid(X, Y)
return (X, Y, Z)
|
Extract the whole image.
Returns:
A tuple of three arrays ``(X,Y,Z)`` where ``X`` contains the
longitudes, ``Y`` contains the latitudes and ``Z`` the values
extracted from the image.
Note:
All return arrays have the same size.
All coordinate are in degree.
|
codesearchnet
|
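A toy sketch of the grid assembly in extract_all above: per-line chunks are stacked into a 2-D value array and paired with a longitude/latitude meshgrid; the numbers are made up.
import numpy as np

lons = np.linspace(0.0, 1.0, 4)
lats = np.linspace(10.0, 11.0, 3)
rows = [np.arange(4) + 10 * i for i in range(3)]   # one chunk per image line
Z = np.vstack(rows)
X, Y = np.meshgrid(lons, lats)
print(X.shape, Y.shape, Z.shape)  # (3, 4) (3, 4) (3, 4)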
def validate_json_schema(data, schema, name="task"):
try:
jsonschema.validate(data, schema)
except jsonschema.exceptions.ValidationError as exc:
raise ScriptWorkerTaskException(
"Can't validate {} schema!\n{}".format(name, str(exc)),
exit_code=STATUSES['malformed-payload']
)
|
Given data and a jsonschema, let's validate it.
This happens for tasks and chain of trust artifacts.
Args:
data (dict): the json to validate.
schema (dict): the jsonschema to validate against.
name (str, optional): the name of the json, for exception messages.
Defaults to "task".
Raises:
ScriptWorkerTaskException: on failure
|
juraj-google-style
|
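A minimal sketch of what the wrapper above delegates to, using the jsonschema package directly; the schema is illustrative.
import jsonschema

schema = {
    'type': 'object',
    'required': ['taskId'],
    'properties': {'taskId': {'type': 'string'}},
}
jsonschema.validate({'taskId': 'abc123'}, schema)   # passes silently
try:
    jsonschema.validate({'taskId': 42}, schema)
except jsonschema.exceptions.ValidationError as exc:
    print(exc.message)  # 42 is not of type 'string'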