code | docstring | source
---|---|---|
def url(self, url):
if url and url.endswith('/'):
url = url[:-1]
self._url = url | Set API URL endpoint
Args:
url: the url of the API endpoint | juraj-google-style |
def _FormatDateTime(self, event):
try:
return timelib.Timestamp.CopyToIsoFormat(
event.timestamp, timezone=self._output_mediator.timezone,
raise_error=True)
except (OverflowError, ValueError) as exception:
self._ReportEventError(event, (
'unable to copy timestamp: {0!s} to a human readable date and time '
'with error: {1!s}. Defaulting to: "0000-00-00T00:00:00"').format(
event.timestamp, exception))
return '0000-00-00T00:00:00' | Formats the date and time in ISO 8601 format.
Args:
event (EventObject): event.
Returns:
str: date and time field. | juraj-google-style |
def assign_seat(self, seat):
rc = self._libinput.libinput_udev_assign_seat(self._li, seat.encode())
assert (rc == 0), 'Failed to assign {}'.format(seat) | Assign a seat to this libinput context.
New devices or the removal of existing devices will appear as events
when iterating over :meth:`~libinput.LibInput.get_event`.
:meth:`assign_seat` succeeds even if no input devices are
currently available on this seat, or if devices are available but fail
to open. Devices that do not have the minimum capabilities to be
recognized as pointer, keyboard or touch device are ignored. Such
devices and those that failed to open are ignored until the next call
to :meth:`~libinput.LibInput.resume`.
Warning:
This method may only be called once per context.
Args:
seat (str): A seat identifier. | codesearchnet |
def __init__(self, status_address, bundle_process_cache=None, state_cache=None, enable_heap_dump=False, worker_id=None, log_lull_timeout_ns=DEFAULT_LOG_LULL_TIMEOUT_NS):
self._alive = True
self._bundle_process_cache = bundle_process_cache
self._state_cache = state_cache
ch = GRPCChannelFactory.insecure_channel(status_address)
grpc.channel_ready_future(ch).result(timeout=60)
self._status_channel = grpc.intercept_channel(ch, WorkerIdInterceptor(worker_id))
self._status_stub = beam_fn_api_pb2_grpc.BeamFnWorkerStatusStub(self._status_channel)
self._responses = queue.Queue()
self.log_lull_timeout_ns = log_lull_timeout_ns
self._last_full_thread_dump_secs = 0.0
self._last_lull_logged_secs = 0.0
self._server = threading.Thread(target=lambda: self._serve(), name='fn_api_status_handler')
self._server.daemon = True
self._enable_heap_dump = enable_heap_dump
self._server.start()
self._lull_logger = threading.Thread(target=lambda: self._log_lull_in_bundle_processor(self._bundle_process_cache), name='lull_operation_logger')
self._lull_logger.daemon = True
self._lull_logger.start() | Initialize FnApiWorkerStatusHandler.
Args:
status_address: The URL Runner uses to host the WorkerStatus server.
bundle_process_cache: The BundleProcessor cache dict from sdk worker.
state_cache: The StateCache from the sdk worker. | github-repos |
def format_color(text, color, use_color_setting):
if (not use_color_setting):
return text
else:
return '{}{}{}'.format(color, text, NORMAL) | Format text with color.
Args:
text - Text to be formatted with color if `use_color`
color - The color start string
use_color_setting - Whether or not to color | codesearchnet |
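A usage sketch of the snippet above; the ANSI escape codes here are common terminal conventions and are assumptions, not constants taken from the source.
```
# Usage sketch; RED/NORMAL are assumed ANSI escape codes -- the snippet above
# expects a NORMAL reset constant to exist in its module.
RED = '\033[41m'     # red background
NORMAL = '\033[0m'   # reset all attributes
print('{}{}{}'.format(RED, 'FAILED', NORMAL))  # what format_color builds when coloring is on
```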
def _cart_dists(self, s1, s2, avg_lattice, mask, normalization, lll_frac_tol=None):
if (len(s2) > len(s1)):
raise ValueError('s1 must be larger than s2')
if (mask.shape != (len(s2), len(s1))):
raise ValueError('mask has incorrect shape')
(vecs, d_2) = pbc_shortest_vectors(avg_lattice, s2, s1, mask, return_d2=True, lll_frac_tol=lll_frac_tol)
lin = LinearAssignment(d_2)
s = lin.solution
short_vecs = vecs[(np.arange(len(s)), s)]
translation = np.average(short_vecs, axis=0)
f_translation = avg_lattice.get_fractional_coords(translation)
new_d2 = np.sum(((short_vecs - translation) ** 2), axis=(- 1))
return (((new_d2 ** 0.5) * normalization), f_translation, s) | Finds a matching in cartesian space. Finds an additional
fractional translation vector to minimize RMS distance
Args:
s1, s2: numpy arrays of fractional coordinates. len(s1) >= len(s2)
avg_lattice: Lattice on which to calculate distances
mask: numpy array of booleans. mask[i, j] = True indicates
that s2[i] cannot be matched to s1[j]
normalization (float): inverse normalization length
Returns:
Distances from s2 to s1, normalized by (V/Natom) ^ 1/3
Fractional translation vector to apply to s2.
Mapping from s1 to s2, i.e. with numpy slicing, s1[mapping] => s2 | codesearchnet |
def xpath(self, exact=None):
exact = exact if exact is not None else self.exact
if isinstance(self.expression, AbstractExpression):
expression = self._apply_expression_filters(self.expression)
return to_xpath(expression, exact=exact)
else:
return str_(self.expression) | Returns the XPath query for this selector.
Args:
exact (bool, optional): Whether to exactly match text.
Returns:
str: The XPath query for this selector. | juraj-google-style |
def getTraitCorrCoef(self,term_i=None):
cov = self.getTraitCovar(term_i)
stds = sp.sqrt(cov.diagonal())[:,sp.newaxis]
RV = cov / stds / stds.T
return RV | Return the estimated trait correlation coefficient matrix for term_i (or the total if term_i is None)
To retrieve the trait covariance matrix use \see getTraitCovar
Args:
term_i: index of the random effect term we want to retrieve the correlation coefficients
Returns:
estimated trait correlation coefficient matrix | juraj-google-style |
def metaclass(*metaclasses):
def _inner(cls):
metabases = tuple(collections.OrderedDict(((c, None) for c in (metaclasses + (type(cls),)))).keys())
_Meta = metabases[0]
for base in metabases[1:]:
class _Meta(base, _Meta):
pass
return six.add_metaclass(_Meta)(cls)
return _inner | Create the class using all metaclasses.
Args:
metaclasses: A tuple of metaclasses that will be used to generate and
replace a specified class.
Returns:
A decorator that will recreate the class using the specified
metaclasses. | codesearchnet |
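A minimal sketch of the idea behind the decorator above: derive one metaclass from all requested metaclasses, then build the class with it. The `Registered` and `Plugin` names are made up for illustration, and this sketch does not call the decorator itself.
```
import abc

class Registered(type):
    """Toy metaclass that records every class it creates."""
    registry = []

    def __new__(mcls, name, bases, namespace, **kwargs):
        cls = super().__new__(mcls, name, bases, namespace, **kwargs)
        Registered.registry.append(cls)
        return cls

# What the decorator builds internally: a single metaclass deriving from all
# requested metaclasses, which is then used to recreate the decorated class.
class CombinedMeta(abc.ABCMeta, Registered):
    pass

class Plugin(metaclass=CombinedMeta):
    @abc.abstractmethod
    def run(self): ...

print(Plugin in Registered.registry)  # True
print(type(Plugin).__mro__[:3])       # (CombinedMeta, ABCMeta, Registered)
```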
def system_info(url, auth, verify_ssl):
sysinfo_response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
sysinfo_response.raise_for_status()
return sysinfo_response.json() | Retrieve SDC system information.
Args:
url (str): the host url.
auth (tuple): a tuple of username, and password. | juraj-google-style |
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
stores = match.get('Stores', {})
for (volume_name, volume) in iter(stores.items()):
datetime_value = volume.get('CreationDate', None)
if (not datetime_value):
continue
partial_path = volume['PartialPath']
event_data = plist_event.PlistTimeEventData()
event_data.desc = 'Spotlight Volume {0:s} ({1:s}) activated.'.format(volume_name, partial_path)
event_data.key = ''
event_data.root = '/Stores'
event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) | Extracts relevant Volume Configuration Spotlight entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. | codesearchnet |
def compose_back(self, input_circuit, edge_map=None):
edge_map = (edge_map or {})
if (len(set(edge_map.values())) != len(edge_map)):
raise DAGCircuitError('duplicates in wire_map')
add_qregs = self._check_edgemap_registers(edge_map, input_circuit.qregs, self.qregs)
for qreg in add_qregs:
self.add_qreg(qreg)
add_cregs = self._check_edgemap_registers(edge_map, input_circuit.cregs, self.cregs)
for creg in add_cregs:
self.add_creg(creg)
self._check_wiremap_validity(edge_map, input_circuit.input_map, self.output_map)
for nd in input_circuit.topological_nodes():
if (nd.type == 'in'):
m_wire = edge_map.get(nd.wire, nd.wire)
if (m_wire not in self.output_map):
raise DAGCircuitError(('wire %s[%d] not in self' % (m_wire[0].name, m_wire[1])))
if (nd.wire not in input_circuit.wires):
raise DAGCircuitError(('inconsistent wire type for %s[%d] in input_circuit' % (nd.wire[0].name, nd.wire[1])))
elif (nd.type == 'out'):
pass
elif (nd.type == 'op'):
condition = self._map_condition(edge_map, nd.condition)
self._check_condition(nd.name, condition)
m_qargs = list(map((lambda x: edge_map.get(x, x)), nd.qargs))
m_cargs = list(map((lambda x: edge_map.get(x, x)), nd.cargs))
self.apply_operation_back(nd.op, m_qargs, m_cargs, condition)
else:
raise DAGCircuitError(('bad node type %s' % nd.type)) | Apply the input circuit to the output of this circuit.
The two bases must be "compatible" or an exception occurs.
A subset of input qubits of the input circuit are mapped
to a subset of output qubits of this circuit.
Args:
input_circuit (DAGCircuit): circuit to append
edge_map (dict): map {(Register, int): (Register, int)}
from the output wires of input_circuit to input wires
of self.
Raises:
DAGCircuitError: if missing, duplicate or inconsistent wire | codesearchnet |
def get_gpu_count():
key = 'gpu_count_no_sudo'
out, err = run_shell_cmd(cmds_all[PLATFORM][key])
if err and FLAGS.debug:
print('Error in detecting GPU count:\n %s' % str(err))
return out.strip(b'\n') | Retrieves the total number of GPUs available in the system.
Returns:
Integer that is the total number of GPUs found. | github-repos |
def _TensorArrayGatherGrad(op: ops.Operation, grad):
handle = op.inputs[0]
indices = op.inputs[1]
flow = op.inputs[2]
dtype = op.get_attr('dtype')
grad_source = _GetGradSource(grad)
g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)
u_g = g.scatter(indices, grad)
return [None, None, u_g.flow] | Gradient for TensorArrayGather.
Args:
op: Forward TensorArrayGather op.
grad: Gradient `Tensor` to TensorArrayGather.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`. | github-repos |
def prepare_lazy_data(content, functions_mapping=None, check_variables_set=None, cached=False):
if ((content is None) or isinstance(content, (numeric_types, bool, type))):
return content
elif isinstance(content, (list, set, tuple)):
return [prepare_lazy_data(item, functions_mapping, check_variables_set, cached) for item in content]
elif isinstance(content, dict):
parsed_content = {}
for (key, value) in content.items():
parsed_key = prepare_lazy_data(key, functions_mapping, check_variables_set, cached)
parsed_value = prepare_lazy_data(value, functions_mapping, check_variables_set, cached)
parsed_content[parsed_key] = parsed_value
return parsed_content
elif isinstance(content, basestring):
if (not is_var_or_func_exist(content)):
return content.replace('$$', '$')
functions_mapping = (functions_mapping or {})
check_variables_set = (check_variables_set or set())
content = content.strip()
content = LazyString(content, functions_mapping, check_variables_set, cached)
return content | make string in content as lazy object with functions_mapping
Raises:
exceptions.VariableNotFound: if any variable undefined in check_variables_set | codesearchnet |
def AddContract(self, contract):
super(UserWallet, self).AddContract(contract)
try:
db_contract = Contract.get(ScriptHash=contract.ScriptHash.ToBytes())
db_contract.delete_instance()
except Exception as e:
logger.debug("contract does not exist yet")
sh = bytes(contract.ScriptHash.ToArray())
address, created = Address.get_or_create(ScriptHash=sh)
address.IsWatchOnly = False
address.save()
db_contract = Contract.create(RawData=contract.ToArray(),
ScriptHash=contract.ScriptHash.ToBytes(),
PublicKeyHash=contract.PublicKeyHash.ToBytes(),
Address=address,
Account=self.__dbaccount)
logger.debug("Creating db contract %s " % db_contract)
db_contract.save() | Add a contract to the database.
Args:
contract(neo.SmartContract.Contract): a Contract instance. | juraj-google-style |
def get_keys_from_ldap(self, username=None):
result_dict = {}
filter = ['(sshPublicKey=*)']
if (username is not None):
filter.append('(uid={})'.format(username))
attributes = ['uid', 'sshPublicKey']
results = self.client.search(filter, attributes)
for result in results:
result_dict[result.uid.value] = result.sshPublicKey.values
return result_dict | Fetch keys from ldap.
Args:
username: Username associated with keys to fetch (optional)
Returns:
Array of dictionaries in '{username: [public keys]}' format | codesearchnet |
def rotate(self, matrix, tol=1e-3):
matrix = SquareTensor(matrix)
if not matrix.is_rotation(tol):
raise ValueError("Rotation matrix is not valid.")
sop = SymmOp.from_rotation_and_translation(matrix,
[0., 0., 0.])
return self.transform(sop) | Applies a rotation directly, and tests input matrix to ensure a valid
rotation.
Args:
matrix (3x3 array-like): rotation matrix to be applied to tensor
tol (float): tolerance for testing rotation matrix validity | juraj-google-style |
def get_help_datapacks(module_name, server_prefix):
_dir = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
module_dir = "{}/../{}".format(_dir, module_name, "_help.json")
if os.path.isdir(module_dir):
module_help_path = "{}/{}".format(module_dir, "_help.json")
if os.path.isfile(module_help_path):
return helptools.get_help_datapacks(module_help_path, server_prefix)
else:
return [("Help", "{} does not have a help.json file".format(module_name), False)]
else:
return [("Help", "No module found called {}".format(module_name), False)] | Get the help datapacks for a module
Args:
module_name (str): The module to get help data for
server_prefix (str): The command prefix for this server
Returns:
datapacks (list): The help datapacks for the module | juraj-google-style |
def getLogger(self, component_name: str=None) -> logging.Logger:
logger_name = (self.root + (component_name if component_name else 'generic'))
_logger = self.loggers.get(logger_name)
if (not _logger):
_logger = logging.getLogger(logger_name)
stdio_handler = logging.StreamHandler()
stdio_handler.setFormatter(LogFormatter())
stdio_handler.setLevel(logging.INFO)
_logger.addHandler(stdio_handler)
_logger.setLevel(logging.DEBUG)
self.loggers[logger_name] = _logger
return _logger | Get the logger instance matching ``component_name`` or create a new one if non-existent.
Args:
component_name: a neo-python component name. e.g. network, vm, db
Returns:
a logger for the specified component. | codesearchnet |
def parsed_stack(self, value):
if value == self._defaults['parsedStack'] and 'parsedStack' in self._values:
del self._values['parsedStack']
else:
self._values['parsedStack'] = value | The parsed_stack property.
Args:
value (list). the property value. | juraj-google-style |
def info(self, **kwargs):
path = self._get_path('info')
kwargs.update({'session_id': self.session_id})
response = self._GET(path, kwargs)
self.id = response['id']
self._set_attrs_to_values(response)
return response | Get the basic information for an account.
Call this method first, before calling other Account methods.
Returns:
A dict representation of the JSON returned from the API. | codesearchnet |
def describe(obj: Any, denylist: Collection[Any], leaves_only: bool=False) -> str:
if get_ignore_reason(obj, denylist):
return '{}{}'.format(get_ignore_reason(obj, denylist), type(obj))
if tf_inspect.isframe(obj):
return 'frame: {}'.format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return 'module: {}'.format(obj.__name__)
elif leaves_only:
return '{}, {}'.format(type(obj), id(obj))
elif isinstance(obj, list):
return 'list({}): {}'.format(id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return 'tuple({}): {}'.format(id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return 'dict({}): {} keys'.format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return 'function({}) {}; globals ID: {}'.format(id(obj), obj.__name__, id(obj.__globals__))
else:
return '{}, {}'.format(type(obj), id(obj)) | Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
denylist: same as denylist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections. | github-repos |
def __normalized_name(self, message_type):
name = message_type.definition_name()
split_name = re.split('[^0-9a-zA-Z]', name)
normalized = ''.join(((part[0].upper() + part[1:]) for part in split_name if part))
previous = self.__normalized_names.get(normalized)
if previous:
if (previous != name):
raise KeyError(('Both %s and %s normalize to the same schema name: %s' % (name, previous, normalized)))
else:
self.__normalized_names[normalized] = name
return normalized | Normalized schema name.
Generate a normalized schema name, taking the class name and stripping out
everything but alphanumerics, and camel casing the remaining words.
A normalized schema name is a name that matches [a-zA-Z][a-zA-Z0-9]*
Args:
message_type: protorpc.message.Message class being parsed.
Returns:
A string, the normalized schema name.
Raises:
KeyError: A collision was found between normalized names. | codesearchnet |
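A standalone sketch of the normalization rule described above, using the same regex; the input name is hypothetical.
```
import re

def normalize_schema_name(name):
    # Split on any non-alphanumeric character and capitalize the first
    # letter of each remaining part, then join -- same rule as above.
    parts = re.split('[^0-9a-zA-Z]', name)
    return ''.join(part[0].upper() + part[1:] for part in parts if part)

print(normalize_schema_name('my_messages.FooRequest-v2'))  # MyMessagesFooRequestV2
```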
def ctc_state_log_probs(seq_lengths, max_seq_length):
batch_size = _get_dim(seq_lengths, 0)
num_label_states = max_seq_length + 1
num_duration_states = 2
num_states = num_duration_states * num_label_states
log_0 = math_ops.cast(math_ops.log(math_ops.cast(0, dtypes.float64) + 1e-307), dtypes.float32)
initial_state_log_probs = array_ops.one_hot(indices=array_ops.zeros([batch_size], dtype=dtypes.int32), depth=num_states, on_value=0.0, off_value=log_0, axis=1)
label_final_state_mask = array_ops.one_hot(seq_lengths, depth=num_label_states, axis=0)
duration_final_state_mask = array_ops.ones([num_duration_states, 1, batch_size])
final_state_mask = duration_final_state_mask * label_final_state_mask
final_state_log_probs = (1.0 - final_state_mask) * log_0
final_state_log_probs = array_ops.reshape(final_state_log_probs, [num_states, batch_size])
return (initial_state_log_probs, array_ops.transpose(final_state_log_probs)) | Computes CTC alignment initial and final state log probabilities.
Create the initial/final state values directly as log values to avoid
having to take a float64 log on tpu (which does not exist).
Args:
seq_lengths: int tensor of shape [batch_size], seq lengths in the batch.
max_seq_length: int, max sequence length possible.
Returns:
initial_state_log_probs, final_state_log_probs | github-repos |
def SetCredentials(self, password=None, username=None):
if password:
self._password = password
if username:
self._user = username | Sets the database credentials.
Args:
password (Optional[str]): password to access the database.
username (Optional[str]): username to access the database. | juraj-google-style |
def pack_eager_tensors(tensors, ctx=None) -> EagerTensor:
if not isinstance(tensors, list):
raise TypeError(f'tensors must be a list, but got a {type(tensors)}')
if not tensors:
raise ValueError('Cannot pack an empty list of tensors.')
dtype = tensors[0].dtype
shape = tensors[0].shape
handle_data = tensors[0]._handle_data
is_resource = dtype == dtypes.resource
for i in range(len(tensors)):
t = tensors[i]
if not isinstance(t, EagerTensor):
raise TypeError(f'All tensors being packed must be EagerTensor. Found an item of type {type(t)}.')
if t.dtype != dtype:
raise ValueError(f'All tensors being packed should have the same dtype {dtype}, but the {i}-th tensor is of dtype {t.dtype}')
if t.shape != shape:
raise ValueError(f'All tensors being packed should have the same shape {shape}, but the {i}-th tensor is of shape {t.shape}')
if is_resource and t._handle_data != handle_data:
raise ValueError(f'All tensors being packed should have the same handle data {handle_data}, but the {i}-th tensor is of handle data {t._handle_data}')
if ctx is None:
ctx = context.context()
packed_tensor = ctx.pack_eager_tensors(tensors)
if handle_data is not None:
packed_tensor._handle_data = handle_data
def grad_fun(_):
raise ValueError('Computing gradients through pack_eager_tensors is not supported.')
record.record_operation('pack_eager_tensors', [packed_tensor], tensors, grad_fun)
return packed_tensor | Pack multiple `EagerTensor`s of the same dtype and shape.
Args:
tensors: a list of EagerTensors to pack.
ctx: context.context().
Returns:
A packed EagerTensor. | github-repos |
def evaluated_variants(self, case_id):
query = {'$and': [{'case_id': case_id}, {'$or': [{'acmg_classification': {'$exists': True}}, {'manual_rank': {'$exists': True}}, {'dismiss_variant': {'$exists': True}}]}]}
variants = {}
for var in self.variant_collection.find(query):
variants[var['variant_id']] = self.add_gene_info(var)
event_query = {'$and': [{'case': case_id}, {'category': 'variant'}, {'verb': 'comment'}]}
comment_variants = {event['variant_id'] for event in self.event_collection.find(event_query)}
for var_id in comment_variants:
if (var_id in variants):
continue
variant_obj = self.variant(var_id, case_id=case_id)
if (not variant_obj):
continue
variant_obj['is_commented'] = True
variants[var_id] = variant_obj
return variants.values() | Returns variants that have been evaluated
Return all variants, snvs/indels and svs from case case_id
which have an entry for 'acmg_classification', 'manual_rank', 'dismiss_variant'
or if they are commented.
Args:
case_id(str)
Returns:
variants(iterable(Variant)) | codesearchnet |
def create_symbol(self, *args, **kwargs):
if not kwargs.get('project_name'):
kwargs['project_name'] = self.project.project_name
sym = self.app.database.create_symbol(*args, **kwargs)
if sym:
if type(sym) != Symbol:
self._created_symbols[sym.filename].add(sym.unique_name)
return sym | Extensions that discover and create instances of `symbols.Symbol`
should do this through this method, as it will keep an index
of these which can be used when generating a "naive index".
See `database.Database.create_symbol` for more
information.
Args:
args: see `database.Database.create_symbol`
kwargs: see `database.Database.create_symbol`
Returns:
symbols.Symbol: the created symbol, or `None`. | juraj-google-style |
def safe_url(self, url, errors='strict'):
if url is not None:
url = quote(self.s(url, errors=errors), safe='~')
return url | URL encode value for safe HTTP request.
Args:
url (string): The string to URL Encode.
Returns:
(string): The urlencoded string. | juraj-google-style |
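Roughly what the encoding step does, shown with the standard library and a made-up URL; the original additionally coerces the value to a string first.
```
from urllib.parse import quote

# quote() never encodes ASCII letters, digits or '_.-~'; with safe='~' every
# other character (including ':', '/', '?', '=', '&', ' ') is percent-encoded.
url = 'https://api.example.com/search?q=hello world&tag=a/b'
print(quote(url, safe='~'))
```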
def _check_required_fields(self, object_type, ignore_fields):
for field in self.configuration[object_type]['required_fields']:
if ((field not in self.data) and (field not in ignore_fields)):
raise HDXError(('Field %s is missing in %s!' % (field, object_type))) | Helper method to check that metadata for HDX object is complete
Args:
ignore_fields (List[str]): Any fields to ignore in the check
Returns:
None | codesearchnet |
def _AssertValidators(self, validators):
for validator in sorted(
validators, key=lambda validator: validator.insertion_index):
try:
validator.verify(self)
except exceptions.ValidationError as e:
message = validator.print_flags_with_values(self)
raise exceptions.IllegalFlagValueError('%s: %s' % (message, str(e))) | Assert if all validators in the list are satisfied.
Asserts validators in the order they were created.
Args:
validators: Iterable(validators.Validator), validators to be
verified
Raises:
AttributeError: if validators work with a non-existing flag.
IllegalFlagValueError: if validation fails for at least one validator | juraj-google-style |
def register_rml_def(self,
location_type,
location,
filename=None,
**kwargs):
if location_type == 'directory':
self.register_directory(location, **kwargs)
elif location_type == 'filepath':
if not os.path.exists(location):
raise OSError("File not found", location)
if os.path.isfile(location):
self.register_rml(location)
elif filename:
new_loc = os.path.join(location, filename)
if not os.path.exists(new_loc):
raise OSError("File not found", new_loc)
elif os.path.isfile(new_loc):
self.register_rml(new_loc)
else:
raise OSError("File not found", location)
elif location_type.startswith('package'):
pkg_path = \
importlib.util.find_spec(\
location).submodule_search_locations[0]
if location_type.endswith('_all'):
self.register_directory(pkg_path, **kwargs)
elif location_type.endswith('_file'):
filepath = os.path.join(pkg_path, filename)
self.register_rml(filepath, **kwargs)
else:
raise NotImplementedError | Registers the rml file locations for easy access
Args:
-----
location_type: ['package_all',
'package_file',
'directory',
'filepath']
location: The correlated location string based on the location_type
filename: Optional, associated with 'package_file' location_type
kwargs:
-------
include_subfolders: Boolean | juraj-google-style |
def get_corrections_dict(self, entry):
corrections = {}
for c in self.corrections:
val = c.get_correction(entry)
if (val != 0):
corrections[str(c)] = val
return corrections | Returns the corrections applied to a particular entry.
Args:
entry: A ComputedEntry object.
Returns:
({correction_name: value}) | codesearchnet |
def write_fasta_file(seq_records, outname, outdir=None, outext='.faa', force_rerun=False):
if (not outdir):
outdir = ''
outfile = ssbio.utils.outfile_maker(inname='', outname=outname, outdir=outdir, outext=outext)
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
SeqIO.write(seq_records, outfile, 'fasta')
return outfile | Write a FASTA file for a SeqRecord or a list of SeqRecord objects.
Args:
seq_records (SeqRecord, list): SeqRecord or a list of SeqRecord objects
outname: Name of the output file which will have outext appended to it
outdir: Path to directory to output sequences to
outext: Extension of FASTA file, default ".faa"
force_rerun: If file should be overwritten if it exists
Returns:
str: Path to output FASTA file. | codesearchnet |
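A minimal Biopython usage sketch showing the `SeqIO.write` call the helper wraps; the records and output path are made up.
```
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

# Two toy protein records written to a FASTA file, which is essentially what
# the helper above does after resolving the output path.
records = [
    SeqRecord(Seq('MKTAYIAKQR'), id='protein_1', description='toy sequence'),
    SeqRecord(Seq('MADEEKLPPG'), id='protein_2', description='toy sequence'),
]
SeqIO.write(records, 'toy_sequences.faa', 'fasta')
```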
def CreateStorageWriterForFile(cls, session, path):
if sqlite_file.SQLiteStorageFile.CheckSupportedFormat(path):
return sqlite_writer.SQLiteStorageFileWriter(session, path)
return None | Creates a storage writer based on the file.
Args:
session (Session): session the storage changes are part of.
path (str): path to the storage file.
Returns:
StorageWriter: a storage writer or None if the storage file cannot be
opened or the storage format is not supported. | codesearchnet |
def run_command(command, input_data=None, out_pipe=subprocess.PIPE, err_pipe=subprocess.PIPE, env=None, **kwargs):
if (env is None):
env = os.environ.copy()
with LogTask(('Run command: %s' % ' '.join((('"%s"' % arg) for arg in command))), logger=LOGGER, level='debug') as task:
command_result = _run_command(command=command, input_data=input_data, out_pipe=out_pipe, err_pipe=err_pipe, env=env, uuid=task.uuid, **kwargs)
return command_result | Runs a command non-interactively
Args:
command(list of str): args of the command to execute, including the
command itself as command[0] as `['ls', '-l']`
input_data(str): If passed, will feed that data to the subprocess
through stdin
out_pipe(int or file): File descriptor as passed to
:ref:subprocess.Popen to use as stdout
err_pipe(int or file): File descriptor as passed to
:ref:subprocess.Popen to use as stderr
env(dict of str:str): If set, will use the given dict as env for the
subprocess
**kwargs: Any other keyword args passed will be passed to the
:ref:subprocess.Popen call
Returns:
lago.utils.CommandStatus: result of the interactive execution | codesearchnet |
def DtypeToNumberConverter(self, dtype):
if np.issubdtype(dtype, np.datetime64):
def DatetimesToNumbers(dt_list):
return np.array([pd.Timestamp(dt).value for dt in dt_list])
return DatetimesToNumbers
elif np.issubdtype(dtype, np.timedelta64):
      def TimedeltasToNumbers(td_list):
        return np.array([pd.Timedelta(td).value for td in td_list])
      return TimedeltasToNumbers
else:
return None | Converts a Numpy dtype to a converter method if applicable.
The converter method takes in a numpy array of objects of the provided dtype
and returns a numpy array of the numbers backing those objects for
statistical analysis. Returns None if no converter is necessary.
Args:
dtype: The numpy dtype to make a converter for.
Returns:
The converter method or None. | juraj-google-style |
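For context, this is what the datetime branch of the converter above produces for a small array (a sketch using pandas and NumPy directly):
```
import numpy as np
import pandas as pd

# Each datetime64 value is turned into its integer nanosecond timestamp so it
# can be fed into ordinary numeric statistics.
dt_array = np.array(['2021-01-01', '2021-06-15'], dtype='datetime64[ns]')
as_numbers = np.array([pd.Timestamp(dt).value for dt in dt_array])
print(as_numbers)  # nanoseconds since the Unix epoch
```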
def _create_zeros_for_none_grads(forward_graphs, grad_graphs):
assert len(forward_graphs) == len(grad_graphs)
branch_outputs = [g.structured_outputs for g in grad_graphs]
num_outputs_per_branch = [len(outs) for outs in branch_outputs]
assert len(set(num_outputs_per_branch)) == 1, num_outputs_per_branch
for output_idx, branch_outs in enumerate(zip(*branch_outputs)):
if any((t is None for t in branch_outs)) and any((t is not None for t in branch_outs)):
for branch_index, t in enumerate(branch_outs):
if t is None:
with grad_graphs[branch_index].as_default():
zeros = default_gradient.zeros_like(forward_graphs[branch_index].inputs[output_idx])
grad_graphs[branch_index].structured_outputs[output_idx] = zeros
for grad_graph in grad_graphs:
grad_graph.outputs = [t for t in func_graph_module.flatten(grad_graph.structured_outputs) if t is not None] | Creates zeros for None out grads if at least one branch has non-None grad.
Args:
forward_graphs: List of forward FuncGraphs.
grad_graphs: List of grad FuncGraphs. | github-repos |
def unique_array(arr):
if (not len(arr)):
return np.asarray(arr)
elif pd:
if (isinstance(arr, np.ndarray) and (arr.dtype.kind not in 'MO')):
return pd.unique(arr)
values = []
for v in arr:
if (isinstance(v, datetime_types) and (not isinstance(v, cftime_types))):
v = pd.Timestamp(v).to_datetime64()
values.append(v)
return pd.unique(values)
else:
arr = np.asarray(arr)
(_, uniq_inds) = np.unique(arr, return_index=True)
return arr[np.sort(uniq_inds)] | Returns an array of unique values in the input order.
Args:
arr (np.ndarray or list): The array to compute unique values on
Returns:
A new array of unique values | codesearchnet |
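A small example of the order-preserving behaviour, including the pure-NumPy fallback used when pandas is unavailable:
```
import numpy as np
import pandas as pd

values = np.array([3, 1, 3, 2, 1])
print(pd.unique(values))   # [3 1 2] -- first-seen order is kept
print(np.unique(values))   # [1 2 3] -- sorted, original order lost

# The NumPy fallback from the snippet above: take the index of each value's
# first occurrence and re-sort those indices to restore input order.
_, first_idx = np.unique(values, return_index=True)
print(values[np.sort(first_idx)])  # [3 1 2]
```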
def calculate_character_to_length_mapping(
measurer: text_measurer.TextMeasurer,
characters: Iterable[str]) -> Mapping[str, float]:
char_to_length = {}
for c in characters:
char_to_length[c] = measurer.text_width(c)
return char_to_length | Return a mapping between each given character and its length.
Args:
measurer: The TextMeasurer used to measure the width of the text in
pixels.
characters: The characters to measure e.g. "ml".
Returns:
A mapping from the given characters to their length in pixels, as
determined by 'measurer' e.g. {'m': 5.2, 'l', 1.2}. | juraj-google-style |
def update_from_json(self, path=join('config', 'hdx_dataset_static.json')):
super(Dataset, self).update_from_json(path)
self.separate_resources() | Update dataset metadata with static metadata from JSON file
Args:
path (str): Path to JSON dataset metadata. Defaults to config/hdx_dataset_static.json.
Returns:
None | codesearchnet |
def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None):
pass | Returns an IdWeightPair.
`IdWeightPair` is a pair of `SparseTensor`s which represents ids and
weights.
`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
`SparseTensor` of `float` or `None` to indicate all weights should be
taken to be 1. If specified, `weight_tensor` must have exactly the same
shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
output of a `VarLenFeature` which is a ragged matrix.
Args:
inputs: A `LazyBuilder` as a cache to get input tensors required to create
`IdWeightPair`.
weight_collections: List of graph collections to which variables (if any
will be created) are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.compat.v1.get_variable`). | github-repos |
def next_population(self, population, fitnesses):
return common.make_population(self._population_size,
self._generate_solution) | Make a new population after each optimization iteration.
Args:
population: The current population of solutions.
fitnesses: The fitness associated with each solution in the population
Returns:
list; a list of solutions. | juraj-google-style |
def sgn_prod(p1, p2):
phase = Pauli._prod_phase(p1, p2)
new_pauli = (p1 * p2)
return (new_pauli, phase) | r"""
Multiply two Paulis and track the phase.
$P_3 = P_1 \otimes P_2$: X*Y
Args:
p1 (Pauli): pauli 1
p2 (Pauli): pauli 2
Returns:
Pauli: the multiplied pauli
complex: the sign of the multiplication, 1, -1, 1j or -1j | codesearchnet |
class RedisEnrichmentHandler(EnrichmentSourceHandler[beam.Row, beam.Row]):
def __init__(self, redis_host: str, redis_port: int, index_name: str='embeddings-index', vector_field: str='text_vector', return_fields: list=['id', 'title', 'url', 'text'], hybrid_fields: str='*', k: int=2):
self.redis_host = redis_host
self.redis_port = redis_port
self.index_name = index_name
self.vector_field = vector_field
self.return_fields = return_fields
self.hybrid_fields = hybrid_fields
self.k = k
self.client = None
def __enter__(self):
self.client = redis.Redis(host=self.redis_host, port=self.redis_port)
def __call__(self, request: beam.Row, *args, **kwargs):
embedded_query = request['text']
base_query = f'{self.hybrid_fields}=>[KNN {self.k} @{self.vector_field} $vector AS vector_score]'
query = Query(base_query).return_fields(*self.return_fields).paging(0, self.k).dialect(2)
params_dict = {'vector': np.array(embedded_query).astype(dtype=np.float32).tobytes()}
results = self.client.ft(self.index_name).search(query, params_dict)
return (beam.Row(text=embedded_query), beam.Row(docs=results.docs)) | A handler for :class:`apache_beam.transforms.enrichment.Enrichment`
transform to interact with redis vector DB.
Args:
redis_host (str): Redis Host to connect to redis DB
redis_port (int): Redis Port to connect to redis DB
index_name (str): Index Name created for searching in Redis DB
vector_field (str): vector field to compute similarity score in vector DB
return_fields (list): returns list of similar text and its embeddings
hybrid_fields (str): fields to be selected
k (int): Value of K in KNN algorithm for searching in redis | github-repos |
def _check_job_status(self, job, desc, status_key_name):
status = desc[status_key_name]
status = _STATUS_CODE_TABLE.get(status, status)
if status != 'Completed' and status != 'Stopped':
reason = desc.get('FailureReason', '(No reason provided)')
job_type = status_key_name.replace('JobStatus', ' job')
raise ValueError('Error for {} {}: {} Reason: {}'.format(job_type, job, status, reason)) | Check to see if the job completed successfully and, if not, construct and
raise a ValueError.
Args:
job (str): The name of the job to check.
desc (dict[str, str]): The result of ``describe_training_job()``.
status_key_name (str): Status key name to check for.
Raises:
ValueError: If the training job fails. | juraj-google-style |
def _find_best_fit(self, pbin):
fit = ((pbin.fitness(r[0], r[1]), k) for k, r in self._sorted_rect.items())
fit = (f for f in fit if f[0] is not None)
try:
_, rect = min(fit, key=self.first_item)
return rect
except ValueError:
return None | Return the rectangle with the best fitness from the _sorted_rect packing list
Arguments:
pbin (PackingAlgorithm): Packing bin
Returns:
key of the rectangle with best fitness | juraj-google-style |
def process_exception(self, e, uuid, routing_key, body, tb=None):
msg = e.message if hasattr(e, "message") else str(e)
exception_type = str(e.__class__)
exception_name = str(e.__class__.__name__)
print "Sending exception %s: %s for UUID %s." % (
exception_name,
msg,
uuid
)
self.sendMessage(
self.output_exchange,
routing_key,
str(body),
properties=pika.BasicProperties(
content_type="application/text",
delivery_mode=2,
headers={
"exception": msg,
"exception_type": exception_type,
"exception_name": exception_name,
"traceback": tb,
"UUID": uuid
}
)
) | Callback called when exception was raised.
This method serializes the exception and sends it over AMQP back
to caller.
Args:
e (obj): Instance of the exception.
uuid (str): UUID of the message that caused the exception to raise.
routing_key (str): Which routing key was used.
body (str): Body of the exception - the longer text.
tb (str, default None): Traceback (stacktrace) of the exception. | juraj-google-style |
def AddFile(self, path, file_data):
if self.file_system.FileEntryExistsByPath(path):
raise ValueError('Path: {0:s} already set.'.format(path))
self._AddParentDirectories(path)
self.file_system.AddFileEntry(path, file_data=file_data) | Adds a "regular" file to the fake file system.
Note that this function will create parent directories if needed.
Args:
path (str): path of the file within the fake file system.
file_data (bytes): data of the file.
Raises:
ValueError: if the path is already set. | juraj-google-style |
def generate_panel(self, img):
plt.figure(figsize=(14, 6))
ax = plt.gca()
fig = plt.gcf()
plt.subplot(122)
data_save = np.zeros_like(self.postcard)
self.roll_best = np.zeros((4, 2))
for i in range(4):
g = np.where((self.qs == i))[0]
wh = np.where((self.times[g] > 54947))
self.roll_best[i] = self.do_rolltest(g, wh)
self.do_photometry()
for i in range(4):
g = np.where((self.qs == i))[0]
plt.errorbar(self.times[g], self.obs_flux[g], yerr=self.flux_uncert[i], fmt=fmt[i])
plt.xlabel('Time', fontsize=20)
plt.ylabel('Relative Flux', fontsize=20)
plt.subplot(121)
implot = plt.imshow(img, interpolation='nearest', cmap='gray', vmin=(98000 * 52), vmax=(104000 * 52))
cid = fig.canvas.mpl_connect('button_press_event', self.onclick)
plt.show(block=True) | Creates the figure shown in ``adjust_aperture`` for visualization purposes. Called by other functions
and generally not called by the user directly.
Args:
img: The data frame to be passed through to be plotted. A cutout of the ``integrated_postcard`` | codesearchnet |
def credit_note(request, note_id, access_code=None):
note_id = int(note_id)
current_note = CreditNoteController.for_id_or_404(note_id)
apply_form = forms.ApplyCreditNoteForm(current_note.credit_note.invoice.user, (request.POST or None), prefix='apply_note')
refund_form = forms.ManualCreditNoteRefundForm((request.POST or None), prefix='refund_note')
cancellation_fee_form = forms.CancellationFeeForm((request.POST or None), prefix='cancellation_fee')
if (request.POST and apply_form.is_valid()):
inv_id = apply_form.cleaned_data['invoice']
invoice = commerce.Invoice.objects.get(pk=inv_id)
current_note.apply_to_invoice(invoice)
messages.success(request, ('Applied credit note %d to invoice.' % note_id))
return redirect('invoice', invoice.id)
elif (request.POST and refund_form.is_valid()):
refund_form.instance.entered_by = request.user
refund_form.instance.parent = current_note.credit_note
refund_form.save()
messages.success(request, 'Applied manual refund to credit note.')
refund_form = forms.ManualCreditNoteRefundForm(prefix='refund_note')
elif (request.POST and cancellation_fee_form.is_valid()):
percentage = cancellation_fee_form.cleaned_data['percentage']
invoice = current_note.cancellation_fee(percentage)
messages.success(request, ('Generated cancellation fee for credit note %d.' % note_id))
return redirect('invoice', invoice.invoice.id)
data = {'credit_note': current_note.credit_note, 'apply_form': apply_form, 'refund_form': refund_form, 'cancellation_fee_form': cancellation_fee_form}
return render(request, 'registrasion/credit_note.html', data) | Displays a credit note.
If ``request`` is a ``POST`` request, forms for applying or refunding
a credit note will be processed.
This view requires a login, and the logged in user must be staff.
Arguments:
note_id (castable to int): The ID of the credit note to view.
Returns:
render or redirect:
If the "apply to invoice" form is correctly processed, redirect to
that invoice, otherwise, render ``registration/credit_note.html``
with the following data::
{
"credit_note": models.commerce.CreditNote(),
"apply_form": form, # A form for applying credit note
# to an invoice.
"refund_form": form, # A form for applying a *manual*
# refund of the credit note.
"cancellation_fee_form" : form, # A form for generating an
# invoice with a
# cancellation fee
} | codesearchnet |
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> torch.Tensor:
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = self.attn_dropout(hidden_states)
hidden_states = residual + hidden_states
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.cross_attention_layer_norm(hidden_states)
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.cross_attention(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, past_key_value=cross_attn_past_key_value, attention_mask=encoder_attention_mask, output_attentions=output_attentions)
hidden_states = self.attn_dropout(hidden_states)
hidden_states = residual + hidden_states
present_key_value += cross_attn_present_key_value
residual = hidden_states
hidden_states = self.ffn_layer_norm(hidden_states)
hidden_states = self.ffn(hidden_states)
hidden_states = self.ffn_dropout(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states, present_key_value)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs | Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`):
encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by
very large negative values.
past_key_value (`Tuple(torch.FloatTensor)`):
cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail. | github-repos |
def _FormatDateTime(self, event):
try:
return timelib.Timestamp.CopyToIsoFormat(event.timestamp, timezone=self._output_mediator.timezone, raise_error=True)
except (OverflowError, ValueError) as exception:
self._ReportEventError(event, 'unable to copy timestamp: {0!s} to a human readable date and time with error: {1!s}. Defaulting to: "0000-00-00T00:00:00"'.format(event.timestamp, exception))
return '0000-00-00T00:00:00' | Formats the date and time in ISO 8601 format.
Args:
event (EventObject): event.
Returns:
str: date and time field. | codesearchnet |
def oem(self, command, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
return self._simple_command(
'oem %s' % command, timeout_ms=timeout_ms, info_cb=info_cb) | Executes an OEM command on the device.
Args:
command: The command to execute, such as 'poweroff' or 'bootconfig read'.
timeout_ms: Optional timeout in milliseconds to wait for a response.
info_cb: See Download. Messages vary based on command.
Returns:
The final response from the device. | juraj-google-style |
def lengths( self ):
return( np.array( [ math.sqrt( sum( row**2 ) ) for row in self.matrix ] ) ) | The cell lengths.
Args:
None
Returns:
(np.array(a,b,c)): The cell lengths. | juraj-google-style |
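An equivalent vectorised computation with a made-up lattice matrix; the row-wise Euclidean norms are the a, b, c lengths.
```
import numpy as np

matrix = np.array([[3.0, 0.0, 0.0],
                   [0.0, 4.0, 0.0],
                   [0.0, 0.0, 5.0]])
# Row-wise Euclidean norm == the per-row sqrt(sum(row**2)) loop above.
print(np.linalg.norm(matrix, axis=1))  # [3. 4. 5.]
```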
def get_inspection_units(logdir='', event_file='', tag=''):
if logdir:
subdirs = io_wrapper.GetLogdirSubdirectories(logdir)
inspection_units = []
for subdir in subdirs:
generator = itertools.chain(*[
generator_from_event_file(os.path.join(subdir, f))
for f in tf.io.gfile.listdir(subdir)
if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f))
])
inspection_units.append(InspectionUnit(
name=subdir,
generator=generator,
field_to_obs=get_field_to_observations_map(generator, tag)))
if inspection_units:
print('Found event files in:\n{}\n'.format('\n'.join(
[u.name for u in inspection_units])))
elif io_wrapper.IsTensorFlowEventsFile(logdir):
print(
'It seems that {} may be an event file instead of a logdir. If this '
'is the case, use --event_file instead of --logdir to pass '
'it in.'.format(logdir))
else:
print('No event files found within logdir {}'.format(logdir))
return inspection_units
elif event_file:
generator = generator_from_event_file(event_file)
return [InspectionUnit(
name=event_file,
generator=generator,
field_to_obs=get_field_to_observations_map(generator, tag))]
return [] | Returns a list of InspectionUnit objects given either logdir or event_file.
If logdir is given, the number of InspectionUnits should equal the
number of directories or subdirectories that contain event files.
If event_file is given, the number of InspectionUnits should be 1.
Args:
logdir: A log directory that contains event files.
event_file: Or, a particular event file path.
tag: An optional tag name to query for.
Returns:
A list of InspectionUnit objects. | juraj-google-style |
def sg_restore(sess, save_path, category=''):
if (not isinstance(category, (tuple, list))):
category = [category]
var_list = {}
for cat in category:
for t in tf.global_variables():
if t.name.startswith(cat):
var_list[t.name[:(- 2)]] = t
saver = tf.train.Saver(var_list)
saver.restore(sess, save_path) | r""" Restores previously saved variables.
Args:
sess: A `Session` to use to restore the parameters.
save_path: Path where parameters were previously saved.
category: A `String` to filter variables starts with given category.
Returns: | codesearchnet |
def report_clean(rows):
print('DBM Report Clean')
first = True
last = False
date = None
for row in rows:
if row == ['No data returned by the reporting service.']:
break
if not row or row[0] is None or row[0] == '':
break
if first:
try:
date_column = row.index('Date')
row[date_column] = 'Report_Day'
except ValueError:
pass
row = [column_header_sanitize(cell) for cell in row]
else:
row = [cell.replace('/', '-') if isinstance(cell, str) and len(cell) == 4 + 1 + 2 + 1 + 2 and (cell[4] == '/') and (cell[7] == '/') else cell for cell in row]
row = ['' if cell.strip() in ('Unknown', '-') else '1000' if cell == '< 1000' else cell for cell in row]
yield row
first = False | Helper to fix DBM report issues for BigQuery and ensure schema compliance.
Memory efficiently cleans each row by fixing:
* Strips header and footer to preserve only data rows.
* Changes 'Date' to 'Report_Day' to avoid using reserved name in BigQuery.
* Changes date values to use '-' instead of '/' for BigQuery compatibility.
* Changes columns '-' and 'Unknown' to NULL
* Changes '< 1000' to 1000
Usage example:
```
filename, report = report_file(...)
rows = report_to_rows(report)
rows = report_clean(rows)
```
Args:
* rows: (iterator) Rows to clean.
Returns:
* Iterator of cleaned rows. | github-repos |
def array_to_img(x, data_format=None, scale=True, dtype=None):
data_format = backend.standardize_data_format(data_format)
if dtype is None:
dtype = backend.floatx()
if pil_image is None:
raise ImportError('Could not import PIL.Image. The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=dtype)
if x.ndim != 3:
raise ValueError(f'Expected image array to have rank 3 (single image). Got array with shape: {x.shape}')
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x - np.min(x)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 4:
return pil_image.fromarray(x.astype('uint8'), 'RGBA')
elif x.shape[2] == 3:
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
if np.max(x) > 255:
return pil_image.fromarray(x[:, :, 0].astype('int32'), 'I')
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError(f'Unsupported channel number: {x.shape[2]}') | Converts a 3D NumPy array to a PIL Image instance.
Example:
```python
from PIL import Image
img = np.random.random(size=(100, 100, 3))
pil_img = keras.utils.array_to_img(img)
```
Args:
x: Input data, in any form that can be converted to a NumPy array.
data_format: Image data format, can be either `"channels_first"` or
`"channels_last"`. Defaults to `None`, in which case the global
setting `keras.backend.image_data_format()` is used (unless you
changed it, it defaults to `"channels_last"`).
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively. Defaults to `True`.
dtype: Dtype to use. `None` means the global setting
`keras.backend.floatx()` is used (unless you changed it, it
defaults to `"float32"`). Defaults to `None`.
Returns:
A PIL Image instance. | github-repos |
def add_untagged_ok(self, text: MaybeBytes,
code: Optional[ResponseCode] = None) -> None:
response = ResponseOk(b'*', text, code)
self.add_untagged(response) | Add an untagged ``OK`` response.
See Also:
:meth:`.add_untagged`, :class:`ResponseOk`
Args:
text: The response text.
code: Optional response code. | juraj-google-style |
def load_drops(self, dropin):
obj = load_object(dropin)
try:
drops = getattr(obj, self.drops_type)
except AttributeError:
try:
drops = load_object(('%s.%s' % (dropin, self.drops_type)))
except ImportError:
drops = None
if hasattr(drops, '__drops__'):
drops = drops.__drops__
if callable(drops):
drops = drops(self.app)
return (drops or []) | Load `drops` from the given dropin.
Args:
dropin (string): path of a dropin, e.g. dropin.auth
Returns:
An iterable containing the drops objects in the given dropin
This method loads the drops object by convention. For example, assuming
we want to load drops type `models` from dropin `dropin.articles`, the drops are
discovered with the following sequence::
import dropin.articles
drops = dropin.articles.models
if anything goes wrong, next try is ::
import dropin.articles.models as drops
if the current drops object has attribute **__drops__** ::
drops = drops.__drops__
if the current drops object is a callable ::
drops = drops()
if no drops were found, an empty list is returned. | codesearchnet |
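A standalone sketch of that lookup convention using only `importlib`; the original's `load_object` helper and the `drops(self.app)` call are simplified away, so treat this as an approximation.
```
import importlib

def load_drops_sketch(dropin, drops_type='models'):
    # Try a module attribute first, then a submodule of the same name.
    module = importlib.import_module(dropin)
    try:
        drops = getattr(module, drops_type)
    except AttributeError:
        try:
            drops = importlib.import_module('%s.%s' % (dropin, drops_type))
        except ImportError:
            drops = None
    # Unwrap the optional __drops__ attribute and call factories.
    if hasattr(drops, '__drops__'):
        drops = drops.__drops__
    if callable(drops):
        drops = drops()  # the original passes the app instance here
    return drops or []
```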
def _add_input_deps(self, executor, args, kwargs):
if executor == 'data_manager':
return args, kwargs
inputs = kwargs.get('inputs', [])
for idx, f in enumerate(inputs):
if isinstance(f, File) and f.is_remote():
inputs[idx] = self.data_manager.stage_in(f, executor)
for kwarg, f in kwargs.items():
if isinstance(f, File) and f.is_remote():
kwargs[kwarg] = self.data_manager.stage_in(f, executor)
newargs = list(args)
for idx, f in enumerate(newargs):
if isinstance(f, File) and f.is_remote():
newargs[idx] = self.data_manager.stage_in(f, executor)
return tuple(newargs), kwargs | Look for inputs of the app that are remote files. Submit stage_in
apps for such files and replace the file objects in the inputs list with
corresponding DataFuture objects.
Args:
- executor (str) : executor where the app is going to be launched
- args (List) : Positional args to app function
- kwargs (Dict) : Kwargs to app function | juraj-google-style |
def add_send_message(self, connection, send_message):
self._send_message[connection] = send_message
LOGGER.debug("Added send_message function "
"for connection %s", connection) | Adds a send_message function to the Dispatcher's
dictionary of functions indexed by connection.
Args:
connection (str): A locally unique identifier
provided by the receiver of messages.
send_message (fn): The method that should be called
by the dispatcher to respond to messages which
arrive via connection. | juraj-google-style |
def _compile_control_flow_expression(self,
expr: Expression,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
etype = expr.etype
args = expr.args
if etype[1] == 'if':
condition = self._compile_expression(args[0], scope, batch_size, noise)
true_case = self._compile_expression(args[1], scope, batch_size, noise)
false_case = self._compile_expression(args[2], scope, batch_size, noise)
fluent = TensorFluent.if_then_else(condition, true_case, false_case)
else:
raise ValueError('Invalid control flow expression:\n{}'.format(expr))
return fluent | Compile a control flow expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL control flow expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. | juraj-google-style |
def plot_labels(ax, label_fontsize=14, xlabel=None, xlabel_arg=None, ylabel=None, ylabel_arg=None, zlabel=None, zlabel_arg=None):
xlabel = (xlabel if (xlabel is not None) else (ax.get_xlabel() or 'X'))
ylabel = (ylabel if (ylabel is not None) else (ax.get_ylabel() or 'Y'))
xlabel_arg = dict_if_none(xlabel_arg)
ylabel_arg = dict_if_none(ylabel_arg)
ax.set_xlabel(xlabel, fontsize=label_fontsize, **xlabel_arg)
ax.set_ylabel(ylabel, fontsize=label_fontsize, **ylabel_arg)
if hasattr(ax, 'zaxis'):
zlabel = (zlabel if (zlabel is not None) else (ax.get_zlabel() or 'Z'))
zlabel_arg = dict_if_none(zlabel_arg)
ax.set_zlabel(zlabel, fontsize=label_fontsize, **zlabel_arg) | Sets the labels options of a matplotlib plot
Args:
ax: matplotlib axes
label_fontsize(int): Size of the labels' font
xlabel(str): The xlabel for the figure
xlabel_arg(dict): Passed into matplotlib as xlabel arguments
ylabel(str): The ylabel for the figure
ylabel_arg(dict): Passed into matplotlib as ylabel arguments
zlabel(str): The zlabel for the figure
zlabel_arg(dict): Passed into matplotlib as zlabel arguments | codesearchnet |
def addRow(self, *value):
if ((len(value) == 1) and isinstance(value[0], (tuple, list))):
value = value[0]
assert (len(value) == self.getNumCols())
self._impl.addRow(Tuple(value)._impl) | Add a row to the DataFrame. The size of the tuple must be equal to the
total number of columns in the dataframe.
Args:
value: A single argument with a tuple containing all the values
for the row to be added, or multiple arguments with the values for
each column. | codesearchnet |
def upgrade(self, remote=None):
if self.enabled:
raise errors.DockerError('Plugin must be disabled before upgrading.')
if (remote is None):
remote = self.name
privileges = self.client.api.plugin_privileges(remote)
for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
(yield d)
self._reload() | Upgrade the plugin.
Args:
remote (string): Remote reference to upgrade to. The
``:latest`` tag is optional and is the default if omitted.
Default: this plugin's name.
Returns:
A generator streaming the decoded API logs | codesearchnet |
def _create_pseudo_names(tensors, prefix):
def one_index(ele):
if isinstance(ele, int):
return ele + 1
return ele
flat_paths = list(nest.yield_flat_paths(tensors))
flat_paths = nest.map_structure(one_index, flat_paths)
names = []
for path in flat_paths:
if not path:
name = prefix + '1'
else:
name = '_'.join((str(p) for p in path))
if isinstance(path[0], int):
name = prefix + name
names.append(name)
return names | Creates pseudo {input | output} names for subclassed Models.
Warning: this function should only be used to define default
names for `Metrics` and `SavedModel`. No other use cases should
rely on a `Model`'s input or output names.
Example with dict:
`{'a': [x1, x2], 'b': x3}` becomes:
`['a_1', 'a_2', 'b']`
Example with list:
`[x, y]` becomes:
`['output_1', 'output_2']`
Args:
tensors: `Model`'s outputs or inputs.
prefix: 'output_' for outputs, 'input_' for inputs.
Returns:
Flattened list of pseudo names. | github-repos |
def add_element(self, element):
if isinstance(element, BaseExpression):
element.set_parent(self._working_fragment)
self._working_fragment.elements.append(element)
return self
else:
return self.add_operator(element) | Add an element of type ``Operator``, ``Constraint``, or
``Expression`` to the ``Expression``.
Args:
element: ``Constraint``, ``Expression``, or ``Operator``.
Returns:
Expression: ``self``
Raises:
FiqlObjectException: Element is not a valid type. | codesearchnet |
def _bytestringToFloat(bytestring, numberOfRegisters=2):
_checkString(bytestring, minlength=4, maxlength=8, description='bytestring')
_checkInt(numberOfRegisters, minvalue=2, maxvalue=4, description='number of registers')
numberOfBytes = _NUMBER_OF_BYTES_PER_REGISTER * numberOfRegisters
formatcode = '>'
if numberOfRegisters == 2:
formatcode += 'f'
elif numberOfRegisters == 4:
formatcode += 'd'
else:
raise ValueError('Wrong number of registers! Given value is {0!r}'.format(numberOfRegisters))
if len(bytestring) != numberOfBytes:
raise ValueError('Wrong length of the byte string! Given value is {0!r}, and numberOfRegisters is {1!r}.'.\
format(bytestring, numberOfRegisters))
return _unpack(formatcode, bytestring) | Convert a four-byte string to a float.
Floats are stored in two or more consecutive 16-bit registers in the slave.
For discussion on precision, number of bits, number of registers, the range, byte order
and on alternative names, see :func:`minimalmodbus._floatToBytestring`.
Args:
* bytestring (str): A string of length 4 or 8.
* numberOfRegisters (int): Can be 2 or 4.
Returns:
A float.
Raises:
TypeError, ValueError | juraj-google-style |
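The unpacking itself is plain struct; a self-contained sketch of the two-register (32-bit) case:
import struct

bytestring = struct.pack('>f', 1.0)          # four bytes, big-endian single precision
assert bytestring == b'\x3f\x80\x00\x00'
value = struct.unpack('>f', bytestring)[0]   # what the helper returns for 2 registers
print(value)                                 # 1.0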
def sanger_variants(self, institute_id=None, case_id=None):
query = {'validation': {'$exists': True}}
if institute_id:
query['institute_id'] = institute_id
if case_id:
query['case_id'] = case_id
return self.variant_collection.find(query) | Return all variants with sanger information
Args:
institute_id(str)
case_id(str)
Returns:
res(pymongo.Cursor): A Cursor with all variants with sanger activity | juraj-google-style |
def __init__(self, ascii_codepage='cp1252', key_path_prefix=''):
super(WinRegistryFile, self).__init__()
self._ascii_codepage = ascii_codepage
self._key_path_prefix = key_path_prefix
self._key_path_prefix_length = len(key_path_prefix)
self._key_path_prefix_upper = key_path_prefix.upper() | Initializes a Windows Registry file.
Args:
ascii_codepage (Optional[str]): ASCII string codepage.
key_path_prefix (Optional[str]): Windows Registry key path prefix. | juraj-google-style |
def int_to_bit(self, x_int, num_bits, base=2):
x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1))
x_labels = [
tf.floormod(
tf.floordiv(tf.to_int32(x_l),
tf.to_int32(base)**i), tf.to_int32(base))
for i in range(num_bits)]
res = tf.concat(x_labels, axis=-1)
return tf.to_float(res) | Turn x_int representing numbers into a bitwise (lower-endian) tensor.
Args:
x_int: Tensor containing integer to be converted into base
notation.
num_bits: Number of bits in the representation.
base: Base of the representation.
Returns:
Corresponding number expressed in base. | juraj-google-style |
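The same lower-endian digit expansion, sketched with plain NumPy to make the arithmetic concrete (this mirrors the math only, not the TensorFlow code path):
import numpy as np

x_int = np.array([5, 6])
num_bits, base = 4, 2
digits = np.stack([(x_int // base**i) % base for i in range(num_bits)], axis=-1).astype(float)
print(digits)
# [[1. 0. 1. 0.]    -> 5 = 1*1 + 0*2 + 1*4
#  [0. 1. 1. 0.]]   -> 6 = 0*1 + 1*2 + 1*4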
def get(self, key, default='', stringify=True):
obj = self.__getitem__(key)
if (obj is None):
obj = default
elif stringify:
obj = str(obj)
return obj | Returns dictionary values or default.
Args:
key: string. Dictionary key to look up.
default: string. Return this value if key not found.
stringify: bool. Force all return values to string for compatibility
reasons.
Returns:
python-wrapped CF object or default if not found. | codesearchnet |
def _cell_magic(line, query):
args = magic_arguments.parse_argstring(_cell_magic, line)
params = []
if (args.params is not None):
try:
params = _helpers.to_query_parameters(ast.literal_eval(''.join(args.params)))
except Exception:
raise SyntaxError('--params is not a correctly formatted JSON string or a JSON serializable dictionary')
project = (args.project or context.project)
client = bigquery.Client(project=project, credentials=context.credentials)
bqstorage_client = _make_bqstorage_client((args.use_bqstorage_api or context.use_bqstorage_api), context.credentials)
job_config = bigquery.job.QueryJobConfig()
job_config.query_parameters = params
job_config.use_legacy_sql = args.use_legacy_sql
query_job = _run_query(client, query, job_config)
if (not args.verbose):
display.clear_output()
result = query_job.to_dataframe(bqstorage_client=bqstorage_client)
if args.destination_var:
IPython.get_ipython().push({args.destination_var: result})
else:
return result | Underlying function for bigquery cell magic
Note:
This function contains the underlying logic for the 'bigquery' cell
magic. This function is not meant to be called directly.
Args:
line (str): "%%bigquery" followed by arguments as required
query (str): SQL query to run
Returns:
pandas.DataFrame: the query results. | codesearchnet |
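In a notebook, the cell magic above is typically invoked like this (the project, destination variable, and query below are illustrative):
%%bigquery my_df --project my-gcp-project
SELECT name, SUM(number) AS total
FROM `bigquery-public-data.usa_names.usa_1910_2013`
GROUP BY name
ORDER BY total DESC
LIMIT 10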
def __init__(self, partitioned_dim_sizes, inner_dim_sizes, dim_size_dtype=None):
assert isinstance(partitioned_dim_sizes, (list, tuple))
with ops.name_scope(None, 'RaggedTensorDynamicShape', (partitioned_dim_sizes, inner_dim_sizes)):
partitioned_dim_sizes = tuple((ops.convert_to_tensor(size, name='partitioned_dimension_size_%d' % i) for i, size in enumerate(partitioned_dim_sizes)))
inner_dim_sizes = ops.convert_to_tensor(inner_dim_sizes, name='inner_dim_sizes')
if partitioned_dim_sizes:
for axis, dimension_size in enumerate(partitioned_dim_sizes):
if dimension_size.shape.ndims is None:
raise ValueError('rank of partitioned_dim_sizes[%d] is unknown' % axis)
dimension_size.shape.with_rank_at_most(1)
if partitioned_dim_sizes[0].shape.ndims == 1:
raise ValueError('outermost partitioned dimension must be uniform')
if partitioned_dim_sizes[-1].shape.ndims == 0:
raise ValueError('innermost partitioned dimension must be ragged')
inner_dim_sizes.shape.assert_has_rank(1)
if dim_size_dtype is None:
dim_size_dtypes = set((p.dtype for p in partitioned_dim_sizes if p.shape.ndims == 1))
if not dim_size_dtypes:
dim_size_dtype = dtypes.int64
elif len(dim_size_dtypes) == 1:
dim_size_dtype = dim_size_dtypes.pop()
else:
if not ragged_config.auto_cast_partition_dtype():
raise ValueError('partitioned_dim_sizes must have matching dtypes')
dim_size_dtype = dtypes.int64
partitioned_dim_sizes = tuple((math_ops.cast(p, dim_size_dtype) for p in partitioned_dim_sizes))
inner_dim_sizes = math_ops.cast(inner_dim_sizes, dim_size_dtype)
self._partitioned_dim_sizes = partitioned_dim_sizes
self._inner_dim_sizes = inner_dim_sizes | Creates a RaggedTensorDynamicShape.
Args:
partitioned_dim_sizes: A `list` of 0-D or 1-D integer `Tensor`, one for
each partitioned dimension. If dimension `d` is uniform, then
`partitioned_dim_sizes[d]` must be an integer scalar, specifying the
size of all slices across dimension `d`. If dimension `d` is ragged,
then `partitioned_dim_sizes[d]` must be an integer vector, specifying
the size of each slice across dimension `d`.
inner_dim_sizes: A 1-D integer `Tensor`, whose length is equal to the
number of inner dimensions. `inner_dim_sizes[n]` is the size of all
slices across the `n`th inner dimension (which is the
        `(len(partitioned_dim_sizes)+n)`th dimension in the overall tensor).
dim_size_dtype: dtype for dimension sizes. If not specified, then it
is chosen based on the dtypes of `partitioned_dim_sizes` and
`inner_dim_sizes`. | github-repos |
def asdatetime(self, naive=True):
args = list(self.timetuple()[0:6])+[self.microsecond]
if not naive:
args.append(self.tzinfo)
return datetime.datetime(*args) | Return this datetime_tz as a datetime object.
Args:
naive: Return *without* any tz info.
Returns:
This datetime_tz as a datetime object. | juraj-google-style |
def ParseHeader(table):
precondition.AssertIterableType(table, dict)
prototype = None
for row in table:
columns = list(iterkeys(row))
if (prototype is None):
prototype = columns
elif (prototype != columns):
message = "Expected columns '{expected}', got '{actual}' for table {json}"
message = message.format(expected=prototype, actual=columns, json=table)
raise ValueError(message)
result = rdf_osquery.OsqueryHeader()
for name in (prototype or []):
result.columns.append(rdf_osquery.OsqueryColumn(name=name))
return result | Parses header of osquery output.
Args:
table: A table in a "parsed JSON" representation.
Returns:
A parsed `rdf_osquery.OsqueryHeader` instance. | codesearchnet |
def search_artists_by_name(self, artist_name: str, limit: int = 5) -> List[NameExternalIDPair]:
response: requests.Response = requests.get(
self._API_URL_TEMPLATE.format("search"),
params={"q": artist_name, "type": "artist", "limit": limit},
headers={"Authorization": "Bearer {}".format(self._token.access_token)}
)
response.raise_for_status()
if not response.text:
return []
result: List[NameExternalIDPair] = []
data: List[Dict] = response.json()["artists"]["items"]
for artist in data:
artist = NameExternalIDPair(artist["name"].strip(), artist["id"].strip())
if not artist.name or not artist.external_id:
raise SpotifyClientError("Name or ID is missing")
result.append(artist)
return result | Returns zero or more artist name - external ID pairs that match the specified artist name.
Arguments:
artist_name (str): The artist name to search in the Spotify API.
limit (int): The maximum number of results to return.
Returns:
Zero or more artist name - external ID pairs.
Raises:
requests.HTTPError: If an HTTP error occurred during the request.
SpotifyClientError: If an invalid item is found. | juraj-google-style |
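A sketch of the underlying Web API request; the access token must come from your own Spotify OAuth flow, so this is illustrative rather than runnable as-is:
import requests

ACCESS_TOKEN = '...'   # assumption: obtained via the client-credentials flow
response = requests.get(
    'https://api.spotify.com/v1/search',
    params={'q': 'Radiohead', 'type': 'artist', 'limit': 5},
    headers={'Authorization': 'Bearer {}'.format(ACCESS_TOKEN)},
)
response.raise_for_status()
for artist in response.json()['artists']['items']:
    print(artist['name'], artist['id'])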
def convert_line_endings(filename: str, to_unix: bool = False,
to_windows: bool = False) -> None:
assert to_unix != to_windows
with open(filename, "rb") as f:
contents = f.read()
windows_eol = b"\r\n"
unix_eol = b"\n"
if to_unix:
log.info("Converting from Windows to UNIX line endings: {!r}",
filename)
src = windows_eol
dst = unix_eol
    else:
        log.info("Converting from UNIX to Windows line endings: {!r}",
                 filename)
        src = unix_eol
        dst = windows_eol
        if windows_eol in contents:
            log.info("... already contains at least one Windows line ending; "
                     "probably converted before; skipping")
            return
    contents = contents.replace(src, dst)
with open(filename, "wb") as f:
f.write(contents) | Converts a file (in place) from UNIX to Windows line endings, or the
reverse.
Args:
filename: filename to modify (in place)
to_unix: convert Windows (CR LF) to UNIX (LF)
to_windows: convert UNIX (LF) to Windows (CR LF) | juraj-google-style |
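The byte-level replacement is easy to verify in isolation; a small self-contained round trip:
import os
import tempfile

with tempfile.NamedTemporaryFile('wb', suffix='.txt', delete=False) as f:
    f.write(b'line one\nline two\n')            # UNIX line endings
    path = f.name

with open(path, 'rb') as f:
    contents = f.read()
with open(path, 'wb') as f:
    f.write(contents.replace(b'\n', b'\r\n'))   # UNIX -> Windows

with open(path, 'rb') as f:
    assert f.read() == b'line one\r\nline two\r\n'
os.remove(path)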
def HeartBeat(self):
if (self.allow_overruns or (not self.job.lifetime)):
return
runtime = (rdfvalue.RDFDatetime.Now() - self.run_state.started_at)
if (runtime > self.lifetime):
raise LifetimeExceededError(('Cronjob run has exceeded the maximum runtime of %s.' % self.lifetime)) | Terminates a cronjob-run if it has exceeded its maximum runtime.
This is a no-op for cronjobs that allow overruns.
Raises:
LifetimeExceededError: If the cronjob has exceeded its maximum runtime. | codesearchnet |
def Insert(self, key, value, row_index):
if (row_index < 0):
row_index += len(self)
if (not (0 <= row_index < len(self))):
raise IndexError(('Index "%s" is out of bounds.' % row_index))
new_row = Row()
for idx in self.header:
if (self.index(idx) == row_index):
new_row[key] = value
new_row[idx] = self[idx]
self._keys = new_row.header
self._values = new_row.values
del new_row
self._BuildIndex() | Inserts new values at a specified offset.
Args:
key: string for header value.
value: string for a data value.
row_index: Offset into row for data.
Raises:
      IndexError: If the offset is out of bounds. | codesearchnet |
def static_uniform_row_length(self):
if self._uniform_row_length is not None:
return tensor_util.constant_value(self._uniform_row_length)
return None | The number of values in each row of this partition, if statically known.
Returns:
The number of values in each row of this partition as an `int` (if
statically known); or `None` (otherwise). | github-repos |
def _is_sequence_right_padded(mask):
max_seq_length = mask.shape[1]
count_of_true = torch.sum(mask, dim=1)
batch_size = mask.shape[0]
indices = torch.arange(max_seq_length, device=mask.device).repeat(batch_size, 1)
right_padded_mask = indices < count_of_true.unsqueeze(1)
    return torch.all(mask == right_padded_mask) | Check the mask tensor and see if it is right padded.
    cuDNN uses the sequence length param to skip the trailing
    timesteps. If the data is left padded, or not a strict right padding (has
masked value in the middle of the sequence), then cuDNN won't work
properly in those cases.
Left padded data: [[False, False, True, True, True]].
Right padded data: [[True, True, True, False, False]].
Mixture of mask/unmasked data: [[True, False, True, False, False]].
    Note that for the mixed data example above, the actual data the RNN should see
    are the two True entries (index 0 and 2); the index 1 False should be ignored and
    not pollute the internal states.
Args:
mask: the Boolean tensor with shape [batch, timestep]
Returns:
boolean scalar tensor, whether the mask is strictly right padded. | github-repos |
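A quick check of the helper on the three mask shapes described in the docstring, assuming the function is in scope as defined above:
import torch

right = torch.tensor([[True, True, True, False, False]])
left = torch.tensor([[False, False, True, True, True]])
mixed = torch.tensor([[True, False, True, False, False]])

print(_is_sequence_right_padded(right))   # tensor(True)
print(_is_sequence_right_padded(left))    # tensor(False)
print(_is_sequence_right_padded(mixed))   # tensor(False)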
def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor) -> torch.Tensor:
hidden_states = self.patch_embed(hidden_states)
rotary_pos_emb = self.rot_pos_emb(grid_thw)
window_index, cu_window_seqlens = self.get_window_index(grid_thw)
cu_window_seqlens = torch.tensor(cu_window_seqlens, device=hidden_states.device, dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32)
cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)
seq_len, _ = hidden_states.size()
        hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
hidden_states = hidden_states[window_index, :, :]
hidden_states = hidden_states.reshape(seq_len, -1)
        rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
rotary_pos_emb = rotary_pos_emb[window_index, :, :]
rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
position_embeddings = (emb.cos(), emb.sin())
cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(dim=0, dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32)
cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
for layer_num, blk in enumerate(self.blocks):
if layer_num in self.fullatt_block_indexes:
cu_seqlens_now = cu_seqlens
else:
cu_seqlens_now = cu_window_seqlens
if self.gradient_checkpointing and self.training:
hidden_states = self._gradient_checkpointing_func(blk.__call__, hidden_states, cu_seqlens_now, None, position_embeddings)
else:
hidden_states = blk(hidden_states, cu_seqlens=cu_seqlens_now, position_embeddings=position_embeddings)
hidden_states = self.merger(hidden_states)
reverse_indices = torch.argsort(window_index)
hidden_states = hidden_states[reverse_indices, :]
return hidden_states | Args:
hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
The final hidden states of the model.
grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
The temporal, height and width of feature shape of each image in LLM.
Returns:
`torch.Tensor`: hidden_states. | github-repos |
def add_all_transport_reactions(model, boundaries, allow_duplicates=False):
all_reactions = {}
if not allow_duplicates:
for rxnid in model.database.reactions:
rx = model.database.get_reaction(rxnid)
all_reactions[rx] = rxnid
boundary_pairs = set()
for source, dest in boundaries:
if source != dest:
boundary_pairs.add(tuple(sorted((source, dest))))
added = set()
added_pairs = set()
initial_compounds = set(model.compounds)
reactions = set(model.database.reactions)
for compound in initial_compounds:
for c1, c2 in boundary_pairs:
compound1 = compound.in_compartment(c1)
compound2 = compound.in_compartment(c2)
pair = compound1, compound2
if pair in added_pairs:
continue
rxnid_tp = create_transport_id(reactions, compound1, compound2)
reaction_tp = Reaction(Direction.Both, {
compound1: -1,
compound2: 1
})
if reaction_tp not in all_reactions:
model.database.set_reaction(rxnid_tp, reaction_tp)
reactions.add(rxnid_tp)
else:
rxnid_tp = all_reactions[reaction_tp]
if not model.has_reaction(rxnid_tp):
added.add(rxnid_tp)
model.add_reaction(rxnid_tp)
added_pairs.add(pair)
return added | Add all transport reactions to database and to model.
Add transport reactions for all boundaries. Boundaries are defined
by pairs (2-tuples) of compartment IDs. Transport reactions are
added for all compounds in the model, not just for compounds in the
two boundary compartments.
Args:
model: :class:`psamm.metabolicmodel.MetabolicModel`.
boundaries: Set of compartment boundary pairs.
Returns:
Set of IDs of reactions that were added. | juraj-google-style |
def fdatasync(self, file_des):
if self.filesystem.is_windows_fs or self.filesystem.is_macos:
raise AttributeError("module 'os' has no attribute 'fdatasync'")
if 0 <= file_des < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
self.filesystem.get_open_file(file_des) | Perform fdatasync for a fake file (in other words, do nothing).
Args:
file_des: The file descriptor of the open file.
Raises:
OSError: file_des is an invalid file descriptor.
TypeError: file_des is not an integer. | juraj-google-style |
def _flatten_dict(original_dict):
flat_dict = {}
for (key, value) in original_dict.items():
if isinstance(value, dict):
for (name, tensor) in value.items():
if isinstance(tensor, dict):
raise ValueError('flatten_dict only handles 2 levels of nesting.')
flat_key = ((('__' + key) + '_') + name)
flat_dict[flat_key] = tensor
else:
flat_dict[key] = value
return flat_dict | Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting. | codesearchnet |
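An example of the prefixing behaviour, assuming _flatten_dict is in scope as defined above:
nested = {'targets': 1, 'inputs': {'image': 2, 'audio': 3}}
print(_flatten_dict(nested))
# {'targets': 1, '__inputs_image': 2, '__inputs_audio': 3}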
def analyze_results(import_dict_objects: Dict[str, List[str]], type_hint_objects: Dict[str, List[str]]) -> List[str]:
def find_duplicates(seq):
return [k for k, v in collections.Counter(seq).items() if v > 1]
if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
return ['Both sides of the init do not have the same backends!']
errors = []
for key in import_dict_objects.keys():
duplicate_imports = find_duplicates(import_dict_objects[key])
if duplicate_imports:
errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}')
duplicate_type_hints = find_duplicates(type_hint_objects[key])
if duplicate_type_hints:
errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}')
if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
name = 'base imports' if key == 'none' else f'{key} backend'
errors.append(f'Differences for {name}:')
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f' {a} in TYPE_HINT but not in _import_structure.')
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f' {a} in _import_structure but not in TYPE_HINT.')
return errors | Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
Args:
import_dict_objects (`Dict[str, List[str]]`):
A dictionary mapping backend names (`"none"` for the objects independent of any specific backend) to
list of imported objects.
type_hint_objects (`Dict[str, List[str]]`):
A dictionary mapping backend names (`"none"` for the objects independent of any specific backend) to
list of imported objects.
Returns:
`List[str]`: The list of errors corresponding to mismatches. | github-repos |
def assert_same_float_dtype(tensors=None, dtype=None):
if tensors:
dtype = _assert_same_base_type(tensors, dtype)
if (not dtype):
dtype = tf.float32
elif (not is_floating(dtype)):
raise ValueError('Expected floating point type, got {}.'.format(dtype))
return dtype | Validate and return float type based on `tensors` and `dtype`.
For ops such as matrix multiplication, inputs and weights must be of the
same float type. This function validates that all `tensors` are the same type,
validates that type is `dtype` (if supplied), and returns the type. Type must
be a floating point type. If neither `tensors` nor `dtype` is supplied,
the function will return `dtypes.float32`.
Args:
tensors: Tensors of input values. Can include `None` elements, which will
be ignored.
dtype: Expected type.
Returns:
Validated type.
Raises:
ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
float, or the common type of the inputs is not a floating point type. | codesearchnet |
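A brief sketch, assuming the helper and its private `_assert_same_base_type` / `is_floating` dependencies are importable in a TensorFlow environment:
import tensorflow as tf

a = tf.constant([1.0, 2.0], dtype=tf.float32)
b = tf.constant([3.0, 4.0], dtype=tf.float32)
print(assert_same_float_dtype([a, b]))            # tf.float32
print(assert_same_float_dtype(None, tf.float64))  # keeps the supplied dtype
print(assert_same_float_dtype())                  # defaults to tf.float32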
def conv_json(self, uri_format="sparql_uri", add_ids=False):
def convert_item(ivalue):
nvalue = ivalue
if isinstance(ivalue, BaseRdfDataType):
if ivalue.type == 'uri':
if ivalue.startswith("pyuri") and uri_format == "pyuri":
nvalue = getattr(ivalue, "sparql")
else:
nvalue = getattr(ivalue, uri_format)
else:
nvalue = ivalue.to_json
elif isinstance(ivalue, RdfClassBase):
if ivalue.subject.type == "uri":
nvalue = ivalue.conv_json(uri_format, add_ids)
elif ivalue.subject.type == "bnode":
nvalue = ivalue.conv_json(uri_format, add_ids)
elif isinstance(ivalue, list):
nvalue = []
for item in ivalue:
temp = convert_item(item)
nvalue.append(temp)
return nvalue
rtn_val = {key: convert_item(value) for key, value in self.items()}
if add_ids:
if self.subject.type == 'uri':
rtn_val['uri'] = self.subject.sparql_uri
rtn_val['id'] = sha1(rtn_val['uri'].encode()).hexdigest()
        return rtn_val | Converts the class to a JSON-compatible python dictionary
Args:
uri_format('sparql_uri','pyuri'): The format that uri values will
be returned
Returns:
            dict: a JSON-compatible python dictionary | juraj-google-style |
def deserialize_sparse_tensors(tensors, types, shapes, classes):
ret = nest.pack_sequence_as(types, [sparse_ops.deserialize_sparse(tensor, dtype=ty, rank=shape.ndims) if c is sparse_tensor.SparseTensor else tensor for tensor, ty, shape, c in zip(nest.flatten(tensors), nest.flatten(types), nest.flatten(shapes), nest.flatten(classes))])
return ret | Deserializes sparse tensors.
Args:
tensors: a structure of tensors to deserialize.
types: a structure that holds information about types of `tensors`
shapes: a structure that holds information about shapes of `tensors`
classes: a structure of objects that identify the dataset item classes
Returns:
`tensors` with any serialized sparse tensors replaced by their deserialized
version. | github-repos |
def list_keyvaults_sub(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.KeyVault/vaults',
'?api-version=', KEYVAULT_API])
return do_get_next(endpoint, access_token) | Lists key vaults belonging to this subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. 200 OK. | juraj-google-style |
def get_package(self, name) -> 'EffectPackage':
(name, cls_name) = parse_package_string(name)
try:
return self.package_map[name]
except KeyError:
raise EffectError("No package '{}' registered".format(name)) | Get a package by python path. Can also contain path to an effect.
Args:
name (str): Path to effect package or effect
Returns:
The requested EffectPackage
Raises:
EffectError when no package is found | codesearchnet |
def update_file(filename, result, content, indent):
parts = re.split('---+', content, 2)
frontmatter = yaml.safe_load(parts[1])
frontmatter['counts'] = result['counts']
parts[1] = '\n{}'.format(yaml.safe_dump(frontmatter, default_flow_style=False, indent=indent))
result = '---'.join(parts)
with open(filename, 'wb') as f:
f.write(result.encode('utf-8'))
print('{} updated.'.format(filename)) | Updates a Jekyll file to contain the counts form an object
This just converts the results to YAML and adds to the Jekyll frontmatter.
Args:
filename: the Jekyll file to update
result: the results object from `wc`
content: the contents of the original file
indent: the indentation level for dumping YAML | codesearchnet |
def read_array(self, key, embedded=True):
return self.read(key, True, embedded) | Alias for read method that will read any type (e.g., String, KeyValue) and always
return array.
Args:
key (string): The variable to read from the DB.
embedded (boolean): Resolve embedded variables.
Returns:
(any): Results retrieved from DB | juraj-google-style |
def SubtractFromBalance(self, assetId, fixed8_val):
found = False
for (key, balance) in self.Balances.items():
if (key == assetId):
self.Balances[assetId] = (self.Balances[assetId] - fixed8_val)
found = True
if (not found):
            self.Balances[assetId] = (fixed8_val * Fixed8((- 1))) | Subtract the amount from the specified balance.
Args:
assetId (UInt256):
            fixed8_val (Fixed8): amount to subtract. | codesearchnet |
def launch_run(self, command, project=None, entity=None, run_id=None):
query = gql('\n mutation launchRun(\n $entity: String\n $model: String\n $runId: String\n $image: String\n $command: String\n $patch: String\n $cwd: String\n $datasets: [String]\n ) {\n launchRun(input: {id: $runId, entityName: $entity, patch: $patch, modelName: $model,\n image: $image, command: $command, datasets: $datasets, cwd: $cwd}) {\n podName\n status\n runId\n }\n }\n ')
patch = BytesIO()
if self.git.dirty:
self.git.repo.git.execute(['git', 'diff'], output_stream=patch)
patch.seek(0)
cwd = '.'
if self.git.enabled:
cwd = (cwd + os.getcwd().replace(self.git.repo.working_dir, ''))
return self.gql(query, variable_values={'entity': (entity or self.settings('entity')), 'model': (project or self.settings('project')), 'command': command, 'runId': run_id, 'patch': patch.read().decode('utf8'), 'cwd': cwd}) | Launch a run in the cloud.
Args:
command (str): The command to run
project (str): The project to scope the runs to
entity (str, optional): The entity to scope this project to. Defaults to public models
run_id (str, optional): The run_id to scope to
Returns:
[{"podName","status"}] | codesearchnet |
def reverse(self):
if self.closed():
raise ValueError('Attempt to call reverse() on a closed Queryable.')
try:
r = reversed(self._iterable)
return self._create(r)
except TypeError:
pass
return self._create(self._generate_reverse_result()) | Returns the sequence reversed.
Note: This method uses deferred execution, but the whole source
sequence is consumed once execution commences.
Returns:
The source sequence in reverse order.
Raises:
ValueError: If the Queryable is closed(). | codesearchnet |