code (string, 20-4.93k chars) | docstring (string, 33-1.27k chars) | source (3 classes) |
---|---|---|
def sheets_tab_range(sheet_tab, sheet_range):
if sheet_range:
return '%s!%s' % (sheet_tab, sheet_range)
else:
return sheet_tab
|
Helper for creating range format.
Args:
sheet_tab - name of tab in sheet
sheet_range - A1 notation
Returns:
String containing full sheet range specification.
|
github-repos
|
def read(self, size=None):
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError('Invalid current offset value less than zero.')
if self._current_offset > self._size:
return b''
if size is None or self._current_offset + size > self._size:
size = self._size - self._current_offset
self._tar_ext_file.seek(self._current_offset, os.SEEK_SET)
data = self._tar_ext_file.read(size)
self._current_offset += len(data)
return data
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
juraj-google-style
|
def Open(self, path=None, read_only=True, **unused_kwargs):
if self._is_open:
raise IOError('Storage file already opened.')
if (not path):
raise ValueError('Missing path.')
path = os.path.abspath(path)
connection = sqlite3.connect(path, detect_types=(sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES))
cursor = connection.cursor()
if (not cursor):
return
self._connection = connection
self._cursor = cursor
self._is_open = True
self._read_only = read_only
if read_only:
self._ReadAndCheckStorageMetadata(check_readable_only=True)
else:
self._cursor.execute('PRAGMA synchronous=OFF')
if (not self._HasTable('metadata')):
self._WriteStorageMetadata()
else:
self._ReadAndCheckStorageMetadata()
if (self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB):
data_column_type = 'BLOB'
else:
data_column_type = 'TEXT'
for container_type in self._CONTAINER_TYPES:
if (not self._HasTable(container_type)):
if (container_type == self._CONTAINER_TYPE_EVENT):
query = self._CREATE_EVENT_TABLE_QUERY.format(container_type, data_column_type)
else:
query = self._CREATE_TABLE_QUERY.format(container_type, data_column_type)
self._cursor.execute(query)
self._connection.commit()
last_session_start = self._CountStoredAttributeContainers(self._CONTAINER_TYPE_SESSION_START)
last_session_completion = self._CountStoredAttributeContainers(self._CONTAINER_TYPE_SESSION_COMPLETION)
for container_type in self._REFERENCED_CONTAINER_TYPES:
container_list = self._GetSerializedAttributeContainerList(container_type)
container_list.next_sequence_number = self._CountStoredAttributeContainers(container_type)
if (last_session_start != last_session_completion):
logger.warning('Detected unclosed session.')
self._last_session = last_session_completion
|
Opens the storage.
Args:
path (Optional[str]): path to the storage file.
read_only (Optional[bool]): True if the file should be opened in
read-only mode.
Raises:
IOError: if the storage file is already opened or if the database
cannot be connected.
OSError: if the storage file is already opened or if the database
cannot be connected.
ValueError: if path is missing.
|
codesearchnet
|
def get_openapi_dict(self, services, hostname=None, x_google_api_name=False):
if not isinstance(services, (tuple, list)):
services = [services]
util.check_list_type(services, remote._ServiceClass, 'services',
allow_none=False)
return self.__api_openapi_descriptor(services, hostname=hostname, x_google_api_name=x_google_api_name)
|
JSON dict description of a protorpc.remote.Service in OpenAPI format.
Args:
services: Either a single protorpc.remote.Service or a list of them
that implements an api/version.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
dict, The OpenAPI descriptor document as a JSON dict.
|
juraj-google-style
|
def read_file(self, file: Union[IO, asyncio.StreamWriter]=None):
if file:
file_is_async = hasattr(file, 'drain')
while True:
data = yield from self._connection.read(4096)
if not data:
break
if file:
file.write(data)
if file_is_async:
yield from file.drain()
self._data_event_dispatcher.notify_read(data)
|
Read from connection to file.
Args:
file: A file object or a writer stream.
|
juraj-google-style
|
def __call__(self, obj, attr, obj_ref):
from textx.const import RULE_COMMON, RULE_ABSTRACT
from textx.model import ObjCrossRef
from textx.scoping.tools import get_parser
if obj_ref is None:
return None
assert type(obj_ref) is ObjCrossRef, type(obj_ref)
if get_parser(obj).debug:
get_parser(obj).dprint("Resolving obj crossref: {}:{}"
.format(obj_ref.cls, obj_ref.obj_name))
def _inner_resolve_link_rule_ref(cls, obj_name):
if cls._tx_type is RULE_ABSTRACT:
for inherited in cls._tx_inh_by:
result = _inner_resolve_link_rule_ref(inherited,
obj_name)
if result:
return result
elif cls._tx_type == RULE_COMMON:
if id(cls) in get_parser(obj)._instances:
objs = get_parser(obj)._instances[id(cls)]
return objs.get(obj_name)
if self.multi_metamodel_support:
from textx import get_model, get_children
from textx import textx_isinstance
result_lst = get_children(
lambda x:
hasattr(x, "name") and x.name == obj_ref.obj_name
and textx_isinstance(x, obj_ref.cls), get_model(obj))
if len(result_lst) == 1:
result = result_lst[0]
elif len(result_lst) > 1:
line, col = get_parser(obj).pos_to_linecol(obj_ref.position)
raise TextXSemanticError(
"name {} is not unique.".format(obj_ref.obj_name),
line=line, col=col, filename=get_model(obj)._tx_filename)
else:
result = None
else:
result = _inner_resolve_link_rule_ref(obj_ref.cls,
obj_ref.obj_name)
if result:
return result
return None
|
the default scope provider
Args:
obj: unused (used for multi_metamodel_support)
attr: unused
obj_ref: the cross reference to be resolved
Returns:
the resolved reference or None
|
juraj-google-style
|
def _query(self, path: str, method: str, data: Dict[(str, Any)]=None, expected_status: int=200) -> Union[(List[Dict[(str, Any)]], Dict[(str, Any)], None)]:
url = (Pycord.url_base + path)
self.logger.debug(f'Making {method} request to "{url}"')
if (method == 'GET'):
r = requests.get(url, headers=self._build_headers())
elif (method == 'POST'):
r = requests.post(url, headers=self._build_headers(), json=data)
elif (method == 'PATCH'):
r = requests.patch(url, headers=self._build_headers(), json=data)
else:
raise ValueError(f'Unknown HTTP method {method}')
self.logger.debug(f'{method} response from "{url}" was "{r.status_code}"')
if (r.status_code != expected_status):
raise ValueError(f'Non-{expected_status} {method} response from Discord API ({r.status_code}): {r.text}')
if (expected_status == 200):
return r.json()
return None
|
Make an HTTP request
Args:
path: the URI path (not including the base url, start with
the first uri segment, like 'users/...')
method: the HTTP method to use (GET, POST, PATCH, ...)
data: the data to send as JSON data
expected_status: expected HTTP status; other statuses
received will raise an Exception
Returns:
Data from the endpoint's response
|
codesearchnet
|
def set_video_pos(self, x1, y1, x2, y2):
position = "%s %s %s %s" % (str(x1),str(y1),str(x2),str(y2))
self._player_interface.VideoPos(ObjectPath('/not/used'), String(position))
|
Set the video position on the screen
Args:
x1 (int): Top left x coordinate (px)
y1 (int): Top left y coordinate (px)
x2 (int): Bottom right x coordinate (px)
y2 (int): Bottom right y coordinate (px)
|
juraj-google-style
|
def document(self, *document_path):
if (len(document_path) == 1):
path = document_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER)
else:
path = document_path
return DocumentReference(*path, client=self)
|
Get a reference to a document in a collection.
For a top-level document:
.. code-block:: python
>>> client.document('collek/shun')
>>> # is the same as
>>> client.document('collek', 'shun')
For a document in a sub-collection:
.. code-block:: python
>>> client.document('mydocs/doc/subcol/child')
>>> # is the same as
>>> client.document('mydocs', 'doc', 'subcol', 'child')
Documents in sub-collections can be nested deeper in a similar fashion.
Args:
document_path (Tuple[str, ...]): Can either be
* A single ``/``-delimited path to a document
* A tuple of document path segments
Returns:
~.firestore_v1beta1.document.DocumentReference: A reference
to a document in a collection.
|
codesearchnet
|
def memoizedmethod(method):
method_name = method.__name__
@wraps(method)
def patched(self, *args, **kwargs):
'Patched method'
try:
return self._cache[method_name]
except KeyError:
result = self._cache[method_name] = method(self, *args, **kwargs)
return result
return patched
|
Decorator that caches method result.
Args:
method (function): Method
Returns:
function: Memoized method.
Notes:
Target method's class needs a "_cache" attribute (dict).
It is the case of "ObjectIOBase" and all its subclasses.
|
codesearchnet
|
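A minimal usage sketch for the decorator above (the `Example` class is hypothetical, not from the dataset; it assumes `memoizedmethod` as defined in the row, which itself needs `from functools import wraps`). The result is cached under the method name, so arguments are ignored on repeat calls:
class Example:
    def __init__(self):
        self._cache = {}  # required by memoizedmethod, per the Notes above

    @memoizedmethod
    def expensive(self):
        print('computing...')
        return 42

e = Example()
e.expensive()  # prints 'computing...', returns 42 and stores it in e._cache
e.expensive()  # returns 42 straight from e._cache, no recomputation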
def expect_no_raises(message=None, extras=None):
try:
(yield)
except Exception as e:
e_record = records.ExceptionRecord(e)
if extras:
e_record.extras = extras
msg = (message or 'Got an unexpected exception')
details = ('%s: %s' % (msg, e_record.details))
logging.exception(details)
e_record.details = details
recorder.add_error(e_record)
|
Expects no exception is raised in a context.
If the expectation is not met, the test is marked as fail after its
execution finishes.
A default message is added to the exception `details`.
Args:
message: string, custom message to add to exception's `details`.
extras: An optional field for extra information to be included in test
result.
|
codesearchnet
|
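Hedged usage sketch: in the originating library this generator is wrapped with contextlib.contextmanager (the decorator is not shown in this row), so tests use it as a `with` block; an exception raised inside the block is logged and recorded against the test instead of propagating.
# with expect_no_raises('teardown should not raise'):
#     device.teardown()   # `device` is a hypothetical test object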
def submitTemplate(id, data={}):
conn = Qubole.agent()
path = str(id) + "/run"
return conn.post(Template.element_path(path), data)
|
Submit an existing Template.
Args:
`id`: ID of the template to submit
`data`: json data containing the input_vars
Returns:
Dictionary containing Command Object details.
|
juraj-google-style
|
def set_tif(self, interface):
if (not ((1 << interface) & self.supported_tifs())):
raise errors.JLinkException(('Unsupported target interface: %s' % interface))
res = self._dll.JLINKARM_TIF_Select(interface)
if (res != 0):
return False
self._tif = interface
return True
|
Selects the specified target interface.
Note that a restart must be triggered for this to take effect.
Args:
self (Jlink): the ``JLink`` instance
interface (int): integer identifier of the interface
Returns:
``True`` if target was updated, otherwise ``False``.
Raises:
JLinkException: if the given interface is invalid or unsupported.
|
codesearchnet
|
def merge(self, workdir, gswfk_file, dfpt_files, gkk_files, out_gkk, binascii=0):
raise NotImplementedError("This method should be tested")
    gswfk_file = os.path.abspath(gswfk_file)
dfpt_files = [os.path.abspath(s) for s in list_strings(dfpt_files)]
gkk_files = [os.path.abspath(s) for s in list_strings(gkk_files)]
print("Will merge %d 1WF files, %d GKK file in output %s" %
(len(dfpt_files), len(gkk_files), out_gkk))
if self.verbose:
for i, f in enumerate(dfpt_files): print(" [%d] 1WF %s" % (i, f))
for i, f in enumerate(gkk_files): print(" [%d] GKK %s" % (i, f))
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [workdir], ["mrggkk.stdin", "mrggkk.stdout", "mrggkk.stderr"])
inp = StringIO()
inp.write(out_gkk + "\n")
inp.write(str(binascii) + "\n")
inp.write(gswfk_file + "\n")
    dims = " ".join(str(d) for d in (len(dfpt_files), len(gkk_files)))  # NOTE: `dims` was undefined in the snippet; inferred here as the 1WF and GKK file counts
inp.write(dims + "\n")
for fname in dfpt_files:
inp.write(fname + "\n")
for fname in gkk_files:
inp.write(fname + "\n")
self.stdin_data = [s for s in inp.getvalue()]
with open(self.stdin_fname, "w") as fh:
fh.writelines(self.stdin_data)
fh.flush()
os.fsync(fh.fileno())
self.execute(workdir)
return out_gkk
|
Merge GKK files, return the absolute path of the new database.
Args:
gswfk_file: Ground-state WFK filename
dfpt_files: List of 1WFK files to merge.
gkk_files: List of GKK files to merge.
out_gkk: Name of the output GKK file
binascii: Integer flag. 0 --> binary output, 1 --> ascii formatted output
|
juraj-google-style
|
def _set_class_path(cls, module_dict=sys.modules):
found = cls.__dict__.get('_class_path')
if found is not None:
return
if cls is Pipeline:
return
class_path = '%s.%s' % (cls.__module__, cls.__name__)
if cls.__module__ == '__main__':
for name, module in module_dict.items():
if name == '__main__':
continue
found = getattr(module, cls.__name__, None)
if found is cls:
class_path = '%s.%s' % (name, cls.__name__)
break
cls._class_path = class_path
|
Sets the absolute path to this class as a string.
Used by the Pipeline API to reconstruct the Pipeline sub-class object
at execution time instead of passing around a serialized function.
Args:
module_dict: Used for testing.
|
juraj-google-style
|
def get_line_count(fname):
i = 0
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
|
Counts the number of lines in a file.
Args:
fname: string, name of the file.
Returns:
integer, the number of lines in the file.
|
juraj-google-style
|
def GetFormatStringAttributeNames(self, event):
event_formatter = self.GetEventFormatter(event)
if not event_formatter:
return None
return event_formatter.GetFormatStringAttributeNames()
|
Retrieves the attribute names in the format string.
Args:
event (EventObject): event.
Returns:
list[str]: list containing the attribute names. If no event formatter to
match the event can be found the function returns None.
|
juraj-google-style
|
def get_seed(seed):
seed, seed2 = random_seed.get_seed(seed)
if seed is None:
seed = constant_op.constant(0, dtype=dtypes.int64, name='seed')
else:
seed = ops.convert_to_tensor(seed, dtype=dtypes.int64, name='seed')
if seed2 is None:
seed2 = constant_op.constant(0, dtype=dtypes.int64, name='seed2')
else:
with ops.name_scope('seed2') as scope:
seed2 = ops.convert_to_tensor(seed2, dtype=dtypes.int64)
seed2 = array_ops.where_v2(math_ops.logical_and(math_ops.equal(seed, 0), math_ops.equal(seed2, 0)), constant_op.constant(2 ** 31 - 1, dtype=dtypes.int64), seed2, name=scope)
return (seed, seed2)
|
Returns the local seeds an operation should use given an op-specific seed.
See `random_seed.get_seed` for more details. This wrapper adds support for
the case where `seed` may be a tensor.
Args:
seed: An integer or a `tf.int64` scalar tensor.
Returns:
A tuple of two `tf.int64` scalar tensors that should be used for the local
seed of the calling dataset.
|
github-repos
|
def _scan_two_qubit_ops_into_matrix(self, circuit: circuits.Circuit, index: Optional[int], qubits: Tuple[(ops.Qid, ...)]) -> Tuple[(List[ops.Operation], List[int], np.ndarray)]:
product = np.eye(4, dtype=np.complex128)
all_operations = []
touched_indices = []
while (index is not None):
operations = list({circuit.operation_at(q, index) for q in qubits})
op_data = [self._op_to_matrix(op, qubits) for op in operations if (op is not None)]
if any(((e is None) for e in op_data)):
break
present_ops = [op for op in operations if op]
present_op_data = cast(List[np.ndarray], op_data)
for op_mat in present_op_data:
product = np.dot(op_mat, product)
all_operations.extend(present_ops)
touched_indices.append(index)
index = circuit.next_moment_operating_on(qubits, (index + 1))
return (all_operations, touched_indices, product)
|
Accumulates operations affecting the given pair of qubits.
The scan terminates when it hits the end of the circuit, finds an
operation without a known matrix, or finds an operation that interacts
the given qubits with other qubits.
Args:
circuit: The circuit to scan for operations.
index: The index to start scanning forward from.
qubits: The pair of qubits we care about.
Returns:
A tuple containing:
0. The operations.
1. The moment indices those operations were on.
2. A matrix equivalent to the effect of the scanned operations.
|
codesearchnet
|
def calculate_weighted_avg(bonds):
minimum_bond = min(bonds)
weighted_sum = 0.0
total_sum = 0.0
for entry in bonds:
weighted_sum += entry * exp(1 - (entry / minimum_bond) ** 6)
total_sum += exp(1 - (entry / minimum_bond) ** 6)
return weighted_sum / total_sum
|
Returns the weighted average bond length given by
Hoppe's effective coordination number formula.
Args:
bonds (list): list of floats that are the
bond distances between a cation and its
peripheral ions
|
juraj-google-style
|
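A self-contained sketch of the same calculation (it assumes `exp` comes from the standard math module, which the row above does not show; the bond lengths are illustrative only):
from math import exp

def calculate_weighted_avg(bonds):
    minimum_bond = min(bonds)
    weighted_sum = 0.0
    total_sum = 0.0
    for entry in bonds:
        weight = exp(1 - (entry / minimum_bond) ** 6)  # Hoppe's weighting term
        weighted_sum += entry * weight
        total_sum += weight
    return weighted_sum / total_sum

print(calculate_weighted_avg([2.0, 2.1, 2.4]))  # ~2.07: shorter bonds dominate the average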
def is_decomposed(P):
if P.shape:
return min([is_decomposed(poly) for poly in P])
return len(P.keys) <= 1
|
Check if a polynomial (array) is on component form.
Args:
P (Poly):
Input data.
Returns:
(bool):
True if all polynomials in ``P`` are on component form.
Examples:
>>> x,y = cp.variable(2)
>>> print(cp.is_decomposed(cp.Poly([1,x,x*y])))
True
>>> print(cp.is_decomposed(cp.Poly([x+1,x*y])))
False
|
juraj-google-style
|
def setEditorData(self, spinBox, index):
if index.isValid():
value = index.model().data(index, QtCore.Qt.EditRole)
spinBox.setValue(value)
|
Sets the data to be displayed and edited by the editor from the data model item specified by the model index.
Args:
spinBox (BigIntSpinbox): editor widget.
index (QModelIndex): model data index.
|
codesearchnet
|
def stft_magnitude(signal, fft_length,
hop_length=None,
window_length=None):
frames = frame(signal, window_length, hop_length)
window = periodic_hann(window_length)
windowed_frames = frames * window
return np.abs(np.fft.rfft(windowed_frames, int(fft_length)))
|
Calculate the short-time Fourier transform magnitude.
Args:
signal: 1D np.array of the input time-domain signal.
fft_length: Size of the FFT to apply.
hop_length: Advance (in samples) between each frame passed to FFT.
window_length: Length of each block of samples to pass to FFT.
Returns:
2D np.array where each row contains the magnitudes of the fft_length/2+1
unique values of the FFT for the corresponding frame of input samples.
|
juraj-google-style
|
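The snippet above depends on frame() and periodic_hann() helpers that this row does not include. Below is a hedged, self-contained numpy sketch of plausible versions plus a usage example; the helper implementations are assumptions, not the original module's code, and the usage assumes stft_magnitude from the row above is in scope.
import numpy as np

def frame(data, window_length, hop_length):
    # Slice the signal into overlapping frames of `window_length` samples.
    num_frames = 1 + (len(data) - window_length) // hop_length
    return np.stack([data[i * hop_length:i * hop_length + window_length]
                     for i in range(num_frames)])

def periodic_hann(window_length):
    # "Periodic" Hann window: denominator N rather than N - 1.
    return 0.5 - 0.5 * np.cos(2 * np.pi * np.arange(window_length) / window_length)

signal = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000.0)  # 1 s of 440 Hz at 16 kHz
mags = stft_magnitude(signal, fft_length=512, hop_length=160, window_length=400)
print(mags.shape)  # (98, 257): one row per frame, fft_length // 2 + 1 magnitude bins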
def get_pmg_structure(phonopy_structure):
lattice = phonopy_structure.get_cell()
frac_coords = phonopy_structure.get_scaled_positions()
symbols = phonopy_structure.get_chemical_symbols()
masses = phonopy_structure.get_masses()
mms = phonopy_structure.get_magnetic_moments()
mms = mms or [0] * len(symbols)
return Structure(lattice, symbols, frac_coords,
site_properties={"phonopy_masses": masses,
"magnetic_moments": mms})
|
Convert a PhonopyAtoms object to pymatgen Structure object.
Args:
phonopy_structure (PhonopyAtoms): A phonopy structure object.
|
juraj-google-style
|
def refine_rotation(self):
new_x, y = get_uvec(self[0]), get_uvec(self[1])
new_y = y - np.dot(new_x, y) * new_x
new_z = np.cross(new_x, new_y)
return SquareTensor([new_x, new_y, new_z])
|
Helper method for refining rotation matrix by ensuring
that the second and third rows are perpendicular to the first.
Gets new y vector from an orthogonal projection of x onto y
and the new z vector from a cross product of the new x and y
Args:
tol to test for rotation
Returns:
new rotation matrix
|
juraj-google-style
|
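A standalone numpy sketch of the same idea, with get_uvec and SquareTensor replaced by plain numpy (an assumption-laden illustration, not pymatgen code; unlike the snippet above, new_y is also re-normalized here so the orthonormality check holds exactly):
import numpy as np

def refine_rotation(r):
    new_x = r[0] / np.linalg.norm(r[0])       # unit first row
    y = r[1] / np.linalg.norm(r[1])
    new_y = y - np.dot(new_x, y) * new_x      # remove the component along x
    new_y /= np.linalg.norm(new_y)
    new_z = np.cross(new_x, new_y)            # completes a right-handed basis
    return np.array([new_x, new_y, new_z])

noisy = np.eye(3) + 1e-3 * np.random.rand(3, 3)   # almost a rotation matrix
refined = refine_rotation(noisy)
print(np.allclose(refined @ refined.T, np.eye(3)))  # True: rows are orthonormal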
def sanitize_arg_name(name: str) -> str:
swapped = ''.join([c if c.isalnum() else '_' for c in name])
result = swapped if swapped[0].isalpha() else 'arg_' + swapped
global sanitization_warnings_given
if name != result and sanitization_warnings_given < MAX_SANITIZATION_WARNINGS:
logging.warning('`%s` is not a valid tf.function parameter name. Sanitizing to `%s`.', name, result)
sanitization_warnings_given += 1
return result
|
Sanitizes function argument names.
Matches Python symbol naming rules.
Without sanitization, names that are not legal Python parameter names can be
set which makes it challenging to represent callables supporting the named
calling capability.
Args:
name: The name to sanitize.
Returns:
A string that meets Python parameter conventions.
|
github-repos
|
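Expected behaviour of the sanitizer above, shown as a hedged sketch (the module-level `sanitization_warnings_given` counter and `MAX_SANITIZATION_WARNINGS` limit it references are assumed to be defined elsewhere in the original module):
# sanitize_arg_name('learning-rate')  -> 'learning_rate'   (non-alphanumeric chars become '_')
# sanitize_arg_name('0_weights')      -> 'arg_0_weights'   (prefixed because it starts with a digit)
# sanitize_arg_name('bias')           -> 'bias'            (already valid, returned unchanged, no warning)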
def setLCD(self, password='00000000'):
result = False
self.setContext('setLCD')
try:
self.clearCmdMsg()
if (len(password) != 8):
self.writeCmdMsg('Invalid password length.')
self.setContext('')
return result
if (not self.request()):
self.writeCmdMsg('Bad read CRC on setting')
elif (not self.serialCmdPwdAuth(password)):
self.writeCmdMsg('Password failure')
else:
req_table = ''
fill_len = (40 - len(self.m_lcd_items))
for lcdid in self.m_lcd_items:
append_val = binascii.hexlify(str(lcdid).zfill(2))
req_table += append_val
for i in range(0, fill_len):
append_val = binascii.hexlify(str(0).zfill(2))
req_table += append_val
req_str = (('015731023030443228' + req_table) + '2903')
req_str += self.calc_crc16(req_str[2:].decode('hex'))
self.m_serial_port.write(req_str.decode('hex'))
if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):
self.writeCmdMsg('Success: 06 returned.')
result = True
self.serialPostEnd()
except:
ekm_log(traceback.format_exc(sys.exc_info()))
self.setContext('')
return result
|
Serial call to set LCD using the meter object buffer.
Used with :func:`~ekmmeters.V4Meter.addLcdItem`.
Args:
password (str): Optional password
Returns:
bool: True on completion and ACK.
|
codesearchnet
|
def PushTask(self, task):
storage_file_size = getattr(task, 'storage_file_size', None)
if (not storage_file_size):
raise ValueError('Task storage file size not set.')
if (task.file_entry_type == dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY):
weight = 1
else:
weight = storage_file_size
task.merge_priority = weight
heap_values = (weight, task)
heapq.heappush(self._heap, heap_values)
self._task_identifiers.add(task.identifier)
|
Pushes a task onto the heap.
Args:
task (Task): task.
Raises:
ValueError: if the size of the storage file is not set in the task.
|
codesearchnet
|
def get_tensors(self, node_name, output_slot, debug_op, device_name=None):
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
try:
device_name = self._infer_device_name(device_name, node_name)
return [datum.get_tensor() for datum in self._watch_key_to_datum[device_name][watch_key]]
except (ValueError, KeyError):
raise WatchKeyDoesNotExistInDebugDumpDirError('Watch key "%s" does not exist in the debug dump of device %s' % (watch_key, device_name))
|
Get the tensor value for a debug-dumped tensor.
The tensor may be dumped multiple times in the dump root directory, so a
list of tensors (`numpy.ndarray`) is returned.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
List of tensors (`numpy.ndarray`) loaded from the debug-dump file(s).
Raises:
WatchKeyDoesNotExistInDebugDumpDirError: If the tensor does not exist in
the debug-dump data.
|
github-repos
|
def _craft_s3_keys(self):
now = time.gmtime()
stub = "templates/{stack_name}/{version}".format(
stack_name=self._config.get('environment', {}).get('stack_name', None),
version=self._config.get('codeVersion')
)
stub = stub + "/" + str(now.tm_year)
stub = stub + "/" + str('%02d' % now.tm_mon)
stub = stub + "/" + str('%02d' % now.tm_mday)
stub = stub + "/" + str('%02d' % now.tm_hour)
stub = stub + ":" + str('%02d' % now.tm_min)
stub = stub + ":" + str('%02d' % now.tm_sec)
if self._yaml:
template_key = stub + "/stack.yaml"
else:
template_key = stub + "/stack.json"
property_key = stub + "/stack.properties"
return template_key, property_key
|
We are putting stuff into S3 and were supplied the bucket. Here we
craft the keys of the elements we are putting up there in the
internet clouds.
Args:
None
Returns:
a tuple of template file key and property file key
|
juraj-google-style
|
def _embedding_lookup_for_ragged_tensor(inp: ragged_tensor.RaggedTensor, weight: Optional[ragged_tensor.RaggedTensor], table: tf_variables.Variable, feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:
if inp.shape.rank != 2:
raise ValueError('Only rank 2 ragged tensor is supported, but got rank {}'.format(inp.shape.rank))
batch_size = inp.shape[0]
if feature.output_shape:
output_batch_size = math_ops.reduce_prod(feature.output_shape)
if output_batch_size == batch_size:
ragged_output = _ragged_embedding_lookup_with_reduce(table, inp, weight, feature.table.combiner)
ragged_output = array_ops.reshape(ragged_output, shape=feature.output_shape + [feature.table.dim])
elif output_batch_size > batch_size and output_batch_size % batch_size == 0:
ragged_output = embedding_ops.embedding_lookup_v2(table, inp)
      ragged_output = ragged_output.to_tensor(shape=[batch_size, output_batch_size // batch_size, feature.table.dim])
ragged_output = array_ops.reshape(ragged_output, feature.output_shape + [feature.table.dim])
else:
raise ValueError('Output shape set in the FeatureConfig should be the factor of the input data batch size. But instead got output shape {}, input data batch size {}'.format(feature.output_shape, batch_size))
elif feature.max_sequence_length > 0:
output_shape = [batch_size, feature.max_sequence_length, feature.table.dim]
ragged_lookup = embedding_ops.embedding_lookup_v2(table, inp)
ragged_output = ragged_lookup.to_tensor(shape=output_shape)
else:
ragged_output = _ragged_embedding_lookup_with_reduce(table, inp, weight, feature.table.combiner)
return ragged_output
|
Embedding lookup for ragged tensor based on its feature config.
Args:
inp: a single rank 2 RaggedTensor input.
weight: None or RaggedTensor which has the same shape of the input.
table: a table variable.
feature: a feature config.
Returns:
Embedding lookup result.
Raises:
ValueError: if input ragged tensor is not rank 2 or output shape set in the
feature config doesn't match with the first dim size of the input.
|
github-repos
|
def get_graph(graph, conn, **kwargs):
sparql = render_without_request("sparqlGraphDataTemplate.rq",
prefix=NSM.prefix(),
graph=graph)
return conn.query(sparql, **kwargs)
|
Returns all the triples for a specific graph
Args:
graph: the URI of the graph to retrieve
conn: the rdfframework triplestore connection
|
juraj-google-style
|
def xml_to_json(root, tag_prefix=None, on_tag={}):
def get_key(tag):
if (tag_prefix is not None):
return tag.split(tag_prefix)[1]
return tag
def parse_element(elmt):
key = get_key(elmt.tag)
if (key in on_tag):
return on_tag[key](elmt)
items = dict(elmt.items())
if (len(elmt) == 0):
if items:
return {**items, **{key: elmt.text}}
else:
return elmt.text
else:
tags = {child.tag for child in elmt}
max_children = max({len(child) for child in elmt})
if (len(tags) == 1):
value_list = [parse_element(child) for child in elmt]
if items:
return {**items, **{key: value_list}}
else:
return value_list
elif (len(tags) > 1):
tag2children = {tag: [] for tag in tags}
for child in elmt:
tag2children[child.tag].append(child)
if (max_children == 0):
value_dict = {get_key(tag): ([child.text for child in children] if (len(children) > 1) else children[0].text) for (tag, children) in tag2children.items()}
else:
value_dict = {get_key(tag): ([parse_element(child) for child in children] if (len(children) > 1) else parse_element(children[0])) for (tag, children) in tag2children.items()}
if items:
return {**items, **value_dict}
else:
return value_dict
return parse_element(root)
|
Parses a XML element to JSON format.
This is a relatively generic function parsing a XML element
to JSON format. It does not guarantee any specific formal
behaviour but is empirically known to "work well" with respect
to the author's needs. External verification of the returned
results by the user is therefore instrumental.
For bigger XML elements the whole procedure may take a while,
so the philosophy should be to save the laboriously mapped
JSON data structure to a file once you have it. This of course
also means that this function is probably of little value
when you have to constantly JSONify big XMLs. In summary,
this function is mostly useful for one-time parsing of XML to
JSON for subsequent use of the resulting JSON data instead of
the XML-formatted data.
Args:
root: A XML element
tag_prefix: A tag prefix which will be cut from the keys
on_tag: User-defined parsing for elements identified by tag
Returns:
A Python data structure corresponding to the JSON mapping
of the supplied XML element
|
codesearchnet
|
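A hypothetical usage sketch for xml_to_json above (the XML content is invented for illustration):
import xml.etree.ElementTree as ET

root = ET.fromstring(
    '<book id="1"><title>Example</title>'
    '<authors><author>A</author><author>B</author></authors></book>'
)
print(xml_to_json(root))
# expected roughly: {'id': '1', 'title': 'Example', 'authors': ['A', 'B']}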
def unpack(container, path):
from benchbuild.utils.run import run
from benchbuild.utils.uchroot import no_args
path = local.path(path)
c_filename = local.path(container.filename)
name = c_filename.basename
if (not path.exists()):
path.mkdir()
with local.cwd(path):
Wget(container.remote, name)
uchroot = no_args()
uchroot = uchroot[('-E', '-A', '-C', '-r', '/', '-w', os.path.abspath('.'), '--')]
has_erlent = bash[('-c', "tar --list -f './{0}' | grep --silent '.erlent'".format(name))]
has_erlent = (has_erlent & TF)
untar = local['/bin/tar'][('xf', ('./' + name))]
if (not has_erlent):
untar = uchroot[untar]
run(untar['--exclude=dev/*'])
if (not os.path.samefile(name, container.filename)):
rm(name)
else:
LOG.warning('File contents do not match: %s != %s', name, container.filename)
cp((container.filename + '.hash'), path)
|
Unpack a container usable by uchroot.
Method that checks if a directory for the container exists,
checks if erlent support is needed and then unpacks the
container accordingly.
Args:
path: The location where the container is, that needs to be unpacked.
|
codesearchnet
|
def get_compatible_systems(self, id_or_uri):
uri = (self._client.build_uri(id_or_uri) + '/compatible-systems')
return self._client.get(uri)
|
Retrieves a collection of all storage systems that are applicable to this storage volume template.
Args:
id_or_uri:
Can be either the storage volume template ID or the URI.
Returns:
list: Storage systems.
|
codesearchnet
|
def _dataset_partition(self, mode, config, params):
if ((mode != tf.estimator.ModeKeys.TRAIN) or (not hasattr(config, 'tpu_config'))):
self._next_partition_id = 0
return (0, 1)
phift = config.tpu_config.per_host_input_for_training
if (hasattr(tpu_config.InputPipelineConfig, 'BROADCAST') and (phift == tpu_config.InputPipelineConfig.BROADCAST)):
return (0, 1)
if phift:
    num_hosts = (params['context'].num_hosts if ('context' in params) else (config.tpu_config.num_shards // 8))  # assumes 8 TPU cores per host
num_partitions = max(num_hosts, 1)
else:
num_partitions = config.tpu_config.num_shards
partition_id = getattr(self, '_next_partition_id', 0)
self._next_partition_id = (partition_id + 1)
tf.logging.info(('num_partitions = %d partition_id = %d' % (num_partitions, partition_id)))
assert (partition_id < num_partitions)
return (partition_id, num_partitions)
|
Which part of the training data to read.
If there are multiple parallel calls to input_fn (multiple TPU hosts),
then we want each one to read from a separate partition of the training
data.
Args:
mode: tf.estimator.ModeKeys
config: RunConfig
params: A dict that contains parameters.
Returns:
partition_id: an integer
num_partitions: an integer
|
codesearchnet
|
def StatEntryFromStat(stat,
pathspec,
ext_attrs = True):
result = rdf_client_fs.StatEntry(pathspec=pathspec)
for attr in _STAT_ATTRS:
value = getattr(stat.GetRaw(), attr, None)
if value is None:
continue
value = int(value)
if value < 0:
value &= 0xFFFFFFFF
setattr(result, attr, value)
result.st_flags_linux = stat.GetLinuxFlags()
result.st_flags_osx = stat.GetOsxFlags()
if ext_attrs:
result.ext_attrs = list(GetExtAttrs(stat.GetPath()))
return result
|
Build a stat entry object from a given stat object.
Args:
stat: A `Stat` object.
pathspec: A `PathSpec` from which `stat` was obtained.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
|
juraj-google-style
|
def victim(self, name, owner=None, **kwargs):
return Victim(self.tcex, name, owner=owner, **kwargs)
|
Create the Victim TI object.
Args:
owner:
name:
**kwargs:
Return:
|
juraj-google-style
|
def tar_archive(context):
logger.debug('start')
mode = get_file_mode_for_writing(context)
for item in context['tar']['archive']:
destination = context.get_formatted_string(item['out'])
source = context.get_formatted_string(item['in'])
with tarfile.open(destination, mode) as archive_me:
logger.debug(f"Archiving '{source}' to '{destination}'")
archive_me.add(source, arcname='.')
logger.info(f"Archived '{source}' to '{destination}'")
logger.debug('end')
|
Archive specified path to a tar archive.
Args:
context: dictionary-like. context is mandatory.
context['tar']['archive'] must exist. It's a list of dicts.
Each dict has an 'in' key (the path to archive) and an
'out' key (the destination output path).
Example:
tar:
archive:
- in: path/to/dir
out: path/to/destination.tar.xs
- in: another/my.file
out: ./my.tar.xs
This will archive directory path/to/dir to path/to/destination.tar.xs,
and also archive file another/my.file to ./my.tar.xs
|
codesearchnet
|
def from_dict(event_dict):
return SnippetEvent(callback_id=event_dict['callbackId'], name=event_dict['name'], creation_time=event_dict['time'], data=event_dict['data'])
|
Create a SnippetEvent object from a dictionary.
Args:
event_dict: a dictionary representing an event.
Returns:
A SnippetEvent object.
|
codesearchnet
|
def from_vision_qformer_text_configs(cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: Optional[PretrainedConfig]=None, **kwargs):
return cls(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict() if text_config is not None else None, **kwargs)
|
Instantiate a [`Blip2Config`] (or a derived class) from a BLIP-2 vision model, Q-Former and language model
configurations.
Args:
vision_config (`dict`):
Dictionary of configuration options used to initialize [`Blip2VisionConfig`].
qformer_config (`dict`):
Dictionary of configuration options used to initialize [`Blip2QFormerConfig`].
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize any [`PretrainedConfig`].
Returns:
[`Blip2Config`]: An instance of a configuration object
|
github-repos
|
def get_data(__pkg: str, __name: str) -> str:
for dname in get_data_dirs(__pkg):
test_path = path.join(dname, __name)
if path.exists(test_path):
return test_path
raise FileNotFoundError('No data file {!r} for {!r}'.format(__name, __pkg))
|
Return top-most data file for given package.
Args:
__pkg: Package name
__name: Data file name
|
juraj-google-style
|
def parents(self, sourcepath, recursive=True):
    return self._get_recursive_dependancies(self._PARENTS_MAP, sourcepath, recursive=recursive)
|
Recursively find all parents that import the given source path.
Args:
sourcepath (str): Source file path to search for.
Keyword Arguments:
recursive (bool): Switch to enable recursive finding (if True).
Default to True.
Returns:
set: Set of found parent paths.
|
codesearchnet
|
def get_rooms(self, sort=True):
rooms = self._connection.get('rooms')
if sort:
rooms.sort(key=operator.itemgetter('name'))
return rooms
|
Get rooms list.
Kwargs:
sort (bool): If True, sort rooms by name
Returns:
array. List of rooms (each room is a dict)
|
codesearchnet
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
version = match.get('LastAttemptSystemVersion', 'N/A')
pending = match.get('LastUpdatesAvailable', None)
event_data = plist_event.PlistTimeEventData()
event_data.desc = 'Last MacOS {0:s} full update.'.format(version)
event_data.key = ''
event_data.root = '/'
datetime_value = match.get('LastFullSuccessfulDate', None)
if datetime_value:
event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
datetime_value = match.get('LastSuccessfulDate', None)
if (datetime_value and pending):
software = []
for update in match.get('RecommendedUpdates', []):
identifier = update.get('Identifier', '<IDENTIFIER>')
product_key = update.get('Product Key', '<PRODUCT_KEY>')
software.append('{0:s}({1:s})'.format(identifier, product_key))
if (not software):
return
software = ','.join(software)
event_data.desc = 'Last Mac OS {0!s} partially update, pending {1!s}: {2:s}.'.format(version, pending, software)
event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extracts relevant MacOS update entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
|
codesearchnet
|
def inspect_container(self, container):
return self._result(
self._get(self._url("/containers/{0}/json", container)), True
)
|
Identical to the `docker inspect` command, but only for containers.
Args:
container (str): The container to inspect
Returns:
(dict): Similar to the output of `docker inspect`, but as a
single dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
juraj-google-style
|
def get_help(func):
help_text = ""
if isinstance(func, dict):
name = context_name(func)
help_text = "\n" + name + "\n\n"
doc = inspect.getdoc(func)
if doc is not None:
doc = inspect.cleandoc(doc)
help_text += doc + '\n'
return help_text
sig = func.metadata.signature()
doc = inspect.getdoc(func)
if doc is not None:
doc = inspect.cleandoc(doc)
help_text += "\n" + sig + "\n\n"
if doc is not None:
help_text += doc + '\n'
if inspect.isclass(func):
func = func.__init__
if func.metadata.load_from_doc:
return help_text
help_text += "\nArguments:\n"
for key, info in func.metadata.annotated_params.items():
type_name = info.type_name
desc = ""
if info.desc is not None:
desc = info.desc
help_text += " - %s (%s): %s\n" % (key, type_name, desc)
return help_text
|
Return usage information about a context or function.
For contexts, just return the context name and its docstring
For functions, return the function signature as well as its
argument types.
Args:
func (callable): An annotated callable function
Returns:
str: The formatted help text
|
juraj-google-style
|
def load_new_checkpoint_when_available(
self, sess, current_checkpoint, sleep_seconds=10):
while True:
next_checkpoint = self.load_from_checkpoint(sess)
if not next_checkpoint or next_checkpoint == current_checkpoint:
print('Model not yet available, sleeping for %d seconds: '
'path %s; found: %s' %
(sleep_seconds,
os.path.dirname(self._save_path), current_checkpoint))
sys.stdout.flush()
time.sleep(sleep_seconds)
else:
return next_checkpoint
|
Waits for a new checkpoint to be available and then loads it.
Args:
sess: The current session.
current_checkpoint: The current checkpoint or None to just load the next
one.
sleep_seconds: How long to sleep between checks.
Returns:
The next checkpoint to use.
|
juraj-google-style
|
def constant(duration: int, amp: complex, name: str = None) -> SamplePulse:
return _sampled_constant_pulse(duration, amp, name=name)
|
Generates constant-sampled `SamplePulse`.
Applies `left` sampling strategy to generate discrete pulse from continuous function.
Args:
duration: Duration of pulse. Must be greater than zero.
amp: Complex pulse amplitude.
name: Name of pulse.
|
juraj-google-style
|
def get_by_name(self, name):
try:
spec = self._dom.get('templates', {})[name]
except KeyError:
raise LagoMissingTemplateError(name, self._path)
return Template(name=name, versions={ver_name: TemplateVersion(name=('%s:%s:%s' % (self.name, name, ver_name)), source=self._providers[ver_spec['source']], handle=ver_spec['handle'], timestamp=ver_spec['timestamp']) for (ver_name, ver_spec) in spec['versions'].items()})
|
Retrieve a template by its name
Args:
name (str): Name of the template to retrieve
Raises:
LagoMissingTemplateError: if no template is found
|
codesearchnet
|
def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
output_format = getattr(options, 'output_format', 'dynamic')
output_filename = getattr(options, 'write', None)
if output_format != 'list':
if not output_manager.OutputManager.HasOutputClass(output_format):
raise errors.BadConfigOption(
'Unsupported output format: {0:s}.'.format(output_format))
if output_manager.OutputManager.IsLinearOutputModule(output_format):
if not output_filename:
raise errors.BadConfigOption((
'Output format: {0:s} requires an output file').format(
output_format))
if os.path.exists(output_filename):
raise errors.BadConfigOption(
'Output file already exists: {0:s}.'.format(output_filename))
setattr(configuration_object, '_output_format', output_format)
setattr(configuration_object, '_output_filename', output_filename)
|
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
|
juraj-google-style
|
def __init__(self, channel):
self.send = channel.stream_stream(
'/predix.eventhub.Publisher/send',
request_serializer=EventHub__pb2.PublishRequest.SerializeToString,
response_deserializer=EventHub__pb2.PublishResponse.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def assert_not_visible(self, selector, testid=None, **kwargs):
self.info_log(('Assert not visible selector(%s) testid(%s)' % (selector, testid)))
highlight = kwargs.get('highlight', BROME_CONFIG['highlight']['highlight_on_assertion_failure'])
self.debug_log(('effective highlight: %s' % highlight))
wait_until_not_visible = kwargs.get('wait_until_not_visible', BROME_CONFIG['proxy_driver']['wait_until_not_visible_before_assert_not_visible'])
self.debug_log(('effective wait_until_not_visible: %s' % wait_until_not_visible))
if wait_until_not_visible:
self.wait_until_not_visible(selector, raise_exception=False)
element = self.find(selector, raise_exception=False, wait_until_visible=False, wait_until_present=False)
if (element and element.is_displayed(raise_exception=False)):
data = self.execute_script('return arguments[0].getBoundingClientRect();', element._element)
if highlight:
element.highlight(style=BROME_CONFIG['highlight']['style_on_assertion_failure'])
if (testid is not None):
self.create_test_result(testid, False, extra_data={'bounding_client_rect': data, 'video_x_offset': self.browser_config.get('video_x_offset', 0), 'video_y_offset': self.browser_config.get('video_y_offset', 0)})
return False
else:
if (testid is not None):
self.create_test_result(testid, True)
return True
|
Assert that the element is not visible in the dom
Args:
selector (str): the selector used to find the element
test_id (str): the test_id or a str
Kwargs:
wait_until_not_visible (bool)
highlight (bool)
Returns:
bool: True if the assertion succeeds; False otherwise.
|
codesearchnet
|
def ast_dict_to_objects(ast_dict: Mapping[(str, Any)], bel_obj) -> BELAst:
ast_subject = ast_dict.get('subject', None)
ast_object = ast_dict.get('object', None)
bel_subject = None
bel_object = None
bel_relation = ast_dict.get('relation')
if ast_subject:
bel_subject = function_ast_to_objects(ast_subject, bel_obj)
if ast_object:
bel_object = function_ast_to_objects(ast_object, bel_obj)
ast_obj = BELAst(bel_subject, bel_relation, bel_object, bel_obj.spec)
return ast_obj
|
Convert Tatsu AST dictionary to BEL AST object
Args:
ast_dict (Mapping[str, Any])
Returns:
BELAst: object representing the BEL Statement AST
|
codesearchnet
|
def __init__(self, metrics_namespace: Optional[str]=None, is_streaming: bool=False, gpu: Optional[costs.Accelerator]=None, pcollection: str='ProcessOutput.out0'):
self.is_streaming = is_streaming
self.gpu = gpu
self.pcollection = pcollection
super().__init__(metrics_namespace=metrics_namespace)
self.dataflow_client = DataflowApplicationClient(self.pipeline.get_pipeline_options())
self.monitoring_client = monitoring_v3.MetricServiceClient()
|
Initializes DataflowCostBenchmark.
Args:
metrics_namespace (Optional[str]): Namespace for metrics.
is_streaming (bool): Whether the pipeline is streaming or batch.
gpu (Optional[costs.Accelerator]): Optional GPU type.
pcollection (str): PCollection name to monitor throughput.
|
github-repos
|
def compute_cost_graph(self, devices=None):
cost_graph_def = cost_graph_pb2.CostGraphDef()
for (i, operation_name) in enumerate(self.get_all_operation_names()):
node = cost_graph_def.node.add(name=operation_name, device=self.get_operation_device(operation_name), id=i)
for input_name in self.get_operation_input_names(operation_name):
(id1, id2) = self._tensor_name_to_ids[input_name]
node.input_info.add(preceding_node=id1, preceding_port=id2)
for output_name in self.get_operation_output_names(operation_name):
tensor_device = self.get_tensor_device(output_name)
if ((devices is None) or (tensor_device is None) or (tensor_device in devices)):
node.output_info.add(size=self.get_tensor_num_entries(output_name), alias_input_port=(- 1), dtype=self.get_tensor_dtype(output_name).as_datatype_enum, shape=self.get_tensor_shape(output_name).as_proto())
else:
node.output_info.add(size=0, alias_input_port=(- 1), dtype=self.get_tensor_dtype(output_name).as_datatype_enum)
if self.is_tensor_final(output_name):
node.is_final = True
return cost_graph_def
|
Computes a CostGraphDef protobuf based on this graph.
Defined in tensorflow/core/framework/cost_graph.proto.
Args:
devices: optional [string], the names of devices to consider. If
specified, any tensor on a device not listed is given a size of zero.
Any device-less tensor (e.g. Mesh TensorFlow tensor) is not affected.
Returns:
a CostGraphDef protobuf with a Node for every operation in the graph, each
of which is populated with size/dtype information for its inputs and
outputs (which match the input/output order of the operation).
|
codesearchnet
|
def prepare(path, name):
setup_path = os.path.join(path, 'setup.py')
if not os.path.exists(setup_path):
data = textwrap.dedent( % name)
logger.info('Module %s does not provide a setup.py. \nGenerating setup.py' % name)
_files.write_file(setup_path, data)
data = textwrap.dedent()
logger.info('Generating setup.cfg')
_files.write_file(os.path.join(path, 'setup.cfg'), data)
data = textwrap.dedent()
logger.info('Generating MANIFEST.in')
_files.write_file(os.path.join(path, 'MANIFEST.in'), data)
|
Prepare a Python script (or module) to be imported as a module.
If the script does not contain a setup.py file, it creates a minimal setup.
Args:
path (str): path to directory with the script or module.
name (str): name of the script or module.
|
juraj-google-style
|
def insertImage(page, rect, filename=None, pixmap=None, stream=None, rotate=0,
keep_proportion = True,
overlay=True):
def calc_matrix(fw, fh, tr, rotate=0):
tmp = Point((tr.x1 + tr.x0) / 2., (tr.y1 + tr.y0) / 2.)
rot = Matrix(rotate)
m = Matrix(1, 0, 0, 1, -0.5, -0.5) * rot
small = min(fw, fh)
if rotate not in (0, 180):
fw, fh = fh, fw
if fw < 1:
if (float(tr.width) / fw) > (float(tr.height) / fh):
w = tr.height * small
h = tr.height
else:
w = tr.width
h = tr.width / small
elif fw != fh:
if (float(tr.width) / fw) > (float(tr.height) / fh):
w = tr.height / small
h = tr.height
else:
w = tr.width
h = tr.width * small
else:
w = tr.width
h = tr.height
m *= Matrix(w, h)
m *= Matrix(1, 0, 0, 1, tmp.x, tmp.y)
return m
CheckParent(page)
doc = page.parent
if not doc.isPDF:
raise ValueError("not a PDF")
if bool(filename) + bool(stream) + bool(pixmap) != 1:
raise ValueError("need exactly one of filename, pixmap, stream")
if filename and not os.path.exists(filename):
raise FileNotFoundError("No such file: '%s'" % filename)
elif stream and type(stream) not in (bytes, bytearray, io.BytesIO):
raise ValueError("stream must be bytes-like or BytesIO")
elif pixmap and type(pixmap) is not Pixmap:
raise ValueError("pixmap must be a Pixmap")
while rotate < 0:
rotate += 360
while rotate > 360:
rotate -= 360
if rotate not in (0, 90, 180, 270):
raise ValueError("bad rotate value")
r = page.rect & rect
if r.isEmpty or r.isInfinite:
raise ValueError("rect must be finite and not empty")
_imgpointer = None
if keep_proportion is True:
if pixmap:
w = pixmap.width
h = pixmap.height
elif stream:
img_size = TOOLS.image_size(stream, keep_image=True)
w, h = img_size[:2]
stream = None
_imgpointer = img_size[-1]
else:
img = open(filename, "rb")
stream = img.read()
img_size = TOOLS.image_size(stream, keep_image=True)
w, h = img_size[:2]
_imgpointer = img_size[-1]
stream = None
filename = None
img.close()
maxf = max(w, h).__float__()
fw = w / maxf
fh = h / maxf
else:
fw = fh = 1.0
clip = r * ~page._getTransformation()
matrix = calc_matrix(fw, fh, clip, rotate=rotate)
ilst = [i[7] for i in doc.getPageImageList(page.number)]
n = "fzImg"
i = 0
_imgname = n + "0"
while _imgname in ilst:
i += 1
_imgname = n + str(i)
page._insertImage(
filename=filename,
pixmap=pixmap,
stream=stream,
matrix=matrix,
overlay=overlay,
_imgname=_imgname,
_imgpointer=_imgpointer,
)
|
Insert an image in a rectangle on the current page.
Notes:
Exactly one of filename, pixmap or stream must be provided.
Args:
rect: (rect-like) where to place the source image
filename: (str) name of an image file
pixmap: (obj) a Pixmap object
stream: (bytes) an image in memory
rotate: (int) degrees (multiple of 90)
keep_proportion: (bool) whether to maintain aspect ratio
overlay: (bool) put in foreground
|
juraj-google-style
|
def set_ocha_url(cls, url=None):
if (url is None):
url = cls._ochaurl_int
cls._ochaurl = url
|
Set OCHA url from which to retrieve countries data
Args:
url (str): OCHA url from which to retrieve countries data. Defaults to internal value.
Returns:
None
|
codesearchnet
|
def _determine_timeout(default_timeout, specified_timeout, retry):
if (specified_timeout is DEFAULT):
specified_timeout = default_timeout
if (specified_timeout is default_timeout):
if (retry and (retry is not DEFAULT) and isinstance(default_timeout, timeout.ExponentialTimeout)):
return default_timeout.with_deadline(retry._deadline)
else:
return default_timeout
if isinstance(specified_timeout, (int, float)):
return timeout.ConstantTimeout(specified_timeout)
else:
return specified_timeout
|
Determines how timeout should be applied to a wrapped method.
Args:
default_timeout (Optional[Timeout]): The default timeout specified
at method creation time.
specified_timeout (Optional[Timeout]): The timeout specified at
invocation time. If :attr:`DEFAULT`, this will be set to
the ``default_timeout``.
retry (Optional[Retry]): The retry specified at invocation time.
Returns:
Optional[Timeout]: The timeout to apply to the method or ``None``.
|
codesearchnet
|
def testConcreteFunctionStructuredSignatureError(self, conc_args=(), conc_kwargs=None, call_args=(), call_kwargs=None, error='.*', exception=TypeError):
conc_args = conc_args() if callable(conc_args) else conc_args
conc_kwargs = conc_kwargs() if callable(conc_kwargs) else conc_kwargs or {}
call_args = call_args() if callable(call_args) else call_args
call_kwargs = call_kwargs() if callable(call_kwargs) else call_kwargs or {}
self.assertIsInstance(conc_args, tuple)
self.assertIsInstance(call_args, tuple)
self.assertIsInstance(conc_kwargs, dict)
self.assertIsInstance(call_kwargs, dict)
@polymorphic_function.function
def func(x, y=5, *varargs, **kwargs):
del y, varargs, kwargs
return x
conc = func.get_concrete_function(*conc_args, **conc_kwargs)
with self.assertRaisesRegex(exception, error):
self.evaluate(conc(*call_args, **call_kwargs))
|
Tests for errors in the structured signature.
Args:
conc_args: Positional arguments used for get_concrete_function.
conc_kwargs: Keyword arguments used for get_concrete_function.
call_args: Positional arguments used to call the function.
call_kwargs: Keyword arguments used to call the function.
error: Expected exception message.
exception: Expected exception type.
|
github-repos
|
def load_img(path, grayscale=False, target_size=None):
img = io.imread(path, grayscale)
if target_size:
img = transform.resize(img, target_size, preserve_range=True).astype('uint8')
return img
|
Utility function to load an image from disk.
Args:
path: The image file path.
grayscale: True to convert to grayscale image (Default value = False)
target_size: (w, h) to resize. (Default value = None)
Returns:
The loaded numpy image.
|
codesearchnet
|
def get_index(self, prefix=''):
if prefix:
prefixed = '%s_index' % prefix
else:
prefixed = 'index'
if prefixed in self.__cli and self.__cli[prefixed]:
index = self.__cli.get(prefixed)
from_conf = False
else:
index = self.__config.get(prefixed)
from_conf = True
return self.__abspath(index, from_conf)
|
Retrieve the absolute path to an index, according to
`prefix`.
Args:
prefix: str, the desired prefix or `None`.
Returns:
str: An absolute path, or `None`
|
juraj-google-style
|
def build_error_response(self, version, reason, message):
batch_item = messages.ResponseBatchItem(result_status=contents.ResultStatus(enums.ResultStatus.OPERATION_FAILED), result_reason=contents.ResultReason(reason), result_message=contents.ResultMessage(message))
return self._build_response(version, [batch_item])
|
Build a simple ResponseMessage with a single error result.
Args:
version (ProtocolVersion): The protocol version the response
should be addressed with.
reason (ResultReason): An enumeration classifying the type of
error occurred.
message (str): A string providing additional information about
the error.
Returns:
ResponseMessage: The simple ResponseMessage containing a
single error result.
|
codesearchnet
|
def autobuild_release(family=None):
if family is None:
family = utilities.get_family('module_settings.json')
env = Environment(tools=[])
env['TILE'] = family.tile
target = env.Command(['
action=env.Action(create_release_settings_action, "Creating release manifest"))
env.AlwaysBuild(target)
if os.path.exists('RELEASE.md'):
env.Command(['build/output/RELEASE.md'], ['RELEASE.md'], Copy("$TARGET", "$SOURCE"))
copy_include_dirs(family.tile)
copy_tilebus_definitions(family.tile)
copy_dependency_docs(family.tile)
copy_linker_scripts(family.tile)
if not family.tile.settings.get('hide_dependency_images', False):
copy_dependency_images(family.tile)
copy_extra_files(family.tile)
build_python_distribution(family.tile)
|
Copy necessary files into build/output so that this component can be used by others
Args:
family (ArchitectureGroup): The architecture group that we are targeting. If not
provided, it is assumed that we are building in the current directory and the
module_settings.json file is read to create an ArchitectureGroup
|
juraj-google-style
|
def _dereference_args(pipeline_name, args, kwargs):
lookup_slots = set()
for arg in itertools.chain(args, kwargs.itervalues()):
if (arg['type'] == 'slot'):
lookup_slots.add(db.Key(arg['slot_key']))
slot_dict = {}
for (key, slot_record) in zip(lookup_slots, db.get(lookup_slots)):
if ((slot_record is None) or (slot_record.status != _SlotRecord.FILLED)):
raise SlotNotFilledError(('Slot "%s" missing its value. From %s(*args=%s, **kwargs=%s)' % (key, pipeline_name, _short_repr(args), _short_repr(kwargs))))
slot_dict[key] = slot_record.value
arg_list = []
for current_arg in args:
if (current_arg['type'] == 'slot'):
arg_list.append(slot_dict[db.Key(current_arg['slot_key'])])
elif (current_arg['type'] == 'value'):
arg_list.append(current_arg['value'])
else:
raise UnexpectedPipelineError(('Unknown parameter type: %r' % current_arg))
kwarg_dict = {}
for (key, current_arg) in kwargs.iteritems():
if (current_arg['type'] == 'slot'):
kwarg_dict[key] = slot_dict[db.Key(current_arg['slot_key'])]
elif (current_arg['type'] == 'value'):
kwarg_dict[key] = current_arg['value']
else:
raise UnexpectedPipelineError(('Unknown parameter type: %r' % current_arg))
return (arg_list, kwarg_dict)
|
Dereference a Pipeline's arguments that are slots, validating them.
Each argument value passed in is assumed to be a dictionary with the format:
{'type': 'value', 'value': 'serializable'} # A resolved value.
{'type': 'slot', 'slot_key': 'str() on a db.Key'} # A pending Slot.
Args:
pipeline_name: The name of the pipeline class; used for debugging.
args: Iterable of positional arguments.
kwargs: Dictionary of keyword arguments.
Returns:
Tuple (args, kwargs) where:
Args: A list of positional argument values that are all dereferenced.
Kwargs: A dictionary of keyword argument values that are all dereferenced.
Raises:
SlotNotFilledError if any of the supplied 'slot_key' records are not
present in the Datastore or have not yet been filled.
UnexpectedPipelineError if an unknown parameter type was passed.
|
codesearchnet
|
def flash_from_file(self, partition, source_file, source_len=0,
info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None,
timeout_ms=None):
if source_len == 0:
source_len = os.stat(source_file).st_size
download_response = self.download(
source_file, source_len=source_len, info_cb=info_cb,
progress_callback=progress_callback)
flash_response = self.flash(partition, info_cb=info_cb,
timeout_ms=timeout_ms)
return download_response + flash_response
|
Flashes a partition from the file on disk.
Args:
partition: Partition name to flash to.
source_file: Filename to download to the device.
source_len: Optional length of source_file, uses os.stat if not provided.
info_cb: See Download.
progress_callback: See Download.
timeout_ms: The amount of time to wait on okay after flashing.
Returns:
Download and flash responses, normally nothing.
|
juraj-google-style
|
def __init__(self, vendor_id=9583, product_id=50735):
print("Opening SpaceMouse device")
self.device = hid.device()
self.device.open(vendor_id, product_id)
print("Manufacturer: %s" % self.device.get_manufacturer_string())
print("Product: %s" % self.device.get_product_string())
self._display_controls()
self.single_click_and_hold = False
self._control = [0., 0., 0., 0., 0., 0.]
self._reset_state = 0
self.rotation = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
self._enabled = False
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
|
Initialize a SpaceMouse handler.
Args:
vendor_id: HID device vendor id
product_id: HID device product id
Note:
Use hid.enumerate() to view all USB human interface devices (HID).
Make sure SpaceMouse is detected before running the script.
You can look up its vendor/product id from this method.
|
juraj-google-style
|
def constant_time_string_compare(a, b):
try:
return hmac.compare_digest(a, b)
except AttributeError:
if (len(a) != len(b)):
return False
result = 0
for (x, y) in zip(a, b):
result |= (ord(x) ^ ord(y))
return (result == 0)
|
Helper for comparing strings in constant time, independent
of the Python version being used.
Args:
a (str): A string to compare
b (str): A string to compare
Returns:
bool: True if the strings are equal, False otherwise.
|
codesearchnet
|
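A self-contained sketch of the same fallback idea; on modern Python hmac.compare_digest is always present, so the manual XOR loop is only a compatibility path for very old interpreters.
import hmac

def ct_equal(a, b):
    # Prefer the library primitive; fall back to a constant-time manual loop.
    try:
        return hmac.compare_digest(a, b)
    except AttributeError:  # only reachable on very old Pythons
        if len(a) != len(b):
            return False
        result = 0
        for x, y in zip(a, b):
            result |= ord(x) ^ ord(y)
        return result == 0

print(ct_equal("secret", "secret"))  # True
print(ct_equal("secret", "Secret"))  # False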
def contains_saved_model(export_dir):
if isinstance(export_dir, os.PathLike):
export_dir = os.fspath(export_dir)
return maybe_saved_model_directory(export_dir)
|
Checks whether the provided export directory could contain a SavedModel.
Note that the method does not load any data by itself. If the method returns
`false`, the export directory definitely does not contain a SavedModel. If the
method returns `true`, the export directory may contain a SavedModel but
provides no guarantee that it can be loaded.
Args:
export_dir: Absolute path to possible export location. For example,
'/my/foo/model'.
Returns:
True if the export directory contains SavedModel files, False otherwise.
|
github-repos
|
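For context, recent TensorFlow releases expose this check publicly as tf.saved_model.contains_saved_model; a hypothetical usage (the path is illustrative) might look like:
import tensorflow as tf

export_dir = "/tmp/my_model"  # hypothetical path
if tf.saved_model.contains_saved_model(export_dir):
    print("Looks like a SavedModel; loading it may still fail.")
else:
    print("Definitely not a SavedModel directory.")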
def write(self, data):
if (not isinstance(data, (bytes, bytearray, list))):
raise TypeError('Invalid data type, should be bytes, bytearray, or list.')
if isinstance(data, list):
data = bytearray(data)
try:
return os.write(self._fd, data)
except OSError as e:
raise SerialError(e.errno, ('Writing serial port: ' + e.strerror))
|
Write `data` to the serial port and return the number of bytes
written.
Args:
data (bytes, bytearray, list): a byte array or list of 8-bit integers to write.
Returns:
int: number of bytes written.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `data` type is invalid.
ValueError: if data is not valid bytes.
|
codesearchnet
|
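The bytes/bytearray/list normalization can be exercised on its own with a plain file descriptor; this sketch (write_bytes is a made-up name) mirrors the logic above without the serial-port specifics.
import os, tempfile

def write_bytes(fd, data):
    # Accept bytes, bytearray or a list of 8-bit integers, as above.
    if not isinstance(data, (bytes, bytearray, list)):
        raise TypeError('Invalid data type, should be bytes, bytearray, or list.')
    if isinstance(data, list):
        data = bytearray(data)
    return os.write(fd, data)

fd, path = tempfile.mkstemp()
try:
    print(write_bytes(fd, [0x68, 0x69]))  # 2 bytes written
finally:
    os.close(fd)
    os.remove(path)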
def Progress(self):
now = time.time()
if ((now - self.last_progress_time) <= 2):
return
self.last_progress_time = now
client_utils.KeepAlive()
self.grr_worker.Heartbeat()
user_start = self.cpu_start.user
system_start = self.cpu_start.system
cpu_times = self.proc.cpu_times()
user_end = cpu_times.user
system_end = cpu_times.system
used_cpu = (((user_end - user_start) + system_end) - system_start)
if (used_cpu > self.cpu_limit):
self.grr_worker.SendClientAlert('Cpu limit exceeded.')
raise CPUExceededError('Action exceeded cpu limit.')
|
Indicate progress of the client action.
This function should be called periodically during client actions that do
not finish instantly. It notifies the nanny that the action is not stuck,
avoiding the timeout, and also checks whether the action has reached its
CPU limit.
Raises:
CPUExceededError: CPU limit exceeded.
|
codesearchnet
|
def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True, syntax=None):
if ((api_implementation.Type() == 'cpp') and build_file_if_cpp):
from typy.google.protobuf import descriptor_pb2
file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
file_descriptor_proto.message_type.add().MergeFrom(desc_proto)
proto_name = str(uuid.uuid4())
if package:
file_descriptor_proto.name = os.path.join(package.replace('.', '/'), (proto_name + '.proto'))
file_descriptor_proto.package = package
else:
file_descriptor_proto.name = (proto_name + '.proto')
_message.default_pool.Add(file_descriptor_proto)
result = _message.default_pool.FindFileByName(file_descriptor_proto.name)
if _USE_C_DESCRIPTORS:
return result.message_types_by_name[desc_proto.name]
full_message_name = [desc_proto.name]
if package:
full_message_name.insert(0, package)
enum_types = {}
for enum_proto in desc_proto.enum_type:
full_name = '.'.join((full_message_name + [enum_proto.name]))
enum_desc = EnumDescriptor(enum_proto.name, full_name, None, [EnumValueDescriptor(enum_val.name, ii, enum_val.number) for (ii, enum_val) in enumerate(enum_proto.value)])
enum_types[full_name] = enum_desc
nested_types = {}
for nested_proto in desc_proto.nested_type:
full_name = '.'.join((full_message_name + [nested_proto.name]))
nested_desc = MakeDescriptor(nested_proto, package='.'.join(full_message_name), build_file_if_cpp=False, syntax=syntax)
nested_types[full_name] = nested_desc
fields = []
for field_proto in desc_proto.field:
full_name = '.'.join((full_message_name + [field_proto.name]))
enum_desc = None
nested_desc = None
if field_proto.HasField('type_name'):
type_name = field_proto.type_name
full_type_name = '.'.join((full_message_name + [type_name[(type_name.rfind('.') + 1):]]))
if (full_type_name in nested_types):
nested_desc = nested_types[full_type_name]
elif (full_type_name in enum_types):
enum_desc = enum_types[full_type_name]
field = FieldDescriptor(field_proto.name, full_name, (field_proto.number - 1), field_proto.number, field_proto.type, FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type), field_proto.label, None, nested_desc, enum_desc, None, False, None, options=field_proto.options, has_default_value=False)
fields.append(field)
desc_name = '.'.join(full_message_name)
return Descriptor(desc_proto.name, desc_name, None, None, fields, list(nested_types.values()), list(enum_types.values()), [], options=desc_proto.options)
|
Make a protobuf Descriptor given a DescriptorProto protobuf.
Handles nested descriptors. Note that this is limited to the scope of defining
a message inside of another message. Composite fields can currently only be
resolved if the message is defined in the same scope as the field.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: Optional package name for the new message Descriptor (string).
build_file_if_cpp: Update the C++ descriptor pool if api matches.
Set to False on recursion, so no duplicates are created.
syntax: The syntax/semantics that should be used. Set to "proto3" to get
proto3 field presence semantics.
Returns:
A Descriptor for protobuf messages.
|
codesearchnet
|
def assign_methods(self, resource_class):
assert all([
x.upper() in VALID_METHODS for x in resource_class.Meta.methods])
for method in resource_class.Meta.methods:
self._assign_method(
resource_class,
method.upper()
)
|
Given a resource_class and its Meta.methods tuple,
assign methods for communicating with that resource.
Args:
resource_class: A single resource class
|
juraj-google-style
|
def update_box(self, box):
payload = None
if type(box) is not StreakBox:
return requests.codes.bad_request, None
payload = box.to_dict(rw = True)
try:
uri = self.box_root_uri + '/' + box.attributes['boxKey']
except KeyError:
return requests.codes.bad_request, None
code, data = self._req('post', uri , json.dumps(payload))
return code, data
|
Updates a box with the provided attributes.
Args:
box: StreakBox object with updated info
Returns:
tuple of (status code, box in dict form)
|
juraj-google-style
|
def _setBitOn(x, bitNum):
_checkInt(x, minvalue=0, description='input value')
_checkInt(bitNum, minvalue=0, description='bitnumber')
return x | (1 << bitNum)
|
Set bit 'bitNum' to True.
Args:
* x (int): The value before.
* bitNum (int): The bit number that should be set to True.
Returns:
The value after setting the bit. This is an integer.
For example:
For x = 4 (dec) = 0100 (bin), setting bit number 0 results in 0101 (bin) = 5 (dec).
|
juraj-google-style
|
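A quick standalone check of the bit operation, without the library's internal _checkInt validation (set_bit_on is an illustrative name):
def set_bit_on(x, bit_num):
    # OR in a mask with only `bit_num` set.
    return x | (1 << bit_num)

assert set_bit_on(4, 0) == 5    # 0100 -> 0101
assert set_bit_on(4, 2) == 4    # bit already set, value unchanged
assert set_bit_on(0, 7) == 128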
async def process_response(self, request, response):
(await super().process_response(request, response))
if (COOKIE_AUTH_KEY in request):
if response.started:
raise RuntimeError('Cannot save cookie into started response')
cookie = request[COOKIE_AUTH_KEY]
if (cookie == ''):
response.del_cookie(self.cookie_name)
else:
response.set_cookie(self.cookie_name, cookie)
|
Called to perform any processing of the response required.
This function stores any cookie data in the COOKIE_AUTH_KEY as a
cookie in the response object. If the value is an empty string, the
associated cookie is deleted instead.
This function requires the response to be an aiohttp Response object,
and assumes that the response has not started if the remember or
forget functions are called during the request.
Args:
request: aiohttp Request object.
response: response object returned from the handled view
Raises:
RuntimeError: Raised if response has already started.
|
codesearchnet
|
def _CalculateNTFSTimeHash(self, file_entry):
date_time_values = []
access_time = getattr(file_entry, 'access_time', None)
if access_time:
date_time_string = access_time.CopyToDateTimeString()
date_time_values.append('atime:{0:s}'.format(date_time_string))
creation_time = getattr(file_entry, 'creation_time', None)
if creation_time:
date_time_string = creation_time.CopyToDateTimeString()
date_time_values.append('crtime:{0:s}'.format(date_time_string))
modification_time = getattr(file_entry, 'modification_time', None)
if modification_time:
date_time_string = modification_time.CopyToDateTimeString()
date_time_values.append('mtime:{0:s}'.format(date_time_string))
change_time = getattr(file_entry, 'change_time', None)
if change_time:
date_time_string = change_time.CopyToDateTimeString()
date_time_values.append('ctime:{0:s}'.format(date_time_string))
date_time_values = ''.join(date_time_values)
date_time_values = date_time_values.encode('ascii')
hash_value = hashlib.md5()
hash_value.update(date_time_values)
return hash_value.hexdigest()
|
Calculates an MD5 from the date and time values of an NTFS file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
Returns:
str: hexadecimal representation of the MD5 hash value of the date and
time values of the file entry.
|
juraj-google-style
|
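The hashing step by itself is plain hashlib; a sketch with hand-written timestamp strings standing in for the dfVFS date/time objects (hash_timestamps is a made-up helper):
import hashlib

def hash_timestamps(atime=None, crtime=None, mtime=None, ctime=None):
    parts = []
    for label, value in (('atime', atime), ('crtime', crtime),
                         ('mtime', mtime), ('ctime', ctime)):
        if value:
            parts.append('{0:s}:{1:s}'.format(label, value))
    return hashlib.md5(''.join(parts).encode('ascii')).hexdigest()

print(hash_timestamps(atime='2023-01-01 00:00:00',
                      mtime='2023-01-02 12:34:56'))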
def locator(self, value):
self._locator = value
(self._latitude, self._longitude) = utils.from_grid_locator(value)
|
Update the locator, and trigger a latitude and longitude update.
Args:
value (str): New Maidenhead locator string
|
codesearchnet
|
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
|
Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
|
juraj-google-style
|
def create_detector(model_uri: str, **kwargs) -> OfflineDetector:
model_handler = KeyedModelHandler(PyODModelHandler(model_uri=model_uri)).with_postprocess_fn(OfflineDetector.score_prediction_adapter)
m = model_handler.load_model()
assert isinstance(m, PyODBaseDetector)
threshold = float(m.threshold_)
detector = OfflineDetector(model_handler, threshold_criterion=FixedThreshold(threshold), **kwargs)
return detector
|
A utility function to create OfflineDetector for a PyOD model.
**NOTE:** This API and its implementation are currently under active
development and may not be backward compatible.
Args:
model_uri: The URI specifying the location of the pickled PyOD model.
**kwargs: Additional keyword arguments.
|
github-repos
|
def restore(self, save_path, options=None):
self._checkpoint_options = copy.copy(options) if options else self._checkpoint_options
if self._checkpoint_options:
self._checkpoint_options.experimental_enable_async_checkpoint = False
self._queue.join()
status = self.checkpointer().restore(save_path, self._checkpoint_options)
return status
|
Restore the checkpointed variables.
Args:
save_path: The full name of the checkpoint file to be restored.
options: CheckpointOption instance.
Returns:
A load status object, which can be used to make assertions about the
status of a checkpoint restoration. See tf.train.Checkpoint.restore()
for more details.
|
github-repos
|
def add_response(self, req, resp):
if self._cache is None:
return
signature = sign(req.allocateQuotaRequest)
with self._cache as c:
now = self._timer()
item = c.get(signature)
if item is None:
c[signature] = CachedItem(
req, resp, self.service_name, now)
else:
item.last_check_time = now
item.response = resp
item.is_in_flight = False
c[signature] = item
|
Adds the response from sending to `req` to this instance's cache.
Args:
req (`ServicecontrolServicesAllocateQuotaRequest`): the request
resp (AllocateQuotaResponse): the response from sending the request
|
juraj-google-style
|
def get_all(self, include_archived=False):
return [conv for conv in self._conv_dict.values() if ((not conv.is_archived) or include_archived)]
|
Get all the conversations.
Args:
include_archived (bool): (optional) Whether to include archived
conversations. Defaults to ``False``.
Returns:
List of all :class:`.Conversation` objects.
|
codesearchnet
|
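The filter itself is one list comprehension; a toy stand-in for the conversation objects makes the include_archived behaviour easy to see (Conv is illustrative only):
from collections import namedtuple

Conv = namedtuple('Conv', 'name is_archived')
convs = {1: Conv('work', False), 2: Conv('old', True), 3: Conv('family', False)}

def get_all(conv_dict, include_archived=False):
    return [c for c in conv_dict.values()
            if not c.is_archived or include_archived]

print([c.name for c in get_all(convs)])                         # ['work', 'family']
print([c.name for c in get_all(convs, include_archived=True)])  # all three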
def to_dict(self, remove_nones=False):
content = {'uri': self.uri, 'protocol_info': self.protocol_info, 'import_uri': self.import_uri, 'size': self.size, 'duration': self.duration, 'bitrate': self.bitrate, 'sample_frequency': self.sample_frequency, 'bits_per_sample': self.bits_per_sample, 'nr_audio_channels': self.nr_audio_channels, 'resolution': self.resolution, 'color_depth': self.color_depth, 'protection': self.protection}
if remove_nones:
nones = [k for k in content if (content[k] is None)]
for k in nones:
del content[k]
return content
|
Return a dict representation of the `DidlResource`.
Args:
remove_nones (bool, optional): Optionally remove dictionary
elements when their value is `None`.
Returns:
dict: a dict representing the `DidlResource`
|
codesearchnet
|
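The remove_nones pruning is a generic dict idiom; a minimal sketch outside the DIDL class (prune_nones is an illustrative name):
def prune_nones(content, remove_nones=False):
    if remove_nones:
        # Collect keys first so the dict is not mutated while iterating.
        for key in [k for k in content if content[k] is None]:
            del content[key]
    return content

print(prune_nones({'uri': 'x-file-cifs://server/track.mp3', 'size': None, 'bitrate': None},
                  remove_nones=True))
# -> {'uri': 'x-file-cifs://server/track.mp3'}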
def run_defense_work(self, work_id):
class_batch_id = (
self.defense_work.work[work_id]['output_classification_batch_id'])
class_batch = self.class_batches.read_batch_from_datastore(class_batch_id)
adversarial_batch_id = class_batch['adversarial_batch_id']
submission_id = class_batch['submission_id']
cloud_result_path = class_batch['result_path']
logging.info('Defense work piece: '
'adversarial_batch_id="%s" submission_id="%s"',
adversarial_batch_id, submission_id)
if submission_id in self.blacklisted_submissions:
raise WorkerError('Blacklisted submission')
defense = DefenseSubmission(submission_id, self.submissions,
self.storage_bucket)
defense.download()
input_dir = os.path.join(LOCAL_INPUT_DIR, adversarial_batch_id)
if os.path.exists(input_dir):
sudo_remove_dirtree(input_dir)
os.makedirs(input_dir)
try:
shell_call([
'gsutil', '-m', 'cp',
os.path.join('gs:
'adversarial_images', adversarial_batch_id, '*'),
input_dir
])
adv_images_files = os.listdir(input_dir)
if (len(adv_images_files) == 1) and adv_images_files[0].endswith('.zip'):
logging.info('Adversarial batch is in zip archive %s',
adv_images_files[0])
shell_call([
'unzip', os.path.join(input_dir, adv_images_files[0]),
'-d', input_dir
])
os.remove(os.path.join(input_dir, adv_images_files[0]))
adv_images_files = os.listdir(input_dir)
logging.info('%d adversarial images copied', len(adv_images_files))
except (subprocess.CalledProcessError, IOError) as e:
raise WorkerError("Can't copy adversarial batch locally", e)
if os.path.exists(LOCAL_OUTPUT_DIR):
sudo_remove_dirtree(LOCAL_OUTPUT_DIR)
os.mkdir(LOCAL_OUTPUT_DIR)
output_filname = os.path.join(LOCAL_OUTPUT_DIR, 'result.csv')
elapsed_time_sec = defense.run(input_dir, output_filname)
batch_result = eval_lib.analyze_one_classification_result(
storage_client=None,
file_path=output_filname,
adv_batch=self.adv_batches.data[adversarial_batch_id],
dataset_batches=self.dataset_batches,
dataset_meta=self.dataset_meta)
try:
shell_call([
'gsutil', 'cp', output_filname,
os.path.join('gs:
])
except subprocess.CalledProcessError as e:
raise WorkerError("Can't copy result to Cloud Storage", e)
return elapsed_time_sec, submission_id, batch_result
|
Runs one defense work.
Args:
work_id: ID of the piece of work to run
Returns:
Tuple of (elapsed_time_sec, submission_id, batch_result): elapsed time, id of
the submission, and the analyzed classification result for the batch.
Raises:
WorkerError: if error occurred during execution.
|
juraj-google-style
|
def from_dict(cls, tx, skip_schema_validation=True):
operation = tx.get('operation', Transaction.CREATE) if isinstance(tx, dict) else Transaction.CREATE
cls = Transaction.resolve_class(operation)
if not skip_schema_validation:
cls.validate_id(tx)
cls.validate_schema(tx)
inputs = [Input.from_dict(input_) for input_ in tx['inputs']]
outputs = [Output.from_dict(output) for output in tx['outputs']]
return cls(tx['operation'], tx['asset'], inputs, outputs,
tx['metadata'], tx['version'], hash_id=tx['id'], tx_dict=tx)
|
Transforms a Python dictionary to a Transaction object.
Args:
tx (dict): The Transaction to be transformed.
Returns:
:class:`~bigchaindb.common.transaction.Transaction`
|
juraj-google-style
|
def sign(self, byts):
chosen_hash = c_hashes.SHA256()
hasher = c_hashes.Hash(chosen_hash, default_backend())
hasher.update(byts)
digest = hasher.finalize()
return self.priv.sign(digest,
c_ec.ECDSA(c_utils.Prehashed(chosen_hash))
)
|
Compute the ECC signature for the given bytestream.
Args:
byts (bytes): The bytes to sign.
Returns:
bytes: The ECDSA signature bytes.
|
juraj-google-style
|
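A hedged end-to-end sketch with recent versions of the cryptography package: generate a throwaway P-256 key, pre-hash the message, and sign the digest the same way; the key handling here is purely illustrative.
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec, utils

private_key = ec.generate_private_key(ec.SECP256R1())  # throwaway demo key

message = b"hello world"
hasher = hashes.Hash(hashes.SHA256())
hasher.update(message)
digest = hasher.finalize()

signature = private_key.sign(digest, ec.ECDSA(utils.Prehashed(hashes.SHA256())))
# Verification takes the original message, not the digest.
private_key.public_key().verify(signature, message, ec.ECDSA(hashes.SHA256()))
print("signature verified, %d bytes" % len(signature))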
def show_stories(self, raw=False, limit=None):
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
|
Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
|
codesearchnet
|
def get_path_list(self, type_str=None):
return list(reversed([v.label_str for v in self.parent_gen if (type_str in (None, v.type_str))]))
|
Get list of the labels of the nodes leading up to this node from the root.
Args:
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
list of str: The labels of the nodes leading up to this node from the root.
|
codesearchnet
|
def check(self, dsm, **kwargs):
med_matrix = CompleteMediation.generate_mediation_matrix(dsm)
return CompleteMediation.matrices_compliance(dsm, med_matrix)
|
Check if matrix and its mediation matrix are compliant.
It means that number of dependencies for each (line, column) is either
0 if the mediation matrix (line, column) is 0, or >0 if the mediation
matrix (line, column) is 1.
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to check.
Returns:
bool: True if compliant, else False
|
juraj-google-style
|
def find_element_by_name(self, name, update=False) -> Elements:
return self.find_element(by=By.NAME, value=name, update=update)
|
Finds an element by name.
Args:
name: The name of the element to be found.
update: If the interface has changed, this option should be True.
Returns:
The element if it was found.
Raises:
NoSuchElementException - If the element wasn't found.
Usage:
element = driver.find_element_by_name('foo')
|
juraj-google-style
|
def _linear_interp(curve, test_x, round_result=False):
index = 0
for index in range((len(curve) - 1)):
if (curve[index][0] == curve[(index + 1)][0]):
continue
if (curve[index][0] <= test_x <= curve[(index + 1)][0]):
slope = ((curve[(index + 1)][1] - curve[index][1]) / (curve[(index + 1)][0] - curve[index][0]))
y_intercept = (curve[index][1] - (slope * curve[index][0]))
result = ((slope * test_x) + y_intercept)
if round_result:
return int(round(result))
elif result.is_integer():
return int(result)
else:
return result
else:
raise ProbabilityUndefinedError
|
Take a series of points and interpolate between them at ``test_x``.
Args:
curve (list[tuple]): A list of ``(x, y)`` points sorted in
nondecreasing ``x`` value. If multiple points have the same
``x`` value, all but the last will be ignored.
test_x (float): The ``x`` value to find the ``y`` value of
round_result (bool): If True, round the result to the nearest
whole number.
Returns:
float: The ``y`` value of the curve at ``test_x``
if ``round_result is False``
int: if ``round_result is True`` or the result is a whole number,
the ``y`` value of the curve at ``test_x`` rounded to the
nearest whole number.
Raises:
ProbabilityUndefinedError: if ``test_x`` is out of the
domain of ``curve``
Example:
>>> curve = [(0, 0), (2, 1)]
>>> _linear_interp(curve, 0.5)
0.25
>>> _linear_interp(curve, 0.5, round_result=True)
0
|
codesearchnet
|
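A quick self-contained check of the interpolation math on the docstring's example curve, using a plain ValueError in place of the package's ProbabilityUndefinedError:
def linear_interp(curve, test_x):
    for i in range(len(curve) - 1):
        (x0, y0), (x1, y1) = curve[i], curve[i + 1]
        if x0 == x1:
            continue  # skip duplicate x values, as above
        if x0 <= test_x <= x1:
            slope = (y1 - y0) / (x1 - x0)
            return y0 + slope * (test_x - x0)
    raise ValueError('test_x outside the domain of curve')

curve = [(0, 0), (2, 1)]
print(linear_interp(curve, 0.5))  # 0.25
print(linear_interp(curve, 2.0))  # 1.0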
def prep_parallel(self, binary_args, other_args):
if (self.length < 100):
raise Exception('Run this across 1 processor by setting num_processors kwarg to None.')
if (self.num_processors == (- 1)):
self.num_processors = mp.cpu_count()
split_val = int(np.ceil((self.length / self.num_splits)))
split_inds = [(self.num_splits * i) for i in np.arange(1, split_val)]
inds_split_all = np.split(np.arange(self.length), split_inds)
self.args = []
for (i, ind_split) in enumerate(inds_split_all):
trans_args = []
for arg in binary_args:
try:
trans_args.append(arg[ind_split])
except TypeError:
trans_args.append(arg)
self.args.append(((i, tuple(trans_args)) + other_args))
return
|
Prepare the parallel calculations.
Prepares the arguments to be run in parallel.
It will divide up arrays according to num_splits.
Args:
binary_args (list): List of binary arguments for input into the SNR function.
other_args (tuple of obj): tuple of other args for input into parallel snr function.
|
codesearchnet
|
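The index-splitting step can be checked in isolation with NumPy; in this sketch chunk_size plays the role of num_splits above, and the numbers are arbitrary demo values.
import numpy as np

length, chunk_size = 10, 4
n_chunks = int(np.ceil(length / chunk_size))
split_inds = [chunk_size * i for i in range(1, n_chunks)]
chunks = np.split(np.arange(length), split_inds)
print([c.tolist() for c in chunks])
# -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]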
def write_sample(binary, payload, path, filename):
if not os.path.exists(path):
os.makedirs(path)
sample = os.path.join(path, filename)
if binary:
with open(sample, "wb") as f:
f.write(base64.b64decode(payload))
else:
with open(sample, "w") as f:
f.write(payload)
|
This function writes a sample to the file system.
Args:
binary (bool): True if it's a binary file
payload: payload of the sample, base64-encoded if it's a binary file
path (string): path of file
filename (string): name of file
|
juraj-google-style
|
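A small round-trip of the same write path through a temporary directory (this standalone write_sample drops the original's module-level context):
import base64, os, tempfile

def write_sample(binary, payload, path, filename):
    os.makedirs(path, exist_ok=True)
    sample = os.path.join(path, filename)
    mode = "wb" if binary else "w"
    data = base64.b64decode(payload) if binary else payload
    with open(sample, mode) as f:
        f.write(data)
    return sample

with tempfile.TemporaryDirectory() as tmp:
    p = write_sample(True, base64.b64encode(b"\x00\x01binary"), tmp, "sample.bin")
    print(os.path.getsize(p))  # 8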
def pre_fetch(self, feed):
pass
|
Pre-fetches all required items to be updated into the cache.
This increases performance for update operations.
Args:
feed: List of feed items to retrieve
|
github-repos
|
def bottom(self, features):
if not self._problem_hparams:
log_warn("Without a Problem, T2TModel.bottom is a passthrough.")
return features
transformed_features = collections.OrderedDict()
all_previous_modalities = []
target_modality = _create_target_modality(self._problem_hparams.modality)
for feature_name, modality in sorted(
six.iteritems(self._problem_hparams.modality)):
if feature_name not in features:
tf.logging.warning("Missing feature %s - ignoring." % feature_name)
continue
vocab_size = self._problem_hparams.vocab_size[feature_name]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
modality_name = self._hparams.name.get(
feature_name,
modalities.get_name(modality))(self._hparams, vocab_size)
if feature_name in target_modality:
if len(target_modality) > 1:
variable_scope_name = "%s/%s" % (modality_name, feature_name)
else:
variable_scope_name = modality_name
bottom = self._hparams.bottom.get(
feature_name,
modalities.get_targets_bottom(modality))
with tf.variable_scope(variable_scope_name) as vs:
self._add_variable_scope(variable_scope_name, vs)
log_info("Transforming feature '%s' with %s.targets_bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
else:
bottom = self._hparams.bottom.get(feature_name,
modalities.get_bottom(modality))
do_reuse = modality_name in all_previous_modalities
with tf.variable_scope(modality_name, reuse=do_reuse) as vs:
self._add_variable_scope(modality_name, vs)
log_info("Transforming feature '%s' with %s.bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
all_previous_modalities.append(modality_name)
for key in features:
if key not in transformed_features:
transformed_features[key] = features[key]
else:
transformed_features[key + "_raw"] = features[key]
return transformed_features
|
Transforms features to feed into body.
Args:
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
transformed_features: dict of same key-value pairs as features. The value
Tensors are newly transformed.
|
juraj-google-style
|
def enter_diff_mode(self, context_model=None):
assert (not self.diff_mode)
self.diff_mode = True
if (context_model is None):
self.diff_from_source = True
self.diff_context_model = self.context_model.copy()
else:
self.diff_from_source = False
self.diff_context_model = context_model
self.clear()
self.setColumnCount(5)
self.refresh()
|
Enter diff mode.
Args:
context_model (`ContextModel`): Context to diff against. If None, a
copy of the current context is used.
|
codesearchnet
|
def JobDueToRun(self, job):
if not job.enabled:
return False
if job.forced_run_requested:
return True
now = rdfvalue.RDFDatetime.Now()
if (job.last_run_time is not None and
job.last_run_time + job.frequency > now):
return False
if not job.current_run_id:
return True
if job.allow_overruns:
return True
return False
|
Determines if the given job is due for another run.
Args:
job: The cron job rdfvalue object.
Returns:
True if it is time to run based on the specified frequency.
|
juraj-google-style
|
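The scheduling decision is easy to exercise with plain datetimes; this sketch uses a namedtuple in place of the cron-job rdfvalue, with illustrative field names.
from collections import namedtuple
from datetime import datetime, timedelta

Job = namedtuple('Job', 'enabled forced_run_requested last_run_time frequency current_run_id allow_overruns')

def job_due_to_run(job, now=None):
    now = now or datetime.utcnow()
    if not job.enabled:
        return False
    if job.forced_run_requested:
        return True
    if job.last_run_time is not None and job.last_run_time + job.frequency > now:
        return False
    return not job.current_run_id or job.allow_overruns

stale = Job(True, False, datetime.utcnow() - timedelta(hours=2),
            timedelta(hours=1), None, False)
print(job_due_to_run(stale))  # True: frequency elapsed and no run in flight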
def __copyfile2(source, destination):
logger.info("copyfile2: %s -> %s" % (source, destination))
try:
__create_destdir(destination)
shutil.copy2(source, destination)
return True
except Exception as e:
logger.error(
"copyfile2: %s -> %s failed! Error: %s", source, destination, e
)
return False
|
Copy data and all stat info ("cp -p source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise.
|
juraj-google-style
|
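The underlying call is just shutil.copy2 plus destination-directory creation; a tiny self-contained demo (copyfile2 here is a simplified stand-in for the original):
import os, shutil, tempfile

def copyfile2(source, destination):
    try:
        dest_dir = os.path.dirname(destination)
        if dest_dir:
            os.makedirs(dest_dir, exist_ok=True)
        shutil.copy2(source, destination)  # copies data and stat info ("cp -p")
        return True
    except OSError as exc:
        print("copyfile2: %s -> %s failed! Error: %s" % (source, destination, exc))
        return False

with tempfile.TemporaryDirectory() as tmp:
    src = os.path.join(tmp, "a.txt")
    with open(src, "w") as f:
        f.write("hello")
    print(copyfile2(src, os.path.join(tmp, "nested", "dir", "a.txt")))  # True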