code | docstring | source
---|---|---|
def save_imgs(x, fname):
n = x.shape[0]
fig = figure.Figure(figsize=(n, 1), frameon=False)
canvas = backend_agg.FigureCanvasAgg(fig)
for i in range(n):
ax = fig.add_subplot(1, n, i+1)
ax.imshow(x[i].squeeze(),
interpolation="none",
cmap=cm.get_cmap("binary"))
ax.axis("off")
canvas.print_figure(fname, format="png")
print("saved %s" % fname)
|
Helper method to save a grid of images to a PNG file.
Args:
x: A numpy array of shape [n_images, height, width].
fname: The filename to write to (including extension).
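A minimal usage sketch, assuming the module-level matplotlib imports (`figure`, `backend_agg`, `cm`) used above are in place; the array here is illustrative:
```
import numpy as np
batch = np.random.rand(8, 28, 28)  # 8 fake grayscale 28x28 images in [0, 1]
save_imgs(batch, "samples.png")    # writes an 8-image strip to samples.png
```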
|
juraj-google-style
|
def wait_for(self, pattern, timeout=None):
should_continue = True
if self.block:
raise TypeError(NON_BLOCKING_ERROR_MESSAGE)
def stop(signum, frame):
nonlocal should_continue
if should_continue:
raise TimeoutError()
if timeout:
signal.signal(signal.SIGALRM, stop)
signal.alarm(timeout)
while should_continue:
output = (self.poll_output() + self.poll_error())
filtered = [line for line in output if re.match(pattern, line)]
if filtered:
should_continue = False
|
Block until a pattern has been found in stdout or stderr.
Args:
pattern(:class:`~re.Pattern`): The pattern to search for
timeout(int): Maximum number of seconds to wait. If None, wait indefinitely
Raises:
TimeoutError: When the timeout is reached
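A hypothetical usage sketch; `proc` stands in for an instance of the non-blocking process wrapper this method belongs to:
```
# Wait at most 30 seconds for a readiness line on stdout/stderr.
try:
    proc.wait_for(r"Server listening on port \d+", timeout=30)
except TimeoutError:
    print("server did not come up in time")
```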
|
codesearchnet
|
async def forget(request):
auth_policy = request.get(POLICY_KEY)
if auth_policy is None:
raise RuntimeError('auth_middleware not installed')
return await auth_policy.forget(request)
|
Called to forget the userid for a request
Args:
request: aiohttp Request object
Raises:
RuntimeError: Middleware is not installed
|
juraj-google-style
|
def __init__(self, path):
super(FilterFile, self).__init__()
self._path = path
|
Initializes a filter file.
Args:
path (str): path to a file that contains one or more path filters.
|
juraj-google-style
|
def duration_to_string(duration):
m, s = divmod(duration, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
|
Converts a duration to a string.
Args:
duration (int): The duration in seconds to convert
Returns:
s (str): The duration formatted as H:MM:SS
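For example:
```
duration_to_string(3725)  # "1:02:05"  (1 hour, 2 minutes, 5 seconds)
duration_to_string(59)    # "0:00:59"
```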
|
juraj-google-style
|
def convert_gemm(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting Linear ...')
if names == 'short':
tf_name = 'FC' + random_string(6)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
bias_name = '{0}.bias'.format(w_name)
weights_name = '{0}.weight'.format(w_name)
W = weights[weights_name].numpy().transpose()
input_channels, output_channels = W.shape
keras_weights = [W]
has_bias = False
if bias_name in weights:
bias = weights[bias_name].numpy()
keras_weights = [W, bias]
has_bias = True
dense = keras.layers.Dense(
output_channels,
weights=keras_weights, use_bias=has_bias, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros',
)
layers[scope_name] = dense(layers[inputs[0]])
|
Convert Linear.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
juraj-google-style
|
def get_student_item_dict(self, anonymous_user_id=None):
item_id = self._serialize_opaque_key(self.scope_ids.usage_id)
if hasattr(self, "xmodule_runtime"):
course_id = self.get_course_id()
if anonymous_user_id:
student_id = anonymous_user_id
else:
student_id = self.xmodule_runtime.anonymous_student_id
else:
course_id = "edX/Enchantment_101/April_1"
if self.scope_ids.user_id is None:
student_id = ''
else:
student_id = unicode(self.scope_ids.user_id)
student_item_dict = dict(
student_id=student_id,
item_id=item_id,
course_id=course_id,
item_type='ubcpi'
)
return student_item_dict
|
Create a student_item_dict from our surrounding context.
See also: submissions.api for details.
Args:
anonymous_user_id(str): A unique anonymous_user_id for (user, course) pair.
Returns:
(dict): The student item associated with this XBlock instance. This
includes the student id, item id, and course id.
|
juraj-google-style
|
def sym_getattr(self, key: Union[str, int], default: Any=RAISE_IF_NOT_FOUND) -> Any:
if not self.sym_hasattr(key):
if default is RAISE_IF_NOT_FOUND:
raise AttributeError(self._error_message(f'{self.__class__!r} object has no symbolic attribute {key!r}.'))
return default
return self._sym_getattr(key)
|
Gets a symbolic attribute.
Args:
key: Key of symbolic attribute.
default: Default value if attribute does not exist. If absent, an
AttributeError is raised when the attribute is not found.
Returns:
Value of symbolic attribute if found, otherwise the default value
if it's specified.
Raises:
AttributeError if `key` does not exist and `default` is not provided.
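A usage sketch, assuming `obj` is a symbolic object with an attribute `x` (names are illustrative):
```
value = obj.sym_getattr('x')                 # raises AttributeError if 'x' is absent
value = obj.sym_getattr('x', default=None)   # returns None instead of raising
```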
|
github-repos
|
def slice(array, start, size, ty):
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
weld_template = '\n map(\n %(array)s,\n |array: %(ty)s| slice(array, %(start)dL, %(size)dL)\n )\n '
weld_obj.weld_code = (weld_template % {'array': array_var, 'start': start, 'ty': ty, 'size': size})
return weld_obj
|
Returns a new array-of-arrays with each inner array truncated, starting at
index `start` and keeping `size` elements.
Args:
array (WeldObject / Numpy.ndarray): Input array
start (int): starting index
size (int): length to truncate at
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
|
codesearchnet
|
def Start(self, seed_list: List[str] = None, skip_seeds: bool = False) -> None:
if not seed_list:
seed_list = settings.SEED_LIST
logger.debug("Starting up nodeleader")
if not skip_seeds:
logger.debug("Attempting to connect to seed list...")
for bootstrap in seed_list:
if not is_ip_address(bootstrap):
host, port = bootstrap.split(':')
bootstrap = f"{hostname_to_ip(host)}:{port}"
addr = Address(bootstrap)
self.KNOWN_ADDRS.append(addr)
self.SetupConnection(addr)
logger.debug("Starting up nodeleader: starting peer, mempool, and blockheight check loops")
self.start_peer_check_loop()
self.start_memcheck_loop()
self.start_blockheight_loop()
if settings.ACCEPT_INCOMING_PEERS and not self.incoming_server_running:
class OneShotFactory(Factory):
def __init__(self, leader):
self.leader = leader
def buildProtocol(self, addr):
print(f"building new protocol for addr: {addr}")
self.leader.AddKnownAddress(Address(f"{addr.host}:{addr.port}"))
p = NeoNode(incoming_client=True)
p.factory = self
return p
def listen_err(err):
print(f"Failed start listening server for reason: {err.value}")
def listen_ok(value):
self.incoming_server_running = True
logger.debug(f"Starting up nodeleader: setting up listen server on port: {settings.NODE_PORT}")
server_endpoint = TCP4ServerEndpoint(self.reactor, settings.NODE_PORT)
listenport_deferred = server_endpoint.listen(OneShotFactory(leader=self))
listenport_deferred.addCallback(listen_ok)
listenport_deferred.addErrback(listen_err)
|
Start connecting to the seed list.
Args:
seed_list: a list of host:port strings. If not supplied, the list from `protocol.xxx.json` is used
skip_seeds: skip connecting to seed list
|
juraj-google-style
|
def list_storage_accounts_sub(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Storage/storageAccounts',
'?api-version=', STORAGE_API])
return do_get(endpoint, access_token)
|
List the storage accounts in the specified subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body list of storage accounts.
|
juraj-google-style
|
def save_shared_file(self, sharekey=None):
endpoint = '/api/sharedfile/{sharekey}/save'.format(sharekey=sharekey)
data = self._make_request("POST", endpoint=endpoint, data=None)
try:
sf = SharedFile.NewFromJSON(data)
sf.saved = True
return sf
except:
raise Exception("{0}".format(data['error']))
|
Save a SharedFile to your Shake.
Args:
sharekey (str): Sharekey for the file to save.
Returns:
SharedFile saved to your shake.
|
juraj-google-style
|
def Query(self, query):
cursor = self._database.cursor()
cursor.execute(query)
return cursor
|
Queries the database.
Args:
query (str): SQL query.
Returns:
sqlite3.Cursor: results.
Raises:
sqlite3.DatabaseError: if querying the database fails.
|
juraj-google-style
|
def animate_cli(animation_, step, event):
while True:
time.sleep(step)
frame = next(animation_)
sys.stdout.write(frame)
sys.stdout.flush()
if event.is_set():
break
sys.stdout.write(animation_.get_erase_frame())
sys.stdout.flush()
animation_.reset()
|
Print out the animation cycle to stdout. This function is for use with
synchronous functions and must be run in a thread.
Args:
animation_ (generator): A generator that produces strings for the
animation. Should be endless.
step (float): Seconds between each animation frame.
|
codesearchnet
|
def _create_service_api(credentials, service_name, version, developer_key=None, cache_discovery=False, http=None):
if (log.getEffectiveLevel() > logging.DEBUG):
logging.getLogger(discovery.__name__).setLevel(logging.WARNING)
discovery_kwargs = {'serviceName': service_name, 'version': version, 'developerKey': developer_key, 'cache_discovery': cache_discovery}
if http:
discovery_kwargs['http'] = http
else:
discovery_kwargs['credentials'] = credentials
return discovery.build(**discovery_kwargs)
|
Builds and returns a cloud API service object.
Args:
credentials (OAuth2Credentials): Credentials that will be used to
authenticate the API calls.
service_name (str): The name of the API.
version (str): The version of the API to use.
developer_key (str): The api key to use to determine the project
associated with the API call, most API services do not require
this to be set.
cache_discovery (bool): Whether or not to cache the discovery doc.
Returns:
object: A Resource object with methods for interacting with the service.
|
codesearchnet
|
def get_events(self) -> List[Event]:
LOG.debug('Getting events for %s', self.key)
return get_events(self.key)
|
Get events associated with the scheduling object.
Returns:
list of Event objects
|
codesearchnet
|
def mounts(prefix, __mounts):
i = 0
mntpoints = []
for mount in __mounts:
if (not isinstance(mount, dict)):
mntpoint = '{0}/{1}'.format(prefix, str(i))
mntpoints.append(mntpoint)
i = (i + 1)
return mntpoints
|
Compute the mountpoints of the current user.
Args:
prefix: Prefix defining where the job was running if it ran on a cluster.
mounts: All mounts the user currently uses in their file system.
Return:
mntpoints
|
codesearchnet
|
def merge_input_csv_forecast_json(input_csv_file, forecast_json_path, condition_models, dist_models):
try:
run_date = input_csv_file[:-4].split("_")[-1]
print(run_date)
ens_member = "_".join(input_csv_file.split("/")[-1][:-4].split("_")[3:-1])
ens_name = input_csv_file.split("/")[-1].split("_")[2]
input_data = pd.read_csv(input_csv_file, index_col="Step_ID")
full_json_path = forecast_json_path + "{0}/{1}/".format(run_date, ens_member)
track_ids = sorted(input_data["Track_ID"].unique())
model_pred_cols = []
condition_models_ns = []
dist_models_ns = []
gamma_params = ["Shape", "Location", "Scale"]
for condition_model in condition_models:
model_pred_cols.append(condition_model.replace(" ", "-") + "_Condition")
condition_models_ns.append(condition_model.replace(" ", "-"))
for dist_model in dist_models:
dist_models_ns.append(dist_model.replace(" ", "-"))
for param in gamma_params:
model_pred_cols.append(dist_model.replace(" ", "-") + "_" + param)
pred_data = pd.DataFrame(index=input_data.index, columns=model_pred_cols,
dtype=float)
for track_id in track_ids:
track_id_num = track_id.split("_")[-1]
json_filename = full_json_path + "{0}_{1}_{2}_model_track_{3}.json".format(ens_name,
run_date,
ens_member,
track_id_num)
json_file = open(json_filename)
json_data = json.load(json_file)
json_file.close()
for s, step in enumerate(json_data["features"]):
step_id = track_id + "_{0:02d}".format(s)
for cond_model in condition_models_ns:
pred_data.loc[step_id, cond_model + "_Condition"] = step["properties"]["condition_" + cond_model]
for dist_model in dist_models_ns:
pred_data.loc[step_id, [dist_model + "_" + p
for p in gamma_params]] = step["properties"]["dist_" + dist_model]
out_data = input_data.merge(pred_data, left_index=True, right_index=True)
return out_data, ens_name, ens_member
except Exception as e:
print(traceback.format_exc())
raise e
|
Reads forecasts from json files and merges them with the input data from the step csv files.
Args:
input_csv_file: Name of the input data csv file being processed
forecast_json_path: Path to the forecast json files toplevel directory
condition_models: List of models used to forecast hail or no hail
dist_models: List of models used to forecast the hail size distribution
Returns:
tuple: (merged input/forecast DataFrame, ensemble name, ensemble member)
|
juraj-google-style
|
def list_resource_groups(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/',
'?api-version=', RESOURCE_API])
return do_get(endpoint, access_token)
|
List the resource groups in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response.
|
juraj-google-style
|
def stop(self, timeout=None):
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(timeout)
_SHARED_SEQUENCES[self.uid] = None
|
Stops running threads and waits for them to exit, if necessary.
Should be called by the same thread which called `start()`.
Args:
timeout: maximum time to wait on `thread.join()`
|
github-repos
|
def _create_gates(self, inputs, memory):
num_gates = (2 * self._calculate_gate_size())
memory = tf.tanh(memory)
inputs = basic.BatchFlatten()(inputs)
gate_inputs = basic.BatchApply(basic.Linear(num_gates), n_dims=1)(inputs)
gate_inputs = tf.expand_dims(gate_inputs, axis=1)
gate_memory = basic.BatchApply(basic.Linear(num_gates))(memory)
gates = tf.split((gate_memory + gate_inputs), num_or_size_splits=2, axis=2)
(input_gate, forget_gate) = gates
input_gate = tf.sigmoid((input_gate + self._input_bias))
forget_gate = tf.sigmoid((forget_gate + self._forget_bias))
return (input_gate, forget_gate)
|
Create input and forget gates for this step using `inputs` and `memory`.
Args:
inputs: Tensor input.
memory: The current state of memory.
Returns:
input_gate: A LSTM-like insert gate.
forget_gate: A LSTM-like forget gate.
|
codesearchnet
|
def sender(self, jid: str):
if jid is not None and not isinstance(jid, str):
raise TypeError("'sender' MUST be a string")
self._sender = aioxmpp.JID.fromstr(jid) if jid is not None else None
|
Set jid of the sender
Args:
jid (str): jid of the sender
|
juraj-google-style
|
def preprocess(self, dataset, mode, hparams, interleave=True):
def _preprocess(example):
examples = self.preprocess_example(example, mode, hparams)
if (not isinstance(examples, tf.data.Dataset)):
examples = tf.data.Dataset.from_tensors(examples)
return examples
if interleave:
dataset = dataset.apply(tf.data.experimental.parallel_interleave(_preprocess, sloppy=True, cycle_length=8))
else:
dataset = dataset.flat_map(_preprocess)
return dataset
|
Runtime preprocessing on the whole dataset.
Return a tf.data.Dataset -- the preprocessed version of the given one.
By default this function calls preprocess_example.
Args:
dataset: the Dataset of already decoded but not yet preprocessed features.
mode: tf.estimator.ModeKeys
hparams: HParams, model hyperparameters
interleave: bool, whether to use parallel_interleave, which is faster
but will alter the order of samples non-deterministically, or flat_map,
which is slower but will preserve the sample order.
Returns:
a Dataset
|
codesearchnet
|
def getfutureimports(entity):
if not (tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity)):
return tuple()
return tuple(sorted((name for name, value in entity.__globals__.items() if getattr(value, '__module__', None) == '__future__')))
|
Detects what future imports are necessary to safely execute entity source.
Args:
entity: Any object
Returns:
A tuple of future strings
|
github-repos
|
def ValidateDependencies(rdf_artifact):
for dependency in GetArtifactDependencies(rdf_artifact):
try:
dependency_obj = REGISTRY.GetArtifact(dependency)
except rdf_artifacts.ArtifactNotRegisteredError as e:
raise rdf_artifacts.ArtifactDependencyError(
rdf_artifact, "missing dependency", cause=e)
message = dependency_obj.error_message
if message:
raise rdf_artifacts.ArtifactDependencyError(
rdf_artifact, "dependency error", cause=message)
|
Validates artifact dependencies.
This method checks whether all dependencies of the artifact are present
and contain no errors.
This method can be called only after all other artifacts have been loaded.
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactDependencyError: If a dependency is missing or contains errors.
|
juraj-google-style
|
def __init__(self, row_partitions: Tuple[RowPartitionSpec, ...], static_inner_shape: tensor_shape.TensorShape, dtype: dtypes.DType):
if not isinstance(row_partitions, Iterable):
raise TypeError('row_partitions should be an Iterable')
row_partitions = tuple(row_partitions)
static_inner_shape = tensor_shape.as_shape(static_inner_shape)
dtype = dtypes.as_dtype(dtype)
if not all((isinstance(rp, RowPartitionSpec) for rp in row_partitions)):
raise TypeError('row_partitions should be an Iterable of RowPartitionSpecs')
if dtype != dtypes.int32 and dtype != dtypes.int64:
raise ValueError('dtype must be tf.int32 or tf.int64')
for spec in row_partitions:
if spec.dtype != dtype:
raise ValueError(f'dtype of {spec!r} is {spec.dtype!r}: expected {dtype!r}')
row_partitions = tuple(row_partitions)
inner_rank = static_inner_shape.rank
if inner_rank == 0:
if row_partitions:
raise ValueError('If row_partitions are provided, must have inner_rank > 0')
else:
num_slices_in_dimension = []
for i in range(len(row_partitions)):
rp = row_partitions[i]
result = tensor_shape.Dimension(rp.nrows)
if i > 0:
previous_rp = row_partitions[i - 1]
result = result.merge_with(previous_rp.nvals)
result = result.merge_with(num_slices_in_dimension[-1] * previous_rp.uniform_row_length)
num_slices_in_dimension.append(result)
if row_partitions:
last_rp = row_partitions[-1]
result = (num_slices_in_dimension[-1] * last_rp.uniform_row_length).merge_with(last_rp.nvals)
if inner_rank is not None:
result = result.merge_with(tensor_shape.dimension_at_index(static_inner_shape, 0))
static_inner_shape = result + static_inner_shape[1:]
num_slices_in_dimension.append(result)
for i in range(len(num_slices_in_dimension) - 1, 0, -1):
num_slices_in_dimension[i - 1] = num_slices_in_dimension[i - 1].merge_with(_safe_floor_div(num_slices_in_dimension[i], row_partitions[i - 1].uniform_row_length))
row_partitions = [RowPartitionSpec(nrows=num_slices_in_dimension[i].value, uniform_row_length=rp.uniform_row_length, nvals=num_slices_in_dimension[i + 1].value, dtype=rp.dtype) for i, rp in enumerate(row_partitions)]
self._static_inner_shape = static_inner_shape
self._inner_shape = tensor_lib.TensorSpec([inner_rank], dtype=dtype)
self._row_partitions = row_partitions
|
Create a Spec given row partitions, a static inner shape, and a dtype.
Args:
row_partitions: A sequence of `RowPartitionSpec`s describing how the
ragged shape is partitioned.
static_inner_shape: The static shape of the flat_values.
dtype: The DType used to encode the shape (tf.int64 or tf.int32).
|
github-repos
|
def _initialize_slots(self, seed, hashvalues):
self.seed = seed
self.hashvalues = self._parse_hashvalues(hashvalues)
|
Initialize the slots of the LeanMinHash.
Args:
seed (int): The random seed controls the set of random
permutation functions generated for this LeanMinHash.
hashvalues: The hash values are the internal state of the LeanMinHash.
|
juraj-google-style
|
def CallHwclock(logger):
command = ['/sbin/hwclock', '--hctosys']
try:
subprocess.check_call(command)
except subprocess.CalledProcessError:
logger.warning('Failed to sync system time with hardware clock.')
else:
logger.info('Synced system time with hardware clock.')
|
Sync clock using hwclock.
Args:
logger: logger object, used to write to SysLog and serial port.
|
codesearchnet
|
def retry_loop(self, context, step_method):
logger.debug('starting')
context['retryCounter'] = 0
sleep = context.get_formatted_as_type(self.sleep, out_type=float)
if self.max:
max = context.get_formatted_as_type(self.max, out_type=int)
logger.info(f'retry decorator will try {max} times at {sleep}s intervals.')
else:
max = None
logger.info(f'retry decorator will try indefinitely at {sleep}s intervals.')
if poll.while_until_true(interval=sleep, max_attempts=max)(self.exec_iteration)(context=context, step_method=step_method):
logger.debug('retry loop complete, reporting success.')
logger.debug('retry loop done')
logger.debug('done')
|
Run step inside a retry loop.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
step_method: (method/function) This is the method/function that
will execute on every loop iteration. Signature is:
function(context)
|
codesearchnet
|
def get_dataset_end_date(self, date_format=None):
dataset_date = self.get_dataset_end_date_as_datetime()
return self._get_formatted_date(dataset_date, date_format)
|
Get dataset end date as a string in the specified format. For a range, this returns the end date.
If no format is supplied, an ISO 8601 string is returned.
Args:
date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.
Returns:
Optional[str]: Dataset date string or None if no date is set
|
juraj-google-style
|
def checkPermissions(permissions=[], obj=None):
if (not obj):
return False
sm = getSecurityManager()
for perm in permissions:
if (not sm.checkPermission(perm, obj)):
return ''
return True
|
Checks if a user has permissions for a given object.
Args:
permissions: The permissions the current user must be compliant with
obj: The object for which the permissions apply
Returns:
True if the user complies with all the permissions for the given object;
otherwise an empty string (or False if no object is given).
|
codesearchnet
|
def _convert_tf1_model(flags):
if flags.custom_opdefs:
register_custom_opdefs(_parse_array(flags.custom_opdefs))
converter = _get_tflite_converter(flags)
if flags.inference_type:
converter.inference_type = _parse_inference_type(flags.inference_type, 'inference_type')
if flags.inference_input_type:
converter.inference_input_type = _parse_inference_type(flags.inference_input_type, 'inference_input_type')
if flags.output_format:
converter.output_format = _toco_flags_pb2.FileFormat.Value(flags.output_format)
if flags.mean_values and flags.std_dev_values:
input_arrays = converter.get_input_arrays()
std_dev_values = _parse_array(flags.std_dev_values, type_fn=float)
if converter.inference_type == dtypes.float32:
mean_values = _parse_array(flags.mean_values, type_fn=float)
else:
mean_values = _parse_array(flags.mean_values, type_fn=int)
quant_stats = list(zip(mean_values, std_dev_values))
if not flags.input_arrays and len(input_arrays) > 1 or len(input_arrays) != len(quant_stats):
raise ValueError("Mismatching --input_arrays, --std_dev_values, and --mean_values. The flags must have the same number of items. The current input arrays are '{0}'. --input_arrays must be present when specifying --std_dev_values and --mean_values with multiple input tensors in order to map between names and values.".format(','.join(input_arrays)))
converter.quantized_input_stats = dict(list(zip(input_arrays, quant_stats)))
if flags.default_ranges_min is not None and flags.default_ranges_max is not None:
converter.default_ranges_stats = (flags.default_ranges_min, flags.default_ranges_max)
if flags.drop_control_dependency:
converter.drop_control_dependency = flags.drop_control_dependency
if flags.reorder_across_fake_quant:
converter.reorder_across_fake_quant = flags.reorder_across_fake_quant
if flags.change_concat_input_ranges:
converter.change_concat_input_ranges = flags.change_concat_input_ranges == 'TRUE'
if flags.allow_custom_ops:
converter.allow_custom_ops = flags.allow_custom_ops
if flags.target_ops:
ops_set_options = lite.OpsSet.get_options()
converter.target_spec.supported_ops = set()
for option in flags.target_ops.split(','):
if option not in ops_set_options:
raise ValueError('Invalid value for --target_ops. Options: {0}'.format(','.join(ops_set_options)))
converter.target_spec.supported_ops.add(lite.OpsSet(option))
if flags.experimental_select_user_tf_ops:
if lite.OpsSet.SELECT_TF_OPS not in converter.target_spec.supported_ops:
raise ValueError('--experimental_select_user_tf_ops can only be set if --target_ops contains SELECT_TF_OPS.')
user_op_set = set()
for op_name in flags.experimental_select_user_tf_ops.split(','):
user_op_set.add(op_name)
converter.target_spec.experimental_select_user_tf_ops = list(user_op_set)
if flags.post_training_quantize:
converter.optimizations = [lite.Optimize.DEFAULT]
if converter.inference_type != dtypes.float32:
print('--post_training_quantize quantizes a graph of inference_type FLOAT. Overriding inference_type to FLOAT.')
converter.inference_type = dtypes.float32
if flags.quantize_to_float16:
converter.target_spec.supported_types = [dtypes.float16]
if not flags.post_training_quantize:
print('--quantize_to_float16 will only take effect with the --post_training_quantize flag enabled.')
if flags.dump_graphviz_dir:
converter.dump_graphviz_dir = flags.dump_graphviz_dir
if flags.dump_graphviz_video:
converter.dump_graphviz_video = flags.dump_graphviz_video
if flags.conversion_summary_dir:
converter.conversion_summary_dir = flags.conversion_summary_dir
converter.experimental_new_converter = flags.experimental_new_converter
if flags.experimental_new_quantizer is not None:
converter.experimental_new_quantizer = flags.experimental_new_quantizer
output_data = converter.convert()
with gfile.GFile(flags.output_file, 'wb') as f:
f.write(output_data)
|
Calls function to convert the TensorFlow 1.X model into a TFLite model.
Args:
flags: argparse.Namespace object.
Raises:
ValueError: Invalid flags.
|
github-repos
|
def _get_read_preference(read_preference):
read_preference = getattr(pymongo.ReadPreference, read_preference, None)
if (read_preference is None):
raise ValueError(('Invalid read preference: %s' % read_preference))
return read_preference
|
Converts read_preference from string to pymongo.ReadPreference value.
Args:
read_preference: string containing the read_preference from the
config file
Returns:
A value from the pymongo.ReadPreference enum
Raises:
ValueError: If the read preference is invalid
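For example, assuming pymongo is available:
```
pref = _get_read_preference('SECONDARY_PREFERRED')
# pref is pymongo.ReadPreference.SECONDARY_PREFERRED
_get_read_preference('bogus')  # raises ValueError
```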
|
codesearchnet
|
def blend(self, other, percent=0.5):
dest = (1.0 - percent)
rgb = tuple((((u * percent) + (v * dest)) for (u, v) in zip(self.__rgb, other.__rgb)))
a = ((self.__a * percent) + (other.__a * dest))
return Color(rgb, 'rgb', a, self.__wref)
|
Blend this color with the other one.
Args:
:other:
the grapefruit.Color to blend with this one.
Returns:
A grapefruit.Color instance which is the result of blending
this color on the other one.
>>> c1 = Color.from_rgb(1, 0.5, 0, 0.2)
>>> c2 = Color.from_rgb(1, 1, 1, 0.6)
>>> c3 = c1.blend(c2)
>>> c3
Color(1.0, 0.75, 0.5, 0.4)
|
codesearchnet
|
def merge_dims(value, outer_axis, inner_axis):
if outer_axis == inner_axis:
return value
while outer_axis == 0 and isinstance(value, RaggedTensor):
value = value.values
inner_axis -= 1
if inner_axis == 0:
return value
if not isinstance(value, RaggedTensor):
if value.shape.is_fully_defined():
old_shape = value.shape.as_list()
new_shape = old_shape[:outer_axis] + [-1] + old_shape[inner_axis + 1:]
else:
old_shape = array_ops.shape(value)
new_shape = array_ops.concat([old_shape[:outer_axis], [-1], old_shape[inner_axis + 1:]], axis=0)
return array_ops.reshape(value, new_shape)
if outer_axis > 1:
return value.with_values(merge_dims(value.values, outer_axis - 1, inner_axis - 1))
new_values = value.values
new_splits = value.row_splits
for axis in range(outer_axis, inner_axis):
if isinstance(new_values, RaggedTensor):
new_splits = array_ops.gather(new_values.row_splits, new_splits)
new_values = new_values.values
else:
shape_split = inner_axis - axis + 1
if new_values.shape.is_fully_defined():
old_shape = new_values.shape.as_list()
new_shape = [-1] + old_shape[shape_split:]
flat_size = _prod(old_shape[1:shape_split])
else:
old_shape = array_ops.shape(new_values)
new_shape = array_ops.concat([[-1], old_shape[shape_split:]], axis=0)
flat_size = math_ops.cast(math_ops.reduce_prod(old_shape[1:shape_split]), new_splits.dtype)
new_values = array_ops.reshape(new_values, new_shape)
new_splits = new_splits * flat_size
break
return RaggedTensor.from_row_splits(new_values, new_splits)
|
Merges value[outer_axis...inner_axis] into a single dimension.
See `RaggedTensor.merge_dims()` for more details. This helper differs from
`RaggedTensor.merge_dims()` in that `value` may be a dense or ragged tensor.
Args:
value: A `RaggedTensor` or `Tensor`
outer_axis: `int`
inner_axis: `int`
Returns:
A flattened `RaggedTensor` or `Tensor`.
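The public `tf.RaggedTensor.merge_dims` method exposes the same behavior; a small illustration:
```
import tensorflow as tf
rt = tf.ragged.constant([[[1, 2], [3]], [[4, 5, 6]]])
rt.merge_dims(1, 2)  # <tf.RaggedTensor [[1, 2, 3], [4, 5, 6]]>
```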
|
github-repos
|
def write(gmt, out_path):
with open(out_path, 'w') as f:
for (_, each_dict) in enumerate(gmt):
f.write((each_dict[SET_IDENTIFIER_FIELD] + '\t'))
f.write((each_dict[SET_DESC_FIELD] + '\t'))
f.write('\t'.join([str(entry) for entry in each_dict[SET_MEMBERS_FIELD]]))
f.write('\n')
|
Write a GMT to a text file.
Args:
gmt (GMT object): list of dicts
out_path (string): output path
Returns:
None
|
codesearchnet
|
class QuantEmbedding(nn.Module):
def __init__(self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, weight_bit=8, momentum=0.95, quant_mode=False):
super().__init__()
self.num_ = num_embeddings
self.dim = embedding_dim
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))
self.register_buffer('weight_scaling_factor', torch.zeros(1))
self.register_buffer('weight_integer', torch.zeros_like(self.weight))
self.weight_bit = weight_bit
self.momentum = momentum
self.quant_mode = quant_mode
self.percentile_mode = False
self.weight_function = SymmetricQuantFunction.apply
def forward(self, x, positions=None, incremental_state=None):
if not self.quant_mode:
return (nn.functional.embedding(x, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse), None)
w = self.weight
w_transform = w.data.detach()
w_min = w_transform.min().expand(1)
w_max = w_transform.max().expand(1)
self.weight_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, False)
self.weight_integer = self.weight_function(self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor)
emb_int = nn.functional.embedding(x, self.weight_integer, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)
return (emb_int * self.weight_scaling_factor, self.weight_scaling_factor)
|
Quantized version of `torch.nn.Embedding`. Adds quantization-specific arguments on top of `torch.nn.Embedding`.
Args:
weight_bit (`int`, *optional*, defaults to `8`):
Bitwidth for the quantized weight.
momentum (`float`, *optional*, defaults to `0.95`):
Momentum for updating the activation quantization range.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
|
github-repos
|
def validate_additional_properties(self, valid_response, response):
assert isinstance(valid_response, dict)
assert isinstance(response, dict)
first_value = valid_response[list(valid_response)[0]]
if isinstance(first_value, dict):
definition = None
definition_name = self.get_dict_definition(first_value)
if (definition_name is None):
definition = self._definition_from_example(first_value)
definition_name = 'self generated'
for item in response.values():
if (not self.validate_definition(definition_name, item, definition=definition)):
return False
return True
if isinstance(first_value, list):
raise Exception('Not implemented yet')
try:
assert all((isinstance(y, type(first_value)) for (_, y) in response.items()))
assert all((isinstance(y, type(first_value)) for (_, y) in valid_response.items()))
return True
except Exception:
return False
|
Validates additional properties. In additional properties, we only
need to compare the values of the dict, not the keys
Args:
valid_response: An example response (for example generated in
_get_example_from_properties(self, spec))
Type is DICT
response: The actual dict coming from the response
Type is DICT
Returns:
A boolean - whether the actual response validates against the given example
|
codesearchnet
|
def remove_volume(self, name, force=False):
params = {}
if force:
if utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
'force removal was introduced in API 1.25'
)
params = {'force': force}
url = self._url('/volumes/{0}', name, params=params)
resp = self._delete(url)
self._raise_for_status(resp)
|
Remove a volume. Similar to the ``docker volume rm`` command.
Args:
name (str): The volume's name
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove.
|
juraj-google-style
|
def metrics_format(self, metrics: dict[str, float]) -> dict[str, float]:
metrics_copy = metrics.copy()
for k, v in metrics_copy.items():
if '_mem_' in k:
metrics_copy[k] = f'{v >> 20}MB'
elif '_runtime' in k:
metrics_copy[k] = _secs2timedelta(v)
elif k == 'total_flos':
metrics_copy[k] = f'{int(v) >> 30}GF'
elif isinstance(metrics_copy[k], float):
metrics_copy[k] = round(v, 4)
return metrics_copy
|
Reformat Trainer metrics values to a human-readable format.
Args:
metrics (`Dict[str, float]`):
The metrics returned from train/evaluate/predict
Returns:
metrics (`Dict[str, float]`): The reformatted metrics
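A worked example (the exact string produced for `_runtime` keys depends on the `_secs2timedelta` helper, so it is omitted here):
```
trainer.metrics_format({
    "train_mem_gpu_alloc_delta": 5 * 2**20,  # -> "5MB"   (value >> 20)
    "total_flos": 3 * 2**30,                 # -> "3GF"   (value >> 30)
    "train_loss": 0.123456,                  # -> 0.1235  (rounded to 4 places)
})
```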
|
github-repos
|
def __init__(self, funcs, trackable_obj=None):
super(TFLiteFrozenGraphConverterV2, self).__init__()
self._funcs = funcs
self._trackable_obj = trackable_obj
self.experimental_lower_to_saved_model = True
|
Constructor for TFLiteConverter.
Args:
funcs: List of TensorFlow ConcreteFunctions. The list should not contain
duplicate elements.
trackable_obj: tf.AutoTrackable object associated with `funcs`. A
reference to this object needs to be maintained so that Variables do not
get garbage collected since functions have a weak reference to
Variables. This is only required when the tf.AutoTrackable object is not
maintained by the user (e.g. `from_saved_model`).
|
github-repos
|
async def get(self, key):
log.info('Looking up key %s', key)
dkey = digest(key)
if (self.storage.get(dkey) is not None):
return self.storage.get(dkey)
node = Node(dkey)
nearest = self.protocol.router.find_neighbors(node)
if (not nearest):
log.warning('There are no known neighbors to get key %s', key)
return None
spider = ValueSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
return (await spider.find())
|
Get a key if the network has it.
Returns:
:class:`None` if not found, the value otherwise.
|
codesearchnet
|
def SetExtractionConfiguration(self, configuration):
self._hasher_file_size_limit = configuration.hasher_file_size_limit
self._SetHashers(configuration.hasher_names_string)
self._process_archives = configuration.process_archives
self._process_compressed_streams = configuration.process_compressed_streams
self._SetYaraRules(configuration.yara_rules_string)
|
Sets the extraction configuration settings.
Args:
configuration (ExtractionConfiguration): extraction configuration.
|
codesearchnet
|
def update(self, task_name, result):
with open(self.filepath, 'rb') as f:
existing_results = pickle.load(f)
if task_name not in self.tasks:
self._add_task(task_name)
existing_results['tasks'].append(task_name)
existing_results['results'].append([])
task_name_idx = existing_results['tasks'].index(task_name)
results = existing_results['results'][task_name_idx]
results.append(result)
with open(self.filepath, 'wb') as f:
pickle.dump(existing_results, f)
|
Update the results file with new information.
Args:
task_name (str): Name of the currently running task. A previously unseen
``task_name`` will create a new entry in both :attr:`tasks`
and :attr:`results`.
result: This will be appended to the list in :attr:`results` which
corresponds to the ``task_name`` in :attr:`tasks`.
|
juraj-google-style
|
def load_resource(resource_url: str, forceupdate: bool = False):
log.info(f"Loading resource {resource_url}")
try:
fo = bel.utils.download_file(resource_url)
if not fo:
log.error(f"Could not download and open file {resource_url}")
return "Failed to download resource_url"
fo.seek(0)
with gzip.open(fo, "rt") as f:
metadata = json.loads(f.__next__())
if "metadata" not in metadata:
log.error(f"Missing metadata entry for {resource_url}")
return "Cannot load resource file - missing metadata object in first line of file"
if metadata["metadata"]["type"] == "namespace":
bel.resources.namespace.load_terms(fo, metadata, forceupdate)
elif metadata["metadata"]["type"] == "ortholog":
bel.resources.ortholog.load_orthologs(fo, metadata)
finally:
fo.close()
|
Load BEL Resource file
Forceupdate will create a new index in Elasticsearch regardless of whether
an index with the resource version already exists.
Args:
resource_url: URL from which to download the resource to load into the BEL API
forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches
|
juraj-google-style
|
def load(self, path):
path = os.path.expandvars(os.path.expanduser(path))
gdg = cgaddag.gdg_load(path.encode("ascii"))
if not gdg:
errno = ctypes.c_int.in_dll(ctypes.pythonapi, "errno").value
raise OSError(errno, os.strerror(errno), path)
self.__del__()
self.gdg = gdg.contents
|
Load a GADDAG from file, replacing the words currently in this GADDAG.
Args:
path: path to saved GADDAG to be loaded.
|
juraj-google-style
|
def _wrap_el(self, value):
if isinstance(value, dict):
return {k: self._wrap_el(v) for k, v in value.items()}
elif isinstance(value, WebElement):
return {'ELEMENT': value.element_id}
elif isinstance(value, list) and not isinstance(value, str):
return [self._wrap_el(item) for item in value]
else:
return value
|
Convert WebElement objects to {'ELEMENT': 1234}
Args:
value(str|list|dict): The local value.
Returns:
The wrapped value.
|
juraj-google-style
|
def recent_all_projects(self, limit=30, offset=0):
method = 'GET'
url = ('/recent-builds?circle-token={token}&limit={limit}&'
'offset={offset}'.format(token=self.client.api_token,
limit=limit,
offset=offset))
json_data = self.client.request(method, url)
return json_data
|
Return information about recent builds across all projects.
Args:
limit (int): Number of builds to return, max=100, default=30.
offset (int): Builds returned from this point, default=0.
Returns:
A list of dictionaries.
|
juraj-google-style
|
def _process_dataset(name, directory, num_shards, labels_file):
filenames, texts, labels = _find_image_files(directory, labels_file)
_process_image_files(name, filenames, texts, labels, num_shards)
|
Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
|
juraj-google-style
|
def sort_recursive(data):
newdict = {}
for i in data.items():
if (type(i[1]) is dict):
newdict[i[0]] = sort_recursive(i[1])
else:
newdict[i[0]] = i[1]
return OrderedDict(sorted(newdict.items(), key=(lambda item: (compare_type(type(item[1])), item[0]))))
|
Recursively sorts all elements in a dictionary
Args:
data (dict): The dictionary to sort
Returns:
sorted_dict (OrderedDict): The sorted data dict
|
codesearchnet
|
def remove_triple(self, subj: URIRef, pred: URIRef, obj: Union[(URIRef, Literal)]) -> None:
self.g.remove((subj, pred, obj))
|
Removes triple from rdflib Graph
You must input the triple in its URIRef or Literal form for each node exactly the way it
was entered or it will not delete the triple.
Args:
subj: Entity subject to be removed if it is the only node with this subject; otherwise this
just deletes a description, i.e. a predicate_object of this entity.
pred: Entity predicate to be removed
obj: Entity object to be removed
|
codesearchnet
|
def remove(self, processor_identity):
with self._condition:
processor_types = self._identities.get(processor_identity)
if processor_types is None:
LOGGER.warning("transaction processor with identity %s tried "
"to unregister but was not registered",
processor_identity)
return
for processor_type in processor_types:
if processor_type not in self._processors:
LOGGER.warning("processor type %s not a known processor "
"type but is associated with identity %s",
processor_type,
processor_identity)
continue
self._processors[processor_type].remove_processor(
processor_identity=processor_identity)
if not self._processors[processor_type]:
del self._processors[processor_type]
|
Removes all of the Processors for
a particular transaction processor zeromq identity.
Args:
processor_identity (str): The zeromq identity of the transaction
processor.
|
juraj-google-style
|
def postprocess(x, n_bits_x=8):
x = tf.where(tf.is_finite(x), x, tf.ones_like(x))
x = tf.clip_by_value(x, -0.5, 0.5)
x += 0.5
x = x * 2**n_bits_x
return tf.cast(tf.clip_by_value(x, 0, 255), dtype=tf.uint8)
|
Converts x from [-0.5, 0.5], to [0, 255].
Args:
x: 3-D or 4-D Tensor normalized between [-0.5, 0.5]
n_bits_x: Number of bits representing each pixel of the output.
Defaults to 8, to default to 256 possible values.
Returns:
x: 3-D or 4-D Tensor representing images or videos.
|
juraj-google-style
|
def uses_star_kwargs_in_call(node):
if sys.version_info[:2] >= (3, 5):
for keyword in node.keywords:
if keyword.arg is None:
return True
elif node.kwargs:
return True
return False
|
Check if an ast.Call node uses arbitrary-length **kwargs.
This function works with the AST call node format of Python3.5+
as well as the different AST format of earlier versions of Python.
Args:
node: The ast.Call node to check arg values for.
Returns:
True if the node uses starred variadic positional args or keyword args.
False if it does not.
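For example, on Python 3.5+:
```
import ast
call = ast.parse("f(a, **extra)").body[0].value
uses_star_kwargs_in_call(call)  # True: keywords contains a keyword with arg=None
call = ast.parse("f(a, b=1)").body[0].value
uses_star_kwargs_in_call(call)  # False
```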
|
github-repos
|
def findContours(*args, **kwargs):
if cv2.__version__.startswith('4'):
(contours, hierarchy) = cv2.findContours(*args, **kwargs)
elif cv2.__version__.startswith('3'):
(_, contours, hierarchy) = cv2.findContours(*args, **kwargs)
else:
raise AssertionError('cv2 must be either version 3 or 4 to call this method')
return (contours, hierarchy)
|
Wraps cv2.findContours to maintain compatibility between versions
3 and 4
Returns:
contours, hierarchy
|
codesearchnet
|
def pprint_value(self, value):
own_type = type(value) if self.type is None else self.type
formatter = (self.value_format if self.value_format
else self.type_formatters.get(own_type))
if formatter:
if callable(formatter):
return formatter(value)
elif isinstance(formatter, basestring):
if isinstance(value, (dt.datetime, dt.date)):
return value.strftime(formatter)
elif isinstance(value, np.datetime64):
return util.dt64_to_dt(value).strftime(formatter)
elif re.findall(r"\{(\w+)\}", formatter):
return formatter.format(value)
else:
return formatter % value
return unicode(bytes_to_unicode(value))
|
Applies the applicable formatter to the value.
Args:
value: Dimension value to format
Returns:
Formatted dimension value
|
juraj-google-style
|
def safe_indicator(self, indicator, errors='strict'):
if indicator is not None:
try:
indicator = quote(self.s(str(indicator), errors=errors), safe='~')
except KeyError:
indicator = quote(bytes(indicator), safe='~')
return indicator
|
URL-encode an indicator value for a safe HTTP request.
Args:
indicator (string): Indicator to URL Encode
errors (string): The error handler type.
Returns:
(string): The urlencoded string
|
juraj-google-style
|
def get_cuda_compute_capability(source_from_url=False):
if not GPU_TYPE:
if FLAGS.debug:
print('Warning: GPU_TYPE is empty. Make sure to call `get_gpu_type()` first.')
elif GPU_TYPE == 'unknown':
if FLAGS.debug:
print('Warning: Unknown GPU is detected. Skipping CUDA compute capability retrieval.')
else:
if source_from_url:
cuda_compute_capa = cuda_compute_capability.retrieve_from_web()
else:
cuda_compute_capa = cuda_compute_capability.retrieve_from_golden()
return cuda_compute_capa[GPU_TYPE]
return
|
Retrieves CUDA compute capability based on the detected GPU type.
This function uses the `cuda_compute_capability` module to retrieve the
corresponding CUDA compute capability for the given GPU type.
Args:
source_from_url: Boolean deciding whether to source compute capability
from NVIDIA website or from a local golden file.
Returns:
List of all supported CUDA compute capabilities for the given GPU type.
e.g. ['3.5', '3.7']
|
github-repos
|
def GetPathSegmentAndSuffix(self, base_path, path):
if path is None or base_path is None or not path.startswith(base_path):
return None, None
path_index = len(base_path)
if base_path and not base_path.endswith(self.PATH_SEPARATOR):
path_index += 1
if path_index == len(path):
return '', ''
path_segment, _, suffix = path[path_index:].partition(self.PATH_SEPARATOR)
return path_segment, suffix
|
Determines the path segment and suffix of the path.
None is returned if the path does not start with the base path and
an empty string if the path exactly matches the base path.
Args:
base_path (str): base path.
path (str): path.
Returns:
tuple[str, str]: path segment and suffix string.
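A worked example, assuming PATH_SEPARATOR is '/' and `helper` is an instance of this class:
```
helper.GetPathSegmentAndSuffix('/home/user', '/home/user/docs/report.txt')
# -> ('docs', 'report.txt')
helper.GetPathSegmentAndSuffix('/home/user', '/home/user')    # -> ('', '')
helper.GetPathSegmentAndSuffix('/home/user', '/tmp/other')    # -> (None, None)
```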
|
juraj-google-style
|
def post_comment(self, sharekey=None, comment=None):
endpoint = '/api/sharedfile/{0}/comments'.format(sharekey)
post_data = {'body': comment}
data = self._make_request('POST', endpoint=endpoint, data=post_data)
return Comment.NewFromJSON(data)
|
Post a comment on behalf of the current user to the
SharedFile with the given sharekey.
Args:
sharekey (str): Sharekey of the SharedFile to which you'd like
to post a comment.
comment (str): Text of the comment to post.
Returns:
Comment object.
|
codesearchnet
|
@contextmanager
def checkout_commit(repo: Repo, commit_id: str):
current_head = repo.head.commit if repo.head.is_detached else repo.head.ref
try:
repo.git.checkout(commit_id)
yield
finally:
repo.git.checkout(current_head)
|
Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit.
Args:
repo (`git.Repo`): A git repository (for instance the Transformers repo).
commit_id (`str`): The commit reference to checkout inside the context manager.
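A usage sketch, assuming the `contextmanager` decorator shown above (the repository path and ref are illustrative):
```
from git import Repo
repo = Repo(".")  # any local git checkout
with checkout_commit(repo, "HEAD~1"):
    pass  # inspect the tree as it was one commit ago
# on exit, the previous branch or commit is checked out again
```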
|
github-repos
|
def migrate(connection, dsn):
all_migrations = _get_all_migrations()
logger.debug('Collected migrations: {}'.format(all_migrations))
for version, modname in all_migrations:
if _is_missed(connection, version) and version <= SCHEMA_VERSION:
logger.info('Missed migration: {} migration is missed. Migrating...'.format(version))
module = __import__(modname, fromlist='dummy')
trans = connection.begin()
try:
module.Migration().migrate(connection)
_update_version(connection, version)
trans.commit()
except:
trans.rollback()
logger.error("Failed to migrate '{}' on {} ".format(version, dsn))
raise
|
Collects all migrations and applies any that were missed.
Args:
connection (sqlalchemy connection):
|
juraj-google-style
|
def host_impl(self, run, tool):
hosts = {}
run_dir = self._run_dir(run)
if (not run_dir):
logger.warn('Cannot find asset directory for: %s', run)
return hosts
tool_pattern = ('*' + TOOLS[tool])
try:
files = tf.io.gfile.glob(os.path.join(run_dir, tool_pattern))
hosts = [os.path.basename(f).replace(TOOLS[tool], '') for f in files]
except tf.errors.OpError as e:
logger.warn('Cannot read asset directory: %s, OpError %s', run_dir, e)
return hosts
|
Returns available hosts for the run and tool in the log directory.
In the plugin log directory, each directory contains profile data for a
single run (identified by the directory name), and files in the run
directory contains data for different tools and hosts. The file that
contains profile for a specific tool "x" will have a prefix name TOOLS["x"].
Example:
log/
run1/
plugins/
profile/
host1.trace
host2.trace
run2/
plugins/
profile/
host1.trace
host2.trace
Returns:
A list of host names e.g.
{"host1", "host2", "host3"} for the example.
|
codesearchnet
|
def add_streamer(self, binary_descriptor):
streamer = streamer_descriptor.parse_binary_descriptor(binary_descriptor)
try:
self.graph.add_streamer(streamer)
self.streamer_status[len(self.graph.streamers) - 1] = StreamerStatus()
return Error.NO_ERROR
except ResourceUsageError:
return _pack_sgerror(SensorGraphError.NO_MORE_STREAMER_RESOURCES)
|
Add a streamer to the sensor_graph using a binary streamer descriptor.
Args:
binary_descriptor (bytes): An encoded binary streamer descriptor.
Returns:
int: A packed error code
|
juraj-google-style
|
def get_config(self, key, default=MISSING):
keyname = "config:" + key
try:
return self.kvstore.get(keyname)
except KeyError:
if default is MISSING:
raise ArgumentError("No config value found for key", key=key)
return default
|
Get the value of a persistent config key from the registry
If no default is specified and the key is not found ArgumentError is raised.
Args:
key (string): The key name to fetch
default (string): an optional value to be returned if key cannot be found
Returns:
string: the key's value
|
juraj-google-style
|
def logical_enclosures(self):
if (not self.__logical_enclosures):
self.__logical_enclosures = LogicalEnclosures(self.__connection)
return self.__logical_enclosures
|
Gets the LogicalEnclosures API client.
Returns:
LogicalEnclosures:
|
codesearchnet
|
def pnum_to_processor_coordinates(mesh_shape, pnum):
ret = []
for dimsize in mesh_shape.to_integer_list[::(- 1)]:
ret.append((pnum % dimsize))
pnum = (pnum // dimsize)
return ret[::(- 1)]
|
Coordinates of a processor in the mesh.
Args:
mesh_shape: a Shape
pnum: an integer less than len(mesh_shape)
Returns:
a list of integers with length len(mesh_shape)
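A worked example, writing the mesh shape as a plain list for illustration:
```
# mesh_shape = [4, 3, 2], pnum = 17
# reversed dims [2, 3, 4]: 17 % 2 = 1, 17 // 2 = 8; 8 % 3 = 2, 8 // 3 = 2; 2 % 4 = 2
# reversing the remainders gives coordinates [2, 2, 1], and 2*(3*2) + 2*2 + 1 = 17
```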
|
codesearchnet
|
def diff_toDelta(self, diffs):
text = []
for (op, data) in diffs:
if op == self.DIFF_INSERT:
data = data.encode("utf-8")
text.append("+" + urllib.quote(data, "!~*'();/?:@&=+$,
elif op == self.DIFF_DELETE:
text.append("-%d" % len(data))
elif op == self.DIFF_EQUAL:
text.append("=%d" % len(data))
return "\t".join(text)
|
Crush the diff into an encoded string which describes the operations
required to transform text1 into text2.
E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'.
Operations are tab-separated. Inserted text is escaped using %xx notation.
Args:
diffs: Array of diff tuples.
Returns:
Delta text.
|
juraj-google-style
|
def _get_sqlite_columns(connection, table):
SQL_TO_PYTHON_TYPES = {
'INT': int,
'INTEGER': int,
'TINYINT': int,
'SMALLINT': int,
'MEDIUMINT': int,
'BIGINT': int,
'UNSIGNED BIG INT': int,
'INT8': int,
'NUMERIC': float,
'REAL': float,
'FLOAT': float,
'DOUBLE': float,
'BOOLEAN': bool,
'CHARACTER': str,
'VARCHAR': str,
'TEXT': str
}
query = 'PRAGMA table_info(\'{}\');'
result = connection.execute(query.format(table))
ret = []
for row in result:
position = row[0] + 1
name = row[1]
datatype = row[2]
try:
datatype = SQL_TO_PYTHON_TYPES[datatype]
except KeyError:
raise Exception(
'Do not know how to convert {} sql datatype to python data type.'
.format(datatype))
ret.append((name, datatype, position))
return ret
|
Returns a list of tuples containing the columns of the table.
Args:
connection: sqlalchemy connection to sqlite database.
table (str): name of the table
Returns:
list of (name, datatype, position): where name is column name, datatype is
python type of the column, position is ordinal position of the column.
|
juraj-google-style
|
def _create_deployment_object(self, job_name, job_image,
deployment_name, port=80,
replicas=1,
cmd_string=None,
engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json',
engine_dir='.',
volumes=[]):
security_context = None
if self.user_id and self.group_id:
security_context = client.V1SecurityContext(run_as_group=self.group_id,
run_as_user=self.user_id,
run_as_non_root=self.run_as_non_root)
environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")
launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)]
volume_mounts = []
for volume in volumes:
volume_mounts.append(client.V1VolumeMount(mount_path=volume[1],
name=volume[0]))
container = None
if security_context:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
volume_mounts=volume_mounts,
command=['/bin/bash'],
args=launch_args,
env=[environment_vars],
security_context=security_context)
else:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
volume_mounts=volume_mounts,
command=['/bin/bash'],
args=launch_args,
env=[environment_vars])
secret = None
if self.secret:
secret = client.V1LocalObjectReference(name=self.secret)
volume_defs = []
for volume in volumes:
volume_defs.append(client.V1Volume(name=volume[0],
persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
claim_name=volume[0])))
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels={"app": job_name}),
spec=client.V1PodSpec(containers=[container],
image_pull_secrets=[secret],
volumes=volume_defs
))
spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,
template=template)
deployment = client.ExtensionsV1beta1Deployment(
api_version="extensions/v1beta1",
kind="Deployment",
metadata=client.V1ObjectMeta(name=deployment_name),
spec=spec)
return deployment
|
Create a kubernetes deployment for the job.
Args:
- job_name (string) : Name of the job and deployment
- job_image (string) : Docker image to launch
KWargs:
- port (integer) : Container port
- replicas : Number of replica containers to maintain
Returns:
- deployment: The deployment object to launch
|
juraj-google-style
|
def resolve_import(self, item):
name = item.name
short_name = None
if (item.is_from and (not item.is_star)):
if ('.' in name.lstrip('.')):
rindex = name.rfind('.')
else:
rindex = (name.rfind('.') + 1)
short_name = name[:rindex]
if import_finder.is_builtin(name):
filename = (name + '.so')
return Builtin(filename, name)
(filename, level) = convert_to_path(name)
if level:
filename = os.path.normpath(os.path.join(self.current_directory, filename))
files = [(name, filename)]
if short_name:
short_filename = os.path.dirname(filename)
files.append((short_name, short_filename))
for (module_name, path) in files:
for fs in self.fs_path:
f = self._find_file(fs, path)
if ((not f) or (f == self.current_module.path)):
continue
if item.is_relative():
package_name = self.current_module.package_name
if (package_name is None):
raise ImportException(name)
module_name = get_absolute_name(package_name, module_name)
if isinstance(self.current_module, System):
return System(f, module_name)
return Local(f, module_name, fs)
if item.source:
(prefix, ext) = os.path.splitext(item.source)
mod_name = name
if short_name:
mod = prefix.replace(os.path.sep, '.')
mod = utils.strip_suffix(mod, '.__init__')
if ((not mod.endswith(name)) and mod.endswith(short_name)):
mod_name = short_name
if (ext == '.pyc'):
pyfile = (prefix + '.py')
if os.path.exists(pyfile):
return System(pyfile, mod_name)
elif (not ext):
pyfile = os.path.join(prefix, '__init__.py')
if os.path.exists(pyfile):
return System(pyfile, mod_name)
return System(item.source, mod_name)
raise ImportException(name)
|
Simulate how Python resolves imports.
Returns the filename of the source file Python would load
when processing a statement like 'import name' in the module
we're currently under.
Args:
item: An instance of ImportItem
Returns:
A filename
Raises:
ImportException: If the module doesn't exist.
|
codesearchnet
|
def is_valid_geometry(geometry):
if (isinstance(geometry, Polygon) or isinstance(geometry, MultiPolygon)):
return True
else:
return False
|
Confirm that the geometry type is of type Polygon or MultiPolygon.
Args:
geometry (BaseGeometry): BaseGeometry instance (e.g. Polygon)
Returns:
bool
|
codesearchnet
|
def test_sample_sabr(self, supply_grad_vol_fn):
dtype = np.float64
drift_fn = lambda _, x: tf.zeros_like(x)
beta = tf.constant(0.5, dtype=dtype)
volvol = tf.constant(1.0, dtype=dtype)
rho = tf.constant(0.2, dtype=dtype)
def vol_fn(t, x):
del t
f = x[..., 0]
v = x[..., 1]
fb = f ** beta
m11 = v * fb * tf.math.sqrt(1 - tf.square(rho))
m12 = v * fb * rho
m21 = tf.zeros_like(m11)
m22 = volvol * v
mc1 = tf.concat([tf.expand_dims(m11, -1), tf.expand_dims(m21, -1)], -1)
mc2 = tf.concat([tf.expand_dims(m12, -1), tf.expand_dims(m22, -1)], -1)
should_be_zero = tf.expand_dims(tf.expand_dims((beta != 0) & (f <= 0.0), -1), -1)
vol_matrix = tf.concat([tf.expand_dims(mc1, -1), tf.expand_dims(mc2, -1)], -1)
return tf.where(should_be_zero, tf.zeros_like(vol_matrix), vol_matrix)
if supply_grad_vol_fn:
def _grad_volatility_fn(current_time, current_state, input_gradients):
return gradient.fwd_gradient(functools.partial(vol_fn, current_time), current_state, input_gradients=input_gradients, unconnected_gradients=tf.UnconnectedGradients.ZERO)
grad_volatility_fn = _grad_volatility_fn
else:
grad_volatility_fn = None
times = np.array([0.0, 0.1, 0.21, 0.32, 0.43, 0.55])
x0 = np.array([0.1, 0.2])
paths = self.evaluate(milstein_sampling.sample(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn, times=times, num_samples=1000, initial_state=x0, grad_volatility_fn=grad_volatility_fn, random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC, time_step=0.01, seed=[1, 42]))
mean = np.average(paths)
stddev = np.std(paths)
euler_paths = self.evaluate(euler_sampling.sample(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn, times=times, time_step=0.01, num_samples=10000, initial_state=x0, random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC, seed=[1, 42]))
euler_mean = np.average(euler_paths)
euler_stddev = np.std(euler_paths)
self.assertAllClose((mean, stddev), (euler_mean, euler_stddev), rtol=0.05, atol=0.05)
|
Tests path properties for SABR.
We construct the following Ito process.
```
dF_t = v_t * F_t ^ beta * dW_{F,t}
dv_t = volvol * v_t * dW_{v,t}
dW_{F,t} * dW_{v,t} = rho * dt
```
`F_t` is the forward. `v_t` is volatility. `beta` is the CEV parameter.
`volvol` is volatility of volatility. `W_{F,t}` and `W_{v,t}` are two
correlated Wiener processes with instantaneous correlation `rho`.
Args:
supply_grad_vol_fn: A bool. Whether or not to supply a grad_volatility_fn.
|
github-repos
|
def assert_same_rank(self, other):
other = as_shape(other)
if self.ndims is not None and other.ndims is not None:
if self.ndims != other.ndims:
raise ValueError(
"Shapes %s and %s must have the same rank" % (self, other)
)
|
Raises an exception if `self` and `other` do not have convertible ranks.
Args:
other: Another `TensorShape`.
Raises:
ValueError: If `self` and `other` do not represent shapes with the
same rank.
|
juraj-google-style
|
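For reference, the public tf.TensorShape API behaves like the method above; a short sketch:

import tensorflow as tf

tf.TensorShape([2, 3]).assert_same_rank(tf.TensorShape([4, 5]))  # passes: both rank 2
tf.TensorShape([2, 3]).assert_same_rank(tf.TensorShape(None))    # passes: unknown rank is not checked
# tf.TensorShape([2, 3]).assert_same_rank(tf.TensorShape([4]))   # would raise ValueError: ranks differ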
def is_kdump_iommu_enabled(self):
for line in self._boot_entries:
if (line.cmdline and (IOMMU in line.cmdline)):
return True
return False
|
Does any kernel have 'intel_iommu=on' set?
Returns:
(bool): ``True`` when 'intel_iommu=on' is set, otherwise returns ``False``
|
codesearchnet
|
def add_data(self, data):
if self.data_size - self.data_index < len(data):
return Error.DESTINATION_BUFFER_TOO_SMALL
if self.in_progress is not None:
self.in_progress.data += data
return Error.NO_ERROR
|
Add data to the currently in progress entry.
Args:
data (bytes): The data that we want to add.
Returns:
int: An error code
|
juraj-google-style
|
def _apply_discount(values, discount_factors, exercise_index):
return discount_factors[exercise_index + 1] / discount_factors[exercise_index] * values
|
Returns discounted values at the exercise time.
Args:
values: A real `Tensor` of shape `[num_samples, batch_size]`. Tracks the
optimal cashflow of each sample path for each payoff dimension at
`exercise_index`.
discount_factors: A `Tensor` of shape
`[num_exercise_times + 1, num_samples, batch_size]`. The `dtype` should be
the same as of `samples`.
exercise_index: An integer scalar `Tensor` representing the index of the
exercise time of interest. Should be less than `num_exercise_times`.
Returns:
A `[num_samples, batch_size]` `Tensor` containing `values` discounted from
the exercise time indexed by `exercise_index + 1` back to the exercise time
indexed by `exercise_index`, i.e. `values` scaled by the ratio of the two
corresponding discount factors.
|
github-repos
|
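A tiny numeric sketch of the discounting above; shapes are reduced to a single sample and batch element and the numbers are made up:

import tensorflow as tf

# discount_factors has shape [num_exercise_times + 1, num_samples, batch_size]
discount_factors = tf.constant([[[1.00]], [[0.95]], [[0.90]]])
values = tf.constant([[100.0]])                       # cashflow known at exercise time index 2
print(_apply_discount(values, discount_factors, 1))   # 0.90 / 0.95 * 100 ~= 94.74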
def Resolve(self, env, resolved_params):
raise NotImplementedError('Resolve() is not implemented: ' + self.name)
|
Resolve object.
It resolves any internal attributes with unresolved values, then returns
the resolved values, which can then be used in the graph.
Args:
env: Environment with all information necessary to resolve internal
attributes.
resolved_params: Resolved values which will possibly be referenced by
internal attributes.
Raises:
NotImplementedError
|
github-repos
|
def pack(self, value=None):
if (value is None):
output = self.header.pack()
output += self.value.pack()
return output
elif isinstance(value, type(self)):
return value.pack()
else:
msg = '{} is not an instance of {}'.format(value, type(self).__name__)
raise PackException(msg)
|
Pack the TLV in a binary representation.
Args:
    value: If provided, a TLV of this same class to pack instead of self.
Returns:
    bytes: Binary representation of the struct object.
Raises:
    :exc:`~.exceptions.PackException`: If `value` is not an instance of this class.
|
codesearchnet
|
def experimental_make_numpy_dataset(self, numpy_input, session=None):
return self.extended.experimental_make_numpy_dataset(numpy_input, session=session)
|
Makes a tf.data.Dataset for input provided via a numpy array.
This avoids adding `numpy_input` as a large constant in the graph,
and copies the data to the machine or machines that will be processing
the input.
Note that you will likely need to use
tf.distribute.Strategy.experimental_distribute_dataset
with the returned dataset to further distribute it with the strategy.
Example:
```
numpy_input = np.ones([10], dtype=np.float32)
dataset = strategy.experimental_make_numpy_dataset(numpy_input)
dist_dataset = strategy.experimental_distribute_dataset(dataset)
```
Args:
numpy_input: A nest of NumPy input arrays that will be converted into a
dataset. Note that lists of Numpy arrays are stacked, as that is normal
`tf.data.Dataset` behavior.
session: (TensorFlow v1.x graph execution only) A session used for
initialization.
Returns:
A `tf.data.Dataset` representing `numpy_input`.
|
github-repos
|
def __init__(self, initial_learning_rate, decay_steps, alpha=0.0, name=None):
super(CosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.alpha = alpha
self.name = name
|
Applies cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
alpha: A scalar `float32` or `float64` Tensor or a Python number.
Minimum learning rate value as a fraction of `initial_learning_rate`.
name: String. Optional name of the operation. Defaults to 'CosineDecay'.
|
github-repos
|
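Typical usage, assuming this is the Keras schedule of the same name (tf.keras.optimizers.schedules.CosineDecay):

import tensorflow as tf

lr_schedule = tf.keras.optimizers.schedules.CosineDecay(
    initial_learning_rate=0.1, decay_steps=1000, alpha=0.01)
optimizer = tf.keras.optimizers.SGD(learning_rate=lr_schedule)
print(float(lr_schedule(0)), float(lr_schedule(1000)))  # 0.1 at step 0, 0.1 * alpha once decay_steps is reached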
def _ParseInformationalOptions(self, options):
self._debug_mode = getattr(options, 'debug', False)
self._quiet_mode = getattr(options, 'quiet', False)
if self._debug_mode and self._quiet_mode:
logger.warning(
'Cannot use debug and quiet mode at the same time, defaulting to '
'debug output.')
|
Parses the informational options.
Args:
options (argparse.Namespace): command line arguments.
|
juraj-google-style
|
def add_section(self, section):
if section in self.sections():
raise DuplicateSectionError(section)
if isinstance(section, str):
section = Section(section, container=self)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
self._structure.append(section)
|
Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if the parameter is neither a
string nor a :class:`Section`.
Args:
    section (str or :class:`Section`): name or Section type
|
juraj-google-style
|
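A hedged usage sketch; `Config` stands in for whatever container class defines add_section here, and the Section constructor call is an assumption:

cfg = Config()                      # hypothetical container exposing add_section/sections
cfg.add_section('database')         # by name - wrapped in a Section internally
cfg.add_section(Section('cache'))   # or pass a Section instance directly
cfg.add_section('database')         # raises DuplicateSectionError
cfg.add_section(42)                 # raises ValueError: neither str nor Section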
def get_flat_neurites(neuron, tol=0.1, method='ratio'):
return [n for n in neuron.neurites if is_flat(n, tol, method)]
|
Check if a neuron has neurites that are flat within a tolerance
Args:
    neuron(Neuron): neuron to operate on
    tol(float): the tolerance or the ratio
    method(string): 'tolerance' or 'ratio' described in :meth:`is_flat`
Returns:
    List of the neurites in the neuron that are flat with respect to
    the given criteria
|
codesearchnet
|
def sample(reader, writer, n, start=None, stop=None, tsCol=None, writeSampleOnly=True):
rows = list(reader)
if (tsCol is not None):
ts = rows[0][tsCol]
inc = (rows[1][tsCol] - ts)
if (start is None):
start = 0
if (stop is None):
stop = (len(rows) - 1)
initialN = ((stop - start) + 1)
numDeletes = (initialN - n)
for i in range(numDeletes):
delIndex = random.randint(start, (stop - i))
del rows[delIndex]
if writeSampleOnly:
rows = rows[start:(start + n)]
if (tsCol is not None):
ts = rows[0][tsCol]
for row in rows:
if (tsCol is not None):
row[tsCol] = ts
ts += inc
writer.appendRecord(row)
|
Samples n rows.
Args:
reader: A FileRecordStream object with input data.
writer: A FileRecordStream object to write output data to.
n: The number of elements to sample.
start: The first row in the range to sample from.
stop: The last row in the range to sample from.
tsCol: If specified, the timestamp column to update.
writeSampleOnly: If False, the rows before start are written before the
sample and the rows after stop are written after the sample.
|
codesearchnet
|
def clipped_zoom(img, zoom_factor):
    h = img.shape[0]
    ch = int(np.ceil(h / float(zoom_factor)))
    top_h = (h - ch) // 2
    w = img.shape[1]
    cw = int(np.ceil(w / float(zoom_factor)))
    top_w = (w - cw) // 2
    img = tfds.core.lazy_imports.scipy.ndimage.zoom(
        img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1), order=1)
    trim_top_h = (img.shape[0] - h) // 2
    trim_top_w = (img.shape[1] - w) // 2
    return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w]
|
Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: float, zoom factor (>= 1) by which to magnify the central crop.
Returns:
numpy array, zoomed image after clipping.
|
codesearchnet
|
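A small shape check, assuming tensorflow_datasets and scipy are installed (they back the ndimage.zoom call above); the output keeps the input's height and width:

import numpy as np

img = np.random.rand(224, 224, 3).astype(np.float32)
zoomed = clipped_zoom(img, 1.3)
print(zoomed.shape)  # (224, 224, 3) - the central crop is zoomed back to the original size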
def recipe_dv360_data_warehouse(config, auth_bigquery, auth_dv, recipe_slug, partners):
dataset(config, {'description': 'Create a dataset for bigquery tables.', 'auth': auth_bigquery, 'dataset': recipe_slug})
google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'partners.get', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'legacy': False, 'query': 'SELECT CAST(partnerId AS STRING) partnerId FROM (SELECT DISTINCT * FROM UNNEST({partners}) AS partnerId)', 'parameters': {'partners': partners}}}, 'iterate': False, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Partners'}}})
google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(partnerId\nAS STRING) partnerId FROM `DV360_Partners`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Advertisers'}}})
google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.insertionOrders.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_InsertionOrders'}}})
google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.lineItems.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_LineItems'}}})
google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.campaigns.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Campaigns'}}})
google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.channels.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Channels'}}})
google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.creatives.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Creatives'}}})
google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'inventorySources.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Inventory_Sources'}}})
google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'googleAudiences.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Google_Audiences'}}})
google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'combinedAudiences.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Combined_Audiences'}}})
|
Deploy a BigQuery dataset mirroring DV360 account structure. Foundation for
solutions on top.
Args:
auth_bigquery (authentication) - Credentials used for writing data.
auth_dv (authentication) - Credentials used for reading data.
recipe_slug (string) - Name of Google BigQuery dataset to create.
partners (integer_list) - List of account ids to pull.
|
github-repos
|
def subscribe(self, devices_to_bind=[]):
if (self.entity_api_key == ''):
return {'status': 'failure', 'response': 'No API key found in request'}
self.bind(devices_to_bind)
loop = asyncio.new_event_loop()
t1 = threading.Thread(target=self.start_subscribe_worker, args=(loop,))
t1.daemon = True
t1.start()
|
This function allows an entity to subscribe for data from the devices specified in the bind operation. It
creates a thread with an event loop to manage the tasks created in start_subscribe_worker.
Args:
devices_to_bind (list): an array of devices to listen to
|
codesearchnet
|
def bootstrap(score_objs, n_boot=1000):
all_samples = np.random.choice(score_objs, size=(n_boot, len(score_objs)), replace=True)
return all_samples.sum(axis=1)
|
Given a set of DistributedROC or DistributedReliability objects, this function performs a
bootstrap resampling of the objects and returns n_boot aggregations of them.
Args:
score_objs: A list of DistributedROC or DistributedReliability objects. Objects must have an __add__ method
n_boot (int): Number of bootstrap samples
Returns:
An array of DistributedROC or DistributedReliability
|
juraj-google-style
|
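The mechanics work for anything NumPy can sum; a sketch with plain integers standing in for the score objects:

import numpy as np

np.random.seed(0)
scores = np.array([1, 2, 3, 4])   # stand-ins for DistributedROC objects
agg = bootstrap(scores, n_boot=5)
print(agg.shape)                  # (5,): each entry sums one resample of len(scores) draws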
def request_with_retry(func, *args, **kwargs):
max_retries = kwargs.pop('max_retries', 30)
sleep = 2
retry_count = 0
while True:
try:
response = func(*args, **kwargs)
response.raise_for_status()
return response
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout) as e:
if (retry_count == max_retries):
return e
retry_count += 1
delay = (sleep + ((random.random() * 0.25) * sleep))
if (isinstance(e, requests.exceptions.HTTPError) and (e.response.status_code == 429)):
logger.info(('Rate limit exceeded, retrying in %s seconds' % delay))
else:
logger.warning('requests_with_retry encountered retryable exception: %s. args: %s, kwargs: %s', e, args, kwargs)
time.sleep(delay)
sleep *= 2
if (sleep > MAX_SLEEP_SECONDS):
sleep = MAX_SLEEP_SECONDS
        except requests.exceptions.RequestException as e:
            try:
                logger.error(response.json()['error'])
            except Exception:
                # response may be unset, or not JSON, if func() failed before returning
                pass
            logger.exception('requests_with_retry encountered unretryable exception: %s', e)
            return e
|
Perform a requests http call, retrying with exponential backoff.
Args:
func: An http-requesting function to call, like requests.post
max_retries: Maximum retries before giving up. By default we retry 30 times in ~2 hours before dropping the chunk
*args: passed through to func
**kwargs: passed through to func
|
codesearchnet
|
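A minimal call sketch; the URL is a placeholder:

import requests

resp = request_with_retry(requests.get, 'https://example.com/api/status',
                          timeout=10, max_retries=5)
if isinstance(resp, requests.Response):
    print(resp.status_code)
else:
    print('gave up after retries:', resp)  # the final exception is returned, not raised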
def __init__(self, resolver_context):
super(ZipFile, self).__init__(resolver_context)
self._compressed_data = b''
self._current_offset = 0
self._file_system = None
self._realign_offset = True
self._uncompressed_data = b''
self._uncompressed_data_offset = 0
self._uncompressed_data_size = 0
self._uncompressed_stream_size = None
self._zip_ext_file = None
self._zip_file = None
self._zip_info = None
|
Initializes a file-like object.
Args:
resolver_context (Context): resolver context.
|
juraj-google-style
|
class Blip2Encoder(nn.Module):
def __init__(self, config: Blip2Config):
super().__init__()
self.config = config
self.layers = nn.ModuleList([Blip2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`Blip2EncoderLayer`].
Args:
config (`Blip2Config`):
The corresponding vision configuration for the `Blip2Encoder`.
|
github-repos
|
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
with _handle_graph(self.handle), self._assign_dependencies():
assign_add_op = gen_resource_variable_ops.assign_add_variable_op(self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name)
if read_value:
return self._lazy_read(assign_add_op)
return assign_add_op
|
Adds a value to this variable.
Args:
delta: A `Tensor`. The value to add to this variable.
use_locking: If `True`, use locking during the operation.
name: The name to use for the operation.
read_value: A `bool`. Whether to read and return the new value of the
variable or not.
Returns:
If `read_value` is `True`, this method will return the new value of the
variable after the assignment has completed. Otherwise, when in graph mode
it will return the `Operation` that does the assignment, and when in eager
mode it will return `None`.
|
github-repos
|
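In eager mode the public tf.Variable path, which is backed by the resource-variable op above, behaves like this short sketch:

import tensorflow as tf

v = tf.Variable(1.0)
v.assign_add(2.0)   # read_value=True by default, so the updated value is returned
print(v.numpy())    # 3.0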
def execute_plan(plan):
results = [action() for action in plan]
return [result for result in results if actns.step_has_failed(result)]
|
Execute the plan.
Args:
plan (:obj:`list` of :obj:`actions.Step`): The plan we want to execute.
Returns:
(:obj:`list` of :obj:`actions.Step`): A list of failed actions.
|
codesearchnet
|
def __init__(self, functions):
self._functions = functions
self._location_key_to_location = {}
|
Constructor.
Args:
functions: A `Functions` object.
|
github-repos
|
def process(self, element: Entity) -> Optional[Iterable[Text]]:
text_line = element.properties.get('content', '')
if not text_line:
self.empty_line_counter.inc()
return None
words = re.findall("[A-Za-z\\']+", text_line)
for w in words:
self.word_length_counter.inc(len(w))
self.word_lengths_dist.update(len(w))
self.word_counter.inc()
return words
|
Extract words from the 'content' property of Cloud Datastore entities.
The element is a line of text. If the line is blank, note that, too.
Args:
element: the input entity to be processed
Returns:
A list of words found.
|
github-repos
|
def from_python_value(cls, value: Any) -> 'DType':
try:
return PY_TYPE_TO_DTYPE[type(value)]
except KeyError as e:
raise ValueError(f"Couldn't find a dtype to store a value of type {type(value)}. Value is: {value}") from e
|
Returns the corresponding DType for the given python-native value.
Args:
value: A python variable to infer DType from (e.g: str, float).
Returns:
The corresponding DType.
Raises:
ValueError: If there's no DType implemented for this type of value.
|
github-repos
|
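A hedged usage sketch, assuming from_python_value is exposed as a classmethod; the exact DType members returned depend on the PY_TYPE_TO_DTYPE mapping, which is not shown here:

DType.from_python_value(1.5)        # the DType registered for float
DType.from_python_value('text')     # the DType registered for str
DType.from_python_value(object())   # raises ValueError: no dtype for <class 'object'>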
def transformer_image_decoder(targets, encoder_output, ed_attention_bias, hparams, name=None):
with tf.variable_scope(name, default_name='transformer_dec'):
batch_size = common_layers.shape_list(targets)[0]
targets = tf.reshape(targets, [batch_size, hparams.img_len, hparams.img_len, (hparams.num_channels * hparams.hidden_size)])
(decoder_input, _, _) = cia.prepare_decoder(targets, hparams)
decoder_output = cia.transformer_decoder_layers(decoder_input, encoder_output, (hparams.num_decoder_layers or hparams.num_hidden_layers), hparams, attention_type=hparams.dec_attention_type, encoder_decoder_attention_bias=ed_attention_bias, name='decoder')
decoder_output = tf.reshape(decoder_output, [batch_size, hparams.img_len, (hparams.img_len * hparams.num_channels), hparams.hidden_size])
return decoder_output
|
Transformer image decoder over targets with local attention.
Args:
targets: Tensor of shape [batch, ...], and whose size is batch * height *
width * hparams.num_channels * hparams.hidden_size.
encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
ed_attention_bias: Tensor which broadcasts with shape [batch,
hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
hparams: HParams.
name: string, variable scope.
Returns:
Tensor of shape [batch, height, width * hparams.num_channels,
hparams.hidden_size].
|
codesearchnet
|
def set_action_env_var(environ_cp, var_name, query_item, enabled_by_default, question=None, yes_reply=None, no_reply=None, bazel_config_name=None):
var = int(get_var(environ_cp, var_name, query_item, enabled_by_default, question, yes_reply, no_reply))
if not bazel_config_name:
write_action_env_to_bazelrc(var_name, var)
elif var:
write_to_bazelrc('build --config=%s' % bazel_config_name)
environ_cp[var_name] = str(var)
|
Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
bazel_config_name: adding config to .bazelrc instead of action_env.
|
github-repos
|
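A configure-script style call sketch, assuming the surrounding helpers (get_var, write_to_bazelrc) are importable; get_var will prompt, or read TF_NEED_CUDA from the environment, before the choice is written to .bazelrc:

import os

environ_cp = dict(os.environ)
set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA for Nvidia GPUs', False,
                   bazel_config_name='cuda')
print(environ_cp['TF_NEED_CUDA'])  # '0' or '1' depending on the answer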
def add_key_value(self, key, value):
key = self._metadata_map.get(key, key)
if key in ['dateAdded', 'eventDate', 'firstSeen', 'publishDate']:
self._group_data[key] = self._utils.format_datetime(
value, date_format='%Y-%m-%dT%H:%M:%SZ'
)
elif key == 'file_content':
pass
else:
self._group_data[key] = value
|
Add custom field to Group object.
.. note:: The key must be the exact name required by the batch schema.
Example::
document = tcex.batch.group('Document', 'My Document')
document.add_key_value('fileName', 'something.pdf')
Args:
key (str): The field key to add to the JSON batch data.
value (str): The field value to add to the JSON batch data.
|
juraj-google-style
|