code (string, lengths 20-4.93k) | docstring (string, lengths 33-1.27k) | source (string, 3 classes) |
---|---|---|
def item_to_mrc(code, val):
if isinstance(val, basestring):
return [val_to_mrc(code, val)]
if isinstance(val, dict):
val = [val]
return dicts_to_mrc(code, val)
|
Convert `val` to MRC, whether it is dict or string.
Args:
code (str): Code of the field.
val (str or dict): Value of the field.
Returns:
list: MRC lines for output template.
|
codesearchnet
|
def from_hising(cls, h, J, offset=None):
poly = {(k,): v for (k, v) in h.items()}
poly.update(J)
if (offset is not None):
poly[frozenset([])] = offset
return cls(poly, Vartype.SPIN)
|
Construct a binary polynomial from a higher-order Ising problem.
Args:
h (dict):
The linear biases.
J (dict):
The higher-order biases.
offset (optional, default=0.0):
Constant offset applied to the model.
Returns:
:obj:`.BinaryPolynomial`
Examples:
>>> poly = dimod.BinaryPolynomial.from_hising({'a': 2}, {'ab': -1}, 0)
|
codesearchnet
|
def auto_batch_size(sequence_length,
mesh_shape,
layout_rules,
tokens_per_split=2048):
num_splits = mtf.tensor_dim_to_mesh_dim_size(
layout_rules, mesh_shape, mtf.Dimension("batch", 0))
ret = max(1, tokens_per_split // sequence_length) * num_splits
tf.logging.info(
"AUTO_BATCH_SIZE tokens_per_split=%s num_splits=%s"
" sequence_length=%s batch_size=%s"
% (tokens_per_split, num_splits, sequence_length, ret))
return ret
|
Automatically compute batch size.
Args:
sequence_length: an integer
mesh_shape: an input to mtf.convert_to_shape()
layout_rules: an input to mtf.convert_to_layout_rules()
tokens_per_split: an integer
Returns:
an integer
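Example:
Worked arithmetic, assuming the batch size is computed as
max(1, tokens_per_split // sequence_length) * num_splits (as in the code above):
with sequence_length=1024, tokens_per_split=2048 and num_splits=4, the chosen
batch size is max(1, 2) * 4 = 8.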
|
juraj-google-style
|
def get_global_vars(func):
closure = getclosurevars(func)
if closure['nonlocal']:
raise TypeError("Can't launch a job with closure variables: %s" %
closure['nonlocals'].keys())
globalvars = dict(modules={},
functions={},
vars={})
for name, value in closure['global'].items():
if inspect.ismodule(value):
globalvars['modules'][name] = value.__name__
elif inspect.isfunction(value) or inspect.ismethod(value):
globalvars['functions'][name] = value
else:
globalvars['vars'][name] = value
return globalvars
|
Store any methods or variables bound from the function's closure
Args:
func (function): function to inspect
Returns:
dict: mapping of variable names to globally bound variables
|
juraj-google-style
|
def usufyToGmlExport(d, fPath):
try:
oldData = nx.read_gml(fPath)
except UnicodeDecodeError as e:
print(('UnicodeDecodeError:\t' + str(e)))
print('Something went wrong when reading the .gml file relating to the decoding of UNICODE.')
import time as time
fPath += ('_' + str(time.time()))
print(((('To avoid losing data, the output file will be renamed to use the timestamp as:\n' + fPath) + '_') + str(time.time())))
print()
oldData = nx.Graph()
except Exception as e:
oldData = nx.Graph()
newGraph = _generateGraphData(d, oldData)
nx.write_gml(newGraph, fPath)
|
Workaround to export data to a .gml file.
Args:
-----
d: Data to export.
fPath: File path for the output file.
|
codesearchnet
|
def unflatten1(flat_list, reverse_list):
unflat_list2 = [[flat_list[index] for index in tup]
for tup in reverse_list]
return unflat_list2
|
Rebuilds unflat list from invertible_flatten1
Args:
flat_list (list): the flattened list
reverse_list (list): the list which undoes the flattening
Returns:
unflat_list2: original nested list
SeeAlso:
invertible_flatten1
invertible_flatten2
unflatten2
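Example:
A small hand-checked case; each tuple in ``reverse_list`` holds the flat
indices of one original sublist:
>>> unflatten1(['a', 'b', 'c', 'd'], [(0, 1), (2,), (3,)])
[['a', 'b'], ['c'], ['d']]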
|
juraj-google-style
|
def connect(self, host='localhost'):
get_logger().info("Connecting to RabbitMQ server...")
self._conn = pika.BlockingConnection(
pika.ConnectionParameters(host=host))
self._channel = self._conn.channel()
get_logger().info("Declaring topic exchanger {}...".format(
self.exchange))
self._channel.exchange_declare(exchange=self.exchange, type='topic')
get_logger().info("Creating RabbitMQ queue...")
result = self._channel.queue_declare(exclusive=True)
self._queue_name = result.method.queue
if self.listen_all:
get_logger().info(
"Binding queue to exchanger {} (listen all)...".format(
self.exchange
)
)
self._channel.queue_bind(
exchange=self.exchange,
queue=self._queue_name,
routing_key='*'
)
else:
for routing_key in self.topics:
get_logger().info(
"Binding queue to exchanger {} "
"with routing key {}...".format(
self.exchange, routing_key)
)
self._channel.queue_bind(
exchange=self.exchange,
queue=self._queue_name,
routing_key=routing_key
)
get_logger().info("Binding callback...")
self._channel.basic_consume(
self._callback, queue=self._queue_name, no_ack=True)
|
Connect to the server and set everything up.
Args:
host: hostname to connect to
|
juraj-google-style
|
def is_valid(self, tol: float = DISTANCE_TOLERANCE) -> bool:
if len(self.sites) == 1:
return True
all_dists = self.distance_matrix[np.triu_indices(len(self), 1)]
return bool(np.min(all_dists) > tol)
|
True if SiteCollection does not contain atoms that are too close
together. Note that the distance definition is based on type of
SiteCollection. Cartesian distances are used for non-periodic
Molecules, while PBC is taken into account for periodic structures.
Args:
tol (float): Distance tolerance. Default is 0.5A.
Returns:
(bool) True if SiteCollection does not contain atoms that are too
close together.
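Example:
Illustrative sketch (assumes pymatgen's ``Molecule`` and the default 0.5 A
tolerance); two H atoms placed only 0.3 A apart are reported as invalid:
>>> mol = Molecule(["H", "H"], [[0, 0, 0], [0, 0, 0.3]])
>>> mol.is_valid()
False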
|
juraj-google-style
|
def relaxed_value_for_var(value, var):
assert isinstance(var, tf.Variable)
name = var.op.name
varshape = tuple(var.get_shape().as_list())
if varshape != value.shape:
if np.prod(varshape) != np.prod(value.shape):
raise ValueError(
"Trying to load a tensor of shape {} into the variable '{}' whose shape is {}.".format(
value.shape, name, varshape))
logger.warn("The tensor is reshaped from {} to {} when assigned to '{}'".format(
value.shape, varshape, name))
value = value.reshape(varshape)
def upcast(vartype, valtype):
if vartype == tf.float64 and valtype == np.float32:
return np.float64
if vartype in [tf.int64, tf.int32] and valtype in [np.int32, np.int16, np.int8]:
return np.int64 if vartype == tf.int64 else np.int32
return None
if hasattr(value, 'dtype'):
vartype = var.dtype.as_numpy_dtype
if vartype != value.dtype:
msg = "Variable {} has dtype {} but was given a value of dtype {}.".format(name, vartype, value.dtype)
newtype = upcast(var.dtype.base_dtype, value.dtype)
if newtype is not None:
value = newtype(value)
logger.warn(msg + " Load it after casting!")
else:
assert vartype == value.dtype, msg
return value
|
Returns a relaxed (possibly reshaped or upcast) version of value,
to be loaded to the given variable.
Args:
value (ndarray): a numpy array to be loaded to var
var (tf.Variable):
Returns:
ndarray: a possibly reshaped or casted version of value
|
juraj-google-style
|
def _CheckStorageFile(self, storage_file_path):
if os.path.exists(storage_file_path):
if (not os.path.isfile(storage_file_path)):
raise errors.BadConfigOption('Storage file: {0:s} already exists and is not a file.'.format(storage_file_path))
logger.warning('Appending to an already existing storage file.')
dirname = os.path.dirname(storage_file_path)
if (not dirname):
dirname = '.'
if (not os.access(dirname, os.W_OK)):
raise errors.BadConfigOption('Unable to write to storage file: {0:s}'.format(storage_file_path))
|
Checks if the storage file path is valid.
Args:
storage_file_path (str): path of the storage file.
Raises:
BadConfigOption: if the storage file path is invalid.
|
codesearchnet
|
def listdir(*paths, glob=None):
path = genpath(*paths)
names = os.listdir(path)
if glob is not None:
names = fnmatch.filter(names, glob)
retn = [os.path.join(path, name) for name in names]
return retn
|
List the (optionally glob filtered) full paths from a dir.
Args:
*paths ([str,...]): A list of path elements
glob (str): An optional fnmatch glob str
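Example:
Illustrative usage; the directory and file names are placeholders::
listdir('/tmp/data', glob='*.txt')
# -> ['/tmp/data/a.txt'] if the directory contains a.txt and b.log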
|
juraj-google-style
|
def __getitem__(self, thing: Any) -> np.ndarray:
if type(thing) is slice or type(thing) is np.ndarray or type(thing) is int:
am = AttributeManager(None, axis=self.axis)
for key, val in self.items():
am[key] = val[thing]
return am
elif type(thing) is tuple:
result: np.ndarray = None
for t in thing:
if t in self.__dict__["storage"]:
if result is None:
result = self.__getattr__(t)
else:
vals = self.__getattr__(t)
if vals.dtype != result.dtype:
raise AttributeError(f"Cannot stack attributes of different types ({vals.dtype} and {result.dtype})")
result = np.vstack((result, vals)).transpose()
if result is None:
raise AttributeError(f"'{type(self)}' object has no attribute {thing}")
else:
return result
else:
return self.__getattr__(thing)
|
Return a named attribute, or a slice through all the attributes
Args:
thing: if string, return the named attribute
if slice, np.ndarray or int, return a slice through all the attributes
|
juraj-google-style
|
def fit(self, X, y):
self.X = X
self.y = y
|
Fit
Args:
X (np.array): Array of hyperparameter values with shape (n_samples, len(tunables))
y (np.array): Array of scores with shape (n_samples, )
|
juraj-google-style
|
def Delete(self, request, global_params=None):
config = self.GetMethodConfig('Delete')
return self._RunMethod(config, request, global_params=global_params)
|
Deletes the model specified by modelId from the dataset.
Args:
request: (BigqueryModelsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BigqueryModelsDeleteResponse) The response message.
|
github-repos
|
def is_flash_attention_enabled():
from keras.src.backend.common import global_state
return global_state.get_global_attribute('flash_attention', default=None)
|
Checks whether flash attention is globally enabled in Keras.
Flash attention is a performance-optimized method for computing attention
in large models, such as transformers, allowing for faster and more
memory-efficient operations. This function checks the global Keras
configuration to determine if flash attention is enabled for compatible
layers (e.g., `MultiHeadAttention`).
Note that enabling flash attention does not guarantee it will always be
used. Typically, the inputs must be in `float16` or `bfloat16` dtype, and
input layout requirements may vary depending on the backend.
Returns:
`False` if disabled; otherwise, it indicates that it is enabled.
|
github-repos
|
def cancel_all(self, product_id=None):
if (product_id is not None):
params = {'product_id': product_id}
else:
params = None
return self._send_message('delete', '/orders', params=params)
|
With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
]
|
codesearchnet
|
def add_import(self, symbol, source_module_name, source_name, dest_module_name, dest_name):
if source_module_name.endswith('python.modules_with_exports'):
source_module_name = symbol.__module__
import_str = self.format_import(source_module_name, source_name, dest_name)
full_api_name = dest_name
if dest_module_name:
full_api_name = dest_module_name + '.' + full_api_name
symbol_id = -1 if not symbol else id(symbol)
self._check_already_imported(symbol_id, full_api_name)
if not dest_module_name and dest_name.startswith('_'):
self._underscore_names_in_root.add(dest_name)
priority = 0
if symbol:
if hasattr(symbol, '__module__'):
priority = int(source_module_name == symbol.__module__)
if hasattr(symbol, '__name__'):
priority += int(source_name == symbol.__name__)
self._module_imports[dest_module_name][full_api_name].add((import_str, priority))
|
Adds this import to module_imports.
Args:
symbol: TensorFlow Python symbol.
source_module_name: (string) Module to import from.
source_name: (string) Name of the symbol to import.
dest_module_name: (string) Module name to add import to.
dest_name: (string) Import the symbol using this name.
Raises:
SymbolExposedTwiceError: Raised when an import with the same
dest_name has already been added to dest_module_name.
|
github-repos
|
def get_list(self, **query_params):
list_json = self.get_list_json(self.base_uri, query_params=query_params)
return self.create_list(list_json)
|
Get list information for this card. Returns a List object.
Returns:
List: The list this card is attached to
|
codesearchnet
|
def get_sonos_playlist_by_attr(self, attr_name, match):
for sonos_playlist in self.get_sonos_playlists():
if (getattr(sonos_playlist, attr_name) == match):
return sonos_playlist
raise ValueError('No match on "{0}" for value "{1}"'.format(attr_name, match))
|
Return the first Sonos Playlist DidlPlaylistContainer that
matches the attribute specified.
Args:
attr_name (str): DidlPlaylistContainer attribute to compare. The
most useful being: 'title' and 'item_id'.
match (str): Value to match.
Returns:
(:class:`~.soco.data_structures.DidlPlaylistContainer`): The
first matching playlist object.
Raises:
(AttributeError): If indicated attribute name does not exist.
(ValueError): If a match can not be found.
Example::
device.get_sonos_playlist_by_attr('title', 'Foo')
device.get_sonos_playlist_by_attr('item_id', 'SQ:3')
|
codesearchnet
|
def _ParseCacheEntries(self, parser_mediator, index_table, data_block_files):
for cache_address in index_table:
cache_address_chain_length = 0
while cache_address.value != 0:
if cache_address_chain_length >= 64:
parser_mediator.ProduceExtractionWarning(
'Maximum allowed cache address chain length reached.')
break
data_block_file_object = data_block_files.get(
cache_address.filename, None)
if not data_block_file_object:
message = 'Cache address: 0x{0:08x} missing data file.'.format(
cache_address.value)
parser_mediator.ProduceExtractionWarning(message)
break
try:
cache_entry = self._data_block_file_parser.ParseCacheEntry(
data_block_file_object, cache_address.block_offset)
except (IOError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'Unable to parse cache entry with error: {0!s}'.format(
exception))
break
event_data = ChromeCacheEntryEventData()
event_data.original_url = cache_entry.original_url
date_time = dfdatetime_webkit_time.WebKitTime(
timestamp=cache_entry.creation_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
cache_address = cache_entry.next
cache_address_chain_length += 1
|
Parses Chrome Cache file entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
index_table (list[CacheAddress]): the cache addresses which are stored in
the index file.
data_block_files (dict[str: file]): look up table for the data block
file-like object handles.
|
juraj-google-style
|
def get_storage(request):
storage_model = oauth2_settings.storage_model
user_property = oauth2_settings.storage_model_user_property
credentials_property = oauth2_settings.storage_model_credentials_property
if storage_model:
module_name, class_name = storage_model.rsplit('.', 1)
module = importlib.import_module(module_name)
storage_model_class = getattr(module, class_name)
return storage.DjangoORMStorage(storage_model_class,
user_property,
request.user,
credentials_property)
else:
return dictionary_storage.DictionaryStorage(
request.session, key=_CREDENTIALS_KEY)
|
Gets a Credentials storage object provided by the Django OAuth2 Helper
object.
Args:
request: Reference to the current request object.
Returns:
An :class:`oauth2.client.Storage` object.
|
juraj-google-style
|
def save(self, resource):
resource_type = None
xid = None
if isinstance(resource, dict):
resource_type = resource.get('type')
xid = resource.get('xid')
else:
resource_type = resource.type
xid = resource.xid
if resource_type is not None and xid is not None:
saved = True
if resource_type in self.tcex.group_types:
try:
self.groups_shelf[xid] = resource
except Exception:
saved = False
if saved:
try:
del self._groups[xid]
except KeyError:
pass
elif resource_type in self.tcex.indicator_types_data.keys():
try:
self.indicators_shelf[xid] = resource
except Exception:
saved = False
if saved:
try:
del self._indicators[xid]
except KeyError:
pass
|
Save group|indicator dict or object to shelve.
Best effort to save group/indicator data to disk. If for any reason the save fails
the data will still be accessible from the list in memory.
Args:
resource (dict|obj): The Group or Indicator dict or object.
|
juraj-google-style
|
def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None,
scope=None, syntax=None):
if package:
desc_name = '.'.join((package, desc_proto.name))
else:
desc_name = desc_proto.name
if file_desc is None:
file_name = None
else:
file_name = file_desc.name
if scope is None:
scope = {}
nested = [
self._ConvertMessageDescriptor(
nested, desc_name, file_desc, scope, syntax)
for nested in desc_proto.nested_type]
enums = [
self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, scope)
for enum in desc_proto.enum_type]
fields = [self._MakeFieldDescriptor(field, desc_name, index)
for index, field in enumerate(desc_proto.field)]
extensions = [
self._MakeFieldDescriptor(extension, desc_name, index,
is_extension=True)
for index, extension in enumerate(desc_proto.extension)]
oneofs = [
descriptor.OneofDescriptor(desc.name, '.'.join((desc_name, desc.name)),
index, None, [], desc.options)
for index, desc in enumerate(desc_proto.oneof_decl)]
extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range]
if extension_ranges:
is_extendable = True
else:
is_extendable = False
desc = descriptor.Descriptor(
name=desc_proto.name,
full_name=desc_name,
filename=file_name,
containing_type=None,
fields=fields,
oneofs=oneofs,
nested_types=nested,
enum_types=enums,
extensions=extensions,
options=_OptionsOrNone(desc_proto),
is_extendable=is_extendable,
extension_ranges=extension_ranges,
file=file_desc,
serialized_start=None,
serialized_end=None,
syntax=syntax)
for nested in desc.nested_types:
nested.containing_type = desc
for enum in desc.enum_types:
enum.containing_type = desc
for field_index, field_desc in enumerate(desc_proto.field):
if field_desc.HasField('oneof_index'):
oneof_index = field_desc.oneof_index
oneofs[oneof_index].fields.append(fields[field_index])
fields[field_index].containing_oneof = oneofs[oneof_index]
scope[_PrefixWithDot(desc_name)] = desc
self._descriptors[desc_name] = desc
return desc
|
Adds the proto to the pool in the specified package.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: The package the proto should be located in.
file_desc: The file containing this message.
scope: Dict mapping short and full symbols to message and enum types.
syntax: string indicating syntax of the file ("proto2" or "proto3")
Returns:
The added descriptor.
|
juraj-google-style
|
def build_url(self):
url = '{protocol}/{url}/{rest}/{version}/{restapi}/{rscpath}/{query}'.format(protocol=self.schema.protocol, url=self.schema.main_url, rest=self.schema.rest, version=self.schema.version, restapi=self.schema.restApi, rscpath=self.schema.resourcePath, query=self.schema.query)
return url.replace('/None/', '/')
|
Builds the URL for elevations API services based on the data given
by the user.
Returns:
url (str): URL for the elevations API services
|
codesearchnet
|
def _check_sleep(self, op):
delay = 0.3
start_t = time.time()
func = tf.function(lambda: op(delay))
results = self.evaluate(func())
end_t = time.time()
delta_t = end_t - start_t
self.assertEqual(results.shape, tuple())
self.assertGreater(delta_t, 0.9 * delay)
|
Check that one sleep op works in isolation.
See sleep_bin.py for an example of how the synchronous and asynchronous
sleep ops differ in behavior.
Args:
op: The sleep op, either sleep_op.SyncSleep or sleep_op.AsyncSleep.
|
github-repos
|
def setOutBoundLinkQuality(self, LinkQuality):
print '%s call setOutBoundLinkQuality' % self.port
print LinkQuality
try:
cmd = 'macfilter rss add-lqi * %s' % str(LinkQuality)
print cmd
return self.__sendCommand(cmd)[0] == 'Done'
except Exception, e:
ModuleHelper.WriteIntoDebugLogger("setOutBoundLinkQuality() Error: " + str(e))
|
Set custom LinkQualityIn for all messages received from any address
Args:
LinkQuality: a given custom link quality
link quality/link margin mapping table
3: 21 - 255 (dB)
2: 11 - 20 (dB)
1: 3 - 9 (dB)
0: 0 - 2 (dB)
Returns:
True: successful to set the link quality
False: fail to set the link quality
|
juraj-google-style
|
def get_processed_events(self) -> List[Event]:
event_ids = DB.get_list(self._processed_key)
events = []
for event_id in event_ids:
event_str = DB.get_hash_value(self._data_key, event_id)
event_dict = ast.literal_eval(event_str)
event_dict['id'] = event_id
event_dict['subscriber'] = self._subscriber
events.append(Event.from_config(event_dict))
return events
|
Get all processed events.
This method is intended to be used to recover events stuck in the
processed state, which can happen if an event handler processing
a processed event goes down before completing the event processing.
Returns:
list[Events], list of event objects.
|
codesearchnet
|
def read_xyz(cls, buf, start_index=0, get_bonds=True,
nrows=None, engine=None):
frame = pd.read_table(buf, skiprows=2, comment='#',
nrows=nrows,
delim_whitespace=True,
names=['atom', 'x', 'y', 'z'], engine=engine)
remove_digits = partial(re.sub, r'[0-9]+', '')
frame['atom'] = frame['atom'].apply(remove_digits)
molecule = cls(frame)
molecule.index = range(start_index, start_index + len(molecule))
if get_bonds:
molecule.get_bonds(use_lookup=False, set_lookup=True)
return molecule
|
Read a file of coordinate information.
Reads xyz-files.
Args:
buf (str): Filepath or file-like object to read from.
start_index (int):
get_bonds (bool):
nrows (int): Number of rows of file to read.
Note that the first two rows are implicitly excluded.
engine (str): Wrapper for argument of :func:`pandas.read_csv`.
Returns:
Cartesian:
|
juraj-google-style
|
def read_windows_environ():
res = winapi.GetEnvironmentStringsW()
if (not res):
raise ctypes.WinError()
res = ctypes.cast(res, ctypes.POINTER(ctypes.c_wchar))
done = []
current = u''
i = 0
while 1:
c = res[i]
i += 1
if (c == u'\x00'):
if (not current):
break
done.append(current)
current = u''
continue
current += c
dict_ = {}
for entry in done:
try:
(key, value) = entry.split(u'=', 1)
except ValueError:
continue
key = _norm_key(key)
dict_[key] = value
status = winapi.FreeEnvironmentStringsW(res)
if (status == 0):
raise ctypes.WinError()
return dict_
|
Returns a unicode dict of the Windows environment.
Raises:
WindowsEnvironError
|
codesearchnet
|
def convert_upsample(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting upsample...')
if params['mode'] != 'nearest':
raise AssertionError('Cannot convert non-nearest upsampling')
if names == 'short':
tf_name = 'UPSL' + random_string(4)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
if 'height_scale' in params:
scale = (params['height_scale'], params['width_scale'])
elif len(inputs) == 2:
scale = layers[inputs[-1] + '_np'][-2:]
upsampling = keras.layers.UpSampling2D(
size=scale, name=tf_name
)
layers[scope_name] = upsampling(layers[inputs[0]])
|
Convert nearest upsampling layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
juraj-google-style
|
def GetMessages(self, formatter_mediator, event):
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
login_type = event_values.get('type', None)
if login_type is None:
status = 'N/A'
else:
status = self._STATUS_TYPES.get(login_type, 'UNKNOWN')
event_values['status'] = status
return self._ConditionalFormatMessages(event_values)
|
Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
|
juraj-google-style
|
def average(numbers, numtype='float'):
if numtype == 'decimal':
return Decimal(sum(numbers)) / len(numbers)
else:
return float(sum(numbers)) / len(numbers)
|
Calculates the average or mean of a list of numbers
Args:
numbers: a list of integers or floating point numbers.
numtype: string, 'decimal' or 'float'; the type of number to return.
Returns:
The average (mean) of the numbers as a floating point number
or a Decimal object.
Requires:
The decimal module (for ``Decimal`` results).
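Example:
Illustrative usage, hand-checked against the code above:
>>> average([1, 2, 3, 4])
2.5
>>> average([1, 2, 3, 4], numtype='decimal')
Decimal('2.5')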
|
juraj-google-style
|
def _compile_pvariable_expression(self, expr: Expression, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[List[tf.Tensor]]=None) -> TensorFluent:
etype = expr.etype
args = expr.args
name = expr._pvar_to_name(args)
if (name not in scope):
raise ValueError('Variable {} not in scope.'.format(name))
fluent = scope[name]
scope = (args[1] if (args[1] is not None) else [])
if isinstance(fluent, TensorFluent):
fluent = TensorFluent(fluent.tensor, scope, batch=fluent.batch)
elif isinstance(fluent, tf.Tensor):
fluent = TensorFluent(fluent, scope, batch=self.batch_mode)
else:
raise ValueError('Variable in scope must be TensorFluent-like: {}'.format(fluent))
return fluent
|
Compile a pvariable expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL pvariable expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
|
codesearchnet
|
def preview(self, n=10, k='items', kheader='displayLink', klink='link', kdescription='snippet'):
if 'searchType' in self.cseargs:
searchType = self.cseargs['searchType']
else:
searchType = None
items = self.metadata[k]
for i, kv in enumerate(items[:n]):
if 'start' in self.cseargs:
i += int(self.cseargs['start'])
header = '\n[' + str(i) + '] ' + kv[kheader]
print(header)
print('=' * len(header))
if searchType == 'image':
link = '\n' + path.basename(kv[klink])
print(link)
description = '\n' + kv[kdescription]
print(description)
|
Print a preview of the search results.
Args:
n (int):
Maximum number of search results to preview
k (str):
Key in :class:`api.results`.metadata to preview
kheader (str):
Key in :class:`api.results`.metadata[``k``] to use as the header
klink (str):
Key in :class:`api.results`.metadata[``k``] to use as the link if image search
kdescription (str):
Key in :class:`api.results`.metadata[``k``] to use as the description
|
juraj-google-style
|
def _full_shape_filter(t: List, shapes: List) -> bool:
if shapes:
for a_token in t:
if a_token._.full_shape not in shapes:
return False
return True
|
Shape filter
Args:
t: List, list of tokens
shapes: List
Returns: bool
|
juraj-google-style
|
def isbase(path1, path2):
_path1 = forcedir(abspath(path1))
_path2 = forcedir(abspath(path2))
return _path2.startswith(_path1)
|
Check if ``path1`` is a base of ``path2``.
Arguments:
path1 (str): A PyFilesytem path.
path2 (str): A PyFilesytem path.
Returns:
bool: `True` if ``path2`` starts with ``path1``
Example:
>>> isbase('foo/bar', 'foo/bar/baz/egg.txt')
True
|
codesearchnet
|
def _is_quantized_input_stats_required(conversion_flags: _conversion_flags_pb2.ConverterFlags) -> bool:
quantized_inference_types = [_types_pb2.QUANTIZED_UINT8, _types_pb2.QUANTIZED_INT8]
return (conversion_flags.inference_type in quantized_inference_types or conversion_flags.inference_input_type in quantized_inference_types) and (not conversion_flags.post_training_quantize)
|
Checks if the `quantized_input_stats` flag is required for conversion.
Args:
conversion_flags: A protocol buffer describing the conversion process.
Returns:
True, if the `inference_type` or the `inference_input_type` is a quantized
type and it is not post training quantization, else False.
|
github-repos
|
def set_default(self, default: Any, use_default_apply: bool=True, root_path: Optional[utils.KeyPath]=None) -> 'ValueSpec':
|
Sets the default value and returns `self`.
Args:
default: Default value.
use_default_apply: If True, invoke `apply` to the value, otherwise use
default value as is.
root_path: (Optional) The path of the field.
Returns:
ValueSpec itself.
Raises:
ValueError: If default value cannot be applied when use_default_apply
is set to True.
|
github-repos
|
def load_data(self, data, datatype='ttl', namespace=None, graph=None, is_file=False, **kwargs):
log.setLevel(kwargs.get('log_level', self.log_level))
time_start = datetime.datetime.now()
datatype_map = {'ttl': 'text/turtle', 'xml': 'application/rdf+xml', 'rdf': 'application/rdf+xml', 'nt': 'text/plain'}
if is_file:
datatype = data.split(os.path.extsep)[(- 1)]
file_name = data
log.debug('starting data load of %s', file_name)
data = open(data, 'rb').read()
else:
try:
data = data.encode('utf-8')
except AttributeError:
pass
try:
content_type = datatype_map[datatype]
except KeyError:
raise NotImplementedError("'%s' is not an implemented data format" % datatype)
context_uri = pick(graph, self.graph)
result = requests.post(url=self._make_url(namespace), headers={'Content-Type': content_type}, params={'context-uri': context_uri}, data=data)
if (result.status_code == 200):
if is_file:
log.info(' loaded %s into blazegraph - %s', file_name, self.format_response(result.text))
else:
log.info(' loaded data - %s', self.format_response(result.text))
log.setLevel(self.log_level)
return result
else:
raise SyntaxError(result.text)
|
Loads data via file stream from python to triplestore
Args:
-----
data: The data or filepath to load
datatype(['ttl', 'xml', 'rdf']): the type of data to load
namespace: the namespace to use
graph: the graph to load the data to.
is_file(False): If true python will read the data argument as a
filepath, determine the datatype from the file extension,
read the file and send it to blazegraph as a datastream
|
codesearchnet
|
def group(text, size):
if (size <= 0):
raise ValueError('n must be a positive integer')
return [text[i:(i + size)] for i in range(0, len(text), size)]
|
Group ``text`` into blocks of ``size``.
Example:
>>> group("test", 2)
['te', 'st']
Args:
text (str): text to separate
size (int): size of groups to split the text into
Returns:
List of n-sized groups of text
Raises:
ValueError: If ``size`` is not positive
|
codesearchnet
|
def coupling(self, source_y, target_y, weight):
return ((np.ones_like(target_y) * np.mean(source_y)) * weight)
|
How to couple the output of one subsystem to the input of another.
This is a fallback default coupling function that should usually be
replaced with your own.
This example coupling function takes the mean of all variables of the
source subsystem and uses that value weighted by the connection
strength to drive all variables of the target subsystem.
Arguments:
source_y (array of shape (d,)): State of the source subsystem.
target_y (array of shape (d,)): State of target subsystem.
weight (float): the connection strength for this connection.
Returns:
input (array of shape (d,)): Values to drive each variable of the
target system.
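Example:
Illustrative sketch; ``net`` is a hypothetical object exposing this method
and the arrays are plain numpy arrays::
source_y = np.array([1.0, 2.0, 3.0])  # mean is 2.0
net.coupling(source_y, np.zeros(2), weight=0.5)
# -> array([1., 1.])  (mean * weight, broadcast to the target shape)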
|
codesearchnet
|
def coalescence_waiting_times(self, backward=True):
if not isinstance(backward, bool):
raise TypeError("backward must be a bool")
times = list(); lowest_leaf_dist = float('-inf')
for n,d in self.distances_from_root():
if len(n.children) > 1:
times.append(d)
elif len(n.children) == 0 and d > lowest_leaf_dist:
lowest_leaf_dist = d
times.append(lowest_leaf_dist)
times.sort(reverse=backward)
for i in range(len(times)-1):
yield abs(times[i]-times[i+1])
|
Generator over the waiting times of successive coalescence events
Args:
``backward`` (``bool``): ``True`` to go backward in time (i.e., leaves to root), otherwise ``False``
|
juraj-google-style
|
def decode_payload(cls, request):
if (request.headers.get(cls.PAYLOAD_VERSION_HEADER) != cls.PAYLOAD_VERSION):
raise DeprecationWarning('Task is generated by an older incompatible version of mapreduce. Please kill this job manually')
return cls._decode_payload(request.body)
|
Decode task payload.
HugeTask controls its own payload entirely including urlencoding.
It doesn't depend on any particular web framework.
Args:
request: a webapp Request instance.
Returns:
A dict of str to str. The same as the params argument to __init__.
Raises:
DeprecationWarning: When task payload constructed from an older
incompatible version of mapreduce.
|
codesearchnet
|
def _transform_indices(self, key):
ndims = self.ndims
if all(not (isinstance(el, slice) or callable(el)) for el in key):
dim_inds = []
for dim in self.kdims:
dim_type = self.get_dimension_type(dim)
if isinstance(dim_type, type) and issubclass(dim_type, Number):
dim_inds.append(self.get_dimension_index(dim))
str_keys = iter(key[i] for i in range(self.ndims)
if i not in dim_inds)
num_keys = []
if len(dim_inds):
keys = list({tuple(k[i] if ndims > 1 else k for i in dim_inds)
for k in self.keys()})
q = np.array([tuple(key[i] if ndims > 1 else key for i in dim_inds)])
idx = np.argmin([np.inner(q - np.array(x), q - np.array(x))
if len(dim_inds) == 2 else np.abs(q-x)
for x in keys])
num_keys = iter(keys[idx])
key = tuple(next(num_keys) if i in dim_inds else next(str_keys)
for i in range(self.ndims))
elif any(not (isinstance(el, slice) or callable(el)) for el in key):
keys = self.keys()
for i, k in enumerate(key):
if isinstance(k, slice):
continue
dim_keys = np.array([ke[i] for ke in keys])
if dim_keys.dtype.kind in 'OSU':
continue
snapped_val = dim_keys[np.argmin(np.abs(dim_keys-k))]
key = list(key)
key[i] = snapped_val
key = tuple(key)
return key
|
Snaps indices into the GridSpace to the closest coordinate.
Args:
key: Tuple index into the GridSpace
Returns:
Transformed key snapped to closest numeric coordinates
|
juraj-google-style
|
def serialize_to_list(self, name, datas):
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if (items is None):
msg = "List reference '{}' lacks of required 'items' variable or is empty"
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items
|
Serialize given datas to a list structure.
List structure is very simple and only requires a variable ``--items``,
which is a string of values separated by spaces. All other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
|
codesearchnet
|
def get_load_balancer(self, id):
return LoadBalancer.get_object(api_token=self.token, id=id)
|
Returns a Load Balancer object by its ID.
Args:
id (str): Load Balancer ID
|
codesearchnet
|
def from_sub_model_configs(cls, text_config: ClvpEncoderConfig, speech_config: ClvpEncoderConfig, decoder_config: ClvpDecoderConfig, **kwargs):
return cls(text_config=text_config.to_dict(), speech_config=speech_config.to_dict(), decoder_config=decoder_config.to_dict(), **kwargs)
|
Instantiate a [`ClvpConfig`] (or a derived class) from CLVP text model configuration, CLVP speech model
configuration and CLVP decoder model configuration.
Args:
text_config (`ClvpEncoderConfig`):
Text model configuration of type [`ClvpEncoderConfig`].
speech_config (`ClvpEncoderConfig`):
Speech model configuration of type [`ClvpEncoderConfig`].
decoder_config (`ClvpDecoderConfig`):
Decoder model configuration of type [`ClvpDecoderConfig`].
Returns:
[`ClvpConfig`]: An instance of a configuration object
|
github-repos
|
def pop(stack, op_id):
if __debug__:
pushed_value, pushed_op_id = stack.pop()
assert pushed_op_id == op_id, 'Wanted %s, got %s' % (op_id, pushed_op_id)
else:
pushed_value = stack.pop()
return pushed_value
|
Pop a value from the stack (i.e. read it from the tape).
Args:
stack: The stack to pop from.
op_id: A unique variable that is also passed into the matching push.
Allows optimization passes to track pairs of pushes and pops.
Returns:
The last value.
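Example:
Illustrative sketch, assuming the matching push appended a ``(value, op_id)``
pair (which is what the ``__debug__`` branch expects):
>>> stack = [(3.0, 'op_1')]
>>> pop(stack, 'op_1')
3.0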
|
juraj-google-style
|
def __init__(self, graph, canonical_device=None):
self._graph = graph
self.canonical_device = canonical_device
self._operations = self._initialize_operations()
self._operation_name_to_id = self._initialize_operation_name_to_id()
self._tensor_name_to_ids = self._initialize_tensor_name_to_ids()
self._final_tensors = set()
|
Initializer.
Args:
graph: either a tf.Graph or mtf.Graph.
canonical_device: optional string, the name of the canonical device for
IsTensoronCanonicalDevice.
|
juraj-google-style
|
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
for subkey in registry_key.GetSubkeys():
values_dict = {}
values_dict['subkey_name'] = subkey.name
vendor_identification = None
product_identification = None
try:
subkey_name_parts = subkey.name.split('&')
if len(subkey_name_parts) >= 2:
vendor_identification = subkey_name_parts[0]
product_identification = subkey_name_parts[1]
except ValueError as exception:
logger.warning(
'Unable to split string: {0:s} with error: {1!s}'.format(
subkey.name, exception))
if vendor_identification and product_identification:
values_dict['vendor'] = vendor_identification
values_dict['product'] = product_identification
for devicekey in subkey.GetSubkeys():
values_dict['serial'] = devicekey.name
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event_data.source_append = self._SOURCE_APPEND
event = time_events.DateTimeValuesEvent(
devicekey.last_written_time,
definitions.TIME_DESCRIPTION_LAST_CONNECTED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
|
juraj-google-style
|
def remove(path, force=False):
path = os.path.expanduser(path)
if (not os.path.isabs(path)):
raise SaltInvocationError('File path must be absolute: {0}'.format(path))
if ((not os.path.exists(path)) and (not is_link(path))):
raise CommandExecutionError('Path not found: {0}'.format(path))
if force:
file_attributes = win32api.GetFileAttributes(path)
win32api.SetFileAttributes(path, win32con.FILE_ATTRIBUTE_NORMAL)
try:
if os.path.isfile(path):
os.remove(path)
elif is_link(path):
os.rmdir(path)
else:
for name in os.listdir(path):
item = '{0}\\{1}'.format(path, name)
remove(item, force)
os.rmdir(path)
except (OSError, IOError) as exc:
if force:
win32api.SetFileAttributes(path, file_attributes)
raise CommandExecutionError("Could not remove '{0}': {1}".format(path, exc))
return True
|
Remove the named file or directory
Args:
path (str): The path to the file or directory to remove.
force (bool): Remove even if marked Read-Only. Default is False
Returns:
bool: True if successful, False if unsuccessful
CLI Example:
.. code-block:: bash
salt '*' file.remove C:\\Temp
|
codesearchnet
|
def to_proto(self, export_scope=None):
if export_scope is None or self.name.startswith(export_scope):
context_def = control_flow_pb2.WhileContextDef()
context_def.context_name = ops.strip_name_scope(self.name, export_scope)
context_def.parallel_iterations = self._parallel_iterations
if self._maximum_iterations is not None:
context_def.maximum_iterations_name = ops.strip_name_scope(self._maximum_iterations.name, export_scope)
context_def.back_prop = self._back_prop
context_def.swap_memory = self._swap_memory
context_def.pivot_for_pred_name = ops.strip_name_scope(self._pivot_for_pred.name, export_scope)
context_def.pivot_for_body_name = ops.strip_name_scope(self._pivot_for_body.name, export_scope)
context_def.pivot_name = ops.strip_name_scope(self._pivot.name, export_scope)
context_def.loop_exit_names.extend([ops.strip_name_scope(l.name, export_scope) for l in self._loop_exits])
context_def.loop_enter_names.extend([ops.strip_name_scope(l.name, export_scope) for l in self._loop_enters])
context_def.values_def.MergeFrom(super(WhileContext, self)._to_values_def(export_scope=export_scope))
for nested in self._nested_contexts:
nested_def = context_def.nested_contexts.add()
nested.to_control_flow_context_def(nested_def)
return context_def
else:
return None
|
Converts a `WhileContext` to a `WhileContextDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `WhileContextDef` protocol buffer.
|
github-repos
|
def __init__(self, scope, parent, id, name, result):
CodeEntity.__init__(self, scope, parent)
self.id = id
self.name = name
self.result = result
self.value = None
self.member_of = None
self.references = []
self.writes = []
|
Constructor for variables.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
id: A unique identifier for this variable.
name (str): The name of the variable in the program.
result (str): The type of the variable in the program.
|
juraj-google-style
|
def InsertAll(self, request, global_params=None):
config = self.GetMethodConfig('InsertAll')
return self._RunMethod(config, request, global_params=global_params)
|
Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.
Args:
request: (BigqueryTabledataInsertAllRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TableDataInsertAllResponse) The response message.
|
github-repos
|
def refactor_tree(self, tree, name):
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
self.traverse_by(self.bmi_post_order_heads, tree.post_order())
match_set = self.BM.run(tree.leaves())
while any(match_set.values()):
for fixer in self.BM.fixers:
if fixer in match_set and match_set[fixer]:
match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
if fixer.keep_line_order:
match_set[fixer].sort(key=pytree.Base.get_lineno)
for node in list(match_set[fixer]):
if node in match_set[fixer]:
match_set[fixer].remove(node)
try:
find_root(node)
except ValueError:
continue
if node.fixers_applied and fixer in node.fixers_applied:
continue
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
for node in new.post_order():
if not node.fixers_applied:
node.fixers_applied = []
node.fixers_applied.append(fixer)
new_matches = self.BM.run(new.leaves())
for fxr in new_matches:
if not fxr in match_set:
match_set[fxr]=[]
match_set[fxr].extend(new_matches[fxr])
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
|
Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
|
juraj-google-style
|
def Append(self, new_values):
newrow = self.NewRow()
newrow.values = new_values
self._table.append(newrow)
|
Adds a new row (list) to the table.
Args:
new_values: Tuple, dict, or Row() of new values to append as a row.
Raises:
TableError: Supplied tuple not equal to table width.
|
juraj-google-style
|
def _signal_handler(self, signal_interupt, frame):
if self.container is not None:
print('{}{}Stopping docker container.'.format(c.Style.BRIGHT, c.Fore.YELLOW))
self.container.stop()
print('{}{}Interrupt signal received.'.format(c.Style.BRIGHT, c.Fore.RED))
self.log.error('tcrun received an interrupt signal and will now exit.')
sys.exit(1)
|
Handle signal interrupt.
Args:
signal_interupt ([type]): [Description]
frame ([type]): [Description]
|
juraj-google-style
|
def open_sequence(path: Union[str, os.PathLike[str]], mode: str='r', *, perms: Optional[int]=436, serializer: Optional[Callable[[Any], Union[bytes, str]]]=None, deserializer: Optional[Callable[[Union[bytes, str]], Any]]=None, make_dirs_if_not_exist: bool=True) -> Sequence:
if 'w' in mode or 'a' in mode:
parent_dir = os.path.dirname(path)
if make_dirs_if_not_exist:
file_system.mkdirs(parent_dir, exist_ok=True)
return _registry.get(path).open(path, mode, perms=perms, serializer=serializer, deserializer=deserializer)
|
Open sequence for reading or writing.
Args:
path: The path to the sequence.
mode: The mode of the sequence.
perms: (Optional) The permissions of the sequence.
serializer: (Optional) A serializer function for converting a structured
object to a string or bytes.
deserializer: (Optional) A deserializer function for converting a string or
bytes to a structured object.
make_dirs_if_not_exist: (Optional) Whether to create the directories
if they do not exist. Applicable when opening in write or append mode.
Returns:
A sequence for reading or writing.
|
github-repos
|
def build_input_pipeline(x, y, batch_size):
training_dataset = tf.data.Dataset.from_tensor_slices((x, y))
training_batches = training_dataset.repeat().batch(batch_size)
training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)
(batch_features, batch_labels) = training_iterator.get_next()
return (batch_features, batch_labels)
|
Build a Dataset iterator for supervised classification.
Args:
x: Numpy `array` of features, indexed by the first dimension.
y: Numpy `array` of labels, with the same first dimension as `x`.
batch_size: Number of elements in each training batch.
Returns:
batch_features: `Tensor` feed features, of shape
`[batch_size] + x.shape[1:]`.
batch_labels: `Tensor` feed of labels, of shape
`[batch_size] + y.shape[1:]`.
|
codesearchnet
|
def gene_by_alias(self, symbol, build='37'):
res = self.hgnc_collection.find({'hgnc_symbol': symbol, 'build':build})
if res.count() == 0:
res = self.hgnc_collection.find({'aliases': symbol, 'build':build})
return res
|
Return an iterable with hgnc_genes.
If the gene symbol is listed as primary the iterable will only have
one result. If not the iterable will include all hgnc genes that have
the symbol as an alias.
Args:
symbol(str)
build(str)
Returns:
res(pymongo.Cursor(dict))
|
juraj-google-style
|
def read(url, encoding=None, cache=None, mode='rb'):
with read_handle(url, cache, mode=mode) as handle:
data = handle.read()
if encoding:
data = data.decode(encoding)
return data
|
Read from any URL.
Internally differentiates between URLs supported by tf.gfile, such as URLs
with the Google Cloud Storage scheme ('gs://...') or local paths, and HTTP
URLs. This way users don't need to know about the underlying fetch mechanism.
Args:
url: a URL including scheme or a local path
mode: mode in which to open the file. defaults to binary ('rb')
encoding: if specified, encoding that should be used to decode read data
if mode is specified to be text ('r'), this defaults to 'utf-8'.
cache: whether to attempt caching the resource. Defaults to True only if
the given URL specifies a remote resource.
Returns:
All bytes from the specified resource, or a decoded string of those.
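Example:
Illustrative usage; the URLs are placeholders::
text = read('https://example.com/robots.txt', mode='r', encoding='utf-8')
raw_bytes = read('gs://some-bucket/blob.bin')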
|
codesearchnet
|
def unmanaged_devices(self):
if (not self.__unmanaged_devices):
self.__unmanaged_devices = UnmanagedDevices(self.__connection)
return self.__unmanaged_devices
|
Gets the Unmanaged Devices API client.
Returns:
UnmanagedDevices:
|
codesearchnet
|
def percentile_nearest(self, percentile):
if self._input_csv_files:
df = self._get_data_from_csv_files()
if 'target' not in df or 'predicted' not in df:
raise ValueError('Cannot find "target" or "predicted" column')
df = df[['target', 'predicted']].apply(pd.to_numeric)
abs_errors = np.array((df['target'] - df['predicted']).apply(abs))
return np.percentile(abs_errors, percentile, interpolation='nearest')
elif self._bigquery:
query = bq.Query( % (float(percentile) / 100, self._bigquery))
df = self._get_data_from_bigquery([query])
if df.empty:
return None
return df['percentile'][0]
|
Get nearest percentile from regression model evaluation results.
Args:
percentile: a 0~100 float number.
Returns:
the percentile float number.
Raises:
Exception if the CSV headers do not include 'target' or 'predicted', or BigQuery
does not return 'target' or 'predicted' column, or if target or predicted is not
number.
|
juraj-google-style
|
def get_duration_h_m(start: Union[str, DateTime],
end: Union[str, DateTime],
default: str = "N/A") -> str:
start = coerce_to_pendulum(start)
end = coerce_to_pendulum(end)
if start is None or end is None:
return default
duration = end - start
minutes = duration.in_minutes()
(hours, minutes) = divmod(minutes, 60)
if hours < 0:
hours += 1
minutes = 60 - minutes
return "-{}:{}".format(hours, "00" if minutes == 0 else minutes)
else:
return "{}:{}".format(hours, "00" if minutes == 0 else minutes)
|
Calculate the time between two dates/times expressed as strings.
Args:
start: start date/time
end: end date/time
default: string value to return in case either of the inputs is
``None``
Returns:
a string that is one of
.. code-block:
'hh:mm'
'-hh:mm'
default
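Example:
Illustrative sketch, assuming ``coerce_to_pendulum`` accepts ISO-8601 strings:
>>> get_duration_h_m("2020-01-01T10:00:00", "2020-01-01T12:30:00")
'2:30'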
|
juraj-google-style
|
def kmip_version(self, value):
if isinstance(value, enums.KMIPVersion):
self.proxy.kmip_version = value
else:
raise ValueError("KMIP version must be a KMIPVersion enumeration")
|
Set the KMIP version for the client.
Args:
value (KMIPVersion): A KMIPVersion enumeration
Return:
None
Raises:
ValueError: if value is not a KMIPVersion enumeration
Example:
>>> client.kmip_version = enums.KMIPVersion.KMIP_1_1
>>>
|
juraj-google-style
|
def createCategoryFilter(self, filterName, positiveExamples, negativeExamples=[]):
samples = {"positiveExamples": [{"text": s} for s in positiveExamples],
"negativeExamples": [{"text": s} for s in negativeExamples]}
body = json.dumps(samples)
return self._classify.createCategoryFilter(self._retina, filterName, body)
|
Get a classifier filter (fingerprint) for positive and negative text samples
Args:
filterName, str: A unique name for the filter. (required)
positiveExamples, list(str): The list of positive example texts. (required)
negativeExamples, list(str): The list of negative example texts. (optional)
Returns:
CategoryFilter
Raises:
CorticalioException: if the request was not successful
|
juraj-google-style
|
def expand_abbreviations(self, text):
if not self.abbreviations:
raise LexiconError("No abbreviations in lexicon.")
def chunks(data, SIZE=25):
it = iter(data)
for i in range(0, len(data), SIZE):
yield {k: data[k] for k in islice(it, SIZE)}
def cb(g):
return self.abbreviations.get(g.group(0)) or g.group(0)
text = re.sub(r'w/', r'wi', text)
for subdict in chunks(self.abbreviations):
regex = r'(\b' + r'\b)|(\b'.join(subdict.keys()) + r'\b)'
text = re.sub(regex, cb, text)
return text
|
Parse a piece of text and replace any abbreviations with their full
word equivalents. Uses the lexicon.abbreviations dictionary to find
abbreviations.
Args:
text (str): The text to parse.
Returns:
str: The text with abbreviations replaced.
|
juraj-google-style
|
def pubsub_pop_message(self, deadline=None):
if (not self.subscribed):
excep = ClientError('you must subscribe before using pubsub_pop_message')
raise tornado.gen.Return(excep)
reply = None
try:
reply = self._reply_list.pop(0)
raise tornado.gen.Return(reply)
except IndexError:
pass
if (deadline is not None):
td = timedelta(seconds=deadline)
(yield self._condition.wait(timeout=td))
else:
(yield self._condition.wait())
try:
reply = self._reply_list.pop(0)
except IndexError:
pass
raise tornado.gen.Return(reply)
|
Pops a message for a subscribed client.
Args:
deadline (int): max number of seconds to wait (None => no timeout)
Returns:
Future with the popped message as result (or None if timeout
or ConnectionError object in case of connection errors
or ClientError object if you are not subscribed)
|
codesearchnet
|
def get_orbital_resolved_cohp(self, label, orbitals):
if (self.orb_res_cohp is None):
return None
elif (isinstance(orbitals, list) or isinstance(orbitals, tuple)):
cohp_orbs = [d['orbitals'] for d in self.orb_res_cohp[label].values()]
orbs = []
for orbital in orbitals:
if isinstance(orbital[1], int):
orbs.append(tuple((orbital[0], Orbital(orbital[1]))))
elif isinstance(orbital[1], Orbital):
orbs.append(tuple((orbital[0], orbital[1])))
elif isinstance(orbital[1], str):
orbs.append(tuple((orbital[0], Orbital[orbital[1]])))
else:
raise TypeError('Orbital must be str, int, or Orbital.')
orb_index = cohp_orbs.index(orbs)
orb_label = list(self.orb_res_cohp[label].keys())[orb_index]
elif isinstance(orbitals, str):
orb_label = orbitals
else:
raise TypeError('Orbitals must be str, list, or tuple.')
try:
icohp = self.orb_res_cohp[label][orb_label]['ICOHP']
except KeyError:
icohp = None
return Cohp(self.efermi, self.energies, self.orb_res_cohp[label][orb_label]['COHP'], icohp=icohp, are_coops=self.are_coops)
|
Get orbital-resolved COHP.
Args:
label: bond label (Lobster: labels as in ICOHPLIST/ICOOPLIST.lobster).
orbitals: The orbitals as a label, or list or tuple of the form
[(n1, orbital1), (n2, orbital2)]. Orbitals can either be str,
int, or Orbital.
Returns:
A Cohp object if CompleteCohp contains orbital-resolved cohp,
or None if it doesn't.
Note: It currently assumes that orbitals are str if they aren't the
other valid types. This is not ideal, but the easiest way to
avoid unicode issues between python 2 and python 3.
|
codesearchnet
|
def subscribe(self, requested_timeout=None, auto_renew=False, event_queue=None):
subscription = Subscription(self, event_queue)
subscription.subscribe(requested_timeout=requested_timeout, auto_renew=auto_renew)
return subscription
|
Subscribe to the service's events.
Args:
requested_timeout (int, optional): If requested_timeout is
provided, a subscription valid for that
number of seconds will be requested, but not guaranteed. Check
`Subscription.timeout` on return to find out what period of
validity is actually allocated.
auto_renew (bool): If auto_renew is `True`, the subscription will
automatically be renewed just before it expires, if possible.
Default is `False`.
event_queue (:class:`~queue.Queue`): a thread-safe queue object on
which received events will be put. If not specified,
a (:class:`~queue.Queue`) will be created and used.
Returns:
`Subscription`: an instance of `Subscription`, representing
the new subscription.
To unsubscribe, call the `unsubscribe` method on the returned object.
|
codesearchnet
|
def _process_update(self, item, feed_item):
item['name'] = feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_NAME, None)
item['url'] = feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_URL, None)
|
Updates a landing page based on the values from the feed.
Args:
item: Object representing the landing page to be updated, this object is
updated directly.
feed_item: Feed item representing landing page values from the Bulkdozer
feed.
|
github-repos
|
def list(self, resource):
return self.service.list(
resource, self.url_prefix, self.auth, self.session,
self.session_send_opts)
|
List metadata keys associated with the given resource.
Args:
resource (intern.resource.boss.BossResource): List keys associated with this resource.
Returns:
(list): List of key names.
Raises:
requests.HTTPError on failure.
|
juraj-google-style
|
def find_first(self, *args, **kwargs):
if capybara.wait_on_first_by_default:
kwargs.setdefault('minimum', 1)
try:
result = self.find_all(*args, **kwargs)
return (result[0] if (len(result) > 0) else None)
except ExpectationNotMet:
return None
|
Find the first element on the page matching the given selector and options, or None if no
element matches.
By default, no waiting behavior occurs. However, if ``capybara.wait_on_first_by_default``
is set to true, it will trigger Capybara's waiting behavior for a minimum of 1 matching
element to be found.
Args:
*args: Variable length argument list for :class:`SelectorQuery`.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
Element: The found element or None.
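Example:
Illustrative usage; ``page`` is a hypothetical capybara session object::
first_item = page.find_first("css", "ul.menu li")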
|
codesearchnet
|
def known(self, words):
tmp = [w.lower() for w in words]
return set((w for w in tmp if ((w in self._word_frequency.dictionary) or (not self._check_if_should_check(w)))))
|
The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus
|
codesearchnet
|
def matching_args(fn, dictionary):
arg_spec = getargspec(fn)
if arg_spec.keywords:
return dictionary
return _mapping.split_by_criteria(dictionary, arg_spec.args).included
|
Given a function fn and a dict dictionary, returns the function arguments that match the dict keys.
Example:
def train(channel_dirs, model_dir): pass
dictionary = {'channel_dirs': {}, 'model_dir': '/opt/ml/model', 'other_args': None}
args = functions.matching_args(train, dictionary) # {'channel_dirs': {}, 'model_dir': '/opt/ml/model'}
train(**args)
Args:
fn (function): a function
dictionary (dict): the dictionary with the keys
Returns:
(dict) a dictionary with only matching arguments.
|
codesearchnet
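A self-contained re-implementation sketch of the same idea using `inspect.signature`, since `getargspec` and the internal `_mapping` helper are not shown here; the `train` function and hyperparameter names are illustrative.
import inspect

def matching_args_sketch(fn, dictionary):
    params = inspect.signature(fn).parameters
    # If the function accepts **kwargs, every key matches.
    if any(p.kind is inspect.Parameter.VAR_KEYWORD for p in params.values()):
        return dict(dictionary)
    return {k: v for k, v in dictionary.items() if k in params}

def train(channel_dirs, model_dir):
    return channel_dirs, model_dir

hyperparameters = {'channel_dirs': {}, 'model_dir': '/opt/ml/model', 'other_args': None}
print(matching_args_sketch(train, hyperparameters))
# {'channel_dirs': {}, 'model_dir': '/opt/ml/model'}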
|
def check_codes_match(observed_code: str, theoretical_code: str) -> Optional[int]:
observed_code_header = observed_code.split('\n')[0]
theoretical_code_header = theoretical_code.split('\n')[0]
_re_class_match = re.compile('class\\s+([^\\(:]+)(?:\\(|:)')
_re_func_match = re.compile('def\\s+([^\\(]+)\\(')
for re_pattern in [_re_class_match, _re_func_match]:
if re_pattern.match(observed_code_header) is not None:
try:
observed_obj_name = re_pattern.search(observed_code_header).groups()[0]
except Exception:
raise ValueError('Tried to split a class or function. It did not work. Error comes from: \n```\n' + observed_code_header + '\n```\n')
try:
theoretical_name = re_pattern.search(theoretical_code_header).groups()[0]
except Exception:
raise ValueError('Tried to split a class or function. It did not work. Error comes from: \n```\n' + theoretical_code_header + '\n```\n')
theoretical_code_header = theoretical_code_header.replace(theoretical_name, observed_obj_name)
diff_index = 0
if theoretical_code_header != observed_code_header:
return 0
diff_index = 1
for observed_line, theoretical_line in zip(observed_code.split('\n')[1:], theoretical_code.split('\n')[1:]):
if observed_line != theoretical_line:
return diff_index
diff_index += 1
|
Checks if two version of a code match with the exception of the class/function name.
Args:
observed_code (`str`): The code found.
theoretical_code (`str`): The code to match.
Returns:
`Optional[int]`: The index of the first line where there is a difference (if any) and `None` if the codes
match.
|
github-repos
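A small usage sketch, assuming `check_codes_match` above is in scope; the two code strings are made up for illustration.
observed = "def foo_fast(x):\n    return x + 1"
theoretical = "def foo(x):\n    return x + 1"
# Headers differ only by the function name and the bodies match -> None (codes match).
print(check_codes_match(observed, theoretical))

observed_changed = "def foo_fast(x):\n    return x + 2"
# First differing body line is at index 1.
print(check_codes_match(observed_changed, theoretical))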
|
def swo_enable(self, cpu_speed, swo_speed=9600, port_mask=1):
if self.swo_enabled():
self.swo_stop()
res = self._dll.JLINKARM_SWO_EnableTarget(cpu_speed, swo_speed, enums.JLinkSWOInterfaces.UART, port_mask)
if (res != 0):
raise errors.JLinkException(res)
self._swo_enabled = True
return None
|
Enables SWO output on the target device.
Configures the output protocol, the SWO output speed, and enables any
ITM & stimulus ports.
This is equivalent to calling ``.swo_start()``.
Note:
If SWO is already enabled, it will first stop SWO before enabling it
again.
Args:
self (JLink): the ``JLink`` instance
cpu_speed (int): the target CPU frequency in Hz
swo_speed (int): the frequency in Hz used by the target to communicate
port_mask (int): port mask specifying which stimulus ports to enable
Returns:
``None``
Raises:
JLinkException: on error
|
codesearchnet
|
def allreduce(self, x, mesh_axes, reduction_fn_string):
return self._collective_with_groups(
x, mesh_axes, functools.partial(
allreduce_ring, reduction_fn_string=reduction_fn_string))
|
Grouped allreduce, (across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers - the mesh dimensions to be reduced
reduction_fn_string: "SUM" or "MAX"
Returns:
a LaidOutTensor
|
juraj-google-style
|
def replace_characters(self, text, characters, replacement=''):
if not characters:
return text
characters = ''.join(sorted(characters))
if characters in self._characters_regexes:
characters_regex = self._characters_regexes[characters]
else:
characters_regex = re.compile("[%s]" % re.escape(characters))
self._characters_regexes[characters] = characters_regex
return characters_regex.sub(replacement, text)
|
Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters.
|
juraj-google-style
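A standalone sketch of the cached character-class substitution, with the per-instance cache replaced by a module-level dict for illustration.
import re

_characters_regexes = {}

def replace_characters_sketch(text, characters, replacement=''):
    if not characters:
        return text
    key = ''.join(sorted(characters))
    pattern = _characters_regexes.get(key)
    if pattern is None:
        pattern = re.compile('[%s]' % re.escape(key))   # e.g. "[!,]"
        _characters_regexes[key] = pattern
    return pattern.sub(replacement, text)

print(replace_characters_sketch('Hello, world!!', ',!'))   # 'Hello world'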
|
def __init__(self, project: str, location: str, api_endpoint: str, feature_store_name: str, feature_view_name: str, row_key: str, *, exception_level: ExceptionLevel=ExceptionLevel.WARN, **kwargs):
self.project = project
self.location = location
self.api_endpoint = api_endpoint
self.feature_store_name = feature_store_name
self.feature_view_name = feature_view_name
self.row_key = row_key
self.exception_level = exception_level
self.kwargs = kwargs if kwargs else {}
if 'client_options' in self.kwargs:
if not self.kwargs['client_options']['api_endpoint']:
self.kwargs['client_options']['api_endpoint'] = self.api_endpoint
elif self.kwargs['client_options']['api_endpoint'] != self.api_endpoint:
raise ValueError('Multiple values received for api_endpoint in api_endpoint and client_options parameters.')
else:
self.kwargs['client_options'] = {'api_endpoint': self.api_endpoint}
try:
admin_client = aiplatform.gapic.FeatureOnlineStoreAdminServiceClient(**self.kwargs)
except Exception:
_LOGGER.warning('Due to insufficient admin permission, could not verify the existence of feature store. If the `exception_level` is set to WARN then make sure the feature store exists otherwise the data enrichment will not happen without throwing an error.')
else:
location_path = admin_client.common_location_path(project=self.project, location=self.location)
feature_store_path = admin_client.feature_online_store_path(project=self.project, location=self.location, feature_online_store=self.feature_store_name)
feature_store = admin_client.get_feature_online_store(name=feature_store_path)
if not feature_store:
raise NotFound('Vertex AI Feature Store %s does not exist in %s' % (self.feature_store_name, location_path))
|
Initializes an instance of `VertexAIFeatureStoreEnrichmentHandler`.
Args:
project (str): The GCP project-id for the Vertex AI Feature Store.
location (str): The region for the Vertex AI Feature Store.
api_endpoint (str): The API endpoint for the Vertex AI Feature Store.
feature_store_name (str): The name of the Vertex AI Feature Store.
feature_view_name (str): The name of the feature view within the
Feature Store.
row_key (str): The row key field name containing the unique id
for the feature values.
exception_level: a `enum.Enum` value from
`apache_beam.transforms.enrichment_handlers.utils.ExceptionLevel`
to set the level when an empty row is returned from the Vertex AI Feature Store query.
Defaults to `ExceptionLevel.WARN`.
kwargs: Optional keyword arguments to configure the
`aiplatform.gapic.FeatureOnlineStoreServiceClient`.
|
github-repos
|
def __init__(self, details):
if not isinstance(details, dict):
raise ValueError('details in ' + self.__class__.__name__ + '.' + sys._getframe().f_code.co_name + ' must be a dict')
if '__name__' not in details:
raise KeyError('__name__')
if not _standardField.match(details['__name__']):
raise ValueError('__name__')
self._name = details['__name__']
del details['__name__']
if '__array__' in details:
del details['__array__']
super(Tree, self).__init__(details)
self._class = 'Tree'
|
Constructor
Initialises the instance
Arguments:
details {dict} -- Details describing the type of values allowed for
the node
Raises:
KeyError
ValueError
Returns:
Tree
|
juraj-google-style
|
def initial_value_of_masked_time_series(time_series_tensor, broadcast_mask):
num_timesteps = tf.shape(input=time_series_tensor)[-1]
unmasked_negindices = (
tf.cast(~broadcast_mask, tf.int32) *
tf.range(num_timesteps, 0, -1))
first_unmasked_indices = num_timesteps - tf.reduce_max(
input_tensor=unmasked_negindices, axis=-1)
if first_unmasked_indices.shape.ndims is None:
raise NotImplementedError(
'Cannot compute initial values of a masked time series with '
'dynamic rank.')
return tf.squeeze(tf.compat.v1.batch_gather(
params=time_series_tensor,
indices=first_unmasked_indices[..., tf.newaxis]), axis=-1)
|
Get the first unmasked entry of each time series in the batch.
Args:
time_series_tensor: float `Tensor` of shape [..., num_timesteps].
broadcast_mask: bool `Tensor` of same shape as `time_series`.
|
juraj-google-style
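The core trick (first unmasked index via a reversed-range maximum) translated to a NumPy sketch with toy data, since the original requires a TensorFlow graph.
import numpy as np

series = np.array([[np.nan, np.nan, 3.0, 4.0],
                   [7.0,    8.0,    9.0, 1.0]])
mask = np.isnan(series)                       # True where the value is masked
num_timesteps = series.shape[-1]
# The largest value of (~mask) * [T, T-1, ..., 1] sits at the first unmasked
# position, so T - max(...) recovers its index.
unmasked_negindices = (~mask).astype(int) * np.arange(num_timesteps, 0, -1)
first_unmasked = num_timesteps - unmasked_negindices.max(axis=-1)      # [2, 0]
initial_values = np.take_along_axis(series, first_unmasked[..., None], axis=-1)[..., 0]
print(initial_values)                         # [3. 7.]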
|
def put(self, data):
|
Write data to file sequentially.
Args:
data: (memoryview) Data to write.
|
github-repos
|
def certificate_authority(self):
if (not self.__certificate_authority):
self.__certificate_authority = CertificateAuthority(self.__connection)
return self.__certificate_authority
|
Gets the Certificate Authority API client.
Returns:
CertificateAuthority:
|
codesearchnet
|
def fromTFExample(iter, binary_features=[]):
def _get_value(k, v):
if v.int64_list.value:
result = v.int64_list.value
elif v.float_list.value:
result = v.float_list.value
elif (k in binary_features):
return bytearray(v.bytes_list.value[0])
else:
return v.bytes_list.value[0].decode('utf-8')
if (len(result) > 1):
return list(result)
elif (len(result) == 1):
return result[0]
else:
return None
results = []
for record in iter:
example = tf.train.Example()
example.ParseFromString(bytes(record[0]))
d = {k: _get_value(k, v) for (k, v) in sorted(example.features.feature.items())}
row = Row(**d)
results.append(row)
return results
|
mapPartition function to convert an RDD of serialized tf.train.Example bytestring into an RDD of Row.
Note: TensorFlow represents both strings and binary types as tf.train.BytesList, and we need to
disambiguate these types for Spark DataFrames DTypes (StringType and BinaryType), so we require a "hint"
from the caller in the ``binary_features`` argument.
Args:
:iter: the RDD partition iterator
:binary_features: a list of tf.train.Example features which are expected to be binary/bytearrays.
Returns:
An array/iterator of DataFrame Row with features converted into columns.
|
codesearchnet
|
def get_symbol_list(rank, dim=6):
indices = list(itertools.combinations_with_replacement(range(dim), r=rank))
c_vec = np.zeros(len(indices), dtype=object)
c_arr = np.zeros(([dim] * rank), dtype=object)
for (n, idx) in enumerate(indices):
c_vec[n] = sp.Symbol(('c_' + ''.join([str(i) for i in idx])))
for perm in itertools.permutations(idx):
c_arr[perm] = c_vec[n]
return (c_vec, c_arr)
|
Returns a symbolic representation of the voigt-notation
tensor that places identical symbols for entries related
by index transposition, i. e. C_1121 = C_1211 etc.
Args:
dim (int): dimension of matrix/tensor, e. g. 6 for
voigt notation and 3 for standard
rank (int): rank of tensor, e. g. 3 for third-order ECs
Returns:
c_vec (array): array representing distinct indices
c_arr (array): array representing tensor with equivalent
indices assigned as above
|
codesearchnet
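The same symmetrisation idea shown for a rank-2, dim-3 tensor, with plain strings standing in for `sympy` symbols so the sketch runs without SymPy.
import itertools
import numpy as np

dim, rank = 3, 2
indices = list(itertools.combinations_with_replacement(range(dim), r=rank))
c_arr = np.zeros([dim] * rank, dtype=object)
for idx in indices:
    name = 'c_' + ''.join(str(i) for i in idx)
    for perm in itertools.permutations(idx):
        c_arr[perm] = name          # every index permutation shares one symbol
print(c_arr)
# [['c_00' 'c_01' 'c_02']
#  ['c_01' 'c_11' 'c_12']
#  ['c_02' 'c_12' 'c_22']]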
|
def aggregate_repo(repo, args, sem, err_queue):
try:
logger.debug('%s' % repo)
dirmatch = args.dirmatch
if not match_dir(repo.cwd, dirmatch):
logger.info("Skip %s", repo.cwd)
return
if args.command == 'aggregate':
repo.aggregate()
if args.do_push:
repo.push()
elif args.command == 'show-closed-prs':
repo.show_closed_prs()
elif args.command == 'show-all-prs':
repo.show_all_prs()
except Exception:
err_queue.put_nowait(sys.exc_info())
finally:
sem.release()
|
Aggregate one repo according to the args.
Args:
repo (Repo): The repository to aggregate.
args (argparse.Namespace): CLI arguments.
|
juraj-google-style
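A generic, self-contained sketch of the concurrency pattern used above (a bounded semaphore plus an error queue so worker exceptions are not lost); the repository-specific calls are replaced by arbitrary callables.
import sys
import threading
import queue

def worker(task, sem, err_queue):
    try:
        task()
    except Exception:
        err_queue.put_nowait(sys.exc_info())
    finally:
        sem.release()

def run_all(tasks, max_parallel=4):
    sem = threading.Semaphore(max_parallel)
    err_queue = queue.Queue()
    threads = []
    for task in tasks:
        sem.acquire()                     # blocks once max_parallel workers are running
        t = threading.Thread(target=worker, args=(task, sem, err_queue))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    if not err_queue.empty():
        _, exc_value, _ = err_queue.get()
        raise exc_value

run_all([lambda: print('repo 1 done'), lambda: print('repo 2 done')])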
|
def intersection(L1, L2):
D = ((L1[0] * L2[1]) - (L1[1] * L2[0]))
Dx = ((L1[2] * L2[1]) - (L1[1] * L2[2]))
Dy = ((L1[0] * L2[2]) - (L1[2] * L2[0]))
if (D != 0):
x = (Dx / D)
y = (Dy / D)
return (x, y)
else:
return False
|
Intersects two lines given in coefficient form.
Args:
L1 ([float, float, float]): coefficients (A, B, C) of the first line, A*x + B*y = C
L2 ([float, float, float]): coefficients (A, B, C) of the second line
Returns:
(float, float): x and y of the intersection, if the lines intersect
bool: False if the lines are parallel
|
codesearchnet
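A usage sketch, assuming `intersection` above is in scope; `line_through` is a hypothetical helper that builds the (A, B, C) coefficients of the line through two points.
def line_through(p1, p2):
    a = p1[1] - p2[1]
    b = p2[0] - p1[0]
    c = -(p1[0] * p2[1] - p2[0] * p1[1])
    return (a, b, c)                      # A*x + B*y = C

l1 = line_through((0, 0), (1, 1))         # y = x
l2 = line_through((0, 2), (2, 0))         # y = -x + 2
print(intersection(l1, l2))               # (1.0, 1.0)
print(intersection(l1, line_through((0, 1), (1, 2))))   # False: parallel lines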
|
def __init__(self, filename, mode='a', encoding='utf-8'):
if 't' not in mode and encoding and py2to3.PY_3:
mode = '{0:s}t'.format(mode)
super(CompressedFileHandler, self).__init__(
filename, mode=mode, encoding=encoding, delay=True)
|
Initializes a compressed file logging handler.
Args:
filename (str): name of the log file.
mode (Optional[str]): file access mode.
encoding (Optional[str]): encoding of the log lines.
|
juraj-google-style
|
def get_control_outputs(self, op):
if op.graph not in self.cache:
control_outputs = self.calc_control_outputs(op.graph)
self.cache[op.graph] = control_outputs
else:
control_outputs = self.cache[op.graph]
return control_outputs.get(op, [])
|
Return the control outputs for a given op.
Args:
op: The op to fetch control outputs for.
Returns:
Iterable of control output ops.
|
github-repos
|
def create_writer_of_type(type_name):
writers = available_writers()
if (type_name not in writers.keys()):
raise UnknownWriterException(('Unknown writer: %s' % (type_name,)))
return writers[type_name]()
|
Create an instance of the writer with the given name.
Args:
type_name: The name of a writer.
Returns:
An instance of the writer with the given type.
|
codesearchnet
|
def get_top_coins(tsym, limit=20):
url = build_url('volumes', tsym=tsym, limit=limit)
data = load_data(url)
return data['Data']
|
Get top coins by 24 hour trading volume value in the requested currency.
Args:
tsym: TO symbol.
limit: Number of results. Default value returns top 20 coins.
Returns:
Function returns a list containing a dictionary for each result:
[{'SUPPLY': ..., 'SYMBOL': ..., 'VOLUME24HOURTO': ...},
{...},
...]
The list is ordered based on the volume of the TO currency starting with
the highest value.
|
juraj-google-style
|
def validate(self, ticket, client_ip=None, now=None, encoding='utf-8'):
parts = self.parse(ticket)
new_ticket = self.new(*parts[1:], client_ip=client_ip, encoding=encoding)
if (new_ticket[:(self._hash.digest_size * 2)] != parts.digest):
raise TicketDigestError(ticket)
if (now is None):
now = time.time()
if (parts.valid_until <= now):
raise TicketExpired(ticket)
return parts
|
Validates the passed ticket, raises a TicketError on failure.
Args:
ticket: String value (possibly generated by new function)
client_ip: Optional IPAddress of client, should be passed if the
ip address was passed on ticket creation.
now: Optional (defaults to time.time()) time to use when
validating ticket date
Returns:
Ticket: a TicketInfo tuple containing the user's authentication details on
success
Raises:
TicketParseError: Invalid ticket format
TicketDigestError: Digest is incorrect (ticket data was modified)
TicketExpired: Ticket has passed expiration date
|
codesearchnet
|
def set_authentication_profile(profile=None, deploy=False):
if (not profile):
raise CommandExecutionError('Profile name option must not be none.')
ret = {}
query = {'type': 'config', 'action': 'set', 'xpath': "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/authentication-profile", 'element': '<authentication-profile>{0}</authentication-profile>'.format(profile)}
ret.update(__proxy__['panos.call'](query))
if (deploy is True):
ret.update(commit())
return ret
|
Set the authentication profile of the Palo Alto proxy minion. A commit will be required before this is processed.
Args:
profile (str): The name of the authentication profile to set.
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
CLI Example:
.. code-block:: bash
salt '*' panos.set_authentication_profile foo
salt '*' panos.set_authentication_profile foo deploy=True
|
codesearchnet
|
def get_file(self, file_name, local_destination=None, **kwargs):
if (not local_destination):
local_destination = file_name
return SubprocessTask((self._rsync_cmd() + ['-ut', ('%s:%s' % (self.hostname, file_name)), local_destination]), **kwargs)
|
Get a file from a remote host with rsync.
Args:
file_name (str): The relative location of the file on the remote
host.
local_destination (str): The destination for the file on the local
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
``pyrem.task.SubprocessTask``: The resulting task.
|
codesearchnet
|
def _decode_crop_and_flip(image_buffer, num_channels):
min_object_covered = 0.1
aspect_ratio_range = [0.75, 1.33]
area_range = [0.05, 1.0]
max_attempts = 100
mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_MIN_OBJ_COV, value=min_object_covered)
mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_RATIO_RANGE, value=aspect_ratio_range)
mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_AREA_RANGE, value=area_range)
mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_MAX_ATTEMPTS, value=max_attempts)
mlperf_log.resnet_print(key=mlperf_log.INPUT_CROP_USES_BBOXES, value=False)
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(tf.image.extract_jpeg_shape(image_buffer), bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True)
(bbox_begin, bbox_size, _) = sample_distorted_bounding_box
(offset_y, offset_x, _) = tf.unstack(bbox_begin)
(target_height, target_width, _) = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
cropped = tf.image.decode_and_crop_jpeg(image_buffer, crop_window, channels=num_channels)
mlperf_log.resnet_print(key=mlperf_log.INPUT_RANDOM_FLIP)
cropped = tf.image.random_flip_left_right(cropped)
return cropped
|
Crops the given image to a random part of the image, and randomly flips.
We use the fused decode_and_crop op, which performs better than the two ops
used separately in series, but note that this requires that the image be
passed in as an un-decoded string Tensor.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
num_channels: Integer depth of the image buffer for decoding.
Returns:
3-D tensor with cropped image.
|
codesearchnet
|
def create_policy(self, account, client, document, name, arn=None):
if ((not arn) and (not name)):
raise ValueError('create_policy must be called with either arn or name in the argument list')
if arn:
response = client.list_policy_versions(PolicyArn=arn)
if (len(response['Versions']) >= 5):
version = [x for x in sorted(response['Versions'], key=(lambda k: k['CreateDate'])) if (not x['IsDefaultVersion'])][0]
self.log.info('Deleting oldest IAM Policy version {}/{}'.format(arn, version['VersionId']))
client.delete_policy_version(PolicyArn=arn, VersionId=version['VersionId'])
auditlog(event='iam.check_roles.delete_policy_version', actor=self.ns, data={'account': account.account_name, 'policyName': name, 'policyArn': arn, 'versionId': version['VersionId']})
res = client.create_policy_version(PolicyArn=arn, PolicyDocument=document, SetAsDefault=True)
else:
res = client.create_policy(PolicyName=name, PolicyDocument=document)
auditlog(event='iam.check_roles.create_policy', actor=self.ns, data={'account': account.account_name, 'policyName': name, 'policyArn': arn})
return res
|
Create a new IAM policy.
If the policy already exists, a new version will be added and if needed the oldest policy version not in use
will be removed. Returns a dictionary containing the policy or version information
Args:
account (:obj:`Account`): Account to create the policy on
client (:obj:`boto3.client`): A boto3 client object
document (`str`): Policy document
name (`str`): Name of the policy to create / update
arn (`str`): Optional ARN for the policy to update
Returns:
`dict`
|
codesearchnet
|
def model(self, inputs, mode='train'):
training = (mode == 'train')
with tf.variable_scope('conv1') as scope:
conv = tf.layers.conv2d(inputs=inputs, filters=16, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
conv = tf.layers.conv2d(inputs=bn, filters=16, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)
with tf.variable_scope('conv2') as scope:
conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)
with tf.variable_scope('conv3') as scope:
conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)
with tf.variable_scope('fc') as scope:
flat = tf.layers.flatten(pool)
fc = tf.layers.dense(inputs=flat, units=32, activation=tf.nn.relu)
softmax = tf.layers.dense(inputs=fc, units=self.num_classes, activation=tf.nn.softmax)
return softmax
|
Build a simple convnet (BN before ReLU).
Args:
inputs: a tensor of size [batch_size, height, width, channels]
mode: string in ['train', 'test']
Returns:
the last op containing the predictions
Note:
Best score
Step: 7015 - Epoch: 18/20 - best batch acc: 0.8984 - loss: 1.5656
Worst score
Step: 7523 - Epoch: 20/20 - best batch acc: 0.7734 - loss: 1.6874
|
juraj-google-style
|
def is_initialized(self, name=None):
return gen_resource_variable_ops.var_is_initialized_op(self.handle, name)
|
Checks whether a resource variable has been initialized.
Outputs boolean scalar indicating whether the tensor has been initialized.
Args:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
|
github-repos
|
def ascii2h5(dat_fname, h5_fname):
table = np.loadtxt(dat_fname, skiprows=1, dtype='f4')
filter_kwargs = dict(
chunks=True,
compression='gzip',
compression_opts=3)
idx = ~np.all(table[:,2:32] < 1.e-5, axis=1)
with h5py.File(h5_fname, 'w') as f:
d = np.arange(0., 4.351, 0.15).astype('f4')
dset = f.create_dataset('dists', data=d, **filter_kwargs)
dset.attrs['description'] = 'Distances at which extinction is measured'
dset.attrs['units'] = 'kpc'
dset = f.create_dataset('pix_lb', data=table[idx,0:2], **filter_kwargs)
dset.attrs['description'] = 'Galactic (l, b) of each pixel'
dset.attrs['units'] = 'deg'
dset = f.create_dataset('A_r', data=table[idx,2:32], **filter_kwargs)
dset.attrs['description'] = 'Extinction'
dset.attrs['shape'] = '(pixel, distance)'
dset.attrs['band'] = 'r'
dset.attrs['units'] = 'mag'
dset = f.create_dataset('A_r_err', data=table[idx,32:], **filter_kwargs)
dset.attrs['description'] = 'Gaussian uncertainty in extinction'
dset.attrs['shape'] = '(pixel, distance)'
dset.attrs['band'] = 'r'
dset.attrs['units'] = 'mag'
|
Converts from the original ASCII format of the Chen+ (2014) 3D dust map to
the HDF5 format.
Args:
dat_fname (:obj:`str`): Filename of the original ASCII .dat file.
h5_fname (:obj:`str`): Output filename to write the resulting HDF5 file to.
|
juraj-google-style
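A minimal sketch of the HDF5 layout written above, using made-up toy data and a hypothetical output filename; it only demonstrates the `h5py` dataset/attribute calls, not the real Chen+ (2014) table.
import numpy as np
import h5py

filter_kwargs = dict(chunks=True, compression='gzip', compression_opts=3)
table = np.random.rand(10, 62).astype('f4')          # toy stand-in for the ASCII table

with h5py.File('chen2014_toy.h5', 'w') as f:
    dset = f.create_dataset('pix_lb', data=table[:, 0:2], **filter_kwargs)
    dset.attrs['description'] = 'Galactic (l, b) of each pixel'
    dset.attrs['units'] = 'deg'
    dset = f.create_dataset('A_r', data=table[:, 2:32], **filter_kwargs)
    dset.attrs['description'] = 'Extinction'
    dset.attrs['units'] = 'mag'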
|