code | docstring | source
---|---|---|
def _Matches(path, pattern_list):
return any((fnmatch.fnmatchcase(path, pattern) for pattern in pattern_list))
|
Returns true if path matches any pattern found in pattern_list.
Args:
path: A dot separated path to a package, class, method or variable
pattern_list: A list of wildcard patterns
Returns:
True if path matches any wildcard found in pattern_list.
|
codesearchnet
|
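A minimal usage sketch of the wildcard matching in the entry above, using only the standard-library `fnmatch` module; the package paths are made up:

```python
import fnmatch

def matches(path, pattern_list):
    # True if the dot-separated path matches any wildcard pattern.
    return any(fnmatch.fnmatchcase(path, pattern) for pattern in pattern_list)

# Match a fully qualified method name against package wildcards.
print(matches('org.example.app.MainActivity.onCreate',
              ['org.example.*', 'com.other.*']))          # True
print(matches('com.vendor.lib.Util', ['org.example.*']))  # False
```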
def readSchedules(self, tableset):
self.setContext('readSchedules')
try:
req_table = binascii.hexlify(str(tableset).zfill(1))
req_str = (('01523102303037' + req_table) + '282903')
self.request(False)
req_crc = self.calc_crc16(req_str[2:].decode('hex'))
req_str += req_crc
self.m_serial_port.write(req_str.decode('hex'))
raw_ret = self.m_serial_port.getResponse(self.getContext())
self.serialPostEnd()
return_crc = self.calc_crc16(raw_ret[1:(- 2)])
if (tableset == ReadSchedules.Schedules_1_To_4):
unpacked_read = self.unpackStruct(raw_ret, self.m_schd_1_to_4)
self.convertData(unpacked_read, self.m_schd_1_to_4, self.m_kwh_precision)
if (str(return_crc) == str(self.m_schd_1_to_4['crc16'][MeterData.StringValue])):
ekm_log('Schedules 1 to 4 CRC success (06 return)')
self.setContext('')
return True
elif (tableset == ReadSchedules.Schedules_5_To_6):
unpacked_read = self.unpackStruct(raw_ret, self.m_schd_5_to_6)
self.convertData(unpacked_read, self.m_schd_5_to_6, self.m_kwh_precision)
if (str(return_crc) == str(self.m_schd_5_to_6['crc16'][MeterData.StringValue])):
ekm_log('Schedules 5 to 8 CRC success (06 return)')
self.setContext('')
return True
except:
ekm_log(traceback.format_exc(sys.exc_info()))
self.setContext('')
return False
|
Serial call to read schedule tariffs buffer
Args:
tableset (int): :class:`~ekmmeters.ReadSchedules` buffer to return.
Returns:
bool: True on completion and ACK.
|
codesearchnet
|
def load_extra_data(cls, data):
try:
cls._extra_config.update(json.loads(data))
except ValueError as exception:
sys.stderr.write('Could not convert to JSON. {0:s}'.format(exception))
exit((- 1))
|
Loads extra JSON configuration parameters from a data buffer.
The data buffer must represent a JSON object.
Args:
data: str, the buffer to load the JSON data from.
|
codesearchnet
|
def aggregate_single_gradient(grad_and_vars, use_mean, check_inf_nan):
grads = [g for (g, _) in grad_and_vars]
grad = tf.add_n(grads)
if (use_mean and (len(grads) > 1)):
grad = tf.multiply(grad, (1.0 / len(grads)))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
return ((grad, v), has_nan_or_inf)
else:
return ((grad, v), None)
|
Calculate the average gradient for a shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single tower, and the number of pairs
equals the number of towers.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf value indicates whether the gradients contain a NaN or Inf.
|
codesearchnet
|
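A small self-contained NumPy sketch of the averaging step described above, with made-up per-tower gradients for a single shared variable:

```python
import numpy as np

# Hypothetical per-tower gradients for one shared variable 'w'.
grad_and_vars = [(np.array([0.2, 0.4]), 'w'),
                 (np.array([0.6, 0.0]), 'w'),
                 (np.array([0.1, 0.2]), 'w')]

grads = [g for g, _ in grad_and_vars]
grad = np.add.reduce(grads)           # sum over towers
use_mean = True
if use_mean and len(grads) > 1:
    grad = grad * (1.0 / len(grads))  # average instead of sum
v = grad_and_vars[0][1]               # variable taken from the first tower
print(grad, v)                        # [0.3 0.2] w
```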
def model_from_path(model_path, fuzziness=False):
app_name = '.'.join(model_path.split('.')[:(- 1)])
model_name = model_path.split('.')[(- 1)]
if (not app_name):
return None
module = importlib.import_module(app_name)
try:
model = getattr(module, model_name)
except AttributeError:
try:
model = getattr(getattr(module, 'models'), model_name)
except AttributeError:
model = get_model(model_name, app_name, fuzziness=fuzziness)
return model
|
Find the model class for a given model path like 'project.app.model'
Args:
model_path (str): dot-delimited model path, like 'project.app.model'
Returns:
Django Model-based class
|
codesearchnet
|
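The core lookup pattern above, importing a dotted module path and fetching an attribute, can be shown with only the standard library; the example path uses a standard-library class instead of a Django model:

```python
import importlib

def attr_from_path(dotted_path):
    # Split 'package.module.Attr' into a module path and an attribute name.
    module_path, _, attr_name = dotted_path.rpartition('.')
    if not module_path:
        return None
    module = importlib.import_module(module_path)
    return getattr(module, attr_name)

OrderedDict = attr_from_path('collections.OrderedDict')
print(OrderedDict)  # <class 'collections.OrderedDict'>
```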
def _CreateRouteTripsFolder(self, parent, route, style_id=None, schedule=None):
if (not route.trips):
return None
trips = list(route.trips)
trips.sort(key=(lambda x: x.trip_id))
trips_folder = self._CreateFolder(parent, 'Trips', visible=False)
for trip in trips:
if (self.date_filter and (not trip.service_period.IsActiveOn(self.date_filter))):
continue
if trip.trip_headsign:
description = ('Headsign: %s' % trip.trip_headsign)
else:
description = None
coordinate_list = []
for (secs, stoptime, tp) in trip.GetTimeInterpolatedStops():
if (self.altitude_per_sec > 0):
coordinate_list.append((stoptime.stop.stop_lon, stoptime.stop.stop_lat, ((secs - (3600 * 4)) * self.altitude_per_sec)))
else:
coordinate_list.append((stoptime.stop.stop_lon, stoptime.stop.stop_lat))
placemark = self._CreatePlacemark(trips_folder, trip.trip_id, style_id=style_id, visible=False, description=description)
self._CreateLineString(placemark, coordinate_list)
return trips_folder
|
Create a KML Folder containing all the trips in the route.
The folder contains a placemark for each of these trips. If there are no
trips in the route, no folder is created and None is returned.
Args:
parent: The parent ElementTree.Element instance.
route: The transitfeed.Route instance.
style_id: A style id string for the placemarks or None.
Returns:
The Folder ElementTree.Element instance or None.
|
codesearchnet
|
def list_skus(access_token, subscription_id, location, publisher, offer):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/', 'locations/', location, '/publishers/', publisher, '/artifacttypes/vmimage/offers/', offer, '/skus?api-version=', COMP_API])
return do_get(endpoint, access_token)
|
List available VM image skus for a publisher offer.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
location (str): Azure data center location. E.g. westus.
publisher (str): VM image publisher. E.g. MicrosoftWindowsServer.
offer (str): VM image offer. E.g. WindowsServer.
Returns:
HTTP response with JSON list of skus.
|
codesearchnet
|
def split(self, value, lengths, name=None):
return self._implementation.split(value, lengths, name=name)
|
Split the values of a `Tensor` into the TensorArray.
Args:
value: (N+1)-D. Tensor of type `dtype`. The Tensor to split.
lengths: 1-D. int32 vector with the lengths to use when splitting `value`
along its first dimension.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the split occurs.
Use this object for all subsequent operations.
Raises:
ValueError: if the shape inference fails.
|
github-repos
|
def JoinKeyPath(path_segments):
path_segments = [
segment.split(definitions.KEY_PATH_SEPARATOR)
for segment in path_segments]
path_segments = [
element for sublist in path_segments for element in sublist]
path_segments = filter(None, path_segments)
key_path = definitions.KEY_PATH_SEPARATOR.join(path_segments)
if not key_path.startswith('HKEY_'):
key_path = '{0:s}{1:s}'.format(definitions.KEY_PATH_SEPARATOR, key_path)
return key_path
|
Joins the path segments into key path.
Args:
path_segments (list[str]): Windows Registry key path segments.
Returns:
str: key path.
|
juraj-google-style
|
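A self-contained sketch of the joining logic above, assuming a backslash as the key path separator (the original reads the separator from its `definitions` module):

```python
KEY_PATH_SEPARATOR = '\\'  # assumed; the original uses definitions.KEY_PATH_SEPARATOR

def join_key_path(path_segments):
    # Split each segment on the separator, flatten, and drop empty parts.
    parts = [part for segment in path_segments
             for part in segment.split(KEY_PATH_SEPARATOR) if part]
    key_path = KEY_PATH_SEPARATOR.join(parts)
    if not key_path.startswith('HKEY_'):
        key_path = '{0:s}{1:s}'.format(KEY_PATH_SEPARATOR, key_path)
    return key_path

print(join_key_path(['HKEY_LOCAL_MACHINE\\Software', 'Microsoft\\Windows']))
# HKEY_LOCAL_MACHINE\Software\Microsoft\Windows
```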
def LoadConfig(configuration):
parser = ConfigParser()
configuration.log.debug('Attempting to parse configuration file: %s', configuration.config_file)
parser.read(configuration.config_file)
default = 'DEFAULT'
default_source = FixValue(parser.get(default, Config.OPT_SOURCE))
default_cache = FixValue(parser.get(default, Config.OPT_CACHE))
configuration.timestamp_dir = FixValue(parser.get(default, Config.OPT_TIMESTAMP_DIR))
if parser.has_option(default, Config.OPT_LOCKFILE):
configuration.lockfile = FixValue(parser.get(default, Config.OPT_LOCKFILE))
if not configuration.maps:
maplist = FixValue(parser.get(default, Config.OPT_MAPS))
if maplist:
configuration.maps = [m.strip() for m in maplist.split(',')]
else:
configuration.maps = []
for map_name in configuration.maps:
map_options = MapOptions()
source = default_source
cache = default_cache
if parser.has_section(map_name):
if parser.has_option(map_name, Config.OPT_SOURCE):
source = FixValue(parser.get(map_name, Config.OPT_SOURCE))
if parser.has_option(map_name, Config.OPT_CACHE):
cache = FixValue(parser.get(map_name, Config.OPT_CACHE))
map_options.source = Options(parser.items(default), source)
map_options.cache = Options(parser.items(default), cache)
if parser.has_section(map_name):
options = Options(parser.items(map_name), source)
map_options.source.update(options)
options = Options(parser.items(map_name), cache)
map_options.cache.update(options)
map_options.source['name'] = source
map_options.cache['name'] = cache
configuration.options[map_name] = map_options
configuration.log.info('Configured maps are: %s', ', '.join(configuration.maps))
configuration.log.debug('loaded configuration: %r', configuration)
|
Load the on-disk configuration file and merge it into config.
Args:
configuration: a config.Config object
Raises:
error.NoConfigFound: no configuration file was found
|
github-repos
|
def ParseFileObject(self, parser_mediator, file_object):
file_header_map = self._GetDataTypeMap('asl_file_header')
try:
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'Unable to parse file header with error: {0!s}'.format(
exception))
if file_header.signature != self._FILE_SIGNATURE:
raise errors.UnableToParseFile('Invalid file signature.')
file_size = file_object.get_size()
if file_header.first_log_entry_offset > 0:
last_log_entry_offset = 0
file_offset = file_header.first_log_entry_offset
while file_offset < file_size:
last_log_entry_offset = file_offset
try:
file_offset = self._ParseRecord(
parser_mediator, file_object, file_offset)
except errors.ParseError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse record with error: {0!s}'.format(exception))
return
if file_offset == 0:
break
if last_log_entry_offset != file_header.last_log_entry_offset:
parser_mediator.ProduceExtractionWarning(
'last log entry offset does not match value in file header.')
|
Parses an ASL file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
juraj-google-style
|
def _on_channel_open(self, channel):
channel.add_on_close_callback(self._on_channel_close)
channel.add_on_cancel_callback(self._on_cancel)
channel.basic_qos(callback=self._on_qosok, **config.conf["qos"])
|
Callback used when a channel is opened.
This registers all the channel callbacks.
Args:
channel (pika.channel.Channel): The channel that successfully opened.
|
juraj-google-style
|
def GetSubkeyByIndex(self, index):
if not self._registry_key and self._registry:
self._GetKeyFromRegistry()
subkeys = list(self._subkeys.values())
if index < 0 or index >= len(subkeys):
raise IndexError('Index out of bounds.')
return subkeys[index]
|
Retrieves a subkey by index.
Args:
index (int): index of the subkey.
Returns:
WinRegistryKey: Windows Registry subkey or None if not found.
Raises:
IndexError: if the index is out of bounds.
|
juraj-google-style
|
def partition(self, id_):
from ..orm import Partition as OrmPartition
from sqlalchemy import or_
from ..identity import PartialPartitionName
if isinstance(id_, PartitionIdentity):
id_ = id_.id_
elif isinstance(id_, PartialPartitionName):
id_ = id_.promote(self.bundle.identity.name)
session = self.bundle.dataset._database.session
q = session.query(OrmPartition).filter((OrmPartition.d_vid == self.bundle.dataset.vid)).filter(or_((OrmPartition.id == str(id_).encode('ascii')), (OrmPartition.vid == str(id_).encode('ascii'))))
try:
orm_partition = q.one()
return self.bundle.wrap_partition(orm_partition)
except NoResultFound:
orm_partition = None
if (not orm_partition):
q = session.query(OrmPartition).filter((OrmPartition.d_vid == self.bundle.dataset.vid)).filter((OrmPartition.name == str(id_).encode('ascii')))
try:
orm_partition = q.one()
return self.bundle.wrap_partition(orm_partition)
except NoResultFound:
orm_partition = None
return orm_partition
|
Get a partition by the id number.
Arguments:
id_ -- a partition id value
Returns:
A partitions.Partition object
Throws:
a Sqlalchemy exception if the partition either does not exist or
is not unique
Because this method works on the bundle, the id_ ( without version information )
is equivalent to the vid ( with version information )
|
codesearchnet
|
def on_success(self, inv_plugin, emit_set_slot):
self.dirty = set()
self.apply(inv_plugin)
for changed_slot in self.dirty:
emit_set_slot(changed_slot)
|
Called when the click was successful
and should be applied to the inventory.
Args:
inv_plugin (InventoryPlugin): inventory plugin instance
emit_set_slot (func): function to signal a slot change,
should be InventoryPlugin().emit_set_slot
|
codesearchnet
|
def housekeeping(self, **kwargs):
path = ('/projects/%s/housekeeping' % self.get_id())
self.manager.gitlab.http_post(path, **kwargs)
|
Start the housekeeping task.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabHousekeepingError: If the server failed to perform the
request
|
codesearchnet
|
def assignHolidayDate(self, holiday, month, day):
holiday += 1
if (month > 12) or (month < 0) or (day > 31) or (day < 0) or (holiday < 1) or (holiday > Extents.Holidays):
ekm_log("Out of bounds: month " + str(month) + " day " + str(day) + " holiday " + str(holiday))
return False
day_str = "Holiday_" + str(holiday) + "_Day"
mon_str = "Holiday_" + str(holiday) + "_Month"
if day_str not in self.m_holiday_date_params:
ekm_log("Incorrect index: " + day_str)
return False
if mon_str not in self.m_holiday_date_params:
ekm_log("Incorrect index: " + mon_str)
return False
self.m_holiday_date_params[day_str] = day
self.m_holiday_date_params[mon_str] = month
return True
|
Set a single holiday day and month in the object buffer.
There is no class style enum for holidays.
Args:
holiday (int): 0-19 or range(Extents.Holidays).
month (int): Month 1-12.
day (int): Day 1-31
Returns:
bool: True on completion.
|
juraj-google-style
|
def delete(self, **options):
fut = delete_async(self.key(), **options)
fut.get_result()
|
Permanently delete this blob from Blobstore.
Args:
**options: Options for create_rpc().
|
codesearchnet
|
def send_msg_to_webhook(self, message):
payload = {
'content':message
}
header = {
'Content-Type':'application/json'
}
try:
request = requests.post(
self.api_url,
headers=header,
json=payload
)
request.raise_for_status()
except Exception as error_msg:
warning_msg = (
'EXCEPTION: UNABLE TO COMMIT LOG MESSAGE' +
'\n\texception={0}'.format(repr(error_msg)) +
'\n\tmessage={0}'.format(message)
)
warnings.warn(
warning_msg,
exceptions.WebhookFailedEmitWarning
)
|
Separated requests logic for easier testing.
Args:
message (str): actual logging string to be passed to REST endpoint
Todo:
* Requests.text/json return for better testing options
|
juraj-google-style
|
def _decompose_and_get_unitary(val: Union[('cirq.Operation', 'cirq.Gate')]) -> np.ndarray:
from cirq.protocols.apply_unitary import apply_unitary, ApplyUnitaryArgs
from cirq.protocols.decompose import decompose_once, decompose_once_with_qubits
from cirq import Gate, LineQubit, Operation
if isinstance(val, Operation):
qubits = val.qubits
decomposed_val = decompose_once(val, default=None)
elif isinstance(val, Gate):
qubits = tuple(LineQubit.range(val.num_qubits()))
decomposed_val = decompose_once_with_qubits(val, qubits, default=None)
if (decomposed_val is not None):
n = len(qubits)
state = np.eye((1 << n), dtype=np.complex128)
state.shape = ((2,) * (2 * n))
buffer = np.zeros(state.shape, dtype=np.complex128)
qubit_map = {q: i for (i, q) in enumerate(qubits)}
result = state
for op in decomposed_val:
indices = [qubit_map[q] for q in op.qubits]
result = apply_unitary(unitary_value=op, args=ApplyUnitaryArgs(state, buffer, indices), default=None)
if (result is None):
return None
if (result is buffer):
buffer = state
state = result
if (result is not None):
return result.reshape(((1 << n), (1 << n)))
|
Try to decompose a cirq.Operation or cirq.Gate, and return its unitary
if it exists.
Returns:
If `val` can be decomposed into unitaries, calculate the resulting
unitary and return it. If it doesn't exist, None is returned.
|
codesearchnet
|
def CopyFromDateTimeString(self, time_string):
date_time_values = self._CopyDateTimeFromString(time_string)
year = date_time_values.get('year', 0)
month = date_time_values.get('month', 0)
day_of_month = date_time_values.get('day_of_month', 0)
hours = date_time_values.get('hours', 0)
minutes = date_time_values.get('minutes', 0)
seconds = date_time_values.get('seconds', 0)
microseconds = date_time_values.get('microseconds', None)
if (year > 9999):
raise ValueError('Unsupported year value: {0:d}.'.format(year))
timestamp = self._GetNumberOfSecondsFromElements(year, month, day_of_month, hours, minutes, seconds)
timestamp = (float(timestamp) / definitions.SECONDS_PER_DAY)
timestamp += self._DELPHI_TO_POSIX_BASE
if (microseconds is not None):
timestamp += (float(microseconds) / definitions.MICROSECONDS_PER_DAY)
self._normalized_timestamp = None
self._timestamp = timestamp
self.is_local_time = False
|
Copies a Delphi TDateTime timestamp from a string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
fraction and time zone offset are optional. The default time zone
is UTC.
Raises:
ValueError: if the time string is invalid or not supported.
|
codesearchnet
|
def set_local_interface(self, value=None, default=False, disable=False):
return self._configure_mlag('local-interface', value, default, disable)
|
Configures the mlag local-interface value
Args:
value (str): The value to configure the local-interface
default (bool): Configures the local-interface using the
default keyword
disable (bool): Negates the local-interface using the no keyword
Returns:
bool: Returns True if the commands complete successfully
|
juraj-google-style
|
def match_date(date):
date_pattern = re.compile(r"^(19|20)\d\d[- /.](0[1-9]|1[012])[- /.](0[1-9]|[12][0-9]|3[01])")
if re.match(date_pattern, date):
return True
return False
|
Check if a string is a valid date
Args:
date(str)
Returns:
bool
|
juraj-google-style
|
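A quick usage sketch of the date check above:

```python
import re

date_pattern = re.compile(r"^(19|20)\d\d[- /.](0[1-9]|1[012])[- /.](0[1-9]|[12][0-9]|3[01])")

def match_date(date):
    return bool(re.match(date_pattern, date))

print(match_date('2021-03-15'))  # True
print(match_date('2021-13-01'))  # False (month 13 rejected)
print(match_date('15-03-2021'))  # False (year must come first)
```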
def profile_operations(self, options):
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
try:
tfprof_node.ParseFromString(print_mdl.Profile('op'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
|
Profile the statistics of the Operation types (e.g.
MatMul, Conv2D).
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a MultiGraphNodeProto that records the results.
|
github-repos
|
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
self.hashing_algorithm.write(tstream, kmip_version=kmip_version)
self.digest_value.write(tstream, kmip_version=kmip_version)
self.key_format_type.write(tstream, kmip_version=kmip_version)
self.length = tstream.length()
super(Digest, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer)
|
Write the data encoding the Digest object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
|
juraj-google-style
|
def show(self, objtype, objid):
url = self._object_url(objtype, int(objid))
return self._make_request(url, method="get")
|
Query for a specific resource by ID
Args:
objtype (str): object type, e.g. 'device', 'interface'
objid (int): object ID (DeviceID, etc.)
Returns:
A dict with that object
Raises:
requests.exceptions.HTTPError
|
juraj-google-style
|
def wait_for_stop(self, timeout=None):
return self._stop_event.wait(timeout)
|
Wait till the Coordinator is told to stop.
Args:
timeout: Float. Sleep for up to that many seconds waiting for
should_stop() to become True.
Returns:
True if the Coordinator is told stop, False if the timeout expired.
|
github-repos
|
def extract_certs(certs_txt: str) -> List[crypto.X509]:
pattern = '-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----'
certs_txt = re.findall(pattern, certs_txt, flags=re.DOTALL)
certs = [crypto.load_certificate(crypto.FILETYPE_PEM, cert_txt) for cert_txt in certs_txt]
return certs
|
Extracts pyOpenSSL X509 objects from an SSL certificate chain string.
Args:
certs_txt: SSL certificate chain string.
Returns:
result: List of pyOpenSSL X509 objects.
|
codesearchnet
|
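A minimal sketch of the PEM-splitting step above using only `re`; loading each block into an X509 object (as the function does via `crypto.load_certificate`) is left out so the snippet has no third-party dependency, and the certificate bodies are truncated placeholders:

```python
import re

PEM_BLOCK = '-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----'

chain_txt = (
    '-----BEGIN CERTIFICATE-----\nMIIB...leaf...\n-----END CERTIFICATE-----\n'
    '-----BEGIN CERTIFICATE-----\nMIIB...intermediate...\n-----END CERTIFICATE-----\n'
)

# re.DOTALL lets '.' span the newlines inside each PEM block.
blocks = re.findall(PEM_BLOCK, chain_txt, flags=re.DOTALL)
print(len(blocks))  # 2
```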
def __init__(self, file_handle, schema):
if not file_handle.writable():
raise ValueError('Output stream must be writable')
self._file_handle = file_handle
avro_schema = fastavro.parse_schema(get_avro_schema_from_table_schema(schema))
self._avro_writer = fastavro.write.Writer(self._file_handle, avro_schema)
|
Initialize an AvroRowWriter.
Args:
file_handle (io.IOBase): Output stream to write Avro records to.
schema (Dict[Text, Any]): BigQuery table schema.
|
github-repos
|
def _ParseFileEntry(self, knowledge_base, file_entry):
if file_entry.link:
_, _, time_zone = file_entry.link.partition('zoneinfo/')
else:
file_object = file_entry.GetFileObject()
time_zone = None
try:
time_zone_file = tz.tzfile(file_object)
date_time = datetime.datetime(2017, 1, 1)
time_zone = time_zone_file.tzname(date_time)
except ValueError:
logger.error('Unable to read time zone information file.')
finally:
file_object.close()
if time_zone:
try:
knowledge_base.SetTimeZone(time_zone)
except ValueError:
logger.error('Unable to set time zone in knowledge base.')
|
Parses artifact file system data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
|
juraj-google-style
|
def Merge(self, other):
if type(self) != type(other):
raise TypeError('Attempt to Merge() differently typed Maps: %r != %r' % (type(self), type(other)))
if other.GetModifyTimestamp() and self.GetModifyTimestamp():
if other.GetModifyTimestamp() < self.GetModifyTimestamp():
raise error.InvalidMerge('Attempt to Merge a map with an older modify time into a newer one: other: %s, self: %s' % (other.GetModifyTimestamp(), self.GetModifyTimestamp()))
if other.GetUpdateTimestamp() and self.GetUpdateTimestamp():
if other.GetUpdateTimestamp() < self.GetUpdateTimestamp():
raise error.InvalidMerge('Attempt to Merge a map with an older update time into a newer one: other: %s, self: %s' % (other.GetUpdateTimestamp(), self.GetUpdateTimestamp()))
self.log.info('merging from a map of %d entries', len(other))
merge_count = 0
for their_entry in other:
if their_entry not in self:
if self.Add(their_entry):
merge_count += 1
self.log.info('%d of %d entries were new or modified', merge_count, len(other))
if merge_count > 0:
self.SetModifyTimestamp(other.GetModifyTimestamp())
self.SetUpdateTimestamp(other.GetUpdateTimestamp())
return merge_count > 0
|
Update this Map based on another Map.
Walk over other and for each entry, Add() it if it doesn't
exist -- this will update changed entries as well as adding
new ones.
Args:
other: A maps.Map instance.
Returns:
True if anything was added or modified, False if
nothing changed.
Raises:
TypeError: Merging differently typed Maps.
InvalidMerge: Attempt to Merge an older map into a newer one.
|
github-repos
|
def ProcessNewBlock(self, block):
added = set()
changed = set()
deleted = set()
try:
for tx in block.FullTransactions:
for (index, output) in enumerate(tx.outputs):
state = self.CheckAddressState(output.ScriptHash)
if ((state & AddressState.InWallet) > 0):
key = CoinReference(tx.Hash, index)
if (key in self._coins.keys()):
coin = self._coins[key]
coin.State |= CoinState.Confirmed
changed.add(coin)
else:
newcoin = Coin.CoinFromRef(coin_ref=key, tx_output=output, state=CoinState.Confirmed, transaction=tx)
self._coins[key] = newcoin
added.add(newcoin)
if ((state & AddressState.WatchOnly) > 0):
self._coins[key].State |= CoinState.WatchOnly
changed.add(self._coins[key])
for tx in block.FullTransactions:
for input in tx.inputs:
if (input in self._coins.keys()):
if (self._coins[input].Output.AssetId == Blockchain.SystemShare().Hash):
coin = self._coins[input]
coin.State |= (CoinState.Spent | CoinState.Confirmed)
changed.add(coin)
else:
deleted.add(self._coins[input])
del self._coins[input]
for claimTx in [tx for tx in block.Transactions if (tx.Type == TransactionType.ClaimTransaction)]:
for ref in claimTx.Claims:
if (ref in self._coins.keys()):
deleted.add(self._coins[ref])
del self._coins[ref]
self._current_height += 1
self.OnProcessNewBlock(block, added, changed, deleted)
if (((len(added) + len(deleted)) + len(changed)) > 0):
self.BalanceChanged()
except Exception as e:
traceback.print_stack()
traceback.print_exc()
logger.error(('could not process %s ' % e))
|
Processes a block on the blockchain. This should be done in sequential order, i.e. block 4 should
only be processed after block 3.
Args:
block: (neo.Core.Block) a block on the blockchain.
|
codesearchnet
|
def _GetStat(self):
stat_object = vfs_stat.VFSStat()
stat_object.size = self.path_spec.range_size
stat_object.type = stat_object.TYPE_FILE
return stat_object
|
Retrieves a stat object.
Returns:
VFSStat: a stat object.
Raises:
BackEndError: when the encoded stream is missing.
|
codesearchnet
|
def from_celery(cls, worker_name, job_dict, celery_app):
if ((not isinstance(job_dict, dict)) or ('id' not in job_dict)):
raise JobStatInvalid('The job description is missing important fields.')
async_result = AsyncResult(id=job_dict['id'], app=celery_app)
a_info = (async_result.info if isinstance(async_result.info, dict) else None)
return JobStats(name=(a_info.get('name', '') if (a_info is not None) else ''), job_id=job_dict['id'], job_type=(a_info.get('type', '') if (a_info is not None) else ''), workflow_id=(a_info.get('workflow_id', '') if (a_info is not None) else ''), queue=(a_info.get('queue', '') if (a_info is not None) else ''), start_time=(a_info.get('start_time', None) if (a_info is not None) else None), arguments=(a_info.get('arguments', {}) if (a_info is not None) else {}), acknowledged=job_dict['acknowledged'], func_name=job_dict['type'], hostname=job_dict['hostname'], worker_name=worker_name, worker_pid=job_dict['worker_pid'], routing_key=job_dict['delivery_info']['routing_key'])
|
Create a JobStats object from the dictionary returned by celery.
Args:
worker_name (str): The name of the worker this jobs runs on.
job_dict (dict): The dictionary as returned by celery.
celery_app: Reference to a celery application object.
Returns:
JobStats: A fully initialized JobStats object.
|
codesearchnet
|
def get_metrics_namespace(self) -> str:
return 'BeamML_Onnx'
|
Returns:
A namespace for metrics collected by the RunInference transform.
|
github-repos
|
def initialize_schema(connection):
cursor = connection.cursor()
cursor.execute("PRAGMA application_id={}".format(_TENSORBOARD_APPLICATION_ID))
cursor.execute("PRAGMA user_version={}".format(_TENSORBOARD_USER_VERSION))
with connection:
for statement in _SCHEMA_STATEMENTS:
lines = statement.strip('\n').split('\n')
message = lines[0] + ('...' if len(lines) > 1 else '')
logger.debug('Running DB init statement: %s', message)
cursor.execute(statement)
|
Initializes the TensorBoard sqlite schema using the given connection.
Args:
connection: A sqlite DB connection.
|
juraj-google-style
|
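A minimal sketch of the same initialization pattern against an in-memory SQLite database; the pragma values and the single schema statement are placeholders, not TensorBoard's real ones:

```python
import sqlite3

APPLICATION_ID = 0xABCD      # placeholder value
USER_VERSION = 1             # placeholder value
SCHEMA_STATEMENTS = [
    'CREATE TABLE IF NOT EXISTS runs (run_id INTEGER PRIMARY KEY, name TEXT)',
]

connection = sqlite3.connect(':memory:')
cursor = connection.cursor()
cursor.execute('PRAGMA application_id={}'.format(APPLICATION_ID))
cursor.execute('PRAGMA user_version={}'.format(USER_VERSION))
with connection:  # commit the schema statements as one transaction
    for statement in SCHEMA_STATEMENTS:
        cursor.execute(statement)
print(cursor.execute('PRAGMA user_version').fetchone())  # (1,)
```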
def create_datasets():
if use_device:
datasets = []
for i in range(len(self.embedding_devices)):
datasets.append(dataset_ops.DatasetV2.from_tensor_slices({'feature': [[[i % self._num_cores_per_replica]]]}).repeat())
return datasets
else:
dataset = strategy.distribute_datasets_from_function(input_fn, options=distribute_lib.InputOptions(experimental_fetch_to_device=False))
return [dataset]
|
Creates either a per-replica dataset, or multiple per-devices ones.
This function explicitly creates per-device datasets because the strategy
does not produce a distributed dataset in the model-parallel case; there
is only one replica. Without this consideration, the embeddings would be
read as [0, 0] instead of the expected [0, 1] since all the devices would
receive the same value.
Returns:
A list of one or more dataset(s).
|
github-repos
|
def aggregate(self, dataset_ids=None, boundary='exact', side='left', func='mean', **dim_kwargs):
new_scn = self.copy(datasets=dataset_ids)
for (src_area, ds_ids) in new_scn.iter_by_area():
if (src_area is None):
for ds_id in ds_ids:
new_scn.datasets[ds_id] = self[ds_id]
continue
if (boundary != 'exact'):
raise NotImplementedError("boundary modes apart from 'exact' are not implemented yet.")
target_area = src_area.aggregate(**dim_kwargs)
resolution = max(target_area.pixel_size_x, target_area.pixel_size_y)
for ds_id in ds_ids:
res = self[ds_id].coarsen(boundary=boundary, side=side, func=func, **dim_kwargs)
new_scn.datasets[ds_id] = getattr(res, func)()
new_scn.datasets[ds_id].attrs['area'] = target_area
new_scn.datasets[ds_id].attrs['resolution'] = resolution
return new_scn
|
Create an aggregated version of the Scene.
Args:
dataset_ids (iterable): DatasetIDs to include in the returned
`Scene`. Defaults to all datasets.
func (string): Function to apply on each aggregation window. One of
'mean', 'sum', 'min', 'max', 'median', 'argmin',
'argmax', 'prod', 'std', 'var'.
'mean' is the default.
boundary: Not implemented.
side: Not implemented.
dim_kwargs: the size of the windows to aggregate.
Returns:
A new aggregated scene
See also:
xarray.DataArray.coarsen
Example:
`scn.aggregate(func='min', x=2, y=2)` will aggregate 2x2 pixels by
applying the `min` function.
|
codesearchnet
|
def BuildDefaultValue(self, value_cls):
try:
return value_cls()
except Exception as e:
logging.exception(e)
raise DefaultValueError(("Can't create default for value %s: %s" % (value_cls.__name__, e)))
|
Renders default value of a given class.
Args:
value_cls: Default value of this class will be rendered. This class has to
be (or to be a subclass of) a self.value_class (i.e. a class that this
renderer is capable of rendering).
Returns:
An initialized default value.
Raises:
DefaultValueError: if something goes wrong.
|
codesearchnet
|
def remove(self, key):
self.raise_error_if_not_open()
if (key in self._file):
del self._file[key]
|
Remove the data stored for the given key.
Args:
key (str): Key of the data to remove.
Note:
The container has to be opened in advance.
|
codesearchnet
|
def load(config):
if config.sys_path:
logger.debug("Appending %s to sys.path.", config.sys_path)
sys.path.append(config.sys_path)
logger.debug("sys.path is now %s", sys.path)
if config.lookups:
for key, handler in config.lookups.items():
register_lookup_handler(key, handler)
return config
|
Loads a stacker configuration by modifying sys paths, loading lookups,
etc.
Args:
config (:class:`Config`): the stacker config to load.
Returns:
:class:`Config`: the stacker config provided above.
|
juraj-google-style
|
def parse_init_dat(infile):
init_dict = {}
log.debug('{}: reading file...'.format(infile))
with open(infile, 'r') as f:
head = [next(f).strip() for x in range(2)]
summary = head[0].split()
difficulty = summary[1]
top_template_info = head[1].split()
top_template_pdbchain = top_template_info[3]
top_template_pdb = top_template_pdbchain[:4]
top_template_chain = top_template_pdbchain[4:]
init_dict['difficulty'] = difficulty
init_dict['top_template_pdb'] = top_template_pdb
init_dict['top_template_chain'] = top_template_chain
return init_dict
|
Parse the main init.dat file which contains the modeling results
The first line of the file init.dat contains stuff like::
"120 easy 40 8"
The other lines look like this::
" 161 11.051 1 1guqA MUSTER"
and getting the first 10 gives you the top 10 templates used in modeling
Args:
infile (str): Path to init.dat
Returns:
dict: Dictionary of parsed information
|
juraj-google-style
|
def resolve_mode(self, name):
if name not in settings.CODEMIRROR_MODES:
msg = ("Given config name '{}' does not exists in "
"'settings.CODEMIRROR_MODES'.")
raise UnknowModeError(msg.format(name))
return settings.CODEMIRROR_MODES.get(name)
|
From given mode name, return mode file path from
``settings.CODEMIRROR_MODES`` map.
Arguments:
name (string): Mode name.
Raises:
UnknowModeError: When given name does not exist in
``settings.CODEMIRROR_MODES``.
Returns:
string: Mode file path.
|
juraj-google-style
|
class PatchTSMixerLinearHead(nn.Module):
def __init__(self, config: PatchTSMixerConfig, distribution_output=None):
super().__init__()
self.head_aggregation = config.head_aggregation
self.output_range = config.output_range
if config.head_aggregation is None:
mul_factor = config.num_patches
else:
mul_factor = 1
self.distribution_output = distribution_output
if distribution_output is None:
self.projection = nn.Linear(config.d_model * config.num_input_channels * mul_factor, config.num_targets)
else:
self.projection = distribution_output.get_parameter_projection(config.d_model * config.num_input_channels * mul_factor)
if config.head_aggregation is None:
self.flatten = nn.Flatten(start_dim=-3)
else:
self.flatten = nn.Flatten(start_dim=-2)
self.dropout = nn.Dropout(config.head_dropout)
def forward(self, hidden_features):
hidden_features = hidden_features.transpose(-1, -2)
if self.head_aggregation == 'use_last':
hidden_features = hidden_features[..., -1]
elif self.head_aggregation == 'max_pool':
hidden_features = hidden_features.max(dim=-1).values
elif self.head_aggregation == 'avg_pool':
hidden_features = hidden_features.mean(dim=-1)
if self.flatten:
hidden_features = self.flatten(hidden_features)
hidden_features = self.dropout(hidden_features)
hidden_features = self.projection(hidden_features)
if self.distribution_output is None and self.output_range is not None:
hidden_features = torch.sigmoid(hidden_features) * (self.output_range[1] - self.output_range[0]) + self.output_range[0]
return hidden_features
|
Linear head for Classification and Regression.
Args:
config (`PatchTSMixerConfig`):
Configuration.
|
github-repos
|
def field(*, validate: Optional[Callable[[_In], _OutT]]=None, **kwargs: Any) -> dataclasses.Field[_OutT]:
if validate is None:
return dataclasses.field(**kwargs)
else:
field_ = _Field(validate=validate, field_kwargs=kwargs)
return typing.cast(dataclasses.Field, field_)
|
Like `dataclasses.field`, but allows `validate`.
Args:
validate: A callable `(x) -> x` called each time the variable is assigned.
**kwargs: Kwargs forwarded to `dataclasses.field`
Returns:
The field.
|
github-repos
|
def for_new_graph(*args, **kwargs):
graph = tf.Graph()
with graph.as_default():
return for_default_graph(*args, **kwargs)
|
Creates a Bookkeeper for a new graph.
You must use `m.g.as_default()` to put the graph in scope:
m = Bookkeeper.for_new_graph()
with m.g.as_default():
...
Args:
*args: Arguments to pass into Bookkeeper's constructor.
**kwargs: Arguments to pass into Bookkeeper's constructor.
Returns:
A new Bookkeeper.
|
juraj-google-style
|
def progress(self):
return Progress(done=len(self._get_all_set_properties()), base=len(worker_mapping()))
|
Get progress.
Returns:
namedtuple: :class:`Progress`.
|
codesearchnet
|
def inspect_node(self, node_id):
url = self._url('/nodes/{0}', node_id)
return self._result(self._get(url), True)
|
Retrieve low-level information about a swarm node
Args:
node_id (string): ID of the node to be inspected.
Returns:
A dictionary containing data about this node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def __init__(self, match=Match(), table_id=0xff, out_port=Port.OFPP_NONE):
super().__init__()
self.match = match
self.table_id = table_id
self.out_port = out_port
|
Create an AggregateStatsRequest with the optional parameters below.
Args:
match (~pyof.v0x01.common.flow_match.Match): Fields to match.
table_id (int): ID of table to read (from pyof_table_stats) 0xff
for all tables or 0xfe for emergency.
out_port (int): Require matching entries to include this as an
output port. A value of OFPP_NONE indicates no restriction.
|
juraj-google-style
|
def expected_counts(dataframe, rownames, colnames):
cont_table = contingency_table(dataframe, rownames=rownames, colnames=colnames, margins=True)
row_counts = cont_table['All']
column_counts = cont_table.loc['All']
total_observations = cont_table['All']['All']
for column in cont_table.columns:
for row in cont_table.index:
cont_table[column][row] = column_counts[column]*row_counts[row]/total_observations
return cont_table
|
Expected counts of the multivariate frequency distribution of the variables given the
null hypothesis of complete independence between variables.
Args:
dataframe: the pandas DataFrame containing the observed data
rownames: the column name or list of column names that make the keys of the rows
colnames: the column name or list of column names that make the keys of the columns
|
juraj-google-style
|
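The expected count for each cell is `row_total * column_total / grand_total` under the independence hypothesis described above. A small self-contained NumPy sketch of that formula on a made-up 2x2 table:

```python
import numpy as np

observed = np.array([[10, 20],    # made-up contingency table
                     [30, 40]])

row_totals = observed.sum(axis=1)  # [30, 70]
col_totals = observed.sum(axis=0)  # [40, 60]
grand_total = observed.sum()       # 100

# Outer product gives row_total * col_total for every cell at once.
expected = np.outer(row_totals, col_totals) / grand_total
print(expected)
# [[12. 18.]
#  [28. 42.]]
```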
class PerceiverAudioPostprocessor(nn.Module):
def __init__(self, config: PerceiverConfig, in_channels: int, postproc_type: str='patches') -> None:
super().__init__()
if postproc_type not in ('patches',):
raise ValueError('Invalid postproc_type!')
self.classifier = nn.Linear(in_channels, config.samples_per_patch)
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, modality_sizes=None) -> torch.Tensor:
logits = self.classifier(inputs)
return torch.reshape(logits, [inputs.shape[0], -1])
|
Audio postprocessing for Perceiver. Can be used to convert the decoder output to audio features.
Args:
config ([*PerceiverConfig*]):
Model configuration.
in_channels (`int`):
Number of channels in the input.
postproc_type (`str`, *optional*, defaults to `"patches"`):
Postprocessor type to use. Currently, only "patches" is supported.
|
github-repos
|
def dataset(self, mode, hparams=None, global_step=None, **kwargs):
datasets = [p.dataset(mode, **kwargs) for p in self.problems]
datasets = [d.map((lambda x, i=j: self.normalize_example(dict(x, problem_id=tf.constant([i])), hparams))) for (j, d) in enumerate(datasets)]
if (mode is problem.DatasetSplit.TRAIN):
if (global_step is None):
global_step = tf.train.get_or_create_global_step()
pmf = get_schedule_distribution(self.schedule, global_step)
return get_multi_dataset(datasets, pmf)
elif self.only_eval_first_problem:
return datasets[0]
else:
datasets = [d.repeat() for d in datasets]
return tf.data.Dataset.zip(tuple(datasets)).flat_map((lambda *x: functools.reduce(tf.data.Dataset.concatenate, map(tf.data.Dataset.from_tensors, x))))
|
Returns a dataset containing examples from multiple problems.
Args:
mode: A member of problem.DatasetSplit.
hparams: A tf.HParams object, the model hparams.
global_step: A scalar tensor used to compute the sampling distribution.
If global_step is None, we call tf.train.get_or_create_global_step by
default.
**kwargs: Keywords for problem.Problem.Dataset.
Returns:
A dataset containing examples from multiple problems.
|
codesearchnet
|
def set_number_of_partitions(self, number_of_partitions):
if self._frozen:
if self._number_of_partitions != number_of_partitions:
raise ValueError(f"Can't set number_of_partitions to {number_of_partitions} since it has been frozen to use {self._number_of_partitions}.")
else:
self._number_of_partitions = number_of_partitions
|
Sets the number of partitions for the current policy.
If the policy has been frozen then number_of_partitions must match the
existing setting.
Args:
number_of_partitions: The number of partitions to use in the policy.
Raises:
ValueError: If the policy has been frozen and number_of_partitions
differs from the frozen value.
|
github-repos
|
def to_script(self, wf_name='wf'):
self._closed()
script = []
params = []
returns = []
for (name, typ) in self.wf_inputs.items():
params.append("{}='{}'".format(name, typ))
returns.append(name)
script.append('{} = {}.add_inputs({})'.format(', '.join(returns), wf_name, ', '.join(params)))
returns = []
for (name, step) in self.wf_steps.items():
pyname = step.python_name
returns = ['{}_{}'.format(pyname, o) for o in step['out']]
params = ['{}={}'.format(name, python_name(param)) for (name, param) in step['in'].items()]
script.append('{} = {}.{}({})'.format(', '.join(returns), wf_name, pyname, ', '.join(params)))
params = []
for (name, details) in self.wf_outputs.items():
params.append('{}={}'.format(name, python_name(details['outputSource'])))
script.append('{}.add_outputs({})'.format(wf_name, ', '.join(params)))
return '\n'.join(script)
|
Generate and return the scriptcwl script for the current workflow.
Args:
wf_name (str): string used for the WorkflowGenerator object in the
generated script (default: ``wf``).
|
codesearchnet
|
def safe_indicator(self, indicator, errors='strict'):
if (indicator is not None):
try:
indicator = quote(self.s(str(indicator), errors=errors), safe='~')
except KeyError:
indicator = quote(bytes(indicator), safe='~')
return indicator
|
Indicator encode value for safe HTTP request.
Args:
indicator (string): Indicator to URL Encode
errors (string): The error handler type.
Returns:
(string): The urlencoded string
|
codesearchnet
|
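The encoding step above boils down to `urllib.parse.quote` with `~` kept unescaped; a quick standard-library sketch with a made-up indicator value:

```python
from urllib.parse import quote

indicator = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
encoded = quote(indicator, safe='~')
print(encoded)
# Mozilla%2F5.0%20%28Windows%20NT%2010.0%3B%20Win64%3B%20x64%29
```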
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(batch, seq_len, embed_dim)`.
attention_mask (`torch.FloatTensor`):
Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
|
github-repos
|
def __solve_for_scalar(expr, vars):
var = solve(expr, vars).value
try:
scalar = repeated.getvalue(var)
except TypeError:
raise errors.EfilterTypeError(
root=expr, query=expr.source,
message="Wasn't expecting more than one value here. Got %r."
% (var,))
if isinstance(scalar, row_tuple.RowTuple):
try:
return scalar.get_singleton()
except ValueError:
raise errors.EfilterTypeError(
root=expr, query=expr.source,
message="Was expecting a scalar value here. Got %r."
% (scalar,))
else:
return scalar
|
Helper: solve 'expr' always returning a scalar (not IRepeated).
If the output of 'expr' is a single value or a single RowTuple with a single
column then return the value in that column. Otherwise raise.
Arguments:
expr: Expression to solve.
vars: The scope.
Returns:
A scalar value (not an IRepeated).
Raises:
EfilterTypeError if it cannot get a scalar.
|
juraj-google-style
|
def safe_rt(resource_type, lower=False):
if resource_type is not None:
resource_type = resource_type.replace(' ', '_')
if lower:
resource_type = resource_type.lower()
return resource_type
|
Format the Resource Type.
Takes Custom Indicator types with a space character and return a *safe* string.
(e.g. *User Agent* is converted to User_Agent or user_agent.)
Args:
resource_type (string): The resource type to format.
lower (boolean): Return type in all lower case
Returns:
(string): The formatted resource type.
|
juraj-google-style
|
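A usage sketch of the formatter above (the function body is restated so the snippet runs on its own):

```python
def safe_rt(resource_type, lower=False):
    # Replace spaces so the type is safe to use in URLs and identifiers.
    if resource_type is not None:
        resource_type = resource_type.replace(' ', '_')
        if lower:
            resource_type = resource_type.lower()
    return resource_type

print(safe_rt('User Agent'))              # User_Agent
print(safe_rt('User Agent', lower=True))  # user_agent
```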
def collection(self, collection_id):
child_path = self._path + (collection_id,)
return self._client.collection(*child_path)
|
Create a sub-collection underneath the current document.
Args:
collection_id (str): The sub-collection identifier (sometimes
referred to as the "kind").
Returns:
~.firestore_v1beta1.collection.CollectionReference: The
child collection.
|
juraj-google-style
|
def write(self, file_name):
try:
assert (file_name[(- 6):] == '.xhtml')
except (AssertionError, IndexError):
raise ValueError('filename must end with .xhtml')
with open(file_name, 'wb') as f:
f.write(self.content.encode('utf-8'))
|
Writes the chapter object to an xhtml file.
Args:
file_name (str): The full name of the xhtml file to save to.
|
codesearchnet
|
def create_autoscale_setting(access_token, subscription_id, resource_group, setting_name, vmss_name, location, minval, maxval, default, autoscale_rules, notify=None):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/microsoft.insights/autoscaleSettings/', setting_name, '?api-version=', INSIGHTS_API])
autoscale_setting = {'location': location}
profile = {'name': 'Profile1'}
capacity = {'minimum': str(minval)}
capacity['maximum'] = str(maxval)
capacity['default'] = str(default)
profile['capacity'] = capacity
profile['rules'] = autoscale_rules
profiles = [profile]
properties = {'name': setting_name}
properties['profiles'] = profiles
properties['targetResourceUri'] = ((((('/subscriptions/' + subscription_id) + '/resourceGroups/') + resource_group) + '/providers/Microsoft.Compute/virtualMachineScaleSets/') + vmss_name)
properties['enabled'] = True
if (notify is not None):
notification = {'operation': 'Scale'}
email = {'sendToSubscriptionAdministrator': False}
email['sendToSubscriptionCoAdministrators'] = False
email['customEmails'] = [notify]
notification = {'email': email}
properties['notifications'] = [notification]
autoscale_setting['properties'] = properties
body = json.dumps(autoscale_setting)
return do_put(endpoint, body, access_token)
|
Create a new autoscale setting for a scale set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
setting_name (str): Name of the autoscale setting.
vmss_name (str): Name of scale set to apply scale events to.
location (str): Azure data center location. E.g. westus.
minval (int): Minimum number of VMs.
maxval (int): Maximum number of VMs.
default (int): Default VM number when no data available.
autoscale_rules (list): List of outputs from create_autoscale_rule().
notify (str): Optional. Email address to send scale notifications to.
Returns:
HTTP response. JSON body of autoscale setting.
|
codesearchnet
|
def isPortAvailable(port='/dev/ttyUSB0'):
isPortAvailable = serial.tools.list_ports.grep(port)
try:
next(isPortAvailable)
available = True
except StopIteration:
available = False
return available
|
Checks whether specified port is available.
Source code derived from @lqdev suggestion per #38
Args:
port: Serial port location i.e. 'COM1'. Default is /dev/ttyUSB0
Returns:
available: Boolean value indicating presence of port
|
juraj-google-style
|
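A related sketch using pyserial's `list_ports` helpers (requires the third-party `pyserial` package): `comports()` enumerates all detected ports, while `grep()` filters by name the way `isPortAvailable()` does above.

```python
import serial.tools.list_ports

# List every serial port the OS currently reports.
for port_info in serial.tools.list_ports.comports():
    print(port_info.device, '-', port_info.description)

# Check a specific port the same way isPortAvailable() does.
matches = serial.tools.list_ports.grep('/dev/ttyUSB0')
print('available:', next(matches, None) is not None)
```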
def export(bundle, force=False, force_restricted=False):
if (not ckan):
raise EnvironmentError(MISSING_CREDENTIALS_MSG)
try:
ckan.action.package_create(**_convert_bundle(bundle))
except ckanapi.ValidationError:
if force:
logger.warning('{} dataset already exported, but new export forced. Continue to export dataset stuff.'.format(bundle.dataset))
else:
raise
access = bundle.dataset.config.metadata.about.access
if ((access == 'restricted') and force_restricted):
access = 'private'
assert access, 'CKAN publishing requires access level.'
if (access in ('internal', 'controlled', 'restricted', 'census')):
raise UnpublishedAccessError('{} dataset can not be published because of {} access.'.format(bundle.dataset.vid, bundle.dataset.config.metadata.about.access))
elif (access == 'public'):
user_roles = [{'user': 'visitor', 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']}, {'user': 'logged_in', 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']}]
elif (access == 'registered'):
user_roles = [{'user': 'visitor', 'domain_object': bundle.dataset.vid.lower(), 'roles': []}, {'user': 'logged_in', 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']}]
elif (access in ('private', 'licensed', 'test')):
user_roles = [{'user': 'visitor', 'domain_object': bundle.dataset.vid.lower(), 'roles': []}, {'user': 'logged_in', 'domain_object': bundle.dataset.vid.lower(), 'roles': []}]
organization_users = ckan.action.organization_show(id=CKAN_CONFIG.organization)['users']
for user in organization_users:
(user_roles.append({'user': user['id'], 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']}),)
for role in user_roles:
ckan.action.user_role_update(**role)
for partition in bundle.partitions:
ckan.action.resource_create(**_convert_partition(partition))
ckan.action.resource_create(**_convert_schema(bundle))
for (name, external) in six.iteritems(bundle.dataset.config.metadata.external_documentation):
ckan.action.resource_create(**_convert_external(bundle, name, external))
|
Exports bundle to ckan instance.
Args:
bundle (ambry.bundle.Bundle):
force (bool, optional): if True, ignore existance error and continue to export.
force_restricted (bool, optional): if True, then export restricted bundles as private (for debugging
purposes).
Raises:
EnvironmentError: if ckan credentials are missing or invalid.
UnpublishedAccessError: if dataset has unpublished access - one from ('internal', 'test',
'controlled', 'restricted', 'census').
|
codesearchnet
|
def _batch_norm_op(self, input_batch, mean, variance, use_batch_stats, stat_dtype):
if self._fused:
(batch_norm_op, mean, variance) = self._fused_batch_norm_op(input_batch, self._moving_mean, self._moving_variance, use_batch_stats)
else:
batch_norm_op = tf.nn.batch_normalization(input_batch, mean, variance, self._beta, self._gamma, self._eps, name='batch_norm')
if (input_batch.dtype.base_dtype != stat_dtype):
mean = tf.cast(mean, stat_dtype)
variance = tf.cast(variance, stat_dtype)
return (batch_norm_op, mean, variance)
|
Creates a batch normalization op.
It uses the tf.nn.batch_normalization op by default and the
tf.nn.fused_batch_norm op to support fused batch normalization.
Args:
input_batch: A input Tensor of arbitrary dimension.
mean: A mean tensor, of the same dtype as `input_batch`.
variance: A variance tensor, of the same dtype as `input_batch`.
use_batch_stats: A bool value that indicates whether the operation should
use the batch statistics.
stat_dtype: TensorFlow datatype used for the moving mean and variance.
Returns:
A batch normalization operation.
The current mean tensor, of datatype `stat_dtype`.
The current variance tensor, of datatype `stat_dtype`.
|
codesearchnet
|
def output(self, stream, value):
if stream not in self.outputs:
raise ValueError("Stream is not an output of this operator.")
e = self.expression(value)
e._stream = stream
return e
|
SPL output port assignment expression.
Arguments:
stream(Stream): Output stream the assignment is for.
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
Expression: Output assignment expression that is valid in the context of this operator.
|
juraj-google-style
|
def sentences(self, index = None):
if index is None:
return self.select(Sentence,None,True,default_ignore_structure)
else:
if index < 0:
index = self.count(Sentence,None,True,default_ignore_structure) + index
for i,e in enumerate(self.select(Sentence,None,True,default_ignore_structure)):
if i == index:
return e
raise IndexError
|
Returns a generator of Sentence elements found (recursively) under this element
Arguments:
index (int or None): If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning a generator of all elements.
|
juraj-google-style
|
def start(self, input_data, output_data, transform_resources, **kwargs):
self.transform_resources = transform_resources
self.input_data = input_data
self.output_data = output_data
image = self.primary_container['Image']
instance_type = transform_resources['InstanceType']
instance_count = 1
environment = self._get_container_environment(**kwargs)
self.container = _SageMakerContainer(instance_type, instance_count, image, self.local_session)
self.container.serve(self.primary_container['ModelDataUrl'], environment)
serving_port = get_config_value('local.serving_port', self.local_session.config) or 8080
_wait_for_serving_container(serving_port)
endpoint_url = 'http:
response, code = _perform_request(endpoint_url)
if code == 200:
execution_parameters = json.loads(response.read())
for setting in ('BatchStrategy', 'MaxPayloadInMB'):
if setting not in kwargs and setting in execution_parameters:
kwargs[setting] = execution_parameters[setting]
kwargs.update(self._get_required_defaults(**kwargs))
self.start_time = datetime.datetime.now()
self.batch_strategy = kwargs['BatchStrategy']
if 'Environment' in kwargs:
self.environment = kwargs['Environment']
self._perform_batch_inference(input_data, output_data, **kwargs)
self.end_time = datetime.datetime.now()
self.state = self._COMPLETED
|
Start the Local Transform Job
Args:
input_data (dict): Describes the dataset to be transformed and the location where it is stored.
output_data (dict): Identifies the location where to save the results from the transform job
transform_resources (dict): compute instances for the transform job. Currently only supports local or
local_gpu
**kwargs: additional arguments coming from the boto request object
|
juraj-google-style
|
def add_state(self, state_name, initial_state, batch_size=None):
state_shape = initial_state.get_shape().as_list()
full_shape = [batch_size] + state_shape
if not batch_size:
shape_proto = self._as_shape_proto([0] + state_shape)
batch_size = 1
else:
shape_proto = self._as_shape_proto([batch_size] + state_shape)
tiles = [batch_size] + ([1] * len(initial_state.get_shape()))
feed_op = tf.placeholder_with_default(
tf.tile(
tf.expand_dims(initial_state, [0]), tiles),
shape=full_shape,
name='%s_feed' % state_name)
s = {'feed_op': feed_op,
'feed_type': initial_state.dtype,
'feed_shape': shape_proto}
self._states[state_name] = s
|
Adds a state to the state saver.
Args:
state_name: The name of this state.
initial_state: The initial state vector. Only zeros are supported.
batch_size: The batch_size or None for unknown.
|
juraj-google-style
|
def from_csv(cls, filename=None, text=None):
if ((filename is None) and (text is None)):
raise LegendError('You must provide a filename or CSV text.')
if (filename is not None):
with open(filename, 'r') as f:
text = f.read()
try:
f = StringIO(text)
except TypeError:
f = StringIO(unicode(text))
r = csv.DictReader(f, skipinitialspace=True)
(list_of_Decors, components) = ([], [])
kind = 'component'
for row in r:
(d, component) = ({}, {})
for (k, v) in row.items():
if (k in [None, '']):
continue
if (v in [None, '']):
if (k.lower() not in ['color', 'colour']):
continue
if (k[:4].lower() == 'comp'):
prop = ' '.join(k.split()[1:])
if (v.lower() == 'true'):
component[prop] = True
elif (v.lower() == 'false'):
component[prop] = False
else:
try:
component[prop] = float(v)
except ValueError:
component[prop] = v.lower()
elif (k[:5].lower() == 'curve'):
prop = ' '.join(k.split()[1:])
component[prop] = v.lower()
kind = 'curve'
else:
try:
d[k] = float(v)
except ValueError:
d[k] = v.lower()
this_component = Component(component)
d[kind] = this_component
if (this_component in components):
with warnings.catch_warnings():
warnings.simplefilter('always')
w = 'This legend contains duplicate components.'
warnings.warn(w)
components.append(this_component)
list_of_Decors.append(Decor(d))
return cls(list_of_Decors)
|
Read CSV text and generate a Legend.
Args:
filename (str): Path to a CSV file (provide either this or ``text``).
text (str): The CSV text.
In the first row, list the properties. Precede the properties of the
component with 'comp ' or 'component '. For example:
colour, width, comp lithology, comp colour
#FFFFFF, 0, ,
#F7E9A6, 3, Sandstone, Grey
#FF99CC, 2, Anhydrite,
... etc
Note:
To edit a legend, the easiest thing to do is probably this:
- `legend.to_csv()`
- Edit the legend, call it `new_legend`.
- `legend = Legend.from_csv(text=new_legend)`
|
codesearchnet
|
def list_metadata(self, resource):
self.metadata_service.set_auth(self._token_metadata)
return self.metadata_service.list(resource)
|
List all keys associated with the given resource.
Args:
resource (intern.resource.boss.BossResource)
Returns:
(list)
Raises:
requests.HTTPError on a failure.
|
codesearchnet
|
def read_molden(inputfile, start_index=0, get_bonds=True):
from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian
with open(inputfile, 'r') as f:
found = False
while (not found):
line = f.readline()
if ('[N_GEO]' in line):
found = True
number_of_molecules = int(f.readline().strip())
energies = []
found = False
while (not found):
line = f.readline()
if ('energy' in line):
found = True
for _ in range(number_of_molecules):
energies.append(float(f.readline().strip()))
found = False
while (not found):
line = f.readline()
if ('[GEOMETRIES] (XYZ)' in line):
found = True
current_line = f.tell()
number_of_atoms = int(f.readline().strip())
f.seek(current_line)
cartesians = []
for energy in energies:
cartesian = Cartesian.read_xyz(f, start_index=start_index, get_bonds=get_bonds, nrows=number_of_atoms, engine='python')
cartesian.metadata['energy'] = energy
cartesians.append(cartesian)
return cartesians
|
Read a molden file.
Args:
inputfile (str):
start_index (int):
Returns:
list: A list containing :class:`~chemcoord.Cartesian` is returned.
|
codesearchnet
|
def do_patch(endpoint, body, access_token):
headers = {"content-type": "application/json", "Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.patch(endpoint, data=body, headers=headers)
|
Do an HTTP PATCH request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
body (str): JSON body of information to patch.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
|
juraj-google-style
|
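A hedged usage sketch for do_patch above; the endpoint, resource path and token are placeholders, not real values.
import json

body = json.dumps({'tags': {'environment': 'test'}})
endpoint = ('https://management.azure.com/subscriptions/<subscription-id>'
            '/resourceGroups/my-rg?api-version=2021-04-01')
response = do_patch(endpoint, body, access_token='<bearer-token>')
print(response.status_code)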
def _new_open_bin(self, remaining_rect):
factories_to_delete = set()
new_bin = None
for (key, binfac) in self._empty_bins.items():
a_rectangle_fits = False
for (_, rect) in remaining_rect.items():
if binfac.fits_inside(rect[0], rect[1]):
a_rectangle_fits = True
break
if (not a_rectangle_fits):
factories_to_delete.add(key)
continue
new_bin = binfac.new_bin()
if (new_bin is None):
continue
self._open_bins.append(new_bin)
if binfac.is_empty():
factories_to_delete.add(key)
break
for f in factories_to_delete:
del self._empty_bins[f]
return new_bin
|
Extract the next bin where at least one of the remaining
rectangles fits.
Arguments:
remaining_rect (dict): rectangles not placed yet
Returns:
PackingAlgorithm: Initialized empty packing bin.
None: No bin big enough for the rectangle was found
|
codesearchnet
|
def exchange(self, pubkey):
try:
return self.priv.exchange(c_ec.ECDH(), pubkey.publ)
except ValueError as e:
raise s_exc.BadEccExchange(mesg=str(e))
|
Perform a ECDH key exchange with a public key.
Args:
pubkey (PubKey): A PubKey to perform the ECDH with.
Returns:
bytes: The ECDH bytes. This is deterministic for a given pubkey
and private key.
|
juraj-google-style
|
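The wrapped call above is the cryptography library's ECDH exchange; a minimal standalone sketch of that underlying call (not the PriKey/PubKey wrappers themselves) looks like this.
from cryptography.hazmat.primitives.asymmetric import ec

local = ec.generate_private_key(ec.SECP256R1())
peer = ec.generate_private_key(ec.SECP256R1())

# The shared secret is deterministic for a given pair of keys.
secret = local.exchange(ec.ECDH(), peer.public_key())
assert secret == local.exchange(ec.ECDH(), peer.public_key())
print(len(secret))  # 32 bytes on a 256-bit curve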
def upload_from_url(cls, url, store=None, filename=None):
if (store is None):
store = 'auto'
elif store:
store = '1'
else:
store = '0'
data = {'source_url': url, 'store': store}
if filename:
data['filename'] = filename
result = uploading_request('POST', 'from_url/', data=data)
if ('token' not in result):
raise APIError('could not find token in result: {0}'.format(result))
file_from_url = cls.FileFromUrl(result['token'])
return file_from_url
|
Uploads file from given url and returns ``FileFromUrl`` instance.
Args:
- url (str): URL of the file to upload.
- store (Optional[bool]): Should the file be automatically stored
upon upload. Defaults to None.
- False - do not store file
- True - store file (can result in error if autostore
is disabled for project)
- None - use project settings
- filename (Optional[str]): Name of the uploaded file. If this not
specified the filename will be obtained from response headers
or source URL. Defaults to None.
Returns:
``FileFromUrl`` instance
|
codesearchnet
|
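A hedged usage sketch, assuming the classmethod above lives on pyuploadcare's File class and that project keys are already configured; the URL and filename are placeholders.
from pyuploadcare import File

file_from_url = File.upload_from_url(
    'https://example.com/road.jpg',  # placeholder source URL
    store=True,                      # store immediately instead of using project defaults
    filename='road.jpg',
)
print(file_from_url)  # FileFromUrl wrapping the upload token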
def _get_resource_view(self, resource_view):
if isinstance(resource_view, dict):
resource_view = ResourceView(resource_view, configuration=self.configuration)
if isinstance(resource_view, ResourceView):
return resource_view
raise HDXError('Type %s is not a valid resource view!' % type(resource_view).__name__)
|
Get a ResourceView object from a ResourceView object or dictionary
Args:
resource_view (Union[ResourceView,Dict]): ResourceView metadata from a ResourceView object or dictionary
Returns:
ResourceView: ResourceView object
|
juraj-google-style
|
def to_bytesize(value, default_unit=None, base=DEFAULT_BASE):
if isinstance(value, (int, float)):
return unitized(value, default_unit, base)
if (value is None):
return None
try:
if (value[(- 1)].lower() == 'b'):
value = value[:(- 1)]
unit = value[(- 1):].lower()
if unit.isdigit():
unit = default_unit
else:
value = value[:(- 1)]
return unitized(to_number(float, value), unit, base)
except (IndexError, TypeError, ValueError):
return None
|
Convert `value` to bytes, accepts notations such as "4k" to mean 4096 bytes
Args:
value (str | unicode | int | None): Number of bytes optionally suffixed by a char from UNITS
default_unit (str | unicode | None): Default unit to use for unqualified values
base (int): Base to use (usually 1024)
Returns:
(int | None): Deduced bytesize value, if possible
|
codesearchnet
|
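The unitized and to_number helpers are not shown above, so here is a self-contained sketch of the same suffix-parsing idea (a hypothetical bytesize helper with k/m/g/t units and base 1024, not the library's own implementation).
def bytesize(value, base=1024):
    units = 'kmgt'
    if isinstance(value, (int, float)):
        return int(value)
    text = str(value).strip().lower()
    if text.endswith('b'):            # accept '4kb' as well as '4k'
        text = text[:-1]
    if text and text[-1] in units:
        exponent = units.index(text[-1]) + 1
        return int(float(text[:-1]) * base ** exponent)
    try:
        return int(float(text))
    except ValueError:
        return None

print(bytesize('4k'))    # 4096
print(bytesize('1.5m'))  # 1572864
print(bytesize(512))     # 512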
def read_trailer(self):
_logger.debug('Reading chunked trailer.')
trailer_data_list = []
while True:
trailer_data = (yield from self._connection.readline())
trailer_data_list.append(trailer_data)
if (not trailer_data.strip()):
break
return b''.join(trailer_data_list)
|
Read the HTTP trailer fields.
Returns:
bytes: The trailer data.
Coroutine.
|
codesearchnet
|
def show(self, *args, **kwargs):
plt = self.get_pourbaix_plot(*args, **kwargs)
plt.show()
|
Shows the pourbaix plot
Args:
*args: args to get_pourbaix_plot
**kwargs: kwargs to get_pourbaix_plot
Returns:
None
|
codesearchnet
|
def random_square_mask(shape, fraction):
mask = np.ones(shape)
patch_area = ((shape[0] * shape[1]) * fraction)
patch_dim = int(math.floor(math.sqrt(patch_area)))
if ((patch_area == 0) or (patch_dim == 0)):
return mask
x = np.random.randint((shape[0] - patch_dim))
y = np.random.randint((shape[1] - patch_dim))
mask[x:(x + patch_dim), y:(y + patch_dim), :] = 0
return mask
|
Create a numpy array with specified shape and masked fraction.
Args:
shape: tuple, shape of the mask to create.
fraction: float, fraction of the mask area to populate with `mask_scalar`.
Returns:
numpy.array: A numpy array storing the mask.
|
codesearchnet
|
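A quick numeric check, assuming random_square_mask as defined above is in scope: with fraction=0.25 on a 32x32x3 array the patch is 16x16, so exactly a quarter of the elements are zeroed.
import numpy as np

np.random.seed(0)
mask = random_square_mask((32, 32, 3), 0.25)
print(mask.shape, (mask == 0).mean())  # (32, 32, 3) 0.25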
def non_trainable_variables(self):
return tuple(self._flatten(predicate=_is_non_trainable_variable, expand_composites=True))
|
Sequence of non-trainable variables owned by this module and its submodules.
Note: this method uses reflection to find variables on the current instance
and submodules. For performance reasons you may wish to cache the result
of calling this method if you don't expect the return value to change.
Returns:
A sequence of variables for the current module (sorted by attribute
name) followed by variables from all submodules recursively (breadth
first).
|
github-repos
|
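A small tf.Module illustration of the property above, assuming a TensorFlow version in which tf.Module exposes non_trainable_variables; the class and variable names are arbitrary.
import tensorflow as tf

class Scaler(tf.Module):
    def __init__(self):
        super().__init__()
        self.w = tf.Variable(tf.ones([3, 2]), name='w')            # trainable
        self.step = tf.Variable(0, trainable=False, name='step')   # non-trainable

m = Scaler()
print([v.name for v in m.non_trainable_variables])  # ['step:0']
print([v.name for v in m.trainable_variables])      # ['w:0']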
def Cancel(self, request, global_params=None):
config = self.GetMethodConfig('Cancel')
return self._RunMethod(config, request, global_params=global_params)
|
Requests that a job be cancelled. This call will return immediately, and the client will need to poll for the job status to see if the cancel completed successfully. Cancelled jobs may still incur costs.
Args:
request: (BigqueryJobsCancelRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(JobCancelResponse) The response message.
|
github-repos
|
def download_and_extract(path, url, input_filename, target_filename):
logging.info(('Downloading and extracting data to: %s' % path))
input_file = find_file(path, input_filename)
target_file = find_file(path, target_filename)
if (input_file and target_file):
logging.info(('Already downloaded and extracted %s.' % url))
return (input_file, target_file)
compressed_file = download_from_url(path, url)
logging.info(('Extracting %s.' % compressed_file))
with tarfile.open(compressed_file, 'r:gz') as corpus_tar:
corpus_tar.extractall(path)
input_file = find_file(path, input_filename)
target_file = find_file(path, target_filename)
if (input_file and target_file):
return (input_file, target_file)
raise OSError(('Download/extraction failed for url %s to path %s' % (url, path)))
|
Extract files from downloaded compressed archive file.
Args:
path: string directory where the files will be downloaded
url: url containing the compressed input and target files
input_filename: name of file containing data in source language
target_filename: name of file containing data in target language
Returns:
Full paths to extracted input and target files.
Raises:
OSError: if the download/extraction fails.
|
codesearchnet
|
def expand_tile(units, axis):
assert axis in (1, 2)
n_time_steps = tf.shape(units)[1]
repetitions = [1, 1, 1, 1]
repetitions[axis] = n_time_steps
return tf.tile(tf.expand_dims(units, axis), repetitions)
|
Expand and tile tensor along given axis
Args:
units: tf tensor with dimensions [batch_size, time_steps, n_input_features]
axis: axis along which expand and tile. Must be 1 or 2
|
juraj-google-style
|
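A quick eager-mode shape check, assuming expand_tile as defined above is in scope; the ops it uses also run under TF2 eager execution.
import tensorflow as tf

units = tf.random.normal([4, 7, 16])     # [batch, time_steps, features]
print(expand_tile(units, axis=2).shape)  # (4, 7, 7, 16)
print(expand_tile(units, axis=1).shape)  # (4, 7, 7, 16)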
def remove_location(self, location):
res = self._remove_hdxobject(self.data.get('groups'), location, matchon='name')
if (not res):
res = self._remove_hdxobject(self.data.get('groups'), location.upper(), matchon='name')
if (not res):
res = self._remove_hdxobject(self.data.get('groups'), location.lower(), matchon='name')
return res
|
Remove a location. The name is matched as given, then in upper case, then in lower case.
Args:
location (str): Location to remove
Returns:
bool: True if location removed or False if not
|
codesearchnet
|
def _add_dns_records(self, conf, mgmts):
nets = conf['nets']
dns_mgmt = mgmts[-1]
LOGGER.debug('Using network %s as main DNS server', dns_mgmt)
forward = conf['nets'][dns_mgmt].get('gw')
dns_records = {}
for net_name, net_spec in nets.iteritems():
dns_records.update(net_spec['mapping'].copy())
if net_name not in mgmts:
net_spec['dns_forward'] = forward
for mgmt in mgmts:
if nets[mgmt].get('dns_records'):
nets[mgmt]['dns_records'].update(dns_records)
else:
nets[mgmt]['dns_records'] = dns_records
|
Add DNS records dict('dns_records') to ``conf`` for each
management network. Add DNS forwarder IP('dns_forward') for each none
management network.
Args:
conf(spec): spec
mgmts(list): management networks names
Returns:
None
|
juraj-google-style
|
def get_answers_for_student(student_item):
submissions = sub_api.get_submissions(student_item)
if (not submissions):
return Answers()
latest_submission = submissions[0]
latest_answer_item = latest_submission.get('answer', {})
return Answers(latest_answer_item.get(ANSWER_LIST_KEY, []))
|
Retrieve answers from backend for a student and question
Args:
student_item (dict): The location of the problem this submission is
associated with, as defined by a course, student, and item.
Returns:
Answers: answers for the student
|
codesearchnet
|
def _validate_recurse_directive_types(current_schema_type, field_schema_type, context):
type_hints = context['type_equivalence_hints'].get(field_schema_type)
type_hints_inverse = context['type_equivalence_hints_inverse'].get(field_schema_type)
allowed_current_types = {field_schema_type}
if (type_hints and isinstance(type_hints, GraphQLUnionType)):
allowed_current_types.update(type_hints.types)
if (type_hints_inverse and isinstance(type_hints_inverse, GraphQLUnionType)):
allowed_current_types.update(type_hints_inverse.types)
current_scope_is_allowed = (current_schema_type in allowed_current_types)
is_implemented_interface = (isinstance(field_schema_type, GraphQLInterfaceType) and isinstance(current_schema_type, GraphQLObjectType) and (field_schema_type in current_schema_type.interfaces))
if (not any((current_scope_is_allowed, is_implemented_interface))):
raise GraphQLCompilationError(u'Edges expanded with a @recurse directive must either be of the same type as their enclosing scope, a supertype of the enclosing scope, or be of an interface type that is implemented by the type of their enclosing scope. Enclosing scope type: {}, edge type: {}'.format(current_schema_type, field_schema_type))
|
Perform type checks on the enclosing type and the recursed type for a recurse directive.
Args:
current_schema_type: GraphQLType, the schema type at the current location
field_schema_type: GraphQLType, the schema type at the inner scope
context: dict, various per-compilation data (e.g. declared tags, whether the current block
is optional, etc.). May be mutated in-place in this function!
|
codesearchnet
|
def output_shapes(self):
return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), self._element_spec)
|
Returns the shape of each component of an element of this iterator.
Returns:
A (nested) structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
|
github-repos
|
def subcomponents(self, subcomponents):
for arg in self.args:
if (arg.__class__.__name__ == 'Function'):
subcomponents.append(arg.to_string())
if (arg.function_type == 'primary'):
arg.subcomponents(subcomponents)
else:
subcomponents.append(arg.to_string())
return subcomponents
|
Generate subcomponents of the BEL subject or object.
These subcomponents are used for matching parts of a BEL
subject or object in the Edgestore.
Args:
AST
subcomponents: Pass an empty list to start a new subcomponents request
Returns:
List[str]: subcomponents of BEL subject or object
|
codesearchnet
|
def logistic(x: Union[float, np.ndarray],
k: float,
theta: float) -> Optional[float]:
if x is None or k is None or theta is None:
return None
return 1 / (1 + np.exp(-k * (x - theta)))
|
r"""
Standard logistic function.
.. math::
y = \frac {1} {1 + e^{-k (x - \theta)}}
Args:
x: :math:`x`
k: :math:`k`
theta: :math:`\theta`
Returns:
:math:`y`
|
juraj-google-style
|
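A quick numeric check, assuming logistic as defined above is in scope (with numpy and typing imported): at x = theta the exponent vanishes, so y = 0.5 regardless of k.
import numpy as np

print(logistic(2.0, k=3.0, theta=2.0))                # 0.5
print(logistic(np.array([0.0, 2.0, 4.0]), 1.0, 2.0))  # ~[0.119 0.5 0.881]
print(logistic(None, 1.0, 2.0))                       # None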
def create(self, document_data):
batch = self._client.batch()
batch.create(self, document_data)
write_results = batch.commit()
return _first_write_result(write_results)
|
Create the current document in the Firestore database.
Args:
document_data (dict): Property names and values to use for
creating a document.
Returns:
google.cloud.firestore_v1beta1.types.WriteResult: The
write result corresponding to the committed document. A write
result contains an ``update_time`` field.
Raises:
~google.cloud.exceptions.Conflict: If the document already exists.
|
codesearchnet
|
def getall(self):
matches = ROUTES_RE.findall(self.config)
routes = dict()
for match in matches:
ip_dest = match[0]
next_hop = match[1]
next_hop_ip = (None if (match[2] == '') else match[2])
distance = int(match[3])
data = {}
data['tag'] = (None if (match[4] == '') else int(match[4]))
data['route_name'] = (None if (match[5] == '') else match[5])
ip_dict = routes[ip_dest] = routes.get(ip_dest, {})
nh_dict = ip_dict[next_hop] = ip_dict.get(next_hop, {})
nhip_dict = nh_dict[next_hop_ip] = nh_dict.get(next_hop_ip, {})
nhip_dict[distance] = data
return routes
|
Return all ip routes configured on the switch as a resource dict
Returns:
dict: An dict object of static route entries in the form::
{ ip_dest:
{ next_hop:
{ next_hop_ip:
{ distance:
{ 'tag': tag,
'route_name': route_name
}
}
}
}
}
If the ip address specified does not have any associated
static routes, then None is returned.
Notes:
The keys ip_dest, next_hop, next_hop_ip, and distance in
the returned dictionary are the values of those components
of the ip route specification. If a route does not contain
a next_hop_ip, then that key value will be set as 'None'.
|
codesearchnet
|
def transformer_latent_decoder(x, encoder_output, ed_attention_bias, hparams, name=None):
with tf.variable_scope(name, default_name='transformer_latent_dec'):
batch_size = common_layers.shape_list(x)[0]
compressed_img_len = hparams.img_len // 2 ** (hparams.num_compress_steps // 2)
x = tf.reshape(x, [batch_size, compressed_img_len, (compressed_img_len * hparams.num_latents), hparams.hidden_size])
(decoder_input, _, _) = cia.prepare_decoder(x, hparams)
decoder_output = cia.transformer_decoder_layers(decoder_input, encoder_output, (hparams.num_latent_layers or hparams.num_hidden_layers), hparams, attention_type=hparams.latent_attention_type, encoder_decoder_attention_bias=ed_attention_bias, name='decoder')
decoder_output = tf.reshape(decoder_output, [batch_size, ((compressed_img_len ** 2) * hparams.num_latents), hparams.hidden_size])
return decoder_output
|
Transformer decoder over latents using latent_attention_type.
Args:
x: Tensor of shape [batch, length_q, hparams.hidden_size]. length_q is the
latent length, which is
height * width * hparams.num_latents / (2**hparams.num_compress_steps).
encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
ed_attention_bias: Tensor which broadcasts with shape [batch,
hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
hparams: HParams.
name: string, variable scope.
Returns:
Tensor of shape [batch, length_q, hparams.hidden_size].
|
codesearchnet
|
def _get_row_fetcher(self, start_row=0, max_rows=None, page_size=_DEFAULT_PAGE_SIZE):
if (not start_row):
start_row = 0
elif (start_row < 0):
if (self.length >= 0):
start_row += self.length
else:
raise Exception('Cannot use negative indices for table of unknown length')
schema = self.schema._bq_schema
name_parts = self._name_parts
def _retrieve_rows(page_token, count):
page_rows = []
if (max_rows and (count >= max_rows)):
page_token = None
else:
if (max_rows and (page_size > (max_rows - count))):
max_results = (max_rows - count)
else:
max_results = page_size
try:
if page_token:
response = self._api.tabledata_list(name_parts, page_token=page_token, max_results=max_results)
else:
response = self._api.tabledata_list(name_parts, start_index=start_row, max_results=max_results)
except Exception as e:
raise e
page_token = (response['pageToken'] if ('pageToken' in response) else None)
if ('rows' in response):
page_rows = response['rows']
rows = []
for row_dict in page_rows:
rows.append(_parser.Parser.parse_row(schema, row_dict))
return (rows, page_token)
return _retrieve_rows
|
Get a function that can retrieve a page of rows.
The function returned is a closure so that it can have a signature suitable for use
by Iterator.
Args:
start_row: the row to start fetching from; default 0.
max_rows: the maximum number of rows to fetch (across all calls, not per-call). Default
is None which means no limit.
page_size: the maximum number of results to fetch per page; default 1024.
Returns:
A function that can be called repeatedly with a page token and running count, and that
will return an array of rows and a next page token; when the returned page token is None
the fetch is complete.
|
codesearchnet
|
def add_key_value(self, key, value):
key = self._metadata_map.get(key, key)
if key in ['dateAdded', 'lastModified']:
self._data[key] = self._utils.format_datetime(value, date_format='%Y-%m-%dT%H:%M:%SZ')
elif key == 'confidence':
self._data[key] = int(value)
elif key == 'rating':
self._data[key] = float(value)
elif key == 'unique_id':
self._unique_id = quote_plus(value)
else:
self._data[key] = value
|
Converts the value and adds it as a data field.
Args:
key: the field name to set (mapped through the metadata map).
value: the value to convert and store on the data field.
|
juraj-google-style
|
def has_attribute(self, attribute: str) -> bool:
return any([
key_node.value == attribute for key_node, _ in self.yaml_node.value
])
|
Whether the node has an attribute with the given name.
Use only if is_mapping() returns True.
Args:
attribute: The name of the attribute to check for.
Returns:
True iff the attribute is present.
|
juraj-google-style
|
def _gal2idx(self, gal):
l = coordinates.Longitude(gal.l, wrap_angle=(180.0 * units.deg))
j = (self._inv_pix_scale * (l.deg - self._l_bounds[0])).astype('i4')
k = (self._inv_pix_scale * (gal.b.deg - self._b_bounds[0])).astype('i4')
idx = ((((j < 0) | (j >= self._shape[0])) | (k < 0)) | (k >= self._shape[1]))
if np.any(idx):
j[idx] = (- 1)
k[idx] = (- 1)
return (j, k, (~ idx))
|
Converts from Galactic coordinates to pixel indices.
Args:
gal (:obj:`astropy.coordinates.SkyCoord`): Galactic coordinates. Must
store an array of coordinates (i.e., not be scalar).
Returns:
``j, k, mask`` - Pixel indices of the coordinates, as well as a mask
of in-bounds coordinates. Outputs have the same shape as the input
coordinates.
|
codesearchnet
|
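A standalone sketch of the same binning arithmetic with made-up bounds and pixel scale (not the map class internals): index = (coordinate - lower bound) / pixel scale, with out-of-range pixels flagged.
import numpy as np

l_bound, b_bound = -180.0, -90.0   # assumed lower bounds of the map, degrees
inv_pix_scale = 1.0 / 0.5          # assumed 0.5-degree pixels
shape = (720, 360)                 # (l pixels, b pixels)

l = np.array([-179.9, 0.0, 10.3])  # Galactic longitude, degrees
b = np.array([-89.9, 0.0, 45.7])   # Galactic latitude, degrees

j = (inv_pix_scale * (l - l_bound)).astype('i4')
k = (inv_pix_scale * (b - b_bound)).astype('i4')
out = (j < 0) | (j >= shape[0]) | (k < 0) | (k >= shape[1])
j[out], k[out] = -1, -1
print(j, k, ~out)  # [  0 360 380] [  0 180 271] [ True  True  True]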
def deleted(self, main_type, sub_type, deleted_since, owner=None, filters=None, params=None):
params = params or {}
if filters and filters.filters:
params['filters'] = filters.filters_string
if owner:
params['owner'] = owner
if deleted_since:
params['deleteSince'] = deleted_since
if not sub_type:
url = '/v2/{}/deleted'.format(main_type)
else:
url = '/v2/{}/{}/deleted'.format(main_type, sub_type)
return self.tcex.session.get(url, params=params)
|
Retrieve items deleted since a given date.
Args:
main_type (str): API main type used to build the URL (/v2/{main_type}/deleted).
sub_type (str): Optional API sub type (/v2/{main_type}/{sub_type}/deleted).
deleted_since (str): Only return items deleted since this date.
owner (str): Optional owner name, passed as the owner parameter.
filters: Optional filters object; its filters_string is passed as the filters parameter.
params (dict): Optional additional query parameters.
Returns:
The requests Response from the deleted endpoint.
|
juraj-google-style
|
def raw_filter(self, filters):
return SearchResult(self, self._api.get(self._href, **{'filter[]': filters}))
|
Sends all filters to the API.
Nothing fancy, just a wrapper; any advanced functionality should be implemented as a separate method.
Args:
filters: List of filters (strings)
Returns: :py:class:`SearchResult`
|
codesearchnet
|