code | docstring | source |
---|---|---|
def buy(self, product_id, order_type, **kwargs):
return self.place_order(product_id, 'buy', order_type, **kwargs)
|
Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (e.g. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
|
codesearchnet
|
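Usage sketch (not part of the dataset row above): the wrapper pattern simply forwards **kwargs to a generic place_order call. The TinyClient class below is a hypothetical stand-in for the cbpro client, shown only to illustrate the forwarding.

# Minimal sketch, assuming a hypothetical client class; a real client would
# POST the payload to the exchange API instead of returning it.
class TinyClient:
    def place_order(self, product_id, side, order_type, **kwargs):
        return {"product_id": product_id, "side": side,
                "type": order_type, **kwargs}

    def buy(self, product_id, order_type, **kwargs):
        # Backwards-compatible wrapper: just forwards everything.
        return self.place_order(product_id, "buy", order_type, **kwargs)

print(TinyClient().buy("BTC-USD", "limit", price="10000.00", size="0.01"))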
def download_image(self, handle, dest):
with log_utils.LogTask(('Download image %s' % handle), logger=LOGGER):
self.open_url(url=handle, dest=dest)
self.extract_image_xz(dest)
|
Downloads the image from the HTTP server
Args:
handle (str): url from the `self.baseurl` to the remote template
dest (str): path to store the downloaded image to; must be a file
path
Returns:
None
|
codesearchnet
|
def discover(package, cls_match_func):
matched_classes = set()
for _, module_name, _ in pkgutil.walk_packages(
package.__path__,
prefix=package.__name__ + '.',
):
module = __import__(module_name, fromlist=[str('__trash')], level=0)
for _, imported_class in inspect.getmembers(module, inspect.isclass):
if imported_class.__module__ != module.__name__:
continue
if cls_match_func(imported_class):
matched_classes.add(imported_class)
return matched_classes
|
Returns a set of classes in the package matched by cls_match_func
Args:
package - A Python package
cls_match_func - Function taking a class and returning True if the
class is to be included in the output.
|
juraj-google-style
|
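The discovery pattern above can be exercised against any package that exposes __path__. Below is a self-contained sketch re-implementing the same walk/import/match loop and running it on the standard-library json package; it is an illustration, not the original module.

# Minimal runnable sketch of the walk/import/match pattern.
import inspect
import pkgutil
import json

def discover(package, cls_match_func):
    matched = set()
    for _, module_name, _ in pkgutil.walk_packages(
            package.__path__, prefix=package.__name__ + '.'):
        module = __import__(module_name, fromlist=['__trash'], level=0)
        for _, cls in inspect.getmembers(module, inspect.isclass):
            # Keep only classes defined in this module that pass the filter.
            if cls.__module__ == module.__name__ and cls_match_func(cls):
                matched.add(cls)
    return matched

# Collect every exception class defined inside the json package.
print(discover(json, lambda cls: issubclass(cls, Exception)))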
def _get_row_partition_type_tensor_pairs(rt_input):
partitions = rt_input._nested_row_partitions
tail = [_get_row_partition_type_tensor_pairs_tail(x) for x in partitions[1:]]
if partitions[0]._value_rowids is not None:
return [('FIRST_DIM_SIZE', partitions[0].nrows()), ('VALUE_ROWIDS', partitions[0].value_rowids())] + tail
else:
return [('ROW_SPLITS', partitions[0].row_splits())] + tail
|
Gets a list of the row partitions for rt_input.
If value_rowids are defined, then they are used. Otherwise, row_splits
are used. If the outermost level has value_rowids defined, then nrows is
also added.
Args:
rt_input: a ragged tensor.
Returns:
A list of (row_partition_type, row_partition_tensor) pairs.
|
github-repos
|
def get_details(self, ids):
if isinstance(ids, list):
if (len(ids) > 5):
ids = ids[:5]
id_param = (';'.join(ids) + '/')
else:
ids = str(ids)
id_param = (ids + '/')
(header, content) = self._http_request(id_param)
resp = json.loads(content)
if (not self._is_http_response_ok(header)):
error = resp.get('error_message', 'Unknown Error')
raise HttpException(header.status, header.reason, error)
return resp
|
Locu Venue Details API call wrapper.
Args:
ids: id or list of ids of particular venues to get details about. Can process up to 5 ids.
|
codesearchnet
|
def __init__(self, scaffold=None, master='', config=None, max_wait_secs=30 * 60):
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
self._max_wait_secs = max_wait_secs
|
Initializes a worker session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
max_wait_secs: Maximum time to wait for the session to become available.
|
github-repos
|
def stringize(
self,
rnf_profile=RnfProfile(),
):
sorted_segments = sorted(self.segments,
key=lambda x: (
x.genome_id * (10 ** 23) +
x.chr_id * (10 ** 21) +
(x.left + (int(x.left == 0) * x.right - 1)) * (10 ** 11) +
x.right * (10 ** 1) +
int(x.direction == "F")
)
)
segments_strings = [x.stringize(rnf_profile) for x in sorted_segments]
read_tuple_name = "__".join(
[
self.prefix,
format(self.read_tuple_id, 'x').zfill(rnf_profile.read_tuple_id_width),
",".join(segments_strings),
self.suffix,
]
)
return read_tuple_name
|
Create RNF representation of this read.
Args:
rnf_profile (RnfProfile): RNF profile carrying the expected string widths
(read tuple ID, genome ID, chromosome ID, coordinates).
|
juraj-google-style
|
def implicit_static(cls, for_type=None, for_types=None):
for type_ in cls.__get_type_args(for_type, for_types):
implementations = {}
for function in cls.required():
method = getattr(type_, function.__name__, None)
if not callable(method):
raise TypeError(
"%s.implicit invokation on type %r is missing instance "
"method %r."
% (cls.__name__, type_, function.__name__))
implementations[function] = method
for function in cls.optional():
method = getattr(type_, function.__name__, None)
if callable(method):
implementations[function] = method
return cls.implement(for_type=type_,
implementations=implementations)
|
Automatically generate implementations for a type.
Implement the protocol for the 'for_type' type by dispatching each
member function of the protocol to an instance method of the same name
declared on the type 'for_type'.
Arguments:
for_type: The type to implicitly implement the protocol with.
Raises:
TypeError if not all implementations are provided by 'for_type'.
|
juraj-google-style
|
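To illustrate the dispatch idea behind implicit_static, the hedged sketch below looks up each required protocol method by name on a concrete type and raises if one is missing; the Sized protocol here is invented for the example and is not part of the original library.

# Minimal sketch of name-based protocol dispatch, assuming a made-up protocol.
class Sized:
    REQUIRED = ("__len__",)
    OPTIONAL = ("__contains__",)

    @classmethod
    def implicit_static(cls, for_type):
        implementations = {}
        for name in cls.REQUIRED:
            method = getattr(for_type, name, None)
            if not callable(method):
                raise TypeError("%s is missing instance method %r."
                                % (for_type, name))
            implementations[name] = method
        for name in cls.OPTIONAL:
            method = getattr(for_type, name, None)
            if callable(method):
                implementations[name] = method
        return implementations

print(sorted(Sized.implicit_static(list)))   # ['__contains__', '__len__']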
def generate_pb_config(pb_id: str, pb_config: dict=None, workflow_config: dict=None) -> dict:
if (workflow_config is None):
workflow_config = dict()
if (pb_config is None):
pb_config = dict()
pb_type = pb_config.get('type', choice(PB_TYPES))
workflow_id = workflow_config.get('id')
if (workflow_id is None):
if (pb_type == 'offline'):
workflow_id = choice(OFFLINE_WORKFLOWS)
else:
workflow_id = choice(REALTIME_WORKFLOWS)
workflow_version = workflow_config.get('version', generate_version())
workflow_parameters = workflow_config.get('parameters', dict())
pb_data = dict(id=pb_id, version=__pb_version__, type=pb_type, priority=pb_config.get('priority', randint(0, 10)), dependencies=pb_config.get('dependencies', []), resources_required=pb_config.get('resources_required', []), workflow=dict(id=workflow_id, version=workflow_version, parameters=workflow_parameters))
return pb_data
|
Generate a PB configuration dictionary.
Args:
pb_id (str): Processing Block Id
pb_config (dict, optional): PB configuration.
workflow_config (dict, optional): Workflow configuration.
Returns:
dict: PB configuration dictionary.
|
codesearchnet
|
class InputExample:
example_id: str
question: str
contexts: list[str]
endings: list[str]
label: Optional[str]
|
A single training/test example for multiple choice
Args:
example_id: Unique id for the example.
question: string. The untokenized text of the second sequence (question).
contexts: list of str. The untokenized text of the first sequence (context of corresponding question).
endings: list of str. multiple choice's options. Its length must be equal to contexts' length.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
|
github-repos
|
def training_loop_hparams_from_scoped_overrides(scoped_overrides, trial_id):
trial_hp_overrides = scoped_overrides.values()
loop_hp = create_loop_hparams()
model_hp_name = trial_hp_overrides.get(
"loop.generative_model_params", loop_hp.generative_model_params)
model_hp = registry.hparams(model_hp_name).parse(FLAGS.hparams)
base_algo_params_name = trial_hp_overrides.get(
"loop.base_algo_params", loop_hp.base_algo_params)
algo_hp = registry.hparams(base_algo_params_name)
combined_hp = merge_unscoped_hparams(
zip(HP_SCOPES, [loop_hp, model_hp, algo_hp]))
combined_hp.override_from_dict(trial_hp_overrides)
loop_hp, model_hp, algo_hp = (
split_scoped_hparams(HP_SCOPES, combined_hp))
model_hp_name = "model_hp_%s" % str(trial_id)
dynamic_register_hparams(model_hp_name, model_hp)
loop_hp.generative_model_params = model_hp_name
algo_hp_name = "algo_hp_%s" % str(trial_id)
dynamic_register_hparams(algo_hp_name, algo_hp)
loop_hp.base_algo_params = algo_hp_name
return loop_hp
|
Create HParams suitable for training loop from scoped HParams.
Args:
scoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These
parameters are overrides for the base HParams created by
create_loop_hparams.
trial_id: str, trial identifier. This is used to register unique HParams
names for the underlying model and ppo HParams.
Returns:
HParams suitable for passing to training_loop.
|
juraj-google-style
|
def _is_apk_install_success(stdout: bytes, stderr: str) -> bool:
if utils.grep('Failure', stdout):
return False
return any([not stderr, stderr == 'Success', 'waiting for device' in stderr])
|
Checks output of the adb install command and decides if install succeeded.
Args:
stdout: bytes, the standard out output of an adb install command.
stderr: str, the standard error output of an adb install command.
Returns:
True if the installation succeeded; False otherwise.
|
github-repos
|
def remove_duplicate_sg(security_groups):
for each_sg, duplicate_sg_name in SECURITYGROUP_REPLACEMENTS.items():
if each_sg in security_groups and duplicate_sg_name in security_groups:
LOG.info('Duplicate SG found. Removing %s in favor of %s.', duplicate_sg_name, each_sg)
security_groups.remove(duplicate_sg_name)
return security_groups
|
Removes duplicate Security Groups that share the same name alias
Args:
security_groups (list): A list of security group ids to compare against SECURITYGROUP_REPLACEMENTS
Returns:
security_groups (list): A list of security groups with duplicate aliases removed
|
juraj-google-style
|
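A small runnable demo of the duplicate-removal rule follows; the replacement mapping used here is a made-up example, not the real SECURITYGROUP_REPLACEMENTS.

# Hypothetical replacement mapping for illustration only.
SECURITYGROUP_REPLACEMENTS = {'sg_app': 'legacy_app_sg'}

def remove_duplicate_sg(security_groups):
    # Drop the legacy alias whenever the preferred name is also present.
    for keep, duplicate in SECURITYGROUP_REPLACEMENTS.items():
        if keep in security_groups and duplicate in security_groups:
            security_groups.remove(duplicate)
    return security_groups

print(remove_duplicate_sg(['sg_app', 'legacy_app_sg', 'sg_db']))
# ['sg_app', 'sg_db']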
def running_instances(self, context, process_name):
handle = (id(context), process_name)
        it = self.processes.get(handle, {}).values()
entries = [x for x in it if x[0].poll() is None]
return entries
|
Get a list of running instances.
Args:
context (`ResolvedContext`): Context the process is running in.
process_name (str): Name of the process.
Returns:
List of (`subprocess.Popen`, start-time) 2-tuples, where start_time
is the epoch time the process was added.
|
juraj-google-style
|
def _FormatSocketUnixToken(self, token_data):
protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.socket_family, 'UNKNOWN')
return {'protocols': protocol, 'family': token_data.socket_family, 'path': token_data.socket_path}
|
Formats an Unix socket token as a dictionary of values.
Args:
token_data (bsm_token_data_sockunix): AUT_SOCKUNIX token data.
Returns:
dict[str, str]: token values.
|
codesearchnet
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
backup_alias_map = self._GetDataTypeMap('timemachine_backup_alias')
destinations = match.get('Destinations', [])
for destination in destinations:
backup_alias_data = destination.get('BackupAlias', b'')
try:
backup_alias = self._ReadStructureFromByteStream(
backup_alias_data, 0, backup_alias_map)
alias = backup_alias.string
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse backup alias value with error: {0!s}'.format(
exception))
alias = 'Unknown alias'
destination_identifier = (
destination.get('DestinationID', None) or 'Unknown device')
event_data = plist_event.PlistTimeEventData()
event_data.desc = 'TimeMachine Backup in {0:s} ({1:s})'.format(
alias, destination_identifier)
event_data.key = 'item/SnapshotDates'
event_data.root = '/Destinations'
snapshot_dates = destination.get('SnapshotDates', [])
for datetime_value in snapshot_dates:
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extracts relevant TimeMachine entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
|
juraj-google-style
|
def _run_graph(self, device, input_shape, variable, num_inputs, axis, grad, num_iters):
graph = ops.Graph()
with graph.as_default():
outputs = build_graph(device, input_shape, variable, num_inputs, axis, grad)
config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(optimizer_options=config_pb2.OptimizerOptions(opt_level=config_pb2.OptimizerOptions.L0)))
with session_lib.Session(graph=graph, config=config) as session:
variables.global_variables_initializer().run()
_ = session.run(outputs)
start_time = time.time()
for _ in range(num_iters):
_ = session.run(outputs)
duration = time.time() - start_time
            print('%s shape:%d/%d var: %r' % (device, input_shape[0], input_shape[1], variable))
name_template = 'concat_bench_{device}_input_shape_{shape}_variable_{variable}_num_inputs_{num_inputs}_axis_{axis}_grad_{grad}'
self.report_benchmark(name=name_template.format(device=device, num_inputs=num_inputs, variable=variable, grad=grad, shape=str(input_shape).replace(' ', ''), axis=str(axis), iters=num_iters))
return duration
|
Run the graph and print its execution time.
Args:
device: string, the device to run on.
input_shape: shape of the input tensors.
variable: whether or not the input shape should be fixed
num_inputs: the number of inputs to concat
axis: axis to be concat'ed
grad: if True compute the gradient
num_iters: number of steps to run.
Returns:
The duration of the run in seconds.
|
github-repos
|
def wait_for_boot_completion(self, timeout=DEFAULT_TIMEOUT_BOOT_COMPLETION_SECOND):
timeout_start = time.time()
self.adb.wait_for_device(timeout=timeout)
while (time.time() < (timeout_start + timeout)):
try:
if self.is_boot_completed():
return
except adb.AdbError:
pass
time.sleep(5)
raise DeviceError(self, 'Booting process timed out')
|
Waits for Android framework to broadcast ACTION_BOOT_COMPLETED.
This function times out after 15 minutes.
Args:
timeout: float, the number of seconds to wait before timing out.
If not specified, defaults to 15 minutes.
|
codesearchnet
|
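The boot-wait loop above is an instance of the generic poll-until-ready-with-timeout pattern. The sketch below shows that pattern without any adb dependency; is_ready is a placeholder standing in for is_boot_completed(), and OSError stands in for transient adb errors.

# Generic poll-until-ready sketch; names are assumptions, not the Mobly API.
import time

def wait_until(is_ready, timeout=900, interval=5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            if is_ready():
                return
        except OSError:
            # Transient errors are swallowed and the poll retries.
            pass
        time.sleep(interval)
    raise TimeoutError('Booting process timed out')

wait_until(lambda: True)   # returns immediately in this toy case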
def Open(self, file_object):
if not file_object:
raise ValueError('Missing file-like object.')
file_object.seek(0, os.SEEK_SET)
data = file_object.read(len(self._HEADER_SIGNATURE))
if data != self._HEADER_SIGNATURE:
file_object.close()
raise IOError('Unsupported SQLite database signature.')
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
self._temp_file_path = temp_file.name
while data:
temp_file.write(data)
data = file_object.read(self._COPY_BUFFER_SIZE)
self._connection = sqlite3.connect(self._temp_file_path)
self._connection.text_factory = bytes
self._cursor = self._connection.cursor()
|
Opens the database file object.
Args:
file_object (FileIO): file-like object.
Raises:
IOError: if the SQLite database signature does not match.
OSError: if the SQLite database signature does not match.
ValueError: if the file-like object is invalid.
|
juraj-google-style
|
def work_model_factory(*, validator=validators.is_work_model, **kwargs):
kwargs['ld_type'] = 'AbstractWork'
return _model_factory(validator=validator, **kwargs)
|
Generate a Work model.
Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
as keyword arguments.
Raises:
:exc:`ModelError`: If a non-'AbstractWork' ``ld_type`` keyword
argument is given.
|
codesearchnet
|
def _get_head_block(self, request):
if request.head_id:
if (self._id_regex.fullmatch(request.head_id) is None):
LOGGER.debug('Invalid head id requested: %s', request.head_id)
raise _ResponseFailed(self._status.NO_ROOT)
try:
return self._block_store[request.head_id]
except KeyError as e:
LOGGER.debug('Unable to find block "%s" in store', e)
raise _ResponseFailed(self._status.NO_ROOT)
else:
return self._get_chain_head()
|
Fetches the head block specified in the request, or the chain head.
Note:
This method will fail if `_block_store` has not been set
Args:
request (object): The parsed protobuf request object
Returns:
Block: the block object at the head of the requested chain
Raises:
ResponseFailed: Failed to retrieve a head block
|
codesearchnet
|
def non_deterministic_ints(shape, dtype=dtypes.int64):
return gen_stateful_random_ops.non_deterministic_ints(shape=shape, dtype=dtype)
|
Non-deterministically generates some integers.
This op may use some OS-provided source of non-determinism (e.g. an RNG), so
each execution will give different results.
Args:
shape: the shape of the result.
dtype: (optional) the dtype of the result.
Returns:
a tensor whose element values are non-deterministically chosen.
|
github-repos
|
def from_file_obj(cls, fp):
log.debug('Parsing email from file object')
try:
fp.seek(0)
except IOError:
pass
finally:
s = fp.read()
return cls.from_string(s)
|
Init a new object from a file-like object.
Not for Outlook msg.
Args:
fp (file-like object): file-like object of raw email
Returns:
Instance of MailParser
|
codesearchnet
|
def find_library_windows(cls):
dll = cls.get_appropriate_windows_sdk_name() + '.dll'
root = 'C:\\'
for d in os.listdir(root):
dir_path = os.path.join(root, d)
if d.startswith('Program Files') and os.path.isdir(dir_path):
dir_path = os.path.join(dir_path, 'SEGGER')
if not os.path.isdir(dir_path):
continue
ds = filter(lambda x: x.startswith('JLink'), os.listdir(dir_path))
for jlink_dir in ds:
lib_path = os.path.join(dir_path, jlink_dir, dll)
if os.path.isfile(lib_path):
yield lib_path
|
Loads the SEGGER DLL from the windows installation directory.
On Windows, these are found either under:
- ``C:\\Program Files\\SEGGER\\JLink``
- ``C:\\Program Files (x86)\\SEGGER\\JLink``.
Args:
cls (Library): the ``Library`` class
Returns:
The paths to the J-Link library files in the order that they are
found.
|
juraj-google-style
|
def decrypt(key, ciphertext):
key = ''.join(key)
alphabet = string.ascii_letters
cipher_alphabet = (key.lower() + key.upper())
return ciphertext.translate(str.maketrans(cipher_alphabet, alphabet))
|
Decrypt Simple Substitution enciphered ``ciphertext`` using ``key``.
Example:
>>> decrypt("PQSTUVWXYZCODEBRAKINGFHJLM", "XUOOB")
HELLO
Args:
key (iterable): The key to use
ciphertext (str): The text to decrypt
Returns:
Decrypted ciphertext
|
codesearchnet
|
def upgrade_code(self):
if (not self.__squid):
return ''
have_scan_key = '{0}\\{1}\\{2}'.format(self.__reg_hive, self.__reg_upgradecode_path, self.__reg_32bit)
if ((not self.__upgrade_codes) or (self.__reg_key_guid not in self.__upgrade_codes)):
try:
uc_handle = win32api.RegOpenKeyEx(getattr(win32con, self.__reg_hive), self.__reg_upgradecode_path, 0, (win32con.KEY_READ | self.__reg_32bit_access))
except pywintypes.error as exc:
if (exc.winerror == winerror.ERROR_FILE_NOT_FOUND):
log.warning('Not Found %s\\%s 32bit %s', self.__reg_hive, self.__reg_upgradecode_path, self.__reg_32bit)
return ''
raise
(squid_upgrade_code_all, _, _, suc_pytime) = zip(*win32api.RegEnumKeyEx(uc_handle))
if ((have_scan_key in self.__upgrade_code_have_scan) and (self.__upgrade_code_have_scan[have_scan_key] == (squid_upgrade_code_all, suc_pytime))):
log.debug('Scan skipped for upgrade codes, no changes (%s)', have_scan_key)
return ''
log.debug('Scan for upgrade codes (%s) for product codes', have_scan_key)
for upgrade_code_squid in squid_upgrade_code_all:
upgrade_code_guid = self.__squid_to_guid(upgrade_code_squid)
pc_handle = win32api.RegOpenKeyEx(uc_handle, upgrade_code_squid, 0, (win32con.KEY_READ | self.__reg_32bit_access))
(_, pc_val_count, _) = win32api.RegQueryInfoKey(pc_handle)
for item_index in range(pc_val_count):
product_code_guid = self.__squid_to_guid(win32api.RegEnumValue(pc_handle, item_index)[0])
if product_code_guid:
self.__upgrade_codes[product_code_guid] = upgrade_code_guid
win32api.RegCloseKey(pc_handle)
win32api.RegCloseKey(uc_handle)
self.__upgrade_code_have_scan[have_scan_key] = (squid_upgrade_code_all, suc_pytime)
return self.__upgrade_codes.get(self.__reg_key_guid, '')
|
For installers which follow the Microsoft Installer standard, returns
the ``Upgrade code``.
Returns:
value (str): ``Upgrade code`` GUID for installed software.
|
codesearchnet
|
def get_arguments(context):
context.assert_key_has_value(key='pype', caller=__name__)
pype = context.get_formatted('pype')
try:
pipeline_name = pype['name']
if (pipeline_name is None):
raise KeyInContextHasNoValueError("pypyr.steps.pype ['pype']['name'] exists but is empty.")
except KeyError as err:
raise KeyNotInContextError("pypyr.steps.pype missing 'name' in the 'pype' context item. You need to specify the pipeline name to run another pipeline.") from err
use_parent_context = pype.get('useParentContext', True)
pipe_arg = pype.get('pipeArg', None)
skip_parse = pype.get('skipParse', True)
raise_error = pype.get('raiseError', True)
loader = pype.get('loader', None)
return (pipeline_name, use_parent_context, pipe_arg, skip_parse, raise_error, loader)
|
Parse arguments for pype from context and assign default values.
Args:
context: pypyr.context.Context. context is mandatory.
Returns:
tuple (pipeline_name, #str
use_parent_context, #bool
pipe_arg, #str
skip_parse, #bool
raise_error, #bool
loader #str
)
Raises:
pypyr.errors.KeyNotInContextError: if ['pype']['name'] is missing.
pypyr.errors.KeyInContextHasNoValueError: if ['pype']['name'] exists but
is None.
|
codesearchnet
|
def get_protocol_version(protocol=None, target=None):
target = get_py_internals(target)
if protocol is None:
protocol = target['pickle_default_protocol']
if protocol > cPickle.HIGHEST_PROTOCOL:
warnings.warn('Downgrading pickle protocol, running python supports up to %d.' % cPickle.HIGHEST_PROTOCOL)
protocol = cPickle.HIGHEST_PROTOCOL
target_highest_protocol = target['pickle_highest_protocol']
if protocol > target_highest_protocol:
warnings.warn('Downgrading pickle protocol, target python supports up to %d.' % target_highest_protocol)
protocol = target_highest_protocol
return protocol
|
Return a suitable pickle protocol version for a given target.
Arguments:
protocol(None or int): The requested protocol version (or None for the
default of the target python version).
target: The internals description of the targeted python
version. If this is ``None`` the specification of the currently
running python version will be used.
Returns:
int: A suitable pickle protocol version.
|
juraj-google-style
|
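A simplified, runnable sketch of the clamping logic for the running interpreter only (the original also consults a target-python description, which is omitted here):

# Minimal sketch: clamp a requested protocol to what this interpreter supports.
import pickle
import warnings

def clamp_protocol(protocol=None):
    if protocol is None:
        protocol = pickle.DEFAULT_PROTOCOL
    if protocol > pickle.HIGHEST_PROTOCOL:
        warnings.warn('Downgrading pickle protocol, running python supports '
                      'up to %d.' % pickle.HIGHEST_PROTOCOL)
        protocol = pickle.HIGHEST_PROTOCOL
    return protocol

print(clamp_protocol(99))   # clamped to pickle.HIGHEST_PROTOCOL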
def preprocess_input(x, data_format=None):
return x
|
A placeholder method for backward compatibility.
The preprocessing logic has been included in the convnext model
implementation. Users are no longer required to call this method to
normalize the input data. This method does nothing and only kept as a
placeholder to align the API surface between old and new version of model.
Args:
x: A floating point `numpy.array` or a tensor.
data_format: Optional data format of the image tensor/array. Defaults to
None, in which case the global setting
`keras.backend.image_data_format()` is used
(unless you changed it, it defaults to `"channels_last"`).{mode}
Returns:
Unchanged `numpy.array` or tensor.
|
github-repos
|
def generate_packer_filename(provider, region, builder):
filename = '{0}_{1}_{2}.json'.format(provider, region, builder)
return filename
|
Generate a filename to be used by packer.
Args:
provider (str): Name of Spinnaker provider.
region (str): Name of provider region to use.
builder (str): Name of builder process type.
Returns:
str: Generated filename based on parameters.
|
codesearchnet
|
def get_ip_address_country(ip_address, parallel=False):
def download_country_database(location="GeoLite2-Country.mmdb"):
if parallel:
logging.warning("Cannot download GeoIP database in parallel mode")
return
url = "https:
"GeoLite2-Country.tar.gz"
headers = {"User-Agent": USER_AGENT}
original_filename = "GeoLite2-Country.mmdb"
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
tar_bytes = response.content
tar_file = tarfile.open(fileobj=BytesIO(tar_bytes), mode="r:gz")
tar_dir = tar_file.getnames()[0]
tar_path = "{0}/{1}".format(tar_dir, original_filename)
tar_file.extract(tar_path)
shutil.move(tar_path, location)
shutil.rmtree(tar_dir)
except Exception as e:
logger.warning("Error downloading {0}: {1}".format(url,
e.__str__()))
system_paths = [
"GeoLite2-Country.mmdb",
"/usr/local/share/GeoIP/GeoLite2-Country.mmdb",
"/usr/share/GeoIP/GeoLite2-Country.mmdb",
"/var/lib/GeoIP/GeoLite2-Country.mmdb",
"/var/local/lib/GeoIP/GeoLite2-Country.mmdb",
"C:\\GeoIP\\GeoLite2-Country.mmdb"
]
db_path = None
for system_path in system_paths:
if os.path.exists(system_path):
db_path = system_path
break
if db_path is None:
db_path = os.path.join(tempdir, "GeoLite2-Country.mmdb")
if not os.path.exists(db_path):
download_country_database(db_path)
if not os.path.exists(db_path):
return None
else:
db_age = datetime.now() - datetime.fromtimestamp(
os.stat(db_path).st_mtime)
if db_age > timedelta(days=7):
download_country_database()
db_path = db_path
db_reader = geoip2.database.Reader(db_path)
country = None
try:
country = db_reader.country(ip_address).country.iso_code
except geoip2.errors.AddressNotFoundError:
pass
return country
|
Uses the MaxMind Geolite2 Country database to return the ISO code for the
country associated with the given IPv4 or IPv6 address
Args:
ip_address (str): The IP address to query for
parallel (bool): Parallel processing
Returns:
str: An ISO country code associated with the given IP address
|
juraj-google-style
|
def as_money(self, number, **options):
if isinstance(number, list):
            return [self.as_money(val, **options) for val in number]
decimal = options.get('decimal')
number = self.parse(number, decimal)
if check_type(options, 'dict'):
            options = {**self.settings['currency'], **options}
formats = self._check_currency_format(options['format'])
use_format = (lambda num: (formats['pos'] if (num > 0) else (formats['neg'] if (num < 0) else formats['zero'])))(number)
precision = self._change_precision(number, options['precision'])
thousands = options['thousand']
decimal = options['decimal']
formater = self.format(abs(number), precision, thousands, decimal)
amount = use_format.replace('%s', options['symbol']).replace('%v', formater)
return amount
|
Format a number into currency.
Usage: accounting.formatMoney(number, symbol, precision, thousandsSep,
decimalSep, format)
defaults: (0, "$", 2, ",", ".", "%s%v")
Localise by overriding the symbol, precision,
thousand / decimal separators and format
Second param can be an object matching `settings.currency`
which is the easiest way.
Args:
number (int|float|list): Number(s) to format.
**options: Currency settings overrides (symbol, precision, thousand,
decimal, format).
Returns:
str: The formatted currency string.
|
codesearchnet
|
def sun_events(latitude, longitude, date, timezone=0, zenith=None):
return (sun_rise_set(latitude, longitude, date, 'rise', timezone, zenith), sun_rise_set(latitude, longitude, date, 'set', timezone, zenith))
|
Convenience function for calculating sunrise and sunset.
Civil twilight starts/ends when the Sun's centre is 6 degrees below
the horizon.
Nautical twilight starts/ends when the Sun's centre is 12 degrees
below the horizon.
Astronomical twilight starts/ends when the Sun's centre is 18 degrees below
the horizon.
Args:
latitude (float): Location's latitude
longitude (float): Location's longitude
date (datetime.date): Calculate rise or set for given date
timezone (int): Offset from UTC in minutes
zenith (str): Calculate rise/set events, or twilight times
Returns:
tuple of datetime.time: The time for the given events in the specified
timezone
|
codesearchnet
|
def parse_functions(
bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
parens = char_locs["parens"]
if not parens:
bels_len = len(bels) - 1
span = (0, bels_len)
parsed[span] = {
"name": "".join(bels),
"type": "Function",
"span": span,
"name_span": (span),
"function_level": "top",
}
return parsed, errors
for sp in sorted(parens):
ep, function_level = parens[sp]
if bels[sp - 1] == " ":
continue
for i in range(sp - 1, 0, -1):
if bels[i] in [" ", ",", "("]:
if i < sp - 1:
if ep == -1:
span = (i + 1, len(bels) - 1)
else:
span = (i + 1, ep)
parsed[span] = {
"name": "".join(bels[i + 1 : sp]),
"type": "Function",
"span": span,
"name_span": (i + 1, sp - 1),
"parens_span": (sp, ep),
"function_level": function_level,
}
break
else:
if ep == -1:
span = (0, len(bels) - 1)
else:
span = (0, ep)
parsed[span] = {
"name": "".join(bels[0:sp]),
"type": "Function",
"span": span,
"name_span": (0, sp - 1),
"parens_span": (sp, ep),
"function_level": function_level,
}
return parsed, errors
|
Parse functions from BEL using paren, comma, quote character locations
Args:
bels: BEL string as list of chars
char_locs: paren, comma, quote character locations
parsed: accumulated parse results, updated with function names and locations
errors: Any error messages generated during the parse
Returns:
(parsed, errors): parsed function names/locations and error messages
|
juraj-google-style
|
def url(self, pattern, method=None, name=None):
def _inner(call):
self._url_manager.add(pattern, method, call, name)
return call
return _inner
|
Decorator to map url pattern to the callable.
Args:
pattern (:obj:`str`): URL pattern to add. This is usually '/'
separated path. Parts of the URL can be parameterised using
curly braces.
Examples: "/", "/path/to/resource", "/resoures/{param}"
method (:obj:`str`, :obj:`list` of :obj:`str`, optional): HTTP
methods for the path specified. By default, GET method is added.
Value can be either a single method, by passing a string, or
multiple methods, by passing a list of strings.
name (:obj:`str`): Name for the pattern that can be used for
reverse matching
Note:
A trailing '/' is always assumed in the pattern.
Example:
>>> @app.url(pattern='/path/to/resource', method='GET')
>>> def function(ctx):
>>> return 'Hello world'
See Also:
:func:`drongo.managers.url.UrlManager.add`
|
codesearchnet
|
def _get_colordata(bs, elements, bs_projection):
contribs = {}
if bs_projection and bs_projection.lower() == "elements":
projections = bs.get_projection_on_elements()
for spin in (Spin.up, Spin.down):
if spin in bs.bands:
contribs[spin] = []
for band_idx in range(bs.nb_bands):
colors = []
for k_idx in range(len(bs.kpoints)):
if bs_projection and bs_projection.lower() == "elements":
c = [0, 0, 0]
projs = projections[spin][band_idx][k_idx]
projs = dict(
[(k, v ** 2) for k, v in projs.items()])
total = sum(projs.values())
if total > 0:
for idx, e in enumerate(elements):
c[idx] = math.sqrt(projs[
e] / total)
c = [c[1], c[2],
c[0]]
else:
c = [0, 0, 0] if spin == Spin.up \
else [0, 0,
1]
colors.append(c)
contribs[spin].append(colors)
contribs[spin] = np.array(contribs[spin])
return contribs
|
Get color data, including projected band structures
Args:
bs: Bandstructure object
elements: elements (in desired order) for setting to blue, red, green
bs_projection: None for no projection, "elements" for element projection
Returns:
Dict of Spin to a [nb_bands, nb_kpoints, 3] array of RGB color values.
|
juraj-google-style
|
def _alter_code(code, **attrs):
PyCode_New = ctypes.pythonapi.PyCode_New
PyCode_New.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.c_int, ctypes.py_object)
PyCode_New.restype = ctypes.py_object
args = [[code.co_argcount, 'co_argcount'], [code.co_kwonlyargcount, 'co_kwonlyargcount'], [code.co_nlocals, 'co_nlocals'], [code.co_stacksize, 'co_stacksize'], [code.co_flags, 'co_flags'], [code.co_code, 'co_code'], [code.co_consts, 'co_consts'], [code.co_names, 'co_names'], [code.co_varnames, 'co_varnames'], [code.co_freevars, 'co_freevars'], [code.co_cellvars, 'co_cellvars'], [code.co_filename, 'co_filename'], [code.co_name, 'co_name'], [code.co_firstlineno, 'co_firstlineno'], [code.co_lnotab, 'co_lnotab']]
for arg in args:
if (arg[1] in attrs):
arg[0] = attrs[arg[1]]
return PyCode_New(args[0][0], args[1][0], args[2][0], args[3][0], args[4][0], args[5][0], args[6][0], args[7][0], args[8][0], args[9][0], args[10][0], args[11][0], args[12][0], args[13][0], args[14][0])
|
Create a new code object by altering some of ``code`` attributes
Args:
code: code object
attrs: a mapping of names of code object attrs to their values
|
codesearchnet
|
def all_cities():
cities = []
fname = pkg_resources.resource_filename(__name__, 'resources/CityPops.csv')
    with open(fname, 'r', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
cities.append(row[0])
cities.sort()
return cities
|
Get a list of all Backpage city names.
Returns:
list of city names as Strings
|
codesearchnet
|
def BuildParamsWithMask(self, graph_fn, dtype, input_shapes, output_shapes, input_mask, output_mask, extra_inputs, extra_outputs):
def _ValidateShapes(shapes):
for shape in shapes:
assert all(shape), f'Shape unspecified: {shape}'
_ValidateShapes(input_shapes)
_ValidateShapes(output_shapes)
assert len(input_mask) == len(input_shapes), f'Inconsistent input_mask and input_shapes: len({input_mask}) != len({input_shapes}).'
assert len(output_mask) == len(output_shapes), f'Inconsistent output_mask and output_shapes: len({output_mask}) != len({output_shapes}).'
for extra_in_shape, extra_out_shape in zip(extra_inputs, extra_outputs):
assert len(input_shapes) == len(extra_in_shape), f'Inconsistent input_shapes and extra_in_shape: len({input_shapes}) != len({extra_in_shape}).'
assert len(output_shapes) == len(extra_out_shape), f'Inconsistent output_shapes and extra_out_shape: len({output_shapes}) != len({extra_out_shape}).'
return TfTrtIntegrationTestParams(graph_fn=graph_fn, input_specs=[self._GetTensorSpec(shape, mask, dtype, 'input_%d' % i) for i, (shape, mask) in enumerate(zip(input_shapes, input_mask))], output_specs=[self._GetTensorSpec(shape, mask, dtype, 'output_%d' % i) for i, (shape, mask) in enumerate(zip(output_shapes, output_mask))], input_dims=[input_shapes] + extra_inputs, expected_output_dims=[output_shapes] + extra_outputs)
|
Build test parameters with static or dynamic input shapes.
To define dynamic shapes give a boolean mask that describes which
dimensions to treat as known. The values in input_mask are interpreted the
following way:
- True: known dim (use the corresponding value from input_shapes)
- False: unknown dim (replace the corresponding value from input_shapes
with None)
For example, to define the first two dimension with unknown size use
input_shapes=[[1,2,1,8]], input_mask=[[False, False, True, True]].
Args:
graph_fn: The function to build the graph.
dtype: The element type.
input_shapes: The input shapes.
output_shapes: The output shapes.
input_mask: The input shape masks.
output_mask: The output shape masks.
extra_inputs: List of additional input shapes.
extra_outputs: List of additional output shapes.
Returns:
The test parameters.
|
github-repos
|
def process_file(filename: str,
filetypes: List[str],
move_to: str,
delete_if_not_specified_file_type: bool,
show_zip_output: bool) -> None:
try:
reader = CorruptedOpenXmlReader(filename,
show_zip_output=show_zip_output)
if reader.file_type in filetypes:
log.info("Found {}: {}", reader.description, filename)
if move_to:
dest_file = os.path.join(move_to, os.path.basename(filename))
_, ext = os.path.splitext(dest_file)
if ext != reader.suggested_extension():
dest_file += reader.suggested_extension()
reader.move_to(destination_filename=dest_file)
else:
log.info("Unrecognized or unwanted contents: " + filename)
if delete_if_not_specified_file_type:
log.info("Deleting: " + filename)
os.remove(filename)
except Exception as e:
log.critical("Uncaught error in subprocess: {!r}\n{}", e,
traceback.format_exc())
raise
|
Deals with an OpenXML, including if it is potentially corrupted.
Args:
filename: filename to process
filetypes: list of filetypes that we care about, e.g.
``['docx', 'pptx', 'xlsx']``.
move_to: move matching files to this directory
delete_if_not_specified_file_type: if ``True``, and the file is **not**
a type specified in ``filetypes``, then delete the file.
show_zip_output: show the output from the external ``zip`` tool?
|
juraj-google-style
|
def get_book_progress(self, asin):
kbp = self._get_api_call('get_book_progress', ('"%s"' % asin))
return KindleCloudReaderAPI._kbp_to_progress(kbp)
|
Returns the progress data available for a book.
NOTE: A summary of the two progress formats can be found in the
docstring for `ReadingProgress`.
Args:
asin: The asin of the book to be queried.
Returns:
A `ReadingProgress` instance corresponding to the book associated with
`asin`.
|
codesearchnet
|
def add_toolkit(topology, location):
import streamsx.topology.topology
assert isinstance(topology, streamsx.topology.topology.Topology)
tkinfo = dict()
tkinfo['root'] = os.path.abspath(location)
topology.graph._spl_toolkits.append(tkinfo)
|
Add an SPL toolkit to a topology.
Args:
topology(Topology): Topology to include toolkit in.
location(str): Location of the toolkit directory.
|
juraj-google-style
|
def get_obj(self, objpath, metahash, dst_path):
incachepath = self.path_in_cache(objpath, metahash)
if not os.path.exists(incachepath):
raise CacheMiss('%s not in cache.' % incachepath)
else:
log.debug('Cache hit! %s~%s', objpath, metahash.hexdigest())
if not os.path.exists(os.path.dirname(dst_path)):
os.makedirs(os.path.dirname(dst_path))
os.link(incachepath, dst_path)
|
Get object from cache, write it to dst_path.
Args:
objpath: filename relative to buildroot
(example: mini-boot/blahblah/somefile.bin)
metahash: metahash. See targets/base.py
dst_path: Absolute path where the file should be written.
Raises:
CacheMiss: if the item is not in the cache
|
juraj-google-style
|
def is_generator_function(obj):
CO_GENERATOR = 32
return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and (obj.func_code.co_flags & CO_GENERATOR)))
|
Return true if the object is a user-defined generator function.
Generator function objects provide the same attributes as functions.
See isfunction.__doc__ for attributes listing.
Adapted from Python 2.6.
Args:
obj: an object to test.
Returns:
True if the object is a generator function.
|
codesearchnet
|
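On Python 3 the same flag check can be written against __code__.co_flags, or delegated to inspect.isgeneratorfunction; a quick comparison sketch:

# Python 3 variant of the generator-function check shown above.
import inspect

CO_GENERATOR = 0x20

def is_generator_function(obj):
    return bool((inspect.isfunction(obj) or inspect.ismethod(obj))
                and obj.__code__.co_flags & CO_GENERATOR)

def gen():
    yield 1

print(is_generator_function(gen), inspect.isgeneratorfunction(gen))  # True True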
def _log_submission(submission, student_item):
logger.info(u'Created submission uuid={submission_uuid} for (course_id={course_id}, item_id={item_id}, anonymous_student_id={anonymous_student_id})'.format(submission_uuid=submission['uuid'], course_id=student_item['course_id'], item_id=student_item['item_id'], anonymous_student_id=student_item['student_id']))
|
Log the creation of a submission.
Args:
submission (dict): The serialized submission model.
student_item (dict): The serialized student item model.
Returns:
None
|
codesearchnet
|
def GetFileEntryByPath(self, path):
if path is None:
return None
file_entry_type, _ = self._paths.get(path, (None, None))
if not file_entry_type:
return None
path_spec = fake_path_spec.FakePathSpec(location=path)
return fake_file_entry.FakeFileEntry(
self._resolver_context, self, path_spec,
file_entry_type=file_entry_type)
|
Retrieves a file entry for a path.
Args:
path (str): path of the file entry.
Returns:
FakeFileEntry: a file entry or None if not available.
|
juraj-google-style
|
def pmean(tensor, axis_name=None):
if axis_name != _pmap_config.axis_name():
raise ValueError('axis_name (%s) is not equal to that of the surrounding pmap (%s)' % (axis_name, _pmap_config.axis_name()))
devices = _pmap_config.devices()
if devices is None:
raise ValueError("Can't retrieve the device list from the surrounding pmap")
if tpu_devices(devices):
raise ValueError('pmean for TPU is not supported yet.')
else:
return gen_collective_ops.collective_reduce(input=tensor, group_size=len(devices), group_key=_GROUP_KEY, instance_key=_get_instance_key(), merge_op='Add', final_op='Div', subdiv_offsets=(0,))
|
Mean all-reduction.
Args:
tensor: A tensor.
axis_name: The axis name to reduce. Must equal to that of the surrounding
pmap.
Returns:
The mean of the `tensor` replicas on each participating device.
|
github-repos
|
def defaults(cls, *options, **kwargs):
if kwargs and len(kwargs) != 1 and list(kwargs.keys())[0] != 'backend':
raise Exception('opts.defaults only accepts "backend" keyword argument')
cls._linemagic(cls._expand_options(merge_options_to_dict(options)), backend=kwargs.get('backend'))
|
Set default options for a session, whether in a Python script or
a Jupyter notebook.
Args:
*options: Option objects used to specify the defaults.
backend: The plotting extension the options apply to
|
juraj-google-style
|
def abort_expired_batches(self, request_timeout_ms, cluster):
expired_batches = []
to_remove = []
count = 0
for tp in list(self._batches.keys()):
assert (tp in self._tp_locks), 'TopicPartition not in locks dict'
if (tp in self.muted):
continue
with self._tp_locks[tp]:
dq = self._batches[tp]
for batch in dq:
is_full = bool((bool((batch != dq[(- 1)])) or batch.records.is_full()))
if batch.maybe_expire(request_timeout_ms, self.config['retry_backoff_ms'], self.config['linger_ms'], is_full):
expired_batches.append(batch)
to_remove.append(batch)
count += 1
self.deallocate(batch)
else:
break
if to_remove:
for batch in to_remove:
dq.remove(batch)
to_remove = []
if expired_batches:
log.warning('Expired %d batches in accumulator', count)
return expired_batches
|
Abort the batches that have been sitting in RecordAccumulator for
more than the configured request_timeout due to metadata being
unavailable.
Arguments:
request_timeout_ms (int): milliseconds to timeout
cluster (ClusterMetadata): current metadata for kafka cluster
Returns:
list of ProducerBatch that were expired
|
codesearchnet
|
def on_snapshot(self, proto):
TargetChange = firestore_pb2.TargetChange
target_changetype_dispatch = {TargetChange.NO_CHANGE: self._on_snapshot_target_change_no_change, TargetChange.ADD: self._on_snapshot_target_change_add, TargetChange.REMOVE: self._on_snapshot_target_change_remove, TargetChange.RESET: self._on_snapshot_target_change_reset, TargetChange.CURRENT: self._on_snapshot_target_change_current}
target_change = proto.target_change
if str(target_change):
target_change_type = target_change.target_change_type
_LOGGER.debug(('on_snapshot: target change: ' + str(target_change_type)))
meth = target_changetype_dispatch.get(target_change_type)
if (meth is None):
_LOGGER.info(('on_snapshot: Unknown target change ' + str(target_change_type)))
self.close(reason=('Unknown target change type: %s ' % str(target_change_type)))
else:
try:
meth(proto)
except Exception as exc2:
_LOGGER.debug(('meth(proto) exc: ' + str(exc2)))
raise
elif str(proto.document_change):
_LOGGER.debug('on_snapshot: document change')
target_ids = (proto.document_change.target_ids or [])
removed_target_ids = (proto.document_change.removed_target_ids or [])
changed = False
removed = False
if (WATCH_TARGET_ID in target_ids):
changed = True
if (WATCH_TARGET_ID in removed_target_ids):
removed = True
if changed:
_LOGGER.debug('on_snapshot: document change: CHANGED')
document_change = proto.document_change
document = document_change.document
data = _helpers.decode_dict(document.fields, self._firestore)
document_name = document.name
db_str = self._firestore._database_string
db_str_documents = (db_str + '/documents/')
if document_name.startswith(db_str_documents):
document_name = document_name[len(db_str_documents):]
document_ref = self._firestore.document(document_name)
snapshot = self.DocumentSnapshot(reference=document_ref, data=data, exists=True, read_time=None, create_time=document.create_time, update_time=document.update_time)
self.change_map[document.name] = snapshot
elif removed:
_LOGGER.debug('on_snapshot: document change: REMOVED')
document = proto.document_change.document
self.change_map[document.name] = ChangeType.REMOVED
elif str(proto.document_delete):
_LOGGER.debug('on_snapshot: document change: DELETE')
name = proto.document_delete.document
self.change_map[name] = ChangeType.REMOVED
elif str(proto.document_remove):
_LOGGER.debug('on_snapshot: document change: REMOVE')
name = proto.document_remove.document
self.change_map[name] = ChangeType.REMOVED
elif proto.filter:
_LOGGER.debug('on_snapshot: filter update')
if (proto.filter.count != self._current_size()):
self._reset_docs()
else:
_LOGGER.debug('UNKNOWN TYPE. UHOH')
self.close(reason=ValueError(('Unknown listen response type: %s' % proto)))
|
Called every time there is a response from listen. Collect changes
and 'push' the changes in a batch to the customer when we receive
'current' from the listen response.
Args:
proto (`google.cloud.firestore_v1beta1.types.ListenResponse`):
The response received from the listen stream.
codesearchnet
|
def GetLastHealthyElement(self):
for element in reversed(self.elements):
if not element.HasError():
return element
return self.elements[0]
|
Returns the last element of the trace that is not an error.
This element will contain the final component indicated by the trace.
Returns:
The last element of the trace that is not an error.
|
github-repos
|
def __init__(self, args):
self.args = args.args
self.varargs = args.vararg
self.kwarg = args.kwarg
self.kwonlyargs = args.kwonlyargs
self.defaults = args.defaults
self.kw_defaults = args.kw_defaults
self.arguments = list()
if self.args:
self.arguments.extend([x.arg for x in self.args])
if self.varargs:
            self.arguments.append(self.varargs.arg)
if self.kwarg:
            self.arguments.append(self.kwarg.arg)
if self.kwonlyargs:
self.arguments.extend([x.arg for x in self.kwonlyargs])
|
Argument container class.
Args:
args (ast.arguments): The arguments node of a function AST node.
|
juraj-google-style
|
def build_cfg(cls, node):
if not isinstance(node, gast.FunctionDef):
raise TypeError('input must be a function definition')
cfg = cls()
cfg.entry = Node(node.args)
cfg.head = [cfg.entry]
cfg.visit_statements(node.body)
cfg.exit = Node(None)
cfg.set_head(cfg.exit)
cfg.backlink(cfg.entry)
return cfg
|
Build a CFG for a function.
Args:
node: A function definition the body of which to analyze.
Returns:
A CFG object.
Raises:
TypeError: If the input is not a function definition.
|
juraj-google-style
|
def pairwise_intersection(boxlist1, boxlist2):
x_min1, y_min1, x_max1, y_max1 = tf.split(boxlist1, 4, axis=1)
x_min2, y_min2, x_max2, y_max2 = tf.split(boxlist2, 4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
|
Compute pairwise intersection areas between boxes.
Args:
boxlist1: Nx4 floatbox
boxlist2: Mx4 floatbox
Returns:
a tensor with shape [N, M] representing pairwise intersections
|
juraj-google-style
|
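The same broadcasting trick can be expressed in NumPy for a quick check of the geometry (assumed equivalent to the TF version above, not taken from it):

# NumPy re-expression of the pairwise-intersection broadcasting trick.
import numpy as np

def pairwise_intersection(boxes1, boxes2):
    x_min1, y_min1, x_max1, y_max1 = np.split(boxes1, 4, axis=1)
    x_min2, y_min2, x_max2, y_max2 = np.split(boxes2, 4, axis=1)
    # Transposing the second box list makes min/max broadcast to [N, M].
    heights = np.maximum(0.0, np.minimum(y_max1, y_max2.T) -
                         np.maximum(y_min1, y_min2.T))
    widths = np.maximum(0.0, np.minimum(x_max1, x_max2.T) -
                        np.maximum(x_min1, x_min2.T))
    return heights * widths

a = np.array([[0., 0., 2., 2.]])
b = np.array([[1., 1., 3., 3.], [5., 5., 6., 6.]])
print(pairwise_intersection(a, b))   # [[1. 0.]]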
def transformer_revnet_decoder(decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
name="decoder"):
def f(x, side_input):
decoder_self_attention_bias = side_input[0]
encoder_decoder_attention_bias = side_input[1]
encoder_output = side_input[2]
old_hid_size = hparams.hidden_size
hparams.hidden_size = old_hid_size
with tf.variable_scope("self_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(
x, hparams), None, decoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)
y = common_layers.layer_postprocess(x, y, hparams)
if encoder_output is not None:
with tf.variable_scope("encdec_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(
x, hparams), encoder_output, encoder_decoder_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)
y = common_layers.layer_postprocess(x, y, hparams)
hparams.hidden_size = old_hid_size
return y
def g(x):
old_hid_size = hparams.hidden_size
hparams.hidden_size = old_hid_size
with tf.variable_scope("ffn"):
y = transformer.transformer_ffn_layer(
common_layers.layer_preprocess(x, hparams), hparams)
y = common_layers.layer_postprocess(x, y, hparams)
hparams.hidden_size = old_hid_size
return y
x1, x2 = tf.split(decoder_input, 2, axis=-1)
with tf.variable_scope(name):
y1, y2 = tf.contrib.layers.rev_block(
x1,
x2,
f,
g,
num_layers=hparams.num_hidden_layers,
f_side_input=[
decoder_self_attention_bias, encoder_decoder_attention_bias,
encoder_output
],
is_training=hparams.mode == tf.estimator.ModeKeys.TRAIN)
y = tf.concat([y1, y2], axis=-1)
return common_layers.layer_preprocess(y, hparams)
|
A stack of transformer layers.
Args:
decoder_input: a Tensor
encoder_output: a Tensor
decoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
Returns:
y: a Tensor
|
juraj-google-style
|
def gather_available_device_info():
device_info_list = []
devices = device_lib.list_local_devices()
for d in devices:
device_info = test_log_pb2.AvailableDeviceInfo()
device_info.name = d.name
device_info.type = d.device_type
device_info.memory_limit = d.memory_limit
device_info.physical_description = d.physical_device_desc
device_info_list.append(device_info)
return device_info_list
|
Gather list of devices available to TensorFlow.
Returns:
A list of test_log_pb2.AvailableDeviceInfo messages.
|
github-repos
|
def restore(self, state):
own_properties = set(self.get_properties())
state_properties = set(state)
to_restore = own_properties.intersection(state_properties)
for name in to_restore:
value = state.get(name)
if name in self._complex_properties:
value = self._complex_properties[name][1](value)
setattr(self, name, value)
|
Restore this state from the output of a previous call to dump().
Only those properties in this object and listed in state will be
updated. Other properties will not be modified and state may contain
keys that do not correspond with properties in this object.
Args:
state (dict): A serialized representation of this object.
|
juraj-google-style
|
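A stand-alone sketch of the restore pattern used above: only keys present both in the saved state and in the object's known properties are applied. The Settings class is hypothetical and stands in for the real property machinery.

# Minimal sketch of restore-by-intersection, assuming a made-up Settings class.
class Settings:
    KNOWN = ('host', 'port')

    def restore(self, state):
        # Apply only keys that this object actually knows about.
        for name in set(self.KNOWN).intersection(state):
            setattr(self, name, state[name])

s = Settings()
s.restore({'host': 'example.com', 'port': 8080, 'ignored': True})
print(s.host, s.port)         # example.com 8080
print(hasattr(s, 'ignored'))  # False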
def set(self, **kwargs):
for (port_name, port_value) in kwargs.items():
if hasattr(port_value, 'value'):
port_value = port_value.value
self.inputs.__setattr__(port_name, port_value)
|
Set input values on task
Args:
**kwargs: input port names mapped to the values to set.
Returns:
None
|
codesearchnet
|
def matches(x, y, regex_expr=False):
x = strip_regex(x) if regex_expr and isregex_expr(x) else x
if PY_3:
x = x.pattern if isregex(x) else x
return test_case().assertRegex(y, x) or True
if isinstance(x, str):
x = re.compile(x, re.IGNORECASE)
assert x.match(y) is not None
|
Tries to match a regular expression value ``x`` against ``y``.
Alias to ``unittest.TestCase.assertEqual()``.
Arguments:
x (regex|str): regular expression to test.
y (str): value to match.
regex_expr (bool): enables regex string based expression matching.
Raises:
AssertionError: in case of mismatching.
Returns:
bool
|
juraj-google-style
|
def local_reduction_attention(x, block_length, multihead_params):
@expert_utils.add_name_scope()
def dot_product_self_local_attention_flattened(q, k, v):
'Strided block local self-attention.\n\n No overlap between the blocks.\n\n Args:\n q (tf.Tensor): shape [batch, heads, length, depth_k]\n k (tf.Tensor): shape [batch, heads, length, depth_k]\n v (tf.Tensor): shape [batch, heads, length, depth_v]\n\n Returns:\n tf.Tensor: shape [batch, heads, length, depth_v]\n '
(_, num_head, _, depth) = q.get_shape().as_list()
def pad_and_reshape(x):
'Split the length dim into [num_block, block_length].'
length_x = common_layers.shape_list(x)[2]
x = tf.pad(x, [[0, 0], [0, 0], [0, ((- length_x) % block_length)], [0, 0]])
            x = tf.reshape(x, [common_layers.shape_list(x)[0], num_head, common_layers.shape_list(x)[2] // block_length, block_length, depth])
return x
(q, k, v) = [pad_and_reshape(t) for t in (q, k, v)]
logits = tf.matmul(q, k, transpose_b=True)
logits = tf.reshape(logits, [common_layers.shape_list(logits)[0], num_head, common_layers.shape_list(logits)[2], (block_length ** 2)])
weights = tf.nn.softmax(logits)
weights = tf.reshape(weights, [common_layers.shape_list(weights)[0], num_head, common_layers.shape_list(weights)[2], block_length, block_length])
weights = tf.reduce_sum(weights, axis=3, keep_dims=True)
v_out = tf.matmul(weights, v)
v_out = tf.squeeze(v_out, axis=3)
return v_out
return multihead_attention(x, None, bias=None, output_depth=x.get_shape().as_list()[(- 1)], attention_type=dot_product_self_local_attention_flattened, **multihead_params)
|
Reduce the length dimension using self attention.
Args:
x (tf.Tensor): float32 of shape [batch, length, depth]
block_length (int): Block length for local attention (Compression factor)
multihead_params (dict): parameters for multihead attention
Returns:
tf.Tensor: Compressed tensor of shape [batch, length // factor, depth]
|
codesearchnet
|
def _restore_checkpoint(self, master: str, saver: saver_lib.Saver=None, checkpoint_dir: str=None, checkpoint_filename_with_path: str=None, wait_for_checkpoint=False, max_wait_secs=7200, config=None) -> Tuple[session.Session, bool]:
self._target = master
strategy = distribute_lib.get_strategy()
if strategy and hasattr(strategy.extended, '_experimental_initialize_system'):
strategy.extended._experimental_initialize_system()
sess = session.Session(self._target, graph=self._graph, config=config)
if checkpoint_dir and checkpoint_filename_with_path:
raise ValueError('Can not provide both checkpoint_dir and checkpoint_filename_with_path.')
if not saver or not (checkpoint_dir or checkpoint_filename_with_path):
return (sess, False)
if checkpoint_filename_with_path:
_restore_checkpoint_and_maybe_run_saved_model_initializers(sess, saver, checkpoint_filename_with_path)
return (sess, True)
wait_time = 0
ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
while not ckpt or not ckpt.model_checkpoint_path:
if wait_for_checkpoint and wait_time < max_wait_secs:
logging.info('Waiting for checkpoint to be available.')
time.sleep(self._recovery_wait_secs)
wait_time += self._recovery_wait_secs
ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
else:
return (sess, False)
_restore_checkpoint_and_maybe_run_saved_model_initializers(sess, saver, ckpt.model_checkpoint_path)
saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)
return (sess, True)
|
Creates a `Session`, and tries to restore a checkpoint.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, is_restored) where 'is_restored' is `True` if
the session could be restored, `False` otherwise.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
|
github-repos
|
def GetModifyTimestamp(self):
if self.modify_time is None:
self.modify_time = self._ReadTimestamp(self.modify_file)
return self.modify_time
|
Return the timestamp of the last cache modification.
Args: None
Returns:
An int with the number of seconds since epoch, or None if the timestamp
file doesn't exist or has errors.
|
github-repos
|
def decode_metar(self, metar):
try:
from metar import Metar
except:
return "Unable to parse metars. Please install parser from https:
m = Metar.Metar(metar)
return m.string()
|
Simple method that decodes a given metar string.
Args:
metar (str): The metar data
Returns:
The metar data in readable format
Example::
from pyflightdata import FlightData
f=FlightData()
f.decode_metar('WSSS 181030Z 04009KT 010V080 9999 FEW018TCU BKN300 29/22 Q1007 NOSIG')
|
juraj-google-style
|
def touch(self, mode=438, exist_ok=True):
if self._closed:
self._raise_closed()
if self.exists():
if exist_ok:
self.filesystem.utime(self._path(), None)
else:
self.filesystem.raise_os_error(errno.EEXIST, self._path())
else:
fake_file = self.open('w')
fake_file.close()
self.chmod(mode)
|
Create a fake file for the path with the given access mode,
if it doesn't exist.
Args:
mode: the file mode for the file if it does not exist
exist_ok: if the file already exists and this is True, nothing
happens, otherwise FileExistError is raised
Raises:
OSError: (Python 2 only) if the file exists and exits_ok is False.
FileExistsError: (Python 3 only) if the file exists and exits_ok is
False.
|
codesearchnet
|
def GetClientURNsForHostnames(hostnames, token=None):
if data_store.RelationalDBEnabled():
index = ClientIndex()
else:
index = CreateClientIndex(token=token)
keywords = set()
for hostname in hostnames:
if hostname.startswith("host:"):
keywords.add(hostname)
else:
keywords.add("host:%s" % hostname)
results = index.ReadClientPostingLists(keywords)
result = {}
for keyword, hits in iteritems(results):
result[keyword[len("host:"):]] = hits
return result
|
Gets all client_ids for a given list of hostnames or FQDNs.
Args:
hostnames: A list of hostnames / FQDNs.
token: An ACL token.
Returns:
A dict with a list of all known GRR client_ids for each hostname.
|
juraj-google-style
|
def _process_has_edge_degree_filter_directive(filter_operation_info, location, context, parameters):
if isinstance(filter_operation_info.field_ast, InlineFragment):
raise AssertionError(u'Received InlineFragment AST node in "has_edge_degree" filter handler. This should have been caught earlier: {}'.format(filter_operation_info.field_ast))
filtered_field_name = filter_operation_info.field_name
if ((filtered_field_name is None) or (not is_vertex_field_name(filtered_field_name))):
raise AssertionError(u'Invalid value for "filtered_field_name" in "has_edge_degree" filter: {}'.format(filtered_field_name))
if (not is_vertex_field_type(filter_operation_info.field_type)):
raise AssertionError(u'Invalid value for "filter_operation_info.field_type" in "has_edge_degree" filter: {}'.format(filter_operation_info))
argument = parameters[0]
if (not is_variable_argument(argument)):
raise GraphQLCompilationError(u'The "has_edge_degree" filter only supports runtime variable arguments. Tagged values are not supported. Argument name: {}'.format(argument))
argument_inferred_type = GraphQLInt
(argument_expression, non_existence_expression) = _represent_argument(location, context, argument, argument_inferred_type)
if (non_existence_expression is not None):
raise AssertionError(u'Since we do not support tagged values, non_existence_expression should have been None. However, it was: {}'.format(non_existence_expression))
argument_is_zero = expressions.BinaryComposition(u'=', argument_expression, expressions.ZeroLiteral)
edge_field_is_null = expressions.BinaryComposition(u'=', expressions.LocalField(filtered_field_name), expressions.NullLiteral)
edge_degree_is_zero = expressions.BinaryComposition(u'&&', argument_is_zero, edge_field_is_null)
edge_field_is_not_null = expressions.BinaryComposition(u'!=', expressions.LocalField(filtered_field_name), expressions.NullLiteral)
edge_degree = expressions.UnaryTransformation(u'size', expressions.LocalField(filtered_field_name))
edge_degree_matches_argument = expressions.BinaryComposition(u'=', edge_degree, argument_expression)
edge_degree_is_non_zero = expressions.BinaryComposition(u'&&', edge_field_is_not_null, edge_degree_matches_argument)
filter_predicate = expressions.BinaryComposition(u'||', edge_degree_is_zero, edge_degree_is_non_zero)
return blocks.Filter(filter_predicate)
|
Return a Filter basic block that checks the degree of the edge to the given vertex field.
Args:
filter_operation_info: FilterOperationInfo object, containing the directive and field info
of the field where the filter is to be applied.
location: Location where this filter is used.
context: dict, various per-compilation data (e.g. declared tags, whether the current block
is optional, etc.). May be mutated in-place in this function!
parameters: list of 1 element, containing the value to check the edge degree against;
if the parameter is optional and missing, the check will return True
Returns:
a Filter basic block that performs the check
|
codesearchnet
|
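For context, a sketch of how a "has_edge_degree" filter is typically written in a query handled by this compiler. The schema type, vertex field, and parameter name are illustrative assumptions; the directive syntax should be checked against the compiler's own documentation.
# Hypothetical query string; type and field names are assumptions.
EXAMPLE_QUERY = '''
{
    Animal {
        name @output(out_name: "animal_name")
        out_Animal_ParentOf @filter(op_name: "has_edge_degree", value: ["$child_count"]) {
            name @output(out_name: "child_name")
        }
    }
}
'''
EXAMPLE_PARAMETERS = {'child_count': 2}  # runtime variable supplying the expected degree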
def sg_inject(path, mod_name):
import sys
import types
import importlib
if (path not in list(sys.path)):
sys.path.append(path)
globals()[mod_name] = importlib.import_module(mod_name)
for func_name in dir(globals()[mod_name]):
if isinstance(globals()[mod_name].__dict__.get(func_name), types.FunctionType):
if (not func_name.startswith('_')):
exec(('tf.Variable.%s = %s.%s' % (func_name, mod_name, func_name)))
exec(('tf.Tensor.%s = %s.%s' % (func_name, mod_name, func_name)))
|
r"""Converts all functions in the given Python module to sugar functions
so that they can be used in a chainable manner.
Args:
path: A string. Path to the Python module
mod_name: A string. The name of the Python module to inject.
Returns:
None
|
codesearchnet
|
def serialize_cert_to_der(cert_obj):
return cert_obj.public_bytes(
cryptography.hazmat.primitives.serialization.Encoding.DER
)
|
Serialize certificate to DER.
Args:
cert_obj: cryptography.Certificate
Returns:
bytes: DER encoded certificate
|
juraj-google-style
|
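A short usage sketch, assuming a PEM-encoded certificate on disk and a recent cryptography release (older versions also require a backend argument); the file name is illustrative.
from cryptography import x509
from cryptography.hazmat.primitives import serialization

with open('cert.pem', 'rb') as f:  # illustrative file name
    cert_obj = x509.load_pem_x509_certificate(f.read())

der_bytes = cert_obj.public_bytes(serialization.Encoding.DER)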
def set_key_color(self, color: Tuple[(int, int, int)]) -> None:
lib.TCOD_image_set_key_color(self.image_c, color)
|
Set a color to be transparent during blitting functions.
Args:
color (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
|
codesearchnet
|
def all_sample_md5s(self, type_tag=None):
if type_tag:
cursor = self.database[self.sample_collection].find({'type_tag': type_tag}, {'md5': 1, '_id': 0})
else:
cursor = self.database[self.sample_collection].find({}, {'md5': 1, '_id': 0})
return [match.values()[0] for match in cursor]
|
Return a list of all md5 matching the type_tag ('exe','pdf', etc).
Args:
type_tag: the type of sample.
Returns:
a list of matching samples.
|
juraj-google-style
|
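A self-contained pymongo sketch of the same projection query, written so the md5 extraction also works on Python 3 (the `match.values()[0]` indexing above is Python 2 specific). The connection, database, and collection names are assumptions.
from pymongo import MongoClient

client = MongoClient('localhost', 27017)      # assumed connection details
collection = client['workbench']['samples']   # assumed database/collection names

cursor = collection.find({'type_tag': 'exe'}, {'md5': 1, '_id': 0})
md5s = [doc['md5'] for doc in cursor]          # portable across Python 2 and 3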
def _to_key_ranges_by_shard(cls, app, namespaces, shard_count, query_spec):
key_ranges_by_ns = []
for namespace in namespaces:
ranges = cls._split_ns_by_scatter(shard_count, namespace, query_spec.entity_kind, app)
random.shuffle(ranges)
key_ranges_by_ns.append(ranges)
ranges_by_shard = [[] for _ in range(shard_count)]
for ranges in key_ranges_by_ns:
for (i, k_range) in enumerate(ranges):
if k_range:
ranges_by_shard[i].append(k_range)
key_ranges_by_shard = []
for ranges in ranges_by_shard:
if ranges:
key_ranges_by_shard.append(key_ranges.KeyRangesFactory.create_from_list(ranges))
return key_ranges_by_shard
|
Get a list of key_ranges.KeyRanges objects, one for each shard.
This method uses scatter index to split each namespace into pieces
and assign those pieces to shards.
Args:
app: app_id in str.
namespaces: a list of namespaces in str.
shard_count: number of shards to split.
query_spec: model.QuerySpec.
Returns:
a list of key_ranges.KeyRanges objects.
|
codesearchnet
|
def run_simulations(self, parameter_list, data_folder):
self.data_folder = data_folder
with Pool(processes=MAX_PARALLEL_PROCESSES) as pool:
for result in pool.imap_unordered(self.launch_simulation,
parameter_list):
yield result
|
This function runs multiple simulations in parallel.
Args:
parameter_list (list): list of parameter combinations to simulate.
data_folder (str): folder in which to create output folders.
|
juraj-google-style
|
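The parallel pattern above, reduced to a stdlib-only sketch; the worker function and process count are illustrative stand-ins.
from multiprocessing import Pool

def launch_simulation(params):
    # Stand-in worker: a real implementation would invoke the simulator here.
    return sum(params)

def run_all(parameter_list, processes=4):
    with Pool(processes=processes) as pool:
        # imap_unordered yields each result as soon as its worker finishes.
        for result in pool.imap_unordered(launch_simulation, parameter_list):
            yield result

if __name__ == '__main__':
    print(list(run_all([(1, 2), (3, 4), (5, 6)])))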
def filter_cold_days(input_data, month_filter):
projection_fields = ['year', 'month', 'day', 'mean_temp']
fields_of_interest = input_data | 'Projected' >> beam.Map(lambda row: {f: row[f] for f in projection_fields})
global_mean = AsSingleton(fields_of_interest | 'ExtractMean' >> beam.Map(lambda row: row['mean_temp']) | 'GlobalMean' >> beam.combiners.Mean.Globally())
return fields_of_interest | 'DesiredMonth' >> beam.Filter(lambda row: row['month'] == month_filter) | 'BelowMean' >> beam.Filter(lambda row, mean: row['mean_temp'] < mean, global_mean)
|
Workflow computing rows in a specific month with low temperatures.
Args:
input_data: a PCollection of dictionaries representing table rows. Each
dictionary must have the keys ['year', 'month', 'day', and 'mean_temp'].
month_filter: an int representing the month for which colder-than-average
days should be returned.
Returns:
A PCollection of dictionaries with the same keys described above. Each
row represents a day in the specified month where temperatures were
colder than the global mean temperature in the entire dataset.
|
github-repos
|
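A hedged end-to-end sketch that drives `filter_cold_days` from an in-memory PCollection, assuming the function above is importable in the current module; the sample rows are made up for illustration.
import apache_beam as beam

rows = [
    {'year': 2000, 'month': 1, 'day': 1, 'mean_temp': -3.0},
    {'year': 2000, 'month': 1, 'day': 2, 'mean_temp': 12.0},
    {'year': 2000, 'month': 7, 'day': 1, 'mean_temp': 25.0},
]

with beam.Pipeline() as p:
    input_data = p | 'CreateRows' >> beam.Create(rows)
    cold_days = filter_cold_days(input_data, month_filter=1)
    cold_days | 'Print' >> beam.Map(print)  # only the January day below the global mean survives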
def to_routing_header(params):
if (sys.version_info[0] < 3):
return urlencode(params).replace('%2F', '/')
return urlencode(params, safe='/')
|
Returns a routing header string for the given request parameters.
Args:
params (Mapping[str, Any]): A dictionary containing the request
parameters used for routing.
Returns:
str: The routing header string.
|
codesearchnet
|
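A quick usage sketch, assuming `to_routing_header` above is in scope; the parameter value is illustrative.
params = {'table_name': 'projects/my-project/instances/my-instance/tables/my-table'}
header_value = to_routing_header(params)
# 'table_name=projects/my-project/instances/my-instance/tables/my-table'
# (the '/' characters are kept unescaped, which is the point of the helper)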
def insert_and_get(self, **fields):
if ((not self.conflict_target) and (not self.conflict_action)):
return super().create(**fields)
compiler = self._build_insert_compiler([fields])
rows = compiler.execute_sql(return_id=False)
columns = rows[0]
model_columns = {}
for field in self.model._meta.local_concrete_fields:
model_columns[field.column] = field.attname
model_init_fields = {}
for (column_name, column_value) in columns.items():
try:
model_init_fields[model_columns[column_name]] = column_value
except KeyError:
pass
return self.model(**model_init_fields)
|
Creates a new record in the database and then gets
the entire row.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
fields:
The fields of the row to create.
Returns:
The model instance representing the row that was created.
|
codesearchnet
|
def get_hyperparameters(self):
hyperparameters = {}
for (block_name, block) in self.blocks.items():
hyperparameters[block_name] = block.get_hyperparameters()
return hyperparameters
|
Get the current hyperparameters of each block.
Returns:
dict:
A dictionary containing the block names as keys and
the current block hyperparameters dictionary as values.
|
codesearchnet
|
def adapt_logger(logger):
if isinstance(logger, logging.Logger):
return logger
if isinstance(logger, (SimpleLogger, NoOpLogger)):
return logger.logger
return logger
|
Adapt our custom logger.BaseLogger object into a standard logging.Logger object.
Adaptations are:
- NoOpLogger turns into a logger with a single NullHandler.
- SimpleLogger turns into a logger with a StreamHandler and level.
Args:
logger: Possibly a logger.BaseLogger, or a standard python logging.Logger.
Returns: a standard python logging.Logger.
|
juraj-google-style
|
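For reference, the two target shapes the docstring describes, built directly with the standard logging module; the logger names are illustrative.
import logging
import sys

# NoOpLogger equivalent: a logger that silently swallows records.
noop = logging.getLogger('example.noop')
noop.addHandler(logging.NullHandler())

# SimpleLogger equivalent: a stream handler plus an explicit level.
simple = logging.getLogger('example.simple')
simple.setLevel(logging.INFO)
simple.addHandler(logging.StreamHandler(sys.stdout))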
def _MakePackagePages(self, package, showprivate=False, nested=False, showinh=False):
def checkNoNested(mod):
try:
all = mod.__all__
except AttributeError:
return False
mems = inspect.getmembers(mod, inspect.ismodule)
mems = [m for m in mems if m[0] in mod.__all__]
if len(mems) > 0:
return False
return True
mods = inspect.getmembers(package, inspect.ismodule)
nmods, pvt, npkgs = [], [], []
for mod in mods:
if checkNoNested(mod[1]):
if mod[0][0] == '_': pvt.append(mod)
else: nmods.append(mod)
else: npkgs.append(mod)
if showprivate: nmods += pvt
files = []
ignore = []
for pkg in npkgs:
pt = '%s/%s/%s' % (self.path, package.__name__.replace('.', '/'), pkg[1].__name__.split('.')[-1])
if os.path.exists(pt): shutil.rmtree(pt)
os.makedirs(pt)
ignore += inspect.getmembers(pkg[1])
f = self._MakePackagePages(pkg[1], showprivate=showprivate, nested=True, showinh=showinh)
files.append(f.split(package.__name__.replace('.', '/')+'/')[1])
if nested:
try:
name = package.__displayname__
except AttributeError:
name = package.__name__
index = r % (name, '*' * len(name))
index += '\n '.join(files)
index += '\n ' + self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh)
findex = 'content/%s/index.rst' % (package.__name__.replace('.', '/'))
with open(findex, 'w') as f:
if package.__doc__: f.write(package.__doc__)
f.write(index)
return '\n ' + findex
names = '\n %s/%s/' % ( self.path, package.__name__.replace('.', '/'))
nmods = [m for m in nmods if m not in ignore]
return names.join(self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh).split('\n ')+files)
|
An internal helper to generate all of the pages for a given package
Args:
package (module): The top-level package to document
showprivate (bool): A flag for whether or not to display private members
nested (bool): For internal use ONLY
Returns:
str: The file names ready to be appended to a top-level toctree
|
juraj-google-style
|
def assertAllDifferent(self, tensors):
tensors = [array_ops.reshape(t, shape=[-1]) for t in tensors]
ls = array_ops.concat(tensors, axis=0).numpy().tolist()
self.assertAllEqual(len(ls), len(set(ls)))
|
Checks that there are no duplicate elements anywhere among the tensors.
Args:
tensors: a list of tensors. They can have different shapes.
|
github-repos
|
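The underlying uniqueness check, shown as a plain NumPy sketch without the TensorFlow wrapper; the input arrays are illustrative.
import numpy as np

tensors = [np.array([[1, 2], [3, 4]]), np.array([5, 6, 7])]    # different shapes are fine
flat = np.concatenate([t.reshape(-1) for t in tensors]).tolist()
assert len(flat) == len(set(flat))  # fails if any value repeats across the inputs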
def email(self, subject, text_body, html_body=None, sender=None, **kwargs):
self.configuration.emailer().send([self.data['email']], subject, text_body, html_body=html_body, sender=sender, **kwargs)
|
Emails a user.
Args:
subject (str): Email subject
text_body (str): Plain text email body
html_body (str): HTML email body
sender (Optional[str]): Email sender. Defaults to SMTP username.
**kwargs: See below
mail_options (List): Mail options (see smtplib documentation)
rcpt_options (List): Recipient options (see smtplib documentation)
Returns:
None
|
codesearchnet
|
def delete(self, webhookId):
check_type(webhookId, basestring, may_be_none=False)
self._session.delete(((API_ENDPOINT + '/') + webhookId))
|
Delete a webhook, by ID.
Args:
webhookId(basestring): The ID of the webhook to be deleted.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
|
codesearchnet
|
def ApprovalRevokeRaw(aff4_path, token):
try:
urn = rdf_client.ClientURN(aff4_path)
except type_info.TypeValueError:
urn = rdfvalue.RDFURN(aff4_path)
approval_urn = aff4.ROOT_URN.Add("ACL").Add(urn.Path()).Add(
token.username).Add(utils.EncodeReasonString(token.reason))
super_token = access_control.ACLToken(username="raw-approval-superuser")
super_token.supervisor = True
approval_request = aff4.FACTORY.Open(
approval_urn, mode="rw", token=super_token)
approval_request.DeleteAttribute(approval_request.Schema.APPROVER)
approval_request.Close()
|
Revokes an approval for a given token.
This method requires raw datastore access to manipulate approvals directly.
Args:
aff4_path: The aff4_path or client id the approval should be created for.
token: The token that should be revoked.
|
juraj-google-style
|
def parse_from_xml(root):
if (root.tag != 'ubcpi'):
raise UpdateFromXmlError(_('Every peer instruction tool must contain an "ubcpi" element.'))
display_name_el = root.find('display_name')
if (display_name_el is None):
raise UpdateFromXmlError(_('Every peer instruction tool must contain a "display_name" element.'))
else:
display_name = _safe_get_text(display_name_el)
rationale_size_min = (int(root.attrib['rationale_size_min']) if ('rationale_size_min' in root.attrib) else None)
rationale_size_max = (int(root.attrib['rationale_size_max']) if ('rationale_size_max' in root.attrib) else None)
question_el = root.find('question')
if (question_el is None):
raise UpdateFromXmlError(_('Every peer instruction tool must contain a "question" element.'))
else:
question = parse_question_xml(question_el)
options_el = root.find('options')
if (options_el is None):
raise UpdateFromXmlError(_('Every peer instruction tool must contain an "options" element.'))
else:
(options, correct_answer, correct_rationale) = parse_options_xml(options_el)
seeds_el = root.find('seeds')
if (seeds_el is None):
raise UpdateFromXmlError(_('Every peer instruction tool must contain a "seeds" element.'))
else:
seeds = parse_seeds_xml(seeds_el)
algo = (unicode(root.attrib['algorithm']) if ('algorithm' in root.attrib) else None)
num_responses = (unicode(root.attrib['num_responses']) if ('num_responses' in root.attrib) else None)
return {'display_name': display_name, 'question_text': question, 'options': options, 'rationale_size': {'min': rationale_size_min, 'max': rationale_size_max}, 'correct_answer': correct_answer, 'correct_rationale': correct_rationale, 'seeds': seeds, 'algo': {'name': algo, 'num_responses': num_responses}}
|
Update the UBCPI XBlock's content from an XML definition.
We need to be strict about the XML we accept, to avoid setting
the XBlock to an invalid state (which will then be persisted).
Args:
root (lxml.etree.Element): The XML definition of the XBlock's content.
Returns:
A dictionary of all of the XBlock's content.
Raises:
UpdateFromXmlError: The XML definition is invalid
|
codesearchnet
|
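A hedged sketch of feeding this parser from a raw XML string with lxml. The attributes shown are illustrative, and the required question, options, and seeds children are omitted for brevity, so the commented-out call would raise UpdateFromXmlError until they are supplied.
from lxml import etree

xml_definition = '''
<ubcpi rationale_size_min="10" rationale_size_max="500" algorithm="simple" num_responses="3">
    <display_name>Peer Instruction</display_name>
    <!-- question, options and seeds elements are required as well -->
</ubcpi>
'''

root = etree.fromstring(xml_definition)
# content = parse_from_xml(root)  # valid only once all required children are present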
def __init__(self, access_token, access_token_type, refresh_token=None, expires_in=None, state=None):
self.access_token = access_token
self.access_token_type = access_token_type
self.refresh_token = refresh_token
self.expires_in = expires_in
self.state = state
|
Initialization of the object
Args:
access_token (str): Access token
access_token_type (str): Access token type
refresh_token (str):
expires_in (int): Seconds after which the token will expire
state (str):
|
juraj-google-style
|
def __new__(cls, month=1, day=1, hour=0, minute=0, leap_year=False):
year = 2016 if leap_year else 2017
hour, minute = cls._calculate_hour_and_minute(hour + minute / 60.0)
try:
return datetime.__new__(cls, year, month, day, hour, minute)
except ValueError as e:
raise ValueError("{}:\n\t({}/{}@{}:{})(m/d@h:m)".format(
e, month, day, hour, minute
))
|
Create Ladybug datetime.
Args:
month: A value for month between 1-12 (Default: 1).
day: A value for day between 1-31 (Default: 1).
hour: A value for hour between 0-23 (Default: 0).
minute: A value for minute between 0-59 (Default: 0).
leap_year: A boolean to indicate if datetime is for a leap year
(Default: False).
|
juraj-google-style
|
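The hour/minute normalization hinted at by `_calculate_hour_and_minute` can be sketched with `divmod`; this is an assumption about its behavior rather than the library's actual implementation, and minute rollover at 60 is not handled here.
def calculate_hour_and_minute(float_hour):
    # Split a fractional hour (e.g. 10.75) into whole hours and minutes.
    hour, fraction = divmod(float_hour, 1)
    return int(hour), int(round(fraction * 60))

assert calculate_hour_and_minute(10.75) == (10, 45)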
def plot_densities(self, ax=None, **kwargs):
(ax, fig, plt) = get_ax_fig_plt(ax)
ax.grid(True)
ax.set_xlabel('r [Bohr]')
for (i, den_name) in enumerate(['ae_core_density', 'pseudo_core_density']):
rden = getattr(self, den_name)
label = ('$n_c$' if (i == 1) else '$\\tilde{n}_c$')
ax.plot(rden.mesh, (rden.mesh * rden.values), label=label, lw=2)
ax.legend(loc='best')
return fig
|
Plot the PAW densities.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure
|
codesearchnet
|
def GetArtifactCollectorArgs(flow_args, knowledge_base):
args = rdf_artifacts.ClientArtifactCollectorArgs()
args.knowledge_base = knowledge_base
args.apply_parsers = flow_args.apply_parsers
args.ignore_interpolation_errors = flow_args.ignore_interpolation_errors
args.max_file_size = flow_args.max_file_size
args.use_tsk = flow_args.use_tsk
if (not flow_args.recollect_knowledge_base):
artifact_names = flow_args.artifact_list
else:
artifact_names = GetArtifactsForCollection(knowledge_base.os, flow_args.artifact_list)
expander = ArtifactExpander(knowledge_base, flow_args.path_type, flow_args.max_file_size)
for artifact_name in artifact_names:
rdf_artifact = artifact_registry.REGISTRY.GetArtifact(artifact_name)
if (not MeetsConditions(knowledge_base, rdf_artifact)):
continue
if (artifact_name in expander.processed_artifacts):
continue
requested_by_user = (artifact_name in flow_args.artifact_list)
for expanded_artifact in expander.Expand(rdf_artifact, requested_by_user):
args.artifacts.append(expanded_artifact)
return args
|
Prepare bundle of artifacts and their dependencies for the client.
Args:
flow_args: An `ArtifactCollectorFlowArgs` instance.
knowledge_base: contains information about the client
Returns:
rdf value object containing a list of extended artifacts and the
knowledge base
|
codesearchnet
|
def is_uniform(self):
return self._uniform_row_length is not None
|
Returns true if the partition is known to be uniform statically.
This is based upon the existence of self._uniform_row_length. For example:
RowPartition.from_row_lengths([3,3,3]).is_uniform()==false
RowPartition.from_uniform_row_length(5, nvals=20).is_uniform()==true
RowPartition.from_row_lengths([2,0,2]).is_uniform()==false
Returns:
Whether a RowPartition is known to be uniform statically.
|
github-repos
|
def visualize_reconstruction(inputs, reconstruct, num=3, name='reconstruction'):
reconstruct = tf.clip_by_value(reconstruct, 0.0, 1.0)
inputs_and_reconstruct = tf.concat((inputs[:num], reconstruct[:num]), axis=0)
image_summary(inputs_and_reconstruct, name)
|
Visualizes the reconstruction of inputs in TensorBoard.
Args:
inputs: A tensor of the original inputs, of shape [batch, timesteps,
h, w, c].
reconstruct: A tensor of a reconstruction of inputs, of shape
[batch, timesteps, h, w, c].
num: Integer for the number of examples to visualize.
name: String name of this summary.
|
codesearchnet
|
def Detect(self, baseline, host_data):
result = CheckResult()
for detector in self.detectors:
finding = detector(baseline, host_data)
if finding:
result.ExtendAnomalies([finding])
if result:
return result
|
Run host_data through detectors and return them if a detector triggers.
Args:
baseline: The base set of rdf values used to evaluate whether an issue
exists.
host_data: The rdf values passed back by the filters.
Returns:
A CheckResult message containing anomalies if any detectors identified an
issue, None otherwise.
|
codesearchnet
|
def GetEnabledInterfaces():
interfaces = []
show_args = ['/c', 'netsh', 'show', 'interface']
res = client_utils_common.Execute('cmd', show_args, time_limit=(- 1), bypass_whitelist=True)
pattern = re.compile('\\s*')
for line in res[0].split('\r\n'):
interface_info = pattern.split(line)
if ('Enabled' in interface_info):
interfaces.extend(interface_info[(- 1):])
return interfaces
|
Gives a list of enabled interfaces. Should work on all windows versions.
Returns:
interfaces: Names of interfaces found enabled.
|
codesearchnet
|
def get_port_monitor(self):
uri = '{}{}'.format(self.data['uri'], self.PORT_MONITOR_PATH)
return self._helper.do_get(uri)
|
Gets the port monitor configuration of a logical interconnect.
Returns:
dict: The port monitor configuration of the logical interconnect.
|
codesearchnet
|
def get_countries_in_region(cls, region, use_live=True, exception=None):
countriesdata = cls.countriesdata(use_live=use_live)
if isinstance(region, int):
regioncode = region
else:
regionupper = region.upper()
regioncode = countriesdata['regionnames2codes'].get(regionupper)
if regioncode is not None:
return countriesdata['regioncodes2countries'][regioncode]
if exception is not None:
raise exception
return list()
|
Get countries (ISO3 codes) in region
Args:
region (Union[int,str]): Three digit UNStats M49 region code or region name
use_live (bool): Try to get the latest data from the web rather than the file in the package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.
Returns:
List[str]: Sorted list of ISO3 country codes
|
juraj-google-style
|
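A short usage sketch, assuming the class is exposed as `hdx.location.country.Country` (as in the HDX Python Country package); adjust the import if the layout differs.
from hdx.location.country import Country  # assumed import path

# 151 is the UNStats M49 code for Eastern Europe; a region name string works too.
iso3_codes = Country.get_countries_in_region(151, use_live=False)
print(iso3_codes)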
def get_updates(
self,
display_all_distributions=False,
verbose=False
):
if verbose:
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format='%(message)s',
)
logging.info('Checking installed packages for updates...')
updates = self._get_environment_updates(
display_all_distributions=display_all_distributions
)
if updates:
for update in updates:
logging.info(update)
if updates and self._csv_file_name:
self.write_updates_to_csv(updates)
if updates and self._new_config:
self.write_new_config(updates)
return updates
|
When called, get the environment updates and write updates to a CSV
file and if a new config has been provided, write a new configuration
file.
Args:
display_all_distributions (bool): Return distribution even if it is
up-to-date.
verbose (bool): If ``True``, log to the terminal.
|
juraj-google-style
|
def get_data(name, train_batch_size, test_batch_size):
if name not in ['mnist', 'cifar10']:
raise ValueError(
'Expected dataset \'mnist\' or \'cifar10\', but got %s' % name)
dataset = getattr(tf.keras.datasets, name)
num_classes = 10
raw_data = dataset.load_data()
(images_train, labels_train), (images_test, labels_test) = raw_data
images_train = images_train.astype(np.float32) / 255.
images_test = images_test.astype(np.float32) / 255.
labels_train = labels_train.astype(np.int32).squeeze()
labels_test = labels_test.astype(np.int32).squeeze()
if images_train.ndim == 3:
images_train = np.expand_dims(images_train, -1)
images_test = np.expand_dims(images_test, -1)
train_data = tf.data.Dataset.from_tensor_slices((images_train, labels_train))
test_data = tf.data.Dataset.from_tensor_slices((images_test, labels_test))
train_iterator = (
train_data
.shuffle(buffer_size=len(images_train))
.batch(train_batch_size)
.repeat()
.make_one_shot_iterator()
)
test_iterator = test_data.batch(test_batch_size).make_initializable_iterator()
return dict(
train_iterator=train_iterator,
test_iterator=test_iterator,
num_classes=num_classes)
|
Gets training and testing dataset iterators.
Args:
name: String. Name of dataset, either 'mnist' or 'cifar10'.
train_batch_size: Integer. Batch size for training.
test_batch_size: Integer. Batch size for testing.
Returns:
Dict containing:
train_iterator: A tf.data.Iterator, over training data.
test_iterator: A tf.data.Iterator, over test data.
num_classes: Integer. Number of class labels.
|
juraj-google-style
|
def QueryAllFeatures(self, url=None, where='1=1', out_fields='*', timeFilter=None, geometryFilter=None, returnFeatureClass=False, out_fc=None, outSR=None, chunksize=1000, printIndent=''):
if (url is None):
return
fl = None
try:
fl = FeatureLayer(url=url, securityHandler=self._securityHandler)
qRes = fl.query(where=where, returnIDsOnly=True, timeFilter=timeFilter, geometryFilter=geometryFilter)
if ('error' in qRes):
print((printIndent + qRes))
return []
elif ('objectIds' in qRes):
oids = qRes['objectIds']
total = len(oids)
if (total == 0):
return fl.query(where=where, returnGeometry=True, out_fields=out_fields, timeFilter=timeFilter, geometryFilter=geometryFilter, outSR=outSR)
print((printIndent + ('%s features to be downloaded' % total)))
chunksize = min(chunksize, fl.maxRecordCount)
combinedResults = None
totalQueried = 0
for chunk in chunklist(l=oids, n=chunksize):
oidsQuery = ','.join(map(str, chunk))
if (not oidsQuery):
continue
else:
results = fl.query(objectIds=oidsQuery, returnGeometry=True, out_fields=out_fields, timeFilter=timeFilter, geometryFilter=geometryFilter, outSR=outSR)
if isinstance(results, FeatureSet):
if (combinedResults is None):
combinedResults = results
else:
for feature in results.features:
combinedResults.features.append(feature)
totalQueried += len(results.features)
print((printIndent + '{:.0%} Completed: {}/{}'.format((totalQueried / float(total)), totalQueried, total)))
else:
print((printIndent + results))
if (returnFeatureClass == True):
return combinedResults.save(*os.path.split(out_fc))
else:
return combinedResults
else:
print((printIndent + qRes))
except:
(line, filename, synerror) = trace()
raise common.ArcRestHelperError({'function': 'QueryAllFeatures', 'line': line, 'filename': filename, 'synerror': synerror})
finally:
fl = None
del fl
gc.collect()
|
Performs an SQL query against a hosted feature service layer
and returns all features regardless of service limit.
Args:
url (str): The URL of the feature service layer.
where - the selection sql statement
out_fields - the attribute fields to return
timeFilter - a TimeFilter object where either the start time
or start and end time are defined to limit the
search results for a given time. The values in
the timeFilter should be UTC timestamps in
milliseconds. No checking occurs to see if they
are in the right format.
geometryFilter - a GeometryFilter object to parse down a given
query by another spatial dataset.
returnFeatureClass - Default False. If true, query will be
returned as feature class
chunksize (int): The maximum amount of features to query at a time. Defaults to 1000.
out_fc - only valid if returnFeatureClass is set to True.
Output location of query.
Output:
A list of Feature Objects (default) or a path to the output featureclass if
returnFeatureClass is set to True.
|
codesearchnet
|
def description(self, force_refresh=False):
if force_refresh:
self.clear_cache()
if (not self._tuning_job_describe_result):
self._tuning_job_describe_result = self._sage_client.describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=self.name)
return self._tuning_job_describe_result
|
Call ``DescribeHyperParameterTuningJob`` for the hyperparameter tuning job.
Args:
force_refresh (bool): Set to True to fetch the latest data from SageMaker API.
Returns:
dict: The Amazon SageMaker response for ``DescribeHyperParameterTuningJob``.
|
codesearchnet
|
def __init__(self, parent):
super(ModuleUIFrame, self).__init__(parent)
self.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
from ....datatools import get_data
data = get_data()
api_frame = ttk.LabelFrame(self, padding=8, text="Google API")
api_frame.grid(row=0, column=0, sticky="W E N S")
api_frame.columnconfigure(0, weight=1)
self.google_api_key = tk.StringVar()
ttk.Label(api_frame, text="Google API Key").grid(column=0, row=0, sticky="W E N S")
ttk.Entry(api_frame, textvariable=self.google_api_key).grid(
column=0, row=1, padx=0, pady=4, sticky="W E N S")
self.soundcloud_client_id = tk.StringVar()
ttk.Label(api_frame, text="SoundCloud Client ID").grid(column=0, row=2, sticky="W E N S")
ttk.Entry(api_frame, textvariable=self.soundcloud_client_id).grid(
column=0, row=3, padx=0, pady=4, sticky="W E N S")
ttk.Button(api_frame, command=lambda: self.update_keys(), text="Update API Data").grid(
column=0, row=4, padx=0, pady=4, sticky="W E N S")
if "google_api_key" in data["discord"]["keys"]:
self.google_api_key.set(data["discord"]["keys"]["google_api_key"])
if "soundcloud_client_id" in data["discord"]["keys"]:
self.soundcloud_client_id.set(data["discord"]["keys"]["soundcloud_client_id"])
|
Create a new UI for the module
Args:
parent: A tk or ttk object
|
juraj-google-style
|
def get_barycenter(self):
try:
mass = self['mass'].values
except KeyError:
mass = self.add_data('mass')['mass'].values
pos = self.loc[:, ['x', 'y', 'z']].values
return ((pos * mass[:, None]).sum(axis=0) / self.get_total_mass())
|
Return the mass weighted average location.
Args:
None
Returns:
:class:`numpy.ndarray`:
|
codesearchnet
|
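The mass-weighted average itself, stripped of the DataFrame plumbing, as a small NumPy sketch with made-up coordinates and masses.
import numpy as np

pos = np.array([[0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0]])   # illustrative coordinates
mass = np.array([1.0, 3.0])         # illustrative masses

barycenter = (pos * mass[:, None]).sum(axis=0) / mass.sum()
# array([0.75, 0.  , 0.  ]) -- pulled toward the heavier atom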
def createList(self, title=None, items=None):
if items is None:
items = []
node = _node.List()
if title is not None:
node.title = title
for text, checked in items:
node.add(text, checked)
self.add(node)
return node
|
Create a new list and populate it. Any changes to the note will be uploaded when :py:meth:`sync` is called.
Args:
title (str): The title of the list.
items (List[(str, bool)]): A list of tuples. Each tuple represents the text and checked status of the listitem.
Returns:
gkeepapi.node.List: The new list.
|
juraj-google-style
|
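A hedged usage sketch with gkeepapi; the credentials are placeholders and the login call may differ between library versions (newer releases prefer token-based authentication).
import gkeepapi

keep = gkeepapi.Keep()
keep.login('user@example.com', 'app-password')  # placeholder credentials

glist = keep.createList('Groceries', [
    ('Milk', False),
    ('Eggs', True),   # already checked
])
keep.sync()           # pushes the new list to the server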