code (string, lengths 20 to 4.93k) | docstring (string, lengths 33 to 1.27k) | source (string, 3 classes)
---|---|---|
def result(self):
self.wait()
if self._fatal_error:
raise self._fatal_error
return self._result | Get the result for a job. This will block if the job is incomplete.
Returns:
The result for the Job.
Raises:
An exception if the Job resulted in an exception. | codesearchnet |
def _GeneratePathString(self, mediator, pathspec, hashes):
display_name = mediator.GetDisplayNameForPathSpec(pathspec)
path_string = '{0:s}:'.format(display_name)
for (hash_name, hash_value) in sorted(hashes.items()):
path_string = '{0:s} {1:s}={2:s}'.format(path_string, hash_name, hash_value)
return path_string | Generates a string containing a pathspec and its hashes.
Args:
mediator (AnalysisMediator): mediates interactions between analysis
plugins and other components, such as storage and dfvfs.
pathspec (dfvfs.PathSpec): the path specification to generate a string
for.
hashes (dict[str, str]): mapping of hash attribute names to the value of
that hash for the path specification being processed.
Returns:
str: string of the form "display_name: hash_type=hash_value". For example,
"OS:/path/spec: test_hash=4 other_hash=5". | codesearchnet |
def describe_enum(enum_definition):
enum_descriptor = EnumDescriptor()
enum_descriptor.name = enum_definition.definition_name().split('.')[(- 1)]
values = []
for number in enum_definition.numbers():
value = enum_definition.lookup_by_number(number)
values.append(describe_enum_value(value))
if values:
enum_descriptor.values = values
return enum_descriptor | Build descriptor for Enum class.
Args:
enum_definition: Enum class to provide descriptor for.
Returns:
Initialized EnumDescriptor instance describing the Enum class. | codesearchnet |
def import_mapping(connection_id, mapping):
url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT,
'connections', connection_id, 'actions', 'import')
response = requests.post(
url=url,
json=mapping,
headers=_get_authorization_headers()
)
    response.raise_for_status() | Import the Heroku Connect mapping for the given connection.
Args:
connection_id (str): Heroku Connect connection ID.
mapping (dict): Heroku Connect mapping.
Raises:
requests.HTTPError: If an error occurs uploading the mapping.
ValueError: If the mapping is not JSON serializable. | juraj-google-style |
def GetFeedMapping(client, feed, placeholder_type):
feed_mapping_service = client.GetService('FeedMappingService', 'v201809')
attribute_mappings = {}
more_pages = True
selector = {'fields': ['FeedMappingId', 'AttributeFieldMappings'], 'predicates': [{'field': 'FeedId', 'operator': 'EQUALS', 'values': [feed['id']]}, {'field': 'PlaceholderType', 'operator': 'EQUALS', 'values': [placeholder_type]}], 'paging': {'startIndex': 0, 'numberResults': PAGE_SIZE}}
while more_pages:
page = feed_mapping_service.get(selector)
if ('entries' in page):
for feed_mapping in page['entries']:
for attribute_mapping in feed_mapping['attributeFieldMappings']:
if (attribute_mapping['feedAttributeId'] in attribute_mappings):
attribute_mappings[attribute_mapping['feedAttributeId']].append(attribute_mapping['fieldId'])
else:
attribute_mappings[attribute_mapping['feedAttributeId']] = [attribute_mapping['fieldId']]
selector['paging']['startIndex'] += PAGE_SIZE
more_pages = (selector['paging']['startIndex'] < int(page['totalNumEntries']))
return attribute_mappings | Gets the Feed Mapping for a given Feed.
Args:
client: an AdWordsClient instance.
feed: the Feed we are retrieving the Feed Mapping for.
placeholder_type: the Placeholder Type we are looking for.
Returns:
A dictionary containing the Feed Mapping. | codesearchnet |
def _calculate_hash(files, root):
file_hash = hashlib.md5()
for fname in sorted(files):
f = os.path.join(root, fname)
file_hash.update((fname + "\0").encode())
with open(f, "rb") as fd:
for chunk in iter(lambda: fd.read(4096), ""):
if not chunk:
break
file_hash.update(chunk)
file_hash.update("\0".encode())
return file_hash.hexdigest() | Returns a hash of all of the given files at the given root.
Args:
files (list[str]): file names to include in the hash calculation,
relative to ``root``.
root (str): base directory to analyze files in.
Returns:
str: A hash of the hashes of the given files. | juraj-google-style |
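For orientation, a brief sketch of the digest layout produced by `_calculate_hash` above, using hypothetical file names and contents: because the file list is sorted and each name and content block is terminated with a NUL byte, the same set of files always yields the same digest regardless of the order in which they are passed in.

```python
import hashlib

# Hypothetical layout: root/a.txt contains b"hello", root/b.txt contains b"world".
# The hashed byte stream is: name NUL content NUL, per file, in sorted name order.
stream = b"a.txt\x00" + b"hello" + b"\x00" + b"b.txt\x00" + b"world" + b"\x00"
print(hashlib.md5(stream).hexdigest())
```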
def _init_boto3_clients(self):
try:
profile = self._config.get('environment', {}).get('profile')
region = self._config.get('environment', {}).get('region')
if profile:
self._b3Sess = boto3.session.Session(profile_name=profile)
else:
self._b3Sess = boto3.session.Session()
self._s3 = self._b3Sess.client('s3')
self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)
self._ssm = self._b3Sess.client('ssm', region_name=region)
return True
except Exception as wtf:
            logging.error('Exception caught in _init_boto3_clients(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
            return False | The utility requires boto3 clients for CloudFormation and S3. Here is
where we make them.
Args:
None
Returns:
Good or Bad; True or False | juraj-google-style |
def from_json(cls, data):
optional_keys = ('city', 'state', 'country', 'latitude', 'longitude', 'time_zone', 'elevation', 'station_id', 'source')
for key in optional_keys:
if (key not in data):
data[key] = None
return cls(data['city'], data['state'], data['country'], data['latitude'], data['longitude'], data['time_zone'], data['elevation'], data['station_id'], data['source']) | Create a location from a dictionary.
Args:
data: {
"city": "-",
"latitude": 0,
"longitude": 0,
"time_zone": 0,
"elevation": 0} | codesearchnet |
def _fdopen_ver2(self, file_des, mode='r', bufsize=None):
if (not is_int_type(file_des)):
raise TypeError('an integer is required')
try:
return FakeFileOpen(self.filesystem).call(file_des, mode=mode)
except IOError as exc:
self.filesystem.raise_os_error(exc.errno, exc.filename) | Returns an open file object connected to the file descriptor
file_des.
Args:
file_des: An integer file descriptor for the file object requested.
mode: Additional file flags. Currently checks to see if the mode
matches the mode of the requested file object.
bufsize: ignored. (Used for signature compliance with
__builtin__.fdopen)
Returns:
File object corresponding to file_des.
Raises:
OSError: if bad file descriptor or incompatible mode is given.
TypeError: if file descriptor is not an integer. | codesearchnet |
def AddServiceDescriptor(self, service_desc):
if not isinstance(service_desc, descriptor.ServiceDescriptor):
raise TypeError('Expected instance of descriptor.ServiceDescriptor.')
self._service_descriptors[service_desc.full_name] = service_desc | Adds a ServiceDescriptor to the pool.
Args:
service_desc: A ServiceDescriptor. | juraj-google-style |
def __resource_descriptor(self, resource_path, methods):
descriptor = {}
method_map = {}
sub_resource_index = collections.defaultdict(list)
sub_resource_map = {}
resource_path_tokens = resource_path.split('.')
for service, protorpc_meth_info in methods:
method_info = getattr(protorpc_meth_info, 'method_info', None)
path = method_info.get_path(service.api_info)
method_id = method_info.method_id(service.api_info)
canonical_method_id = self._get_canonical_method_id(method_id)
current_resource_path = self._get_resource_path(method_id)
if (current_resource_path[:len(resource_path_tokens)] !=
resource_path_tokens):
raise api_exceptions.ToolError(
'Internal consistency error in resource path {0}'.format(
current_resource_path))
effective_resource_path = current_resource_path[
len(resource_path_tokens):]
if effective_resource_path:
sub_resource_name = effective_resource_path[0]
new_resource_path = '.'.join([resource_path, sub_resource_name])
sub_resource_index[new_resource_path].append(
(service, protorpc_meth_info))
else:
method_map[canonical_method_id] = self.__method_descriptor(
service, method_info, protorpc_meth_info)
for sub_resource, sub_resource_methods in sub_resource_index.items():
sub_resource_name = sub_resource.split('.')[-1]
sub_resource_map[sub_resource_name] = self.__resource_descriptor(
sub_resource, sub_resource_methods)
if method_map:
descriptor['methods'] = method_map
if sub_resource_map:
descriptor['resources'] = sub_resource_map
return descriptor | Describes a resource.
Args:
resource_path: string, the path of the resource (e.g., 'entries.items')
methods: list of tuples of type
(endpoints.Service, protorpc.remote._RemoteMethodInfo), the methods
that serve this resource.
Returns:
Dictionary describing the resource. | juraj-google-style |
def PrepareMatches(self, file_system):
if self._location is not None:
self._location_segments = self._SplitPath(
self._location, file_system.PATH_SEPARATOR)
elif self._location_regex is not None:
path_separator = file_system.PATH_SEPARATOR
if path_separator == '\\':
path_separator = '\\\\'
self._location_segments = self._SplitPath(
self._location_regex, path_separator)
if self._location_segments is not None:
self._number_of_location_segments = len(self._location_segments) | Prepare find specification for matching.
Args:
file_system (FileSystem): file system. | juraj-google-style |
def _ParseAndValidateRecord(self, parser_mediator, text_file_object):
try:
title = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
url = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
timestamp = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
popularity_index = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
except UnicodeDecodeError:
return False
if len(title) == self._MAXIMUM_LINE_SIZE and title[-1] != '\n':
return False
if len(url) == self._MAXIMUM_LINE_SIZE and url[-1] != '\n':
return False
if len(timestamp) == self._MAXIMUM_LINE_SIZE and timestamp[-1] != '\n':
return False
if (len(popularity_index) == self._MAXIMUM_LINE_SIZE and
popularity_index[-1] != '\n'):
return False
title = title.strip()
url = url.strip()
timestamp = timestamp.strip()
popularity_index = popularity_index.strip()
if not title or not url or not timestamp or not popularity_index:
return False
event_data = OperaGlobalHistoryEventData()
if not self._IsValidUrl(url):
return False
event_data.url = url
if title != url:
event_data.title = title
try:
event_data.popularity_index = int(popularity_index, 10)
timestamp = int(timestamp, 10)
except ValueError:
return False
if event_data.popularity_index < 0:
event_data.description = 'First and Only Visit'
else:
event_data.description = 'Last Visit'
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
return True | Parses and validates an Opera global history record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
text_file_object (dfvfs.TextFile): text file.
Returns:
bool: True if the record was successfully parsed. | juraj-google-style |
def cleanup(context):
for name in ('work_dir', 'artifact_dir', 'task_log_dir'):
path = context.config[name]
if os.path.exists(path):
log.debug('rm({})'.format(path))
rm(path)
makedirs(path) | Clean up the work_dir and artifact_dir between task runs, then recreate.
Args:
context (scriptworker.context.Context): the scriptworker context. | codesearchnet |
def vr60baro(msg):
d = hex2bin(data(msg))
if d[34] == '0':
return None
sign = int(d[35])
value = bin2int(d[36:45])
if value == 0 or value == 511:
return 0
value = value - 512 if sign else value
roc = value * 32
return roc | Vertical rate from barometric measurement, this value may be very noisy.
Args:
msg (str): 28-character hexadecimal message string (BDS60)
Returns:
int: vertical rate in feet/minute | juraj-google-style |
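As a quick sanity check of the arithmetic in `vr60baro`, here is the same computation on hypothetical field values (not decoded from a real BDS60 message): with the sign bit set and a raw 9-bit value of 96, the value is re-centred to 96 - 512 = -416 and scaled by 32 ft/min per step.

```python
# Hypothetical decoded fields, illustrating only the arithmetic above.
sign, value = 1, 96
value = value - 512 if sign else value  # -416
roc = value * 32                        # -13312 ft/min (descending)
print(roc)
```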
def memory_zones(self):
count = self.num_memory_zones()
if count == 0:
return list()
buf = (structs.JLinkMemoryZone * count)()
res = self._dll.JLINK_GetMemZones(buf, count)
if res < 0:
raise errors.JLinkException(res)
return list(buf) | Gets all memory zones supported by the current target.
Some targets support multiple memory zones. This function provides the
ability to get a list of all the memory zones to facilitate using the
memory zone routing functions.
Args:
self (JLink): the ``JLink`` instance
Returns:
A list of all the memory zones as ``JLinkMemoryZone`` structures.
Raises:
JLinkException: on hardware errors. | juraj-google-style |
def create_ltp_package(aleph_record, book_id, ebook_fn, data, url, urn_nbn=None):
(root_dir, orig_dir, meta_dir) = _create_package_hierarchy(book_id=book_id)
original_fn = os.path.join(orig_dir, fn_composers.original_fn(book_id, ebook_fn))
with open(original_fn, 'wb') as f:
f.write(data)
metadata_filenames = []
records = marcxml2mods(marc_xml=aleph_record, uuid=book_id, url=url)
for (cnt, mods_record) in enumerate(records):
fn = os.path.join(meta_dir, fn_composers.volume_fn(cnt))
with open(fn, 'w') as f:
f.write(mods_record)
metadata_filenames.append(fn)
md5_fn = os.path.join(root_dir, fn_composers.checksum_fn(book_id))
checksums = checksum_generator.generate_hashfile(root_dir)
with open(md5_fn, 'w') as f:
f.write(checksums)
info_fn = os.path.join(root_dir, fn_composers.info_fn(book_id))
with open(info_fn, 'w') as f:
f.write(info_composer.compose_info(root_dir=root_dir, files=([original_fn] + metadata_filenames), hash_fn=md5_fn, aleph_record=aleph_record, urn_nbn=urn_nbn))
return root_dir | Create LTP package as it is specified in specification v1.0 as I understand
it.
Args:
aleph_record (str): XML containing full aleph record.
book_id (str): UUID of the book.
ebook_fn (str): Original filename of the ebook.
data (str/bytes): Ebook's content.
url (str): URL of the publication used when the URL can't be found in
`aleph_record`.
urn_nbn (str, default None): URN:NBN.
Returns:
str: Name of the package's directory in ``/tmp``. | codesearchnet |
def identify(text):
filtered_text = set(list(text)).intersection(ALL_CHARS)
    if (len(filtered_text) == 0):
return None
if filtered_text.issubset(SHARED_CHARS):
return EITHER
if filtered_text.issubset(TRAD_CHARS):
return TRAD
if filtered_text.issubset(SIMP_CHARS):
return SIMP
if filtered_text.difference(TRAD_CHARS).issubset(SIMP_CHARS):
return BOTH | Identify whether a string is simplified or traditional Chinese.
Returns:
None: if there are no recognized Chinese characters.
EITHER: if the test is inconclusive.
TRAD: if the text is traditional.
SIMP: if the text is simplified.
BOTH: the text has characters recognized as being solely traditional
and other characters recognized as being solely simplified. | codesearchnet |
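A self-contained sketch of the same set-containment logic, using tiny hypothetical character sets in place of the module-level `TRAD_CHARS`, `SIMP_CHARS`, `SHARED_CHARS` and `ALL_CHARS` constants (in the real module the traditional and simplified sets both include the shared characters):

```python
# Hypothetical stand-ins for the module-level constants.
SHARED_CHARS = {'中', '文'}
TRAD_CHARS = {'漢', '語'} | SHARED_CHARS
SIMP_CHARS = {'汉', '语'} | SHARED_CHARS
ALL_CHARS = TRAD_CHARS | SIMP_CHARS

def classify(text):
    chars = set(text) & ALL_CHARS
    if not chars:
        return None
    if chars <= SHARED_CHARS:
        return 'EITHER'
    if chars <= TRAD_CHARS:
        return 'TRAD'
    if chars <= SIMP_CHARS:
        return 'SIMP'
    if (chars - TRAD_CHARS) <= SIMP_CHARS:
        return 'BOTH'

print(classify('中文'), classify('漢語'), classify('汉语'), classify('漢语'))
# -> EITHER TRAD SIMP BOTH
```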
def test_noninlined_funcdef(self, mode):
self._maybe_skip(mode)
with ops.device(_get_device(mode)):
random_seed.set_random_seed(0)
x = _input([8, 8])
y = _matmul_act(x)
y = _example_noninlined_funcdef(y)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.01)
g = optimizer.compute_gradients(y, [x])
output = (g, y)
output_val_ref, output_val, cost_graph = self._run(mode, output)
node_map = _build_node_map(cost_graph.node)
self._assert_output_f16(mode, node_map, 'MatMul')
tol = 0.01 if mode == 'mkl' else 0.001
atol = 0.01 if test.is_built_with_rocm() else tol
self.assertAllClose(output_val_ref, output_val, atol=atol, rtol=tol) | Test graph with non-inlined function subgraph.
This requires the grappler pass to handle an OpDef that only appears in the
graph's function registry instead of the global op registry.
Args:
mode: Either 'cuda' or 'mkl'. | github-repos |
def run_function_on_all_workers(self, function, run_on_other_drivers=False):
if (self.mode is None):
self.cached_functions_to_run.append(function)
else:
pickled_function = pickle.dumps(function)
function_to_run_id = hashlib.sha1(pickled_function).digest()
key = (b'FunctionsToRun:' + function_to_run_id)
function({'worker': self})
function_exported = self.redis_client.setnx((b'Lock:' + key), 1)
if (not function_exported):
return
check_oversized_pickle(pickled_function, function.__name__, 'function', self)
self.redis_client.hmset(key, {'driver_id': self.task_driver_id.binary(), 'function_id': function_to_run_id, 'function': pickled_function, 'run_on_other_drivers': str(run_on_other_drivers)})
self.redis_client.rpush('Exports', key) | Run arbitrary code on all of the workers.
This function will first be run on the driver, and then it will be
exported to all of the workers to be run. It will also be run on any
new workers that register later. If ray.init has not been called yet,
then cache the function and export it later.
Args:
function (Callable): The function to run on all of the workers. It
takes only one argument, a worker info dict. If it returns
anything, its return values will not be used.
run_on_other_drivers: The boolean that indicates whether we want to
run this function on other drivers. One case is we may need to
share objects across drivers. | codesearchnet |
def scrape(self, url):
if isinstance(url, str) is False:
raise TypeError("The type of url must be str.")
if self.readable_web_pdf is not None and self.readable_web_pdf.is_pdf_url(url) is True:
web_data = self.readable_web_pdf.url_to_text(url)
else:
web_data = ""
req = urllib.request.Request(url=url)
with urllib.request.urlopen(req) as f:
web = f.read().decode('utf-8')
dom = pq(web)
[dom(remove_object).remove() for remove_object in self.__remove_object_list]
for dom_object in self.__dom_object_list:
web_data += dom(dom_object).text()
sleep(1)
return web_data | Execute Web-Scraping.
The target dom objects are in self.__dom_object_list.
Args:
url: Web site url.
Returns:
The result. this is a string.
@TODO(chimera0): check URLs format. | juraj-google-style |
def _normalize_field_name(self, field_name) -> str:
if isinstance(field_name, tuple):
field_name, _ = field_name
return field_name | Normalizes a field name into a string by
extracting the field name if it was specified
as a reference to a HStore key (as a tuple).
Arguments:
field_name:
The field name to normalize.
Returns:
The normalized field name. | juraj-google-style |
def model_fn(self, x: core.Tensor) -> Mapping[str, core.Tensor]:
if math_ops.reduce_sum(x) > 10.0:
out = math_ops.matmul(x, self.filters_0)
out = nn_ops.bias_add(out, self.bias_0)
return {'output': out}
out = math_ops.matmul(x, self.filters_1)
out = nn_ops.bias_add(out, self.bias_1)
return {'output': out} | Runs the input tensor to a branched operations.
The graph is branched by a condition whether the sum of elements of `x`
is greater than 10.
Args:
x: Input tensor.
Returns:
A map of: output key -> output result. | github-repos |
def sample_from_likelihood(self, n_timesteps=10):
self.latent_state_sequences = lmap(
lambda A: ltake(
n_timesteps,
iterate(
lambda s: pd.Series(A @ s.values, index=s.index), self.s0
),
),
self.transition_matrix_collection,
)
self.observed_state_sequences = [
[self.sample_observed_state(s) for s in latent_state_sequence]
for latent_state_sequence in self.latent_state_sequences
] | Sample a collection of observed state sequences from the likelihood
model given a collection of transition matrices.
Args:
n_timesteps: The number of timesteps for the sequences. | juraj-google-style |
def __init__(self, request, async, callback=None, callbacks=dict(), root_object=None):
self._uses_authentication = True
self._has_timeouted = False
self._ignore_request_idle = False
self._xhr_timeout = 3000
self._response = None
self._error_message = None
self._transaction_id = uuid.uuid4().hex
self._request = request
self._async = async
self._callback = callback
self._callbacks = callbacks
self._user_info = None
self._object_last_action_timer = None
        self._root_object = root_object | Initializes a new connection for a given request.
NURESTConnection object is in charge of the HTTP call. It relies on the requests library.
Args:
request: the NURESTRequest to send
callback: the method that will be fired after sending
callbacks: a dictionary of user callbacks. Should contain local and remote callbacks. | juraj-google-style |
def get_sendback(self, uuid, key):
def send_back_callback(data):
self.sendResponse(
serializers.serialize(data),
uuid,
key
)
return send_back_callback | Return function for sending progress messages back to original caller.
Args:
uuid (str): UUID of the received message.
key (str): Routing key.
Returns:
fn reference: Reference to a function which takes only one data argument. | juraj-google-style |
def get_capture_handler_config_by_name(self, name):
handler_confs = []
for (address, stream_capturer) in self._stream_capturers.iteritems():
handler_data = stream_capturer[0].dump_handler_config_data()
for h in handler_data:
if (h['handler']['name'] == name):
handler_confs.append(h)
return handler_confs | Return data for handlers of a given name.
Args:
name:
Name of the capture handler(s) to return config data for.
Returns:
Dictionary dump from the named capture handler as given by
the :func:`SocketStreamCapturer.dump_handler_config_data` method. | codesearchnet |
def __init__(self, file_handle):
if not file_handle.writable():
raise ValueError('Output stream must be writable')
self._file_handle = file_handle
    self._coder = RowAsDictJsonCoder() | Initialize a JsonRowWriter.
Args:
file_handle (io.IOBase): Output stream to write to. | github-repos |
def precision(truth, recommend, k=None):
if len(recommend) == 0:
if len(truth) == 0:
return 1.
return 0.
if k is None:
k = len(recommend)
return count_true_positive(truth, recommend[:k]) / float(k) | Precision@k.
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
k (int): Top-k items in `recommend` will be recommended.
Returns:
float: Precision@k. | juraj-google-style |
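A small worked example of Precision@k with hypothetical item ids (the real implementation counts hits via `count_true_positive`, which is not shown in this table): with k=3, two of the top three recommendations appear in the truth set, so Precision@3 = 2/3.

```python
# Hypothetical truth set and ranked recommendations.
truth = {1, 2, 3}
recommend = [2, 5, 1, 7]

k = 3
hits = sum(1 for item in recommend[:k] if item in truth)  # items 2 and 1 -> 2 hits
print(hits / float(k))  # 0.666...
```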
def _validate_isvalid_orcid(self, isvalid_orcid, field, value):
if isvalid_orcid and 'ORCID' in value:
try:
res = search_orcid(value['ORCID'])
except ConnectionError:
warn('network not available, ORCID not validated.')
return
except HTTPError:
self._error(field, 'ORCID incorrect or invalid for ' +
value['name']
)
return
family_name = res['name']['family-name']['value']
given_name = res['name']['given-names']['value']
if not compare_name(given_name, family_name, value['name']):
self._error(field, 'Name and ORCID do not match. Name supplied: ' +
value['name'] + '. Name associated with ORCID: ' +
' '.join([given_name, family_name])
) | Checks for valid ORCID if given.
Args:
isvalid_orcid (`bool`): flag from schema indicating ORCID to be checked.
field (`str`): 'author'
value (`dict`): dictionary of author metadata.
The rule's arguments are validated against this schema:
{'isvalid_orcid': {'type': 'bool'}, 'field': {'type': 'str'},
'value': {'type': 'dict'}} | juraj-google-style |
def direct_normal_illuminance(self, value=999999.0):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `direct_normal_illuminance`'.format(value))
if (value < 0.0):
raise ValueError('value need to be greater or equal 0.0 for field `direct_normal_illuminance`')
self._direct_normal_illuminance = value | Corresponds to IDD Field `direct_normal_illuminance`
will be missing if >= 999900
Args:
value (float): value for IDD Field `direct_normal_illuminance`
Unit: lux
value >= 0.0
Missing value: 999999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | codesearchnet |
def run(self, xml, **kwargs):
kwargs['output'] = self.__graph__()
if isinstance(xml, str):
try:
self.source = etree.XML(xml)
except ValueError:
try:
self.source = etree.XML(xml.encode())
except:
raise ValueError('Cannot run error {}'.format(sys.exc_info()[0]))
else:
self.source = xml
super(XMLProcessor, self).run(**kwargs)
self.output = kwargs['output']
return kwargs['output'] | Method takes either an etree.ElementTree or raw XML text
as the first argument.
Args:
xml (etree.ElementTree or str): the XML to process, either a parsed element tree or raw XML text. | codesearchnet |
def db_insert_record(self, table_name, columns):
bindings = ('?,' * len(columns)).strip(',')
values = [None] * len(columns)
sql = 'INSERT INTO {} ({}) VALUES ({})'.format(table_name, ', '.join(columns), bindings)
cur = self.db_conn.cursor()
cur.execute(sql, values) | Insert records into DB.
Args:
table_name (str): The name of the table.
columns (list): List of columns for insert statement. | juraj-google-style |
def __init__(self, message=None, host=None):
self.message = message
self.hostname = str(host) if host else None | Initialize the GeneralError object.
Args:
message (str): Custom message to be passed to the exceptions. Defaults to *None*.
If *None* then the general class *__doc__* is used.
host (str): Custom string which can be used to enhance the exception message by adding the "`host`: "
prefix to the message string. Defaults to *None*. If `host` is *None* then message stays unchanged. | juraj-google-style |
async def getTempCortex(mods=None):
with s_common.getTempDir() as dirn:
async with await Cortex.anit(dirn) as core:
if mods:
for mod in mods:
await core.loadCoreModule(mod)
async with core.getLocalProxy() as prox:
yield prox | Get a proxy to a cortex backed by a temporary directory.
Args:
mods (list): A list of modules which are loaded into the cortex.
Notes:
The cortex and temporary directory are torn down on exit.
This should only be called from synchronous code.
Returns:
Proxy to the cortex. | juraj-google-style |
def RegisterDecoder(cls, decoder):
encoding_method = decoder.ENCODING_METHOD.lower()
if (encoding_method in cls._decoders):
raise KeyError('Decoder for encoding method: {0:s} already set.'.format(decoder.ENCODING_METHOD))
cls._decoders[encoding_method] = decoder | Registers a decoder for a specific encoding method.
Args:
decoder (type): decoder class.
Raises:
KeyError: if the corresponding decoder is already set. | codesearchnet |
def _add_parameters(self, parameter_map, parameter_list):
for parameter in parameter_list:
if parameter.get('$ref'):
parameter = self.specification['parameters'].get(parameter.get('$ref').split('/')[(- 1)])
parameter_map[parameter['name']] = parameter | Populates the given parameter map with the list of parameters provided, resolving any reference objects encountered.
Args:
parameter_map: mapping from parameter names to parameter objects
parameter_list: list of either parameter objects or reference objects | codesearchnet |
def get_ccc_handle_from_uuid(self, uuid):
if uuid in self.uuid_cccds:
return self.uuid_cccds[uuid].handle
char = self.get_characteristic_from_uuid(uuid)
if char is None:
return None
ccc = char.get_descriptor_by_uuid(UUID_GATT_CCC)
if ccc is not None:
self.uuid_cccds[uuid] = ccc
return None if ccc is None else ccc.handle | Utility function to retrieve the client characteristic configuration
descriptor handle for a given characteristic.
Args:
uuid (str): a string containing the hex-encoded UUID
Returns:
None if an error occurs, otherwise an integer handle. | juraj-google-style |
def _GetElementDataTypeDefinition(self, data_type_definition):
if not data_type_definition:
raise errors.FormatError('Missing data type definition')
element_data_type_definition = getattr(
data_type_definition, 'element_data_type_definition', None)
if not element_data_type_definition:
raise errors.FormatError(
'Invalid data type definition missing element')
return element_data_type_definition | Retrieves the element data type definition.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Returns:
DataTypeDefinition: element data type definition.
Raises:
FormatError: if the element data type cannot be determined from the data
type definition. | juraj-google-style |
def Lease(self, request, global_params=None):
config = self.GetMethodConfig('Lease')
return self._RunMethod(config, request, global_params=global_params) | Leases a dataflow WorkItem to run.
Args:
request: (DataflowProjectsLocationsJobsWorkItemsLeaseRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(LeaseWorkItemResponse) The response message. | github-repos |
def get_pending_computer_name():
current = get_computer_name()
pending = __utils__['reg.read_value']('HKLM', 'SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters', 'NV Hostname')['vdata']
if pending:
return (pending if (pending != current) else None)
return False | Get a pending computer name. If the computer name has been changed, and the
change is pending a system reboot, this function will return the pending
computer name. Otherwise, ``None`` will be returned. If there was an error
retrieving the pending computer name, ``False`` will be returned, and an
error message will be logged to the minion log.
Returns:
str:
Returns the pending name if pending restart. Returns ``None`` if not
pending restart.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_pending_computer_name | codesearchnet |
def configure_and_build(self, show_progress=True, optimized=True,
skip_configuration=False):
if not skip_configuration:
configuration_command = ['python', 'waf', 'configure', '--enable-examples',
'--disable-gtk', '--disable-python']
if optimized:
configuration_command += ['--build-profile=optimized',
'--out=build/optimized']
subprocess.call(configuration_command, cwd=self.path,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
build_process = subprocess.Popen(['python', 'waf', 'build'], cwd=self.path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if show_progress:
line_iterator = self.get_build_output(build_process)
pbar = None
try:
[initial, total] = next(line_iterator)
pbar = tqdm(line_iterator, initial=initial, total=total,
unit='file', desc='Building ns-3', smoothing=0)
for current, total in pbar:
pbar.n = current
except (StopIteration):
if pbar is not None:
pbar.n = pbar.total
else:
build_process.communicate() | Configure and build the ns-3 code.
Args:
show_progress (bool): whether or not to display a progress bar
during compilation.
optimized (bool): whether to use an optimized build. If False, use
a standard ./waf configure.
skip_configuration (bool): whether to skip the configuration step,
and only perform compilation. | juraj-google-style |
def get_security_group_id(name='', env='', region=''):
vpc_id = get_vpc_id(env, region)
LOG.info('Find %s sg in %s [%s] in %s', name, env, region, vpc_id)
url = '{0}/securityGroups/{1}/{2}/{3}?vpcId={4}'.format(API_URL, env, region, name, vpc_id)
response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
assert response.ok
result = response.json()
try:
security_group_id = result['id']
except KeyError:
msg = 'Security group ({0}) not found'.format(name)
raise SpinnakerSecurityGroupError(msg)
LOG.info('Found: %s', security_group_id)
return security_group_id | Get a security group ID.
Args:
name (str): Security Group name to find.
env (str): Deployment environment to search.
region (str): AWS Region to search.
Returns:
str: ID of Security Group, e.g. sg-xxxx.
Raises:
AssertionError: Call to Gate API was not successful.
SpinnakerSecurityGroupError: Security Group _name_ was not found for
_env_ in _region_. | codesearchnet |
def run_bottleneck_on_image(sess, image_data, image_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor):
resized_input_values = sess.run(decoded_image_tensor, {image_data_tensor: image_data})
bottleneck_values = sess.run(bottleneck_tensor, {resized_input_tensor: resized_input_values})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values | Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
decoded_image_tensor: Output of initial image resizing and preprocessing.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values. | codesearchnet |
def supervised_to_dict(dataset, text2self):
def my_fn(inputs, targets):
if text2self:
return {'targets': targets}
else:
return {'inputs': inputs, 'targets': targets}
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) | Turns a supervised dataset into a dataset with a feature dictionary.
if text2self, then the features dictionary contains a "targets" key.
else, the features dictionary contains "inputs" and "targets" keys.
Args:
dataset: a tf.data.Dataset
text2self: a boolean
Returns:
a tf.data.Dataset | codesearchnet |
def process_equities(equities: List[str], mask: types.IntTensor=None) -> Tuple[List[str], List[int]]:
equity_list = cashflow_streams.to_list(equities)
if mask is not None:
return (equity_list, mask)
mask, mask_map, num_unique_equities = cashflow_streams.create_mask(equity_list)
equity_types = [mask_map[i] for i in range(num_unique_equities)]
return (equity_types, mask) | Extracts unique equities and computes an integer mask.
#### Example
```python
process_equities(["GOOG", "MSFT", "GOOG", "GOOG"])
# Returns
(['GOOG', 'MSFT'], [0, 1, 0, 0])
```
Args:
equities: A list of equity names.
mask: An optional integer mask for the sorted equity sequence. If supplied,
becomes a no-op.
Returns:
A Tuple of `(equities, mask)` where `equities` is a list of unique sorted
equities and `mask` is a list of integers which is the mask for `equities`. | github-repos |
def mark_typed_object(self, name, type_object):
if (not hasattr(type_object, 'dump')):
raise ArgumentError(('The passed type object %s is missing required method: dump()' % type_object))
if (not hasattr(type_object, 'Restore')):
raise ArgumentError(('The passed type object %s is missing required method: Restore()' % type_object))
def _dump_obj(obj):
if (obj is None):
return None
return obj.dump()
def _restore_obj(obj):
if (obj is None):
return obj
return type_object.Restore(obj)
self.mark_complex(name, _dump_obj, _restore_obj) | Mark a property as containing a serializable object.
This convenience method allows you to avoid having to call
``mark_complex()`` whenever you need to serialize a complex object.
This method requires that property ``name`` be a single class that
contains a dump() method and a Restore() class method where
type_object.Restore(x.dump()) == x.
Args:
name (str): The name of the complex property.
type_object: The class object that will be contained inside
this property. | codesearchnet |
def update_hash(src_file):
hash_file = (local.path(src_file) + '.hash')
new_hash = 0
with open(hash_file, 'w') as h_file:
new_hash = get_hash_of_dirs(src_file)
h_file.write(str(new_hash))
return new_hash | Update the hash for the given file.
Args:
src_file: The file or directory whose hash should be updated. | codesearchnet |
def _stream_data(self, chunk=None):
self._stream_sm_running = True
if (chunk is None):
chunk = self._next_streaming_chunk(20)
if ((chunk is None) or (len(chunk) == 0)):
self._stream_sm_running = False
return
try:
self._send_notification(StreamingChar.value_handle, chunk)
self._defer(self._stream_data)
except bable_interface.BaBLEException as err:
if (err.packet.status == 'Rejected'):
time.sleep(0.05)
self._defer(self._stream_data, [chunk])
else:
self._audit('ErrorStreamingReport')
self._logger.exception('Error while streaming data') | Stream reports to the ble client in 20 byte chunks
Args:
chunk (bytearray): A chunk that should be sent instead of requesting a
new chunk from the pending reports. | codesearchnet |
def get_commits(self, since_sha=None):
assert self.tempdir
cmd = ['git', 'log', '--first-parent', '--reverse', COMMIT_FORMAT]
if since_sha:
commits = [self.get_commit(since_sha)]
cmd.append('{}..HEAD'.format(since_sha))
else:
commits = []
cmd.append('HEAD')
output = cmd_output(*cmd, cwd=self.tempdir)
for (sha, date) in chunk_iter(output.splitlines(), 2):
commits.append(Commit(sha, int(date)))
return commits | Returns a list of Commit objects.
Args:
since_sha - (optional) A sha to search from | codesearchnet |
def normalize(model: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Dict[str, int]]:
is_old_format = all([isinstance(v, int) for v in model.values()])
if is_old_format:
output = {}
sorted_items = sorted(model.items(), key=lambda x: x[0])
groups = itertools.groupby(sorted_items, key=lambda x: x[0].split(':')[0])
for group in groups:
output[group[0]] = dict(((item[0].split(':')[-1], item[1]) for item in group[1]))
return output
try:
assert all([isinstance(v, int) for groups in model.values() for v in groups.values()]), 'Scores should be integers'
except (AssertionError, AttributeError) as e:
raise Exception('Unsupported model format:', e)
else:
return model | Updates a model to the latest format. Does nothing if it's updated already.
Args:
model: A model.
Returns:
An updated model. | github-repos |
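To make the old-to-new conversion concrete, here is a hypothetical old-format model and the nested form that `normalize` would produce for it (the group and feature names are made up):

```python
# Old format: flat "group:feature" keys with integer scores.
old_model = {'UW1:a': 1, 'UW1:b': 2, 'BP2:x': 3}

# normalize(old_model) groups the keys by the prefix before the colon:
expected = {'UW1': {'a': 1, 'b': 2}, 'BP2': {'x': 3}}
```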
def add(self, decorations):
added = 0
if isinstance(decorations, list):
not_repeated = (set(decorations) - set(self._decorations))
self._decorations.extend(list(not_repeated))
added = len(not_repeated)
elif (decorations not in self._decorations):
self._decorations.append(decorations)
added = 1
if (added > 0):
self._order_decorations()
self.update()
return added | Add text decorations on a CodeEditor instance.
Don't add duplicated decorations, and order decorations according to
draw_order and the size of the selection.
Args:
decorations (sourcecode.api.TextDecoration) (could be a list)
Returns:
int: Amount of decorations added. | codesearchnet |
def user_has_access(self, user):
if (ROLE_ADMIN in user.roles):
return True
if self.enabled:
if (not self.required_roles):
return True
for role in self.required_roles:
if (role in user.roles):
return True
return False | Check if a user has access to view information for the account
Args:
user (:obj:`User`): User object to check
Returns:
True if user has access to the account, else false | codesearchnet |
def create_dir(path):
full_path = abs_path(path)
if not os.path.exists(full_path):
try:
os.makedirs(full_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise | Creates a directory if it does not exist already.
Args:
path: The path of the directory to create. | github-repos |
def prepare_subprocess_cmd(subprocess_cmd):
help_cmd = subprocess_cmd + ['--helpfull']
help_output = subprocess.run(help_cmd, stdout=subprocess.PIPE).stdout
help_output = help_output.decode('ascii')
if 'python' in subprocess_cmd[0]:
valid_flags = parse_helpfull_output(help_output)
else:
valid_flags = parse_helpfull_output(help_output, regex=FLAG_HELP_RE_CC)
parsed_flags = flags.FlagValues().read_flags_from_files(subprocess_cmd[1:])
filtered_flags = filter_flags(parsed_flags, valid_flags)
return [subprocess_cmd[0]] + filtered_flags | Prepares a subprocess command by running --helpfull and masking flags.
Args:
subprocess_cmd: List[str], what would be passed into subprocess.call()
i.e. ['python', 'train.py', '--flagfile=flags']
Returns:
['python', 'train.py', '--train_flag=blah', '--more_flags'] | juraj-google-style |
def obtain(self, dest):
url, rev_options = self.get_url_rev_options(self.url)
if not os.path.exists(dest):
self.fetch_new(dest, url, rev_options)
return
rev_display = rev_options.to_display()
if self.is_repository_directory(dest):
existing_url = self.get_remote_url(dest)
if self.compare_urls(existing_url, url):
logger.debug(
'%s in %s exists, and has correct URL (%s)',
self.repo_name.title(),
display_path(dest),
url,
)
if not self.is_commit_id_equal(dest, rev_options.rev):
logger.info(
'Updating %s %s%s',
display_path(dest),
self.repo_name,
rev_display,
)
self.update(dest, url, rev_options)
else:
logger.info('Skipping because already up-to-date.')
return
logger.warning(
'%s %s in %s exists with URL %s',
self.name,
self.repo_name,
display_path(dest),
existing_url,
)
prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
('s', 'i', 'w', 'b'))
else:
logger.warning(
'Directory %s already exists, and is not a %s %s.',
dest,
self.name,
self.repo_name,
)
prompt = ('(i)gnore, (w)ipe, (b)ackup ',
('i', 'w', 'b'))
logger.warning(
'The plan is to install the %s repository %s',
self.name,
url,
)
response = ask_path_exists('What to do? %s' % prompt[0], prompt[1])
if response == 'a':
sys.exit(-1)
if response == 'w':
logger.warning('Deleting %s', display_path(dest))
rmtree(dest)
self.fetch_new(dest, url, rev_options)
return
if response == 'b':
dest_dir = backup_dir(dest)
logger.warning(
'Backing up %s to %s', display_path(dest), dest_dir,
)
shutil.move(dest, dest_dir)
self.fetch_new(dest, url, rev_options)
return
if response == 's':
logger.info(
'Switching %s %s to %s%s',
self.repo_name,
display_path(dest),
url,
rev_display,
)
self.switch(dest, url, rev_options) | Install or update in editable mode the package represented by this
VersionControl object.
Args:
dest: the repository directory in which to install or update. | juraj-google-style |
def compute(self, x):
q_learning = copy(self.__greedy_q_learning)
q_learning.epsilon_greedy_rate = x[0]
q_learning.alpha_value = x[1]
q_learning.gamma_value = x[2]
if self.__init_state_key is not None:
q_learning.learn(state_key=self.__init_state_key, limit=int(x[3]))
else:
q_learning.learn(limit=x[3])
q_sum = q_learning.q_df.q_value.sum()
if q_sum != 0:
cost = q_learning.q_df.shape[0] / q_sum
else:
cost = q_learning.q_df.shape[0] / 1e-4
return cost | Compute cost.
Args:
x: `np.ndarray` of explanatory variables.
Returns:
cost | juraj-google-style |
def Write(self, output_writer):
if self._title:
      output_writer.Write('### {0:s}\n\n'.format(self._title))
if not self._columns:
self._columns = ['' for _ in range(0, self._number_of_columns)]
output_writer.Write(' | '.join(self._columns))
output_writer.Write('\n')
output_writer.Write(' | '.join(['---' for _ in self._columns]))
output_writer.Write('\n')
for values in self._rows:
values = ['{0!s}'.format(value) for value in values]
output_writer.Write(' | '.join(values))
output_writer.Write('\n')
output_writer.Write('\n') | Writes the table to the output writer.
Args:
output_writer (OutputWriter): output writer. | juraj-google-style |
def word_matches(s1, s2, n=3):
return __matches(s1, s2, word_ngrams, n=n) | Word-level n-grams that match between two strings
Args:
s1: a string
s2: another string
n: an int for the n in n-gram
Returns:
set: the n-grams found in both strings | codesearchnet |
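Since the `word_ngrams` and `__matches` helpers are not included in this table, here is a self-contained sketch of the idea with bigrams (n=2) on two made-up sentences:

```python
# Minimal sketch of word-level n-gram matching; not the library's own helpers.
def word_ngrams(s, n=3):
    words = s.split()
    return {tuple(words[i:i + n]) for i in range(len(words) - n + 1)}

s1 = "the quick brown fox jumps"
s2 = "a quick brown fox sleeps"
print(word_ngrams(s1, 2) & word_ngrams(s2, 2))
# {('quick', 'brown'), ('brown', 'fox')}
```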
def get_video_features(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):
batch_size, frames, channel, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width)
vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
image_embeds = vision_outputs[0]
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
if qformer_attention_mask is None:
qformer_attention_mask = torch.ones_like(qformer_input_ids)
qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0)
qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0)
qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
query_outputs = self.qformer(input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True)
query_output = query_outputs[0][:, :query_tokens.size(1), :]
language_model_inputs = self.language_projection(query_output)
language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1)
if return_dict:
return (language_model_inputs, vision_outputs, query_outputs)
return language_model_inputs | Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, frames, num_channels, height, width)`):
The tensors corresponding to the input video frames. | github-repos |
def symm_reduce(self, coords_set, threshold=1e-6):
surf_sg = SpacegroupAnalyzer(self.slab, 0.1)
symm_ops = surf_sg.get_symmetry_operations()
unique_coords = []
coords_set = [self.slab.lattice.get_fractional_coords(coords)
for coords in coords_set]
for coords in coords_set:
incoord = False
for op in symm_ops:
if in_coord_list_pbc(unique_coords, op.operate(coords),
atol=threshold):
incoord = True
break
if not incoord:
unique_coords += [coords]
return [self.slab.lattice.get_cartesian_coords(coords)
                for coords in unique_coords] | Reduces the set of adsorbate sites by finding and removing
symmetrically equivalent duplicates
Args:
coords_set: coordinate set in cartesian coordinates
threshold: tolerance for distance equivalence, used
as input to in_coord_list_pbc for dupl. checking | juraj-google-style |
def from_file(cls, filename):
with zopen(filename) as f:
return cls.from_string(f.read()) | Read an Fiesta input from a file. Currently tested to work with
files generated from this class itself.
Args:
filename: Filename to parse.
Returns:
FiestaInput object | juraj-google-style |
def _ReadMemberHeader(self, file_object):
file_offset = file_object.get_offset()
member_header = self._ReadStructure(file_object, file_offset, self._MEMBER_HEADER_SIZE, self._MEMBER_HEADER, 'member header')
if (member_header.signature != self._GZIP_SIGNATURE):
raise errors.FileFormatError('Unsupported signature: 0x{0:04x}.'.format(member_header.signature))
if (member_header.compression_method != self._COMPRESSION_METHOD_DEFLATE):
raise errors.FileFormatError('Unsupported compression method: {0:d}.'.format(member_header.compression_method))
self.modification_time = member_header.modification_time
self.operating_system = member_header.operating_system
if (member_header.flags & self._FLAG_FEXTRA):
file_offset = file_object.get_offset()
extra_field_data_size = self._ReadStructure(file_object, file_offset, self._UINT16LE_SIZE, self._UINT16LE, 'extra field data size')
file_object.seek(extra_field_data_size, os.SEEK_CUR)
if (member_header.flags & self._FLAG_FNAME):
file_offset = file_object.get_offset()
string_value = self._ReadString(file_object, file_offset, self._CSTRING, 'original filename')
self.original_filename = string_value.rstrip('\x00')
if (member_header.flags & self._FLAG_FCOMMENT):
file_offset = file_object.get_offset()
string_value = self._ReadString(file_object, file_offset, self._CSTRING, 'comment')
self.comment = string_value.rstrip('\x00')
if (member_header.flags & self._FLAG_FHCRC):
file_object.read(2) | Reads a member header.
Args:
file_object (FileIO): file-like object to read from.
Raises:
FileFormatError: if the member header cannot be read. | codesearchnet |
def to_barrier_key(cls, barrier_index_key):
barrier_index_path = barrier_index_key.to_path()
(pipeline_kind, dependent_pipeline_id, unused_kind, purpose) = barrier_index_path[(- 4):]
barrier_record_path = (pipeline_kind, dependent_pipeline_id, _BarrierRecord.kind(), purpose)
return db.Key.from_path(*barrier_record_path) | Converts a _BarrierIndex key to a _BarrierRecord key.
Args:
barrier_index_key: db.Key for a _BarrierIndex entity.
Returns:
db.Key for the corresponding _BarrierRecord entity. | codesearchnet |
def compose_tree_path(tree, issn=False):
if issn:
return join(
"/",
ISSN_DOWNLOAD_KEY,
basename(tree.issn)
)
return join(
"/",
PATH_DOWNLOAD_KEY,
quote_plus(tree.path).replace("%2F", "/"),
) | Compose absolute path for given `tree`.
Args:
tree (obj): :class:`.Tree` instance.
issn (bool, default False): Compose URL using ISSN.
Returns:
str: Absolute path of the tree, without server's address and protocol. | juraj-google-style |
def proto_refactor_files(dest_dir, namespace, namespace_path):
for (dn, dns, fns) in os.walk(dest_dir):
for fn in fns:
fn = os.path.join(dn, fn)
if fnmatch.fnmatch(fn, '*.proto'):
data = proto_refactor(fn, namespace, namespace_path)
with open(fn, 'w') as f:
f.write(data) | This method runs the refactoring on all the Protobuf files in the
Dropsonde repo.
Args:
dest_dir (str): directory where the Protobuf files lives.
namespace (str): the desired package name (i.e. "dropsonde.py2")
namespace_path (str): the desired path corresponding to the package
name (i.e. "dropsonde/py2") | codesearchnet |
def Add(self, file_desc_proto):
proto_name = file_desc_proto.name
if (proto_name not in self._file_desc_protos_by_file):
self._file_desc_protos_by_file[proto_name] = file_desc_proto
elif (self._file_desc_protos_by_file[proto_name] != file_desc_proto):
raise DescriptorDatabaseConflictingDefinitionError(('%s already added, but with different descriptor.' % proto_name))
package = file_desc_proto.package
for message in file_desc_proto.message_type:
self._file_desc_protos_by_symbol.update(((name, file_desc_proto) for name in _ExtractSymbols(message, package)))
for enum in file_desc_proto.enum_type:
self._file_desc_protos_by_symbol['.'.join((package, enum.name))] = file_desc_proto
for extension in file_desc_proto.extension:
self._file_desc_protos_by_symbol['.'.join((package, extension.name))] = file_desc_proto
for service in file_desc_proto.service:
self._file_desc_protos_by_symbol['.'.join((package, service.name))] = file_desc_proto | Adds the FileDescriptorProto and its types to this database.
Args:
file_desc_proto: The FileDescriptorProto to add.
Raises:
DescriptorDatabaseConflictingDefinitionError: if an attempt is made to
add a proto with the same name but different definition than an
existing proto in the database. | codesearchnet |
def SendSourceFiles(self, request, context):
return debug_service_pb2.EventReply() | Base implementation of the handling of SendSourceFiles calls.
The base implementation does nothing with the incoming request.
Override in an implementation of the server if necessary.
Args:
request: A `DebuggedSourceFiles` proto, containing the path, content, size
and last-modified timestamp of source files.
context: Server context.
Returns:
A `EventReply` proto. | github-repos |
def _abort_workflow(pb: ProcessingBlock, workflow_stage_dict: dict,
docker: DockerSwarmClient):
_abort_flag = False
if _abort_flag:
for workflow_stage in pb.workflow_stages:
for service_id, _ in \
workflow_stage_dict[workflow_stage.id]['services'].items():
docker.delete_service(service_id)
LOG.info("Deleted Service Id %s", service_id)
return True
return False | Abort the workflow.
TODO(BMo): This function currently does nothing as the abort flag
is hardcoded to False!
This function is used by `execute_processing_block`.
Args:
pb (ProcessingBlock): Configuration database Processing block object.
workflow_stage_dict (dict): Workflow stage metadata dictionary.
docker (DockerClient): Docker Swarm Client object.
Returns:
bool, True if the stage is aborted, otherwise False. | juraj-google-style |
def CreateCampaignWithBiddingStrategy(client, bidding_strategy_id, budget_id):
campaign_service = client.GetService('CampaignService', version='v201809')
  campaign = {'name': ('Interplanetary Cruise #%s' % uuid.uuid4()), 'budget': {'budgetId': budget_id}, 'biddingStrategyConfiguration': {'biddingStrategyId': bidding_strategy_id}}
operation = {'operator': 'ADD', 'operand': campaign}
response = campaign_service.mutate([operation])
new_campaign = response['value'][0]
print(('Campaign with name "%s", ID "%s" and bidding scheme ID "%s" was created.' % (new_campaign['name'], new_campaign['id'], new_campaign['biddingStrategyConfiguration']['biddingStrategyId'])))
return new_campaign | Create a Campaign with a Shared Bidding Strategy.
Args:
client: AdWordsClient the client to run the example with.
bidding_strategy_id: string the bidding strategy ID to use.
budget_id: string the shared budget ID to use.
Returns:
dict An object representing a campaign. | codesearchnet |
def snapshot(self, name):
return self.get_data(
"volumes/%s/snapshots/" % self.id,
type=POST,
params={"name": name}
) | Create a snapshot of the volume.
Args:
name: string - a human-readable name for the snapshot | juraj-google-style |
def get_dos(self, partial_dos=False, npts_mu=10000, T=None):
spin = (self.data.spin if isinstance(self.data.spin, int) else 1)
(energies, densities, vvdos, cdos) = BL.BTPDOS(self.eband, self.vvband, npts=npts_mu)
if (T is not None):
densities = BL.smoothen_DOS(energies, densities, T)
tdos = Dos((self.efermi / units.eV), (energies / units.eV), {Spin(spin): densities})
if partial_dos:
tdos = self.get_partial_doses(tdos=tdos, npts_mu=npts_mu, T=T)
return tdos | Return a Dos object interpolating bands
Args:
partial_dos: if True, projections will be interpolated as well
and partial doses will be return. Projections must be available
in the loader.
npts_mu: number of energy points of the Dos
T: parameter used to smooth the Dos | codesearchnet |
def __wizard(rho, epsilon=None):
if epsilon is None:
epsilon = 0.
dim = len(rho)
rho_wizard = np.zeros([dim, dim])
v, w = np.linalg.eigh(rho)
for j in range(dim):
if v[j] < epsilon:
tmp = v[j]
v[j] = 0.
x = 0.
for k in range(j + 1, dim):
x += tmp / (dim - (j + 1))
v[k] = v[k] + tmp / (dim - (j + 1))
for j in range(dim):
rho_wizard = rho_wizard + v[j] * outer(w[:, j])
return rho_wizard | Returns the nearest positive semidefinite operator to an operator.
This method is based on reference [1]. It constrains positivity
by setting negative eigenvalues to zero and rescaling the positive
eigenvalues.
Args:
rho (array_like): the input operator.
epsilon(float or None): threshold (>=0) for truncating small
eigenvalues values to zero.
Returns:
numpy.array: A positive semidefinite numpy array. | juraj-google-style |
def tensordot(x1, x2, axes=2):
if any_symbolic_tensors((x1, x2)):
return Tensordot(axes=axes).symbolic_call(x1, x2)
return backend.numpy.tensordot(x1, x2, axes=axes) | Compute the tensor dot product along specified axes.
Args:
x1: First tensor.
x2: Second tensor.
axes: - If an integer, N, sum over the last N axes of `x1` and the
first N axes of `x2` in order. The sizes of the corresponding
axes must match.
- Or, a list of axes to be summed over, first sequence applying
to `x1`, second to `x2`. Both sequences must be of the
same length.
Returns:
The tensor dot product of the inputs. | github-repos |
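A quick shape illustration using NumPy's `tensordot`, which follows the same axes convention as the op above: with `axes=2` the last two axes of `x1` are contracted against the first two axes of `x2`.

```python
import numpy as np

x1 = np.ones((3, 4, 5))
x2 = np.ones((4, 5, 6))

print(np.tensordot(x1, x2, axes=2).shape)                  # (3, 6)
print(np.tensordot(x1, x2, axes=([1, 2], [0, 1])).shape)   # (3, 6), equivalent
```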
def has_checked_field(self, locator, **kwargs):
kwargs["checked"] = True
return self.has_selector("field", locator, **kwargs) | Checks if the page or current node has a radio button or checkbox with the given label,
value, or id, that is currently checked.
Args:
locator (str): The label, name, or id of a checked field.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
bool: Whether it exists. | juraj-google-style |
def read(self, size=None):
if not self._is_open:
raise IOError('Not opened.')
return self._vshadow_store.read(size) | Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed. | juraj-google-style |
def _ConvertBool(value, require_str):
if require_str:
if value == 'true':
return True
elif value == 'false':
return False
else:
raise ParseError('Expected "true" or "false", not {0}.'.format(value))
if not isinstance(value, bool):
raise ParseError('Expected true or false without quotes.')
return value | Convert a boolean value.
Args:
value: A scalar value to convert.
require_str: If True, value must be a str.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed. | juraj-google-style |
def LockedWrite(self, cache_data):
if isinstance(cache_data, six.text_type):
cache_data = cache_data.encode(encoding=self._encoding)
with self._thread_lock:
if (not self._EnsureFileExists()):
return False
with self._process_lock_getter() as acquired_plock:
if (not acquired_plock):
return False
with open(self._filename, 'wb') as f:
f.write(cache_data)
return True | Acquire an interprocess lock and write a string.
This method safely acquires the locks then writes a string
to the cache file. If the string is written successfully
the function will return True, if the write fails for any
reason it will return False.
Args:
cache_data: string or bytes to write.
Returns:
bool: success | codesearchnet |
def spec_filled(self, pos_args, kw_args):
req_names = self.arg_names
if (len(self.arg_defaults) > 0):
req_names = req_names[:(- len(self.arg_defaults))]
req = [x for x in req_names if (x not in kw_args)]
return (len(req) <= len(pos_args)) | Check if we have enough arguments to call this function.
Args:
pos_args (list): A list of all the positional values we have.
kw_args (dict): A dict of all of the keyword args we have.
Returns:
bool: True if we have a filled spec, False otherwise. | codesearchnet |
def dismiss_prompt(self, text=None, wait=None):
with self.driver.dismiss_modal("prompt", text=text, wait=wait):
yield | Execute the wrapped code, dismissing a prompt.
Args:
text (str | RegexObject, optional): Text to match against the text in the modal.
wait (int | float, optional): Maximum time to wait for the modal to appear after
executing the wrapped code.
Raises:
ModalNotFound: If a modal dialog hasn't been found. | juraj-google-style |
def generate_argument_parser(cls, tree, actions={}):
(cur_as, cur_subas) = tree
parser = devassistant_argparse.ArgumentParser(argument_default=argparse.SUPPRESS, usage=argparse.SUPPRESS, add_help=False)
cls.add_default_arguments_to(parser)
for arg in cur_as.args:
arg.add_argument_to(parser)
if (cur_subas or actions):
subparsers = cls._add_subparsers_required(parser, dest=settings.SUBASSISTANT_N_STRING.format('0'))
for subas in sorted(cur_subas, key=(lambda x: x[0].name)):
for alias in ([subas[0].name] + getattr(subas[0], 'aliases', [])):
cls.add_subassistants_to(subparsers, subas, level=1, alias=alias)
for (action, subactions) in sorted(actions.items(), key=(lambda x: x[0].name)):
cls.add_action_to(subparsers, action, subactions, level=1)
return parser | Generates argument parser for given assistant tree and actions.
Args:
tree: assistant tree as returned by
devassistant.assistant_base.AssistantBase.get_subassistant_tree
actions: dict mapping actions (devassistant.actions.Action subclasses) to their
subaction dicts
Returns:
instance of devassistant_argparse.ArgumentParser (subclass of argparse.ArgumentParser) | codesearchnet |
def ChunkedDecoderLayer(feature_depth, feedforward_depth, num_heads, dropout, chunk_selector, mode):
return layers.Serial(layers.Residual(layers.Map(layers.LayerNorm()), layers.ChunkedCausalMultiHeadedAttention(feature_depth, num_heads=num_heads, dropout=dropout, chunk_selector=chunk_selector, mode=mode), layers.Map(layers.Dropout(rate=dropout, mode=mode))), layers.Map(ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode))) | Transformer decoder layer operating on chunks.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
chunk_selector: a function from chunk number to list of chunks to attend.
mode: str: 'train' or 'eval'
Returns:
the layer. | codesearchnet |
def get_gui_hint(self, hint):
if hint == 'type':
if self.kwargs.get('action') == 'store_true' or self.kwargs.get('nargs') == 0:
return 'bool'
elif self.kwargs.get('action') == 'store_const':
return 'const'
return self.gui_hints.get('type', 'str')
elif hint == 'default':
hint_type = self.get_gui_hint('type')
hint_default = self.gui_hints.get('default', None)
arg_default = self.kwargs.get('default', None)
preserved_value = None
if 'preserved' in self.kwargs:
preserved_value = config_manager.get_config_value(self.kwargs['preserved'])
if hint_type == 'path':
if preserved_value is not None:
default = preserved_value
elif hint_default is not None:
default = hint_default.replace('$(pwd)', utils.get_cwd_or_homedir())
else:
default = arg_default or '~'
return os.path.abspath(os.path.expanduser(default))
elif hint_type == 'bool':
return hint_default or arg_default or False
elif hint_type == 'const':
return hint_default or arg_default
else:
if hint_default == '$(whoami)':
hint_default = getpass.getuser()
return preserved_value or hint_default or arg_default or '' | Returns the value for specified gui hint (or a sensible default value,
if this argument doesn't specify the hint).
Args:
hint: name of the hint to get value for
Returns:
value of the hint specified in yaml or a sensible default | juraj-google-style |
def _combine_eq_sets(eq_sets, operations):
UNIT = np.eye(3)
def all_equivalent_atoms_of_i(i, eq_sets, ops):
'WORKS INPLACE on operations\n '
visited = set([i])
tmp_eq_sets = {j: (eq_sets[j] - visited) for j in eq_sets[i]}
while tmp_eq_sets:
new_tmp_eq_sets = {}
for j in tmp_eq_sets:
if (j in visited):
continue
visited.add(j)
for k in tmp_eq_sets[j]:
new_tmp_eq_sets[k] = (eq_sets[k] - visited)
if (i not in ops[k]):
ops[k][i] = (np.dot(ops[j][i], ops[k][j]) if (k != i) else UNIT)
ops[i][k] = ops[k][i].T
tmp_eq_sets = new_tmp_eq_sets
return (visited, ops)
eq_sets = copy.deepcopy(eq_sets)
new_eq_sets = {}
ops = copy.deepcopy(operations)
to_be_deleted = set()
for i in eq_sets:
if (i in to_be_deleted):
continue
(visited, ops) = all_equivalent_atoms_of_i(i, eq_sets, ops)
to_be_deleted |= (visited - {i})
for k in to_be_deleted:
eq_sets.pop(k, None)
return {'eq_sets': eq_sets, 'sym_ops': ops} | Combines the dicts of _get_equivalent_atom_dicts into one
Args:
eq_sets (dict)
operations (dict)
Returns:
dict: The returned dictionary has two possible keys:
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
that maps atom ``i`` unto ``j``. | codesearchnet |
def frame(self, locator=None, *args, **kwargs):
self.switch_to_frame(self._find_frame(locator, *args, **kwargs))
try:
yield
finally:
self.switch_to_frame("parent") | Execute the wrapped code within the given iframe using the given frame or frame name/id.
May not be supported by all drivers.
Args:
locator (str | Element, optional): The name/id of the frame or the frame's element.
Defaults to the only frame in the document. | juraj-google-style |
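A hedged usage sketch in the capybara-py style; `page`, the frame name, and the field label are illustrative and assume an existing session.
```python
# Illustrative only: interact with elements inside an iframe, then switch back.
with page.frame("payment-frame"):
    page.fill_in("Card number", value="4242424242424242")
```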
def stop(self, **kwargs):
return self.client.api.stop(self.id, **kwargs) | Stops a container. Similar to the ``docker stop`` command.
Args:
timeout (int): Timeout in seconds to wait for the container to
stop before sending a ``SIGKILL``. Default: 10
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | juraj-google-style |
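A hedged docker-py usage sketch; it assumes a reachable Docker daemon and an existing container named `web`, neither of which comes from the source.
```python
import docker

client = docker.from_env()
container = client.containers.get("web")   # assumed container name
container.stop(timeout=5)                  # SIGTERM, then SIGKILL after 5 seconds
```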
def _get_other_names(self, line):
m = re.search(self.compound_regex['other_names'][0], line, re.IGNORECASE)
if m:
self.other_names.append(m.group(1).strip()) | Parse and extract any other names that might be recorded for the compound
Args:
line (str): line of the msp file | juraj-google-style |
def AddExtensionDescriptor(self, extension):
if (not (isinstance(extension, descriptor.FieldDescriptor) and extension.is_extension)):
raise TypeError('Expected an extension descriptor.')
if (extension.extension_scope is None):
self._toplevel_extensions[extension.full_name] = extension
try:
existing_desc = self._extensions_by_number[extension.containing_type][extension.number]
except KeyError:
pass
else:
if (extension is not existing_desc):
raise AssertionError(('Extensions "%s" and "%s" both try to extend message type "%s" with field number %d.' % (extension.full_name, existing_desc.full_name, extension.containing_type.full_name, extension.number)))
self._extensions_by_number[extension.containing_type][extension.number] = extension
self._extensions_by_name[extension.containing_type][extension.full_name] = extension
if _IsMessageSetExtension(extension):
self._extensions_by_name[extension.containing_type][extension.message_type.full_name] = extension | Adds a FieldDescriptor describing an extension to the pool.
Args:
extension: A FieldDescriptor.
Raises:
AssertionError: when another extension with the same number extends the
same message.
TypeError: when the specified extension is not a
descriptor.FieldDescriptor. | codesearchnet |
def send_notice(self, room_id, text_content, timestamp=None):
body = {
"msgtype": "m.notice",
"body": text_content
}
return self.send_message_event(room_id, "m.room.message", body,
timestamp=timestamp) | Perform PUT /rooms/$room_id/send/m.room.message with m.notice msgtype
Args:
room_id (str): The room ID to send the event in.
text_content (str): The m.notice body to send.
timestamp (int): Set origin_server_ts (For application services only) | juraj-google-style |
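A hedged usage sketch assuming this method sits on matrix-python-sdk's `MatrixHttpApi` (an assumption); the homeserver URL, token, and room id are placeholders.
```python
from matrix_client.api import MatrixHttpApi

api = MatrixHttpApi("https://matrix.example.org", token="ACCESS_TOKEN")
api.send_notice("!someroom:example.org", "Nightly backup completed.")
```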
def imag(input, name=None):
with ops.name_scope(name, 'Imag', [input]) as name:
input = ops.convert_to_tensor(input, name='input')
if input.dtype.is_complex:
return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
else:
return array_ops.zeros_like(input) | Returns the imaginary part of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the imaginary part of each element in `input` considered as a complex
number. If `input` is real, a tensor of all zeros is returned.
For example:
```python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.math.imag(x) # [4.75, 5.75]
```
Args:
input: A `Tensor`. Must be one of the following types: `float`, `double`,
`complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`. | github-repos |
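The docstring example covers the complex case; the real-input branch described above simply returns zeros of the same shape, as this quick check shows (assumes TensorFlow 2.x eager execution).
```python
import tensorflow as tf

r = tf.constant([1.0, -2.0, 3.5])
print(tf.math.imag(r).numpy())   # [0. 0. 0.]
```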
def readCmd(cls, cmd):
args = shlex.split(cmd)
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
(proc_stdout, proc_stderr) = proc.communicate(input=None)
return proc_stdout.decode() | Run a command and return its stdout decoded to str.
Args:
cmd: string
Returns:
str: the command's standard output, decoded as text | codesearchnet
def register_sub_command(self, sub_command, additional_ids=[]):
self.__register_sub_command(sub_command, sub_command.command_desc().command)
self.__additional_ids.update(additional_ids)
for id in additional_ids:
self.__register_sub_command(sub_command, id) | Register a command as a subcommand.
It will have its CommandDesc.command string used as id. Additional ids can be provided.
Args:
sub_command (CommandBase): Subcommand to register.
additional_ids (List[str]): List of additional ids. Can be empty. | juraj-google-style |
def conv_block(x, growth_rate, name):
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x1 = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_0_bn')(x)
x1 = layers.Activation('relu', name=name + '_0_relu')(x1)
x1 = layers.Conv2D(4 * growth_rate, 1, use_bias=False, name=name + '_1_conv')(x1)
x1 = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_1_bn')(x1)
x1 = layers.Activation('relu', name=name + '_1_relu')(x1)
x1 = layers.Conv2D(growth_rate, 3, padding='same', use_bias=False, name=name + '_2_conv')(x1)
x = layers.Concatenate(axis=bn_axis, name=name + '_concat')([x, x1])
return x | A building block for a dense block.
Args:
x: input tensor.
growth_rate: float, growth rate at dense layers.
name: string, block label.
Returns:
Output tensor for the block. | github-repos |
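A minimal shape check of the block above; it assumes `layers` and `backend` are bound to `keras.layers` / `keras.backend` as in the Keras applications source, and that `conv_block` is already defined in the same scope.
```python
import tensorflow as tf
from tensorflow.keras import backend, layers   # names conv_block relies on

inputs = tf.keras.Input(shape=(32, 32, 64))
outputs = conv_block(inputs, growth_rate=32, name='demo_block')
print(outputs.shape)   # (None, 32, 32, 96): 64 input channels + 32 new ones
```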
def is45(msg):
if allzeros(msg):
return False
d = hex2bin(data(msg))
if wrongstatus(d, 1, 2, 3):
return False
if wrongstatus(d, 4, 5, 6):
return False
if wrongstatus(d, 7, 8, 9):
return False
if wrongstatus(d, 10, 11, 12):
return False
if wrongstatus(d, 13, 14, 15):
return False
if wrongstatus(d, 16, 17, 26):
return False
if wrongstatus(d, 27, 28, 38):
return False
if wrongstatus(d, 39, 40, 51):
return False
if (bin2int(d[51:56]) != 0):
return False
temp = temp45(msg)
if temp:
if ((temp > 60) or (temp < (- 80))):
return False
return True | Check if a message is likely to be BDS code 4,5.
Meteorological hazard report
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
bool: True or False | codesearchnet |
def __fill_buffer(self, size=0):
read_size = min(max(size, self.__buffer_size), MAX_BLOB_FETCH_SIZE)
self.__buffer = fetch_data(self.__blob_key, self.__position,
self.__position + read_size - 1)
self.__buffer_position = 0
self.__eof = len(self.__buffer) < read_size | Fills the internal buffer.
Args:
size: Number of bytes to read. Will be clamped to
[self.__buffer_size, MAX_BLOB_FETCH_SIZE]. | juraj-google-style |
def fleet_id_to_slug(did):
try:
fleet_slug = IOTileFleetSlug(did)
except ValueError:
raise ArgumentError('Unable to recognize {} as a fleet id'.format(did))
return str(fleet_slug) | Converts a fleet id into a correct fleet slug.
Args:
did (long or str): A fleet id, or a fleet slug in the form of XXXX, XXXX-XXXX-XXXX, g--XXXX, or g--XXXX-XXXX-XXXX
Returns:
str: The fleet slug in the g--XXXX-XXXX-XXXX format
Raises:
ArgumentError: if the ID is not in the [1, 16**12] range, or if not a valid string | codesearchnet |
def _should_invoke_v2_op():
if not _ops.executing_eagerly_outside_functions():
return False
if not _summary_ops_v2.has_default_writer():
warnings.warn('Cannot activate TF2 compatibility support for TF1 summary ops: default summary writer not found.')
return False
if _get_step_for_v2() is None:
warnings.warn('Cannot activate TF2 compatibility support for TF1 summary ops: global step not set. To set step for summary writer, use `tf.summary.SummaryWriter.as_default(step=_)`, `tf.summary.experimental.set_step()` or `tf.compat.v1.train.create_global_step()`.')
return False
return True | Check if v2 op can be invoked.
When calling TF1 summary op in eager mode, if the following conditions are
met, v2 op will be invoked:
- The outermost context is eager mode.
- A default TF2 summary writer is present.
- A step is set for the writer (using `tf.summary.SummaryWriter.as_default`,
`tf.summary.experimental.set_step` or
`tf.compat.v1.train.create_global_step`).
Returns:
A boolean indicating whether v2 summary op should be invoked. | github-repos |
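A hedged sketch of satisfying the three conditions listed above using the public `tf.summary` API; whether a given legacy op actually gets forwarded depends on the surrounding compat machinery, so treat this as illustrative.
```python
import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/tf1_compat_logs")
with writer.as_default(step=0):            # default writer + step are both set
    # In eager mode, a TF1 summary op issued here would pass the checks above.
    tf.compat.v1.summary.scalar("loss", 0.25)
```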
def anm_score(self, x, y):
gp = GaussianProcessRegressor().fit(x, y)
y_predict = gp.predict(x)
indepscore = normalized_hsic(y_predict - y, x)
return indepscore | Compute the fitness score of the ANM model in the x->y direction.
Args:
x (numpy.ndarray): Variable seen as cause
y (numpy.ndarray): Variable seen as effect
Returns:
float: ANM fit score | juraj-google-style |
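A self-contained illustration of the same idea using scikit-learn only: regress y on x, then check whether the residual looks independent of x. The library scores independence with a normalized HSIC statistic; a plain correlation stands in for it here, which is a deliberate simplification.
```python
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

rng = np.random.default_rng(0)
x = rng.normal(size=(300, 1))
y = np.sin(x) + 0.1 * rng.normal(size=(300, 1))   # y is a noisy function of x

gp = GaussianProcessRegressor().fit(x, y)
residual = y - gp.predict(x).reshape(-1, 1)
print(abs(np.corrcoef(x.ravel(), residual.ravel())[0, 1]))  # small => x -> y plausible
```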
def _SkipFieldValue(tokenizer):
if tokenizer.TryConsumeByteString():
while tokenizer.TryConsumeByteString():
pass
return
if (not tokenizer.TryConsumeIdentifier() and
not tokenizer.TryConsumeInt64() and
not tokenizer.TryConsumeUint64() and
not tokenizer.TryConsumeFloat()):
raise ParseError('Invalid field value: ' + tokenizer.token) | Skips over a field value.
Args:
tokenizer: A tokenizer to parse the field name and values.
Raises:
ParseError: In case an invalid field value is found. | juraj-google-style |
def _matrix_conv(self, m1, m2):
n = m1[0, 0, 0].shape.as_list()[0]
if n != m2[0, 0, 0].shape.as_list()[0]:
raise ValueError(f'The entries in matrices m1 and m2 must have the same dimensions. Received m1[0, 0, 0].shape={m1[0, 0, 0].shape} and m2[0, 0, 0].shape={m2[0, 0, 0].shape}.')
k = int(np.cbrt(len(m1)))
l = int(np.cbrt(len(m2)))
result = {}
size = k + l - 1
for i in range(size):
for j in range(size):
for r in range(size):
result[i, j, r] = array_ops.zeros([n, n], self.dtype)
for index1 in range(min(k, i + 1)):
for index2 in range(min(k, j + 1)):
for index3 in range(min(k, r + 1)):
if i - index1 < l and j - index2 < l and (r - index3 < l):
result[i, j, r] += math_ops.matmul(m1[index1, index2, index3], m2[i - index1, j - index2, r - index3])
return result | Matrix convolution.
Args:
m1: is a k x k x k dictionary, each element is a n x n matrix.
m2: is a l x l x l dictionary, each element is a n x n matrix.
Returns:
(k + l - 1) x (k + l - 1) x (k + l - 1) dictionary each
element is a n x n matrix.
Raises:
ValueError: if the entries of m1 and m2 are of different dimensions. | github-repos |
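To make the size bookkeeping concrete: with 1x1 "matrices" the block convolution above collapses to an ordinary 3-D discrete convolution, so SciPy can confirm the (k + l - 1) output size independently.
```python
import numpy as np
from scipy.signal import convolve

k, l = 2, 3
m1 = np.random.rand(k, k, k)   # stand-ins for grids of 1x1 matrices
m2 = np.random.rand(l, l, l)
print(convolve(m1, m2).shape)  # (4, 4, 4) == (k + l - 1,) * 3
```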