code | docstring | source |
---|---|---|
def set_peer_address(self, value=None, default=False, disable=False):
return self._configure_mlag('peer-address', value, default, disable)
|
Configures the mlag peer-address value
Args:
value (str): The value to configure the peer-address
default (bool): Configures the peer-address using the
default keyword
disable (bool): Negates the peer-address using the no keyword
Returns:
bool: Returns True if the commands complete successfully
|
codesearchnet
|
def export_analytics_data_to_excel(data, output_file_name, result_info_key, identifier_keys):
workbook = create_excel_workbook(data, result_info_key, identifier_keys)
workbook.save(output_file_name)
print('Saved Excel file to {}'.format(output_file_name))
|
Creates an Excel file containing data returned by the Analytics API
Args:
data: Analytics API data as a list of dicts
output_file_name: File name for output Excel file (use .xlsx extension).
|
juraj-google-style
|
def register_custom_opdefs(custom_opdefs_list):
return wrap_converter.wrapped_register_custom_opdefs(custom_opdefs_list)
|
Register the given custom opdefs to the TensorFlow global op registry.
Args:
custom_opdefs_list: String representing the custom ops OpDefs that are
included in the GraphDef.
Returns:
True if the registration is successfully completed.
|
github-repos
|
def _ParseLeak(
self, parser_mediator, cache_directories, msiecf_item, recovered=False):
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event_data = MSIECFLeakEventData()
event_data.cached_filename = msiecf_item.filename
event_data.cached_file_size = msiecf_item.cached_file_size
event_data.cache_directory_index = msiecf_item.cache_directory_index
event_data.offset = msiecf_item.offset
event_data.recovered = recovered
if (event_data.cache_directory_index >= 0 and
event_data.cache_directory_index < len(cache_directories)):
event_data.cache_directory_name = (
cache_directories[event_data.cache_directory_index])
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extract data from a MSIE Cache Files (MSIECF) leak item.
Every item is stored as an event object, one for each timestamp.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache_directories (list[str]): cache directory names.
msiecf_item (pymsiecf.leak): MSIECF leak item.
recovered (Optional[bool]): True if the item was recovered.
|
juraj-google-style
|
def from_config(cls, config_dict: dict, schema_path: str = None):
if schema_path is None:
schema_path = join(dirname(__file__), 'schema',
'configure_sbi.json')
with open(schema_path, 'r') as file:
schema = json.loads(file.read())
validate(config_dict, schema)
config_dict['status'] = 'created'
if 'subarray_id' not in config_dict:
config_dict['subarray_id'] = 'None'
timestamp = datetime.datetime.utcnow().isoformat()
config_dict['created'] = timestamp
config_dict['updated'] = timestamp
pb_list = copy.deepcopy(config_dict['processing_blocks'])
config_dict.pop('processing_blocks', None)
config_dict['processing_block_ids'] = []
for pb in pb_list:
config_dict['processing_block_ids'].append(pb['id'])
key = SchedulingObject.get_key(SBI_KEY, config_dict['id'])
DB.save_dict(key, config_dict, hierarchical=False)
key = '{}:active'.format(SBI_KEY)
DB.append_to_list(key, config_dict['id'])
sbi = SchedulingObject(SBI_KEY, config_dict['id'])
sbi.set_status('created')
for pb in pb_list:
pb['sbi_id'] = config_dict['id']
cls._add_pb(pb)
return cls(config_dict['id'])
|
Create an SBI object from the specified configuration dict.
NOTE(BM) This should really be done as a single atomic db transaction.
Args:
config_dict(dict): SBI configuration dictionary
schema_path(str, optional): Path to the SBI config schema.
|
juraj-google-style
|
def _expand_terms(self, terms):
ret = {'keywords': list(), 'doc': list()}
if (not isinstance(terms, dict)):
stp = SearchTermParser()
terms = stp.parse(terms, term_join=self.backend._and_join)
if ('about' in terms):
ret['doc'].append(terms['about'])
if ('source' in terms):
ret['keywords'].append(terms['source'])
return ret
|
Expands terms of the dataset to the appropriate fields. It will parse the search phrase
and return only the search term components that are applicable to a Dataset query.
Args:
terms (dict or str):
Returns:
dict: keys are field names, values are query strings
|
codesearchnet
|
def _ParseLogLine(self, parser_mediator, structure, key):
time_elements_tuple = self._GetTimeElementsTuple(structure)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
self._last_month = time_elements_tuple[1]
if key == 'logline':
self._previous_structure = structure
message = structure.message
else:
message = 'Repeated {0:d} times: {1:s}'.format(
structure.times, self._previous_structure.message)
structure = self._previous_structure
event_data = MacOSSecuritydLogEventData()
event_data.caller = structure.caller.strip() or 'unknown'
event_data.facility = structure.facility
event_data.level = structure.level
event_data.message = message
event_data.security_api = structure.security_api or 'unknown'
event_data.sender_pid = structure.sender_pid
event_data.sender = structure.sender.strip()
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parse a single log line and produce an event object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
key (str): name of the parsed structure.
|
juraj-google-style
|
def write(self, *pb2_obj):
base = len(self._write_buff)
for idx, obj in enumerate(pb2_obj):
if self._buffer_size > 0 and \
(idx + base) != 0 and \
(idx + base) % self._buffer_size == 0:
self.flush()
self._write_buff.append(obj)
if self._buffer_size == 0:
self.flush()
|
Write a group of one or more protobuf objects to the file. Multiple
object groups can be written by calling this method several times
before closing stream or exiting the runtime context.
The input protobuf objects get buffered and will be written out when
the number of buffered objects exceeds `self._buffer_size`.
Args:
pb2_obj (*protobuf.message.Message): list of protobuf messages.
|
juraj-google-style
|
def file(path, format='csv', csv_delimiter=',', csv_header=True, compress=False, use_cache=True):
output = QueryOutput()
output._output_type = 'file'
output._file_path = path
output._file_format = format
output._csv_delimiter = csv_delimiter
output._csv_header = csv_header
output._compress_file = compress
return output
|
Construct a query output object where the result is either a local file or a GCS path
Note that there are two jobs that may need to be run sequentially, one to run the query,
and the second to extract the resulting table. These are wrapped by a single outer Job.
If the query has already been executed and you would prefer to get a Job just for the
extract, you can call extract[_async] on the QueryResultsTable returned by the query.
Args:
path: the destination path. Can either be a local or GCS URI (starting with gs://)
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use (default ',').
csv_header: for CSV exports, whether to include an initial header line (default True).
compress: whether to compress the data on export. Compression is not supported for
AVRO format (default False). Applies only to GCS URIs.
use_cache: whether to use cached results or not (default True).
|
codesearchnet
|
def post_process_travis_macos(journal_filename):
travis_build_dir = os.environ.get("TRAVIS_BUILD_DIR", "")
with open(journal_filename, "r") as file_obj:
content = file_obj.read()
processed = content.replace(travis_build_dir, "${TRAVIS_BUILD_DIR}")
with open(journal_filename, "w") as file_obj:
file_obj.write(processed)
|
Post-process a generated journal file on Travis macOS.
Args:
journal_filename (str): The name of the journal file.
|
juraj-google-style
|
def ldu(load_v, name):
try:
return load_v()
except (KeyError, AttributeError, NameError):
return Undefined(name)
|
Load variable operator that returns Undefined when failing to evaluate.
Note: the name ("load or return undefined") is abbreviated to minimize
the amount of clutter in generated code.
This variant of `ld` is useful when loading symbols that may be undefined at
runtime, such as composite symbols, and whether they are defined or not cannot
be determined statically. For example `d['a']` is undefined when `d` is an
empty dict.
Args:
load_v: Lambda that executes the actual read.
name: Human-readable name of the symbol being read.
Returns:
Either the value of the symbol, or Undefined, if the symbol is not fully
defined.
|
github-repos
|
def _get_mtime():
return ((os.path.exists(RPM_PATH) and int(os.path.getmtime(RPM_PATH))) or 0)
|
Get the modified time of the RPM Database.
Returns:
Unix ticks
|
codesearchnet
|
def evaluate_model_predictions(y_true, y_pred, weights=None):
if isinstance(y_pred[0], np.ndarray):
y_pred = np.concatenate(y_pred)
if isinstance(y_true[0], np.ndarray):
y_true = np.concatenate(y_true)
if (weights is not None) and (isinstance(weights[0], np.ndarray)):
weights = np.concatenate(weights)
accuracy = accuracy_score(
y_true, y_pred, normalize=True, sample_weight=weights)
precision = precision_score(
y_true, y_pred, average='binary', pos_label=1, sample_weight=weights)
recall = recall_score(
y_true, y_pred, average='binary', pos_label=1, sample_weight=weights)
f1 = f1_score(
y_true, y_pred, average='binary', pos_label=1, sample_weight=weights)
return {'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1}
|
Evaluate the performance of an extractor model's binary classification
predictions, typically at the block level, of whether a block is content
or not.
Args:
y_true (``np.ndarray``)
y_pred (``np.ndarray``)
weights (``np.ndarray``)
Returns:
Dict[str, float]
|
juraj-google-style
|
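The `evaluate_model_predictions` row above bundles four scikit-learn metrics into a single dict; here is a minimal, self-contained sketch of the same evaluation pattern on hypothetical toy labels (scikit-learn required, sample weights omitted):

```python
# Minimal sketch of the same evaluation pattern using scikit-learn directly.
# The toy y_true/y_pred arrays are hypothetical; sample weights are omitted.
import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

y_true = np.array([1, 0, 1, 1, 0, 1])
y_pred = np.array([1, 0, 0, 1, 0, 1])

metrics = {
    'accuracy': accuracy_score(y_true, y_pred),
    'precision': precision_score(y_true, y_pred, average='binary', pos_label=1),
    'recall': recall_score(y_true, y_pred, average='binary', pos_label=1),
    'f1': f1_score(y_true, y_pred, average='binary', pos_label=1),
}
print(metrics)  # e.g. accuracy ~0.83, precision 1.0, recall 0.75, f1 ~0.86
```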
def atan(cls, x: 'TensorFluent') -> 'TensorFluent':
return cls._unary_op(x, tf.atan2, tf.float32)
|
Returns a TensorFluent for the arctan function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the arctan function.
|
codesearchnet
|
def get_available_references(self, datas):
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names
|
Get available manifest reference names.
Every rule starting with the prefix from ``nomenclature.RULE_REFERENCE``
is an available reference.
Only name validation is performed on these references.
Arguments:
datas (dict): Data in which to search for reference declarations.
Returns:
list: List of every available reference name, i.e. the real name
without the prefix.
|
juraj-google-style
|
def flush_all(self, delay=0, noreply=None):
if noreply is None:
noreply = self.default_noreply
cmd = b'flush_all ' + six.text_type(delay).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'flush_all', noreply)
if noreply:
return True
return results[0] == b'OK'
|
The memcached "flush_all" command.
Args:
delay: optional int, the number of seconds to wait before flushing,
or zero to flush immediately (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True.
|
juraj-google-style
|
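The `flush_all` row above builds the raw memcached command as bytes before sending it; this illustrative sketch assembles the same byte layout without any socket I/O (the `delay`/`noreply` values are arbitrary examples):

```python
# Sketch: assembling the raw memcached "flush_all" command bytes as the
# snippet above does, with no network I/O. delay/noreply are example values.
delay = 30
noreply = True

cmd = b'flush_all ' + str(delay).encode('ascii')
if noreply:
    cmd += b' noreply'
cmd += b'\r\n'

print(cmd)  # b'flush_all 30 noreply\r\n'
```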
def delete_user(self, user):
self.project_service.set_auth(self._token_project)
self.project_service.delete_user(user)
|
Delete the given user.
Args:
user (string): User name.
Raises:
requests.HTTPError on failure.
|
juraj-google-style
|
def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:
raise NotImplementedError("This method must be defined for each subclass.")
|
Return a dataframe of validation results for the appropriate series vs the vector of validators.
Args:
table (pd.DataFrame): A dataframe on which to apply validation logic.
failed_only (bool): If ``True``: return only the indexes that failed to validate.
|
juraj-google-style
|
def Parse(self, value):
value_line = value.split(' ')
if len(value_line) < 3:
raise TextFSMTemplateError('Expect at least 3 tokens on line.')
if not value_line[2].startswith('('):
options = value_line[1]
for option in options.split(','):
self._AddOption(option)
_ = [option.OnCreateOptions() for option in self.options]
self.name = value_line[2]
self.regex = ' '.join(value_line[3:])
else:
self.name = value_line[1]
self.regex = ' '.join(value_line[2:])
if len(self.name) > self.max_name_len:
raise TextFSMTemplateError(
"Invalid Value name '%s' or name too long." % self.name)
if (not re.match(r'^\(.*\)$', self.regex) or
self.regex.count('(') != self.regex.count(')')):
raise TextFSMTemplateError(
"Value '%s' must be contained within a '()' pair." % self.regex)
self.template = re.sub(r'^\(', '(?P<%s>' % self.name, self.regex)
if any(map(lambda x: isinstance(x, TextFSMOptions.List), self.options)):
try:
self.compiled_regex = re.compile(self.regex)
except re.error as e:
raise TextFSMTemplateError(str(e))
|
Parse a 'Value' declaration.
Args:
value: String line from a template file, must begin with 'Value '.
Raises:
TextFSMTemplateError: Value declaration contains an error.
|
juraj-google-style
|
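The core step in the `Parse` row above is turning a Value's `(...)` regex into a named capture group; a small sketch of that transformation with a hypothetical declaration, independent of TextFSM:

```python
# Sketch of the core transformation in the snippet above: a Value's regex is
# wrapped into a named capture group. The sample name/regex are hypothetical.
import re

name = 'interface'
regex = r'(\S+)'

# Same substitution as the snippet: replace the leading '(' with '(?P<name>'.
template = re.sub(r'^\(', '(?P<%s>' % name, regex)
print(template)                      # (?P<interface>\S+)

match = re.match(template, 'GigabitEthernet0/1')
print(match.group('interface'))      # GigabitEthernet0/1
```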
def execute(self, method, **kwargs):
payload = {'id': 1, 'jsonrpc': '2.0', 'method': method, 'params': kwargs}
credentials = base64.b64encode('{}:{}'.format(self._username, self._password).encode())
auth_header_prefix = ('Basic ' if (self._auth_header == DEFAULT_AUTH_HEADER) else '')
headers = {self._auth_header: (auth_header_prefix + credentials.decode()), 'Content-Type': 'application/json'}
return self._do_request(headers, payload)
|
Call remote API procedure
Args:
method: Procedure name
kwargs: Procedure named arguments
Returns:
Procedure result
Raises:
urllib2.HTTPError: Any HTTP error (Python 2)
urllib.error.HTTPError: Any HTTP error (Python 3)
|
codesearchnet
|
def process_extra_vars(extra_vars_list, force_json=True):
extra_vars = {}
extra_vars_yaml = ''
for extra_vars_opt in extra_vars_list:
if extra_vars_opt.startswith('@'):
with open(extra_vars_opt[1:], 'r') as f:
extra_vars_opt = f.read()
opt_dict = string_to_dict(extra_vars_opt, allow_kv=False)
else:
opt_dict = string_to_dict(extra_vars_opt, allow_kv=True)
if any(line.startswith('#') for line in extra_vars_opt.split('\n')):  # keep raw YAML when it contains comment lines
extra_vars_yaml += (extra_vars_opt + '\n')
elif (extra_vars_opt != ''):
extra_vars_yaml += (yaml.dump(opt_dict, default_flow_style=False) + '\n')
extra_vars.update(opt_dict)
if (not force_json):
try:
try_dict = yaml.load(extra_vars_yaml, Loader=yaml.SafeLoader)
assert (type(try_dict) is dict)
debug.log('Using unprocessed YAML', header='decision', nl=2)
return extra_vars_yaml.rstrip()
except Exception:
debug.log('Failed YAML parsing, defaulting to JSON', header='decision', nl=2)
if (extra_vars == {}):
return ''
return json.dumps(extra_vars, ensure_ascii=False)
|
Returns a string that is valid JSON or YAML and contains all the
variables in every extra_vars_opt inside of extra_vars_list.
Args:
extra_vars_list (list): list of extra-vars strings or "@filename" references.
force_json (bool): if True, always output json.
|
codesearchnet
|
def reminders_complete(self, *, reminder: str, **kwargs) -> SlackResponse:
self._validate_xoxp_token()
kwargs.update({"reminder": reminder})
return self.api_call("reminders.complete", json=kwargs)
|
Marks a reminder as complete.
Args:
reminder (str): The ID of the reminder to be marked as complete.
e.g. 'Rm12345678'
|
juraj-google-style
|
def parse_blob_info(field_storage):
if field_storage is None:
return None
field_name = field_storage.name
def get_value(dct, name):
value = dct.get(name, None)
if value is None:
raise BlobInfoParseError(
'Field %s has no %s.' % (field_name, name))
return value
filename = get_value(field_storage.disposition_options, 'filename')
blob_key_str = get_value(field_storage.type_options, 'blob-key')
blob_key = BlobKey(blob_key_str)
upload_content = email.message_from_file(field_storage.file)
content_type = get_value(upload_content, 'content-type')
size = get_value(upload_content, 'content-length')
creation_string = get_value(upload_content, UPLOAD_INFO_CREATION_HEADER)
md5_hash_encoded = get_value(upload_content, 'content-md5')
md5_hash = base64.urlsafe_b64decode(md5_hash_encoded)
try:
size = int(size)
except (TypeError, ValueError):
raise BlobInfoParseError(
'%s is not a valid value for %s size.' % (size, field_name))
try:
creation = blobstore._parse_creation(creation_string, field_name)
except blobstore._CreationFormatError as err:
raise BlobInfoParseError(str(err))
return BlobInfo(id=blob_key_str,
content_type=content_type,
creation=creation,
filename=filename,
size=size,
md5_hash=md5_hash,
)
|
Parse a BlobInfo record from file upload field_storage.
Args:
field_storage: cgi.FieldStorage that represents uploaded blob.
Returns:
BlobInfo record as parsed from the field-storage instance.
None if there was no field_storage.
Raises:
BlobInfoParseError when provided field_storage does not contain enough
information to construct a BlobInfo object.
|
juraj-google-style
|
def save_config(self, lookup_key, config):
with self._config_lock:
self._configs[lookup_key] = config
|
Save a configuration to the cache of configs.
Args:
lookup_key: A string containing the cache lookup key.
config: The dict containing the configuration to save to the cache.
|
juraj-google-style
|
def unique_row_id(self):
self._unique_row_id += 1
return '%s_%d' % (self._row_id_prefix, self._unique_row_id)
|
Returns a unique row ID (str) used to avoid multiple insertions.
If the row ID is provided, BigQuery will make a best effort to not insert
the same row multiple times for fail and retry scenarios in which the insert
request may be issued several times. This comes into play for sinks executed
in a local runner.
Returns:
a unique row ID string
|
github-repos
|
def load(self) -> RepresentativeDatasetMapping:
raise NotImplementedError('Method "load" is not implemented.')
|
Loads the representative datasets.
Returns:
representative dataset mapping: A loaded signature def key ->
representative mapping.
|
github-repos
|
def generate_combinations_with_testcase_name(**kwargs) -> list[OrderedDict[str, Any]]:
combinations = _combine_named_parameters(**kwargs)
named_combinations: list[OrderedDict[str, Any]] = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = ''.join(['_{}_{}'.format(''.join(filter(str.isalnum, key)), ''.join(filter(str.isalnum, str(value)))) for key, value in combination.items()])
named_combinations.append(OrderedDict(list(combination.items()) + [('testcase_name', '_test{}'.format(name))]))
return named_combinations
|
Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
|
github-repos
|
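Since `_combine_named_parameters` is not shown in the row above, here is an approximate, standalone sketch of the same idea: cross all keyword options with `itertools.product` and attach a `testcase_name` built from the alphanumeric parts of each key/value pair:

```python
# Standalone approximation of the combination + naming logic above.
# _combine_named_parameters itself is not shown, so this is a sketch.
import itertools
from collections import OrderedDict

def named_combinations(**kwargs):
    keys = sorted(kwargs)
    options = [kwargs[k] if isinstance(kwargs[k], (list, tuple)) else [kwargs[k]]
               for k in keys]
    result = []
    for values in itertools.product(*options):
        combo = OrderedDict(zip(keys, values))
        name = ''.join('_{}_{}'.format(''.join(filter(str.isalnum, k)),
                                       ''.join(filter(str.isalnum, str(v))))
                       for k, v in combo.items())
        combo['testcase_name'] = '_test{}'.format(name)
        result.append(combo)
    return result

print(named_combinations(mode=['eager', 'graph'], use_gpu=[True, False]))
```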
def iterator_chain(variables: VarType, parent: str=None) -> Iterable[VarMatrix]:
logger.debug('Yielding from append iterator')
if (not isinstance(variables, list)):
raise ValueError(f'Append keyword only takes a list of arguments, got {variables} of type {type(variables)}')
(yield list(chain.from_iterable((variable_matrix(item, parent, 'product') for item in variables))))
|
This successively appends each element of an array to a single list of values.
It takes a list of values and gathers the values generated for each element
into a single flat list, using the :func:`itertools.chain` function to
achieve this. It is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
|
codesearchnet
|
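The append behaviour described in the `iterator_chain` row reduces to `itertools.chain.from_iterable`; a toy sketch of the flattening (the per-item matrices are made-up data):

```python
# Sketch of the flattening behaviour: each element yields its own matrix of
# variables and chain.from_iterable joins them into a single list.
from itertools import chain

per_item_matrices = [
    [{'temperature': 300}, {'temperature': 320}],  # toy data
    [{'pressure': 1.0}],
]

combined = list(chain.from_iterable(per_item_matrices))
print(combined)
# [{'temperature': 300}, {'temperature': 320}, {'pressure': 1.0}]
```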
def allreduce_grads(all_grads, average):
if get_tf_version_tuple() <= (1, 12):
from tensorflow.contrib import nccl
else:
from tensorflow.python.ops import nccl_ops as nccl
nr_tower = len(all_grads)
if nr_tower == 1:
return all_grads
new_all_grads = []
for grads in zip(*all_grads):
summed = nccl.all_sum(grads)
grads_for_devices = []
for g in summed:
with tf.device(g.device):
if average:
g = tf.multiply(g, 1.0 / nr_tower)
grads_for_devices.append(g)
new_all_grads.append(grads_for_devices)
ret = list(zip(*new_all_grads))
return ret
|
All-reduce average the gradients among K devices. Results are broadcasted to all devices.
Args:
all_grads (K x N): List of list of gradients. N is the number of variables.
average (bool): average gradients or not.
Returns:
K x N: same as input, but each grad is replaced by the average over K devices.
|
juraj-google-style
|
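A structure-only sketch of the averaging in `allreduce_grads`, with plain Python lists standing in for tensors and NCCL (K towers by N gradients, toy numbers):

```python
# Structure-only sketch of the all-reduce averaging above; plain floats
# replace tensors and sum() replaces nccl.all_sum.
K = 2  # towers/devices
all_grads = [
    [1.0, 10.0, 100.0],   # gradients on device 0
    [3.0, 30.0, 300.0],   # gradients on device 1
]

new_all_grads = []
for grads in zip(*all_grads):          # iterate over the N variables
    summed = sum(grads)
    grads_for_devices = [summed / K for _ in range(K)]  # "broadcast" the average
    new_all_grads.append(grads_for_devices)

ret = list(zip(*new_all_grads))        # back to K x N
print(ret)  # [(2.0, 20.0, 200.0), (2.0, 20.0, 200.0)]
```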
def get_license(name):
filenames = os.listdir((cwd + licenses_loc))
licenses = dict(zip(filenames, ([(- 1)] * len(filenames))))
for l in licenses:
licenses[l] = compute_distance(name, l)
return min(licenses, key=(lambda k: licenses[k]))
|
Returns the closest match to the requested license.
Arguments:
- name (str): License to use
Returns:
- (str) License that most closely matches the 'name' parameter
|
codesearchnet
|
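`compute_distance` is not shown in the `get_license` row, so this sketch uses `difflib` as a stand-in similarity measure to illustrate the closest-match selection (note the switch from `min` over a distance to `max` over a similarity):

```python
# Sketch of the closest-match selection above. compute_distance() is not
# shown, so difflib's ratio() is used here as a stand-in similarity score.
import difflib

def closest_license(name, filenames):
    scores = {f: difflib.SequenceMatcher(None, name.lower(), f.lower()).ratio()
              for f in filenames}
    return max(scores, key=scores.get)

print(closest_license('mit', ['MIT', 'Apache-2.0', 'GPL-3.0']))  # MIT
```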
def joint(node):
(node, _, _) = _fix(node)
body = (node.body[0].body[:(- 1)] + node.body[1].body)
func = gast.Module(body=[gast.FunctionDef(name=node.body[0].name, args=node.body[1].args, body=body, decorator_list=[], returns=None)])
anno.clearanno(func)
return func
|
Merge the bodies of primal and adjoint into a single function.
Args:
node: A module with the primal and adjoint function definitions as returned
by `reverse_ad`.
Returns:
func: A `Module` node with a single function definition containing the
combined primal and adjoint.
|
codesearchnet
|
def _find_initialized_value_for_variable(variable_op):
try:
var_names = [variable_op.node_def.name, variable_op.node_def.name + ':0']
for collection_name in (ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES):
for var in variable_op.graph.get_collection(collection_name):
if var.name in var_names:
return var.initialized_value()
except AttributeError:
return None
return None
|
Find the initialized value for a variable op.
To do so, lookup the variable op in the variables collection.
Args:
variable_op: A variable `Operation`.
Returns:
A `Tensor` representing the initialized value for the variable or `None`
if the initialized value could not be found.
|
github-repos
|
def lineitem_get_v1(config, auth, advertiser_id, lineitem_id):
return API_DV360(config, auth).advertisers().lineItems().get(advertiserId=advertiser_id, lineItemId=lineitem_id).execute()
|
Gets a DV360 Line Item
Args:
auth: StarThinker authentication scheme
advertiser_id: ID of the advertiser of the line item
lineitem_id: ID of the line item
Returns: Line Item from the DV360 API
|
github-repos
|
def en010(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `en010`'.format(value))
self._en010 = value
|
Corresponds to IDD Field `en010`:
Enthalpy corresponding to 1.0% annual cumulative frequency of occurrence
(and the mean coincident dry-bulb temperature).
Args:
value (float): value for IDD Field `en010`
Unit: kJ/kg
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def get_subscriber_queue(self, event_types=None):
try:
self.started_queue.get(timeout=1)
raise RuntimeError('Cannot create a new subscriber queue while Exchange is running.')
except Empty:
pass
if (event_types is None):
event_types = EventTypes.ALL
queue = Queue()
self.queues[event_types].append(queue)
return queue
|
Create a new queue for a specific combination of event types
and return it.
Returns:
a :class:`multiprocessing.Queue`.
Raises:
RuntimeError if called after `run`
|
codesearchnet
|
def __parameter_default(self, field):
if field.default:
if isinstance(field, messages.EnumField):
return field.default.name
else:
return field.default
|
Returns default value of field if it has one.
Args:
field: A simple field.
Returns:
The default value of the field, if any exists, with the exception of an
enum field, which will have its value cast to a string.
|
juraj-google-style
|
def set_probe_file_name(self, checked):
if checked:
file_name = os.path.join(self.gui_settings['probes_log_folder'], '{:s}_probes.csv'.format(datetime.datetime.now().strftime('%y%m%d-%H_%M_%S')))
if os.path.isfile(file_name) == False:
self.probe_file = open(file_name, 'a')
new_values = self.read_probes.probes_values
header = ','.join(list(np.array([['{:s} ({:s})'.format(p, instr) for p in list(p_dict.keys())] for instr, p_dict in new_values.items()]).flatten()))
self.probe_file.write('{:s}\n'.format(header))
else:
self.probe_file.close()
|
sets the filename to which the probe logging function will write
Args:
checked: boolean (True: opens file) (False: closes file)
|
juraj-google-style
|
def create_module_graph(module_spec):
(height, width) = hub.get_expected_image_size(module_spec)
with tf.Graph().as_default() as graph:
resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3])
m = hub.Module(module_spec)
bottleneck_tensor = m(resized_input_tensor)
wants_quantization = any(((node.op in FAKE_QUANT_OPS) for node in graph.as_graph_def().node))
return (graph, bottleneck_tensor, resized_input_tensor, wants_quantization)
|
Creates a graph and loads Hub Module into it.
Args:
module_spec: the hub.ModuleSpec for the image module being used.
Returns:
graph: the tf.Graph that was created.
bottleneck_tensor: the bottleneck values output by the module.
resized_input_tensor: the input images, resized as expected by the module.
wants_quantization: a boolean, whether the module has been instrumented
with fake quantization ops.
|
codesearchnet
|
def mark_experimental(fn):
@wraps(fn)
def wrapper(*args, **kw):
from peltak.core import shell
if shell.is_tty:
warnings.warn("This command is has experimental status. The "
"interface is not yet stable and might change "
"without notice within with a patch version update. "
"Use at your own risk")
return fn(*args, **kw)
return wrapper
|
Mark function as experimental.
Args:
fn (FunctionType):
The command function to decorate.
|
juraj-google-style
|
def are_genes_in_api(my_clue_api_client, gene_symbols):
if (len(gene_symbols) > 0):
query_gene_symbols = (gene_symbols if (type(gene_symbols) is list) else list(gene_symbols))
query_result = my_clue_api_client.run_filter_query(resource_name, {'where': {'gene_symbol': {'inq': query_gene_symbols}}, 'fields': {'gene_symbol': True}})
logger.debug('query_result: {}'.format(query_result))
r = set([x['gene_symbol'] for x in query_result])
return r
else:
logger.warning('provided gene_symbols was empty, cannot run query')
return set()
|
determine if genes are present in the API
Args:
my_clue_api_client:
gene_symbols: collection of gene symbols to query the API with
Returns: set of the found gene symbols
|
codesearchnet
|
def get_local_variable_from_name(self, variable_name):
return next((v for v in self.variables if v.name == variable_name), None)
|
Return a local variable from a name
Args:
variable_name (str): name of the variable
Returns:
LocalVariable
|
juraj-google-style
|
def StartFlowAndWorker(client_id, flow_name, **kwargs):
queue = rdfvalue.RDFURN("DEBUG-%s-" % getpass.getuser())
if "token" in kwargs:
token = kwargs.pop("token")
else:
token = access_control.ACLToken(username="GRRConsole")
session_id = flow.StartAFF4Flow(
client_id=client_id,
flow_name=flow_name,
queue=queue,
token=token,
**kwargs)
worker_thrd = worker_lib.GRRWorker(
queues=[queue], token=token, threadpool_size=1)
while True:
try:
worker_thrd.RunOnce()
except KeyboardInterrupt:
print("exiting")
worker_thrd.thread_pool.Join()
break
time.sleep(2)
with aff4.FACTORY.Open(session_id, token=token) as flow_obj:
if not flow_obj.GetRunner().IsRunning():
break
worker_thrd.thread_pool.Join()
return session_id
|
Launches the flow and worker and waits for it to finish.
Args:
client_id: The client common name we issue the request.
flow_name: The name of the flow to launch.
**kwargs: passthrough to flow.
Returns:
A flow session id.
Note: you need raw access to run this flow as it requires running a worker.
|
juraj-google-style
|
def unique_parameter_values(self) -> 'list[Collection[cfg.Binding]]':
def _get_values(parameter):
return {b.data.get_type_key(): b for b in parameter.bindings}.values()
return [_get_values(parameter) for parameter in self._unique_parameters()]
|
Get unique parameter subtypes as bindings.
Like _unique_parameters, but returns bindings instead of variables.
Returns:
A list of list of bindings.
|
github-repos
|
def install(self, connection, partition, table_name = None, index_columns=None, materialize=False,
logger = None):
virtual_table = partition.vid
table = partition.vid if not table_name else table_name
if self._relation_exists(connection, table):
if logger:
logger.debug("Skipping '{}'; already installed".format(table))
return
else:
if logger:
logger.info("Installing '{}'".format(table))
partition.localize()
virtual_table = partition.vid + '_vt'
self._add_partition(connection, partition)
if materialize:
if self._relation_exists(connection, table):
debug_logger.debug(
'Materialized table of the partition already exists.\n partition: {}, table: {}'
.format(partition.name, table))
else:
cursor = connection.cursor()
create_query = self.__class__._get_create_query(partition, table)
debug_logger.debug(
'Creating new materialized view for partition mpr.'
'\n partition: {}, view: {}, query: {}'
.format(partition.name, table, create_query))
cursor.execute(create_query)
copy_query = 'INSERT INTO {} SELECT * FROM {}'.format(table, virtual_table)  # assumed copy statement: populate the materialized table from the virtual table
debug_logger.debug(
'Populating sqlite table with rows from partition mpr.'
'\n partition: {}, view: {}, query: {}'
.format(partition.name, table, copy_query))
cursor.execute(copy_query)
cursor.close()
else:
cursor = connection.cursor()
view_q = "CREATE VIEW IF NOT EXISTS {} AS SELECT * FROM {} ".format(partition.vid, virtual_table)
cursor.execute(view_q)
cursor.close()
if index_columns is not None:
self.index(connection,table, index_columns)
return table
|
Creates a virtual table or read-only table for the partition.
Args:
ref (str): id, vid, name or versioned name of the partition.
materialize (boolean): if True, create read-only table. If False create virtual table.
Returns:
str: name of the created table.
|
juraj-google-style
|
def as_dict(self, verbosity=0):
species_list = []
for (spec, occu) in self._species.items():
d = spec.as_dict()
del d['@module']
del d['@class']
d['occu'] = occu
species_list.append(d)
d = {'species': species_list, 'abc': [float(c) for c in self._frac_coords], 'lattice': self._lattice.as_dict(verbosity=verbosity), '@module': self.__class__.__module__, '@class': self.__class__.__name__}
if (verbosity > 0):
d['xyz'] = [float(c) for c in self.coords]
d['label'] = self.species_string
d['properties'] = self.properties
return d
|
Json-serializable dict representation of PeriodicSite.
Args:
verbosity (int): Verbosity level. Default of 0 only includes the
matrix representation. Set to 1 for more details such as
cartesian coordinates, etc.
|
codesearchnet
|
def require_params(self, req):
params = {}
for (name, param) in self.params.items():
if ((name not in req.params) and param.required):
missing = (set((p for p in self.params if self.params[p].required)) - set(req.params.keys()))
raise errors.HTTPMissingParam(', '.join(missing))
elif ((name in req.params) or param.default):
try:
if param.many:
values = (req.get_param_as_list(name, param.validated_value) or [(param.default and param.validated_value(param.default))])
params[name] = param.container(values)
else:
params[name] = param.validated_value(req.get_param(name, default=param.default))
except ValidationError as err:
raise err.as_invalid_param(name)
except ValueError as err:
raise errors.HTTPInvalidParam(str(err), name)
return params
|
Require all defined parameters from request query string.
Raises ``falcon.errors.HTTPMissingParam`` exception if any of required
parameters is missing and ``falcon.errors.HTTPInvalidParam`` if any
of parameters could not be understood (wrong format).
Args:
req (falcon.Request): request object
|
codesearchnet
|
def tokens(cls, tokens):
return cls(Lnk.TOKENS, tuple(map(int, tokens)))
|
Create a Lnk object for a token range.
Args:
tokens: a list of token identifiers
|
codesearchnet
|
def __init__(self, p_range, ns_range, query_spec):
self._property_range = p_range
self._ns_range = ns_range
self._query_spec = query_spec
self._cursor = None
self._query = None
|
Init.
Args:
p_range: a property_range.PropertyRange object that defines the
conditions entities should safisfy.
ns_range: a namesrange.NamespaceRange object that defines the namespaces
to examine.
query_spec: a model.QuerySpec object that defines how to retrieve
entities from datastore.
|
juraj-google-style
|
def rank(input: ragged_tensor.Ragged, name=None):
with ops.name_scope(name, 'RaggedRank', [input]) as name:
if not ragged_tensor.is_ragged(input):
return array_ops.rank(input, name)
return input.ragged_rank + array_ops.rank(input.flat_values)
|
Returns the rank of a RaggedTensor.
Returns a 0-D `int32` `Tensor` representing the rank of `input`.
#### Example:
>>> # shape of tensor 't' is [2, None, None]
>>> t = tf.ragged.constant([[[1], [2, 2]], [[3, 3, 3], [4, 4, 4, 4]]])
>>> tf.rank(t).numpy().item()
3
Args:
input: A `RaggedTensor`
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
|
github-repos
|
def get_dialect(mixed: Union[(SQLCompiler, Engine, Dialect)]) -> Dialect:
if isinstance(mixed, Dialect):
return mixed
elif isinstance(mixed, Engine):
return mixed.dialect
elif isinstance(mixed, SQLCompiler):
return mixed.dialect
else:
raise ValueError("get_dialect: 'mixed' parameter of wrong type")
|
Finds the SQLAlchemy dialect in use.
Args:
mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or
:class:`Dialect` object
Returns: the SQLAlchemy :class:`Dialect` being used
|
codesearchnet
|
def get_campaign_name_list(self):
campaigns = self.find('campaigns', {})
campaign_names = []
for campaign in campaigns:
if ('name' in campaign):
campaign_names.append(campaign['name'])
return campaign_names
|
Returns a list of all valid campaign names
Returns:
List of strings containing all valid campaign names
|
codesearchnet
|
def cos(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:
return amp*np.cos(2*np.pi*freq*times+phase).astype(np.complex_)
|
Continuous cosine wave.
Args:
times: Times to output wave for.
amp: Pulse amplitude.
freq: Pulse frequency, units of 1/dt.
phase: Pulse phase.
|
juraj-google-style
|
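A quick sketch of sampling the continuous cosine above with NumPy; the times, amplitude and frequency are arbitrary example values:

```python
# Sketch: sampling the continuous cosine envelope above with NumPy.
import numpy as np

def cos_wave(times, amp, freq, phase=0.0):
    return amp * np.cos(2 * np.pi * freq * times + phase).astype(complex)

times = np.linspace(0, 1, 5)          # 5 samples over one period at freq=1
print(cos_wave(times, amp=0.5, freq=1.0))
# amplitude-0.5 cosine: 0.5, ~0, -0.5, ~0, 0.5 (as complex values)
```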
def extract_wavs(utterances: List[Utterance], tgt_dir: Path,
lazy: bool) -> None:
tgt_dir.mkdir(parents=True, exist_ok=True)
for utter in utterances:
wav_fn = "{}.{}".format(utter.prefix, "wav")
out_wav_path = tgt_dir / wav_fn
if lazy and out_wav_path.is_file():
logger.info("File {} already exists and lazy == {}; not " \
"writing.".format(out_wav_path, lazy))
continue
logger.info("File {} does not exist and lazy == {}; creating " \
"it.".format(out_wav_path, lazy))
trim_wav_ms(utter.org_media_path, out_wav_path,
utter.start_time, utter.end_time)
|
Extracts WAVs from the media files associated with a list of Utterance
objects and stores it in a target directory.
Args:
utterances: A list of Utterance objects, which include information
about the source media file, and the offset of the utterance in the
media_file.
tgt_dir: The directory in which to write the output WAVs.
lazy: If True, then existing WAVs will not be overwritten if they have
the same name
|
juraj-google-style
|
def _flatten_multiplicand_list(kernels):
flattened = []
for k in kernels:
if isinstance(k, _ProductKernel):
flattened += k.kernels
else:
flattened.append(k)
return flattened
|
Flatten a list of kernels which may contain _ProductKernel instances.
Args:
kernels: Python list of `PositiveSemidefiniteKernel` instances
Returns:
Python list containing the elements of kernels, with any _ProductKernel
instances replaced by their `kernels` property contents.
|
codesearchnet
|
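A sketch of the flattening rule in `_flatten_multiplicand_list` using stand-in kernel classes: product kernels contribute their factors, plain kernels pass through unchanged:

```python
# Stand-in classes to illustrate the flattening rule above.
class Kernel:
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return self.name

class ProductKernel(Kernel):
    def __init__(self, kernels):
        super().__init__('product')
        self.kernels = kernels

def flatten(kernels):
    flattened = []
    for k in kernels:
        if isinstance(k, ProductKernel):
            flattened += k.kernels   # replace the product by its factors
        else:
            flattened.append(k)
    return flattened

a, b, c = Kernel('a'), Kernel('b'), Kernel('c')
print(flatten([a, ProductKernel([b, c])]))  # [a, b, c]
```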
def to_raw_op(f: types.FunctionType) -> Callable[..., Any]:
f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__, f.__closure__)
return kwarg_only(f)
|
Make a given op wrapper function `f` raw.
Raw op wrappers can only be called with keyword arguments.
Args:
f: An op wrapper function to make raw.
Returns:
Raw `f`.
|
github-repos
|
def _map_args(call_node, function):
args = call_node.args
kwds = {kwd.arg: kwd.value for kwd in call_node.keywords}
call_args = tf_inspect.getcallargs(function, *args, **kwds)
unexpected_defaults = []
for k in call_args:
if k not in kwds and call_args[k] not in args and (call_args[k] is not directives.UNSPECIFIED):
unexpected_defaults.append(k)
if unexpected_defaults:
raise ValueError('Unexpected keyword argument values, %s, for function %s' % (zip(unexpected_defaults, [call_args[k] for k in unexpected_defaults]), function))
return {k: v for k, v in call_args.items() if v is not directives.UNSPECIFIED}
|
Maps AST call nodes to the actual function's arguments.
Args:
call_node: ast.Call
function: Callable[..., Any], the actual function matching call_node
Returns:
Dict[Text, ast.AST], mapping each of the function's argument names to
the respective AST node.
Raises:
ValueError: if the default arguments are not correctly set
|
github-repos
|
def touch(self, key, expire=0, noreply=None):
if (noreply is None):
noreply = self.default_noreply
key = self.check_key(key)
cmd = (((b'touch ' + key) + b' ') + six.text_type(expire).encode('ascii'))
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'touch', noreply)
if noreply:
return True
return (results[0] == b'TOUCHED')
|
The memcached "touch" command.
Args:
key: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True if the expiration time was updated, False if the key wasn't
found.
|
codesearchnet
|
def transform(self, program: moderngl.Program, buffer: moderngl.Buffer,
mode=None, vertices=-1, first=0, instances=1):
vao = self.instance(program)
if mode is None:
mode = self.mode
vao.transform(buffer, mode=mode, vertices=vertices, first=first, instances=instances)
|
Transform vertices. Stores the output in a single buffer.
Args:
program: The ``moderngl.Program``
buffer: The ``moderngl.buffer`` to store the output
Keyword Args:
mode: Draw mode (for example ``moderngl.POINTS``)
vertices (int): The number of vertices to transform
first (int): The index of the first vertex to start with
instances (int): The number of instances
|
juraj-google-style
|
def _init_index(root_dir, schema, index_name):
index_dir = os.path.join(root_dir, index_name)
try:
if not os.path.exists(index_dir):
os.makedirs(index_dir)
return create_in(index_dir, schema), index_dir
else:
return open_dir(index_dir), index_dir
except Exception as e:
logger.error("Init error: failed to open search index at: '{}': {} ".format(index_dir, e))
raise
|
Creates new index or opens existing.
Args:
root_dir (str): root dir where to find or create index.
schema (whoosh.fields.Schema): schema of the index to create or open.
index_name (str): name of the index.
Returns:
tuple ((whoosh.index.FileIndex, str)): first element is index, second is index directory.
|
juraj-google-style
|
def build_request_body(type, id, attributes=None, relationships=None):
result = {'data': {'type': type}}
data = result['data']
if (attributes is not None):
data['attributes'] = attributes
if (relationships is not None):
data['relationships'] = relationships
if (id is not None):
data['id'] = id
return result
|
Build a request body object.
A body JSON object is used for any of the ``update`` or ``create``
methods on :class:`Resource` subclasses. In normal library use you
should not have to use this function directly.
Args:
type(string): The resource type for the attribute
id(uuid): The id of the object to update. This may be ``None``
Keyword Args:
attributes(dict): A JSON dictionary of the attributes to set
relationships(dict): A JSON dictionary of relationships to set
Returns:
A valid attribute dictionary. Often used in the ``update`` or
``create`` :class:`Resource` methods.
|
codesearchnet
|
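To show the request-body shape the helper above produces, here is the same plain-dict construction with example attribute values and a JSON dump:

```python
# Same plain-dict construction as the row above; the resource type and
# attribute values are examples only.
import json

def build_request_body(type, id, attributes=None, relationships=None):
    data = {'type': type}
    if attributes is not None:
        data['attributes'] = attributes
    if relationships is not None:
        data['relationships'] = relationships
    if id is not None:
        data['id'] = id
    return {'data': data}

body = build_request_body('device', None, attributes={'label': 'sensor-1'})
print(json.dumps(body, indent=2))
```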
def add(self, pattern, function, method=None, type_cast=None):
if (not type_cast):
type_cast = {}
with self._lock:
self._data_store.append({'pattern': pattern, 'function': function, 'method': method, 'type_cast': type_cast})
|
Function for registering a path pattern.
Args:
pattern (str): Regex pattern to match a certain path.
function (function): Function to associate with this path.
method (str, optional): Usually used to define one of GET, POST,
PUT, DELETE. You may use whatever fits your situation though.
Defaults to None.
type_cast (dict, optional): Mapping between the param name and
one of `int`, `float` or `bool`. The value reflected by the
provided param name will than be casted to the given type.
Defaults to None.
|
codesearchnet
|
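The `add` row only registers a pattern; this illustrative sketch adds a hypothetical `resolve()` helper (not part of the row above) to show how the regex pattern and `type_cast` mapping might be applied at lookup time:

```python
# Illustrative router: add() mirrors the registration above, resolve() is a
# hypothetical lookup helper showing how pattern, method and type_cast fit together.
import re

routes = []

def add(pattern, function, method=None, type_cast=None):
    routes.append({'pattern': pattern, 'function': function,
                   'method': method, 'type_cast': type_cast or {}})

def resolve(path, method=None):
    for route in routes:
        match = re.match(route['pattern'], path)
        if match and (route['method'] is None or route['method'] == method):
            params = match.groupdict()
            for name, cast in route['type_cast'].items():
                if name in params:
                    params[name] = cast(params[name])   # e.g. str -> int
            return route['function'](**params)
    raise LookupError('no route for {}'.format(path))

add(r'^/items/(?P<item_id>\d+)$', lambda item_id: item_id * 2,
    method='GET', type_cast={'item_id': int})
print(resolve('/items/21', method='GET'))  # 42
```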
def tf_initialize(self, x_init, b):
if x_init is None:
x_init = [tf.zeros(shape=util.shape(t)) for t in b]
initial_args = super(ConjugateGradient, self).tf_initialize(x_init)
conjugate = residual = [t - fx for t, fx in zip(b, self.fn_x(x_init))]
squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in residual])
return initial_args + (conjugate, residual, squared_residual)
|
Initialization step preparing the arguments for the first iteration of the loop body:
$x_0, 0, p_0, r_0, r_0^2$.
Args:
x_init: Initial solution guess $x_0$, zero vector if None.
b: The right-hand side $b$ of the system of linear equations.
Returns:
Initial arguments for tf_step.
|
juraj-google-style
|
def error_messages(self, driver_id=None):
if driver_id is not None:
assert isinstance(driver_id, ray.DriverID)
return self._error_messages(driver_id)
error_table_keys = self.redis_client.keys(
ray.gcs_utils.TablePrefix_ERROR_INFO_string + "*")
driver_ids = [
key[len(ray.gcs_utils.TablePrefix_ERROR_INFO_string):]
for key in error_table_keys
]
return {
binary_to_hex(driver_id): self._error_messages(
ray.DriverID(driver_id))
for driver_id in driver_ids
}
|
Get the error messages for all drivers or a specific driver.
Args:
driver_id: The specific driver to get the errors for. If this is
None, then this method retrieves the errors for all drivers.
Returns:
A dictionary mapping driver ID to a list of the error messages for
that driver.
|
juraj-google-style
|
def compute_metrics_cv(self, X, y, **kwargs):
results = self.cv_score_mean(X, y)
return results
|
Compute cross-validated metrics.
Trains this model on data X with labels y.
Returns a list of dict with keys name, scoring_name, value.
Args:
X (Union[np.array, pd.DataFrame]): data
y (Union[np.array, pd.DataFrame, pd.Series]): labels
|
codesearchnet
|
def get_pipeline_id(app='', name=''):
return_id = None
pipelines = get_all_pipelines(app=app)
for pipeline in pipelines:
LOG.debug('ID of %(name)s: %(id)s', pipeline)
if pipeline['name'] == name:
return_id = pipeline['id']
LOG.info('Pipeline %s found, ID: %s', name, return_id)
break
return return_id
|
Get the ID for Pipeline _name_.
Args:
app (str): Name of Spinnaker Application to search.
name (str): Name of Pipeline to get ID for.
Returns:
str: ID of specified Pipeline.
None: Pipeline or Spinnaker Application not found.
|
juraj-google-style
|
def __init__(self, executable_path: _PATH = 'default', port: Union[int, str] = 5037, env: Dict = None, service_args: Union[list, tuple] = None) -> None:
self._service_args = service_args or []
super(Service, self).__init__(executable_path, port=port, env=env)
|
Creates a new instance of the Service.
Args:
executable_path: Path to the AndroidDriver.
port: Port the service is running on.
env: Environment variables.
service_args: List of args to pass to the androiddriver service.
|
juraj-google-style
|
def get_original(self):
pk_value = self._get_pk_value()
if (isinstance(pk_value, int) and (not self._original)):
self._original = self.select().where((self.__class__.id == pk_value)).get()
return self._original
|
Get the original instance of this instance before it's updated.
Returns:
fleaker.peewee.EventMixin:
The original instance of the model.
|
codesearchnet
|
def __init__(self, table_id=None, active_count=None, lookup_count=None,
matched_count=None):
super().__init__()
self.table_id = table_id
self.active_count = active_count
self.lookup_count = lookup_count
self.matched_count = matched_count
|
Create a TableStats with the optional parameters below.
Args:
table_id (int): Identifier of table. Lower numbered tables are
consulted first.
active_count (int): Number of active entries.
lookup_count (int): Number of packets looked up in table.
matched_count (int): Number of packets that hit table.
|
juraj-google-style
|
def press(keys, presses=1, interval=0.0, pause=None, _pause=True):
if (type(keys) == str):
keys = [keys]
else:
lowerKeys = []
for s in keys:
if (len(s) > 1):
lowerKeys.append(s.lower())
else:
lowerKeys.append(s)
keys = lowerKeys  # use the lower-cased key names gathered above
interval = float(interval)
for i in range(presses):
for k in keys:
_failSafeCheck()
platformModule._keyDown(k)
platformModule._keyUp(k)
time.sleep(interval)
_autoPause(pause, _pause)
|
Performs a keyboard key press down, followed by a release.
Args:
keys (str, list): The key(s) to be pressed. The valid names are listed in
KEYBOARD_KEYS. Can also be a list of such strings.
presses (integer, optional): the number of press repetitions.
1 by default, for just one press
interval (float, optional): How many seconds between each press.
0.0 by default, for no pause between presses.
pause (float, optional): How many seconds to pause at the end of the function.
None by default, for no pause at the end of the function.
Returns:
None
|
codesearchnet
|
def _force_disconnect_action(self, action):
conn_key = action.data['id']
if self._get_connection_state(conn_key) == self.Disconnected:
return
data = self._get_connection(conn_key)
if data['state'] == self.Connecting:
callback = data['action'].data['callback']
callback(data['connection_id'], self.id, False, 'Unexpected disconnection')
elif data['state'] == self.Disconnecting:
callback = data['action'].data['callback']
callback(data['connection_id'], self.id, True, None)
elif data['state'] == self.InProgress:
callback = data['action'].data['callback']
if data['microstate'] == 'rpc':
callback(False, 'Unexpected disconnection', 0xFF, None)
elif data['microstate'] == 'open_interface':
callback(False, 'Unexpected disconnection')
elif data['microstate'] == 'close_interface':
callback(False, 'Unexpected disconnection')
connection_id = data['connection_id']
internal_id = data['internal_id']
del self._connections[connection_id]
del self._int_connections[internal_id]
|
Forcibly disconnect a device.
Args:
action (ConnectionAction): the action object describing what we are
forcibly disconnecting
|
juraj-google-style
|
def update(self, *args, **kwargs):
for next_dict in chain(args, (kwargs, )):
for k, v in next_dict.items():
self[k] = v
|
Equivalent to the python dict update method.
Update the dictionary with the key/value pairs from other, overwriting
existing keys.
Args:
other (dict): The source of key value pairs to add to headers
Keyword Args:
All keyword arguments are stored in header directly
Returns:
None
|
juraj-google-style
|
def Exponential(cls, mean: 'TensorFluent', batch_size: Optional[int]=None) -> Tuple[(Distribution, 'TensorFluent')]:
rate = (1 / mean.tensor)
dist = tf.distributions.Exponential(rate)
batch = mean.batch
if ((not batch) and (batch_size is not None)):
t = dist.sample(batch_size)
batch = True
else:
t = dist.sample()
scope = mean.scope.as_list()
return (dist, TensorFluent(t, scope, batch=batch))
|
Returns a TensorFluent for the Exponential sampling op with given mean parameter.
Args:
mean: The mean parameter of the Exponential distribution.
batch_size: The size of the batch (optional).
Returns:
The Exponential distribution and a TensorFluent sample drawn from the distribution.
|
codesearchnet
|
def _read_range(self, start, end=0):
try:
with _handle_client_exception():
return self._client.get_object(*self._client_args, headers=dict(
Range=self._http_range(start, end)))[1]
except _ClientException as exception:
if exception.http_status == 416:
return b''
raise
|
Read a range of bytes in stream.
Args:
start (int): Start stream position.
end (int): End stream position.
0 To not specify end.
Returns:
bytes: number of bytes read
|
juraj-google-style
|
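The `_http_range` helper used by `_read_range` is not shown; assuming the end position is exclusive, the standard HTTP Range header it would build looks like this:

```python
# Sketch of a Range header builder; the real _http_range helper is not shown,
# so the end-exclusive convention here is an assumption.
def http_range(start, end=0):
    # "bytes=start-end" with an inclusive end; an open-ended range omits end.
    return 'bytes=%d-%s' % (start, end - 1 if end else '')

print(http_range(0, 1024))   # bytes=0-1023
print(http_range(2048))      # bytes=2048-
```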
def copy(self, source, dest):
if not self.copyable:
raise IOError('Driver does not support raster copying')
if not isinstance(source, Raster):
source = Raster(source)
should_close = True
else:
should_close = False
if source.name == dest:
raise ValueError(
'Input and output are the same location: %s' % source.name)
settings = driverdict_tolist(self.settings)
ds = self.CreateCopy(dest, source.ds, self.strictmode,
options=settings)
if should_close:
source.close()
return Raster(ds)
|
Returns a copied Raster instance.
Arguments:
source -- the source Raster instance or filepath as str
dest -- destination filepath as str
|
juraj-google-style
|
def __init__(self, item_type=None, min_length=None, max_length=None, empty=True):
super(ListTypeChecker, self).__init__(
iter_type=list, item_type=item_type, min_length=min_length, max_length=max_length, empty=empty
)
|
Initialization method.
Args:
item_type (type): the type of the items inside the list.
min_length (int): minimum length of the list (included).
max_length (int): maximum length of the list (included).
empty (bool): whether empty list is allowed.
|
juraj-google-style
|
def get_unconditional_inputs(self, num_samples=1):
input_ids = torch.ones((num_samples, 1), device=self.device, dtype=torch.int64) * self.config.vocab_size
user_audio_codes = torch.ones((num_samples, self.num_codebooks, 1), device=self.device, dtype=torch.int64) * self.config.audio_vocab_size
moshi_audio_codes = torch.ones((num_samples, self.num_codebooks, 1), device=self.device, dtype=torch.int64) * self.config.audio_vocab_size
attention_mask = torch.ones((num_samples, 1), device=self.device, dtype=torch.long)
return MoshiUnconditionalInput(input_ids=input_ids, user_audio_codes=user_audio_codes, moshi_audio_codes=moshi_audio_codes, attention_mask=attention_mask)
|
Helper function to get null inputs for unconditional generation, enabling the model to be used without the
feature extractor or tokenizer.
Args:
num_samples (int, *optional*):
Number of audio samples to unconditionally generate.
max_new_tokens (int, *optional*):
Number of tokens to generate for each sample. More tokens means longer audio samples, at the expense of
longer inference (since more audio tokens need to be generated per sample).
Example:
```python
>>> from transformers import MoshiForConditionalGeneration
>>> model = MoshiForConditionalGeneration.from_pretrained("kmhf/hf-moshiko-pytorch-bf16")
>>> # get the unconditional (or 'null') inputs for the model
>>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1)
>>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256)
```
|
github-repos
|
def call_api(self, method, url, headers=None, params=None, data=None, files=None, timeout=None):
method = method.upper()
headers = (deepcopy(headers) or {})
headers['Accept'] = self.accept_type
params = (deepcopy(params) or {})
data = (data or {})
files = (files or {})
if (self.username and self.api_key):
params.update(self.get_credentials())
url = urljoin(self.base_url, url)
r = requests.request(method, url, headers=headers, params=params, files=files, data=data, timeout=timeout)
return (r, r.status_code)
|
Call API.
This returns object containing data, with error details if applicable.
Args:
method (str): The HTTP method to use.
url (str): Resource location relative to the base URL.
headers (dict or None): Extra request headers to set.
params (dict or None): Query-string parameters.
data (dict or None): Request body contents for POST or PUT requests.
files (dict or None): Files to be passed to the request.
timeout (int): Maximum time before timing out.
Returns:
ResultParser or ErrorParser.
|
codesearchnet
|
def zero_fraction(value, name=None):
with ops.name_scope(name, 'zero_fraction', [value]):
value = ops.convert_to_tensor(value, name='value')
size = array_ops.size(value, out_type=dtypes.int64)
num_nonzero = tf_cond.cond(size <= dtypes.int32.max, true_fn=lambda: math_ops.cast(_count_nonzero(value, dtype=dtypes.int32), dtype=dtypes.int64), false_fn=lambda: _count_nonzero(value, dtype=dtypes.int64))
with ops.name_scope('counts_to_fraction'):
num_zero = size - num_nonzero
num_zero_float32 = math_ops.cast(num_zero, dtype=dtypes.float32)
size_float32 = math_ops.cast(size, dtype=dtypes.float32)
zero_fraction_float32 = num_zero_float32 / size_float32
return array_ops.identity(zero_fraction_float32, 'fraction')
|
Returns the fraction of zeros in `value`.
If `value` is empty, the result is `nan`.
This is useful in summaries to measure and report sparsity. For example,
```python
z = tf.nn.relu(...)
summ = tf.compat.v1.summary.scalar('sparsity', tf.nn.zero_fraction(z))
```
Args:
value: A tensor of numeric type.
name: A name for the operation (optional).
Returns:
The fraction of zeros in `value`, with type `float32`.
|
github-repos
|
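For comparison, the same quantity as `zero_fraction` computed with NumPy instead of TensorFlow (NaN for an empty input):

```python
# NumPy version of the fraction-of-zeros computation above.
import numpy as np

def zero_fraction(value):
    value = np.asarray(value)
    if value.size == 0:
        return float('nan')
    return float(np.count_nonzero(value == 0)) / value.size

print(zero_fraction([0, 1, 0, 2]))  # 0.5
```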
def extract_resources_from_bundle(bundle: message.Message, *, resource_type: Type[_T]) -> List[_T]:
if not fhir_types.is_type_or_profile_of('http://hl7.org/fhir/StructureDefinition/Bundle', bundle):  # canonical Bundle StructureDefinition URL assumed
raise TypeError(f'{bundle.DESCRIPTOR.name} is not a type or profile of Bundle.')
contained_resource_field = path_utils.camel_case_to_snake_case(resource_type.DESCRIPTOR.name)
return [getattr(entry.resource, contained_resource_field) for entry in cast(Any, bundle).entry if entry.resource.HasField(contained_resource_field)]
|
Returns a list of resources of type `resource_type` from `bundle`.
Args:
bundle: The FHIR Bundle to examine.
resource_type: The message type of the resource to return.
Returns:
A list of resources of type `resource_type` belonging to the bundle.
Raises:
TypeError: In the event that `bundle` is not of type "Bundle".
ValueError: In the event that a field corresponding to the "snake_case" name
of `resource_type` does not exist on `Bundle.Entry`.
|
github-repos
|
def __init__(self, statistics: calib_stats_pb2.CalibrationStatistics, calib_opts: stablehlo_quant_config_pb2.CalibrationOptions):
super().__init__(statistics, calib_opts)
hist_stats = statistics.histogram_statistics
self._bin_width = hist_stats.bin_width
self._lower_bound = hist_stats.lower_bound
self._hist_freq = np.array(hist_stats.hist_freq)
self._num_bins = len(self._hist_freq)
self._num_bits = 8
first_mid = self._lower_bound + self._bin_width / 2
last_mid = first_mid + (self._num_bins - 1) * self._bin_width
self._hist_mids = np.linspace(first_mid, last_mid, self._num_bins)
|
Builds histogram using statistics.histogram_statistics.
lower_bound hist_mid
v v
|=========|=========|=========|=========|=========|
bin width
Args:
statistics: Collected calibration statistics.
calib_opts: Calibration options used for calculating min and max.
|
github-repos
|
def fixup_for_packaged():
if exists(join(ROOT, 'PKG-INFO')):
if (('--build-js' in sys.argv) or ('--install-js' in sys.argv)):
print(SDIST_BUILD_WARNING)
if ('--build-js' in sys.argv):
sys.argv.remove('--build-js')
if ('--install-js' in sys.argv):
sys.argv.remove('--install-js')
if ('--existing-js' not in sys.argv):
sys.argv.append('--existing-js')
|
If we are installing FROM an sdist, then a pre-built BokehJS is
already installed in the python source tree.
The command line options ``--build-js`` or ``--install-js`` are
removed from ``sys.argv``, with a warning.
Also adds ``--existing-js`` to ``sys.argv`` to signal that BokehJS is
already packaged.
Returns:
None
|
codesearchnet
|
def get_parent_of_type(typ, obj):
if type(typ) is not text:
typ = typ.__name__
while hasattr(obj, 'parent'):
obj = obj.parent
if obj.__class__.__name__ == typ:
return obj
|
Finds first object up the parent chain of the given type.
If no parent of the given type exists None is returned.
Args:
typ(str or python class): The type of the model object we are
looking for.
obj (model object): Python model object which is the start of the
search process.
|
juraj-google-style
|
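A self-contained sketch of the parent-chain walk above, using hypothetical stand-in classes and assuming the module-level `text` alias used in the type check resolves to `str`.
```python
text = str   # assumed alias used by get_parent_of_type's type check

class Model:                     # hypothetical root object (no .parent)
    pass

class Package:                   # hypothetical intermediate node
    def __init__(self, parent):
        self.parent = parent

class Entity:                    # hypothetical leaf node
    def __init__(self, parent):
        self.parent = parent

root = Model()
pkg = Package(parent=root)
ent = Entity(parent=pkg)

# Walk up the .parent chain until an object of the requested type is found.
assert get_parent_of_type('Model', ent) is root
assert get_parent_of_type(Package, ent) is pkg      # a class works as well as its name
assert get_parent_of_type('Missing', ent) is None   # no match -> None
```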
def _common_args(self):
return {'metadata': self._metadata.SerializeToString(), 'output_shapes': self._flat_shapes, 'output_types': self._flat_types}
|
Helper for generating arguments that are common across most dataset ops.
Most dataset op constructors expect `output_shapes` and `output_types`
arguments that represent the flattened structure of an element, as well as a
`metadata` argument for additional metadata such as user-defined dataset
name. This helper function generates common attributes as a keyword argument
dictionary, allowing `Dataset._variant_tensor` implementations to pass
`**self._common_args` to the op constructor.
Returns:
A dictionary of keyword arguments that can be passed to a dataset op
constructor.
|
github-repos
|
def config_from_url(u, **kwargs):
path = u.path.lstrip("/").split("/")
if len(path) > 2 or not path:
raise AssertionError("zmq url format: zmq:
typ = path[0].upper()
try:
topic = path[1]
except IndexError as _:
topic = ''
param = dict(urllib.parse.parse_qsl(u.query))
transport = param.get("transport", "tcp")
_id = "%s-%s-%s-%s" % (typ, topic, transport, u.netloc)
if kwargs.get("prefix") is not None:
_id = "%s-%s" % (kwargs.get("prefix"), _id)
return {
"id" : _id,
"typ_str" : typ,
"typ" : getattr(zmq, typ),
"topic" : topic,
"transport" : transport,
"url" : "%s:
}
|
Returns dict containing zmq configuration arguments
parsed from xbahn url
Arguments:
- u (urlparse.urlparse result)
Returns:
dict:
- id (str): connection index key
- typ_str (str): string representation of zmq socket type
- typ (int): zmq socket type (PUB, SUB, REQ, REP, PUSH, PULL)
- topic (str): subscription topic
- url (str): url to use with zmq's bind function
|
juraj-google-style
|
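A hedged usage sketch, assuming pyzmq and the module's `urllib`/`zmq` imports are available; the host, port, and topic are made up.
```python
import urllib.parse

# Hypothetical xbahn-style URL: SUB socket, topic "updates", tcp transport.
u = urllib.parse.urlparse("zmq://localhost:5555/sub/updates?transport=tcp")

cfg = config_from_url(u, prefix="worker1")
print(cfg["typ_str"])   # SUB
print(cfg["topic"])     # updates
print(cfg["id"])        # worker1-SUB-updates-tcp-localhost:5555
print(cfg["url"])       # tcp://localhost:5555
```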
def broadcast_to(input: ragged_tensor.RaggedOrDense, shape: dynamic_ragged_shape.DynamicRaggedShape) -> Union[ragged_tensor.RaggedTensor, tensor_lib.Tensor]:
return dynamic_ragged_shape.broadcast_to(input, shape)
|
Broadcasts a potentially ragged tensor to a ragged shape.
Tiles `input` as necessary to match the given shape.
Behavior is undefined if `input` is not broadcast-compatible with `shape`.
Args:
input: The potentially ragged tensor to broadcast.
shape: A `DynamicRaggedShape`
Returns:
A potentially ragged tensor whose values are taken from
`input`, and whose shape matches `shape`.
|
github-repos
|
def convert_inference_tf_type_to_tflite_type(tf_type: dtypes.DType, usage: str='') -> _types_pb2.IODataType:
mapping = {dtypes.float32: _types_pb2.FLOAT, dtypes.uint8: _types_pb2.QUANTIZED_UINT8, dtypes.int8: _types_pb2.QUANTIZED_INT8, dtypes.int16: _types_pb2.QUANTIZED_INT16}
tflite_type = mapping.get(tf_type)
if tflite_type is None:
raise ValueError('Unsupported TensorFlow type `{0}` provided for the {1}'.format(tf_type, usage))
return tflite_type
|
Convert inference type from tf type to tflite type.
Args:
tf_type: TensorFlow type.
usage: Text describing the reason for invoking this function.
Raises:
ValueError: If `tf_type` is unsupported.
Returns:
tflite_type: TFLite type. Refer to compiler/mlir/lite/types.proto.
|
github-repos
|
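A short usage sketch, assuming the converter module's imports (`dtypes`, `_types_pb2`) are in scope as in the snippet above.
```python
from tensorflow.python.framework import dtypes

# Supported types map directly to their TFLite enum values.
print(convert_inference_tf_type_to_tflite_type(dtypes.int8, usage="inference_input_type"))

# Unsupported types raise a ValueError that names the offending usage.
try:
    convert_inference_tf_type_to_tflite_type(dtypes.float64, usage="inference_output_type")
except ValueError as e:
    print(e)
```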
def set_global(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs):
cls.user_agent = cls._create(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs)
|
Set global user agent string
Args:
user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed.
user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.
user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.
Returns:
None
|
codesearchnet
|
def list_knowledge_bases(project_id):
import dialogflow_v2beta1 as dialogflow
client = dialogflow.KnowledgeBasesClient()
project_path = client.project_path(project_id)
print('Knowledge Bases for: {}'.format(project_id))
for knowledge_base in client.list_knowledge_bases(project_path):
print(' - Display Name: {}'.format(knowledge_base.display_name))
print(' - Knowledge ID: {}\n'.format(knowledge_base.name))
|
Lists the Knowledge bases belonging to a project.
Args:
project_id: The GCP project linked with the agent.
|
codesearchnet
|
def db_en020(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} needs to be of type float '
'for field `db_en020`'.format(value))
self._db_en020 = value
|
Corresponds to IDD Field `db_en020`
Mean coincident dry-bulb temperature corresponding to the enthalpy at 2.0% annual cumulative frequency of occurrence.
Args:
value (float): value for IDD Field `db_en020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def apply_regression(input_, regression_fn, target, regression_args=(), regression_kwargs=None, name=PROVIDED, loss_weight=None, per_example_weights=None):
if (regression_kwargs is None):
regression_kwargs = {}
if ((name is not None) and ('name' not in regression_kwargs)):
regression_kwargs['name'] = name
elif (name is None):
name = input_.tensor.op.name
tensor = input_.tensor
loss = regression_fn(tensor, target, *regression_args, **regression_kwargs)
if (loss_weight is not None):
loss *= loss_weight
if (per_example_weights is not None):
per_example_weights = _convert_and_assert_per_example_weights_compatible(input_, per_example_weights, dtype=loss.dtype)
loss *= per_example_weights
if (name is None):
name = loss.op.name
if (tensor.get_shape()[0].value is not None):
avg_loss = (tf.reduce_sum(loss) / tensor.get_shape()[0].value)
else:
avg_loss = tf.reduce_mean(loss)
return input_.add_loss(avg_loss, name=name)
|
Applies the given regression and adds the loss to the bookkeeper.
This does not change tensor.
Args:
input_: A Tensor or a Pretty Tensor holding the input.
regression_fn: A function that takes (in order) tensor, labels.
target: The target of the regression.
regression_args: Other arguments for the regression.
regression_kwargs: Keyword args for the regression.
name: The name, also added to regression_kwargs.
loss_weight: A scalar multiplier for the loss.
per_example_weights: A Tensor with a weight per example.
Returns:
The loss tensor's name.
Raises:
ValueError: If the target is not a compatible shape with input_.
|
codesearchnet
|
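Pretty Tensor is difficult to run against current TensorFlow, so the following is only a NumPy sketch of the weighting logic above (per-element loss, optional scalar `loss_weight`, optional `per_example_weights`, then a batch mean); all numbers are made up.
```python
import numpy as np

# Hypothetical per-example regression losses for a batch of 4.
loss = np.array([0.5, 1.0, 2.0, 0.25])

loss_weight = 0.1                                       # optional scalar multiplier
per_example_weights = np.array([1.0, 0.0, 1.0, 2.0])    # optional per-example weights

weighted = loss * loss_weight * per_example_weights
avg_loss = weighted.mean()   # with a known batch size, sum/size is just the mean

print(avg_loss)   # 0.075
```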
def _GetParsersFromPresetCategory(cls, category):
preset_definition = cls._presets.GetPresetByName(category)
if (preset_definition is None):
return []
preset_names = cls._presets.GetNames()
parser_names = set()
for element_name in preset_definition.parsers:
if (element_name in preset_names):
category_parser_names = cls._GetParsersFromPresetCategory(element_name)
parser_names.update(category_parser_names)
else:
parser_names.add(element_name)
return sorted(parser_names)
|
Retrieves the parser names of specific preset category.
Args:
category (str): parser preset category.
Returns:
list[str]: parser names in alphabetical order.
|
codesearchnet
|
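A standalone sketch of the same recursive expansion with a plain dict of hypothetical presets, since the real method depends on a presets registry class.
```python
def expand_preset(presets, name):
    """Recursively expand a preset name into a sorted list of parser names."""
    definition = presets.get(name)
    if definition is None:
        return []
    parser_names = set()
    for element in definition:
        if element in presets:                         # nested preset -> recurse
            parser_names.update(expand_preset(presets, element))
        else:                                          # plain parser name
            parser_names.add(element)
    return sorted(parser_names)

# Hypothetical preset definitions (names are illustrative only).
presets = {
    "win_all": ["win7", "winreg"],
    "win7": ["filestat", "prefetch"],
}
print(expand_preset(presets, "win_all"))   # ['filestat', 'prefetch', 'winreg']
```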
def flat_values(self):
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_values = rt_values.values
return rt_values
|
The innermost `values` tensor for this ragged tensor.
Concretely, if `rt.values` is a `Tensor`, then `rt.flat_values` is
`rt.values`; otherwise, `rt.flat_values` is `rt.values.flat_values`.
Conceptually, `flat_values` is the tensor formed by flattening the
outermost dimension and all of the ragged dimensions into a single
dimension.
`rt.flat_values.shape = [nvals] + rt.shape[rt.ragged_rank + 1:]`
(where `nvals` is the number of items in the flattened dimensions).
Returns:
A `Tensor`.
#### Example:
>>> rt = tf.ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
>>> print(rt.flat_values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
|
github-repos
|
def __register_notifiers(self):
notifiers = {}
for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.notifiers']['plugins']:
cls = entry_point.load()
notifiers[cls.notifier_type] = cls.validation
return notifiers
|
Lists all notifiers to be able to provide metadata for the frontend
Returns:
`dict`: notifier type mapped to its validation metadata
|
codesearchnet
|
def __init__(self, connections, picker_class=RoundRobinPicker):
self.connections = connections
self.picker = picker_class()
|
Initializes a :class:`~bigchaindb_driver.pool.Pool` instance.
Args:
connections (list): List of
:class:`~bigchaindb_driver.connection.Connection` instances.
|
juraj-google-style
|
def connection_delay(self, node_id):
conn = self._conns.get(node_id)
if conn is None:
return 0
return conn.connection_delay()
|
Return the number of milliseconds to wait, based on the connection
state, before attempting to send data. When disconnected, this respects
the reconnect backoff time. When connecting, returns 0 to allow
non-blocking connect to finish. When connected, returns a very large
number to handle slow/stalled connections.
Arguments:
node_id (int): The id of the node to check
Returns:
int: The number of milliseconds to wait.
|
juraj-google-style
|
def get(self, profile_id):
if profile_id not in self._profiles:
try:
self._profiles[profile_id] = self._get_profile(profile_id)
except (ValueError,
IOError) as e:
six.raise_from(RegistryError(e), e)
return self._profiles[profile_id]
|
Returns the profile with the received ID as a dict
If a local copy of the profile exists, it'll be returned. If not, it'll
be downloaded from the web. The results are cached, so any subsequent
calls won't hit the filesystem or the web.
Args:
profile_id (str): The ID of the profile you want.
Raises:
RegistryError: If there was some problem opening the profile file
or its format was incorrect.
|
juraj-google-style
|
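A minimal sketch of the same cache-then-fetch pattern with a hypothetical loader; the error wrapping into `RegistryError` is omitted.
```python
class ProfileCache:
    """Memoizes profile lookups so repeat calls skip the expensive fetch."""

    def __init__(self, fetch):
        self._fetch = fetch            # callable that loads a profile by id
        self._profiles = {}

    def get(self, profile_id):
        if profile_id not in self._profiles:
            self._profiles[profile_id] = self._fetch(profile_id)
        return self._profiles[profile_id]

calls = []

def fake_fetch(profile_id):            # hypothetical loader standing in for disk/web I/O
    calls.append(profile_id)
    return {"id": profile_id}

cache = ProfileCache(fake_fetch)
cache.get("data-package")
cache.get("data-package")              # second call is served from the cache
print(calls)                           # ['data-package'] -- fetched only once
```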
def run(self, group_x=1, group_y=1, group_z=1) -> None:
return self.mglo.run(group_x, group_y, group_z)
|
Run the compute shader.
Args:
group_x (int): The number of work groups to be launched in the X dimension.
group_y (int): The number of work groups to be launched in the Y dimension.
group_z (int): The number of work groups to be launched in the Z dimension.
|
juraj-google-style
|
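A hedged ModernGL sketch of dispatching a compute shader; it assumes an OpenGL 4.3-capable driver is available (compute shaders require it), so it will not run on contexts without compute support.
```python
import struct
import moderngl

ctx = moderngl.create_standalone_context(require=430)

compute_src = """
#version 430
layout (local_size_x = 4) in;
layout (std430, binding = 0) buffer Data { float values[]; };
void main() { values[gl_GlobalInvocationID.x] *= 2.0; }
"""
compute = ctx.compute_shader(compute_src)

# Upload 8 floats, bind them to binding point 0, and launch 2 work groups of 4 invocations.
buf = ctx.buffer(struct.pack("8f", *range(8)))
buf.bind_to_storage_buffer(0)
compute.run(group_x=2)

print(struct.unpack("8f", buf.read()))   # each value doubled by the shader
```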
def _colourise(text: str, colour: str) -> str:
if COLOUR:
text = style(text, fg=colour, bold=True)
return text
|
Colour text, if possible.
Args:
text: Text to colourise
colour: Colour to display text in
Returns:
Colourised text, if possible
|
juraj-google-style
|
def connect(self, component):
if not isinstance(component, ThreadPool):
raise TypeError('"component" must be a ThreadPool object')
component.in_queue = self.out_queue
return component
|
Connect two ThreadPools.
The ``in_queue`` of the second pool will be set as the ``out_queue`` of
the current pool, thus all the output will be input to the second pool.
Args:
component (ThreadPool): the ThreadPool to be connected.
Returns:
ThreadPool: the modified second ThreadPool.
|
juraj-google-style
|
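A short sketch of the chaining call; the constructor arguments are omitted because they depend on the surrounding library, so the two pools here are assumed to be valid ThreadPool instances.
```python
# Hypothetical pools from the same module as the connect() method above.
downloader = ThreadPool()   # produces items on its out_queue
parser = ThreadPool()       # will consume them through its in_queue

downloader.connect(parser)                       # returns `parser`, now chained
assert parser.in_queue is downloader.out_queue   # output of one feeds the other
```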
def add_logger(self, name, address, conn_type, log_dir_path=None, **kwargs):
capture_handler_conf = kwargs
if (not log_dir_path):
log_dir_path = self._mngr_conf['root_log_directory']
log_dir_path = os.path.normpath(os.path.expanduser(log_dir_path))
capture_handler_conf['log_dir'] = log_dir_path
capture_handler_conf['name'] = name
if ('rotate_log' not in capture_handler_conf):
capture_handler_conf['rotate_log'] = True
transforms = []
if ('pre_write_transforms' in capture_handler_conf):
for transform in capture_handler_conf['pre_write_transforms']:
if isinstance(transform, str):
if transform in globals():
transforms.append(globals().get(transform))
else:
msg = 'Unable to load data transformation "{}" for handler "{}"'.format(transform, capture_handler_conf['name'])
log.warn(msg)
elif hasattr(transform, '__call__'):
transforms.append(transform)
else:
msg = 'Unable to determine how to load data transform "{}"'.format(transform)
log.warn(msg)
capture_handler_conf['pre_write_transforms'] = transforms
address_key = str(address)
if (address_key in self._stream_capturers):
capturer = self._stream_capturers[address_key][0]
capturer.add_handler(capture_handler_conf)
return
socket_logger = SocketStreamCapturer(capture_handler_conf, address, conn_type)
greenlet = gevent.spawn(socket_logger.socket_monitor_loop)
self._stream_capturers[address_key] = (socket_logger, greenlet)
self._pool.add(greenlet)
|
Add a new stream capturer to the manager.
Add a new stream capturer to the manager with the provided configuration
details. If an existing capturer is monitoring the same address the
new handler will be added to it.
Args:
name:
A string defining the new capturer's name.
address:
A tuple containing address data for the capturer. Check the
:class:`SocketStreamCapturer` documentation for what is
required.
conn_type:
A string defining the connection type. Check the
:class:`SocketStreamCapturer` documentation for a list of valid
options.
log_dir_path:
An optional path defining the directory where the
capturer should write its files. If this isn't provided the root
log directory from the manager configuration is used.
|
codesearchnet
|
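The subtlest part of `add_logger` above is resolving `pre_write_transforms` entries that may be either names or callables; below is a standalone sketch of that step, with the Python 2 `globals().has_key(...)` test written as a membership check.
```python
import logging

log = logging.getLogger(__name__)

def hex_dump(data):                     # hypothetical transform resolvable by name
    return data.hex()

def resolve_transforms(entries):
    """Turn a mix of transform names and callables into a list of callables."""
    transforms = []
    for transform in entries:
        if isinstance(transform, str):
            if transform in globals():
                transforms.append(globals()[transform])
            else:
                log.warning('Unable to load data transformation "%s"', transform)
        elif callable(transform):
            transforms.append(transform)
        else:
            log.warning('Unable to determine how to load data transform "%s"', transform)
    return transforms

print(resolve_transforms(["hex_dump", str.upper, "missing_helper"]))
```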