code (string, lengths 20-4.93k) | docstring (string, lengths 33-1.27k) | source (string, 3 classes)
---|---|---|
def create_backup(name):
if name in list_backups():
raise CommandExecutionError('Backup already present: {0}'.format(name))
ps_cmd = ['Backup-WebConfiguration',
'-Name', "'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to backup web configuration: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
return name in list_backups() | Backup an IIS Configuration on the System.
.. versionadded:: 2017.7.0
.. note::
Backups are stored in the ``$env:Windir\System32\inetsrv\backup``
folder.
Args:
name (str): The name to give the backup
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_backup good_config_20170209 | juraj-google-style |
def has_types(self, types, all_=True):
func = (all if all_ else any)
return func([self.get_stim(t) for t in listify(types)]) | Check whether the current component list matches all Stim types
in the types argument.
Args:
types (Stim, list): a Stim class or iterable of Stim classes.
all_ (bool): if True, all input types must match; if False, at
least one input type must match.
Returns:
True if all passed types match at least one Stim in the component
list, otherwise False. | codesearchnet |
def interactive_console(self):
if (not self.running()):
raise RuntimeError(('VM %s is not running' % self._libvirt_.name))
virsh_command = ['virsh', '-c', config.get('libvirt_url'), 'console', self._libvirt_name()]
return utils.run_interactive_command(command=virsh_command) | Opens an interactive console
Returns:
lago.utils.CommandStatus: result of the virsh command execution | codesearchnet |
def measurement_key(val: Any, default: Any=RaiseTypeErrorIfNotProvided):
getter = getattr(val, '_measurement_key_', None)
result = (NotImplemented if (getter is None) else getter())
if (result is not NotImplemented):
return result
if (default is not RaiseTypeErrorIfNotProvided):
return default
if (getter is None):
raise TypeError("object of type '{}' has no _measurement_key_ method.".format(type(val)))
raise TypeError("object of type '{}' does have a _measurement_key_ method, but it returned NotImplemented.".format(type(val))) | Get the measurement key for the given value.
Args:
val: The value which has the measurement key.
default: Determines the fallback behavior when `val` doesn't have
a measurement key. If `default` is not set, a TypeError is raised.
If default is set to a value, that value is returned if the value
does not have `_measurement_key_`.
Returns:
If `val` has a `_measurement_key_` method and its result is not
`NotImplemented`, that result is returned. Otherwise, if a default
value was specified, the default value is returned.
Raises:
TypeError: `val` doesn't have a _measurement_key_ method (or that method
returned NotImplemented) and also no default value was specified. | codesearchnet |
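The row above illustrates a common "dunder getter" protocol. A minimal, self-contained sketch of the same pattern, with a hypothetical `MeasureGate` class and a standalone helper (the names below are illustrative, not part of the library above):

```python
# Hypothetical illustration of the "dunder getter with NotImplemented" protocol.
RAISE = object()  # sentinel standing in for RaiseTypeErrorIfNotProvided

class MeasureGate:
    def __init__(self, key):
        self._key = key

    def _measurement_key_(self):
        return self._key

def measurement_key_of(val, default=RAISE):
    getter = getattr(val, '_measurement_key_', None)
    result = NotImplemented if getter is None else getter()
    if result is not NotImplemented:
        return result
    if default is not RAISE:
        return default
    raise TypeError(f"no usable _measurement_key_ on {type(val)!r}")

print(measurement_key_of(MeasureGate('q0')))       # -> q0
print(measurement_key_of(object(), default=None))  # -> None
```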
def columns_exist(inspect_dataset):
if not hasattr(inspect_dataset, "columns"):
warnings.warn(
"No columns list found in dataset; no autoinspection performed.")
return
elif isinstance(inspect_dataset.columns[0], string_types):
columns = inspect_dataset.columns
elif isinstance(inspect_dataset.columns[0], dict) and "name" in inspect_dataset.columns[0]:
columns = [col['name'] for col in inspect_dataset.columns]
else:
raise AutoInspectError(
"Unable to determine column names for this dataset.")
create_multiple_expectations(
inspect_dataset, columns, "expect_column_to_exist") | This function takes a dataset and adds an expectation that each column present exists.
Args:
inspect_dataset (great_expectations.dataset): The dataset to inspect and to which to add expectations. | juraj-google-style |
def create_db(file_pth):
conn = sqlite3.connect(file_pth)
c = conn.cursor()
c.execute('DROP TABLE IF EXISTS library_spectra_source')
c.execute(
)
c.execute('DROP TABLE IF EXISTS metab_compound')
c.execute()
c.execute('DROP TABLE IF EXISTS library_spectra_meta')
c.execute(
)
c.execute('DROP TABLE IF EXISTS library_spectra')
c.execute(
)
c.execute('DROP TABLE IF EXISTS library_spectra_annotation')
c.execute(
) | Create an empty SQLite database for library spectra.
Example:
>>> from msp2db.db import create_db
>>> db_pth = 'library.db'
>>> create_db(file_pth=db_pth)
Args:
file_pth (str): File path for SQLite database | juraj-google-style |
def _get_image_nums_and_video_nums(self, input_ids: Optional[torch.LongTensor]) -> Tuple[torch.Tensor, torch.Tensor]:
image_token_id = self.config.image_token_id
video_token_id = self.config.video_token_id
vision_start_token_id = self.config.vision_start_token_id
vision_start_mask = input_ids == vision_start_token_id
vision_first_mask = torch.roll(vision_start_mask, shifts=1, dims=1)
image_mask = input_ids == image_token_id
video_mask = input_ids == video_token_id
image_nums = torch.sum(vision_first_mask & image_mask, dim=1)
video_nums = torch.sum(vision_first_mask & video_mask, dim=1)
return (image_nums, video_nums) | Get the number of images and videos for each sample to calculate the separation length of the sample tensor.
These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Returns:
image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`)
video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`) | github-repos |
def set_values(self, values, separator='\n', indent=4*' '):
self._updated = True
self._multiline_value_joined = True
self._values = values
if separator == '\n':
values.insert(0, '')
separator = separator + indent
self._value = separator.join(values) | Sets the value to a given list of options, e.g. multi-line values
Args:
values (list): list of values
separator (str): separator for values, default: line separator
indent (str): indentation depth in case of line separator | juraj-google-style |
def from_api_repr(cls, resource, client):
job_ref_properties = resource.get('jobReference', {'projectId': client.project})
job_ref = _JobReference._from_api_repr(job_ref_properties)
job = cls(job_ref, client)
resource['jobReference'] = job_ref_properties
job._properties = resource
return job | Construct an UnknownJob from the JSON representation.
Args:
resource (dict): JSON representation of a job.
client (google.cloud.bigquery.client.Client):
Client connected to BigQuery API.
Returns:
UnknownJob: Job corresponding to the resource. | codesearchnet |
def DeleteOldFeedItems(client, feed_item_ids, feed):
if (not feed_item_ids):
return
feed_item_service = client.GetService('FeedItemService', 'v201809')
operations = [{'operator': 'REMOVE', 'operand': {'feedId': feed['id'], 'feedItemId': feed_item_id}} for feed_item_id in feed_item_ids]
feed_item_service.mutate(operations) | Deletes the old feed items for which extension settings have been created.
Args:
client: an AdWordsClient instance.
feed_item_ids: a list of Feed Item Ids.
feed: the Feed containing the given Feed Item Ids. | codesearchnet |
def _normalize_feature_columns(feature_columns):
if isinstance(feature_columns, _FeatureColumn):
feature_columns = [feature_columns]
if isinstance(feature_columns, collections_abc.Iterator):
feature_columns = list(feature_columns)
if isinstance(feature_columns, dict):
raise ValueError('Expected feature_columns to be iterable, found dict.')
for column in feature_columns:
if not isinstance(column, _FeatureColumn):
raise ValueError('Items of feature_columns must be a _FeatureColumn. Given (type {}): {}.'.format(type(column), column))
if not feature_columns:
raise ValueError('feature_columns must not be empty.')
name_to_column = {}
for column in feature_columns:
if column.name in name_to_column:
raise ValueError('Duplicate feature column name found for columns: {} and {}. This usually means that these columns refer to same base feature. Either one must be discarded or a duplicated but renamed item must be inserted in features dict.'.format(column, name_to_column[column.name]))
name_to_column[column.name] = column
return feature_columns | Normalizes the `feature_columns` input.
This method converts the `feature_columns` to list type as best as it can. In
addition, verifies the type and other parts of feature_columns, required by
downstream library.
Args:
feature_columns: The raw feature columns, usually passed by users.
Returns:
The normalized feature column list.
Raises:
ValueError: for any invalid inputs, such as empty, duplicated names, etc. | github-repos |
def get_stream(data=None):
if len(__mstreams_available__) == 0:
if data:
mstream = MemoryStream(data)
mstream.seek(0)
else:
mstream = MemoryStream()
__mstreams__.append(mstream)
return mstream
mstream = __mstreams_available__.pop()
if data is not None and len(data):
mstream.clean_up()
mstream.write(data)
mstream.seek(0)
return mstream | Get a MemoryStream instance.
Args:
data (bytes, bytearray, BytesIO): (Optional) data to create the stream from.
Returns:
MemoryStream: instance. | juraj-google-style |
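The row above implements an object pool for memory streams. A standalone sketch of the same pooling pattern using the standard `io.BytesIO` (the `MemoryStream` type itself is library-specific and not reproduced here):

```python
import io

_available = []  # pool of reusable streams

def get_stream(data=None):
    # Reuse a pooled stream when possible; otherwise create a new one.
    if _available:
        stream = _available.pop()
        stream.seek(0)
        stream.truncate(0)
    else:
        stream = io.BytesIO()
    if data:
        stream.write(data)
        stream.seek(0)
    return stream

def release_stream(stream):
    _available.append(stream)

s = get_stream(b"hello")
print(s.read())            # b'hello'
release_stream(s)
print(get_stream() is s)   # True: the pooled object is reused
```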
def map_creative_third_party_url_feeds(self, creative_feed, third_party_url_feed):
for creative in creative_feed:
creative['third_party_urls'] = [third_party_url for third_party_url in third_party_url_feed if self._assignment_matches(creative, third_party_url)] | Maps third party url feed to the corresponding creative.
Third party URLs are child objects of the creative, with a one (creative) to many (third party URLs) relationship. In Bulkdozer they are represented by
two separate tabs in the feed, and this method maps the creatives to their
respective third party URLs based on the creative ID.
Args:
creative_feed: Creative feed.
third_party_url_feed: Third party url feed. | github-repos |
def mark_causative(self, institute, case, user, link, variant):
display_name = variant['display_name']
LOG.info('Mark variant {0} as causative in the case {1}'.format(display_name, case['display_name']))
LOG.info('Adding variant to causatives in case {0}'.format(case['display_name']))
LOG.info('Marking case {0} as solved'.format(case['display_name']))
updated_case = self.case_collection.find_one_and_update({'_id': case['_id']}, {'$push': {'causatives': variant['_id']}, '$set': {'status': 'solved'}}, return_document=pymongo.ReturnDocument.AFTER)
LOG.info('Creating case event for marking {0} causative'.format(variant['display_name']))
self.create_event(institute=institute, case=case, user=user, link=link, category='case', verb='mark_causative', variant=variant, subject=variant['display_name'])
LOG.info('Creating variant event for marking {0} causative'.format(case['display_name']))
self.create_event(institute=institute, case=case, user=user, link=link, category='variant', verb='mark_causative', variant=variant, subject=variant['display_name'])
return updated_case | Create an event for marking a variant causative.
Arguments:
institute (dict): A Institute object
case (dict): Case object
user (dict): A User object
link (str): The url to be used in the event
variant (variant): A variant object
Returns:
updated_case(dict) | codesearchnet |
def _container_start_handler_factory(ion_type, before_yield=lambda c, ctx: None):
assert ion_type.is_container
@coroutine
def container_start_handler(c, ctx):
before_yield(c, ctx)
yield
yield ctx.event_transition(IonEvent, IonEventType.CONTAINER_START, ion_type, value=None)
return container_start_handler | Generates handlers for tokens that begin with container start characters.
Args:
ion_type (IonType): The type of this container.
before_yield (Optional[callable]): Called at initialization. Accepts the first character's ordinal and the
current context; performs any necessary initialization actions. | juraj-google-style |
def option_configure(debug=False, path=None):
if CONFIG_SCRIPT in sys.argv[0]:
debug = True
if path is None:
path = local_config['PROJECT']['CONFIG_PATH']
if debug:
if os.path.isfile(path):
debug_mode('local_config file: ', local_config, debug, halt=True)
else:
msg =
debug_mode(msg, {'CONFIG_PATH': path}, debug, halt=True)
r = configuration.init(debug, path)
return r | Summary:
Initiate configuration menu to customize metal runtime options.
Console script ```keyconfig``` invokes this option_configure directly
in debug mode to display the contents of the local config file (if exists)
Args:
:path (str): full path to default local configuration file location
:debug (bool): debug flag, when True prints out contents of local
config file
Returns:
TYPE (bool): Configuration Success | Failure | juraj-google-style |
def _create_delta(self):
states = self._read_transitions()
total_states = len(states)
self._add_sink_state(states)
nulltrans = self._read_null_transitions()
def delta(current_state, character):
if character != '':
newstate = states[current_state][ord(character)]
if newstate > 0:
return newstate
else:
return total_states
else:
return nulltrans[current_state]
return total_states + 1, delta | This function creates the delta transition
Args:
startState (int): Initial state of automaton
Results:
int, func: A number indicating the total states, and the delta function | juraj-google-style |
def _HandleDuplicates(self, new_aliases):
name_to_alias = {}
out = []
for a in new_aliases:
if a.name not in name_to_alias:
name_to_alias[a.name] = a
out.append(a)
continue
existing = name_to_alias[a.name]
if self._EquivalentAliases(existing, a):
continue
existing_name = existing.type.name or existing.type.__class__.__name__
a_name = a.type.name or a.type.__class__.__name__
raise KeyError(f'Duplicate top level items: {existing_name!r}, {a_name!r}')
return out | Handle duplicate module-level aliases.
Aliases pointing to qualified names could be the result of importing the
same entity through multiple import paths, which should not count as an
error; instead we just deduplicate them.
Args:
new_aliases: The list of new aliases to deduplicate
Returns:
A deduplicated list of aliases.
Raises:
KeyError: If there is a name clash. | github-repos |
def is_supergroup(self, subgroup):
warnings.warn('This is not fully functional. Only trivial subsets are tested right now. ')
return set(subgroup.symmetry_ops).issubset(self.symmetry_ops) | True if this group is a supergroup of the supplied group.
Args:
subgroup (SymmetryGroup): Subgroup to test.
Returns:
True if this group is a supergroup of the supplied group. | codesearchnet |
def get_unresolved(aln_df):
unresolved_df = aln_df[aln_df['type'] == 'unresolved']
unresolved = []
if not unresolved_df.empty:
unresolved_df['id_a_pos'] = unresolved_df['id_a_pos'].astype(int)
unresolved = unresolved_df.id_a_pos.tolist()
return unresolved | Get a list of residue numbers (in the original sequence's numbering) that are unresolved
Args:
aln_df (DataFrame): Alignment DataFrame
Returns:
list: Residue numbers that are unresolved | juraj-google-style |
def _load_features_from_images(self, images, names=None):
if ((names is not None) and (len(names) != len(images))):
raise Exception('Lists of feature names and images must be of same length!')
self.feature_names = (names if (names is not None) else images)
self.feature_images = imageutils.load_imgs(images, self.masker) | Load feature image data from image files.
Args:
images: A list of image filenames.
names: An optional list of strings to use as the feature names. Must
be in the same order as the images. | codesearchnet |
def _clip_gradient_op(dtype):
def clip_gradient_backward(op, grad):
clip_value_min = op.inputs[1]
clip_value_max = op.inputs[2]
clipped_grad = tf.clip_by_value(grad, clip_value_min, clip_value_max)
return (clipped_grad, None, None)
def clip_gradient_forward(x, clip_value_min, clip_value_max):
del clip_value_min
del clip_value_max
return x
func_name = 'ClipGradient_{}'.format(dtype.name)
return function.Defun(dtype, dtype, dtype, python_grad_func=clip_gradient_backward, func_name=func_name)(clip_gradient_forward) | Create an op that clips gradients using a Defun.
The tensorflow Defun decorator creates an op and tensorflow caches these op
automatically according to `func_name`. Using a Defun decorator twice with the
same `func_name` does not create a new op, instead the cached op is used.
This method produces a new op the first time it is called with a given `dtype`
argument, and then uses the cached op each time it is called after that with
the same `dtype`. The min and max clip values are given as arguments for the
forward pass method so that they can be used in the backwards pass.
Args:
dtype: the dtype of the net whose gradient is being clipped.
Returns:
The op that clips gradients. | codesearchnet |
def run_docker(self, commands):
try:
import docker
except ImportError:
print('{}{}Could not import docker module (try "pip install docker").'.format(c.Style.BRIGHT, c.Fore.RED))
sys.exit(1)
app_args_data = self.profile.get('profile_args').data
install_json = self.profile.get('install_json')
client = docker.from_env()
app_dir = os.getcwd()
ports = {}
if self.args.vscd:
ports = {'{}/tcp'.format(self.args.vscd_port): self.args.vscd_port}
volumes = {}
in_path = '{}/{}'.format(app_dir, app_args_data.get('tc_in_path'))
if (app_args_data.get('tc_in_path') is not None):
volumes[in_path] = {'bind': in_path}
log_path = '{}/{}'.format(app_dir, app_args_data.get('tc_log_path'))
if (app_args_data.get('tc_log_path') is not None):
volumes[log_path] = {'bind': log_path}
out_path = '{}/{}'.format(app_dir, app_args_data.get('tc_out_path'))
if (app_args_data.get('tc_out_path') is not None):
volumes[out_path] = {'bind': out_path}
temp_path = '{}/{}'.format(app_dir, app_args_data.get('tc_temp_path'))
if (app_args_data.get('tc_temp_path') is not None):
volumes[temp_path] = {'bind': temp_path}
volumes[app_dir] = {'bind': app_dir}
if (self.args.docker_image is not None):
docker_image = self.args.docker_image
else:
docker_image = self.profile.get('dockerImage', install_json.get('dockerImage', self.docker_image))
status_code = 1
try:
self.container = client.containers.run(docker_image, entrypoint=commands.get('cli_command'), environment=['PYTHONPATH={}/lib_latest'.format(app_dir)], detach=True, ports=ports, remove=True, volumes=volumes, working_dir=app_dir)
results = self.container.wait()
status_code = results.get('StatusCode')
error = results.get('Error')
if error:
print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, error))
except Exception as e:
print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, e))
sys.exit()
return self.run_exit_code(status_code) | Run App in Docker Container.
Args:
commands (dict): A dictionary of the CLI commands.
Returns:
int: The exit code of the subprocess command. | codesearchnet |
def get_all_subtypes_with_tags(self):
assert self.has_enumerated_subtypes(), 'Enumerated subtypes not set.'
subtypes_with_tags = []
fifo = deque([subtype_field.data_type for subtype_field in self.get_enumerated_subtypes()])
while fifo:
data_type = fifo.popleft()
subtypes_with_tags.append((data_type._get_subtype_tags(), data_type))
if data_type.has_enumerated_subtypes():
for subtype_field in data_type.get_enumerated_subtypes():
fifo.append(subtype_field.data_type)
return subtypes_with_tags | Unlike other enumerated-subtypes-related functionality, this method
returns not just direct subtypes, but all subtypes of this struct. The
tag of each subtype is the list of tags from which the type descends.
This method only applies to structs that enumerate subtypes.
Use this when you need to generate a lookup table for a root struct
that maps a generated class representing a subtype to the tag it needs
in the serialized format.
Returns:
List[Tuple[List[String], Struct]] | codesearchnet |
def get(issue_id, issue_type_id):
return db.Issue.find_one((Issue.issue_id == issue_id), (Issue.issue_type_id == issue_type_id)) | Return issue by ID
Args:
issue_id (str): Unique Issue identifier
issue_type_id (str): Type of issue to get
Returns:
:obj:`Issue`: Returns Issue object if found, else None | codesearchnet |
class ClapAudioPatchMerging(nn.Module):
def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def maybe_pad(self, input_feature, height, width):
should_pad = height % 2 == 1 or width % 2 == 1
if should_pad:
pad_values = (0, 0, 0, width % 2, 0, height % 2)
input_feature = nn.functional.pad(input_feature, pad_values)
return input_feature
def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:
height, width = input_dimensions
batch_size, dim, num_channels = input_feature.shape
input_feature = input_feature.view(batch_size, height, width, num_channels)
input_feature = self.maybe_pad(input_feature, height, width)
input_feature_0 = input_feature[:, 0::2, 0::2, :]
input_feature_1 = input_feature[:, 1::2, 0::2, :]
input_feature_2 = input_feature[:, 0::2, 1::2, :]
input_feature_3 = input_feature[:, 1::2, 1::2, :]
input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
input_feature = input_feature.view(batch_size, -1, 4 * num_channels)
input_feature = self.norm(input_feature)
input_feature = self.reduction(input_feature)
return input_feature | Patch Merging Layer.
Args:
input_resolution (`Tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class. | github-repos |
def CreateCampaign(client, merchant_id, budget_id):
campaign_service = client.GetService('CampaignService', 'v201809')
campaign = {'name': ('Shopping campaign
operations = [{'operator': 'ADD', 'operand': campaign}]
return campaign_service.mutate(operations)['value'][0] | Creates a new Display Network campaign.
Args:
client: an AdWordsClient instance.
merchant_id: a int merchant center ID.
budget_id: a int budget ID.
Returns:
The campaign that was successfully created. | codesearchnet |
def GetFileObjectReferenceCount(self, path_spec):
cache_value = self._file_object_cache.GetCacheValue(path_spec.comparable)
if (not cache_value):
return None
return cache_value.reference_count | Retrieves the reference count of a cached file-like object.
Args:
path_spec (PathSpec): path specification.
Returns:
int: reference count or None if there is no file-like object for
the corresponding path specification cached. | codesearchnet |
def get_lat_long(self, callsign, timestamp=timestamp_now):
callsign_data = self.get_all(callsign, timestamp=timestamp)
return {const.LATITUDE: callsign_data[const.LATITUDE], const.LONGITUDE: callsign_data[const.LONGITUDE]} | Returns Latitude and Longitude for a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Containing Latitude and Longitude
Raises:
KeyError: No data found for callsign
Example:
The following code returns Latitude & Longitude for "DH1TW"
>>> from pyhamtools import LookupLib, Callinfo
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> cic = Callinfo(my_lookuplib)
>>> cic.get_lat_long("DH1TW")
{
'latitude': 51.0,
'longitude': -10.0
}
Note:
Unfortunately, in most cases the returned Latitude and Longitude are not very precise.
Clublog and Country-files.com use the country's capital coordinates in most cases, if no
dedicated entry in the database exists. Best results will be retrieved with QRZ.com Lookup. | codesearchnet |
def _find_docstring_line(self, start, end):
for i in range(start, end + 1):
if i in self._tokenized_triple_quotes:
return i
return None | Find the row where a docstring starts in a function or class.
This will search for the first match of a triple quote token in
row sequence from the start of the class or function.
Args:
start: the row where the class / function starts.
end: the row where the class / function ends.
Returns:
int: the row number where the docstring is found. | juraj-google-style |
def make_job(name: str='', run_name: str='', num_tasks: int=0, install_script: str='', **kwargs) -> backend.Job:
return _backend.make_job(name=name, run_name=run_name, num_tasks=num_tasks, install_script=install_script, **kwargs) | Create a job using current backend. Blocks until all tasks are up and initialized.
Args:
name: name of the job
run_name: name of the run (auto-assigned if empty)
num_tasks: number of tasks
install_script: bash-runnable script
**kwargs:
Returns:
backend.Job | codesearchnet |
def import_entities(self, entities):
edata = Entity.create_payload(entities)
r = fapi.upload_entities(self.namespace, self.name, edata, self.api_url)
fapi._check_response_code(r, 201) | Upload entity objects.
Args:
entities: iterable of firecloud.Entity objects. | codesearchnet |
def clinvar_submission_header(submission_objs, csv_type):
complete_header = {}
custom_header = {}
if csv_type == 'variant_data':
complete_header = CLINVAR_HEADER
else:
complete_header = CASEDATA_HEADER
for header_key, header_value in complete_header.items():
for clinvar_obj in submission_objs:
for key, value in clinvar_obj.items():
if not header_key in custom_header and header_key == key:
custom_header[header_key] = header_value
return custom_header | Determine which fields to include in csv header by checking a list of submission objects
Args:
submission_objs(list): a list of objects (variants or casedata) to include in a csv file
csv_type(str) : 'variant_data' or 'case_data'
Returns:
custom_header(dict): A dictionary with the fields required in the csv header. Keys and values are specified in CLINVAR_HEADER and CASEDATA_HEADER | juraj-google-style |
def CalculateWaitForRetry(retry_attempt, max_wait=60):
wait_time = (2 ** retry_attempt)
max_jitter = (wait_time / 4.0)
wait_time += random.uniform((- max_jitter), max_jitter)
return max(1, min(wait_time, max_wait)) | Calculates amount of time to wait before a retry attempt.
Wait time grows exponentially with the number of attempts. A
random amount of jitter is added to spread out retry attempts from
different clients.
Args:
retry_attempt: Retry attempt counter.
max_wait: Upper bound for wait time [seconds].
Returns:
Number of seconds to wait before retrying request. | codesearchnet |
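A self-contained sketch of the same exponential-backoff-with-jitter formula from the row above, useful for seeing how the wait grows per attempt (jitter is uniform within +/-25% of 2**attempt, and the result is clamped to [1, max_wait]):

```python
import random

def backoff_wait(retry_attempt, max_wait=60):
    # Exponential growth with +/-25% jitter, clamped to [1, max_wait].
    wait_time = 2 ** retry_attempt
    max_jitter = wait_time / 4.0
    wait_time += random.uniform(-max_jitter, max_jitter)
    return max(1, min(wait_time, max_wait))

for attempt in range(1, 8):
    print(attempt, round(backoff_wait(attempt), 2))
```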
def deploy(app_id, version, promote, quiet):
gae_app = GaeApp.for_branch(git.current_branch().name)
if gae_app is None and None in (app_id, version):
msg = (
"Can't find an AppEngine app setup for branch <35>{}<32> and"
"--project and --version were not given."
)
log.err(msg, git.current_branch().name)
sys.exit(1)
if version is not None:
gae_app.version = version
if app_id is not None:
gae_app.app_id = app_id
gae_app.deploy(promote, quiet) | Deploy the app to AppEngine.
Args:
app_id (str):
AppEngine App ID. Overrides config value app_id if given.
version (str):
AppEngine project version. Overrides config values if given.
promote (bool):
If set to **True** promote the current remote app version to the one
that's being deployed.
quiet (bool):
If set to **True** this will pass the ``--quiet`` flag to gcloud
command. | juraj-google-style |
def output_shape(self):
if not self._inbound_nodes:
raise AttributeError('The layer has never been called and thus has no defined output shape.')
all_output_shapes = set([str(node.output_shapes) for node in self._inbound_nodes])
if len(all_output_shapes) == 1:
return self._inbound_nodes[0].output_shapes
else:
raise AttributeError('The layer "%s" has multiple inbound nodes, with different output shapes. Hence the notion of "output shape" is ill-defined for the layer. Use `get_output_shape_at(node_index)` instead.' % self.name) | Retrieves the output shape(s) of a layer.
Only applicable if the layer has one output,
or if all outputs have the same shape.
Returns:
Output shape, as an integer shape tuple
(or list of shape tuples, one tuple per output tensor).
Raises:
AttributeError: if the layer has no defined output shape.
RuntimeError: if called in Eager mode. | github-repos |
def remove(self,
entity_id,
property_uri,
value):
if not entity_id.startswith("http"):
entity_uri = urllib.parse.urljoin(self.base_url, entity_id)
else:
entity_uri = entity_id
sparql_template = Template()
sparql = sparql_template.substitute(
prefix=build_prefixes(self.namespaces),
entity=entity_uri,
prop_name=property_uri,
value_str=self.__value_format__(value))
delete_property_request = urllib.request.Request(
entity_uri,
data=sparql.encode(),
method='PATCH',
headers={'Content-Type': 'application/sparql-update'})
response = urllib.request.urlopen(delete_property_request)
if response.code < 400:
return True
return False | Method removes a triple for the given subject.
Args:
entity_id(string): Fedora Object ID, ideally URI of the subject
property_uri(string):
value(string):
Return:
boolean: True if triple was removed from the object | juraj-google-style |
def candidate_paths(self, filepath):
(filelead, filetail) = os.path.split(filepath)
(name, extension) = os.path.splitext(filetail)
if extension:
extension = extension[1:]
filenames = [name]
if (not name.startswith('_')):
filenames.append('_{}'.format(name))
if (extension and (extension in self.CANDIDATE_EXTENSIONS)):
filenames = ['.'.join([k, extension]) for k in filenames]
else:
if extension:
filenames = ['.'.join([k, extension]) for k in filenames]
new = []
for ext in self.CANDIDATE_EXTENSIONS:
new.extend(['.'.join([k, ext]) for k in filenames])
filenames = new
return [os.path.join(filelead, v) for v in filenames] | Return candidates path for given path
* If the filename does not start with ``_``, will build a candidate for
both with and without the ``_`` prefix;
* Will build for each available extension if the filename does not have
an explicit extension;
* Leading path directory is preserved;
Args:
filepath (str): Relative path as found in an import rule from a
SCSS source.
Returns:
list: Builded candidate paths (as relative paths). | codesearchnet |
def forward(self, encoder_hidden_states, padding_masks=None):
hidden_states = encoder_hidden_states.transpose(1, -1)
for layer in self.conv_layers:
hidden_states = layer(hidden_states)
hidden_states = self.linear(hidden_states.transpose(1, 2))
if padding_masks is not None:
hidden_states = hidden_states.masked_fill(padding_masks, 0.0)
return hidden_states | Calculate forward propagation.
Args:
encoder_hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):
Batch of input sequences.
padding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*):
Batch of masks indicating padded part.
Returns:
Tensor: Batch of predicted sequences `(batch_size, max_text_length, 1)`. | github-repos |
def toarray(vari):
if isinstance(vari, Poly):
shape = vari.shape
out = numpy.asarray([{} for _ in range(numpy.prod(shape))], dtype=object)
core = vari.A.copy()
for key in core.keys():
core[key] = core[key].flatten()
for i in range(numpy.prod(shape)):
if (not numpy.all((core[key][i] == 0))):
out[i][key] = core[key][i]
for i in range(numpy.prod(shape)):
out[i] = Poly(out[i], vari.dim, (), vari.dtype)
out = out.reshape(shape)
return out
return numpy.asarray(vari) | Convert polynomial array into a numpy.asarray of polynomials.
Args:
vari (Poly, numpy.ndarray):
Input data.
Returns:
(numpy.ndarray):
A numpy array with ``Q.shape==A.shape``.
Examples:
>>> poly = cp.prange(3)
>>> print(poly)
[1, q0, q0^2]
>>> array = cp.toarray(poly)
>>> print(isinstance(array, numpy.ndarray))
True
>>> print(array[1])
q0 | codesearchnet |
def list_images(self):
r = self.get((self.registry_url + '/v2/_catalog'), auth=self.auth)
return r.json()['repositories'] | List images stored in the registry.
Returns:
list[str]: List of image names. | codesearchnet |
def get_unique_tags(field_to_obs):
return {field: sorted(set([x.get('tag', '') for x in observations])) for (field, observations) in field_to_obs.items() if (field in TAG_FIELDS)} | Returns a dictionary of tags that a user could query over.
Args:
field_to_obs: Dict that maps string field to `Observation` list.
Returns:
A dict that maps keys in `TAG_FIELDS` to a list of string tags present in
the event files. If the dict does not have any observations of the type,
maps to an empty list so that we can render this to console. | codesearchnet |
def gae_advantages(td_deltas, mask, lambda_=0.95, gamma=0.99):
return rewards_to_go(td_deltas, mask, (lambda_ * gamma)) | r"""Computes the GAE advantages given the one step TD-residuals.
The formula for a GAE advantage estimator is as follows:
A_{bt} = \sum_{l=0}^{\infty}(\gamma * \lambda)^{l}(\delta_{b,t+l}).
Internally we just call rewards_to_go, since it is the same computation.
Args:
td_deltas: np.ndarray of shape (B, T) of one step TD-residuals.
mask: np.ndarray of shape (B, T) of mask for the residuals. It maybe the
case that the `td_deltas` are already masked correctly since they are
produced by `deltas(...)`
lambda_: float, lambda parameter for GAE estimators.
gamma: float, gamma (discount) parameter for GAE estimators.
Returns:
GAE advantage estimates. | codesearchnet |
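Since the GAE advantages above reduce to a reverse discounted cumulative sum of the TD residuals with discount `gamma * lambda`, the numerical core can be sketched in plain NumPy (a standalone illustration, not the library's `rewards_to_go`):

```python
import numpy as np

def discounted_cumsum(deltas, discount):
    # out[:, t] = deltas[:, t] + discount * out[:, t + 1], scanned right-to-left.
    out = np.zeros_like(deltas, dtype=np.float64)
    running = np.zeros(deltas.shape[0])
    for t in range(deltas.shape[1] - 1, -1, -1):
        running = deltas[:, t] + discount * running
        out[:, t] = running
    return out

deltas = np.array([[1.0, 1.0, 1.0]])   # one trajectory, three TD residuals
print(discounted_cumsum(deltas, 0.95 * 0.99))
# approximately [[2.825, 1.9405, 1.0]]
```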
def reset_time_estimate(self, **kwargs):
path = '%s/%s/reset_time_estimate' % (self.manager.path, self.get_id())
return self.manager.gitlab.http_post(path, **kwargs) | Resets estimated time for the object to 0 seconds.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done | juraj-google-style |
def smooth(self, noise, strategy=INVERSE_STRATEGY):
if strategy is INVERSE_STRATEGY:
self.points = with_inverse(self.points, noise)
elif strategy is EXTRAPOLATE_STRATEGY:
self.points = with_extrapolation(self.points, noise, 30)
elif strategy is NO_STRATEGY:
self.points = with_no_strategy(self.points, noise)
return self | In-place smoothing
See smooth_segment function
Args:
noise (float): Noise expected
strategy (int): Strategy to use. Either smooth.INVERSE_STRATEGY
or smooth.EXTRAPOLATE_STRATEGY
Returns:
:obj:`Segment` | juraj-google-style |
def _InitValues(self, sizes):
total_size = 1
for s in sizes:
total_size *= s
x = [f * 0.5 for f in range(1, total_size + 1)]
return constant_op.constant(x, shape=sizes) | Initializes values for input tensors.
Args:
sizes: Tensor dimensions.
Returns:
Tensor initialized to values. | github-repos |
def update_headers(self, response):
if 'expires' in response.headers and 'cache-control' in response.headers:
self.msg = self.server_cache_headers
return response.headers
else:
self.msg = self.default_cache_vars
date = parsedate(response.headers['date'])
expires = datetime(*date[:6]) + timedelta(0, self.expire_after)
response.headers.update({'expires': formatdate(calendar.timegm(expires.timetuple())),
'cache-control': 'public'})
return response.headers | Returns the updated caching headers.
Args:
response (HttpResponse): The response from the remote service
Returns:
response:(HttpResponse.Headers): Http caching headers | juraj-google-style |
def require_version(requirement: str, hint: Optional[str]=None) -> None:
hint = f'\n{hint}' if hint is not None else ''
if re.match('^[\\w_\\-\\d]+$', requirement):
pkg, op, want_ver = (requirement, None, None)
else:
match = re.findall('^([^!=<>\\s]+)([\\s!=<>]{1,2}.+)', requirement)
if not match:
raise ValueError(f'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}')
pkg, want_full = match[0]
want_range = want_full.split(',')
wanted = {}
for w in want_range:
match = re.findall('^([\\s!=<>]{1,2})(.+)', w)
if not match:
raise ValueError(f'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}')
op, want_ver = match[0]
wanted[op] = want_ver
if op not in ops:
raise ValueError(f'{requirement}: need one of {list(ops.keys())}, but got {op}')
if pkg == 'python':
got_ver = '.'.join([str(x) for x in sys.version_info[:3]])
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
return
try:
got_ver = importlib.metadata.version(pkg)
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(f"The '{requirement}' distribution was not found and is required by this application. {hint}")
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint) | Perform a runtime check of the dependency versions, using the exact same syntax used by pip.
The installed module version comes from the *site-packages* dir via *importlib.metadata*.
Args:
requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
hint (`str`, *optional*): what suggestion to print in case of requirements not being met
Example:
```python
require_version("pandas>1.1.2")
require_version("numpy>1.18.5", "this is important to have for whatever reason")
``` | github-repos |
def pb(name, data, display_name=None, description=None):
import tensorflow.compat.v1 as tf
data = np.array(data)
if (data.shape != ()):
raise ValueError(('Expected scalar shape for data, saw shape: %s.' % data.shape))
if (data.dtype.kind not in ('b', 'i', 'u', 'f')):
raise ValueError(('Cast %s to float is not supported' % data.dtype.name))
tensor = tf.make_tensor_proto(data.astype(np.float32))
if (display_name is None):
display_name = name
summary_metadata = metadata.create_summary_metadata(display_name=display_name, description=description)
tf_summary_metadata = tf.SummaryMetadata.FromString(summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag=('%s/scalar_summary' % name), metadata=tf_summary_metadata, tensor=tensor)
return summary | Create a legacy scalar summary protobuf.
Arguments:
name: A unique name for the generated summary, including any desired
name scopes.
data: A rank-0 `np.array` or array-like form (so raw `int`s and
`float`s are fine, too).
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object. | codesearchnet |
def cube(data, xcoords=None, ycoords=None, chcoords=None, scalarcoords=None, datacoords=None, attrs=None, name=None):
cube = xr.DataArray(data, dims=('x', 'y', 'ch'), attrs=attrs, name=name)
cube.dcc._initcoords()
if (xcoords is not None):
cube.coords.update({key: ('x', xcoords[key]) for key in xcoords})
if (ycoords is not None):
cube.coords.update({key: ('y', ycoords[key]) for key in ycoords})
if (chcoords is not None):
cube.coords.update({key: ('ch', chcoords[key]) for key in chcoords})
if (datacoords is not None):
cube.coords.update({key: (('x', 'y', 'ch'), datacoords[key]) for key in datacoords})
if (scalarcoords is not None):
cube.coords.update(scalarcoords)
return cube | Create a cube as an instance of xarray.DataArray with Decode accessor.
Args:
data (numpy.ndarray): 3D (x x y x channel) array.
xcoords (dict, optional): Dictionary of arrays that label x axis.
ycoords (dict, optional): Dictionary of arrays that label y axis.
chcoords (dict, optional): Dictionary of arrays that label channel axis.
scalarcoords (dict, optional): Dictionary of values that don't label any axes (point-like).
datacoords (dict, optional): Dictionary of arrays that label x, y, and channel axes.
attrs (dict, optional): Dictionary of attributes to add to the instance.
name (str, optional): String that names the instance.
Returns:
decode cube (decode.cube): Decode cube. | codesearchnet |
def acquire(self, constructor_fn: Callable[[], Any], tag: Any=None) -> Any:
return _shared_map.acquire(self._key, constructor_fn, tag) | Acquire a reference to the object associated with this Shared handle.
Args:
constructor_fn: function that initialises / constructs the object if not
present in the cache. This function should take no arguments. It should
return an initialised object, or None if the object could not be
initialised / constructed.
tag: an optional identifier to store with the cached object. If
subsequent calls to acquire use different tags, the object will be
reloaded rather than returned from cache.
Returns:
A reference to an initialised object, either from the cache, or
newly-constructed. | github-repos |
def validate_redis(self, db_data, user_data, oper):
passed = True
if isinstance(db_data, int):
db_data = str(db_data)
if isinstance(user_data, int):
user_data = str(user_data)
if isinstance(db_data, (list)):
try:
db_data = sorted(db_data)
except TypeError:
pass
if isinstance(user_data, (list)):
try:
user_data = sorted(user_data)
except TypeError:
pass
if oper not in self.operators:
self.log.error('Invalid operator provided ({})'.format(oper))
return False
if self.operators.get(oper)(db_data, user_data):
self.reports.profile_validation(True)
else:
self.reports.profile_validation(False)
passed = False
self.validate_log_output(passed, db_data, user_data, oper)
return passed | Validate data in Redis.
Args:
db_data (str): The data store in Redis.
user_data (str): The user provided data.
oper (str): The comparison operator.
Returns:
bool: True if the data passed validation. | juraj-google-style |
def __init__(self, variant_tensor, resource_creator):
super(_VariantTracker, self).__init__(device='CPU')
self._resource_handle = variant_tensor
if not isinstance(resource_creator, def_function.Function):
raise TypeError('Resource creator should already be a tf.function.')
self._create_resource = resource_creator | Record that `variant_tensor` is associated with `resource_creator`.
Args:
variant_tensor: The variant-dtype Tensor associated with the Dataset. This
Tensor will be a captured input to functions which use the Dataset, and
is used by saving code to identify the corresponding _VariantTracker.
resource_creator: A zero-argument function which creates a new
variant-dtype Tensor. This function will be included in SavedModels and
run to re-create the Dataset's variant Tensor on restore. | github-repos |
def send_to_default_exchange(self, sess_id, message=None):
msg = json.dumps(message, cls=ZEngineJSONEncoder)
log.debug("Sending following message to %s queue through default exchange:\n%s" % (
sess_id, msg))
self.get_channel().publish(exchange='', routing_key=sess_id, body=msg) | Send messages through RabbitMQ's default exchange,
which will be delivered through routing_key (sess_id).
This method is only used for unauthenticated users, i.e. the login process.
Args:
sess_id string: Session id
message dict: Message object. | juraj-google-style |
def serialize_example(transformed_json_data, features, feature_indices, target_name):
import six
import tensorflow as tf
from trainer import feature_transforms
line = str(transformed_json_data[target_name][0])
for name, info in feature_indices:
if features[name]['transform'] in [feature_transforms.IDENTITY_TRANSFORM,
feature_transforms.SCALE_TRANSFORM]:
line += ' %d:%s' % (info['index_start'], str(transformed_json_data[name][0]))
elif features[name]['transform'] in [feature_transforms.ONE_HOT_TRANSFORM,
feature_transforms.MULTI_HOT_TRANSFORM]:
for i in range(info['size']):
if i in transformed_json_data[name]:
line += ' %d:1' % (info['index_start'] + i)
elif features[name]['transform'] in [feature_transforms.IMAGE_TRANSFORM]:
for i in range(info['size']):
line += ' %d:%s' % (info['index_start'] + i, str(transformed_json_data[name][i]))
return line | Makes an instance of data in libsvm format.
Args:
transformed_json_data: dict of transformed data.
features: features config.
feature_indices: output of feature_transforms.get_transformed_feature_indices()
Returns:
The text line representation of an instance in libsvm format. | juraj-google-style |
def get_language(self, text):
files = {'text': text}
(res, status_code) = self.post(self.language_service, files=files)
if (status_code != 200):
logger.debug('Language recognition failed.')
return (self.decode(res), status_code) | Recognise the language of the input text
Args:
text (str): The text whose language needs to be recognised
Returns:
dict, int: A dict containing the recognised language and the
confidence score. | codesearchnet |
def _add_source(model):
ignored_keys = {"author_tags", "original_xml", "additional_info"}
source = "Aleph"
for key, val in model.get_mapping().iteritems():
if key in ignored_keys:
continue
if type(val) in [list, tuple]:
ss_val = [
SourceString(item, source).to_dict()
for item in val
]
else:
ss_val = [SourceString(val, source).to_dict()]
setattr(model, key, ss_val)
return model | Go over all attributes in `model` and add :class:`SourceString` to them.
Args:
model (obj): :class:`Model` instance.
Returns:
obj: :class:`Model` instance with :class:`SourceString` descriptors. | juraj-google-style |
async def send_script(self, client_id, conn_string, script):
conn_id = self._client_connection(client_id, conn_string)
(await self.adapter.send_script(conn_id, script)) | Send a script to a device on behalf of a client.
See :meth:`AbstractDeviceAdapter.send_script`.
Args:
client_id (str): The client we are working for.
conn_string (str): A connection string that will be
passed to the underlying device adapter.
script (bytes): The script that we wish to send.
Raises:
DeviceServerError: There is an issue with your client_id such
as not being connected to the device.
DeviceAdapterError: The adapter had a protocol issue sending the script. | codesearchnet |
def bit_for_bit(model_path, bench_path, config):
fname = model_path.split(os.path.sep)[-1]
if not (os.path.isfile(bench_path) and os.path.isfile(model_path)):
return elements.error("Bit for Bit",
"File named " + fname + " has no suitable match!")
try:
model_data = Dataset(model_path)
bench_data = Dataset(bench_path)
except (FileNotFoundError, PermissionError):
return elements.error("Bit for Bit",
"File named " + fname + " could not be read!")
if not (netcdf.has_time(model_data) and netcdf.has_time(bench_data)):
return elements.error("Bit for Bit",
"File named " + fname + " could not be read!")
headers = ["Max Error", "Index of Max Error", "RMS Error", "Plot"]
stats = LIVVDict()
for i, var in enumerate(config["bit_for_bit_vars"]):
if var in model_data.variables and var in bench_data.variables:
m_vardata = model_data.variables[var][:]
b_vardata = bench_data.variables[var][:]
diff_data = m_vardata - b_vardata
if diff_data.any():
stats[var]["Max Error"] = np.amax(np.absolute(diff_data))
stats[var]["Index of Max Error"] = str(
np.unravel_index(np.absolute(diff_data).argmax(), diff_data.shape))
stats[var]["RMS Error"] = np.sqrt(np.sum(np.square(diff_data).flatten()) /
diff_data.size)
pf = plot_bit_for_bit(fname, var, m_vardata, b_vardata, diff_data)
else:
stats[var]["Max Error"] = stats[var]["RMS Error"] = 0
pf = stats[var]["Index of Max Error"] = "N/A"
stats[var]["Plot"] = pf
else:
stats[var] = {"Max Error": "No Match", "RMS Error": "N/A", "Plot": "N/A"}
model_data.close()
bench_data.close()
return elements.bit_for_bit("Bit for Bit", headers, stats) | Checks whether the given files have bit for bit solution matches
on the given variable list.
Args:
model_path: absolute path to the model dataset
bench_path: absolute path to the benchmark dataset
config: the configuration of the set of analyses
Returns:
A dictionary created by the elements object corresponding to
the results of the bit for bit testing | juraj-google-style |
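The per-variable statistics in the row above (max error, its index, and RMS error) boil down to a few NumPy calls; a standalone sketch of just that numerical core on synthetic arrays, with no NetCDF I/O:

```python
import numpy as np

model = np.array([[1.0, 2.0], [3.0, 4.0]])
bench = np.array([[1.0, 2.5], [3.0, 4.0]])
diff = model - bench

max_error = np.amax(np.absolute(diff))
max_index = np.unravel_index(np.absolute(diff).argmax(), diff.shape)
rms_error = np.sqrt(np.sum(np.square(diff).flatten()) / diff.size)

print(max_error, max_index, rms_error)  # 0.5 (0, 1) 0.25
```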
def extract_all_content(self, path=None, payload=None, objectInput=None, pretty_print=False, convert_to_obj=False):
f = file_path(path, payload, objectInput)
switches = ['-J', '-t', '-r', f]
if (not pretty_print):
switches.remove('-r')
result = self._command_template(switches)
if (result and convert_to_obj):
result = json.loads(result, encoding='utf-8')
return (result, path, f) | This function returns a JSON of all contents and
metadata of passed file
Args:
path (string): Path of file to analyze
payload (string): Payload base64 to analyze
objectInput (object): file object/standard input to analyze
pretty_print (boolean): If True adds newlines and whitespace,
for better readability
convert_to_obj (boolean): If True convert JSON in object | codesearchnet |
def convert_to_tensor_with_default(value, default, dtype=None, name=None):
rtn_val = default if value is None else value
return tf.convert_to_tensor(rtn_val, dtype=dtype, name=name) | Converts the given `value` to a `Tensor` or returns the `default` value.
Converts the input `value` to a `Tensor` or returns `default` converted to a
`Tensor` if `value == None`.
Args:
value: An object whose type has a registered Tensor conversion function.
default: The value to return if `value == None`.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of value.
name: Optional name to use if a new Tensor is created.
Returns:
A Tensor based on value. | github-repos |
def _consumers(self):
consumers = nest.flatten([component.consumers() for component in nest.flatten(self, expand_composites=True) if getattr(component, 'graph', None) is not None])
return list(set(consumers)) | Returns a list of `Operation`s that consume this `CompositeTensor`.
Returns:
A list of `Operation`s.
Raises:
RuntimeError: If this method is called while executing eagerly. | github-repos |
def delete(filepath):
remove_acl(filepath)
remove_immutable_attribute(filepath)
if os.path.isfile(filepath) or os.path.islink(filepath):
os.remove(filepath)
elif os.path.isdir(filepath):
shutil.rmtree(filepath) | Delete the given file, directory or link.
It should support undelete later on.
Args:
filepath (str): Absolute full path to a file. e.g. /path/to/file | juraj-google-style |
def addRow(self, triggered):
if triggered:
model = self.tableView.model()
model.addDataFrameRows()
self.sender().setChecked(False) | Adds a row to the model.
This method is also a slot.
Args:
triggered (bool): If the corresponding button was
activated, the row will be appended to the end. | juraj-google-style |
class Wav2Vec2PhonemeCTCTokenizerOutput(ModelOutput):
text: Union[List[str], str]
char_offsets: Union[List[ListOfDict], ListOfDict] = None | Output type of [` Wav2Vec2PhonemeCTCTokenizer`], with transcription.
Args:
text (list of `str` or `str`):
Decoded logits in text from. Usually the speech transcription.
char_offsets (list of `List[Dict[str, Union[int, str]]]` or `List[Dict[str, Union[int, str]]]`):
Offsets of the decoded characters. In combination with sampling rate and model downsampling rate char
offsets can be used to compute time stamps for each character. Total logit score of the beam associated with
produced text. | github-repos |
def save_to_file(self, filename, remap_dim0=None, remap_dim1=None):
with open(filename, 'w') as fobj:
columns = list(sorted(self._dim1))
for col in columns:
fobj.write(',')
fobj.write(str(remap_dim1[col] if remap_dim1 else col))
fobj.write('\n')
for row in sorted(self._dim0):
fobj.write(str(remap_dim0[row] if remap_dim0 else row))
for col in columns:
fobj.write(',')
fobj.write(str(self[row, col]))
fobj.write('\n') | Saves matrix to the file.
Args:
filename: name of the file where to save matrix
remap_dim0: dictionary with mapping row indices to row names which should
be saved to file. If none then indices will be used as names.
remap_dim1: dictionary with mapping column indices to column names which
should be saved to file. If none then indices will be used as names. | juraj-google-style |
def SetFlushInterval(self, flush_interval):
self._flush_interval = flush_interval
logger.debug('Elasticsearch flush interval: {0:d}'.format(flush_interval)) | Set the flush interval.
Args:
flush_interval (int): number of events to buffer before doing a bulk
insert. | juraj-google-style |
def set_attributes(self, **kwargs):
for field, value in kwargs.items():
if field in self.Meta.attributes:
setattr(self, field, value) | Set the resource attributes from the kwargs.
Only sets items in the `self.Meta.attributes` white list.
Args:
kwargs: Keyword arguments passed into the init of this class | juraj-google-style |
def __init__(self, dct_type=2, validate_args=False, name='dct'):
if dct_type not in (2, 3):
raise NotImplementedError('`type` must be one of 2 or 3')
self._dct_type = dct_type
super(DiscreteCosineTransform, self).__init__(
forward_min_event_ndims=1,
inverse_min_event_ndims=1,
is_constant_jacobian=True,
validate_args=validate_args,
name=name) | Instantiates the `DiscreteCosineTransform` bijector.
Args:
dct_type: Python `int`, the DCT type performed by the forward
transformation. Currently, only 2 and 3 are supported.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object. | juraj-google-style |
def modify_ack_deadline(self, seconds):
self._request_queue.put(requests.ModAckRequest(ack_id=self._ack_id, seconds=seconds)) | Resets the deadline for acknowledgement.
New deadline will be the given value of seconds from now.
The default implementation handles this for you; you should not need
to manually deal with setting ack deadlines. The exception case is
if you are implementing your own custom subclass of
:class:`~.pubsub_v1.subscriber._consumer.Consumer`.
Args:
seconds (int): The number of seconds to set the lease deadline
to. This should be between 0 and 600. Due to network latency,
values below 10 are advised against. | codesearchnet |
def get_enterprise_customer_or_404(enterprise_uuid):
EnterpriseCustomer = apps.get_model('enterprise', 'EnterpriseCustomer')
try:
enterprise_uuid = UUID(enterprise_uuid)
return EnterpriseCustomer.objects.get(uuid=enterprise_uuid)
except (TypeError, ValueError, EnterpriseCustomer.DoesNotExist):
LOGGER.error('Unable to find enterprise customer for UUID: [%s]', enterprise_uuid)
raise Http404 | Given an EnterpriseCustomer UUID, return the corresponding EnterpriseCustomer or raise a 404.
Arguments:
enterprise_uuid (str): The UUID (in string form) of the EnterpriseCustomer to fetch.
Returns:
(EnterpriseCustomer): The EnterpriseCustomer given the UUID. | juraj-google-style |
def in_coord_list(coord_list, coord, atol=1e-8):
return len(find_in_coord_list(coord_list, coord, atol=atol)) > 0 | Tests if a particular coord is within a coord_list.
Args:
coord_list: List of coords to test
coord: Specific coordinates
atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
array.
Returns:
True if coord is in the coord list. | juraj-google-style |
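A standalone sketch of tolerance-based coordinate membership testing in plain NumPy, independent of the `find_in_coord_list` helper used above (the function name below is illustrative):

```python
import numpy as np

def coord_in_list(coord_list, coord, atol=1e-8):
    # A coordinate matches when every component differs by at most atol.
    coords = np.asarray(coord_list)
    return bool(np.any(np.all(np.abs(coords - np.asarray(coord)) <= atol, axis=-1)))

points = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]
print(coord_in_list(points, [0.5, 0.5, 0.5 + 1e-9]))  # True
print(coord_in_list(points, [0.5, 0.5, 0.6]))         # False
```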
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] | Create a mask from the two sequences passed to be used in a sequence-pair classification task. PLBart does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros. | github-repos |
def target_call_func(self, answer: Union[str, List[str]], add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
is_batched = isinstance(answer, (list, tuple))
if is_batched:
return self.target_batch_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
else:
return self.target_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) | The method tokenizes and prepares the answer label for the model.
Args:
answer (`str` or `List[str]`):
Corresponding answer supervision to the queries for training the model. | github-repos |
def allconcat(self, x, mesh_axis, concat_axis):
return self._collective_with_groups(
x, [mesh_axis],
functools.partial(allconcat_ring, concat_axis=concat_axis)) | Grouped allconcat (like MPI allgather followed by concat).
Args:
x: a LaidOutTensor
mesh_axis: an integer - the mesh axis along which to group
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor | juraj-google-style |
def _validate_signature_def_map(self, signature_def_map):
for signature_def_key in signature_def_map:
signature_def = signature_def_map[signature_def_key]
inputs = signature_def.inputs
outputs = signature_def.outputs
for inputs_key in inputs:
self._validate_tensor_info(inputs[inputs_key])
for outputs_key in outputs:
self._validate_tensor_info(outputs[outputs_key])
if constants.INIT_OP_SIGNATURE_KEY in signature_def_map:
raise KeyError(f'SignatureDef map key "{constants.INIT_OP_SIGNATURE_KEY}" is reserved for initialization. Please use a different key.')
if constants.TRAIN_OP_SIGNATURE_KEY in signature_def_map:
raise KeyError(f'SignatureDef map key "{constants.TRAIN_OP_SIGNATURE_KEY}" is reserved for the train op. Please use a different key.') | Validates the `SignatureDef` entries in the signature def map.
Validation of entries in the signature def map includes ensuring that the
`name` and `dtype` fields of the TensorInfo protos of the `inputs` and
`outputs` of each `SignatureDef` are populated. Also ensures that reserved
SignatureDef keys for the initialization and train ops are not used.
Args:
signature_def_map: The map of signature defs to be validated.
Raises:
AssertionError: If a TensorInfo is not valid.
KeyError: If a reserved signature key is used in the map. | github-repos |
def decode(tokens):
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret) | Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string | juraj-google-style |
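A small worked example of the spacing rule (a sketch; _ALPHANUMERIC_CHAR_SET must be in scope as in the original module):
decode([u"A", u"B"])        # "A B"  - a space is inserted between two alphanumeric tokens
decode([u"A", u",", u"B"])  # "A,B"  - no space is added next to punctuation tokens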
def _is_variable(node_def: node_def_pb2.NodeDef) -> bool:
return node_def.op == 'VarHandleOp' | Determines whether `node_def` is a variable node.
Args:
node_def: `NodeDef` to test whether it is a variable or not.
Returns:
Returns True if it is a variable. | github-repos |
def clear(self, back_r: int=0, back_g: int=0, back_b: int=0, fore_r: int=0, fore_g: int=0, fore_b: int=0, char: str=' ') -> None:
n = (self.width * self.height)
self.back_r = ([back_r] * n)
self.back_g = ([back_g] * n)
self.back_b = ([back_b] * n)
self.fore_r = ([fore_r] * n)
self.fore_g = ([fore_g] * n)
self.fore_b = ([fore_b] * n)
    self.char = ([ord(char)] * n) | Clears the console. Values to fill it with are optional and default
to black with no characters.
Args:
back_r (int): Red background color, from 0 to 255.
back_g (int): Green background color, from 0 to 255.
back_b (int): Blue background color, from 0 to 255.
fore_r (int): Red foreground color, from 0 to 255.
fore_g (int): Green foreground color, from 0 to 255.
fore_b (int): Blue foreground color, from 0 to 255.
char (AnyStr): A single character str or bytes object. | codesearchnet |
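Hypothetical usage on a console-like object carrying this method (the console variable is an assumption):
console.clear()                                 # reset to black with blank characters
console.clear(back_b=64, fore_r=255, char='.')  # dark-blue background, red '.' glyphs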
def normalize(self, text, normalizations=None):
for (normalization, kwargs) in self._parse_normalizations((normalizations or self._config.normalizations)):
try:
text = getattr(self, normalization)(text, **kwargs)
except AttributeError as e:
self._logger.debug('Invalid normalization: %s', e)
    return text | Normalize a given text by applying all configured normalizations.
Normalizations to apply can be specified through a list of
parameters and will be executed in that order.
Args:
text: The text to be processed.
normalizations: List of normalizations to apply.
Returns:
The text normalized. | codesearchnet |
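A hedged sketch of a call; the normalizer object and the normalization name are assumptions about how the surrounding class is configured:
cleaned = normalizer.normalize(
    'Some   text to  clean',
    normalizations=['remove_extra_white_spaces'],  # assumed normalization name
)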
def helper_add(access_token, ck_id, path, body):
full_path = ''.join([path, "('", ck_id, "')"])
full_path_encoded = urllib.parse.quote(full_path, safe='')
endpoint = ''.join([ams_rest_endpoint, full_path_encoded])
    return do_ams_put(endpoint, full_path_encoded, body, access_token, 'json_only', '1.0;NetFx') | Helper function to add strings to a URL path.
Args:
access_token (str): A valid Azure authentication token.
ck_id (str): A CK ID.
path (str): A URL Path.
body (str): A Body.
Returns:
HTTP response. JSON body. | codesearchnet |
def get_variation_for_feature(self, feature, user_id, attributes=None):
experiment = None
variation = None
bucketing_id = self._get_bucketing_id(user_id, attributes)
if feature.groupId:
group = self.config.get_group(feature.groupId)
if group:
experiment = self.get_experiment_in_group(group, bucketing_id)
if experiment and experiment.id in feature.experimentIds:
variation = self.get_variation(experiment, user_id, attributes)
if variation:
self.logger.debug('User "%s" is in variation %s of experiment %s.' % (
user_id,
variation.key,
experiment.key
))
return Decision(experiment, variation, enums.DecisionSources.FEATURE_TEST)
else:
self.logger.error(enums.Errors.INVALID_GROUP_ID_ERROR.format('_get_variation_for_feature'))
elif feature.experimentIds:
experiment = self.config.get_experiment_from_id(feature.experimentIds[0])
if experiment:
variation = self.get_variation(experiment, user_id, attributes)
if variation:
self.logger.debug('User "%s" is in variation %s of experiment %s.' % (
user_id,
variation.key,
experiment.key
))
return Decision(experiment, variation, enums.DecisionSources.FEATURE_TEST)
if feature.rolloutId:
rollout = self.config.get_rollout_from_id(feature.rolloutId)
return self.get_variation_for_rollout(rollout, user_id, attributes)
else:
return Decision(None, None, enums.DecisionSources.ROLLOUT) | Returns the experiment/variation the user is bucketed in for the given feature.
Args:
feature: Feature for which we are determining if it is enabled or not for the given user.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Decision namedtuple consisting of experiment and variation for the user. | juraj-google-style |
def remove_send_message(self, connection):
if connection in self._send_message:
del self._send_message[connection]
LOGGER.debug("Removed send_message function "
"for connection %s", connection)
else:
LOGGER.warning("Attempted to remove send_message "
"function for connection %s, but no "
"send_message function was registered",
connection) | Removes a send_message function previously registered
with the Dispatcher.
Args:
connection (str): A locally unique identifier provided
by the receiver of messages. | juraj-google-style |
def __get_scope(cls,
expr: Union['Expression', Tuple]) -> Set[str]:
scope = set()
for i, atom in enumerate(expr):
if isinstance(atom, Expression):
scope.update(cls.__get_scope(atom._expr))
elif type(atom) in [tuple, list]:
scope.update(cls.__get_scope(atom))
elif atom == 'pvar_expr':
functor, params = expr[i+1]
arity = len(params) if params is not None else 0
name = '{}/{}'.format(functor, arity)
scope.add(name)
break
return scope | Returns the set of fluents in the expression's scope.
Args:
expr: Expression object or nested tuple of Expressions.
Returns:
The set of fluents in the expression's scope. | juraj-google-style |
def _is_valid_netmask(self, prefixlen):
try:
prefixlen = int(prefixlen)
except ValueError:
return False
return 0 <= prefixlen <= self._max_prefixlen | Verify that the netmask/prefixlen is valid.
Args:
prefixlen: A string, the netmask in prefix length format.
Returns:
A boolean, True if the prefix represents a valid IPv6
netmask. | juraj-google-style |
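Illustrative checks (hedged: net is an assumed IPv6 network instance, so self._max_prefixlen is 128):
net._is_valid_netmask('64')   # True
net._is_valid_netmask('129')  # False - exceeds the maximum prefix length
net._is_valid_netmask('abc')  # False - not an integer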
def do_ams_auth(endpoint, body):
headers = {"content-type": "application/x-www-form-urlencoded",
"Accept": json_acceptformat}
return requests.post(endpoint, data=body, headers=headers) | Acquire Media Services Authentication Token.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
body (str): A Content Body.
Returns:
HTTP response. JSON body. | juraj-google-style |
def insert_into_range(self, operations: ops.OP_TREE, start: int, end: int) -> int:
if (not (0 <= start <= end <= len(self))):
raise IndexError('Bad insert indices: [{}, {})'.format(start, end))
operations = list(ops.flatten_op_tree(operations))
for op in operations:
self._device.validate_operation(op)
i = start
op_index = 0
while (op_index < len(operations)):
op = operations[op_index]
while ((i < end) and (not self._device.can_add_operation_into_moment(op, self._moments[i]))):
i += 1
if (i >= end):
break
self._moments[i] = self._moments[i].with_operation(op)
op_index += 1
if (op_index >= len(operations)):
return end
return self.insert(end, operations[op_index:]) | Writes operations inline into an area of the circuit.
Args:
    operations: An operation or tree of operations to insert.
    start: The start of the range (inclusive) to write the
        given operations into.
    end: The end of the range (exclusive) to write the given
        operations into. If there are still operations remaining,
        new moments are created to fit them.
Returns:
An insertion index that will place operations after the operations
that were inserted by this method.
Raises:
IndexError: Bad inline_start and/or inline_end. | codesearchnet |
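A hedged sketch using public cirq objects; the exact placement depends on the device/moment constraints described above:
import cirq

a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit([cirq.Moment([cirq.H(a)]), cirq.Moment([cirq.H(b)])])
# Try to place X(b) somewhere in moments [0, 2); returns the next insertion index.
next_index = circuit.insert_into_range([cirq.X(b)], 0, 2)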
def plug(update_kwargs=True, **plugs_map):
for a_plug in plugs_map.values():
if (not (isinstance(a_plug, PlugPlaceholder) or issubclass(a_plug, BasePlug))):
raise InvalidPlugError(('Plug %s is not a subclass of plugs.BasePlug nor a placeholder for one' % a_plug))
def result(func):
        """Wrap the given function and return the wrapper.

        Args:
            func: The function to wrap.

        Returns:
            A PhaseDescriptor that, when called, will invoke the wrapped function,
            passing plugs as keyword args.

        Raises:
            DuplicatePlugError: If a plug name is declared twice for the
                same function.
        """
phase = openhtf.core.phase_descriptor.PhaseDescriptor.wrap_or_copy(func)
duplicates = (frozenset((p.name for p in phase.plugs)) & frozenset(plugs_map))
if duplicates:
raise DuplicatePlugError(('Plugs %s required multiple times on phase %s' % (duplicates, func)))
phase.plugs.extend([PhasePlug(name, a_plug, update_kwargs=update_kwargs) for (name, a_plug) in six.iteritems(plugs_map)])
return phase
return result | Creates a decorator that passes in plugs when invoked.
This function returns a decorator for a function that will replace positional
arguments to that function with the plugs specified. See the module
docstring for details and examples.
Note this decorator does not work with class or bound methods, but does work
with @staticmethod.
Args:
update_kwargs: If true, makes the decorated phase take this plug as a kwarg.
**plugs_map: Dict mapping name to Plug type.
Returns:
A PhaseDescriptor that will pass plug instances in as kwargs when invoked.
Raises:
InvalidPlugError: If a type is provided that is not a subclass of BasePlug. | codesearchnet |
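A minimal openhtf-style sketch; the FlashlightPlug class and its turn_on method are assumptions used only for illustration:
from openhtf import plugs

class FlashlightPlug(plugs.BasePlug):
    def turn_on(self):
        pass  # hypothetical hardware call

@plugs.plug(flashlight=FlashlightPlug)
def blink_phase(test, flashlight):
    flashlight.turn_on()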
def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
step = 0
logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float32)
for i, output_logit in enumerate(start_or_end_logits):
batch_size = output_logit.shape[0]
cols = output_logit.shape[1]
if step + batch_size < len(dataset):
logits_concat[step:step + batch_size, :cols] = output_logit
else:
logits_concat[step:, :cols] = output_logit[:len(dataset) - step]
step += batch_size
return logits_concat | Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor
Args:
start_or_end_logits(:obj:`tensor`):
This is the output predictions of the model. We can only enter either start or end logits.
        dataset: The evaluation dataset (only its length is used).
max_len(:obj:`int`):
The maximum length of the output tensor. ( See the model.eval() part for more details ) | github-repos |
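A toy shape check (the batch shapes below are made up; only len(dataset) and the logit shapes matter):
import numpy as np

start_logits = [np.zeros((8, 384), dtype=np.float32), np.zeros((5, 300), dtype=np.float32)]
eval_examples = list(range(13))  # stands in for the evaluation dataset
gathered = create_and_fill_np_array(start_logits, eval_examples, max_len=384)
assert gathered.shape == (13, 384)  # unfilled positions keep the -100 padding value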
def changes(self):
output = []
if (self.status() is self.UNMODIFIED):
output = [(self.formatter % (' ', self.key, self.old_value))]
elif (self.status() is self.ADDED):
output.append((self.formatter % ('+', self.key, self.new_value)))
elif (self.status() is self.REMOVED):
output.append((self.formatter % ('-', self.key, self.old_value)))
elif (self.status() is self.MODIFIED):
output.append((self.formatter % ('-', self.key, self.old_value)))
output.append((self.formatter % ('+', self.key, self.new_value)))
return output | Returns a list of changes to represent the diff between
old and new value.
Returns:
list: [string] representation of the change (if any)
between old and new value | codesearchnet |
def run_instruction(self, op: opcodes.Opcode, state: frame_state.FrameState) -> frame_state.FrameState:
_opcode_counter.inc(op.name)
self.frame.current_opcode = op
self._importing = 'IMPORT' in op.__class__.__name__
if log.isEnabledFor(logging.INFO):
vm_utils.log_opcode(op, state, self.frame, len(self.frames))
if op.line in self._branch_tracker.matches.match_cases:
state = self._handle_match_case(state, op)
bytecode_fn = getattr(self, f'byte_{op.name}', None)
if bytecode_fn is None:
raise VirtualMachineError(f'Unknown opcode: {op.name}')
state = bytecode_fn(state, op)
if state.why in ('reraise', 'Never'):
state = state.set_why('exception')
implicit_return = op.name in ('RETURN_VALUE', 'RETURN_CONST') and op.line not in self._director.return_lines
if len(self.frames) <= 2:
for err in self._branch_tracker.check_ending(op, implicit_return):
self.ctx.errorlog.incomplete_match(self.frames, err.line, err.cases)
self.frame.current_opcode = None
return state | Run a single bytecode instruction.
Args:
op: An opcode.
state: The state just before running this instruction.
Returns:
The state right after this instruction that should roll over to the
subsequent instruction. If this opcode aborts this function (e.g. through
a 'raise'), then the state's "why" attribute is set to the abort reason.
Raises:
VirtualMachineError: if a fatal error occurs. | github-repos |
def find_dependency_wheels(tile):
    return [os.path.join(x.folder, 'python', x.support_wheel) for x in _iter_dependencies(tile) if x.has_wheel] | Return a list of paths to all python wheels created by dependencies of this tile
Args:
tile (IOTile): Tile that we should scan for dependencies
Returns:
list: A list of paths to dependency wheels | codesearchnet |
async def _async_wait_for_process(future_process: Any, out: Optional[Union[(TeeCapture, IO[str])]]=sys.stdout, err: Optional[Union[(TeeCapture, IO[str])]]=sys.stderr) -> CommandOutput:
process = (await future_process)
future_output = _async_forward(process.stdout, out)
future_err_output = _async_forward(process.stderr, err)
(output, err_output) = (await asyncio.gather(future_output, future_err_output))
(await process.wait())
return CommandOutput(output, err_output, process.returncode) | Awaits the creation and completion of an asynchronous process.
Args:
future_process: The eventually created process.
out: Where to write stuff emitted by the process' stdout.
err: Where to write stuff emitted by the process' stderr.
Returns:
A (captured output, captured error output, return code) triplet. | codesearchnet |
def _get_samples_shared_with(self, other, index=None):
if isinstance(other, (pd.DataFrame, Projection)):
df_other = (other.coords if isinstance(other, Projection) else other)
if (len(set(df_other.index)) != len(df_other.index)):
raise ValueError('other index has duplicates')
if (len(set(self.coords.index)) != len(self.coords.index)):
raise ValueError('This projection index has duplicates')
if index:
uniq_idx = set(index)
if (len(uniq_idx) != len(index)):
                raise ValueError('index has duplicates')
if (uniq_idx - set(df_other.index)):
raise ValueError('index has samples not in other')
if (uniq_idx - set(self.coords.index)):
raise ValueError('index has samples not in this projection')
else:
uniq_idx = (set(df_other.index) & set(self.coords.index))
if (not len(uniq_idx)):
raise ValueError('No samples shared between other and this projection')
idx = list(uniq_idx)
return (self.coords.loc[(idx, :)].values, df_other.loc[(idx, :)].values)
else:
other = np.array(other)
if (other.shape != self.coords.shape):
raise ValueError('array-like must have the same shape as self.coords')
return (self.coords.values, other) | Find samples shared with another dataset.
Args:
other
(:py:class:`pymds.Projection` or :py:class:`pandas.DataFrame`
or `array-like`):
The other dataset. If `other` is an instance of
:py:class:`pymds.Projection` or :py:class:`pandas.DataFrame`,
then `other` must have indexes in common with this projection.
If `array-like`, then other must have same dimensions as
`self.coords`.
index (`list-like` or `None`): If `other` is an instance of
:py:class:`pymds.Projection` or :py:class:`pandas.DataFrame`
then only return samples in index.
Returns:
`tuple`: containing:
- this (`numpy.array`) Shape [`x`, `n`].
- other (`numpy.array`) Shape [`x`, `n`]. | codesearchnet |
def zip_fit_params(data):
(genes, cells) = data.shape
m = data.mean(1)
v = data.var(1)
M = ((v - m) / (((m ** 2) + v) - m))
M = np.array([min(1.0, max(0.0, x)) for x in M])
L = ((m + (v / m)) - 1.0)
L[np.isnan(L)] = 0.0
L = np.array([max(0.0, x) for x in L])
return (L, M) | Returns the ZIP parameters that best fit a given data set.
Args:
data (array): 2d array of genes x cells belonging to a given cluster
Returns:
L (array): 1d array of means
M (array): 1d array of zero-inflation parameter | codesearchnet |
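A quick sanity check on synthetic counts (shapes are the only claim here):
import numpy as np

data = np.random.poisson(2.0, size=(5, 100)).astype(float)  # 5 genes x 100 cells
L, M = zip_fit_params(data)
assert L.shape == (5,) and M.shape == (5,)  # one mean and one zero-inflation value per gene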
def probability_density(self, X):
self.check_fit()
return norm.pdf(X, loc=self.mean, scale=self.std) | Compute probability density.
Arguments:
X: `np.ndarray` of shape (n, 1).
Returns:
np.ndarray | juraj-google-style |
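Hedged usage, assuming a Gaussian univariate model object whose fit() sets self.mean and self.std (the class name is an assumption):
import numpy as np

dist = GaussianUnivariate()  # assumed class providing fit() and check_fit()
dist.fit(np.array([1.0, 2.0, 3.0, 4.0]))
densities = dist.probability_density(np.array([[2.5]]))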
def StartsWith(this, that):
this_iter = iter(this)
that_iter = iter(that)
while True:
try:
      that_value = next(that_iter)
except StopIteration:
return True
try:
      this_value = next(this_iter)
except StopIteration:
return False
if this_value != that_value:
      return False | Checks whether the items of one iterable are a prefix of another.
Args:
this: An iterable that needs to be checked.
that: An iterable of which items must match the prefix of `this`.
Returns:
`True` if `that` is a prefix of `this`, `False` otherwise. | juraj-google-style |
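Quick checks of the prefix semantics:
StartsWith([1, 2, 3], [1, 2])  # True: [1, 2] is a prefix of [1, 2, 3]
StartsWith([1, 2, 3], [2, 3])  # False: first items already differ
StartsWith("abc", "")          # True: the empty iterable is a prefix of anything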
def _hertz_to_mel(frequencies_hertz, name=None):
with ops.name_scope(name, 'hertz_to_mel', [frequencies_hertz]):
frequencies_hertz = ops.convert_to_tensor(frequencies_hertz)
return _MEL_HIGH_FREQUENCY_Q * math_ops.log(1.0 + frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ) | Converts frequencies in `frequencies_hertz` in Hertz to the mel scale.
Args:
frequencies_hertz: A `Tensor` of frequencies in Hertz.
name: An optional name for the operation.
Returns:
A `Tensor` of the same shape and type of `frequencies_hertz` containing
frequencies in the mel scale. | github-repos |
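A hedged sketch of calling the helper directly (it is private to tf.signal, so importing it is an assumption for illustration):
import tensorflow as tf

freqs = tf.constant([125.0, 1000.0, 8000.0])
mels = _hertz_to_mel(freqs)  # tensor of the same shape, values on the mel scale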
def ub_to_str(string):
if not isinstance(string, str):
if six.PY2:
return str(string)
else:
return string.decode()
    return string | Converts a py2 unicode / py3 bytestring into str.
Args:
string (unicode, byte_string): string to be converted
Returns:
(str) | juraj-google-style |
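Behaviour sketch on Python 3 (the Python 2 branch simply calls str()):
ub_to_str('already str')   # 'already str' (returned unchanged)
ub_to_str(b'bytes value')  # 'bytes value' (decoded with the default codec)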
def add_argument(self, parser, bootstrap=False):
tmp_default = self.default
exclusive_grp = parser.add_mutually_exclusive_group()
self.default = True
args = self._get_argparse_names(parser.prefix_chars)
kwargs = self._get_argparse_kwargs(bootstrap)
exclusive_grp.add_argument(*args, **kwargs)
self.default = False
args = self._get_argparse_names(parser.prefix_chars)
kwargs = self._get_argparse_kwargs(bootstrap)
exclusive_grp.add_argument(*args, **kwargs)
self.default = tmp_default | Add boolean item as an argument to the given parser.
An exclusive group is created on the parser, which will add
a boolean-style command line argument to the parser.
Examples:
A non-nested boolean value with the name 'debug' will result
in a command-line argument like the following:
'--debug/--no-debug'
Args:
parser (argparse.ArgumentParser): The parser to add this item to.
bootstrap (bool): Flag to indicate whether you only want to mark
this item as required or not. | codesearchnet |