code | docstring | source
---|---|---|
def __init__(self, output_path):
self._output_path = output_path
self._profile = cProfile.Profile() if self._output_path else None | Initialize.
Args:
output_path: A pathname for the profiler output. An empty string
indicates that no profiling should be done. | github-repos |
def _to_numpy(a):
if isinstance(a, ops.EagerTensor):
return a.numpy()
if isinstance(a, tensor.Tensor):
sess = ops.get_default_session()
return sess.run(a)
if isinstance(a, indexed_slices.IndexedSlicesValue):
arr = np.zeros(a.dense_shape)
assert len(a.values) == len(a.indices), 'IndexedSlicesValue has %s value slices but %s indices\n%s' % (a.values, a.indices, a)
for values_slice, index in zip(a.values, a.indices):
assert 0 <= index < len(arr), 'IndexedSlicesValue has invalid index %s\n%s' % (index, a)
arr[index] += values_slice
return arr
return a | Converts Tensors, EagerTensors, and IndexedSlicesValue to numpy arrays.
Args:
a: any value.
Returns:
If a is EagerTensor or Tensor, returns the evaluation of a by calling
numpy() or run(). If a is IndexedSlicesValue, constructs the corresponding
dense numpy array. Otherwise returns a unchanged. | github-repos |
def StartProfiling(self, configuration, identifier):
if not configuration:
return
if configuration.HaveProfileTasks():
self._tasks_profiler = profilers.TasksProfiler(identifier, configuration)
self._tasks_profiler.Start() | Starts profiling.
Args:
configuration (ProfilingConfiguration): profiling configuration.
identifier (str): identifier of the profiling session used to create
the sample filename. | juraj-google-style |
def get_geno_marker(self, marker, return_index=False):
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
if marker not in self._bim.index:
raise ValueError("{}: marker not in BIM".format(marker))
seek_index = self._bim.loc[marker, "i"]
self.seek(seek_index)
if return_index:
return self._read_current_marker(), seek_index
return self._read_current_marker() | Gets the genotypes for a given marker.
Args:
marker (str): The name of the marker.
return_index (bool): Whether to return the marker's index or not.
Returns:
numpy.ndarray: The genotypes of the marker (additive format). | juraj-google-style |
def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3, probe_rad=0.1):
with ScratchDir('.'):
name = 'temp_zeo'
zeo_inp_filename = (name + '.cssr')
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
if rad_dict:
rad_file = (name + '.rad')
with open(rad_file, 'w') as fp:
for el in rad_dict.keys():
fp.write('{0} {1}\n'.format(el, rad_dict[el]))
atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file)
vol_str = volume(atmnet, 0.3, probe_rad, 10000)
sa_str = surface_area(atmnet, 0.3, probe_rad, 10000)
vol = None
sa = None
for line in vol_str.split('\n'):
if ('Number_of_pockets' in line):
fields = line.split()
if (float(fields[1]) > 1):
vol = (- 1.0)
break
if (float(fields[1]) == 0):
vol = (- 1.0)
break
vol = float(fields[3])
for line in sa_str.split('\n'):
if ('Number_of_pockets' in line):
fields = line.split()
if (float(fields[1]) > 1):
sa = (- 1.0)
break
if (float(fields[1]) == 0):
sa = (- 1.0)
break
sa = float(fields[3])
if ((not vol) or (not sa)):
raise ValueError('Error in zeo++ output stream')
return (vol, sa) | Computes the volume and surface area of isolated void using Zeo++.
Useful for computing the volume and surface area of a vacant site.
Args:
structure: pymatgen Structure containing vacancy
rad_dict(optional): Dictionary with short name of elements and their
radii.
chan_rad(optional): Minimum channel Radius.
probe_rad(optional): Probe radius for Monte Carlo sampling.
Returns:
volume: floating number representing the volume of void | codesearchnet |
def equals(self, actual_seq):
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual | Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool | juraj-google-style |
def call(self, inputs):
net = self.encoder_net(tf.cast(inputs, tf.float32))
return ed.MultivariateNormalDiag(loc=net[(..., :self.latent_size)], scale_diag=tf.nn.softplus(net[(..., self.latent_size:)]), name='latent_code_posterior') | Runs the model forward to return a stochastic encoding.
Args:
inputs: Tensor of shape [1, num_productions, num_production_rules]. It is
a sequence of productions of length `num_productions`. Each production
is a one-hot vector of length `num_production_rules`: it determines
which production rule the production corresponds to.
Returns:
latent_code_posterior: A random variable capturing a sample from the
variational distribution, of shape [1, self.latent_size]. | codesearchnet |
def broadcast_change():
(_, res) = win32gui.SendMessageTimeout(win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 0, win32con.SMTO_ABORTIFHUNG, 5000)
return (not bool(res)) | Refresh the windows environment.
.. note::
This will only affect new processes and windows. Services will not see
the change until the system restarts.
Returns:
bool: True if successful, otherwise False
Usage:
.. code-block:: python
import salt.utils.win_reg
winreg.broadcast_change() | codesearchnet |
def s(self, data, errors='strict'):
try:
if data is None or isinstance(data, (int, list, dict)):
pass
elif isinstance(data, unicode):
try:
data.decode('utf-8')
except UnicodeEncodeError:
data = str(data.encode('utf-8').strip(), errors=errors)
self.log.warning(u'Encoding poorly encoded string ({})'.format(data))
except AttributeError:
pass
else:
data = str(data, 'utf-8', errors=errors)
except NameError:
pass
return data | Decode value using correct Python 2/3 method.
This method is intended to replace the :py:meth:`~tcex.tcex.TcEx.to_string` method with
better logic to handle poorly encoded unicode data in Python2 and still work in Python3.
Args:
data (any): Data to be validated and (de)encoded
errors (string): What method to use when dealing with errors.
Returns:
(string): Return decoded data | juraj-google-style |
def __init__(self, file_entry, bytes_per_sector):
super(TSKVolume, self).__init__(file_entry.name)
self._file_entry = file_entry
self._bytes_per_sector = bytes_per_sector | Initializes a volume.
Args:
file_entry (TSKPartitionFileEntry): a TSK partition file entry.
bytes_per_sector (int): number of bytes per sector. | juraj-google-style |
def connect_to(self, vertex, weight=1):
for edge in self.edges_out:
if (vertex == edge.vertex_in):
return edge
return Edge(self, vertex, weight) | Connect this vertex to another one.
Args:
vertex (Vertex): vertex to connect to.
weight (int): weight of the edge.
Returns:
Edge: the newly created edge. | codesearchnet |
def report_error_to_cluster(self, error_code, error_message):
if self._context_handle:
pywrap_tfe.TFE_ReportErrorToCluster(self._context_handle, error_code, error_message)
else:
raise ValueError('Context is not initialized.') | Report error to other members in a multi-client cluster.
Args:
error_code: a `tf.errors` error code.
error_message: a string. The error message. | github-repos |
def _infer_num_gpus_per_worker(devices):
if _is_device_list_single_worker(devices):
return sum((1 for d in devices if _is_gpu_device(d)))
else:
device_dict = _group_device_list(devices)
num_gpus = None
for _, devices_in_task in device_dict.items():
for device_in_task in devices_in_task:
if num_gpus is None:
num_gpus = sum((1 for d in device_in_task if _is_gpu_device(d)))
elif num_gpus != sum((1 for d in device_in_task if _is_gpu_device(d))):
raise ValueError('All workers should have the same number of GPUs.')
for d in device_in_task:
d_spec = tf_device.DeviceSpec.from_string(d)
if d_spec.device_type == 'GPU' and d_spec.device_index >= num_gpus:
raise ValueError('GPU `device_index` on a worker should be consecutive and start from 0.')
return num_gpus | Infers the number of GPUs on each worker.
Currently to make multi-worker cross device ops work, we need all workers to
have the same number of GPUs.
Args:
devices: a list of device strings, can be either local devices or remote
devices.
Returns:
number of GPUs per worker.
Raises:
ValueError if workers have different number of GPUs or GPU indices are not
consecutive and starting from 0. | github-repos |
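A minimal standalone sketch of the same per-worker counting idea, using plain device-string parsing instead of the TF-internal helpers; the device names below are hypothetical.
from collections import defaultdict

devices = [
    "/job:worker/task:0/device:GPU:0", "/job:worker/task:0/device:GPU:1",
    "/job:worker/task:1/device:GPU:0", "/job:worker/task:1/device:GPU:1",
]

# Group devices by (job, task) and count the GPUs in each group.
per_worker = defaultdict(int)
for d in devices:
    parts = dict(p.split(":", 1) for p in d.strip("/").split("/"))
    if parts["device"].startswith("GPU"):
        per_worker[(parts["job"], parts["task"])] += 1

counts = set(per_worker.values())
assert len(counts) == 1, "All workers should have the same number of GPUs."
num_gpus_per_worker = counts.pop()  # 2 in this example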
def atomic_download(handle, download_fn, module_dir, lock_file_timeout_sec=(10 * 60)):
lock_file = _lock_filename(module_dir)
task_uid = uuid.uuid4().hex
lock_contents = _lock_file_contents(task_uid)
tmp_dir = _temp_download_dir(module_dir, task_uid)
try:
while True:
try:
tf_utils.atomic_write_string_to_file(lock_file, lock_contents, overwrite=False)
if tf_v1.gfile.Exists(module_dir):
return module_dir
break
except tf.errors.OpError:
pass
_wait_for_lock_to_disappear(handle, lock_file, lock_file_timeout_sec)
logging.info("Downloading TF-Hub Module '%s'.", handle)
tf_v1.gfile.MakeDirs(tmp_dir)
download_fn(handle, tmp_dir)
_write_module_descriptor_file(handle, module_dir)
try:
tf_v1.gfile.Rename(tmp_dir, module_dir)
logging.info("Downloaded TF-Hub Module '%s'.", handle)
except tf.errors.AlreadyExistsError:
logging.warning('Module already exists in %s', module_dir)
finally:
try:
tf_v1.gfile.DeleteRecursively(tmp_dir)
except tf.errors.NotFoundError:
pass
try:
contents = tf_utils.read_file_to_string(lock_file)
except tf.errors.NotFoundError:
contents = ''
if (contents == lock_contents):
try:
tf_v1.gfile.Remove(lock_file)
except tf.errors.NotFoundError:
pass
return module_dir | Returns the path to a Module directory for a given TF-Hub Module handle.
Args:
handle: (string) Location of a TF-Hub Module.
download_fn: Callback function that actually performs download. The callback
receives two arguments, handle and the location of a temporary
directory to download the content into.
module_dir: Directory where to download the module files to.
lock_file_timeout_sec: The amount of time we give the current holder of
the lock to make progress in downloading a module.
If no progress is made, the lock is revoked.
Returns:
A string containing the path to a TF-Hub Module directory.
Raises:
ValueError: if the Module is not found. | codesearchnet |
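A minimal sketch of the lock-file idea this helper relies on, using os.open with O_CREAT | O_EXCL as the atomic "create only if absent" primitive; the paths and lock contents are hypothetical, and this is not the TF-Hub implementation.
import os, errno

def try_acquire_lock(lock_path, contents):
    # Atomically create lock_path; return True only if we now hold the lock.
    try:
        fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    except OSError as e:
        if e.errno == errno.EEXIST:
            return False  # another task is already downloading
        raise
    with os.fdopen(fd, "w") as f:
        f.write(contents)
    return True

if try_acquire_lock("/tmp/module_dir.lock", "task-1234"):
    try:
        pass  # download into a temp dir, then rename it into place
    finally:
        os.remove("/tmp/module_dir.lock")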
def _create_L_ind(self, L):
if issparse(L[0]):
L = [L_t.todense() for L_t in L]
L = self._to_numpy(L)
L_ind = np.ones((self.n, (self.m * self.k)))
for (yi, y) in enumerate(self.task_graph.feasible_set()):
for t in range(self.t):
L_ind[:, yi::self.k] *= np.where(np.logical_or((L[t] == y[t]), (L[t] == 0)), 1, 0)
L_ind[:, yi::self.k] *= np.where((sum(L) != 0), 1, 0)
return L_ind | Convert T label matrices with labels in 0...K_t to a one-hot format
Here we can view e.g. the $(i,j)$ entries of the $T$ label matrices as
a _label vector_ emitted by LF j for data point i.
Args:
L: a T-length list of [n,m] scipy.sparse label matrices with values
in {0,1,...,k}
Returns:
L_ind: An [n,m*k] dense np.ndarray with values in {0,1}
Note that no column is required for 0 (abstain) labels. | codesearchnet |
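A small self-contained NumPy sketch of the indicator expansion described above for a single task (T = 1): an [n, m] label matrix with values in {0, 1, ..., k} becomes an [n, m*k] matrix of {0, 1} with no column for abstains. The example values are made up.
import numpy as np

k = 2                                    # cardinality (labels 1..k, 0 = abstain)
L = np.array([[1, 0],                    # n=2 data points, m=2 labeling functions
              [2, 1]])
n, m = L.shape

L_ind = np.zeros((n, m * k))
for y in range(1, k + 1):                # one indicator column per (LF, label value) pair
    L_ind[:, (y - 1)::k] = np.where(np.logical_or(L == y, L == 0), 1, 0)
    L_ind[:, (y - 1)::k] *= (L != 0)     # abstains contribute no indicator
print(L_ind)                             # [[1. 0. 0. 0.] [0. 1. 1. 0.]]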
def append(self, value, key=''):
if isinstance(value, type('')) or isinstance(value, type(u'')):
value = ListItem(value)
keys = super(ListView, self).append(value, key=key)
if type(value) in (list, tuple, dict):
for k in keys:
if not self.EVENT_ONCLICK in self.children[k].attributes:
self.children[k].onclick.connect(self.onselection)
self.children[k].attributes['selected'] = False
else:
if not self.EVENT_ONCLICK in value.attributes:
value.onclick.connect(self.onselection)
value.attributes['selected'] = False
return keys | Appends child items to the ListView. The items are accessible by list.children[key].
Args:
value (ListItem, or iterable of ListItems): The child to be appended. In case of a dictionary,
each item's key is used as 'key' param for the single append.
key (str): The unique string identifier for the child. Ignored in case of iterable 'value'
param. | juraj-google-style |
def ConsumeInteger(self, is_long=False):
try:
result = _ParseAbstractInteger(self.token, is_long=is_long)
except ValueError as e:
raise self.ParseError(str(e))
self.NextToken()
return result | Consumes an integer number.
Args:
is_long: True if the value should be returned as a long integer.
Returns:
The integer parsed.
Raises:
ParseError: If an integer couldn't be consumed. | codesearchnet |
def _SetValues(self, values):
def _ToStr(value):
if isinstance(value, (list, tuple)):
result = []
for val in value:
result.append(str(val))
return result
else:
return str(value)
if isinstance(values, Row):
if self._keys != values.header:
raise TypeError('Attempt to append row with mismatched header.')
self._values = copy.deepcopy(values.values)
elif isinstance(values, dict):
for key in self._keys:
if key not in values:
raise TypeError('Dictionary key mismatch with row.')
for key in self._keys:
self[key] = _ToStr(values[key])
elif isinstance(values, list) or isinstance(values, tuple):
if len(values) != len(self._values):
raise TypeError('Supplied list length != row length')
for (index, value) in enumerate(values):
self._values[index] = _ToStr(value)
else:
raise TypeError('Supplied argument must be Row, dict or list, not %s',
type(values)) | Set values from supplied dictionary or list.
Args:
values: A Row, dict indexed by column name, or list.
Raises:
TypeError: Argument is not a list or dict, or list is not equal row
length or dictionary keys don't match. | juraj-google-style |
def normalize_audio_buffer(buf, volume_percentage, sample_width=2):
if (sample_width != 2):
raise Exception('unsupported sample width:', sample_width)
scale = (math.pow(2, ((1.0 * volume_percentage) / 100)) - 1)
arr = array.array('h', buf)
for idx in range(0, len(arr)):
arr[idx] = int((arr[idx] * scale))
buf = arr.tostring()
return buf | Adjusts the loudness of the audio data in the given buffer.
Volume normalization is done by scaling the amplitude of the audio
in the buffer by a scale factor of 2^(volume_percentage/100)-1.
For example, 50% volume scales the amplitude by a factor of 0.414,
and 75% volume scales the amplitude by a factor of 0.681.
For now we only support sample_width 2.
Args:
buf: byte string containing audio data to normalize.
volume_percentage: volume setting as an integer percentage (1-100).
sample_width: size of a single sample in bytes. | codesearchnet |
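A short worked check of the scale factors quoted in the docstring, plus a hypothetical call on a tiny 16-bit buffer (note that the helper itself uses the old array.tostring() API).
import array, math

# Scale factor from the docstring: 2**(volume/100) - 1
print(math.pow(2, 50 / 100.0) - 1)   # ~0.414 at 50% volume
print(math.pow(2, 75 / 100.0) - 1)   # ~0.681 at 75% volume

samples = array.array('h', [1000, -2000, 3000])           # made-up 16-bit samples
quieter = normalize_audio_buffer(samples.tobytes(), 50)   # amplitudes scaled by ~0.414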
def google_api_execute(config, auth, api_call, results, errors, append=None):
try:
rows = API(config, api_call).execute()
if results:
if isinstance(rows, dict):
rows = [rows]
elif results.get('bigquery', {}).get('format', 'JSON') == 'CSV':
rows = [[r] for r in rows]
if config.verbose:
print('.', end='', flush=True)
if append:
rows = google_api_append(append, api_call['kwargs'], rows)
yield from map(lambda r: Discovery_To_BigQuery.clean(r), rows)
except HttpError as e:
if errors:
rows = [{'Error': str(e), 'Parameters': [{'Key': k, 'Value': str(v)} for k, v in api_call['kwargs'].items()]}]
put_rows(config, auth, errors, rows)
if 'bigquery' in errors:
errors['bigquery']['disposition'] = 'WRITE_APPEND'
else:
raise e | Execute the actual API call and write to the end points defined.
The API call is completely defined at this point.
The results and error definition is optional.
Args:
auth (string): either "user" or "service" to make the API call.
api_call (dict): the JSON for the API call as defined in recipe.
results (dict): defines where the data will be written
errors (dict): defines where the errors will be written
append (dict): optional parameters to append to each row, given as BQ schema
Returns (dict):
None, all data is transferred between API / BigQuery
Raises:
ValueError: If a required key in the recipe is missing. | github-repos |
def symmetric_kl_divergence(predicted, actual):
epsilon = tf.constant(1e-07, dtype=tf.float32, name='epsilon')
p = tf.math.maximum(predicted, epsilon)
q = tf.math.maximum(actual, epsilon)
kld_1 = tf.math.reduce_sum(tf.math.multiply(p, tf.math.log(tf.math.divide(p, q))))
kld_2 = tf.math.reduce_sum(tf.math.multiply(q, tf.math.log(tf.math.divide(q, p))))
return tf.add(kld_1, kld_2) | Calculate symmetric KL-divergence over two classification tensors.
Note that here the classifications do not form a probability distribution.
They are, however normalized to 0..1 and calculating a KL-divergence over them
gives reasonable numerical results.
Shape of the two inputs must be the same at inference time but is otherwise
unconstrained.
Args:
predicted: classification outputs from model
actual: golden classification outputs
Returns:
Single scalar tensor with symmetric KL-divergence between predicted and
actual. | github-repos |
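A hypothetical call with two small classification tensors of the same shape; the values are made up and, as the docstring notes, need not sum to one.
import tensorflow as tf

predicted = tf.constant([0.9, 0.2, 0.4], dtype=tf.float32)
actual = tf.constant([1.0, 0.0, 0.5], dtype=tf.float32)
skl = symmetric_kl_divergence(predicted, actual)   # non-negative scalar tf.Tensor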
def put_pixel(self, x: int, y: int, color: Tuple[(int, int, int)]) -> None:
lib.TCOD_image_put_pixel(self.image_c, x, y, color) | Change a pixel on this Image.
Args:
x (int): X pixel of the Image. Starting from the left at 0.
y (int): Y pixel of the Image. Starting from the top at 0.
color (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance. | codesearchnet |
def setupSerialPort(loopback, port):
if loopback:
testSerial = SerialTestClass()
serialPort = testSerial.serialPort
else:
serialPort = serial.Serial(port, 115200, timeout=0)
return serialPort | Sets up serial port by connecting to phsyical or software port.
Depending on command line options, this function will either connect to a
SerialTestClass() port for loopback testing or to the specified port from
the command line option. If loopback is True it overrides the physical port
specification.
Args:
loopback: argparse option
port: argparse option
Returns:
serialPort: Pyserial serial port instance | codesearchnet |
def create_unbroadcast_axis(shape, broadcast_shape):
return tuple(
-(1 + i)
for i in range(len(broadcast_shape))
if i >= len(shape) or broadcast_shape[-(1 + i)] > shape[-(1 + i)]) | Creates the reduction axis for unbroadcasting.
Args:
shape: A list. The shape after the broadcast operation.
broadcast_shape: A list. The original shape the array being unbroadcast
had.
Returns:
A list. The axes along which the array needs to be reduced. These axes will
be distributed evenly into the original shape. | juraj-google-style |
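A small worked example: a gradient broadcast from shape [3] up to [2, 3] has to be summed over the added leading axis, which this helper reports as -2. The np.sum line shows how the result is typically used; the arrays are made up.
import numpy as np

axes = create_unbroadcast_axis(shape=[3], broadcast_shape=[2, 3])
print(axes)                      # (-2,)

grad = np.ones((2, 3))           # gradient w.r.t. the broadcast result
unbroadcast = np.sum(grad, axis=axes).reshape([3])
print(unbroadcast)               # [2. 2. 2.]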
def write_rst(self,
prefix: str = "",
suffix: str = "",
heading_underline_char: str = "=",
method: AutodocMethod = None,
overwrite: bool = False,
mock: bool = False) -> None:
content = self.rst_content(
prefix=prefix,
suffix=suffix,
heading_underline_char=heading_underline_char,
method=method
)
write_if_allowed(self.target_rst_filename, content,
overwrite=overwrite, mock=mock) | Writes the RST file to our destination RST filename, making any
necessary directories.
Args:
prefix: as for :func:`rst_content`
suffix: as for :func:`rst_content`
heading_underline_char: as for :func:`rst_content`
method: as for :func:`rst_content`
overwrite: overwrite the file if it exists already?
mock: pretend to write, but don't | juraj-google-style |
def csv_to_dict(file_name, file_location):
file = __os.path.join(file_location, file_name)
try:
csv_read = open(file, "r")
except Exception as e:
LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))
print('Error {error} ignoring any errors'.format(error=e))
csv_read = open(file, "r", errors='ignore')
data_row = __csv.DictReader(csv_read, dialect="excel")
dict_key = 1
temp_dict = dict()
for row in data_row:
temp_dict[dict_key] = row
dict_key += 1
csv_read.close()
return temp_dict | Function to import a csv as a dictionary
Args:
file_name: The name of the csv file
file_location: The location of the file, derive from the os module
Returns: returns a dictionary | juraj-google-style |
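A hypothetical call; the file name and directory are made up. Rows come back keyed by 1-based integers, each row being a dict keyed by the CSV header.
import os

rows = csv_to_dict('inventory.csv', os.getcwd())   # hypothetical file in the working directory
first_row = rows[1]                                # e.g. {'hostname': 'sw01', 'site': 'nyc'}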
def ParseDestList(self, parser_mediator, olecf_item):
header_map = self._GetDataTypeMap('dest_list_header')
try:
header, entry_offset = self._ReadStructureFromFileObject(
olecf_item, 0, header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'Unable to parse DestList header with error: {0!s}'.format(
exception))
if header.format_version == 1:
entry_map = self._GetDataTypeMap('dest_list_entry_v1')
elif header.format_version in (3, 4):
entry_map = self._GetDataTypeMap('dest_list_entry_v3')
else:
parser_mediator.ProduceExtractionWarning(
'unsupported format version: {0:d}.'.format(header.format_version))
return
while entry_offset < olecf_item.size:
try:
entry, entry_data_size = self._ReadStructureFromFileObject(
olecf_item, entry_offset, entry_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'Unable to parse DestList entry with error: {0!s}'.format(
exception))
display_name = 'DestList entry at offset: 0x{0:08x}'.format(entry_offset)
try:
droid_volume_identifier = self._ParseDistributedTrackingIdentifier(
parser_mediator, entry.droid_volume_identifier, display_name)
except (TypeError, ValueError) as exception:
droid_volume_identifier = ''
parser_mediator.ProduceExtractionWarning(
'unable to read droid volume identifier with error: {0!s}'.format(
exception))
try:
droid_file_identifier = self._ParseDistributedTrackingIdentifier(
parser_mediator, entry.droid_file_identifier, display_name)
except (TypeError, ValueError) as exception:
droid_file_identifier = ''
parser_mediator.ProduceExtractionWarning(
'unable to read droid file identifier with error: {0!s}'.format(
exception))
try:
birth_droid_volume_identifier = (
self._ParseDistributedTrackingIdentifier(
parser_mediator, entry.birth_droid_volume_identifier,
display_name))
except (TypeError, ValueError) as exception:
birth_droid_volume_identifier = ''
parser_mediator.ProduceExtractionWarning((
'unable to read birth droid volume identifier with error: '
'{0:s}').format(
exception))
try:
birth_droid_file_identifier = self._ParseDistributedTrackingIdentifier(
parser_mediator, entry.birth_droid_file_identifier, display_name)
except (TypeError, ValueError) as exception:
birth_droid_file_identifier = ''
parser_mediator.ProduceExtractionWarning((
'unable to read birth droid file identifier with error: '
'{0:s}').format(
exception))
if entry.last_modification_time == 0:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
else:
date_time = dfdatetime_filetime.Filetime(
timestamp=entry.last_modification_time)
event_data = AutomaticDestinationsDestListEntryEventData()
event_data.birth_droid_file_identifier = birth_droid_file_identifier
event_data.birth_droid_volume_identifier = birth_droid_volume_identifier
event_data.droid_file_identifier = droid_file_identifier
event_data.droid_volume_identifier = droid_volume_identifier
event_data.entry_number = entry.entry_number
event_data.hostname = entry.hostname.rstrip('\x00')
event_data.offset = entry_offset
event_data.path = entry.path.rstrip('\x00')
event_data.pin_status = entry.pin_status
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
entry_offset += entry_data_size | Parses the DestList OLECF item.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
olecf_item (pyolecf.item): OLECF item.
Raises:
UnableToParseFile: if the DestList cannot be parsed. | juraj-google-style |
def __init__(self, name, description, *labels):
super(BoolGauge, self).__init__('BoolGauge', _bool_gauge_methods, len(labels), name, description, *labels) | Creates a new BoolGauge.
Args:
name: name of the new metric.
description: description of the new metric.
*labels: The label list of the new metric. | github-repos |
def timestamp_YmdHMS(value):
i = int(value)
S = i
M = S // 100
H = M // 100
d = H // 100
m = d // 100
Y = m // 100
return int(calendar.timegm((
Y % 10000, m % 100, d % 100, H % 100, M % 100, S % 100, 0, 0, 0)
)) | Convert timestamp string to time in seconds since epoch.
Timestamps strings like '20130618120000' are able to be converted by this
function.
Args:
value: A timestamp string in the format '%Y%m%d%H%M%S'.
Returns:
The time in seconds since epoch as an integer.
Raises:
ValueError: If timestamp is invalid.
Note: The timezone is assumed to be UTC/GMT. | juraj-google-style |
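A quick check against the docstring's example format, assuming the UTC interpretation it describes and the successive-division-by-100 body shown above.
import calendar, time

print(timestamp_YmdHMS('20130618120000'))   # 1371556800, i.e. 2013-06-18 12:00:00 UTC
assert timestamp_YmdHMS('20130618120000') == calendar.timegm(time.strptime('20130618120000', '%Y%m%d%H%M%S'))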
def _create_in_hdx(self, object_type, id_field_name, name_field_name, file_to_upload=None):
self.check_required_fields()
if ((id_field_name in self.data) and self._load_from_hdx(object_type, self.data[id_field_name])):
logger.warning(('%s exists. Updating %s' % (object_type, self.data[id_field_name])))
self._merge_hdx_update(object_type, id_field_name, file_to_upload)
else:
self._save_to_hdx('create', name_field_name, file_to_upload) | Helper method to check if resource exists in HDX and if so, update it, otherwise create it
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
name_field_name (str): Name of field containing HDX object name
file_to_upload (Optional[str]): File to upload to HDX (if url not supplied)
Returns:
None | codesearchnet |
def verify_cert(signature_chain_url: str) -> Optional[crypto.X509]:
try:
certs_chain_get = requests.get(signature_chain_url)
except requests.exceptions.ConnectionError as e:
log.error(f'Amazon signature chain get error: {e}')
return None
certs_chain_txt = certs_chain_get.text
certs_chain = extract_certs(certs_chain_txt)
amazon_cert: crypto.X509 = certs_chain.pop(0)
sc_url_verification = verify_sc_url(signature_chain_url)
if not sc_url_verification:
log.error(f'Amazon signature url {signature_chain_url} was not verified')
expired_verification = not amazon_cert.has_expired()
if not expired_verification:
log.error(f'Amazon certificate ({signature_chain_url}) expired')
sans_verification = verify_sans(amazon_cert)
if not sans_verification:
log.error(f'Subject alternative names verification for ({signature_chain_url}) certificate failed')
chain_verification = verify_certs_chain(certs_chain, amazon_cert)
if not chain_verification:
log.error(f'Certificates chain verification for ({signature_chain_url}) certificate failed')
result = (sc_url_verification and expired_verification and sans_verification and chain_verification)
return amazon_cert if result else None | Conducts series of Alexa SSL certificate verifications against Amazon Alexa requirements.
Args:
signature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.
Returns:
result: Amazon certificate if verification was successful, None if not. | juraj-google-style |
def flush(writer=None, name=None):
del name
if writer is None:
writer = _summary_state.writer
if writer is None:
return control_flow_ops.no_op()
if isinstance(writer, SummaryWriter):
return writer.flush()
raise ValueError('Invalid argument to flush(): %r' % (writer,)) | Forces summary writer to send any buffered data to storage.
This operation blocks until that finishes.
Args:
writer: The `tf.summary.SummaryWriter` to flush. If None, the current
default writer will be used instead; if there is no current writer, this
returns `tf.no_op`.
name: Ignored legacy argument for a name for the operation.
Returns:
The created `tf.Operation`. | github-repos |
def _Stat(self, path, ext_attrs=False):
local_path = client_utils.CanonicalPathToLocalPath(path)
result = client_utils.StatEntryFromPath(local_path, self.pathspec, ext_attrs=ext_attrs)
try:
result.symlink = utils.SmartUnicode(os.readlink(local_path))
except (OSError, AttributeError):
pass
return result | Returns stat information of a specific path.
Args:
path: A unicode string containing the path.
ext_attrs: Whether the call should also collect extended attributes.
Returns:
a StatResponse proto
Raises:
IOError when call to os.stat() fails | codesearchnet |
def _ParseLogLine(self, parser_mediator, key, structure):
time_elements_tuple = self._GetTimeElementsTuple(key, structure)
try:
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
self._last_month = time_elements_tuple[1]
event_data = MacWifiLogEventData()
event_data.agent = structure.agent
event_data.function = structure.function.strip()
event_data.text = structure.text
if key == 'known_function_logline':
event_data.action = self._GetAction(
event_data.function, event_data.text)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data) | Parse a single log line and produce an event object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file. | juraj-google-style |
def get_distance(self, node):
delta = ((node.pos[0] - self.pos[0]), (node.pos[1] - self.pos[1]))
return sqrt(((delta[0] ** 2) + (delta[1] ** 2))) | Get the distance beetween 2 nodes
Args:
node (object): The other node. | codesearchnet |
def ephemeris(self, **kwargs):
for orb in self.iter(inclusive=True, **kwargs):
yield orb | Generator giving the propagation of the orbit at different dates
Args:
start (Date)
stop (Date or timedelta)
step (timedelta)
Yield:
Orbit | juraj-google-style |
def copy_cwl_files(from_dir=CWL_PATH, to_dir=None):
cwl_files = glob.glob('{}{}*.cwl'.format(from_dir, os.sep))
if (len(cwl_files) > 0):
create_dirs(to_dir)
for fi in cwl_files:
fo = os.path.join(to_dir, os.path.basename(fi))
shutil.copy2(fi, fo)
return len(cwl_files) | Copy cwl files to a directory where the cwl-runner can find them.
Args:
from_dir (str): Path to directory where to copy files from (default:
the cwl directory of nlppln).
to_dir (str): Path to directory where the files should be copied to
(e.g., the CWL working directory). | codesearchnet |
def _validate_namespace(self, namespace):
if self._namespace_regex.fullmatch(namespace) is None:
LOGGER.debug('Invalid namespace: %s', namespace)
raise _ResponseFailed(self._status.INVALID_ADDRESS) | Validates a namespace, raising a ResponseFailed error if invalid.
Args:
state_root (str): The state_root to validate
Raises:
ResponseFailed: The state_root was invalid, and a status of
INVALID_ROOT will be sent with the response. | juraj-google-style |
class BridgeTowerProcessor(ProcessorMixin):
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'BridgeTowerImageProcessor'
tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[BridgeTowerProcessorKwargs]) -> BatchEncoding:
output_kwargs = self._merge_kwargs(BridgeTowerProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
encoding = self.tokenizer(text=text, **output_kwargs['text_kwargs'])
encoding_image_processor = self.image_processor(images, **output_kwargs['images_kwargs'])
encoding.update(encoding_image_processor)
return encoding
def batch_decode(self, *args, **kwargs):
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
return self.tokenizer.decode(*args, **kwargs)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) | Constructs a BridgeTower processor which wraps a Roberta tokenizer and BridgeTower image processor into a single
processor.
[`BridgeTowerProcessor`] offers all the functionalities of [`BridgeTowerImageProcessor`] and
[`RobertaTokenizerFast`]. See the docstring of [`~BridgeTowerProcessor.__call__`] and
[`~BridgeTowerProcessor.decode`] for more information.
Args:
image_processor (`BridgeTowerImageProcessor`):
An instance of [`BridgeTowerImageProcessor`]. The image processor is a required input.
tokenizer (`RobertaTokenizerFast`):
An instance of ['RobertaTokenizerFast`]. The tokenizer is a required input. | github-repos |
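A typical usage sketch; the checkpoint name is assumed to be one of the public BridgeTower checkpoints on the Hub, and the image URL is the usual COCO example picture.
import requests
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")  # assumed checkpoint id
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
encoding = processor(image, "two cats sleeping on a couch", return_tensors="pt")
print(encoding.keys())   # tokenizer outputs (input_ids, attention_mask) plus the image processor's pixel values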
def update_video(self, video_id, title="", description="", keywords="", access_control=AccessControl.Unlisted):
if not self.authenticated:
raise ApiError(_("Authentication is required"))
entry = self.fetch_video(video_id)
extension = self._access_control(access_control)
if extension:
entry.extension_elements = extension
if title:
entry.media.title.text = title
if description:
entry.media.description.text = description
success = Api.yt_service.UpdateVideoEntry(entry)
return success | Updates the video
Authentication is required
Params:
video_id: id of the video to update (the entry is fetched internally via 'fetch_video()')
title: string
description: string
keywords: string
Returns:
a video entry on success
None otherwise | juraj-google-style |
def attention_bias_prepend_inputs_full_attention(padding):
in_target = tf.cumsum(padding, axis=1, exclusive=True)
target_pos = tf.cumsum(in_target, axis=1)
illegal_connections = tf.greater(tf.expand_dims(target_pos, 1), tf.expand_dims(target_pos, 2))
bias = (tf.to_float(illegal_connections) * (- 1000000000.0))
bias = tf.expand_dims(bias, 1)
return bias | Create a bias tensor for prepend_mode="prepend_inputs_full_attention".
See prepend_inputs in common_hparams.py.
Produces a bias tensor to be used in self-attention.
This bias tensor allows for full connectivity in the "inputs" part of
the sequence and masked connectivity in the targets part.
Args:
padding: a float `Tensor` with shape [batch, length] with
ones in positions corresponding to padding. In each row, a single
padding position separates the input part from the target part.
Returns:
a `Tensor` with shape [batch, 1, length, length]. | codesearchnet |
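A NumPy sketch of the same masking logic on a made-up row: two input tokens, a single padding separator, then two target tokens. A query position is masked against any key position that lies deeper into the target part than itself, so the inputs part stays fully connected while the targets part is causally masked.
import numpy as np

padding = np.array([[0., 0., 1., 0., 0.]])                 # batch of 1, length 5
in_target = np.cumsum(padding, axis=1) - padding           # exclusive cumsum -> [0 0 0 1 1]
target_pos = np.cumsum(in_target, axis=1)                  # [0 0 0 1 2]

illegal = target_pos[:, None, :] > target_pos[:, :, None]  # [batch, length, length]
bias = illegal.astype(np.float32) * -1e9
bias = bias[:, None, :, :]                                  # [batch, 1, length, length]
print(bias[0, 0])    # -1e9 wherever a position would attend to a later target position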
def append(self, value, key=''):
if type(value) in (list, tuple, dict):
if type(value)==dict:
for k in value.keys():
self.append(value[k], k)
return value.keys()
keys = []
for child in value:
keys.append( self.append(child) )
return keys
key = str(key)
if not isinstance(value, Widget):
raise ValueError('value should be a Widget (otherwise use add_child(key,other)')
if 'left' in value.style.keys():
del value.style['left']
if 'right' in value.style.keys():
del value.style['right']
if not 'order' in value.style.keys():
value.style.update({'position':'static', 'order':'-1'})
if key.isdigit():
value.style['order'] = key
key = value.identifier if key == '' else key
self.add_child(key, value)
return key | It allows to add child widgets to this.
The key allows access to the specific child via widget.children[key].
The key has to be numeric and determines the child's order in the layout.
Args:
value (Widget): Child instance to be appended.
key (str): Unique identifier for the child. If key.isdigit() == True ('0', '1', ...), the value determines the order
in the layout | juraj-google-style |
def rename(self, name):
return self.client.api.rename(self.id, name) | Rename this container. Similar to the ``docker rename`` command.
Args:
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | codesearchnet |
def parse_document_id(chrom, pos, ref, alt, variant_type, case_id):
return generate_md5_key([chrom, pos, ref, alt, variant_type, case_id]) | Parse the unique document id for a variant.
This will always be unique in the database.
Args:
chrom(str)
pos(str)
ref(str)
alt(str)
variant_type(str): 'clinical' or 'research'
case_id(str): unique family id
Returns:
document_id(str): The unique document id in an md5 string | codesearchnet |
def _fetch(self, method, url=None, post_data=None, parse_data=True, key=None, parameters=None, listener=None, full_return=False):
headers = self.get_headers()
headers['Content-Type'] = 'application/json'
handlers = []
debuglevel = int(self._settings['debug'])
handlers.append(urllib2.HTTPHandler(debuglevel=debuglevel))
if hasattr(httplib, 'HTTPS'):
handlers.append(urllib2.HTTPSHandler(debuglevel=debuglevel))
handlers.append(urllib2.HTTPCookieProcessor(cookielib.CookieJar()))
password_url = self._get_password_url()
if (password_url and ('Authorization' not in headers)):
pwd_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
pwd_manager.add_password(None, password_url, self._settings['user'], self._settings['password'])
handlers.append(HTTPBasicAuthHandler(pwd_manager))
opener = urllib2.build_opener(*handlers)
if (post_data is not None):
post_data = json.dumps(post_data)
uri = self._url(url, parameters)
request = RESTRequest(uri, method=method, headers=headers)
if (post_data is not None):
request.add_data(post_data)
response = None
try:
response = opener.open(request)
body = response.read()
if (password_url and (password_url not in self._settings['authorizations']) and request.has_header('Authorization')):
self._settings['authorizations'][password_url] = request.get_header('Authorization')
except urllib2.HTTPError as e:
if (e.code == 401):
raise AuthenticationError(('Access denied while trying to access %s' % uri))
elif (e.code == 404):
raise ConnectionError(('URL not found: %s' % uri))
else:
raise
except urllib2.URLError as e:
raise ConnectionError(('Error while fetching from %s: %s' % (uri, e)))
finally:
if response:
response.close()
opener.close()
data = None
if parse_data:
if (not key):
key = string.split(url, '/')[0]
data = self.parse(body, key)
if full_return:
info = (response.info() if response else None)
status = (int(string.split(info['status'])[0]) if (info and ('status' in info)) else None)
return {'success': ((status >= 200) and (status < 300)), 'data': data, 'info': info, 'body': body}
return data | Issue a request.
Args:
method (str): Request method (GET/POST/PUT/DELETE/etc.) If not specified, it will be POST if post_data is not None
Kwargs:
url (str): Destination URL
post_data (str): A string of what to POST
parse_data (bool): If true, parse response data
key (string): If parse_data==True, look for this key when parsing data
parameters (dict): Additional GET parameters to append to the URL
listener (func): callback called when uploading a file
full_return (bool): If set to True, get a full response (with success, data, info, body)
Returns:
dict. Response. If full_return==True, a dict with keys: success, data, info, body, otherwise the parsed data
Raises:
AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError | codesearchnet |
def getPageType(name, number=False):
if (not (name in pageNames())):
return None
pageType = PyOrigin.Pages(name).GetType()
if number:
return str(pageType)
if (pageType == 1):
return 'matrix'
if (pageType == 2):
return 'book'
if (pageType == 3):
return 'graph'
if (pageType == 4):
return 'layout'
if (pageType == 5):
return 'notes' | Returns the type of the page with that name.
If that name doesn't exist, None is returned.
Args:
name (str): name of the page to get the folder from
number (bool): if True, return numbers (i.e., a graph will be 3)
if False, return words where appropriate (i.e, "graph")
Returns:
string of the type of object the page is | codesearchnet |
def get_event(self, event_name, event_history=None):
if event_history is None:
event_history = event_name + '_history'
return self._db.rpoplpush(event_name, event_history) | Get an event from the database.
Gets an event from the named event list removing the event and
adding it to the event history.
Args:
event_name (str): Event list key.
event_history (str, optional): Event history list.
Returns:
str: string representation of the event object | juraj-google-style |
def _resolve_grad_captures(body_graph, body_grad_graph, while_op):
new_capture_inputs = []
for t in body_grad_graph.external_captures:
if t.graph == body_graph:
for i, output in enumerate(t.graph.outputs):
if output is t:
t = while_op.outputs[i]
break
assert t.graph == body_graph.outer_graph
new_capture_inputs.append(t)
return new_capture_inputs | Returns the tensors to pass as captured inputs to `body_grad_graph`.
`body_grad_graph` may have external references to:
1. Its outer graph containing the input gradients. These are left as-is.
2. Accumulators captured from the forward-pass graph. These should have been
added as `while_op` outputs after the gradient graph was built. We replace
these with the corresponding output of `while_op`, i.e. a tensor in
`body_graph.outer_graph`. In the case of nested control flow or functions,
the gradient logic handling `body_grad_graph.outer_graph` will make sure
the tensor from `body_graph.outer_graph` is also correctly captured.
Args:
body_graph: FuncGraph. The forward-pass body function.
body_grad_graph: FuncGraph. The body gradients function.
while_op: The forward-pass While Operation calling `body_graph`.
Returns:
A list of input tensors to be passed as the captured inputs to
`body_grad_graph`. | github-repos |
def get_document(project_id, knowledge_base_id, document_id):
import dialogflow_v2beta1 as dialogflow
client = dialogflow.DocumentsClient()
document_path = client.document_path(project_id, knowledge_base_id, document_id)
response = client.get_document(document_path)
print('Got Document:')
print(' - Display Name: {}'.format(response.display_name))
print(' - Knowledge ID: {}'.format(response.name))
print(' - MIME Type: {}'.format(response.mime_type))
print(' - Knowledge Types:')
for knowledge_type in response.knowledge_types:
print(' - {}'.format(KNOWLEDGE_TYPES[knowledge_type]))
print(' - Source: {}\n'.format(response.content_uri)) | Gets a Document.
Args:
project_id: The GCP project linked with the agent.
knowledge_base_id: Id of the Knowledge base.
document_id: Id of the Document. | codesearchnet |
def __init__(self, org=None, course=None, run=None, branch=None, version_guid=None, deprecated=False, **kwargs):
offering_arg = kwargs.pop('offering', None)
if offering_arg:
warnings.warn(
"offering is deprecated! Use course and run instead.",
DeprecationWarning,
stacklevel=2
)
course, __, run = offering_arg.partition("/")
if deprecated:
for part in (org, course, run):
self._check_location_part(part, self.INVALID_CHARS_DEPRECATED)
fields = [org, course]
if run:
fields.append(run)
if branch is not None:
fields.append(branch)
if not all(self.DEPRECATED_ALLOWED_ID_RE.match(field) for field in fields):
raise InvalidKeyError(self.__class__, fields)
else:
if version_guid:
version_guid = self.as_object_id(version_guid)
for name, value in [['org', org], ['course', course], ['run', run], ['branch', branch]]:
if not (value is None or self.ALLOWED_ID_RE.match(value)):
raise InvalidKeyError(self.__class__,
u"Special characters not allowed in field {}: '{}'".format(name, value))
super(CourseLocator, self).__init__(
org=org,
course=course,
run=run,
branch=branch,
version_guid=version_guid,
deprecated=deprecated,
**kwargs
)
if self.deprecated and (self.org is None or self.course is None):
raise InvalidKeyError(self.__class__, "Deprecated strings must set both org and course.")
if not self.deprecated and self.version_guid is None and \
(self.org is None or self.course is None or self.run is None):
raise InvalidKeyError(self.__class__, "Either version_guid or org, course, and run should be set") | Construct a CourseLocator
Args:
version_guid (string or ObjectId): optional unique id for the version
org, course, run (string): the standard definition. Optional only if version_guid given
branch (string): the branch such as 'draft', 'published', 'staged', 'beta' | juraj-google-style |
def get_unstable_entries(self, charge_to_discharge=True):
list_copy = list(self._unstable_entries)
return list_copy if charge_to_discharge else list(reversed(list_copy))
Args:
charge_to_discharge: Order from most charge to most discharged
state? Defaults to True.
Returns:
A list of unstable entries in the electrode, ordered by amount of
the working ion. | juraj-google-style |
def _check_conversion_params(conversion_params, is_v2=False):
supported_precision_modes = TrtPrecisionMode.supported_precision_modes()
if conversion_params.precision_mode not in supported_precision_modes:
raise ValueError("precision mode '{}' is not supported.It should be one of {}".format(conversion_params.precision_mode, supported_precision_modes))
if conversion_params.minimum_segment_size <= 0 and conversion_params.minimum_segment_size != -1:
raise ValueError('minimum segment size should be positive or -1 (to disable main graph conversion).') | Validate the provided TrtConversionParams.
Args:
conversion_params: a TrtConversionParams instance.
is_v2: whether we're getting a RewriterConfig for TF 2.0.
Raises:
TypeError: if any of the parameters are of unexpected type.
ValueError: if any of the parameters are of unexpected value. | github-repos |
def get_results(self, fp=sys.stdout, inline=True, delim=None, fetch=True, qlog=None, arguments=[]):
result_path = self.meta_data['results_resource']
conn = Qubole.agent()
include_header = 'false'
if (len(arguments) == 1):
include_header = arguments.pop(0)
if (include_header not in ('true', 'false')):
raise ParseError('include_header can be either true or false')
r = conn.get(result_path, {'inline': inline, 'include_headers': include_header})
if r.get('inline'):
raw_results = r['results']
encoded_results = raw_results.encode('utf8')
if (sys.version_info < (3, 0, 0)):
fp.write(encoded_results)
else:
import io
if isinstance(fp, io.TextIOBase):
if hasattr(fp, 'buffer'):
fp.buffer.write(encoded_results)
else:
fp.write(raw_results)
elif (isinstance(fp, io.BufferedIOBase) or isinstance(fp, io.RawIOBase)):
fp.write(encoded_results)
else:
pass
elif fetch:
storage_credentials = conn.get(Account.credentials_rest_entity_path)
if (storage_credentials['region_endpoint'] is not None):
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'], aws_secret_access_key=storage_credentials['storage_secret_key'], security_token=storage_credentials['session_token'], host=storage_credentials['region_endpoint'])
else:
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'], aws_secret_access_key=storage_credentials['storage_secret_key'], security_token=storage_credentials['session_token'])
log.info(('Starting download from result locations: [%s]' % ','.join(r['result_location'])))
num_result_dir = Command.find(self.id).num_result_dir
if ((include_header.lower() == 'true') and (qlog is not None)):
write_headers(qlog, fp)
for s3_path in r['result_location']:
_download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=delim)
else:
fp.write(','.join(r['result_location'])) | Fetches the result for the command represented by this object
get_results will retrieve results of the command and write to stdout by default.
Optionally one can write to a filestream specified in `fp`. The `inline` argument
decides whether the result can be returned as a CRLF separated string. In cases where
the results are greater than 20MB, get_results will attempt to read from s3 and write
to fp. The retrieval of results from s3 can be turned off by the `fetch` argument
Args:
`fp`: a file object to write the results to directly
`inline`: whether or not results are returned inline as CRLF separated string
`fetch`: True to fetch the result even if it is greater than 20MB, False to
only get the result location on s3 | codesearchnet |
def print_result_for_plain_cgi_script_from_tuple(contenttype_headers_content: WSGI_TUPLE_TYPE, status: str='200 OK') -> None:
(contenttype, headers, content) = contenttype_headers_content
print_result_for_plain_cgi_script(contenttype, headers, content, status) | Writes HTTP result to stdout.
Args:
contenttype_headers_content:
the tuple ``(contenttype, extraheaders, data)``
status:
HTTP status message (default ``"200 OK``) | codesearchnet |
def measure_topology(script):
filter_xml = ' <xmlfilter name="Compute Topological Measures"/>\n'
util.write_filter(script, filter_xml)
if isinstance(script, mlx.FilterScript):
script.parse_topology = True
return None | Compute a set of topological measures over a mesh
Args:
script: the mlx.FilterScript object or script filename to write
the filter to.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA | juraj-google-style |
def extract_keywords_from_text(index_page, no_items=5):
index_page = MLStripper.strip_tags(index_page)
tokenized_index = TextBlob(index_page).lower()
def to_str(key):
if isinstance(key, unicode):
return key.encode("utf-8")
return key
present_keywords = [
KEYWORDS_LOWER[key]
for key in KEYWORDS_LOWER.keys()
if len(key) > 3 and key in tokenized_index
]
def to_source_string(key):
source = "Keyword analysis"
try:
return SourceString(key, source)
except UnicodeEncodeError:
return SourceString(key.encode("utf-8"), source)
multi_keywords = [
to_source_string(key)
for key in present_keywords
if tokenized_index.words.count(key) >= 1
]
multi_keywords = sorted(multi_keywords, key=lambda x: len(x), reverse=True)
if len(multi_keywords) > no_items:
return multi_keywords[:no_items]
return multi_keywords | Try to process text on the `index_page` deduce the keywords and then try
to match them on the Aleph's dataset.
Function returns maximally `no_items` items, to prevent spamming the user.
Args:
index_page (str): Content of the page as UTF-8 string
no_items (int, default 5): Number of items to return.
Returns:
list: List of :class:`.SourceString` objects. | juraj-google-style |
def get_wulff_shape(self, material_id):
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.wulff import WulffShape, hkl_tuple_to_str
structure = self.get_structure_by_material_id(material_id)
surfaces = self.get_surface_data(material_id)["surfaces"]
lattice = (SpacegroupAnalyzer(structure)
.get_conventional_standard_structure().lattice)
miller_energy_map = {}
for surf in surfaces:
miller = tuple(surf["miller_index"])
if (miller not in miller_energy_map) or surf["is_reconstructed"]:
miller_energy_map[miller] = surf["surface_energy"]
millers, energies = zip(*miller_energy_map.items())
return WulffShape(lattice, millers, energies) | Constructs a Wulff shape for a material.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
Returns:
pymatgen.analysis.wulff.WulffShape | juraj-google-style |
def _client_receive(self):
try:
return self._client.readline()
except socket.error as e:
raise errors.Error(self._device, f'Encountered socket error "{e}" reading RPC response') from e | Receives the server's response of an RPC message.
Returns:
Raw bytes of the response.
Raises:
errors.Error: if a socket error occurred during the read. | github-repos |
def get_book_links(links):
book_links = []
for link in links:
data = DOWNER.download((link + '1'))
dom = dhtmlparser.parseString(data)
book_links.extend(_parse_book_links(dom))
max_page = _get_max_page(dom)
if (max_page == 1):
continue
for i in range((max_page - 1)):
data = DOWNER.download((link + str((i + 2))))
book_links.extend(_parse_book_links(dhtmlparser.parseString(data)))
return book_links | Go thru `links` to categories and return list to all publications in all
given categories.
Args:
links (list): List of strings (absolute links to categories).
Returns:
list: List of strings / absolute links to book details. | codesearchnet |
def print_str(self, string):
(x, y) = self._cursor
for char in string:
if (char == '\n'):
x = 0
y += 1
continue
if (char == '\r'):
x = 0
continue
(x, y) = self._normalizeCursor(x, y)
self.draw_char(x, y, char, self._fg, self._bg)
x += 1
self._cursor = (x, y) | Print a string at the virtual cursor.
Handles special characters such as '\\n' and '\\r'.
Printing past the bottom of the console will scroll everything upwards
if :any:`set_mode` is set to 'scroll'.
Colors can be set with :any:`set_colors` and the virtual cursor can
be moved with :any:`move`.
Args:
string (Text): The text to print.
.. seealso:: :any:`draw_str`, :any:`move`, :any:`set_colors`,
:any:`set_mode`, :any:`write`, :any:`Window` | codesearchnet |
def get_first(self, status):
items = self.get_all(status)
if items:
return list(items.items())[0][1]
return None | Get the first item in the queue that has the given status.
Args:
status (str): return the first item with this status.
Returns:
:class:`nyawc.QueueItem`: The first queue item with the given status. | codesearchnet |
def FileEntryExistsByPathSpec(self, path_spec):
location = getattr(path_spec, 'location', None)
if location is None or not location.startswith(self.LOCATION_ROOT):
return False
if len(location) == 1:
return True
return self._cpio_archive_file.FileEntryExistsByPath(location[1:]) | Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): a path specification.
Returns:
bool: True if the file entry exists. | juraj-google-style |
def db_for_write(self, model, **hints):
try:
if (model.sf_access == READ_ONLY):
raise WriteNotSupportedError(('%r is a read-only model.' % model))
except AttributeError:
pass
return None | Prevent write actions on read-only tables.
Raises:
WriteNotSupportedError: If models.sf_access is ``read_only``. | codesearchnet |
def __init__(
self,
epsilon,
alphabet=None):
self.bookeeping = None
self.groups = None
self.epsilon = epsilon
if alphabet is None:
alphabet = createalphabet()
self.alphabet = alphabet | Initialization Function
Args:
epsilon (str): The epsilon symbol
alphabet (list): The DFA Alphabet
Returns:
None | juraj-google-style |
def run_init_ops(self, sess, tags, import_scope=None):
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
with sess.graph.as_default():
asset_tensors_dictionary = get_asset_tensors(self._export_dir, meta_graph_def, import_scope=import_scope)
init_op = get_init_op(meta_graph_def, import_scope)
if init_op is not None:
sess.run(fetches=[init_op], feed_dict=asset_tensors_dictionary) | Run initialization ops defined in the `MetaGraphDef`.
Args:
sess: tf.compat.v1.Session to restore variable values.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned. | github-repos |
def update_reminder(self, reminder):
uri = '/'.join([self.api_uri,
self.reminders_suffix,
])
payload = None
if type(reminder) is not StreakReminder:
return requests.codes.bad_request, None
payload = reminder.to_dict(rw = True)
try:
uri = '/'.join([uri, reminder.attributes['key']])
except KeyError:
return requests.codes.bad_request, None
code, data = self._req('post', uri , json.dumps(payload))
return code, data | Creates a reminder with the provided attributes.
Args:
reminder updated reminder of StreakReminder type
return (status code, reminder dict) | juraj-google-style |
def consult_filters(self, url_info: URLInfo, url_record: URLRecord, is_redirect: bool=False) \
-> Tuple[bool, str, dict]:
if not self._url_filter:
return True, 'nofilters', None
test_info = self._url_filter.test_info(url_info, url_record)
verdict = test_info['verdict']
if verdict:
reason = 'filters'
elif is_redirect and self.is_only_span_hosts_failed(test_info):
verdict = True
reason = 'redirect'
else:
reason = 'filters'
return verdict, reason, test_info | Consult the URL filter.
Args:
url_record: The URL record.
is_redirect: Whether the request is a redirect and it is
desired that it spans hosts.
Returns
tuple:
1. bool: The verdict
2. str: A short reason string: nofilters, filters, redirect
3. dict: The result from :func:`DemuxURLFilter.test_info` | juraj-google-style |
def get_instance(cls, device):
if cls._nuis.get(device) is None:
cls._nuis[device] = AndroidUiautomationPoco(device)
return cls._nuis[device] | This is only a slot to store and get already initialized poco instance rather than initializing again. You can
simply pass the ``current device instance`` provided by ``airtest`` to get the AndroidUiautomationPoco instance.
If no such AndroidUiautomationPoco instance, a new instance will be created and stored.
Args:
device (:py:obj:`airtest.core.device.Device`): more details refer to ``airtest doc``
Returns:
poco instance | juraj-google-style |
def remove_alias(alias_names):
alias_table = get_alias_table()
for alias_name in alias_names:
if (alias_name not in alias_table.sections()):
raise CLIError(ALIAS_NOT_FOUND_ERROR.format(alias_name))
alias_table.remove_section(alias_name)
_commit_change(alias_table) | Remove an alias.
Args:
alias_name: The name of the alias to be removed. | codesearchnet |
def __init__(self, value, indices=None, name=None):
del name
super(CSRSparseMatrix, self).__init__()
if isinstance(value, sparse_tensor.SparseTensor):
if indices is not None:
raise ValueError('indices must be None if value is a SparseTensor.')
self._dtype = value.dtype
self._csr_matrix = sm_ops.sparse_tensor_to_csr_sparse_matrix(indices=value.indices, values=value.values, dense_shape=value.dense_shape)
else:
value = ops.convert_to_tensor(value)
self._dtype = value.dtype
if indices is not None:
indices = ops.convert_to_tensor(indices, dtype=dtypes.int64)
else:
indices = array_ops.stop_gradient(array_ops.where(value))
self._csr_matrix = sm_ops.dense_to_csr_sparse_matrix(value, indices)
if self._eager_mode:
self._csr_matrix._handle_data = _make_handle_data(value) | Construct a CSRSparseMatrix from a dense matrix or SparseTensor.
Args:
value: A dense `2D` or `3D` Tensor or `SparseTensor`.
indices: The nonzero indices of `value`
(if `value` is not a `SparseTensor`).
name: Optional op name.
Raises:
ValueError: if `value` is a `SparseTensor` and `indices` is not `None`. | github-repos |
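A brief sketch of constructing the wrapper above from a dense tensor and from a SparseTensor; the shapes and values are illustrative.
import tensorflow as tf

dense = tf.constant([[1.0, 0.0], [0.0, 2.0]])
csr_from_dense = CSRSparseMatrix(dense)          # nonzero indices inferred via tf.where

st = tf.sparse.SparseTensor(indices=[[0, 1]], values=[3.0], dense_shape=[2, 2])
csr_from_sparse = CSRSparseMatrix(st)            # indices must be left as None here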
def _build_instruction_ds(instructions):
tensor_inputs = {
k: np.array(vals, dtype=np.int64) if k == "mask_offset" else list(vals)
for k, vals in utils.zip_dict(*instructions)
}
return tf.data.Dataset.from_tensor_slices(tensor_inputs) | Create a dataset containing individual instruction for each shard.
Each instruction is a dict:
```
{
"filepath": tf.Tensor(shape=(), dtype=tf.string),
"mask_offset": tf.Tensor(shape=(), dtype=tf.int64),
"mask": tf.Tensor(shape=(100,), dtype=tf.bool),
}
```
Args:
instructions: `list[dict]`, the list of instruction dict
Returns:
instruction_ds: The dataset containing the instruction. The dataset size is
the number of shards. | juraj-google-style
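A hedged example of the instruction list this helper consumes; the filepaths and mask values are made up.
instructions = [
    {"filepath": "data-00000-of-00002", "mask_offset": 0, "mask": [True] * 100},
    {"filepath": "data-00001-of-00002", "mask_offset": 50, "mask": [False] * 100},
]
instruction_ds = _build_instruction_ds(instructions)   # one dataset element per shard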
def sequence_to_onehot(sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool=False) -> np.ndarray:
num_entries = max(mapping.values()) + 1
if sorted(set(mapping.values())) != list(range(num_entries)):
raise ValueError('The mapping must have values from 0 to num_unique_aas-1 without any gaps. Got: %s' % sorted(mapping.values()))
one_hot_arr = np.zeros((len(sequence), num_entries), dtype=np.int32)
for aa_index, aa_type in enumerate(sequence):
if map_unknown_to_x:
if aa_type.isalpha() and aa_type.isupper():
aa_id = mapping.get(aa_type, mapping['X'])
else:
raise ValueError(f'Invalid character in the sequence: {aa_type}')
else:
aa_id = mapping[aa_type]
one_hot_arr[aa_index, aa_id] = 1
return one_hot_arr | Maps the given sequence into a one-hot encoded matrix.
Args:
sequence: An amino acid sequence.
mapping: A dictionary mapping amino acids to integers.
map_unknown_to_x: If True, any amino acid that is not in the mapping will be
mapped to the unknown amino acid 'X'. If the mapping doesn't contain amino acid 'X', an error will be thrown.
If False, any amino acid not in the mapping will throw an error.
Returns:
A numpy array of shape (seq_len, num_unique_aas) with one-hot encoding of the sequence.
Raises:
ValueError: If the mapping doesn't contain values from 0 to
num_unique_aas - 1 without any gaps. | github-repos |
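A small worked example for the encoder above, using a toy three-letter mapping instead of the full amino-acid alphabet.
mapping = {"A": 0, "C": 1, "X": 2}                 # values 0..2 with no gaps
onehot = sequence_to_onehot("ACB", mapping, map_unknown_to_x=True)
# "B" is not in the mapping, so it falls back to "X":
# [[1, 0, 0],
#  [0, 1, 0],
#  [0, 0, 1]]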
def index(self, name=None):
try:
return self.header.index(name)
except ValueError:
raise TableError(('Unknown index name %s.' % name)) | Returns index number of supplied column name.
Args:
name: string of column name.
Raises:
TableError: If name not found.
Returns:
Index of the specified header entry. | codesearchnet |
def _ConsumeInteger(tokenizer, is_signed=False, is_long=False):
try:
result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long)
except ValueError as e:
raise tokenizer.ParseError(str(e))
tokenizer.NextToken()
return result | Consumes an integer number from tokenizer.
Args:
tokenizer: A tokenizer used to parse the number.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer parsed.
Raises:
ParseError: If an integer with given characteristics couldn't be consumed. | codesearchnet |
def _check_instance_type(type_constraint, instance, var_name=None, verbose=False):
hint_type = "argument: '%s'" % var_name if var_name is not None else 'return type'
try:
check_constraint(type_constraint, instance)
except SimpleTypeHintError:
if verbose:
verbose_instance = '%s, ' % instance
else:
verbose_instance = ''
raise TypeCheckError('Type-hint for %s violated. Expected an instance of %s, instead found %san instance of %s.' % (hint_type, type_constraint, verbose_instance, type(instance)))
except CompositeTypeHintError as e:
raise TypeCheckError('Type-hint for %s violated: %s' % (hint_type, e)) | A helper function to report type-hint constraint violations.
Args:
type_constraint: An instance of a 'TypeConstraint' or a built-in Python
type.
instance: The candidate object which will be checked by to satisfy
'type_constraint'.
var_name: If 'instance' is an argument, then the actual name for the
parameter in the original function definition.
verbose: If True, include the offending instance's value in the error message.
Raises:
TypeCheckError: If 'instance' fails to meet the type-constraint of
'type_constraint'. | github-repos |
def repeat(n: int, body: Callable[..., Union[core_types.TensorLike, Iterable]], inputs: Optional[List[core_types.TensorLike]]=None, infeed_queue: Optional[tpu_feed.InfeedQueue]=None, name: Any=None) -> List[core_types.TensorLike]:
def _convert_to_list(xs):
if not isinstance(xs, (list, tuple)):
return [xs]
else:
return list(xs)
def cond(i, *args):
del args
return i < n
def body_wrapper(i, *args):
return [i + 1] + _convert_to_list(body(*args))
inputs = [0] if inputs is None else [0] + _convert_to_list(inputs)
outputs = while_loop(cond, body_wrapper, inputs=inputs, infeed_queue=infeed_queue, name=name)
outputs = _convert_to_list(outputs)
if len(outputs) == 1:
return outputs[0].op
else:
return outputs[1:] | Builds a training loop that executes a fixed number of iterations.
The set of loop-carried tensors correspond to `inputs`.
`body` must be a function that takes and returns the values of the
loop-carried tensors.
Args:
n: the number of loop iterations
body: a Python function that builds the loop body.
inputs: a list of initial values passed into the training loop or None
(equivalent to an empty list).
infeed_queue: if not None, the infeed queue from which to append a tuple of
arguments as inputs to condition.
name: (Deprecated) Does nothing.
Returns:
The final values of the loop-carried tensors.
Raises:
ValueError: if there is a type error. | github-repos |
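A hedged sketch of driving the fixed-iteration loop above; the body simply accumulates a scalar, and the surrounding TPU context setup is omitted.
import tensorflow as tf

def body(total):
    # single loop-carried tensor; return the updated value(s)
    return [total + 1.0]

final_total = repeat(10, body, inputs=[tf.constant(0.0)])   # value 10.0 after the loop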
def wire(self, name, receive=None, send=None, respond=None, **kwargs):
if (hasattr(self, name) and (name != 'main')):
raise AttributeError("cannot use '%s' as name for wire, attribute already exists" % name)
if send:
self.log_debug(("Wiring '%s'.send: %s" % (name, send)))
if respond:
self.log_debug(("Wiring '%s'.respond: %s" % (name, respond)))
if receive:
self.log_debug(("Wiring '%s'.receive: %s" % (name, receive)))
wire = Wire(receive=receive, send=send, respond=respond)
wire.name = ('%s.%s' % (self.name, name))
wire.meta = kwargs.get('meta', {})
wire.on('receive', self.on_receive)
setattr(self, name, wire)
if (not self.main):
self.main = wire
return wire | Wires the link to a connection. Can be called multiple
times to set up wires to different connections
After creation wire will be accessible on the link via its name
as an attribute.
You can undo this action with the cut() method
Arguments:
- name (str): unique name for the wire
Keyword Arguments:
- receive (Connection): wire receiver to this connection
- respond (Connection): wire responder to this connection
- send (Connection): wire sender to this connection
- meta (dict): attach these meta variables to any message
sent from this wire
Returns:
- Wire: the created wire instance | codesearchnet |
def from_voigt(cls, voigt_input):
voigt_input = np.array(voigt_input)
rank = sum(voigt_input.shape) // 3
t = cls(np.zeros([3] * rank))
if voigt_input.shape != t._vscale.shape:
raise ValueError("Invalid shape for voigt matrix")
voigt_input = voigt_input / t._vscale
this_voigt_map = t.get_voigt_dict(rank)
for ind in this_voigt_map:
t[ind] = voigt_input[this_voigt_map[ind]]
return cls(t) | Constructor based on the voigt notation vector or matrix.
Args:
voigt_input (array-like): voigt input for a given tensor | juraj-google-style |
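A short usage sketch for the Voigt constructor above, assuming it lives on pymatgen's Tensor class; the 6x6 input corresponds to a rank-4 tensor.
import numpy as np
from pymatgen.core.tensors import Tensor   # assumed host class for from_voigt

c_voigt = 100.0 * np.eye(6)                # toy stiffness-like matrix in Voigt notation
c_full = Tensor.from_voigt(c_voigt)        # full rank-4 tensor of shape (3, 3, 3, 3)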
def find_amplitude(chunk):
return (abs(int((chunk.max() - chunk.min()))) / config.SAMPLE_RANGE) | Calculate the 0-1 amplitude of an ndarray chunk of audio samples.
Samples in the ndarray chunk are signed int16 values oscillating
anywhere between -32768 and 32767. Find the amplitude between 0 and 1
by taking the absolute difference between the chunk's maximum and
minimum and dividing by ``config.SAMPLE_RANGE``.
Args:
chunk (numpy.ndarray): An array of int16 audio samples
Returns:
float: The amplitude of the sample between 0 and 1.
Note that this is not a decibel representation of
the amplitude. | codesearchnet |
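A tiny worked example for the helper above, assuming config.SAMPLE_RANGE is 32767.
import numpy as np

chunk = np.array([-16384, 0, 16383], dtype=np.int16)   # toy int16 samples
find_amplitude(chunk)                                   # abs(16383 - (-16384)) / 32767 == 1.0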
def relpath(path, start=None):
relative = get_instance(path).relpath(path)
if start:
return os_path_relpath(relative, start=start).replace('\\', '/')
return relative | Return a relative file path to path either from the
current directory or from an optional start directory.
For storage objects, "path" and "start" are relative to
storage root.
"/" are not stripped on storage objects path. The ending slash is required
on some storage to signify that target is a directory.
Equivalent to "os.path.relpath".
Args:
path (path-like object): Path or URL.
start (path-like object): Relative from this optional directory.
Default to "os.curdir" for local files.
Returns:
str: Relative path. | codesearchnet |
def create_keras_history(tensors):
_, created_layers = _create_keras_history_helper(tensors, set(), [])
return created_layers | Wraps TensorFlow Operations for compatibility with the Functional API.
This method checks to see if a Tensor in `tensors` is missing Keras metadata
and has its origin in a Keras `Input` Layer. If so, this method will replace
the raw TensorFlow Operations that created this tensor with
`TensorFlowOpLayer` instances that create identical operations.
Any Tensors not originating from a Keras `Input` Layer will be treated as
constants when constructing `TensorFlowOpLayer` instances.
Args:
tensors: A structure of Tensors, some of which come from raw TensorFlow
operations and need to have Keras metadata assigned to them.
Returns:
created_layers: List. The `TensorFlowOpLayer` instances created to wrap
the raw Tensorflow operations. | github-repos |
def new(namespace, name, wdl, synopsis, documentation=None, api_url=fapi.PROD_API_ROOT):
r = fapi.update_workflow(namespace, name, synopsis, wdl, documentation, api_url)
fapi._check_response_code(r, 201)
d = r.json()
return Method(namespace, name, d['snapshotId']) | Create new FireCloud method.
If the namespace + name already exists, a new snapshot is created.
Args:
namespace (str): Method namespace for this method
name (str): Method name
wdl (file): WDL description
synopsis (str): Short description of task
documentation (file): Extra documentation for method | codesearchnet |
def port_create_gre(br, port, id, remote):
if (not (0 <= id < (2 ** 32))):
return False
elif (not __salt__['dig.check_ip'](remote)):
return False
elif (not bridge_exists(br)):
return False
elif (port in port_list(br)):
cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} options:key={3}'.format(br, port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode']) | Generic Routing Encapsulation - creates GRE tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 32-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10 | codesearchnet |
def do_youtube_dl(worker, site, page):
with tempfile.TemporaryDirectory(prefix='brzl-ydl-') as tempdir:
ydl = _build_youtube_dl(worker, tempdir, site)
ie_result = _try_youtube_dl(worker, ydl, site, page)
outlinks = set()
if (ie_result and (ie_result.get('extractor') == 'youtube:playlist')):
# collect outlinks to the individual videos in the playlist
outlinks = {'https://www.youtube.com/watch?v=%s' % e['id'] for e in ie_result.get('entries', [])}
return (ydl.fetch_spy.fetches, outlinks) | Runs youtube-dl configured for `worker` and `site` to download videos from
`page`.
Args:
worker (brozzler.BrozzlerWorker): the calling brozzler worker
site (brozzler.Site): the site we are brozzling
page (brozzler.Page): the page we are brozzling
Returns:
tuple with two entries:
`list` of `dict`: with info about urls fetched:
[{
'url': ...,
'method': ...,
'response_code': ...,
'response_headers': ...,
}, ...]
`list` of `str`: outlink urls | codesearchnet |
def get_details(self, ids):
if isinstance(ids, list):
if len(ids) > 5:
ids = ids[:5]
id_param = ';'.join(ids) + '/'
else:
ids = str(ids)
id_param = ids + '/'
header, content = self._http_request(id_param)
resp = json.loads(content)
if not self._is_http_response_ok(header):
error = resp.get('error_message', 'Unknown Error')
raise HttpException(header.status, header.reason, error)
return resp | Locu Venue Details API Call Wrapper
Args:
ids (list or str): ids of the venues to get details for. A list is truncated to at most 5 ids. | juraj-google-style
def sawtooth(duration: int, amp: complex, period: float = None,
phase: float = 0, name: str = None) -> SamplePulse:
if period is None:
period = duration
return _sampled_sawtooth_pulse(duration, amp, period, phase=phase, name=name) | Generates sawtooth wave `SamplePulse`.
Args:
duration: Duration of pulse. Must be greater than zero.
amp: Pulse amplitude. Wave range is [-amp, amp].
period: Pulse period, units of dt. If `None` defaults to single cycle.
phase: Pulse phase.
name: Name of pulse. | juraj-google-style |
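A minimal usage sketch for the pulse factory above; the amplitude and duration are arbitrary.
# Two full sawtooth cycles over 128 samples of dt.
saw = sawtooth(duration=128, amp=0.5, period=64, phase=0, name="saw")
# saw is a SamplePulse whose samples ramp between -0.5 and 0.5 twice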
def delete_recursively(dirname):
delete_recursively_v2(dirname) | Deletes everything under dirname recursively.
Args:
dirname: string, a path to a directory
Raises:
errors.OpError: If the operation fails. | github-repos |
def oauth_access(
self, *, client_id: str, client_secret: str, code: str, **kwargs
) -> SlackResponse:
kwargs.update(
{"client_id": client_id, "client_secret": client_secret, "code": code}
)
return self.api_call("oauth.access", data=kwargs) | Exchanges a temporary OAuth verifier code for an access token.
Args:
client_id (str): Issued when you created your application. e.g. '4b39e9-752c4'
client_secret (str): Issued when you created your application. e.g. '33fea0113f5b1'
code (str): The code param returned via the OAuth callback. e.g. 'ccdaa72ad' | juraj-google-style |
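A hedged sketch of exchanging a temporary OAuth code with the method above; the client construction and all credentials are placeholders taken from the docstring.
import slack   # assumed package exposing this WebClient

client = slack.WebClient()   # no token is required for oauth.access itself
response = client.oauth_access(
    client_id="4b39e9-752c4",
    client_secret="33fea0113f5b1",
    code="ccdaa72ad",
)
access_token = response["access_token"]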
def libdmtx_function(fname, restype, *args):
prototype = CFUNCTYPE(restype, *args)
return prototype((fname, load_libdmtx())) | Returns a foreign function exported by `libdmtx`.
Args:
fname (:obj:`str`): Name of the exported function as string.
restype (:obj:): Return type - one of the `ctypes` primitive C data
types.
*args: Arguments - a sequence of `ctypes` primitive C data types.
Returns:
cddl.CFunctionType: A wrapper around the function. | codesearchnet |
def _command_template(self, switches, objectInput=None):
command = ['java', '-jar', self.file_jar, '-eUTF-8']
if self.memory_allocation:
command.append('-Xmx{}'.format(self.memory_allocation))
command.extend(switches)
if (not objectInput):
objectInput = subprocess.PIPE
log.debug('Subprocess command: {}'.format(', '.join(command)))
if six.PY2:
with open(os.devnull, 'w') as devnull:
out = subprocess.Popen(command, stdin=objectInput, stdout=subprocess.PIPE, stderr=devnull)
elif six.PY3:
out = subprocess.Popen(command, stdin=objectInput, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
(stdoutdata, _) = out.communicate()
return stdoutdata.decode('utf-8').strip() | Template for Tika app commands
Args:
switches (list): list of switches to Tika app Jar
objectInput (object): file object/standard input to analyze
Return:
Standard output data (unicode Python 2, str Python 3) | codesearchnet |
def cumsum(x, axis=0):
return math_ops.cumsum(x, axis=axis) | Cumulative sum of the values in a tensor, alongside the specified axis.
Args:
x: A tensor or variable.
axis: An integer, the axis to compute the sum.
Returns:
A tensor of the cumulative sum of values of `x` along `axis`. | github-repos |
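A tiny worked example for the backend wrapper above.
import tensorflow as tf

x = tf.constant([[1, 2, 3], [4, 5, 6]])
cumsum(x, axis=1)   # [[1, 3, 6], [4, 9, 15]]
cumsum(x, axis=0)   # [[1, 2, 3], [5, 7, 9]]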
def get_value_by_xy(self, x, y):
if ((x < self.xMin) or (x > self.xMax) or (y < self.yMin) or (y > self.yMax)):
return None
else:
row = (self.nRows - int(numpy.ceil(((y - self.yMin) / self.dx))))
col = int(numpy.floor(((x - self.xMin) / self.dx)))
value = self.data[row][col]
if (value == self.noDataValue):
return None
else:
return value | Get raster value by xy coordinates.
Args:
x: X Coordinate.
y: Y Coordinate.
Returns:
raster value, None if the input are invalid. | codesearchnet |
def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs):
generated_texts = self.batch_decode(generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs)
return [self.post_process_generation(text, cleanup_and_extract=False) for text in generated_texts] | Post-process the output of the model to decode the text.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
Additional arguments to be passed to the tokenizer's `batch_decode method`.
Returns:
`List[str]`: The decoded text. | github-repos |
def db_dp004(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `db_dp004`'.format(value))
self._db_dp004 = value | Corresponds to IDD Field `db_dp004`
mean coincident dry-bulb temperature to
Dew-point temperature corresponding to 0.4% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `db_dp004`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | codesearchnet |
async def register_agent(self, short_name):
await self.send_command(OPERATIONS.CMD_SET_AGENT, {'name': short_name},
MESSAGES.SetAgentResponse) | Register to act as the RPC agent for this service.
After this call succeeds, all requests to send RPCs to this service
will be routed through this agent.
Args:
short_name (str): A unique short name for this service that functions
as an id | juraj-google-style |
def __init__(self, submit_timestamp, metric_id, value, metric=None, label=None):
self.submit_timestamp = submit_timestamp
self.metric_id = metric_id
self.label = label or metric.key.metric.namespace + '_' + parse_step(metric.key.step) + '_' + metric.key.metric.name
self.value = value | Initializes :class:`Metric`
Args:
metric (object): object of metric result
submit_timestamp (float): date-time of saving metric to database
metric_id (uuid): unique id to identify test run
value: value of metric
label: custom metric name to be saved in database | github-repos |
def extract(self, log, basis, name, function=None):
intervals = {}
previous_ix = (- 1)
for (i, z) in enumerate(basis):
ix = self.read_at(z, index=True)
if (ix is None):
continue
if (ix == previous_ix):
intervals[ix].append(log[i])
else:
intervals[ix] = [log[i]]
previous_ix = ix
for (ix, data) in intervals.items():
f = (function or utils.null)
d = f(np.array(data))
self[ix].data[name] = d
return None | 'Extract' a log into the components of a striplog.
Args:
log (array_like). A log or other 1D data.
basis (array_like). The depths or elevations of the log samples.
name (str). The name of the attribute to store in the components.
function (function). A function that takes an array as the only
input, and returns whatever you want to store in the 'name'
attribute of the primary component.
Returns:
None. The function works on the striplog in place. | codesearchnet |
def request(self, batch: Sequence[ExampleT], model: ModelT, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionT]:
raise NotImplementedError(type(self)) | Makes a request to a remote inference service and returns the response.
Should raise an exception of some kind if there is an error to enable the
retry and client-side throttling logic to work. Returns an iterable of the
desired prediction type. This method should return the values directly, as
handling return values as a generator can prevent the retry logic from
functioning correctly.
Args:
batch: A sequence of examples or features.
model: The model used to make inferences.
inference_args: Extra arguments for models whose inference call requires
extra parameters.
Returns:
An Iterable of Predictions. | github-repos |
def _narrow_unichr(code_point):
try:
if (len(code_point.char) > 1):
return code_point.char
except AttributeError:
pass
return six.unichr(code_point) | Retrieves the unicode character representing any given code point, in a way that won't break on narrow builds.
This is necessary because the built-in unichr function will fail for ordinals above 0xFFFF on narrow builds (UCS2);
ordinals above 0xFFFF would require recalculating and combining surrogate pairs. This avoids that by retrieving the
unicode character that was initially read.
Args:
code_point (int|CodePoint): An int or a subclass of int that contains the unicode character representing its
code point in an attribute named 'char'. | codesearchnet |
def __clone_function(f, name=None):
if (not isinstance(f, types.FunctionType)):
raise SimTypeError('Given parameter is not a function.')
if (name is None):
name = f.__name__
newglobals = f.__globals__.copy()
globals_used = [x for x in f.__globals__ if (x in f.__code__.co_names)]
for x in globals_used:
gv = f.__globals__[x]
if isinstance(gv, types.FunctionType):
newglobals[x] = __clone_function(gv)
elif isinstance(gv, types.ModuleType):
newglobals[x] = gv
else:
newglobals[x] = copy.deepcopy(gv)
newfunc = types.FunctionType(f.__code__, newglobals, name, f.__defaults__, f.__closure__)
return newfunc | Make a new version of a function that has its own independent copy
of any globals that it uses directly, and has its own name.
All other attributes are assigned from the original function.
Args:
f: the function to clone
name (str): the name for the new function (if None, keep the same name)
Returns:
A copy of the function f, having its own copy of any globals used
Raises:
SimTypeError: if the given parameter is not a function. | codesearchnet
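A short hedged example of the behaviour described above: the clone receives its own deep copy of any mutable global it reads.
counter = {"n": 0}

def bump():
    counter["n"] += 1
    return counter["n"]

bump_clone = __clone_function(bump, name="bump_clone")
bump()          # increments the original module-level counter -> returns 1
bump_clone()    # increments the clone's private copy          -> returns 1
counter["n"]    # still 1: the clone never touched the original dict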