code | docstring | source
---|---|---|
def __init__(self,
property_type=TableFeaturePropType.OFPTFPT_WRITE_ACTIONS,
action_ids=None):
super().__init__(property_type)
self.action_ids = action_ids if action_ids else ListOfActions()
self.update_length()
|
Create an ActionsProperty with the optional parameters below.
Args:
property_type(|TableFeaturePropType_v0x04|):
Property Type value of this instance.
action_ids(|ListOfActions_v0x04|):
List of Action instances.
|
juraj-google-style
|
def are_symmetrically_related(self, point_a, point_b, tol=0.001):
if np.allclose(self.operate(point_a), point_b, atol=tol):
return True
if np.allclose(self.operate(point_b), point_a, atol=tol):
return True
return False
|
Checks if two points are symmetrically related.
Args:
point_a (3x1 array): First point.
point_b (3x1 array): Second point.
tol (float): Absolute tolerance for checking distance.
Returns:
True if self.operate(point_a) == point_b or vice versa.
|
juraj-google-style
|
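Example usage, as a minimal sketch that assumes this method belongs to pymatgen's SymmOp class (the call values are hypothetical):
from pymatgen.core.operations import SymmOp
inversion = SymmOp.inversion()  # symmetry operation mapping r -> -r about the origin
print(inversion.are_symmetrically_related([0.1, 0.2, 0.3], [-0.1, -0.2, -0.3]))  # True
print(inversion.are_symmetrically_related([0.1, 0.2, 0.3], [0.5, 0.2, 0.3]))  # False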
def master(self, task_type=None, task_id=None, rpc_layer=None):
session_master = _get_value_in_tfconfig(_SESSION_MASTER_KEY, self._port)
if session_master is not None:
return session_master
cluster_spec = self.cluster_spec()
if not cluster_spec.jobs or (len(cluster_spec.jobs) == 1 and len(cluster_spec.job_tasks(cluster_spec.jobs[0])) == 1):
return ''
task_type = task_type if task_type is not None else self.task_type
task_id = task_id if task_id is not None else self.task_id
rpc_layer = rpc_layer if rpc_layer is not None else self.rpc_layer
return format_master_url(cluster_spec.task_address(task_type, task_id), rpc_layer)
|
Returns the master address to use when creating a TensorFlow session.
Note: this is only useful for TensorFlow 1.x.
Args:
task_type: (String, optional) Overrides and sets the task_type of the
master.
task_id: (Integer, optional) Overrides and sets the task id of the master.
rpc_layer: (String, optional) Overrides and sets the protocol over which
TensorFlow nodes communicate with each other.
Returns:
The address of the master.
Raises:
RuntimeError: If the task_type or task_id is not specified and the
SageMaker environment variables do not contain a task section.
|
github-repos
|
def _calc_min_size(self, conv_layers):
input_size = 1
for _, conv_params, max_pooling in reversed(conv_layers):
if max_pooling is not None:
kernel_size, stride = max_pooling
input_size = input_size * stride + (kernel_size - stride)
if conv_params is not None:
kernel_size, stride = conv_params
input_size = input_size * stride + (kernel_size - stride)
return input_size
|
Calculates the minimum size of the input layer.
Given a set of convolutional layers, calculate the minimum value of
the `input_height` and `input_width`, i.e. such that the output has
size 1x1. Assumes snt.VALID padding.
Args:
conv_layers: List of tuples `(output_channels, (kernel_size, stride),
(pooling_size, pooling_stride))`
Returns:
Minimum value of input height and width.
|
juraj-google-style
|
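A worked example of the arithmetic above, re-run as a standalone sketch with hypothetical layer parameters:
# Two conv layers with 3x3 kernels (stride 1), each followed by 2x2 max pooling (stride 2).
conv_layers = [(32, (3, 1), (2, 2)), (64, (3, 1), (2, 2))]
input_size = 1
for _, conv_params, max_pooling in reversed(conv_layers):
    if max_pooling is not None:
        kernel_size, stride = max_pooling
        input_size = input_size * stride + (kernel_size - stride)
    if conv_params is not None:
        kernel_size, stride = conv_params
        input_size = input_size * stride + (kernel_size - stride)
print(input_size)  # 10: the smallest input height/width that still yields a 1x1 output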
def insort_event_right(self, event, lo=0, hi=None):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(self.queue)
while lo < hi:
mid = (lo + hi) // 2
if event[0] < self.queue[mid][0]:
hi = mid
else:
lo = mid + 1
self.queue.insert(lo, event)
|
Insert event in queue, and keep it sorted assuming queue is sorted.
If event is already in queue, insert it to the right of the rightmost
event (to keep FIFO order).
Optional args lo (default 0) and hi (default len(self.queue)) bound the
slice of the queue to be searched.
Args:
event: a (time in sec since unix epoch, callback, args, kwds) tuple.
|
juraj-google-style
|
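For illustration, the same right-biased insertion reproduced on a plain list (standalone sketch; queue stands in for self.queue):
queue = [(1.0, 'a', (), {}), (2.0, 'b', (), {})]
def insort_event_right(queue, event, lo=0, hi=None):
    # Events are (time, callback, args, kwds) tuples, kept sorted by time;
    # an equal timestamp is inserted to the right of existing entries (FIFO order).
    if hi is None:
        hi = len(queue)
    while lo < hi:
        mid = (lo + hi) // 2
        if event[0] < queue[mid][0]:
            hi = mid
        else:
            lo = mid + 1
    queue.insert(lo, event)
insort_event_right(queue, (2.0, 'c', (), {}))
print([e[1] for e in queue])  # ['a', 'b', 'c'] -- 'c' lands after the existing 2.0 event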
def update_hash(a_hash, mv):
if mv.labels:
signing.add_dict_to_hash(a_hash, encoding.MessageToPyValue(mv.labels))
money_value = mv.get_assigned_value(u'moneyValue')
if (money_value is not None):
a_hash.update(b'\x00')
a_hash.update(money_value.currencyCode.encode('utf-8'))
|
Adds ``mv`` to ``a_hash``
Args:
a_hash (`Hash`): the secure hash, e.g created by hashlib.md5
mv (:class:`MetricValue`): the instance to add to the hash
|
codesearchnet
|
def getWeights(self, term_i=None):
assert self.init, 'GP not initialised'
if term_i is None:
if self.gp.mean.n_terms==1:
term_i = 0
else:
print('VarianceDecomposition: Specify fixed effect term index')
return self.gp.mean.B[term_i]
|
Return weights for fixed effect term term_i
Args:
term_i: fixed effect term index
Returns:
weights of the specified fixed effect term.
The output is a KxL matrix of weights,
where K is F.shape[1] and L is A.shape[1] of the corresponding fixed effect term
(L will always be 1 for single-trait analysis).
|
juraj-google-style
|
def CopyFromStringTuple(self, time_elements_tuple):
if len(time_elements_tuple) < 6:
raise ValueError((
'Invalid time elements tuple at least 6 elements required, '
'got: {0:d}').format(len(time_elements_tuple)))
try:
year = int(time_elements_tuple[0], 10)
except (TypeError, ValueError):
raise ValueError('Invalid year value: {0!s}'.format(
time_elements_tuple[0]))
try:
month = int(time_elements_tuple[1], 10)
except (TypeError, ValueError):
raise ValueError('Invalid month value: {0!s}'.format(
time_elements_tuple[1]))
try:
day_of_month = int(time_elements_tuple[2], 10)
except (TypeError, ValueError):
raise ValueError('Invalid day of month value: {0!s}'.format(
time_elements_tuple[2]))
try:
hours = int(time_elements_tuple[3], 10)
except (TypeError, ValueError):
raise ValueError('Invalid hours value: {0!s}'.format(
time_elements_tuple[3]))
try:
minutes = int(time_elements_tuple[4], 10)
except (TypeError, ValueError):
raise ValueError('Invalid minutes value: {0!s}'.format(
time_elements_tuple[4]))
try:
seconds = int(time_elements_tuple[5], 10)
except (TypeError, ValueError):
raise ValueError('Invalid seconds value: {0!s}'.format(
time_elements_tuple[5]))
self._normalized_timestamp = None
self._number_of_seconds = self._GetNumberOfSecondsFromElements(
year, month, day_of_month, hours, minutes, seconds)
self._time_elements_tuple = (
year, month, day_of_month, hours, minutes, seconds)
|
Copies time elements from string-based time elements tuple.
Args:
time_elements_tuple (Optional[tuple[str, str, str, str, str, str]]):
time elements, contains year, month, day of month, hours, minutes and
seconds.
Raises:
ValueError: if the time elements tuple is invalid.
|
juraj-google-style
|
def honeycomb_lattice( a, b, spacing, alternating_sites=False ):
if alternating_sites:
site_labels = [ 'A', 'B', 'A', 'B' ]
else:
site_labels = [ 'L', 'L', 'L', 'L' ]
unit_cell_lengths = np.array( [ sqrt(3), 3.0, 0.0 ] ) * spacing
cell_lengths = unit_cell_lengths * np.array( [ a, b, 1.0 ] )
grid = np.array( list( range( 1, int( a * b * 4 + 1 ) ) ) ).reshape( a, b, 4, order='C' )
sites = []
for i in range( a ):
for j in range( b ):
r = np.array( [ i * sqrt(3) * spacing, j * 3 * spacing, 0.0 ] )
neighbours = [ grid[ i, j, 1 ],
np.roll( grid, +1, axis=0 )[ i, j, 1 ],
np.roll( grid, +1, axis=1 )[ i, j, 3 ] ]
sites.append( lattice_site.Site( grid[ i, j, 0 ], r, neighbours, 0.0, site_labels[0] ) )
r = np.array( [ i * sqrt(3) * spacing + sqrt(3)/2 * spacing, ( j * 3 + 0.5 ) * spacing, 0.0 ] )
neighbours = [ grid[ i, j, 0 ],
grid[ i, j, 2 ],
np.roll( grid, -1, axis=0 )[ i, j, 0 ] ]
sites.append( lattice_site.Site( grid[ i, j, 1 ], r, neighbours, 0.0, site_labels[1] ) )
r = np.array( [ i * sqrt(3) * spacing + sqrt(3)/2 * spacing, ( j * 3 + 1.5 ) * spacing, 0.0 ] )
neighbours = [ grid[ i, j, 1 ],
grid[ i, j, 3 ],
np.roll( grid, -1, axis=0 )[ i, j, 3 ] ]
sites.append( lattice_site.Site( grid[ i, j, 2 ], r, neighbours, 0.0, site_labels[2] ) )
r = np.array( [ i * sqrt(3) * spacing, ( j * 3 + 2 ) * spacing, 0.0 ] )
neighbours = [ grid[ i, j, 2 ],
np.roll( grid, +1, axis=0 )[ i, j, 2 ],
np.roll( grid, -1, axis=1 )[ i, j, 0 ] ]
sites.append( lattice_site.Site( grid[ i, j, 3 ], r, neighbours, 0.0, site_labels[3] ) )
return lattice.Lattice( sites, cell_lengths=cell_lengths )
|
Generate a honeycomb lattice.
Args:
a (Int): Number of lattice repeat units along x.
b (Int): Number of lattice repeat units along y.
spacing (Float): Distance between lattice sites.
alternating_sites (Bool, optional): Label alternating sites with 'A' and 'B'. Defaults to False.
Returns:
(Lattice): The new lattice
Notes:
The returned lattice is 3D periodic, but all sites and edges lie in the xy plane.
|
juraj-google-style
|
def __init__(self, parameter_name=None):
self._parameter_name = parameter_name
|
Construct a parameter modifier that may be specific to a parameter.
Args:
parameter_name: A `ParameterModifier` instance may operate on a class of
parameters or on a parameter with a particular name. Only
`ParameterModifier` instances that are of a unique type or were
initialized with a unique `parameter_name` will be executed.
See `__eq__` and `__hash__`.
|
github-repos
|
def __init__(self, output_mediator):
super(DynamicOutputModule, self).__init__(output_mediator)
self._dynamic_fields_helper = DynamicFieldsHelper(output_mediator)
self._field_delimiter = self._DEFAULT_FIELD_DELIMITER
self._fields = self._DEFAULT_FIELDS
|
Initializes an output module object.
Args:
output_mediator (OutputMediator): an output mediator.
|
juraj-google-style
|
def gaussian_deriv(duration: int, amp: complex, sigma: float, name: str=None) -> SamplePulse:
center = (duration / 2)
return _sampled_gaussian_deriv_pulse(duration, amp, center, sigma, name=name)
|
Generates an unnormalized Gaussian derivative `SamplePulse`.
Applies `left` sampling strategy to generate discrete pulse from continuous function.
Args:
duration: Duration of pulse. Must be greater than zero.
amp: Pulse amplitude at `center`.
sigma: Width (standard deviation) of pulse.
name: Name of pulse.
|
codesearchnet
|
async def set_notification_level(self, level):
await self._client.set_conversation_notification_level(
hangouts_pb2.SetConversationNotificationLevelRequest(
request_header=self._client.get_request_header(),
conversation_id=hangouts_pb2.ConversationId(id=self.id_),
level=level,
)
)
|
Set the notification level of this conversation.
Args:
level: ``NOTIFICATION_LEVEL_QUIET`` to disable notifications, or
``NOTIFICATION_LEVEL_RING`` to enable them.
Raises:
.NetworkError: If the request fails.
|
juraj-google-style
|
def jitChol(A, maxTries=10, warning=True):
jitter = 0
i = 0
while True:
try:
if (jitter == 0):
jitter = ((abs(SP.trace(A)) / A.shape[0]) * 1e-06)
LC = linalg.cholesky(A, lower=True)
return (LC.T, 0.0)
else:
if warning:
logging.error(('Adding jitter of %f in jitChol().' % jitter))
LC = linalg.cholesky((A + (jitter * SP.eye(A.shape[0]))), lower=True)
return (LC.T, jitter)
except linalg.LinAlgError:
if (i < maxTries):
jitter = (jitter * 10)
else:
raise linalg.LinAlgError((((('Matrix non positive definite, jitter of ' + str(jitter)) + ' added but failed after ') + str(i)) + ' trials.'))
i += 1
return LC
|
Do a Cholesky decomposition with jitter.
Description:
U, jitter = jitChol(A, maxTries, warning) attempts a Cholesky
decomposition on the given matrix, if matrix isn't positive
definite the function adds 'jitter' and tries again. Thereafter
the amount of jitter is multiplied by 10 each time it is added
again. This is continued for a maximum of 10 times. The amount of
jitter added is returned.
Returns:
U - the Cholesky decomposition for the matrix.
jitter - the amount of jitter that was added to the matrix.
Arguments:
A - the matrix for which the Cholesky decomposition is required.
maxTries - the maximum number of times that jitter is added before
giving up (default 10).
warning - whether to give a warning for adding jitter (default is True)
See also
CHOL, PDINV, LOGDET
Copyright (c) 2005, 2006 Neil D. Lawrence
|
codesearchnet
|
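A minimal standalone sketch of the same jitter idea using NumPy only (not the original SciPy-based implementation):
import numpy as np
def jittered_cholesky(A, max_tries=10):
    # Add a growing multiple of the identity until the factorization succeeds.
    jitter = 0.0
    for _ in range(max_tries):
        try:
            L = np.linalg.cholesky(A + jitter * np.eye(A.shape[0]))
            return L.T, jitter
        except np.linalg.LinAlgError:
            jitter = jitter * 10 if jitter else abs(np.trace(A)) / A.shape[0] * 1e-06
    raise np.linalg.LinAlgError('matrix not positive definite, last jitter tried: %g' % jitter)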
def set_data(self, data, from_db=False):
self._load_data(data, from_db)
return self
|
Fills the object's fields with given data dict.
Internally calls the self._load_data() method.
Args:
data (dict): Data to fill object's fields.
from_db (bool): if data coming from db then we will
use related field type's _load_data method
Returns:
Self. Returns the object itself for chainability.
|
juraj-google-style
|
def make_lines_texture(num_lines=10, resolution=50):
(x, y) = np.meshgrid(np.hstack([np.linspace(0, 1, resolution), np.nan]), np.linspace(0, 1, num_lines))
y[np.isnan(x)] = np.nan
return (x.flatten(), y.flatten())
|
Makes a texture consisting of a given number of horizontal lines.
Args:
num_lines (int): the number of lines to draw
resolution (int): the number of midpoints on each line
Returns:
A texture.
|
codesearchnet
|
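A small standalone check of the shapes produced (NumPy only; the function is repeated here so the snippet runs on its own):
import numpy as np
def make_lines_texture(num_lines=10, resolution=50):
    # NaN columns separate the individual horizontal lines when plotted.
    x, y = np.meshgrid(np.hstack([np.linspace(0, 1, resolution), np.nan]),
                       np.linspace(0, 1, num_lines))
    y[np.isnan(x)] = np.nan
    return x.flatten(), y.flatten()
x, y = make_lines_texture(num_lines=2, resolution=3)
print(x)  # [0.  0.5 1.  nan 0.  0.5 1.  nan]
print(y)  # [0.  0.  0.  nan 1.  1.  1.  nan]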
def unsubscribe(self, topic):
del self.queues[topic]
try:
self.client.unsubscribe(topic)
except operationError as exc:
raise InternalError("Could not unsubscribe from topic", topic=topic, message=exc.message)
|
Unsubscribe from messages on a given topic
Args:
topic (string): The MQTT topic to unsubscribe from
|
juraj-google-style
|
def _CheckIsPipe(self, file_entry):
if (definitions.FILE_ENTRY_TYPE_PIPE not in self._file_entry_types):
return False
return file_entry.IsPipe()
|
Checks the is_pipe find specification.
Args:
file_entry (FileEntry): file entry.
Returns:
bool: True if the file entry matches the find specification, False if not.
|
codesearchnet
|
def metrics(self):
raise NotImplementedError()
|
Returns :class:`~apache_beam.metrics.metric.MetricResults` object to
query metrics from the runner.
Raises:
NotImplementedError: If the runner does not support this
operation.
|
github-repos
|
def main():
pip_package_dependencies = subprocess.check_output(['bazel', 'cquery', '--experimental_cc_shared_library', PIP_PACKAGE_QUERY_EXPRESSION])
if isinstance(pip_package_dependencies, bytes):
pip_package_dependencies = pip_package_dependencies.decode('utf-8')
pip_package_dependencies_list = pip_package_dependencies.strip().split('\n')
pip_package_dependencies_list = [x.split()[0] for x in pip_package_dependencies_list]
print('Pip package superset size: %d' % len(pip_package_dependencies_list))
tf_py_test_dependencies = subprocess.check_output(['bazel', 'cquery', '--experimental_cc_shared_library', PY_TEST_QUERY_EXPRESSION])
if isinstance(tf_py_test_dependencies, bytes):
tf_py_test_dependencies = tf_py_test_dependencies.decode('utf-8')
tf_py_test_dependencies_list = tf_py_test_dependencies.strip().split('\n')
tf_py_test_dependencies_list = [x.split()[0] for x in tf_py_test_dependencies.strip().split('\n')]
print('Pytest dependency subset size: %d' % len(tf_py_test_dependencies_list))
missing_dependencies = []
ignore_extensions = ['_test', '_test.py', '_test_cpu', '_test_cpu.py', '_test_gpu', '_test_gpu.py', '_test_lib']
ignored_files_count = 0
denylisted_dependencies_count = len(DEPENDENCY_DENYLIST)
for dependency in tf_py_test_dependencies_list:
if dependency and dependency.startswith('
ignore = False
if any((dependency.endswith(ext) for ext in ignore_extensions)):
ignore = True
ignored_files_count += 1
if not (ignore or dependency in pip_package_dependencies_list or dependency in DEPENDENCY_DENYLIST):
missing_dependencies.append(dependency)
print('Ignored files count: %d' % ignored_files_count)
print('Denylisted dependencies count: %d' % denylisted_dependencies_count)
if missing_dependencies:
print('Missing the following dependencies from pip_packages:')
for missing_dependency in missing_dependencies:
print('\nMissing dependency: %s ' % missing_dependency)
print('Affected Tests:')
rdep_query = 'rdeps(kind(py_test, %s), %s)' % (' + '.join(PYTHON_TARGETS), missing_dependency)
affected_tests = subprocess.check_output(['bazel', 'cquery', '--experimental_cc_shared_library', rdep_query])
affected_tests_list = affected_tests.split('\n')[:-2]
print('\n'.join(affected_tests_list))
raise RuntimeError('\n One or more added test dependencies are not in the pip package.\nIf these test dependencies need to be in TensorFlow pip package, please add them to
else:
print('TEST PASSED')
|
This script runs the pip smoke test.
Raises:
RuntimeError: If any py_test dependency is missing from the pip package superset.
Prerequisites:
1. Bazel is installed.
2. Running in github repo of tensorflow.
3. Configure has been run.
|
github-repos
|
def encode_value(value):
if (value is None):
return document_pb2.Value(null_value=struct_pb2.NULL_VALUE)
if isinstance(value, bool):
return document_pb2.Value(boolean_value=value)
if isinstance(value, six.integer_types):
return document_pb2.Value(integer_value=value)
if isinstance(value, float):
return document_pb2.Value(double_value=value)
if isinstance(value, DatetimeWithNanoseconds):
return document_pb2.Value(timestamp_value=value.timestamp_pb())
if isinstance(value, datetime.datetime):
return document_pb2.Value(timestamp_value=_datetime_to_pb_timestamp(value))
if isinstance(value, six.text_type):
return document_pb2.Value(string_value=value)
if isinstance(value, six.binary_type):
return document_pb2.Value(bytes_value=value)
document_path = getattr(value, '_document_path', None)
if (document_path is not None):
return document_pb2.Value(reference_value=document_path)
if isinstance(value, GeoPoint):
return document_pb2.Value(geo_point_value=value.to_protobuf())
if isinstance(value, list):
value_list = [encode_value(element) for element in value]
value_pb = document_pb2.ArrayValue(values=value_list)
return document_pb2.Value(array_value=value_pb)
if isinstance(value, dict):
value_dict = encode_dict(value)
value_pb = document_pb2.MapValue(fields=value_dict)
return document_pb2.Value(map_value=value_pb)
raise TypeError('Cannot convert to a Firestore Value', value, 'Invalid type', type(value))
|
Converts a native Python value into a Firestore protobuf ``Value``.
Args:
value (Union[NoneType, bool, int, float, datetime.datetime, \
str, bytes, dict, ~google.cloud.Firestore.GeoPoint]): A native
Python value to convert to a protobuf field.
Returns:
~google.cloud.firestore_v1beta1.types.Value: A
value encoded as a Firestore protobuf.
Raises:
TypeError: If the ``value`` is not one of the accepted types.
|
codesearchnet
|
def _restore_training_state(self, restore_state):
self.load_state_dict(restore_state['model'])
self.optimizer.load_state_dict(restore_state['optimizer'])
self.lr_scheduler.load_state_dict(restore_state['lr_scheduler'])
start_iteration = (restore_state['iteration'] + 1)
if self.config['verbose']:
print(f'Restored checkpoint to iteration {start_iteration}.')
if restore_state['best_model_found']:
self.checkpointer.best_model_found = True
self.checkpointer.best_iteration = restore_state['best_iteration']
self.checkpointer.best_score = restore_state['best_score']
if self.config['verbose']:
print(f'Updated checkpointer: best_score={self.checkpointer.best_score:.3f}, best_iteration={self.checkpointer.best_iteration}')
return start_iteration
|
Restores the model and optimizer states
This helper function restores the model's state to a given iteration so
that a user can resume training at any epoch.
Args:
restore_state: a state_dict dictionary
|
codesearchnet
|
def load(self):
projects = {}
path = os.path.expanduser(self.path)
if (not os.path.isdir(path)):
return projects
logger.debug('Load project configs from %s', path)
for filename in os.listdir(path):
filename_parts = os.path.splitext(filename)
if (filename_parts[1][1:] != PROJECT_CONFIG_EXTENSION):
continue
name = filename_parts[0]
try:
project_file_path = os.path.join(path, filename)
with open(project_file_path) as f:
data = yaml.load(f)
projects[name] = data
except ValueError:
continue
logger.debug("Project '{}' config read from {}".format(name, project_file_path))
return projects
|
Load the projects config data from local path
Returns:
Dict: project_name -> project_data
|
codesearchnet
|
def wrap_deepmind(env, dim=84, framestack=True):
env = MonitorEnv(env)
env = NoopResetEnv(env, noop_max=30)
if ('NoFrameskip' in env.spec.id):
env = MaxAndSkipEnv(env, skip=4)
env = EpisodicLifeEnv(env)
if ('FIRE' in env.unwrapped.get_action_meanings()):
env = FireResetEnv(env)
env = WarpFrame(env, dim)
if framestack:
env = FrameStack(env, 4)
return env
|
Configure environment for DeepMind-style Atari.
Note that we assume reward clipping is done outside the wrapper.
Args:
dim (int): Dimension to resize observations to (dim x dim).
framestack (bool): Whether to framestack observations.
|
codesearchnet
|
def read(self, vals):
i = 0
if len(vals[i]) == 0:
self.leapyear_observed = None
else:
self.leapyear_observed = vals[i]
i += 1
if len(vals[i]) == 0:
self.daylight_saving_start_day = None
else:
self.daylight_saving_start_day = vals[i]
i += 1
if len(vals[i]) == 0:
self.daylight_saving_end_day = None
else:
self.daylight_saving_end_day = vals[i]
i += 1
count = int(vals[i])
i += 1
for _ in range(count):
obj = Holiday()
obj.read(vals[i:i + obj.field_count])
self.add_holiday(obj)
i += obj.field_count
|
Read values.
Args:
vals (list): list of strings representing values
|
juraj-google-style
|
def initialize_schema(connection):
cursor = connection.cursor()
cursor.execute('PRAGMA application_id={}'.format(_TENSORBOARD_APPLICATION_ID))
cursor.execute('PRAGMA user_version={}'.format(_TENSORBOARD_USER_VERSION))
with connection:
for statement in _SCHEMA_STATEMENTS:
lines = statement.strip('\n').split('\n')
message = (lines[0] + ('...' if (len(lines) > 1) else ''))
logger.debug('Running DB init statement: %s', message)
cursor.execute(statement)
|
Initializes the TensorBoard sqlite schema using the given connection.
Args:
connection: A sqlite DB connection.
|
codesearchnet
|
def from_file(filename='feff.inp'):
with zopen(filename, 'rt') as f:
lines = list(clean_lines(f.readlines()))
params = {}
eels_params = []
ieels = (- 1)
ieels_max = (- 1)
for (i, line) in enumerate(lines):
m = re.match('([A-Z]+\\d*\\d*)\\s*(.*)', line)
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Tags.proc_val(key, val)
if (key not in ('ATOMS', 'POTENTIALS', 'END', 'TITLE')):
if (key in ['ELNES', 'EXELFS']):
ieels = i
ieels_max = (ieels + 5)
else:
params[key] = val
if (ieels >= 0):
if ((i >= ieels) and (i <= ieels_max)):
if (i == (ieels + 1)):
if (int(line.split()[1]) == 1):
ieels_max -= 1
eels_params.append(line)
if eels_params:
if (len(eels_params) == 6):
eels_keys = ['BEAM_ENERGY', 'BEAM_DIRECTION', 'ANGLES', 'MESH', 'POSITION']
else:
eels_keys = ['BEAM_ENERGY', 'ANGLES', 'MESH', 'POSITION']
eels_dict = {'ENERGY': Tags._stringify_val(eels_params[0].split()[1:])}
for (k, v) in zip(eels_keys, eels_params[1:]):
eels_dict[k] = str(v)
params[str(eels_params[0].split()[0])] = eels_dict
return Tags(params)
|
Creates a Feff_tag dictionary from a PARAMETER or feff.inp file.
Args:
filename: Filename for either PARAMETER or feff.inp file
Returns:
Feff_tag object
|
codesearchnet
|
def write_index_and_rst_files(self, overwrite: bool=False, mock: bool=False) -> None:
for f in self.files_to_index:
if isinstance(f, FileToAutodocument):
f.write_rst(prefix=self.rst_prefix, suffix=self.rst_suffix, heading_underline_char=self.source_rst_heading_underline_char, overwrite=overwrite, mock=mock)
elif isinstance(f, AutodocIndex):
f.write_index_and_rst_files(overwrite=overwrite, mock=mock)
else:
fail('Unknown thing in files_to_index: {!r}'.format(f))
self.write_index(overwrite=overwrite, mock=mock)
|
Writes both the individual RST files and the index.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't
|
codesearchnet
|
def save_with_exif_info(img, *args, **kwargs):
if 'exif' in kwargs:
exif = kwargs.pop('exif')
else:
exif = img.info.get('exif')
img.save(*args, exif=exif, **kwargs)
|
Saves an image using PIL, preserving the exif information.
Args:
img (PIL.Image.Image):
*args: The arguments for the `save` method of the Image class.
**kwargs: The keywords for the `save` method of the Image class.
|
juraj-google-style
|
def predict_undirected_graph(self, data):
graph = Graph()
for (idx_i, i) in enumerate(data.columns):
for (idx_j, j) in enumerate(data.columns[(idx_i + 1):]):
score = self.predict(data[i].values, data[j].values)
if (abs(score) > 0.001):
graph.add_edge(i, j, weight=score)
return graph
|
Build a skeleton using a pairwise independence criterion.
Args:
data (pandas.DataFrame): Raw data table
Returns:
networkx.Graph: Undirected graph representing the skeleton.
|
codesearchnet
|
def append(self, transitions, rows=None):
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
assert_capacity = tf.assert_less(
rows, self._capacity,
message='capacity exceeded')
with tf.control_dependencies([assert_capacity]):
assert_max_length = tf.assert_less(
tf.gather(self._length, rows), self._max_length,
message='max length exceeded')
with tf.control_dependencies([assert_max_length]):
timestep = tf.gather(self._length, rows)
indices = tf.stack([rows, timestep], 1)
append_ops = tools.nested.map(
lambda var, val: tf.scatter_nd_update(var, indices, val),
self._buffers, transitions, flatten=True)
with tf.control_dependencies(append_ops):
episode_mask = tf.reduce_sum(tf.one_hot(
rows, self._capacity, dtype=tf.int32), 0)
return self._length.assign_add(episode_mask)
|
Append a batch of transitions to rows of the memory.
Args:
transitions: Tuple of transition quantities with batch dimension.
rows: Episodes to append to, defaults to all.
Returns:
Operation.
|
juraj-google-style
|
def parse_conservation(variant, info_key):
raw_score = variant.INFO.get(info_key)
conservations = []
if raw_score:
if isinstance(raw_score, numbers.Number):
raw_score = (raw_score,)
for score in raw_score:
if (score >= CONSERVATION[info_key]['conserved_min']):
conservations.append('Conserved')
else:
conservations.append('NotConserved')
return conservations
|
Get the conservation prediction
Args:
variant(dict): A variant dictionary
info_key(str)
Returns:
conservations(list): List of conservation terms
|
codesearchnet
|
def restore(self, sess, save_path):
start_time = time.time()
if self._is_empty:
return
if save_path is None:
raise ValueError("Can't load save_path when it is None.")
checkpoint_prefix = compat.as_text(save_path)
if not checkpoint_management.checkpoint_exists_internal(checkpoint_prefix):
raise ValueError('The passed save_path is not a valid checkpoint: ' + checkpoint_prefix)
logging.info('Restoring parameters from %s', checkpoint_prefix)
try:
if context.executing_eagerly():
self._build_eager(save_path, build_save=False, build_restore=True)
else:
sess.run(self.saver_def.restore_op_name, {self.saver_def.filename_tensor_name: save_path})
except errors.NotFoundError as err:
try:
names_to_keys = object_graph_key_mapping(save_path)
except errors.NotFoundError:
raise _wrap_restore_error_with_msg(err, 'a Variable name or other graph key that is missing')
logging.warning('Restoring an object-based checkpoint using a name-based saver. This may be somewhat fragile, and will re-build the Saver. Instead, consider loading object-based checkpoints using tf.train.Checkpoint().')
self._object_restore_saver = saver_from_object_based_checkpoint(checkpoint_path=save_path, var_list=self._var_list, builder=self._builder, names_to_keys=names_to_keys, cached_saver=self._object_restore_saver)
self._object_restore_saver.restore(sess=sess, save_path=save_path)
except errors.InvalidArgumentError as err:
raise _wrap_restore_error_with_msg(err, 'a mismatch between the current graph and the graph')
metrics.AddCheckpointReadDuration(api_label=_SAVER_LABEL, microseconds=_get_duration_microseconds(start_time, time.time()))
|
Restores previously saved variables.
This method runs the ops added by the constructor for restoring variables.
It requires a session in which the graph was launched. The variables to
restore do not have to have been initialized, as restoring is itself a way
to initialize variables.
The `save_path` argument is typically a value previously returned from a
`save()` call, or a call to `latest_checkpoint()`.
Args:
sess: A `Session` to use to restore the parameters. None in eager mode.
save_path: Path where parameters were previously saved.
Raises:
ValueError: If save_path is None or not a valid checkpoint.
|
github-repos
|
def place_line(self,
device: 'cirq.google.XmonDevice',
length: int) -> GridQubitLineTuple:
if not device.qubits:
return GridQubitLineTuple()
start = min(device.qubits)
sequences = []
greedy_search = {
'minimal_connectivity': [
_PickFewestNeighbors(device, start),
],
'largest_area': [
_PickLargestArea(device, start),
],
'best': [
_PickFewestNeighbors(device, start),
_PickLargestArea(device, start),
]
}
algos = greedy_search.get(self.algorithm)
if algos is None:
raise ValueError(
"Unknown greedy search algorithm %s" % self.algorithm)
for algorithm in algos:
sequences.append(algorithm.get_or_search())
return GridQubitLineTuple.best_of(sequences, length)
|
Runs line sequence search.
Args:
device: Chip description.
length: Required line length.
Returns:
Linear sequences found on the chip.
Raises:
ValueError: If search algorithm passed on initialization is not
recognized.
|
juraj-google-style
|
def _CreateFeedMapping(client, feed_details):
feed_mapping_service = client.GetService('FeedMappingService', version='v201809')
operation = {'operand': {'criterionType': DSA_PAGE_FEED_CRITERION_TYPE, 'feedId': feed_details.feed_id, 'attributeFieldMappings': [{'feedAttributeId': feed_details.url_attribute_id, 'fieldId': DSA_PAGE_URLS_FIELD_ID}, {'feedAttributeId': feed_details.label_attribute_id, 'fieldId': DSA_LABEL_FIELD_ID}]}, 'operator': 'ADD'}
feed_mapping_service.mutate([operation])
|
Creates the feed mapping for DSA page feeds.
Args:
client: an AdWordsClient instance.
feed_details: a _DSAFeedDetails instance.
|
codesearchnet
|
def __init__(self,
html_id=None,
title=None,
description=None,
widgets=None,
template=None,
context=None,
**kwargs):
if widgets is not None:
if not isinstance(widgets, (list, tuple)):
raise AttributeError('Box widgets attribute '
'must be a list or tuple')
if not all([isinstance(e, Widget) for e in widgets]):
raise ValueError('All elements of Box must be Widget instances')
try:
self.widgets = widgets
except AttributeError:
self._widgets = widgets
self.type = 'box'
if html_id is not None:
try:
self.html_id = html_id
except AttributeError:
self._html_id = html_id
if title is not None:
try:
self.title = title
except AttributeError:
self._title = title
if description is not None:
try:
self.description = description
except AttributeError:
self._description = description
if template is not None:
try:
self.template = template
except AttributeError:
self._template = template
if context is not None:
try:
self.context = context
except AttributeError:
self._context = context
for kw, arg in kwargs.items():
setattr(self, kw, arg)
|
Init method.
Args:
html_id (str): an ID to set on the HTML box.
title (str): a title to display on the top of the box.
description (str): a description to display after the title box.
widgets (list): the box's list of widgets.
template (str): the path to a custom template to use for this box.
context (dict): additional context to pass to the box.
|
juraj-google-style
|
def experimental_local_results(self, value):
return super(CentralStorageStrategy, self).experimental_local_results(value)
|
Returns the list of all local per-replica values contained in `value`.
In `CentralStorageStrategy` there is a single worker so the value returned
will be all the values on that worker.
Args:
value: A value returned by `run()`, `extended.call_for_each_replica()`,
or a variable created in `scope`.
Returns:
A tuple of values contained in `value`. If `value` represents a single
value, this returns `(value,)`.
|
github-repos
|
def image_summary(seqs, name, num=None):
seqs = tf.clip_by_value(seqs, 0., 1.)
seqs = tf.unstack(seqs[:num])
joined_seqs = [tf.concat(tf.unstack(seq), 1) for seq in seqs]
joined_seqs = tf.expand_dims(tf.concat(joined_seqs, 0), 0)
tf.compat.v2.summary.image(
name,
joined_seqs,
max_outputs=1,
step=tf.compat.v1.train.get_or_create_global_step())
|
Visualizes sequences as TensorBoard summaries.
Args:
seqs: A tensor of shape [n, t, h, w, c].
name: String name of this summary.
num: Integer for the number of examples to visualize. Defaults to
all examples.
|
juraj-google-style
|
def compute_edges(self, rules: List[str]=None, ast_result=False, fmt='medium') -> List[Mapping[(str, Any)]]:
if (not self.ast):
return self
edges_asts = bel.edge.computed.compute_edges(self.ast, self.spec)
if ast_result:
return edges_asts
edges = []
for ast in edges_asts:
edges.append({'subject': ast.bel_subject.to_string(), 'relation': ast.bel_relation, 'object': ast.bel_object.to_string()})
return edges
|
Computed edges from primary BEL statement
Takes an AST and generates all computed edges based on BEL Specification YAML computed signatures.
Will run only the list of computed edge rules if given.
Args:
rules (list): a list of rules to filter; only the rules in this list will be applied when computing edges
fmt (str): short, medium or long version of BEL Edge (function and relation names)
Returns:
List[Mapping[str, Any]]: BEL Edges in medium format
|
codesearchnet
|
def write(self, *parts: WritableTypes, shared_parts_only: bool=False) -> 'Content':
content_updated = False
for p in parts:
p = self._to_content(p)
if p is None:
continue
if not isinstance(p, (str, self.__class__)):
raise TypeError(f'{p!r} ({type(p)}) cannot be writable. Only str, None, {self.__class__.__name__} and callable object that returns one of them are supported.')
if isinstance(p, Content):
current = self._shared_parts
for k, v in p.shared_parts.items():
current[k].add(v)
p = p.content
if not shared_parts_only:
self._content_stream.write(p)
content_updated = True
if content_updated:
self.__dict__.pop('content', None)
return self
|
Writes one or more parts to current Content.
Args:
*parts: The parts to be written. Each part can be a string, a Content
object, a callable that returns one of the above, or None.
shared_parts_only: If True, only write the shared parts.
Returns:
The current Content object for chaining.
|
github-repos
|
def advise(self, options):
advise_pb = tfprof_output_pb2.AdviceProto()
opts = _build_advisor_options(options)
advise_pb.ParseFromString(print_mdl.Profile('advise'.encode('utf-8'), opts.SerializeToString()))
return advise_pb
|
Automatically detect problems and generate reports.
Args:
options: A dict of options. See ALL_ADVICE example above.
Returns:
An Advise proto that contains the reports from all checkers.
|
github-repos
|
def energy_string_to_float( string ):
energy_re = re.compile( r"(-?\d+\.\d+)" )
return float( energy_re.match( string ).group(0) )
|
Convert a string of a calculation energy, e.g. '-1.2345 eV' to a float.
Args:
string (str): The string to convert.
Returns:
(float)
|
juraj-google-style
|
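A quick standalone check of the regex conversion:
import re
energy_re = re.compile(r"(-?\d+\.\d+)")
print(float(energy_re.match('-1.2345 eV').group(0)))  # -1.2345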
def test_skip(self, e=None):
self._test_end(TestResultEnums.TEST_RESULT_SKIP, e)
|
To mark the test as skipped in this record.
Args:
e: An instance of mobly.signals.TestSkip.
|
github-repos
|
def get_resolution(pdb_id):
pdb_id = pdb_id.upper()
if (pdb_id not in _property_table().index):
raise ValueError('PDB ID not in property table')
else:
resolution = _property_table().ix[(pdb_id, 'resolution')]
if pd.isnull(resolution):
log.debug('{}: no resolution available, probably not an X-ray crystal structure'.format(pdb_id))
resolution = float('inf')
return resolution
|
Quick way to get the resolution of a PDB ID using the table of results from the REST service
Returns infinity if the resolution is not available.
Returns:
float: resolution of a PDB ID in Angstroms
TODO:
- Unit test
|
codesearchnet
|
def plot_ax(self, ax=None, fontsize=12, **kwargs):
(ax, fig, plt) = get_ax_fig_plt(ax=ax)
color = kwargs.get('color', 'r')
label = kwargs.get('label', '{} fit'.format(self.__class__.__name__))
lines = [('Equation of State: %s' % self.__class__.__name__), ('Minimum energy = %1.2f eV' % self.e0), ('Minimum or reference volume = %1.2f Ang^3' % self.v0), ('Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa' % (self.b0, self.b0_GPa)), ('Derivative of bulk modulus wrt pressure = %1.2f' % self.b1)]
text = '\n'.join(lines)
text = kwargs.get('text', text)
ax.plot(self.volumes, self.energies, linestyle='None', marker='o', color=color)
(vmin, vmax) = (min(self.volumes), max(self.volumes))
(vmin, vmax) = ((vmin - (0.01 * abs(vmin))), (vmax + (0.01 * abs(vmax))))
vfit = np.linspace(vmin, vmax, 100)
ax.plot(vfit, self.func(vfit), linestyle='dashed', color=color, label=label)
ax.grid(True)
ax.set_xlabel('Volume $\\AA^3$')
ax.set_ylabel('Energy (eV)')
ax.legend(loc='best', shadow=True)
ax.text(0.5, 0.5, text, fontsize=fontsize, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)
return fig
|
Plot the equation of state on axis `ax`
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
fontsize: Legend fontsize.
color (str): plot color.
label (str): Plot label
text (str): Legend text (optional)
Returns:
Matplotlib figure object.
|
codesearchnet
|
def __init__(self, scope, parent, result, value=(), paren=False):
try:
value = list(value)
except TypeError as te:
raise AssertionError(str(te))
CodeLiteral.__init__(self, scope, parent, value, result, paren)
|
Constructor for a compound literal.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
value (iterable): The initial value sequence in this composition.
result (str): The return type of the literal in the program.
Kwargs:
paren (bool): Whether the literal is enclosed in parentheses.
|
juraj-google-style
|
def transpose(a, axes=None):
if isinstance(a, np.ndarray):
return np.transpose(a, axes)
elif isinstance(a, RemoteArray):
return a.transpose(*axes)
elif isinstance(a, Remote):
return _remote_to_array(a).transpose(*axes)
elif isinstance(a, DistArray):
if (axes is None):
axes = range((a.ndim - 1), (- 1), (- 1))
axes = list(axes)
if (len(set(axes)) < len(axes)):
raise ValueError('repeated axis in transpose')
if (sorted(axes) != list(range(a.ndim))):
raise ValueError("axes don't match array")
distaxis = a._distaxis
new_distaxis = axes.index(distaxis)
new_subarrays = [ra.transpose(*axes) for ra in a._subarrays]
return DistArray(new_subarrays, new_distaxis)
else:
return np.transpose(a, axes)
|
Returns a view of the array with axes transposed.
For a 1-D array, this has no effect.
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted
Args:
a (array_like): Input array.
axes (list of int, optional): By default, reverse the dimensions,
otherwise permute the axes according to the values given.
|
codesearchnet
|
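For plain ndarrays the function dispatches straight to NumPy; a minimal standalone check of that path:
import numpy as np
a = np.arange(6).reshape(2, 3)
print(np.transpose(a).shape)          # (3, 2) -- default reverses all axes
print(np.transpose(a, (1, 0)).shape)  # (3, 2) -- explicit axis permutation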
def __init__(self, ident, latitude, longitude, visible=False, user=None,
timestamp=None, tags=None):
super(Node, self).__init__(latitude, longitude)
self.ident = ident
self.visible = visible
self.user = user
self.timestamp = timestamp
self.tags = tags
|
Initialise a new ``Node`` object.
Args:
ident (int): Unique identifier for the node
latitude (float): Node's latitude
longitude (float): Node's longitude
visible (bool): Whether the node is visible
user (str): User who logged the node
timestamp (str): The date and time a node was logged
tags (dict): Tags associated with the node
|
juraj-google-style
|
def get_data_with_timestamps(self):
result = []
for (t, d) in zip(self.timestamps, self.data_points):
result.append((t, round(d, self.lr)))
return result
|
Returns the data points with timestamps.
Returns:
A list of tuples in the format of (timestamp, data)
|
codesearchnet
|
def discard_observer(self, observer):
discarded = False
key = self.make_key(observer)
if key in self.observers:
del self.observers[key]
discarded = True
return discarded
|
Un-register an observer.
Args:
observer: The observer to un-register.
Returns:
True if an observer was removed, otherwise False.
|
juraj-google-style
|
def merge(self, other):
if (other.seed != self.seed):
raise ValueError('Cannot merge MinHash with different seeds')
if (len(self) != len(other)):
raise ValueError('Cannot merge MinHash with different numbers of permutation functions')
self.hashvalues = np.minimum(other.hashvalues, self.hashvalues)
|
Merge the other MinHash with this one, making this one the union
of both.
Args:
other (datasketch.MinHash): The other MinHash.
|
codesearchnet
|
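The union step is just an element-wise minimum over the hash values; a standalone NumPy sketch of that core operation (not the datasketch API itself):
import numpy as np
hashvalues_a = np.array([7, 3, 9, 2], dtype=np.uint64)
hashvalues_b = np.array([5, 4, 1, 8], dtype=np.uint64)
print(np.minimum(hashvalues_a, hashvalues_b))  # [5 3 1 2] -- signature of the union of both sets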
def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, top_k: int=100):
out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)
if target_sizes is not None:
if len(out_logits) != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
prob = out_logits.sigmoid()
prob = prob.view(out_logits.shape[0], -1)
k_value = min(top_k, prob.size(1))
topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)
scores = topk_values
topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')
labels = topk_indexes % out_logits.shape[2]
boxes = center_to_corners_format(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
if target_sizes is not None:
if isinstance(target_sizes, List):
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
else:
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
results = []
for s, l, b in zip(scores, labels, boxes):
score = s[s > threshold]
label = l[s > threshold]
box = b[s > threshold]
results.append({'scores': score, 'labels': label, 'boxes': box})
return results
|
Converts the raw output of [`YolosForObjectDetection`] into final bounding boxes in (top_left_x,
top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`YolosObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
top_k (`int`, *optional*, defaults to 100):
Keep only top k bounding boxes before filtering by thresholding.
Returns:
`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
|
github-repos
|
def _ConvertHeaderToId(header):
if (not (header.startswith('<') or header.endswith('>'))):
raise exceptions.BatchError(('Invalid value for Content-ID: %s' % header))
if ('+' not in header):
raise exceptions.BatchError(('Invalid value for Content-ID: %s' % header))
(_, request_id) = header[1:(- 1)].rsplit('+', 1)
return urllib_parse.unquote(request_id)
|
Convert a Content-ID header value to an id.
Presumes the Content-ID header conforms to the format that
_ConvertIdToHeader() returns.
Args:
header: A string indicating the Content-ID header value.
Returns:
The extracted id value.
Raises:
BatchError if the header is not in the expected format.
|
codesearchnet
|
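A standalone sketch of the parsing, using a hypothetical header value:
from urllib import parse as urllib_parse
header = '<batch-id+42>'  # hypothetical Content-ID value
_, request_id = header[1:-1].rsplit('+', 1)
print(urllib_parse.unquote(request_id))  # '42'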
def claim(self, file_readers):
unclaimed_readers = []
vcf_readers = []
for file_reader in file_readers:
if self._is_mutect_vcf(file_reader):
vcf_reader = vcf.VcfReader(file_reader)
vcf_readers.append(_MutectVcfReader(vcf_reader))
else:
unclaimed_readers.append(file_reader)
return (unclaimed_readers, vcf_readers)
|
Recognizes and claims MuTect VCFs from the set of all input VCFs.
Each defined caller has a chance to evaluate and claim all the incoming
files as something that it can process.
Args:
file_readers: the collection of currently unclaimed files
Returns:
A tuple of unclaimed readers and MuTectVcfReaders.
|
codesearchnet
|
def insert_arguments_into_sql_query(compilation_result, arguments):
if (compilation_result.language != SQL_LANGUAGE):
raise AssertionError(u'Unexpected query output language: {}'.format(compilation_result))
base_query = compilation_result.query
return base_query.params(**arguments)
|
Insert the arguments into the compiled SQL query to form a complete query.
Args:
compilation_result: CompilationResult, compilation result from the GraphQL compiler.
arguments: Dict[str, Any], parameter name -> value, for every parameter the query expects.
Returns:
SQLAlchemy Selectable, an executable SQL query with parameters bound.
|
codesearchnet
|
def _register_info(self, server):
server_url = urllib.parse.urlparse(server.get_url())
info = manager.TensorBoardInfo(version=version.VERSION, start_time=int(time.time()), port=server_url.port, pid=os.getpid(), path_prefix=self.flags.path_prefix, logdir=self.flags.logdir, db=self.flags.db, cache_key=self.cache_key)
atexit.register(manager.remove_info_file)
manager.write_info_file(info)
|
Write a TensorBoardInfo file and arrange for its cleanup.
Args:
server: The result of `self._make_server()`.
|
codesearchnet
|
def match(sel, obj, arr=None, bailout_fn=None):
if arr:
sel = interpolate(sel, arr)
sel = parse(sel)[1]
return _forEach(sel, obj, bailout_fn=bailout_fn)
|
Match a selector to an object, yielding the matched values.
Args:
sel: The JSONSelect selector to apply (a string)
obj: The object against which to apply the selector
arr: If sel contains ? characters, then the values in this array will
be safely interpolated into the selector.
bailout_fn: A callback which takes two parameters, |obj| and |matches|.
This will be called on every node in obj. If it returns True, the
search for matches will be aborted below that node. The |matches|
parameter indicates whether the node matched the selector. This is
intended to be used as a performance optimization.
|
codesearchnet
|
def get_ip_prefixes_from_config(config, services, ip_version):
ip_prefixes = set()
for service in services:
ip_prefix = ipaddress.ip_network(config.get(service, 'ip_prefix'))
if ip_prefix.version == ip_version:
ip_prefixes.add(ip_prefix.with_prefixlen)
return ip_prefixes
|
Build a set of IP prefixes found in service configuration files.
Arguments:
config (obj): A configparser object which holds our configuration.
services (list): A list of section names which are the name of the
service checks.
ip_version (int): IP protocol version
Returns:
A set of IP prefixes.
|
juraj-google-style
|
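A standalone sketch with a hypothetical configuration (section and option names are illustrative):
import configparser
import ipaddress
config = configparser.ConfigParser()
config.read_string('[svc1]\nip_prefix = 10.1.0.0/24\n[svc2]\nip_prefix = 2001:db8::/64\n')
ip_prefixes = set()
for service in ['svc1', 'svc2']:
    ip_prefix = ipaddress.ip_network(config.get(service, 'ip_prefix'))
    if ip_prefix.version == 4:
        ip_prefixes.add(ip_prefix.with_prefixlen)
print(ip_prefixes)  # {'10.1.0.0/24'}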
def outputZip(self,figtype='png'):
from zipfile import ZipFile
with ZipFile(self.outfile+'.zip', 'w') as zipcontainer:
zipcontainer.writestr(
'summary.txt',
'
self.title,
self.p,
('\n
).encode()
)
c = count(1)
for section in self.sections:
section.sectionOutZip(zipcontainer,'s{}_{}/'.format(next(c),section.title.replace(' ','_')),
figtype=figtype)
|
Outputs the report in a zip container.
Figures and tables as PNGs and Excel files.
Args:
figtype (str): Figure type of images in the zip folder.
|
juraj-google-style
|
def add_scheduling_block(config, schema_path=None):
if (schema_path is None):
schema_path = os.path.join(os.path.dirname(__file__), 'sbi_post.json')
schema = load_schema(schema_path)
jsonschema.validate(config, schema)
DB.set('scheduling_block/{}'.format(config['id']), json.dumps(config))
DB.rpush('scheduling_block_events', json.dumps(dict(type='created', id=config['id'])))
|
Add a Scheduling Block to the Configuration Database.
The configuration dictionary must match the schema defined in
in the schema_path variable at the top of the function.
Args:
config (dict): Scheduling Block instance request configuration.
schema_path (str): Path to schema file used to validate the
Scheduling Block Instance request
|
codesearchnet
|
def test_ingraph_train_loop(self, mode):
self._maybe_skip(mode)
if tf2.enabled():
self.skipTest('TensorFlow 1 required')
with ops.device(_get_device(mode)):
random_seed.set_random_seed(1234)
np.random.seed(1234)
num_iter, bs, nchan, nclass = (100, 64, 32, 100)
data = np.random.normal(size=(bs * num_iter, nchan)).astype(np.float32)
labels = np.random.randint(nclass, size=(bs * num_iter,))
ds = dataset_ops.Dataset.from_tensor_slices((data, labels))
ds = ds.batch(bs).prefetch(3)
it = ds.make_one_shot_iterator()
def body(_, i):
i += 1
x, yt = it.get_next()
dense = layers.Dense(nclass)
y = dense(x)
loss = losses.sparse_softmax_cross_entropy(yt, y)
opt = adam.AdamOptimizer()
train_op = opt.minimize(loss, var_list=dense.trainable_weights)
with ops.control_dependencies([train_op]):
loss = array_ops.identity(loss)
return (loss, i)
begin, end = (constant_op.constant(0), constant_op.constant(num_iter))
loss, _ = while_loop.while_loop(lambda loss, i: math_ops.less(i, end), body, [0.0, begin])
output_val_ref, output_val, cost_graph = self._run(mode, loss)
node_map = _build_node_map(cost_graph.node)
self._assert_output_f16(mode, node_map, 'while/dense/MatMul')
self._assert_output_f16(mode, node_map, 'while/gradients/while/dense/MatMul_grad/MatMul_1')
self.assertAllClose(output_val_ref, output_val, atol=0.001, rtol=0.001)
|
Tests a graph containing a while loop around a training update.
This requires the grappler pass to take special care with its handling of
Enter ops that appear in front of reads from non-resource variables. See
the use of NodeImplicitlyReadsVariable in auto_mixed_precision.cc.
Args:
mode: Either 'cuda' or 'mkl'.
|
github-repos
|
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1 is not None:
output += token_ids_1 + [self.sep_token_id]
return output
|
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A REALM sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
github-repos
|
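A minimal sketch of the resulting layout, using hypothetical special-token IDs (101 for [CLS], 102 for [SEP]):
cls_token_id, sep_token_id = 101, 102
token_ids_0 = [7, 8, 9]
token_ids_1 = [20, 21]
single = [cls_token_id] + token_ids_0 + [sep_token_id]
pair = single + token_ids_1 + [sep_token_id]
print(single)  # [101, 7, 8, 9, 102]               -> [CLS] X [SEP]
print(pair)    # [101, 7, 8, 9, 102, 20, 21, 102]  -> [CLS] A [SEP] B [SEP]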
def add_period_and_roll(self, date_tensor, period_tensor, roll_convention=constants.BusinessDayConvention.NONE):
pass
|
Adds given periods to given dates and rolls to business days.
The original dates are not rolled prior to addition.
Args:
date_tensor: DateTensor of dates to add to.
period_tensor: PeriodTensor broadcastable to `date_tensor`.
roll_convention: BusinessDayConvention. Determines how to roll a date that
falls on a holiday.
Returns:
The resulting DateTensor.
|
github-repos
|
def create_opengl_context(surface_size=(640, 480)):
egl_display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY)
major, minor = egl.EGLint(), egl.EGLint()
egl.eglInitialize(egl_display, pointer(major), pointer(minor))
config_attribs = [
egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, egl.EGL_BLUE_SIZE, 8,
egl.EGL_GREEN_SIZE, 8, egl.EGL_RED_SIZE, 8, egl.EGL_DEPTH_SIZE, 24,
egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, egl.EGL_NONE
]
config_attribs = (egl.EGLint * len(config_attribs))(*config_attribs)
num_configs = egl.EGLint()
egl_cfg = egl.EGLConfig()
egl.eglChooseConfig(egl_display, config_attribs, pointer(egl_cfg), 1,
pointer(num_configs))
width, height = surface_size
pbuffer_attribs = [
egl.EGL_WIDTH,
width,
egl.EGL_HEIGHT,
height,
egl.EGL_NONE,
]
pbuffer_attribs = (egl.EGLint * len(pbuffer_attribs))(*pbuffer_attribs)
egl_surf = egl.eglCreatePbufferSurface(egl_display, egl_cfg, pbuffer_attribs)
egl.eglBindAPI(egl.EGL_OPENGL_API)
egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT,
None)
egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)
|
Create offscreen OpenGL context and make it current.
Users are expected to directly use EGL API in case more advanced
context management is required.
Args:
surface_size: (width, height), size of the offscreen rendering surface.
|
juraj-google-style
|
def set_scf_algorithm_and_iterations(self, algorithm='diis', iterations=50):
available_algorithms = {'diis', 'dm', 'diis_dm', 'diis_gdm', 'gdm', 'rca', 'rca_diis', 'roothaan'}
if (algorithm.lower() not in available_algorithms):
raise ValueError((('Algorithm ' + algorithm) + ' is not available in QChem'))
self.params['rem']['scf_algorithm'] = algorithm.lower()
self.params['rem']['max_scf_cycles'] = iterations
|
Set algorithm used for converging SCF and max number of SCF iterations.
Args:
algorithm: The algorithm used for converging SCF. (str)
iterations: The max number of SCF iterations. (Integer)
|
codesearchnet
|
def wait_until_element_not_visible(webdriver, locator_lambda_expression, timeout=WTF_TIMEOUT_MANAGER.NORMAL, sleep=0.5):
try:
stoptime = (datetime.now() + timedelta(seconds=timeout))
while (datetime.now() < stoptime):
element = WebDriverWait(webdriver, WTF_TIMEOUT_MANAGER.BRIEF).until(locator_lambda_expression)
if element.is_displayed():
time.sleep(sleep)
else:
break
except TimeoutException:
pass
|
Wait for a WebElement to disappear.
Args:
webdriver (Webdriver) - Selenium Webdriver
locator_lambda_expression (lambda) - Locator lambda expression.
Kwargs:
timeout (number) - timeout period
sleep (number) - sleep period between intervals.
|
codesearchnet
|
def _GetFileByPath(self, key_path_upper):
key_path_prefix, registry_file = self._GetCachedFileByPath(key_path_upper)
if not registry_file:
for mapping in self._GetFileMappingsByPath(key_path_upper):
try:
registry_file = self._OpenFile(mapping.windows_path)
except IOError:
registry_file = None
if not registry_file:
continue
if not key_path_prefix:
key_path_prefix = mapping.key_path_prefix
self.MapFile(key_path_prefix, registry_file)
key_path_prefix = key_path_prefix.upper()
break
return key_path_prefix, registry_file
|
Retrieves a Windows Registry file for a specific path.
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Returns:
tuple: consists:
str: upper case key path prefix
WinRegistryFile: corresponding Windows Registry file or None if not
available.
|
juraj-google-style
|
def NeedsSeparatingHyphenHyphen(self, flag='help'):
element = self.GetLastHealthyElement()
component = element.component
spec = inspectutils.GetFullArgSpec(component)
return spec.varkw is not None or flag in spec.args or flag in spec.kwonlyargs
|
Returns whether the trace needs '--' before '--help'.
'--' is needed when the component takes keyword arguments, when the value of
flag matches one of the arguments of the component, or when the component takes
keyword-only arguments (e.g. arguments with default values).
Args:
flag: the flag available for the trace
Returns:
True for needed '--', False otherwise.
|
github-repos
|
def attention_mask_autoregressive(query_pos, dtype=tf.float32):
memory_pos = rename_length_to_memory_length(query_pos)
return mtf.cast(mtf.less(query_pos, memory_pos), dtype) * -1e9
|
Bias for self-attention where attention to the right is disallowed.
Args:
query_pos: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim]
|
juraj-google-style
|
def _get_duration_microseconds(start_time_seconds, end_time_seconds):
if end_time_seconds < start_time_seconds:
return 0
return round((end_time_seconds - start_time_seconds) * 1000000)
|
Calculate the duration between start and end time.
Args:
start_time_seconds: The start time in seconds.
end_time_seconds: The end time in seconds.
Returns:
The duration between the start and the end time. Return 0 if
end_time_seconds < start_time_seconds.
|
github-repos
|
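Two quick worked cases (the helper is repeated here so the snippet runs on its own):
def _get_duration_microseconds(start_time_seconds, end_time_seconds):
    if end_time_seconds < start_time_seconds:
        return 0
    return round((end_time_seconds - start_time_seconds) * 1000000)
print(_get_duration_microseconds(1.0, 1.5))  # 500000
print(_get_duration_microseconds(2.0, 1.5))  # 0 -- an end time before the start time is clamped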
def with_headers(self, headers):
copy = headers.copy()
copy.update(self._headers)
return self.__copy_and_set('headers', copy)
|
Adds headers to the request
Args:
headers (dict): The headers to add the request headers
Returns:
The request builder instance in order to chain calls
|
juraj-google-style
|
def extract_objects(self, fname, type_filter=None):
objects = []
if (fname in self.object_cache):
objects = self.object_cache[fname]
else:
with io.open(fname, 'rt', encoding='utf-8') as fh:
text = fh.read()
objects = parse_verilog(text)
self.object_cache[fname] = objects
if type_filter:
objects = [o for o in objects if isinstance(o, type_filter)]
return objects
|
Extract objects from a source file
Args:
fname(str): Name of file to read from
type_filter (class, optional): Object class to filter results
Returns:
List of objects extracted from the file.
|
codesearchnet
|
def render_to_image_file(self, image_out_path, width_pixels=None, height_pixels=None, dpi=90):
self._render_type = 'file'
self._tree.render(file_name=image_out_path, w=width_pixels, h=height_pixels, dpi=dpi, units='px', tree_style=self._get_tree_style())
|
Render the SubjectInfo to an image file.
Args:
image_out_path : str
Path to where image image will be written. Valid extensions are
``.svg,`` ``.pdf``, and ``.png``.
width_pixels : int
Width of image to write.
height_pixels : int
Height of image to write, in pixels.
dpi:
Dots Per Inch to declare in image file. This does not change the
resolution of the image but may change the size of the image when
rendered.
Returns:
None
|
codesearchnet
|
def set_fresh_watermark(game_queue, count_from, window_size, fresh_fraction=0.05, minimum_fresh=20000):
already_played = (game_queue.latest_game_number - count_from)
print('== already_played: ', already_played, flush=True)
if (window_size > count_from):
game_queue.require_fresh_games(int((minimum_fresh * 0.9)))
else:
num_to_play = max(0, (math.ceil(((window_size * 0.9) * fresh_fraction)) - already_played))
print('== Num to play: ', num_to_play, flush=True)
game_queue.require_fresh_games(num_to_play)
|
Sets the metadata cell used to block until some quantity of games have been played.
This sets the 'freshness mark' on the `game_queue`, used to block training
until enough new games have been played. The number of fresh games required
is the larger of:
- The fraction of the total window size
- The `minimum_fresh` parameter
The number of games required can be indexed from the 'count_from' parameter.
Args:
game_queue: A GameQueue object, on whose backing table will be modified.
count_from: the index of the game to compute the increment from
window_size: an integer indicating how many past games are considered
fresh_fraction: a float in (0,1] indicating the fraction of games to wait for
minimum_fresh: an integer indicating the lower bound on the number of new
games.
|
codesearchnet
|
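A worked example of the freshness arithmetic above, with plain numbers and no GameQueue involved (the figures are made up):
import math

window_size = 500_000
fresh_fraction = 0.05
latest_game_number = 1_020_000
count_from = 1_000_000

already_played = latest_game_number - count_from              # 20_000
target_fresh = math.ceil(window_size * 0.9 * fresh_fraction)  # 22_500
num_to_play = max(0, target_fresh - already_played)           # 2_500
print(already_played, target_fresh, num_to_play)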
def http_request(self, verb, uri, data=None, headers=None, files=None, response_format=None, is_rdf=True, stream=False):
if is_rdf:
'\n\t\t\tAcceptable content negotiated response formats include:\n\t\t\t\tapplication/ld+json (discouraged, if not prohibited, as it drops prefixes used in repository)\n\t\t\t\tapplication/n-triples\n\t\t\t\tapplication/rdf+xml\n\t\t\t\ttext/n3 (or text/rdf+n3)\n\t\t\t\ttext/plain\n\t\t\t\ttext/turtle (or application/x-turtle)\n\t\t\t'
if (verb == 'GET'):
if (not response_format):
response_format = self.repo.default_serialization
if (headers and ('Accept' not in headers.keys())):
headers['Accept'] = response_format
else:
headers = {'Accept': response_format}
if (type(uri) == rdflib.term.URIRef):
uri = uri.toPython()
logger.debug(('%s request for %s, format %s, headers %s' % (verb, uri, response_format, headers)))
session = requests.Session()
request = requests.Request(verb, uri, auth=(self.repo.username, self.repo.password), data=data, headers=headers, files=files)
prepped_request = session.prepare_request(request)
response = session.send(prepped_request, stream=stream)
return response
|
Primary route for all HTTP requests to repository. Ability to set most parameters for requests library,
with some additional convenience parameters as well.
Args:
verb (str): HTTP verb to use for request, e.g. PUT, POST, GET, HEAD, PATCH, etc.
uri (rdflib.term.URIRef,str): input URI
data (str,file): payload of data to send for the request; may be overridden in preparation of the request
headers (dict): optional dictionary of headers passed directly to requests.request
files (dict): optional dictionary of files passed directly to requests.request
response_format (str): desired response format for resource's payload, e.g. 'application/rdf+xml', 'text/turtle', etc.
is_rdf (bool): if True, set Accept header based on combination of response_format and headers
stream (bool): passed directly to requests.request for stream parameter
Returns:
requests.models.Response
|
codesearchnet
|
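A hypothetical usage sketch, assuming `api` is an instance of the class that defines http_request above and that the container URI exists on a local Fedora-style repository (both are assumptions):
response = api.http_request(
    'GET',
    'http://localhost:8080/rest/my-container',
    response_format='text/turtle',
)
if response.status_code == 200:
    print(response.headers.get('Content-Type'))
    print(response.text[:200])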
def basistransform(self, new_basis, old_basis=None, orthonormalize=True):
if (old_basis is None):
old_basis = np.identity(3)
is_rotation_matrix = np.isclose(np.linalg.det(new_basis), 1)
if ((not is_rotation_matrix) and orthonormalize):
new_basis = xyz_functions.orthonormalize_righthanded(new_basis)
is_rotation_matrix = True
if is_rotation_matrix:
return dot(np.dot(new_basis.T, old_basis), self)
else:
return dot(np.dot(np.linalg.inv(new_basis), old_basis), self)
|
Transform the frame to a new basis.
This function transforms the cartesian coordinates from an
old basis to a new one. Please note that old_basis and
new_basis are supposed to have full rank and consist of
three linearly independent vectors. If orthonormalize is
True and new_basis is not already a rotation matrix,
new_basis is first orthonormalized to a right-handed basis;
for rotation matrices all involved matrices are transposed
instead of inverted.
In some applications this may require the function
:func:`xyz_functions.orthonormalize` as a previous step.
Args:
old_basis (np.array):
new_basis (np.array):
orthonormalize (bool):
Returns:
Cartesian: The transformed molecule.
|
codesearchnet
|
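A standalone NumPy sketch of the rotation branch above (toy basis chosen here, not the library's API): with column vectors, coordinates transform through M = new_basis.T @ old_basis when new_basis is orthonormal and right-handed.
import numpy as np

old_basis = np.identity(3)
new_basis = np.array([[0.0, 1.0, 0.0],
                      [-1.0, 0.0, 0.0],
                      [0.0, 0.0, 1.0]])   # columns are the new basis vectors, det == 1
assert np.isclose(np.linalg.det(new_basis), 1)

point = np.array([1.0, 0.0, 0.0])
M = new_basis.T @ old_basis
print(M @ point)   # [0. 1. 0.] -- same point, expressed in the new basis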
def do_REMOTE(self,
target: str,
remote_command: str,
source: list,
*args,
**kwargs) -> None:
if target == self.messaging._service_name:
info = 'target for remote command is the bot itself! Returning the function'
self.logger.info(info)
return self._handle_command(remote_command, source, *args, **kwargs)
try:
target = self.messaging._address_map[target]
except KeyError:
warn = ' Target %s, not found in addresses. Are you sure that %s sent an IDENT message?'
self.logger.warn(warn, target, target)
return
self.logger.info(' REMOTE %s, target: %s | %s, %s',
remote_command, target, args, kwargs)
source = target + source
self.messaging.send_command_response(source,
remote_command,
*args,
**kwargs)
|
Send a remote command to a service.
Args:
target: The service that the command gets sent to
remote_command: The command to do remotely.
source: the binary source of the zmq_socket. Packed to send to the
|
juraj-google-style
|
def __init__(self, message):
super(InvalidField, self).__init__(
reason=enums.ResultReason.INVALID_FIELD,
message=message
)
|
Create an InvalidField exception.
Args:
message (string): A string containing information about the error.
|
juraj-google-style
|
def parse(self, message, schema):
func = {'audit-log': self._parse_audit_log_msg, 'event': self._parse_event_msg}[schema]
return func(message)
|
Parse message according to schema.
`message` should already be validated against the given schema.
See :ref:`schemadef` for more information.
Args:
message (dict): message data to parse.
schema (str): valid message schema.
Returns:
(dict): parsed message
|
codesearchnet
|
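A standalone sketch of the dictionary-dispatch pattern used above (generic parser functions; not the actual schema parsers):
def _parse_audit_log_msg(message):
    return {'kind': 'audit-log', **message}

def _parse_event_msg(message):
    return {'kind': 'event', **message}

def parse(message, schema):
    # Look up the parser for this schema, then apply it; unknown schemas raise KeyError.
    func = {'audit-log': _parse_audit_log_msg, 'event': _parse_event_msg}[schema]
    return func(message)

print(parse({'action': 'create'}, 'audit-log'))  # {'kind': 'audit-log', 'action': 'create'}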
def reduce_logsumexp(x, reduced_dim, extra_logit=None, name=None):
reduced_dim = convert_to_dimension(reduced_dim)
with tf.variable_scope(name, default_name="reduce_logsumexp"):
reduced_shape = x.shape - reduced_dim
max_logit = reduce_max(stop_gradient(x), output_shape=reduced_shape)
if extra_logit is not None:
if isinstance(extra_logit, Tensor):
extra_logit = stop_gradient(extra_logit)
max_logit = maximum(max_logit, extra_logit)
x -= max_logit
exp_x = exp(x)
sum_exp_x = reduce_sum(exp_x, output_shape=reduced_shape)
if extra_logit is not None:
sum_exp_x += exp(extra_logit - max_logit)
return log(sum_exp_x) + max_logit
|
Numerically stable version of log(reduce_sum(exp(x))).
Unlike other reductions, the output has the same shape as the input.
Note: with a minor change, we could allow multiple reduced dimensions.
Args:
x: a Tensor
reduced_dim: a dimension in x
extra_logit: an optional Tensor broadcastable to (x.shape - reduced_dim)
name: an optional string
Returns:
a Tensor with the same shape and dtype as x.
|
juraj-google-style
|
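A NumPy sketch of the same max-subtraction trick (an illustration only, not the mtf API): subtracting the maximum before exponentiating prevents overflow, and it is added back after the log.
import numpy as np

def logsumexp(x, axis=-1):
    max_x = np.max(x, axis=axis, keepdims=True)
    return np.squeeze(max_x, axis=axis) + np.log(np.sum(np.exp(x - max_x), axis=axis))

x = np.array([1000.0, 1001.0, 1002.0])
print(logsumexp(x))   # ~1002.4076; a naive log(sum(exp(x))) would overflow to inf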
def __init__(self, action_type=None, tp_port=None):
super().__init__(action_type, length=8)
self.tp_port = tp_port
|
Create an ActionTPPort with the optional parameters below.
Args:
action_type (:class:`~pyof.v0x01.common.action.ActionType`):
:attr:`~ActionType.OFPAT_SET_TP_SRC` or
:attr:`~ActionType.OFPAT_SET_TP_DST`.
tp_port (int): TCP/UDP/other port to set.
|
juraj-google-style
|
def true_num_reactions(model, custom_spont_id=None):
true_num = 0
for rxn in model.reactions:
if len(rxn.genes) == 0:
continue
if len(rxn.genes) == 1 and is_spontaneous(list(rxn.genes)[0], custom_id=custom_spont_id):
continue
else:
true_num += 1
return true_num
|
Return the number of reactions associated with a gene.
Args:
model (Model):
custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
int: Number of reactions associated with a gene
|
juraj-google-style
|
def run_inside_wrap_function_in_eager_mode(graph_function):
def wrap_and_execute(self):
if context.executing_eagerly():
wrapped = wrap_function.wrap_function(graph_function, [self])
wrapped()
else:
graph_function(self)
return wrap_and_execute
|
Decorator to execute the same graph code in eager and graph modes.
In graph mode, we just execute the graph_function passed as argument. In eager
mode, we wrap the function using wrap_function and then execute the wrapped
result.
Args:
graph_function: python function containing graph code to be wrapped
Returns:
decorated function
|
github-repos
|
def unprotect(self, **kwargs):
id = self.get_id().replace('/', '%2F')
path = ('%s/%s/unprotect' % (self.manager.path, id))
self.manager.gitlab.http_put(path, **kwargs)
self._attrs['protected'] = False
|
Unprotect the branch.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabProtectError: If the branch could not be unprotected
|
codesearchnet
|
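A hypothetical usage sketch with python-gitlab (server URL, token, project id and branch name are placeholders):
import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
project = gl.projects.get(42)
branch = project.branches.get('release-1.0')
branch.unprotect()                          # issues PUT .../branches/release-1.0/unprotect
print(branch.attributes.get('protected'))   # False after a successful call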
def fitness(self, width, height):
assert ((width > 0) and (height > 0))
(rect, max_rect) = self._select_position(width, height)
if (rect is None):
return None
return self._rect_fitness(max_rect, rect.width, rect.height)
|
Metric used to rate how much space would be wasted if a rectangle were placed.
Returns a value greater than or equal to zero; the smaller the value, the better
the rectangle fits. If the rectangle can't be placed, returns None.
Arguments:
width (int, float): Rectangle width
height (int, float): Rectangle height
Returns:
int, float: Rectangle fitness
None: Rectangle can't be placed
|
codesearchnet
|
def _isbn_cleanse(isbn, checksum=True):
if not isinstance(isbn, string_types):
raise TypeError('ISBN must be a string, received %r' % isbn)
if PY2 and isinstance(isbn, str):
isbn = unicode(isbn)
uni_input = False
else:
uni_input = True
for dash in DASHES:
isbn = isbn.replace(dash, unicode())
if checksum:
if not isbn[:-1].isdigit():
raise IsbnError('non-digit parts')
if len(isbn) == 9:
isbn = '0' + isbn
if len(isbn) == 10:
if not (isbn[-1].isdigit() or isbn[-1] in 'Xx'):
raise IsbnError('non-digit or X checksum')
elif len(isbn) == 13:
if not isbn[-1].isdigit():
raise IsbnError('non-digit checksum')
if not isbn.startswith(('978', '979')):
raise IsbnError('invalid Bookland region')
else:
raise IsbnError('ISBN must be either 10 or 13 characters long')
else:
if len(isbn) == 8:
isbn = '0' + isbn
elif len(isbn) == 12 and not isbn[:3].startswith(('978', '979')):
raise IsbnError('invalid Bookland region')
if not isbn.isdigit():
raise IsbnError('non-digit parts')
if not len(isbn) in (9, 12):
raise IsbnError('ISBN must be either 9 or 12 characters long '
'without checksum')
if PY2 and not uni_input:
return str(isbn)
else:
return isbn
|
Check ISBN is a string, and passes basic sanity checks.
Args:
isbn (str): SBN, ISBN-10 or ISBN-13
checksum (bool): ``True`` if ``isbn`` includes checksum character
Returns:
``str``: ISBN with hyphenation removed, including when called with an
SBN
Raises:
TypeError: ``isbn`` is not a ``str`` type
IsbnError: Incorrect length for ``isbn``
IsbnError: Incorrect SBN or ISBN formatting
|
juraj-google-style
|
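A few hypothetical calls to the private helper above, showing the normalisation it performs (inputs chosen for illustration):
print(_isbn_cleanse('978-3-16-148410-0'))          # '9783161484100' -- dashes stripped
print(_isbn_cleanse('0-201-53082-1'))              # '0201530821'
print(_isbn_cleanse('71486837', checksum=False))   # '071486837' -- 8-digit SBN body padded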
def _use_temp_cache(self):
if self._use_tensor_buffer():
return False
if self._use_tensor_values_cache():
return self._parameters.use_temp_cache_var
else:
return False
|
Returns true if the intermediate values should be stacked instead of being stored in a tf.Variable.
Returns:
A boolean, denoting whether to use a temporary cache or not.
|
github-repos
|
def _derive_namespaces(self):
for graph in [self.diffs.overlap, self.diffs.removed, self.diffs.added]:
for (s, p, o) in graph:
try:
(ns_prefix, ns_uri, predicate) = graph.compute_qname(p)
self.update_namespaces.add(ns_uri)
except:
logger.debug(('could not parse Object URI: %s' % ns_uri))
try:
(ns_prefix, ns_uri, predicate) = graph.compute_qname(o)
self.update_namespaces.add(ns_uri)
except:
logger.debug(('could not parse Object URI: %s' % ns_uri))
logger.debug(self.update_namespaces)
for ns_uri in self.update_namespaces:
for k in self.prefixes.__dict__:
if (str(ns_uri) == str(self.prefixes.__dict__[k])):
logger.debug(('adding prefix %s for uri %s to unique_prefixes' % (k, str(ns_uri))))
self.update_prefixes[k] = self.prefixes.__dict__[k]
|
Loops through the three graphs in self.diffs to identify unique namespace URIs,
then loops through the provided dictionary of prefixes and pairs each URI with its prefix.
Args:
None: uses self.prefixes and self.diffs
Returns:
None: sets self.update_namespaces and self.update_prefixes
|
codesearchnet
|
def get_voronoi_polyhedra(self, structure, n):
if (self.targets is None):
targets = structure.composition.elements
else:
targets = self.targets
center = structure[n]
cutoff = self.cutoff
corners = [[1, 1, 1], [(- 1), 1, 1], [1, (- 1), 1], [1, 1, (- 1)]]
d_corners = [np.linalg.norm(structure.lattice.get_cartesian_coords(c)) for c in corners]
max_cutoff = (max(d_corners) + 0.01)
while True:
try:
neighbors = structure.get_sites_in_sphere(center.coords, cutoff)
neighbors = [i[0] for i in sorted(neighbors, key=(lambda s: s[1]))]
qvoronoi_input = [s.coords for s in neighbors]
voro = Voronoi(qvoronoi_input)
cell_info = self._extract_cell_info(structure, 0, neighbors, targets, voro, self.compute_adj_neighbors)
break
except RuntimeError as e:
if (cutoff >= max_cutoff):
if (e.args and ('vertex' in e.args[0])):
raise e
else:
raise RuntimeError('Error in Voronoi neighbor finding; max cutoff exceeded')
cutoff = min((cutoff * 2), (max_cutoff + 0.001))
return cell_info
|
Gives a weighted polyhedron around a site.
See ref: A Proposed Rigorous Definition of Coordination Number,
M. O'Keeffe, Acta Cryst. (1979). A35, 772-775
Args:
structure (Structure): structure for which to evaluate the
coordination environment.
n (integer): site index.
Returns:
A dict of sites sharing a common Voronoi facet with the site
n mapped to a dictionary containing statistics about the facet:
- solid_angle - Solid angle subtended by face
- angle_normalized - Solid angle normalized such that the
faces with the largest
- area - Area of the facet
- face_dist - Distance between site n and the facet
- volume - Volume of Voronoi cell for this face
- n_verts - Number of vertices on the facet
|
codesearchnet
|
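A hypothetical usage sketch, assuming the method above belongs to pymatgen's VoronoiNN (rock-salt NaCl built from its spacegroup as a small test structure):
from pymatgen.core import Lattice, Structure
from pymatgen.analysis.local_env import VoronoiNN

structure = Structure.from_spacegroup(
    'Fm-3m', Lattice.cubic(5.69), ['Na', 'Cl'], [[0, 0, 0], [0.5, 0.5, 0.5]])
nn = VoronoiNN()
polyhedra = nn.get_voronoi_polyhedra(structure, 0)
for neighbor in polyhedra.values():
    print(round(neighbor['solid_angle'], 3), round(neighbor['face_dist'], 3))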
def structure_lines(self, structure, cell_flg=True, frac_flg=True, anion_shell_flg=True, cation_shell_flg=False, symm_flg=True):
gin = ''
if cell_flg:
gin += 'cell\n'
l = structure.lattice
lat_str = [str(i) for i in [l.a, l.b, l.c, l.alpha, l.beta, l.gamma]]
gin += (' '.join(lat_str) + '\n')
if frac_flg:
gin += 'frac\n'
coord_attr = 'frac_coords'
else:
gin += 'cart\n'
coord_attr = 'coords'
for site in structure.sites:
coord = [str(i) for i in getattr(site, coord_attr)]
specie = site.specie
core_site_desc = (((specie.symbol + ' core ') + ' '.join(coord)) + '\n')
gin += core_site_desc
if (((specie in _anions) and anion_shell_flg) or ((specie in _cations) and cation_shell_flg)):
shel_site_desc = (((specie.symbol + ' shel ') + ' '.join(coord)) + '\n')
gin += shel_site_desc
else:
pass
if symm_flg:
gin += 'space\n'
gin += (str(SpacegroupAnalyzer(structure).get_space_group_number()) + '\n')
return gin
|
Generates GULP input string corresponding to pymatgen structure.
Args:
structure: pymatgen Structure object
cell_flg (default = True): Option to use lattice parameters.
frac_flg (default = True): If True, fractional coordinates
are used. Else, cartesian coordinates in Angstroms are used.
******
GULP convention is to use fractional coordinates for periodic
structures and cartesian coordinates for non-periodic
structures.
******
anion_shell_flg (default = True): If True, anions are considered
polarizable.
cation_shell_flg (default = False): If True, cations are
considered polarizable.
symm_flg (default = True): If True, symmetry information is also
written.
Returns:
string containing structure for GULP input
|
codesearchnet
|
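A hypothetical usage sketch, assuming the method above is pymatgen's GulpIO.structure_lines (MgO rock salt as a small test structure; spglib is required for the symmetry line):
from pymatgen.core import Lattice, Structure
from pymatgen.command_line.gulp_caller import GulpIO

mgo = Structure.from_spacegroup(
    'Fm-3m', Lattice.cubic(4.21), ['Mg', 'O'], [[0, 0, 0], [0.5, 0.5, 0.5]])
gio = GulpIO()
print(gio.structure_lines(mgo, anion_shell_flg=True, symm_flg=True))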
def _InvokeImportCallbackBySuffix(names):
def GetModuleFromName(name, path):
"Returns the loaded module for this name/path, or None if not found.\n\n Args:\n name: A string that may represent the name of a loaded Python module.\n path: If 'name' ends with '.*', then the last path component in 'path' is\n used to identify what the wildcard may map to. Does not contain file\n extension.\n\n Returns:\n The loaded module for the given name and path, or None if a loaded module\n was not found.\n "
if name.endswith('.*'):
name = ((name.rpartition('.')[0] + '.') + path.split('/')[(- 1)])
return sys.modules.get(name)
for (path, callbacks) in list(_import_callbacks.items()):
root = os.path.splitext(path)[0]
nonempty_names = (n for n in names if n)
modules = (GetModuleFromName(name, root) for name in nonempty_names)
nonempty_modules = (m for m in modules if m)
for module in nonempty_modules:
mod_file = getattr(module, '__file__', None)
if (not mod_file):
continue
mod_root = os.path.splitext(mod_file)[0]
if (not os.path.isabs(mod_root)):
mod_root = os.path.join(os.curdir, mod_root)
if module_utils2.IsPathSuffix(mod_root, root):
for callback in callbacks.copy():
callback(module)
break
|
Invokes import callbacks for newly loaded modules.
Uses a path suffix match to identify whether a loaded module matches the
file path provided by the user.
Args:
names: A set of names for modules that are loaded by the current import.
The set may contain some superfluous entries that were already
loaded before this import, or some entries that do not correspond
to a module. The list is expected to be much smaller than the exact
sys.modules so that a linear search is not as costly.
|
codesearchnet
|
def objects_to_serialize(self, serialization_cache):
raise NotImplementedError
|
Returns dictionary of extra checkpointable objects to serialize.
See `functions_to_serialize` for an explanation of this function's
effects.
Args:
serialization_cache: Dictionary passed to all objects in the same object
graph during serialization.
Returns:
A dictionary mapping attribute names to checkpointable objects.
|
github-repos
|
def _update_unenrolled_list(sailthru_client, email, course_url, unenroll):
try:
sailthru_response = sailthru_client.api_get('user', {'id': email, 'fields': {'vars': 1}})
if (not sailthru_response.is_ok()):
error = sailthru_response.get_error()
logger.error('Error attempting to read user record from Sailthru: %s', error.get_message())
return (not can_retry_sailthru_request(error))
response_json = sailthru_response.json
unenroll_list = []
if (response_json and ('vars' in response_json) and response_json['vars'] and ('unenrolled' in response_json['vars'])):
unenroll_list = response_json['vars']['unenrolled']
changed = False
if unenroll:
if (course_url not in unenroll_list):
unenroll_list.append(course_url)
changed = True
elif (course_url in unenroll_list):
unenroll_list.remove(course_url)
changed = True
if changed:
sailthru_response = sailthru_client.api_post('user', {'id': email, 'key': 'email', 'vars': {'unenrolled': unenroll_list}})
if (not sailthru_response.is_ok()):
error = sailthru_response.get_error()
logger.error('Error attempting to update user record in Sailthru: %s', error.get_message())
return (not can_retry_sailthru_request(error))
return True
except SailthruClientError as exc:
logger.exception('Exception attempting to update user record for %s in Sailthru - %s', email, text_type(exc))
return False
|
Maintain a list of courses the user has unenrolled from in the Sailthru user record
Arguments:
sailthru_client (object): SailthruClient
email (str): user's email address
course_url (str): LMS url for course info page.
unenroll (boolean): True if unenrolling, False if enrolling
Returns:
False if retryable error, else True
|
codesearchnet
|
def random_state(dim, seed=None):
if seed is None:
seed = np.random.randint(0, np.iinfo(np.int32).max)
rng = np.random.RandomState(seed)
x = rng.rand(dim)
x += x == 0
x = -np.log(x)
sumx = sum(x)
phases = rng.rand(dim)*2.0*np.pi
return np.sqrt(x/sumx)*np.exp(1j*phases)
|
Return a random quantum state from the uniform (Haar) measure on
state space.
Args:
dim (int): the dimension of the state space
seed (int): Optional. To set a random seed.
Returns:
ndarray: a random quantum state vector of length ``dim``.
|
juraj-google-style
|
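A quick standalone check of the construction above (a slightly simplified copy that skips the zero-guard on the uniform samples): the resulting vector always has unit norm.
import numpy as np

def haar_random_state(dim, seed=0):
    rng = np.random.RandomState(seed)
    x = -np.log(rng.rand(dim))                      # exponential weights
    phases = rng.rand(dim) * 2.0 * np.pi            # uniform phases
    return np.sqrt(x / x.sum()) * np.exp(1j * phases)

psi = haar_random_state(8, seed=123)
print(np.isclose(np.linalg.norm(psi), 1.0))   # True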
def find_node(self, x: int, y: int) -> Optional['BSP']:
if (not self.contains(x, y)):
return None
for child in self.children:
found = child.find_node(x, y)
if found:
return found
return self
|
Return the deepest node which contains these coordinates.
Returns:
Optional[BSP]: BSP object or None.
|
codesearchnet
|
def __init__(self, skip_header_lines=None, name=None):
rr = gen_io_ops.text_line_reader_v2(skip_header_lines=skip_header_lines, name=name)
super(TextLineReader, self).__init__(rr)
|
Create a TextLineReader.
Args:
skip_header_lines: An optional int. Defaults to 0. Number of lines
to skip from the beginning of every file.
name: A name for the operation (optional).
|
github-repos
|
def _GetDictFromStringsTable(self, parser_mediator, table):
if not table:
return {}
record_values = {}
for record in table.records:
if parser_mediator.abort:
break
if record.get_number_of_values() != 2:
continue
identification = self._GetRecordValue(record, 0)
filename = self._GetRecordValue(record, 1)
if not identification:
continue
record_values[identification] = filename
return record_values
|
Build a dictionary of the values in the strings table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
table (pyesedb.table): strings table.
Returns:
dict[str,object]: values per column name.
|
juraj-google-style
|
def parse_value(self):
parsers = [self._maybe_parse_container, self._maybe_parse_basic_type, self._maybe_parse_configurable_reference, self._maybe_parse_macro]
for parser in parsers:
(success, value) = parser()
if success:
return value
self._raise_syntax_error('Unable to parse value.')
|
Parse a single literal value.
Returns:
The parsed value.
|
codesearchnet
|
def resolve_input(self, input_name):
name_elts = input_name.split(':')
source_name = name_elts[0]
if source_name[0] == '^':
source_name = source_name[1:]
source_index = 0
if len(name_elts) > 1 and name_elts[-1].isnumeric():
source_index = int(name_elts[-1])
if self._function is None:
return _EndPoint(self._enclosing_graph.nodes[source_name], source_index)
if source_index != 0 or source_name in self._function.nodes:
return _EndPoint(self._function.nodes[source_name], source_index)
inputs = [i.name for i in self._function.function.signature.input_arg]
return _EndPoint(self._function, inputs.index(source_name))
|
Resolves an input into its _EndPoint.
A NodeDef's input name can refer to either global NodeDefs (in the
GraphDef's node list), a NodeDef in a function's node list, or a Function
(in the GraphDef's function library). The name can also carry semantic
information, depending on whether it starts with "^". This method handles
all that logic in order to find the object to which the input name refers
to.
Args:
input_name: The input name to resolve.
Returns:
The object referred to by 'input_name'.
|
github-repos
|
def find_mip(self, direction, mechanism, purview):
if (not purview):
return _null_ria(direction, mechanism, purview)
repertoire = self.repertoire(direction, mechanism, purview)
def _mip(phi, partition, partitioned_repertoire):
return RepertoireIrreducibilityAnalysis(phi=phi, direction=direction, mechanism=mechanism, purview=purview, partition=partition, repertoire=repertoire, partitioned_repertoire=partitioned_repertoire, node_labels=self.node_labels)
if ((direction == Direction.CAUSE) and np.all((repertoire == 0))):
return _mip(0, None, None)
mip = _null_ria(direction, mechanism, purview, phi=float('inf'))
for partition in mip_partitions(mechanism, purview, self.node_labels):
(phi, partitioned_repertoire) = self.evaluate_partition(direction, mechanism, purview, partition, repertoire=repertoire)
if (phi == 0):
return _mip(0.0, partition, partitioned_repertoire)
if (phi < mip.phi):
mip = _mip(phi, partition, partitioned_repertoire)
return mip
|
Return the minimum information partition for a mechanism over a
purview.
Args:
direction (Direction): |CAUSE| or |EFFECT|.
mechanism (tuple[int]): The nodes in the mechanism.
purview (tuple[int]): The nodes in the purview.
Returns:
RepertoireIrreducibilityAnalysis: The irreducibility analysis for
the mininum-information partition in one temporal direction.
|
codesearchnet
|