code | docstring | source |
---|---|---|
def golden_images(self):
if (not self.__golden_images):
self.__golden_images = GoldenImages(self.__connection)
return self.__golden_images | Gets the Golden Images API client.
Returns:
GoldenImages: The Golden Images API client. | codesearchnet |
def next_generation(self, mut_rate=0, max_mut_amt=0, log_base=10):
if (self.__num_processes > 1):
process_pool = Pool(processes=self.__num_processes)
members = [m.get() for m in self.__members]
else:
members = self.__members
if (len(members) == 0):
raise Exception('Generation 0 not found: use generate_population() first')
selected_members = self.__select_fn(members)
reproduction_probs = list(reversed(logspace(0.0, 1.0, num=len(selected_members), base=log_base)))
reproduction_probs = (reproduction_probs / sum(reproduction_probs))
self.__members = []
for _ in range(self.__pop_size):
parent_1 = nrandom.choice(selected_members, p=reproduction_probs)
parent_2 = nrandom.choice(selected_members, p=reproduction_probs)
feed_dict = {}
for param in self.__parameters:
which_parent = uniform(0, 1)
if (which_parent < 0.5):
feed_dict[param.name] = parent_1.parameters[param.name]
else:
feed_dict[param.name] = parent_2.parameters[param.name]
feed_dict[param.name] = self.__mutate_parameter(feed_dict[param.name], param, mut_rate, max_mut_amt)
if (self.__num_processes > 1):
self.__members.append(process_pool.apply_async(self._start_process, [self.__cost_fn, feed_dict, self.__cost_fn_args]))
else:
self.__members.append(Member(feed_dict, self.__cost_fn(feed_dict, self.__cost_fn_args)))
if (self.__num_processes > 1):
process_pool.close()
process_pool.join()
self.__determine_best_member() | Generates the next population from a previously evaluated generation
Args:
mut_rate (float): mutation rate for new members (0.0 - 1.0)
max_mut_amt (float): how much the member is allowed to mutate
(0.0 - 1.0, proportion change of mutated parameter)
log_base (int): the higher this number, the more likely the first
Members (chosen with supplied selection function) are chosen
as parents for the next generation | codesearchnet |
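A minimal, self-contained sketch (not part of the library) of how `log_base` shapes the parent-selection probabilities computed in `next_generation()` above: higher bases concentrate probability on the best-ranked members.

```python
# Illustrative only: reproduces the reproduction-probability math from
# next_generation() for five ranked members.
from numpy import logspace

def reproduction_probs(n_selected, log_base=10):
    # Weights are reversed so the best-ranked member gets the largest one,
    # then normalised to sum to 1.
    weights = list(reversed(logspace(0.0, 1.0, num=n_selected, base=log_base)))
    total = sum(weights)
    return [w / total for w in weights]

print(reproduction_probs(5, log_base=10))  # strongly favours the first member
print(reproduction_probs(5, log_base=2))   # much flatter distribution
```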
def closing(input_rasterfilename, times):
input_raster = RasterUtilClass.read_raster(input_rasterfilename)
closing_raster = input_raster
for i in range(times):
closing_raster = RasterUtilClass.raster_dilation(closing_raster)
for i in range(times):
closing_raster = RasterUtilClass.raster_erosion(closing_raster)
return closing_raster | Perform morphological closing.
Closing: dilate first, then erode.
Args:
input_rasterfilename: input original raster image filename.
times: number of dilation and erosion iterations.
Returns:
closing_raster: raster image after closing. | codesearchnet |
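For readers without the raster utilities used above, a minimal sketch of the same closing operation on a boolean NumPy mask (scipy.ndimage stands in for RasterUtilClass; this is an illustration, not the library code):

```python
import numpy as np
from scipy import ndimage

def closing_array(mask, times):
    # Dilate `times` iterations, then erode `times` iterations.
    out = ndimage.binary_dilation(mask, iterations=times)
    return ndimage.binary_erosion(out, iterations=times)

mask = np.zeros((7, 7), dtype=bool)
mask[2:5, 2:5] = True
mask[3, 3] = False                      # a one-pixel hole
print(closing_array(mask, 1)[3, 3])     # True: closing fills the hole
```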
def transpose(self, name=None):
if any(x > 1 for x in self._rate):
raise base.NotSupportedError(
"Cannot transpose a dilated convolution module.")
if any(p != self._conv_op_padding for p in self._padding):
raise base.NotSupportedError(
"Cannot transpose a convolution using mixed paddings or paddings "
"other than SAME or VALID.")
if name is None:
name = self.module_name + "_transpose"
def output_shape():
if self._data_format == DATA_FORMAT_NCHW:
return self.input_shape[2:4]
else:
return self.input_shape[1:3]
return Conv2DTranspose(output_channels=lambda: self._input_channels,
output_shape=output_shape,
kernel_shape=self._kernel_shape,
stride=self._stride,
padding=self._conv_op_padding,
use_bias=self._use_bias,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
data_format=self._data_format,
custom_getter=self._custom_getter,
name=name) | Returns matching `Conv2DTranspose` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.name`.
Returns:
`Conv2DTranspose` module.
Raises:
base.NotSupportedError: If `rate` in any dimension > 1. | juraj-google-style |
def copy(self, destination):
destination_uri = self.repo.parse_uri(destination)
response = self.repo.api.http_request('COPY', self.uri, data=None, headers={'Destination':destination_uri.toPython()})
if response.status_code == 201:
return destination_uri
else:
raise Exception('HTTP %s, could not copy resource %s to %s' % (response.status_code, self.uri, destination_uri)) | Method to copy resource to another location
Args:
destination (rdflib.term.URIRef, str): URI location to copy resource to
Returns:
(Resource) new, copied instance of resource | juraj-google-style |
def micros_to_timestamp(micros, timestamp):
seconds = long(micros / _MICROS_PER_SECOND)
micro_remainder = micros % _MICROS_PER_SECOND
timestamp.seconds = seconds
timestamp.nanos = micro_remainder * _NANOS_PER_MICRO | Convert microseconds from utc epoch to google.protobuf.timestamp.
Args:
micros: a long, number of microseconds since utc epoch.
timestamp: a google.protobuf.timestamp.Timestamp to populate. | juraj-google-style |
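A self-contained Python 3 sketch of the same conversion using divmod and the protobuf `Timestamp` message (the constants are defined locally here; in the snippet above they are module-level):

```python
from google.protobuf import timestamp_pb2

_MICROS_PER_SECOND = 10**6
_NANOS_PER_MICRO = 10**3

def micros_to_timestamp(micros):
    seconds, micro_remainder = divmod(micros, _MICROS_PER_SECOND)
    return timestamp_pb2.Timestamp(seconds=seconds,
                                   nanos=micro_remainder * _NANOS_PER_MICRO)

ts = micros_to_timestamp(1500000)
print(ts.seconds, ts.nanos)  # 1 500000000
```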
def warp(self, to_sref, dest=None, interpolation=gdalconst.GRA_NearestNeighbour):
if not hasattr(to_sref, 'ExportToWkt'):
to_sref = SpatialReference(to_sref)
dest_wkt = to_sref.ExportToWkt()
dtype = self[0].DataType
err_thresh = 0.125
vrt = gdal.AutoCreateWarpedVRT(self.ds, None, dest_wkt,
interpolation, err_thresh)
if vrt is None:
raise ValueError('Could not warp %s to %s' % (self, dest_wkt))
warpsize = (vrt.RasterXSize, vrt.RasterYSize, len(self))
warptrans = vrt.GetGeoTransform()
vrt = None
if dest is None:
imgio = MemFileIO()
rwarp = self.driver.raster(imgio, warpsize, dtype)
imgio.close()
else:
rwarp = self.driver.raster(dest, warpsize, dtype)
rwarp.SetGeoTransform(warptrans)
rwarp.SetProjection(to_sref)
if self.nodata is not None:
for band in rwarp:
band.SetNoDataValue(self.nodata)
band = None
gdal.ReprojectImage(self.ds, rwarp.ds, None, None, interpolation)
return rwarp | Returns a new reprojected instance.
Arguments:
to_sref -- spatial reference as a proj4 or wkt string, or a
SpatialReference
Keyword args:
dest -- filepath as str
interpolation -- GDAL interpolation type | juraj-google-style |
def register(self, table):
if table.table_type.is_system:
raise ValueError('Cannot add system table to catalog')
if not table.table_type.is_shared:
raise ValueError('Cannot add local table to catalog')
if table.is_substitute:
raise ValueError('Cannot add substitute table to catalog')
versions = self.__tables.get(table.name)
if versions is None:
versions = {}
self.__tables[table.name] = versions
versions[table.version] = table | Adds a shared table to the catalog.
Args:
table (SymbolTable): A non-system, shared symbol table. | juraj-google-style |
def compose_q_update_vec(self, q_update_vec: torch.Tensor, normalize_quats: bool=True) -> Rotation:
quats = self.get_quats()
new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)
return Rotation(rot_mats=None, quats=new_quats, normalize_quats=normalize_quats) | Returns a new quaternion Rotation after updating the current object's underlying rotation with a quaternion
update, formatted as a [*, 3] tensor whose final three columns represent x, y, z such that (1, x, y, z) is the
desired (not necessarily unit) quaternion update.
Args:
q_update_vec:
A [*, 3] quaternion update tensor
normalize_quats:
Whether to normalize the output quaternion
Returns:
An updated Rotation | github-repos |
def getToC(doc, simple=True):
def recurse(olItem, liste, lvl):
'Recursively follow the outline item chain and record item information in a list.'
while olItem:
if olItem.title:
title = olItem.title
else:
title = ' '
if (not olItem.isExternal):
if olItem.uri:
page = (olItem.page + 1)
else:
page = (- 1)
else:
page = (- 1)
if (not simple):
link = getLinkDict(olItem)
liste.append([lvl, title, page, link])
else:
liste.append([lvl, title, page])
if olItem.down:
liste = recurse(olItem.down, liste, (lvl + 1))
olItem = olItem.next
return liste
if doc.isClosed:
raise ValueError('illegal operation on closed document')
olItem = doc.outline
if (not olItem):
return []
lvl = 1
liste = []
return recurse(olItem, liste, lvl) | Create a table of contents.
Args:
simple: a bool to control output. Returns a list, where each entry consists of outline level, title, page number and link destination (if simple = False). For details see PyMuPDF's documentation. | codesearchnet |
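A hedged usage sketch for `getToC()` above ('report.pdf' is a placeholder path): with `simple=True` each entry is a `[level, title, page]` triple.

```python
import fitz  # PyMuPDF

doc = fitz.open("report.pdf")            # placeholder document
for lvl, title, page in getToC(doc, simple=True):
    print("  " * (lvl - 1) + "%s (page %d)" % (title, page))
```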
def __init__(self, name, data=None, package_cls=None):
super(PackageMaker, self).__init__(data)
self.name = name
self.package_cls = package_cls or Package
self.installed_variants = []
self.skipped_variants = [] | Create a package maker.
Args:
name (str): Package name. | juraj-google-style |
def copy_sharding(from_tensor, to_tensor, use_sharding_op=False):
sharding = get_tensor_sharding(from_tensor)
if sharding is None:
return to_tensor
if isinstance(to_tensor, resource_variable_ops.BaseResourceVariable) and context.xla_sharding_for_resource_variables_enabled():
proto = xla_data_pb2.OpSharding()
proto.ParseFromString(sharding)
to_tensor._set_xla_sharding(proto)
return to_tensor
if use_sharding_op:
to_tensor = tf2xla.sharding(to_tensor, sharding=sharding)
attr_value = attr_value_pb2.AttrValue(s=sharding)
to_tensor.op._set_attr('_XlaSharding', attr_value)
return to_tensor | Copies a tensor's sharding to another.
Args:
from_tensor: Source tensor. Must be the sole output of an op.
to_tensor: the tensor to annotate with the copy.
use_sharding_op: whether to create a sharding op on `to_tensor`.
Returns:
A tensor with sharding annotation copied from `from_tensor`. | github-repos |
def __init__(self, items: Optional[Iterable[Any]]=None, *, value_spec: Optional[pg_typing.List]=None, onchange_callback: Optional[Callable[[Dict[utils.KeyPath, base.FieldUpdate]], None]]=None, allow_partial: bool=False, accessor_writable: bool=True, sealed: bool=False, root_path: Optional[utils.KeyPath]=None):
if value_spec and (not isinstance(value_spec, pg_typing.List)):
raise TypeError(f"Argument 'value_spec' must be a `pg.typing.List` object. Encountered {value_spec}.")
base.Symbolic.__init__(self, allow_partial=allow_partial, accessor_writable=accessor_writable, sealed=False, root_path=root_path)
self._value_spec = None
self._onchange_callback = None
list.__init__(self)
if items:
if isinstance(items, List):
items = items.sym_values()
for item in items:
self._set_item_without_permission_check(len(self), item)
if value_spec:
self.use_value_spec(value_spec, allow_partial)
self._onchange_callback = onchange_callback
self.seal(sealed) | Constructor.
Args:
items: An optional iterable object as initial value for this list.
value_spec: Value spec that applies to this List.
onchange_callback: Callback when sub-tree has been modified.
allow_partial: Whether to allow unbound or partial fields. This takes
effect only when value_spec is not None.
accessor_writable: Whether to allow modification of this List using
accessors (operator[]).
sealed: Whether to seal this List after creation.
root_path: KeyPath of this List in its object tree. | github-repos |
def sym_distance(cls, q0, q1):
q = Quaternion.sym_log_map(q0, q1)
return q.norm | Quaternion symmetrized distance.
Find the intrinsic symmetrized geodesic distance between q0 and q1.
Params:
q0: the first quaternion
q1: the second quaternion
Returns:
A positive amount corresponding to the length of the symmetrized
geodesic curve connecting q0 to q1.
Note:
This formulation is more numerically stable when performing
iterative gradient descent on the Riemannian quaternion manifold.
However, the distance between q and -q is equal to pi, rendering this
formulation not useful for measuring rotation similarities when the
samples are spread over a "solid" angle of more than pi/2 radians
(the spread refers to quaternions as point samples on the unit hypersphere). | codesearchnet |
def __init__(self, batch_size, key_depth, val_depth, memory_size,
sharpen_factor=1., name="neural_memory"):
self.name = name
self.batch_size = batch_size
self.key_depth = key_depth
self.val_depth = val_depth
self.memory_size = memory_size
self.sharpen_factor = sharpen_factor
with tf.variable_scope(name):
self.segment_number = tf.get_variable(
"segment_number", [self.batch_size],
dtype=tf.int32, trainable=False,
initializer=tf.constant_initializer(100000))
self.mem_vals = tf.get_variable(
"memvals", [self.batch_size, self.memory_size, self.val_depth],
dtype=tf.float32, trainable=False,
initializer=tf.constant_initializer(.0))
self.mean_logits = tf.get_variable(
"meanlogits", [self.batch_size, self.memory_size],
dtype=tf.float32, trainable=False,
initializer=tf.constant_initializer(.0)) | Initialize the memory object.
Args:
batch_size: the batch size.
key_depth: the depth of the memory keys.
val_depth: the depth of the memory values.
memory_size: the number of items in the memory.
sharpen_factor: the sharpen_factor for addressing the memory.
name: the optional variable scope. | juraj-google-style |
def _get_token(request=None, allowed_auth_schemes=('OAuth', 'Bearer'), allowed_query_keys=('bearer_token', 'access_token')):
allowed_auth_schemes = _listlike_guard(allowed_auth_schemes, 'allowed_auth_schemes', iterable_only=True)
auth_header = os.environ.get('HTTP_AUTHORIZATION')
if auth_header:
for auth_scheme in allowed_auth_schemes:
if auth_header.startswith(auth_scheme):
return auth_header[(len(auth_scheme) + 1):]
return None
if request:
allowed_query_keys = _listlike_guard(allowed_query_keys, 'allowed_query_keys', iterable_only=True)
for key in allowed_query_keys:
(token, _) = request.get_unrecognized_field_info(key)
if token:
return token | Get the auth token for this request.
Auth token may be specified in either the Authorization header or
as a query param (either access_token or bearer_token). We'll check in
this order:
1. Authorization header.
2. bearer_token query param.
3. access_token query param.
Args:
request: The current request, or None.
Returns:
The token in the request or None. | codesearchnet |
def parse_cscore(infile):
cscore_dict = {}
with open(infile, 'r') as f:
for ll in f.readlines():
if ll.lower().startswith('model1'):
l = ll.split()
cscore = l[1]
tmscore_full = l[2].split('+-')
tmscore = tmscore_full[0]
tmscore_err = tmscore_full[1]
rmsd_full = l[3].split('+-')
rmsd = rmsd_full[0]
rmsd_err = rmsd_full[1]
cscore_dict['c_score'] = float(cscore)
cscore_dict['tm_score'] = float(tmscore)
cscore_dict['tm_score_err'] = float(tmscore_err)
cscore_dict['rmsd'] = float(rmsd)
cscore_dict['rmsd_err'] = float(rmsd_err)
return cscore_dict | Parse the cscore file to return a dictionary of scores.
Args:
infile (str): Path to cscore
Returns:
dict: Dictionary of scores | codesearchnet |
def end_of(self, event_id, import_options=True):
event_id = str(event_id)
if (event_id in DatePickerDictionary.items):
linked_picker = DatePickerDictionary.items[event_id]
self.config['linked_to'] = linked_picker.config['id']
if import_options:
backup_moment_format = self.config['options']['format']
self.config['options'].update(linked_picker.config['options'])
self.config['options'].update(self.options_param)
if (self.format_param or ('format' in self.options_param)):
self.config['options']['format'] = backup_moment_format
else:
self.format = linked_picker.format
self.config['options']['useCurrent'] = False
self._link_to(linked_picker)
else:
raise KeyError(('start-date not specified for event_id "%s"' % event_id))
return self | Set Date-Picker as the end-date of a date-range.
Args:
- event_id (string): User-defined unique id for linking two fields
- import_options (bool): inherit options from start-date input,
default: TRUE | codesearchnet |
def compile_action_preconditions(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> List[TensorFluent]:
scope = self.action_precondition_scope(state, action)
preconds = []
with self.graph.as_default():
with tf.name_scope('action_preconditions'):
for p in self.rddl.domain.preconds:
fluent = self._compile_expression(p, scope)
preconds.append(fluent)
return preconds | Compiles the action preconditions given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`. | codesearchnet |
def create_event_model(event):
if event['type'].startswith('task'):
factory = {
JobEventName.Started: JobStartedEvent,
JobEventName.Succeeded: JobSucceededEvent,
JobEventName.Stopped: JobStoppedEvent,
JobEventName.Aborted: JobAbortedEvent
}
if event['type'] in factory:
return factory[event['type']].from_event(event)
else:
raise JobEventTypeUnsupported(
'Unsupported event type {}'.format(event['type']))
elif event['type'].startswith('worker'):
raise WorkerEventTypeUnsupported(
'Unsupported event type {}'.format(event['type']))
else:
raise EventTypeUnknown('Unknown event type {}'.format(event['type'])) | Factory function that turns a celery event into an event object.
Args:
event (dict): A dictionary that represents a celery event.
Returns:
object: An event object representing the received event.
Raises:
JobEventTypeUnsupported: If an unsupported celery job event was received.
WorkerEventTypeUnsupported: If an unsupported celery worker event was received.
EventTypeUnknown: If an unknown event type (neither job nor worker) was received. | juraj-google-style |
def yaw_pitch_roll(self):
self._normalise()
yaw = np.arctan2((2 * ((self.q[0] * self.q[3]) - (self.q[1] * self.q[2]))), (1 - (2 * ((self.q[2] ** 2) + (self.q[3] ** 2)))))
pitch = np.arcsin((2 * ((self.q[0] * self.q[2]) + (self.q[3] * self.q[1]))))
roll = np.arctan2((2 * ((self.q[0] * self.q[1]) - (self.q[2] * self.q[3]))), (1 - (2 * ((self.q[1] ** 2) + (self.q[2] ** 2)))))
return (yaw, pitch, roll) | Get the equivalent yaw-pitch-roll angles aka. intrinsic Tait-Bryan angles following the z-y'-x'' convention
Returns:
yaw: rotation angle around the z-axis in radians, in the range `[-pi, pi]`
pitch: rotation angle around the y'-axis in radians, in the range `[-pi/2, pi/2]`
roll: rotation angle around the x''-axis in radians, in the range `[-pi, pi]`
The resulting rotation_matrix would be R = R_x(roll) R_y(pitch) R_z(yaw)
Note:
This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one. | codesearchnet |
def initial_state(self, batch_size, trainable=False):
init_state = tf.eye(self._mem_slots, batch_shape=[batch_size])
if (self._mem_size > self._mem_slots):
difference = (self._mem_size - self._mem_slots)
pad = tf.zeros((batch_size, self._mem_slots, difference))
init_state = tf.concat([init_state, pad], (- 1))
elif (self._mem_size < self._mem_slots):
init_state = init_state[:, :, :self._mem_size]
return init_state | Creates the initial memory.
We should ensure each row of the memory is initialized to be unique,
so initialize the matrix to be the identity. We then pad or truncate
as necessary so that init_state is of size
(batch_size, self._mem_slots, self._mem_size).
Args:
batch_size: The size of the batch.
trainable: Whether the initial state is trainable. This is always True.
Returns:
init_state: A truncated or padded matrix of size
(batch_size, self._mem_slots, self._mem_size). | codesearchnet |
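The pad/truncate shape logic above can be previewed with plain NumPy (shapes only; this is not the TensorFlow module itself):

```python
import numpy as np

def initial_memory(batch_size, mem_slots, mem_size):
    # Batch of identity matrices, padded or truncated along the last axis.
    state = np.stack([np.eye(mem_slots)] * batch_size)
    if mem_size > mem_slots:
        pad = np.zeros((batch_size, mem_slots, mem_size - mem_slots))
        state = np.concatenate([state, pad], axis=-1)
    elif mem_size < mem_slots:
        state = state[:, :, :mem_size]
    return state

print(initial_memory(2, 4, 6).shape)  # (2, 4, 6)
```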
def Collect(self, top_frame):
frame = top_frame
top_line = self.breakpoint['location']['line']
breakpoint_frames = self.breakpoint['stackFrames']
try:
if 'expressions' in self.breakpoint:
self.breakpoint['evaluatedExpressions'] = [
self._CaptureExpression(top_frame, expression) for expression
in self.breakpoint['expressions']]
while frame and (len(breakpoint_frames) < self.max_frames):
line = top_line if frame == top_frame else frame.f_lineno
code = frame.f_code
if len(breakpoint_frames) < self.max_expand_frames:
frame_arguments, frame_locals = self.CaptureFrameLocals(frame)
else:
frame_arguments = []
frame_locals = []
breakpoint_frames.append({
'function': _GetFrameCodeObjectName(frame),
'location': {
'path': NormalizePath(code.co_filename),
'line': line
},
'arguments': frame_arguments,
'locals': frame_locals
})
frame = frame.f_back
except BaseException as e:
self.breakpoint['status'] = {
'isError': True,
'description': {
'format': ('INTERNAL ERROR: Failed while capturing locals '
'of frame $0: $1'),
'parameters': [str(len(breakpoint_frames)), str(e)]}}
num_vars = 1
while (num_vars < len(self._var_table)) and (
self._total_size < self.max_size):
self._var_table[num_vars] = self.CaptureVariable(
self._var_table[num_vars], 0, self.default_capture_limits,
can_enqueue=False)
num_vars += 1
self.TrimVariableTable(num_vars)
self._CaptureEnvironmentLabels()
self._CaptureRequestLogId()
self._CaptureUserId() | Collects call stack, local variables and objects.
Starts collection from the specified frame. We don't start from the top
frame to exclude the frames due to debugger. Updates the content of
self.breakpoint.
Args:
top_frame: top frame to start data collection. | juraj-google-style |
def make_call_types(f, globals_d):
arg_spec = getargspec(f)
args = [k for k in arg_spec.args if k != "self"]
defaults = {}
if arg_spec.defaults:
default_args = args[-len(arg_spec.defaults):]
for a, default in zip(default_args, arg_spec.defaults):
defaults[a] = default
if not getattr(f, "__annotations__", None):
annotations = make_annotations(f, globals_d)
else:
annotations = f.__annotations__
call_types = OrderedDict()
for a in args:
anno = anno_with_default(annotations[a], defaults.get(a, NO_DEFAULT))
assert isinstance(anno, Anno), \
"Argument %r has type %r which is not an Anno" % (a, anno)
call_types[a] = anno
return_type = anno_with_default(annotations.get("return", None))
if return_type is Any:
return_type = Anno("Any return value", Any, "return")
assert return_type is None or isinstance(return_type, Anno), \
"Return has type %r which is not an Anno" % (return_type,)
return call_types, return_type | Make a call_types dictionary that describes what arguments to pass to f
Args:
f: The function to inspect for argument names (without self)
globals_d: A dictionary of globals to lookup annotation definitions in | juraj-google-style |
def start_listener_thread(self, timeout_ms=30000, exception_handler=None):
try:
thread = Thread(target=self.listen_forever,
args=(timeout_ms, exception_handler))
thread.daemon = True
self.sync_thread = thread
self.should_listen = True
thread.start()
except RuntimeError:
e = sys.exc_info()[0]
logger.error("Error: unable to start thread. %s", str(e)) | Start a listener thread to listen for events in the background.
Args:
timeout (int): How long to poll the Home Server for before
retrying.
exception_handler (func(exception)): Optional exception handler
function which can be used to handle exceptions in the caller
thread. | juraj-google-style |
def __write_to_fil_light(self, filename_out, *args, **kwargs):
n_bytes = self.header[b'nbits'] / 8
with open(filename_out, "wb") as fileh:
fileh.write(generate_sigproc_header(self))
j = self.data
if n_bytes == 4:
np.float32(j.ravel()).tofile(fileh)
elif n_bytes == 2:
np.int16(j.ravel()).tofile(fileh)
elif n_bytes == 1:
np.int8(j.ravel()).tofile(fileh) | Write data to .fil file.
Args:
filename_out (str): Name of output file | juraj-google-style |
def _get_snippet_ctime(self, snip_name):
if snip_name not in self.snip_ctimes:
snippet = yaml_snippet_loader.YamlSnippetLoader.get_snippet_by_name(snip_name)
self.snip_ctimes[snip_name] = os.path.getctime(snippet.path)
return self.snip_ctimes[snip_name] | Returns and remembers (during this DevAssistant invocation) last ctime of given
snippet.
Calling ctime costs lots of time and some snippets, like common_args, are used widely,
so we don't want to call ctime bazillion times on them during one invocation.
Args:
snip_name: name of snippet to get ctime for
Returns:
ctime of the snippet | juraj-google-style |
def _GetWinevtRcDatabaseReader(self):
if ((not self._winevt_database_reader) and self._data_location):
database_path = os.path.join(self._data_location, self._WINEVT_RC_DATABASE)
if (not os.path.isfile(database_path)):
return None
self._winevt_database_reader = winevt_rc.WinevtResourcesSqlite3DatabaseReader()
if (not self._winevt_database_reader.Open(database_path)):
self._winevt_database_reader = None
return self._winevt_database_reader | Opens the Windows Event Log resource database reader.
Returns:
WinevtResourcesSqlite3DatabaseReader: Windows Event Log resource
database reader or None. | codesearchnet |
class PoolerAnswerClass(nn.Module):
def __init__(self, config):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
logger.warning_once('[DEPRECATION WARNING] `PoolerAnswerClass` is deprecated and will be removed in v4.53. Please use model-specific class, e.g. `XLMPoolerAnswerClass`.')
def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, cls_index: Optional[torch.LongTensor]=None) -> torch.FloatTensor:
hsz = hidden_states.shape[-1]
assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(-1, -1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(-2)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2)
else:
cls_token_state = hidden_states[:, -1, :]
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x | Compute SQuAD 2.0 answer class from classification and start tokens hidden states.
Args:
config ([`PretrainedConfig`]):
The config used by the model, will be used to grab the `hidden_size` of the model. | github-repos |
def __call__(self, func: T) -> T:
api_names_attr = API_ATTRS[self._api_name].names
api_names_attr_v1 = API_ATTRS_V1[self._api_name].names
_, undecorated_func = tf_decorator.unwrap(func)
self.set_attr(undecorated_func, api_names_attr, self._names)
self.set_attr(undecorated_func, api_names_attr_v1, self._names_v1)
for name in self._names:
_NAME_TO_SYMBOL_MAPPING[name] = func
for name_v1 in self._names_v1:
_NAME_TO_SYMBOL_MAPPING['compat.v1.%s' % name_v1] = func
return func | Calls this decorator.
Args:
func: decorated symbol (function or class).
Returns:
The input function with _tf_api_names attribute set. | github-repos |
def create_pull_response(responses):
from google.cloud import pubsub
from google.protobuf import timestamp_pb2
res = pubsub.types.PullResponse()
for response in responses:
received_message = pubsub.types.ReceivedMessage()
message = received_message.message
message.data = response.data
if response.attributes is not None:
for k, v in response.attributes.items():
message.attributes[k] = v
publish_time = timestamp_pb2.Timestamp()
if response.publish_time_secs is not None:
publish_time.seconds = response.publish_time_secs
if response.publish_time_nanos is not None:
publish_time.nanos = response.publish_time_nanos
message.publish_time = publish_time
if response.ack_id is not None:
received_message.ack_id = response.ack_id
res.received_messages.append(received_message)
return res | Create an instance of ``google.cloud.pubsub.types.ReceivedMessage``.
Used to simulate the response from pubsub.SubscriberClient().pull().
Args:
responses: list of ``PullResponseMessage``
Returns:
An instance of ``google.cloud.pubsub.types.PullResponse`` populated with
responses. | github-repos |
def get_reference(root):
reference = {}
elem = root.find('bibliographyLink')
if (elem is None):
raise MissingElementError('bibliographyLink')
ref_doi = elem.get('doi', None)
ref_key = elem.get('preferredKey', None)
if (ref_doi is not None):
try:
ref = crossref_api.works(ids=ref_doi)['message']
except (HTTPError, habanero.RequestError, ConnectionError):
if (ref_key is None):
raise KeywordError('DOI not found and preferredKey attribute not set')
else:
warn('Missing doi attribute in bibliographyLink or lookup failed. Setting "detail" key as a fallback; please update to the appropriate fields.')
reference['detail'] = ref_key
if (reference['detail'][(- 1)] != '.'):
reference['detail'] += '.'
else:
if (ref_key is not None):
warn('Using DOI to obtain reference information, rather than preferredKey.')
reference['doi'] = elem.attrib['doi']
reference['journal'] = ref.get('container-title')[0]
ref_year = (ref.get('published-print') or ref.get('published-online'))
reference['year'] = int(ref_year['date-parts'][0][0])
reference['volume'] = int(ref.get('volume'))
reference['pages'] = ref.get('page')
reference['authors'] = []
for author in ref['author']:
auth = {}
auth['name'] = ' '.join([author['given'], author['family']])
orcid = author.get('ORCID')
if orcid:
auth['ORCID'] = orcid.lstrip('http://orcid.org/')
reference['authors'].append(auth)
elif (ref_key is not None):
warn('Missing doi attribute in bibliographyLink. Setting "detail" key as a fallback; please update to the appropriate fields.')
reference['detail'] = ref_key
if (reference['detail'][(- 1)] != '.'):
reference['detail'] += '.'
else:
raise MissingAttributeError('preferredKey', 'bibliographyLink')
return reference | Read reference info from root of ReSpecTh XML file.
Args:
root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file
Returns:
properties (`dict`): Dictionary with reference information | codesearchnet |
def path_fraction_point(points, fraction):
seg_id, offset = path_fraction_id_offset(points, fraction, relative_offset=True)
return linear_interpolate(points[seg_id], points[seg_id + 1], offset) | Computes the point which corresponds to the fraction
of the path length along the piecewise linear curve which
is constructed from the set of points.
Args:
points: an iterable of indexable objects with indices
0, 1, 2 corresponding to 3D cartesian coordinates
fraction: path length fraction (0 <= fraction <= 1)
Returns:
The 3D coordinates of the aforementioned point | juraj-google-style |
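A minimal NumPy sketch of the same idea, shown here because `path_fraction_id_offset` and `linear_interpolate` are not included above (cumulative arc length, then interpolation inside the segment containing the requested fraction; the helper name is illustrative):

```python
import numpy as np

def path_fraction_point_np(points, fraction):
    pts = np.asarray(points, dtype=float)
    seg_lens = np.linalg.norm(np.diff(pts, axis=0), axis=1)
    target = fraction * seg_lens.sum()
    cum = np.concatenate([[0.0], np.cumsum(seg_lens)])
    i = int(np.searchsorted(cum, target, side='right')) - 1
    i = min(i, len(seg_lens) - 1)        # clamp so fraction == 1 stays valid
    offset = (target - cum[i]) / seg_lens[i]
    return pts[i] + offset * (pts[i + 1] - pts[i])

print(path_fraction_point_np([(0, 0, 0), (1, 0, 0), (1, 1, 0)], 0.75))  # [1.  0.5 0. ]
```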
def tail(self, n):
if n < 0:
n = max(0, len(self.index) + n)
if self._is_transposed:
result = self.__constructor__(
self.data.transpose().take(1, -n).transpose(),
self.index[-n:],
self.columns,
self._dtype_cache,
)
result._is_transposed = True
else:
result = self.__constructor__(
self.data.take(0, -n), self.index[-n:], self.columns, self._dtype_cache
)
return result | Returns the last n rows.
Args:
n: Integer containing the number of rows to return.
Returns:
DataManager containing the last n rows of the original DataManager. | juraj-google-style |
def _GetNumberOfDaysInCentury(self, year):
if (year < 0):
raise ValueError('Year value out of bounds.')
(year, _) = divmod(year, 100)
if self._IsLeapYear(year):
return 36525
return 36524 | Retrieves the number of days in a century.
Args:
year (int): year in the century e.g. 1970.
Returns:
int: number of (remaining) days in the century.
Raises:
ValueError: if the year value is out of bounds. | codesearchnet |
def GetDataDownloader(self, version=sorted(_SERVICE_MAP.keys())[(- 1)], server=None):
if (not server):
server = DEFAULT_ENDPOINT
return DataDownloader(self, version, server) | Creates a downloader for Ad Manager reports and PQL result sets.
This is a convenience method. It is functionally identical to calling
DataDownloader(ad_manager_client, version, server)
Args:
[optional]
version: A string identifying the Ad Manager version to connect to.
This defaults to what is currently the latest version. This will be
updated in future releases to point to what is then the
latest version.
server: A string identifying the webserver hosting the Ad Manager API.
Returns:
A DataDownloader tied to this AdManagerClient, ready to download reports. | codesearchnet |
def getmtime(self, path):
try:
file_obj = self.filesystem.resolve(path)
return file_obj.st_mtime
except IOError:
self.filesystem.raise_os_error(errno.ENOENT, winerror=3) | Returns the modification time of the fake file.
Args:
path: the path to fake file.
Returns:
(int, float) the modification time of the fake file
in number of seconds since the epoch.
Raises:
OSError: if the file does not exist. | codesearchnet |
def parse_peddy_ped_check(lines):
ped_check = []
header = []
for i,line in enumerate(lines):
line = line.rstrip()
if i == 0:
header = line.lstrip('#').split(',')
else:
pair_info = dict(zip(header, line.split(',')))
pair_info['hets_a'] = convert_number(pair_info['hets_a'])
pair_info['hets_b'] = convert_number(pair_info['hets_b'])
pair_info['ibs0'] = convert_number(pair_info['ibs0'])
pair_info['ibs2'] = convert_number(pair_info['ibs2'])
pair_info['n'] = convert_number(pair_info['n'])
pair_info['rel'] = convert_number(pair_info['rel'])
pair_info['pedigree_relatedness'] = convert_number(pair_info['pedigree_relatedness'])
pair_info['rel_difference'] = convert_number(pair_info['rel_difference'])
pair_info['shared_hets'] = convert_number(pair_info['shared_hets'])
pair_info['pedigree_parents'] = make_bool(pair_info.get('pedigree_parents'))
pair_info['predicted_parents'] = make_bool(pair_info.get('predicted_parents'))
pair_info['parent_error'] = make_bool(pair_info.get('parent_error'))
pair_info['sample_duplication_error'] = make_bool(pair_info.get('sample_duplication_error'))
ped_check.append(pair_info)
return ped_check | Parse a .ped_check.csv file
Args:
lines(iterable(str))
Returns:
ped_check(list(dict)) | juraj-google-style |
def _decorate_run_options_for_profile(self, run_options):
run_options.trace_level = config_pb2.RunOptions.FULL_TRACE | Modify a RunOptions object for profiling TensorFlow graph execution.
Args:
run_options: (RunOptions) the modified RunOptions object. | github-repos |
def page(self, title=None, pageid=None, auto_suggest=True, redirect=True, preload=False):
if (((title is None) or (title.strip() == '')) and (pageid is None)):
raise ValueError('Either a title or a pageid must be specified')
elif title:
if auto_suggest:
temp_title = self.suggest(title)
if (temp_title is None):
raise PageError(title=title)
else:
title = temp_title
return MediaWikiPage(self, title, redirect=redirect, preload=preload)
else:
return MediaWikiPage(self, pageid=pageid, preload=preload) | Get MediaWiki page based on the provided title or pageid
Args:
title (str): Page title
pageid (int): MediaWiki page identifier
auto-suggest (bool): **True:** Allow page title auto-suggest
redirect (bool): **True:** Follow page redirects
preload (bool): **True:** Load most page properties
Raises:
ValueError: when title is blank or None and no pageid is \
provided
Raises:
:py:func:`mediawiki.exceptions.PageError`: if page does \
not exist
Note:
Title takes precedence over pageid if both are provided | codesearchnet |
def _calculate_page_index(index, data):
if (index > data['total_results']):
raise ValueError('index not in paged data')
page_length = len(data['results'])
return (((index // page_length) + 1), (index % page_length))
Arguments:
index (:py:class:`int`): The overall index.
data: (:py:class:`dict`) The first page of data.
Returns:
:py:class:`tuple`: The location of that index, in the format
``(page, index_in_page)``. | codesearchnet |
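Assuming the reconstruction of the truncated return statement above, a quick worked example with 20 results per page:

```python
page_length = 20
index = 57
print((index // page_length) + 1, index % page_length)  # page 3, position 17
```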
def MemberVisible(component, name, member, class_attrs=None, verbose=False):
if isinstance(name, str) and name.startswith('__'):
return False
if verbose:
return True
if member is absolute_import or member is division or member is print_function:
return False
if isinstance(member, type(absolute_import)):
return False
modules_to_hide = []
if inspect.ismodule(member) and member in modules_to_hide:
return False
if inspect.isclass(component):
if class_attrs is None:
class_attrs = inspectutils.GetClassAttrsDict(component) or {}
class_attr = class_attrs.get(name)
if class_attr:
if class_attr.kind in ('method', 'property'):
return False
tuplegetter = getattr(collections, '_tuplegetter', type(None))
if isinstance(class_attr.object, tuplegetter):
return False
if isinstance(name, str):
return not name.startswith('_')
return True | Returns whether a member should be included in auto-completion or help.
Determines whether a member of an object with the specified name should be
included in auto-completion or help text(both usage and detailed help).
If the member name starts with '__', it will always be excluded. If it
starts with only one '_', it will be included for all non-string types. If
verbose is True, the members, including the private members, are included.
When not in verbose mode, some modules and functions are excluded as well.
Args:
component: The component containing the member.
name: The name of the member.
member: The member itself.
class_attrs: (optional) If component is a class, provide this as:
inspectutils.GetClassAttrsDict(component). If not provided, it will be
computed.
verbose: Whether to include private members.
Returns:
A boolean value indicating whether the member should be included. | github-repos |
def get_formal_type_parameter(self, t: str) -> 'BaseValue':
del t
return self.ctx.convert.unsolvable | Get the class's type for the type parameter.
Treating self as a class_mixin.Class, gets its formal type for the given
type parameter. For the real implementation, see
ParameterizedClass.get_formal_type_parameter.
Args:
t: The name of the type parameter.
Returns:
A formal type. | github-repos |
def _is_txn_to_replay(self, txn_id, possible_successor, already_seen):
is_successor = self._is_predecessor_of_possible_successor(
txn_id,
possible_successor)
in_different_batch = not self._is_in_same_batch(txn_id,
possible_successor)
has_not_been_seen = possible_successor not in already_seen
return is_successor and in_different_batch and has_not_been_seen | Decide if possible_successor should be replayed.
Args:
txn_id (str): Id of txn in failed batch.
possible_successor (str): Id of txn to possibly replay.
already_seen (list): A list of possible_successors that have
been replayed.
Returns:
(bool): If the possible_successor should be replayed. | juraj-google-style |
def _runExperimentImpl(options, model=None):
json_helpers.validate(options.privateOptions,
schemaDict=g_parsedPrivateCommandLineOptionsSchema)
experimentDir = options.experimentDir
descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
experimentDir)
expIface = helpers.getExperimentDescriptionInterfaceFromModule(
descriptionPyModule)
if options.privateOptions['listAvailableCheckpoints']:
_printAvailableCheckpoints(experimentDir)
return None
experimentTasks = expIface.getModelControl().get('tasks', [])
if (len(experimentTasks) == 0 and
expIface.getModelControl()['environment'] == OpfEnvironment.Nupic):
expIface.convertNupicEnvToOPF()
experimentTasks = expIface.getModelControl().get('tasks', [])
expIface.normalizeStreamSources()
newSerialization = options.privateOptions['newSerialization']
if options.privateOptions['listTasks']:
print "Available tasks:"
for label in [t['taskLabel'] for t in experimentTasks]:
print "\t", label
return None
if options.privateOptions['runCheckpointName']:
assert model is None
checkpointName = options.privateOptions['runCheckpointName']
model = ModelFactory.loadFromCheckpoint(
savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName),
newSerialization=newSerialization)
elif model is not None:
print "Skipping creation of OPFExperiment instance: caller provided his own"
else:
modelDescription = expIface.getModelDescription()
model = ModelFactory.create(modelDescription)
if options.privateOptions['createCheckpointName']:
checkpointName = options.privateOptions['createCheckpointName']
_saveModel(model=model,
experimentDir=experimentDir,
checkpointLabel=checkpointName,
newSerialization=newSerialization)
return model
taskIndexList = range(len(experimentTasks))
customTaskExecutionLabelsList = options.privateOptions['taskLabels']
if customTaskExecutionLabelsList:
taskLabelsList = [t['taskLabel'] for t in experimentTasks]
taskLabelsSet = set(taskLabelsList)
customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)
assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), \
("Some custom-provided task execution labels don't correspond "
"to actual task labels: mismatched labels: %r; actual task "
"labels: %r.") % (customTaskExecutionLabelsSet - taskLabelsSet,
customTaskExecutionLabelsList)
taskIndexList = [taskLabelsList.index(label) for label in
customTaskExecutionLabelsList]
print "Executing tasks: %r" % [experimentTasks[i]['taskLabel'] for
i in taskIndexList]
for taskIndex in taskIndexList:
task = experimentTasks[taskIndex]
taskRunner = _TaskRunner(model=model,
task=task,
cmdOptions=options)
taskRunner.run()
del taskRunner
if options.privateOptions['checkpointModel']:
_saveModel(model=model,
experimentDir=experimentDir,
checkpointLabel=task['taskLabel'],
newSerialization=newSerialization)
return model | Creates and runs the experiment
Args:
options: namedtuple ParseCommandLineOptionsResult
model: For testing: may pass in an existing OPF Model instance
to use instead of creating a new one.
Returns: reference to OPFExperiment instance that was constructed (this
is provided to aid with debugging) or None, if none was
created. | juraj-google-style |
def exhaustive_curie_check(self, ontology: pd.DataFrame, curie_predicate: str, curie_prefix: str, diff: bool=True) -> Tuple[list]:
(inside, outside) = ([], [])
curie_prefix = curie_prefix.replace(':', '')
header = (['Index'] + list(ontology.columns))
for row in ontology.itertuples():
row = {header[i]: val for (i, val) in enumerate(row)}
entity_curie = row[curie_predicate]
if isinstance(entity_curie, list):
if (len(entity_curie) != 1):
exit('Need to have only 1 iri in the cell from the ontology.')
else:
entity_curie = entity_curie[0]
entity_curie = ((curie_prefix + ':') + self.extract_fragment(entity_curie))
ilx_row = self.curie2row.get(entity_curie)
if ilx_row:
inside.append({'external_ontology_row': row, 'ilx_rows': [ilx_row]})
else:
outside.append(row)
if diff:
diff = self.__exhaustive_diff(inside)
return (inside, outside, diff)
return (inside, outside) | All entities with conflicting curies gets a full diff to see if they belong
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist it is also thrown into a the colnames.
curie_predicate: usually in qname form and is the colname of the DataFrame
curie_prefix: Not all cells in the DataFrame will have complete curies so we extract
the fragement from the cell and use the prefix to complete it.
diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2
Returns:
inside: entities that are inside of InterLex
outside: entities NOT in InterLex
diff (optional): List[List[dict]]... so complicated but useful diff between matches only | codesearchnet |
def _controller_buffer(self, port):
address = _LIB.Controller(self._env, port)
buffer_ = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents
return np.frombuffer(buffer_, dtype='uint8') | Find the pointer to a controller and setup a NumPy buffer.
Args:
port: the port of the controller to setup
Returns:
a NumPy buffer with the controller's binary data | juraj-google-style |
def _compile_arithmetic_expression(self, expr: Expression, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[List[tf.Tensor]]=None) -> TensorFluent:
etype = expr.etype
args = expr.args
if (len(args) == 1):
etype2op = {'+': (lambda x: x), '-': (lambda x: (- x))}
if (etype[1] not in etype2op):
raise ValueError('Invalid unary arithmetic expression:\n{}'.format(expr))
op = etype2op[etype[1]]
x = self._compile_expression(args[0], scope, batch_size, noise)
fluent = op(x)
else:
etype2op = {'+': (lambda x, y: (x + y)), '-': (lambda x, y: (x - y)), '*': (lambda x, y: (x * y)), '/': (lambda x, y: (x / y))}
if (etype[1] not in etype2op):
raise ValueError('Invalid binary arithmetic expression:\n{}'.format(expr))
op = etype2op[etype[1]]
x = self._compile_expression(args[0], scope, batch_size, noise)
y = self._compile_expression(args[1], scope, batch_size, noise)
fluent = op(x, y)
return fluent | Compile an arithmetic expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL arithmetic expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. | codesearchnet |
def slice_hidden(self, x):
x_sliced = tf.reshape(x, shape=[(- 1), self.hparams.num_blocks, self.hparams.block_dim])
return x_sliced | Slice encoder hidden state into block_dim.
Args:
x: Encoder hidden state of shape [-1, hidden_size].
Returns:
Sliced states of shape [-1, num_blocks, block_dim]. | codesearchnet |
def floor(x):
if any_symbolic_tensors((x,)):
return Floor().symbolic_call(x)
return backend.numpy.floor(x) | Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that `i <= x`.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise floor of `x`. | github-repos |
def get(cls, issue_id):
res = Issue.get(issue_id, IssueType.get(cls.issue_type).issue_type_id)
return (cls(res) if res else None) | Returns the class object identified by `issue_id`
Args:
issue_id (str): Unique EC2 Instance ID to load from database
Returns:
EC2 Instance object if found, else None | codesearchnet |
def _ParseCachedEntryVista(self, value_data, cached_entry_offset):
try:
cached_entry = self._ReadStructureFromByteStream(
value_data[cached_entry_offset:], cached_entry_offset,
self._cached_entry_data_type_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse cached entry value with error: {0!s}'.format(
exception))
path_size = cached_entry.path_size
maximum_path_size = cached_entry.maximum_path_size
path_offset = cached_entry.path_offset
if path_offset > 0 and path_size > 0:
path_size += path_offset
maximum_path_size += path_offset
try:
path = value_data[path_offset:path_size].decode('utf-16-le')
except UnicodeDecodeError:
raise errors.ParseError('Unable to decode cached entry path to string')
cached_entry_object = AppCompatCacheCachedEntry()
cached_entry_object.cached_entry_size = (
self._cached_entry_data_type_map.GetByteSize())
cached_entry_object.insertion_flags = cached_entry.insertion_flags
cached_entry_object.last_modification_time = (
cached_entry.last_modification_time)
cached_entry_object.path = path
cached_entry_object.shim_flags = cached_entry.shim_flags
return cached_entry_object | Parses a Windows Vista cached entry.
Args:
value_data (bytes): value data.
cached_entry_offset (int): offset of the first cached entry data
relative to the start of the value data.
Returns:
AppCompatCacheCachedEntry: cached entry.
Raises:
ParseError: if the value data could not be parsed. | juraj-google-style |
def get(cls, session, team_id):
return cls(
'/teams/%d.json' % team_id,
singleton=True,
session=session,
) | Return a specific team.
Args:
session (requests.sessions.Session): Authenticated session.
team_id (int): The ID of the team to get.
Returns:
helpscout.models.Person: A person singleton representing the team,
if existing. Otherwise ``None``. | juraj-google-style |
def List(self, request, global_params=None):
config = self.GetMethodConfig('List')
return self._RunMethod(config, request, global_params=global_params) | Lists all jobs that you started in the specified project. Job information is available for a six month period after creation. The job list is sorted in reverse chronological order, by job creation time. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.
Args:
request: (BigqueryJobsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(JobList) The response message. | github-repos |
def preprocess(source):
doc = html5lib.parseFragment(source)
source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8')
source = source.replace(u'\n', u'').strip()
source = re.sub('\\s\\s+', u' ', source)
return source | Removes unnecessary break lines and white spaces.
Args:
source (str): Input sentence.
Returns:
Preprocessed sentence. (str) | codesearchnet |
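A hedged usage sketch for the helper above (the HTML fragment is made up; it assumes the `re`, `html5lib`, and `xml.etree.ElementTree as ET` imports that the function requires):

```python
print(preprocess("<p>Hello,\n   world!  </p>"))  # -> Hello, world!
```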
def _parse_flowcontrol_send(self, config):
value = 'off'
match = re.search(r'flowcontrol send (\w+)$', config, re.M)
if match:
value = match.group(1)
return dict(flowcontrol_send=value) | Scans the config block and returns the flowcontrol send value
Args:
config (str): The interface config block to scan
Returns:
dict: Returns a dict object with the flowcontrol send value
retrieved from the config block. The returned dict object
is intended to be merged into the interface resource dict | juraj-google-style |
def time_zones_for_number(numobj):
ntype = number_type(numobj)
if ntype == PhoneNumberType.UNKNOWN:
return _UNKNOWN_TIME_ZONE_LIST
elif not is_number_type_geographical(ntype, numobj.country_code):
return _country_level_time_zones_for_number(numobj)
return time_zones_for_geographical_number(numobj) | As time_zones_for_geographical_number() but explicitly checks the
validity of the number passed in.
Arguments:
numobj -- a valid phone number for which we want to get the time zones to which it belongs
Returns a list of the corresponding time zones or a single element list with the default
unknown time zone if no other time zone was found or if the number was invalid | juraj-google-style |
def build_chain(self, source, chain):
for group in WalkByGroup(source, (chain.order + 1)):
pre = group[:(- 1)]
res = group[(- 1)]
if (pre not in chain.content):
chain.content[pre] = {res: 1}
elif (res not in chain.content[pre]):
chain.content[pre][res] = 1
else:
chain.content[pre][res] += 1
chain.decache() | Build markov chain from source on top of existing chain
Args:
source: iterable which will be used to build chain
chain: MarkovChain in currently loaded shelve file that
will be extended by source | codesearchnet |
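A self-contained sketch of the same sliding-window counting for an order-1 chain, using plain dicts instead of the MarkovChain/shelve machinery above:

```python
def build_counts(tokens, order=1):
    counts = {}
    # Slide a window of (order + 1) tokens: the first `order` tokens form the
    # state, the final token is the observed successor.
    for i in range(len(tokens) - order):
        state = tuple(tokens[i:i + order])
        nxt = tokens[i + order]
        counts.setdefault(state, {})
        counts[state][nxt] = counts[state].get(nxt, 0) + 1
    return counts

print(build_counts("the cat sat on the mat".split()))
# {('the',): {'cat': 1, 'mat': 1}, ('cat',): {'sat': 1}, ('sat',): {'on': 1}, ('on',): {'the': 1}}
```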
def _GetEntries(self, paths, max_entries, iterator_from_file, is_sequence=False):
entries = {}
index = 0
for filepath in paths:
reader = iterator_from_file(filepath)
for record in reader:
if is_sequence:
sequence_example = tf.train.SequenceExample.FromString(record)
self._ParseExample(sequence_example.context.feature, sequence_example.feature_lists.feature_list, entries, index)
else:
self._ParseExample(tf.train.Example.FromString(record).features.feature, [], entries, index)
index += 1
if (index == max_entries):
return (entries, index)
return (entries, index) | Extracts examples into a dictionary of feature values.
Args:
paths: A list of the paths to the files to parse.
max_entries: The maximum number of examples to load.
iterator_from_file: A method that takes a file path string and returns an
iterator to the examples in that file.
is_sequence: True if the input data from 'iterator_from_file' are
tf.SequenceExamples, False if tf.Examples. Defaults to false.
Returns:
A tuple with two elements:
- A dictionary of all features parsed thus far and arrays of their
values.
- The number of examples parsed. | codesearchnet |
def SetDecodedStreamSize(self, decoded_stream_size):
if self._is_open:
raise IOError('Already open.')
if (decoded_stream_size < 0):
raise ValueError('Invalid decoded stream size: {0:d} value out of bounds.'.format(decoded_stream_size))
self._decoded_stream_size = decoded_stream_size | Sets the decoded stream size.
This function is used to set the decoded stream size if it can be
determined separately.
Args:
decoded_stream_size (int): size of the decoded stream in bytes.
Raises:
IOError: if the file-like object is already open.
OSError: if the file-like object is already open.
ValueError: if the decoded stream size is invalid. | codesearchnet |
def _might_have_parameter(fn_or_cls, arg_name):
if inspect.isclass(fn_or_cls):
fn = _find_class_construction_fn(fn_or_cls)
else:
fn = fn_or_cls
while hasattr(fn, '__wrapped__'):
fn = fn.__wrapped__
arg_spec = _get_cached_arg_spec(fn)
if six.PY3:
if arg_spec.varkw:
return True
return arg_name in arg_spec.args or arg_name in arg_spec.kwonlyargs
else:
if arg_spec.keywords:
return True
return arg_name in arg_spec.args | Returns True if `arg_name` might be a valid parameter for `fn_or_cls`.
Specifically, this means that `fn_or_cls` either has a parameter named
`arg_name`, or has a `**kwargs` parameter.
Args:
fn_or_cls: The function or class to check.
arg_name: The name of the parameter.
Returns:
Whether `arg_name` might be a valid argument of `fn`. | juraj-google-style |
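A rough Python 3 equivalent using `inspect.signature` (the gin helper above additionally resolves a class's construction function and caches arg specs):

```python
import inspect

def might_have_parameter(fn, arg_name):
    params = inspect.signature(fn).parameters.values()
    if any(p.kind is inspect.Parameter.VAR_KEYWORD for p in params):
        return True                     # **kwargs accepts any keyword
    return any(p.name == arg_name for p in params)

def f(a, b=1, **kwargs): ...
def g(x): ...

print(might_have_parameter(f, "whatever"))  # True, via **kwargs
print(might_have_parameter(g, "y"))         # False
```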
def get_sv_variants(self, chromosome=None, end_chromosome=None, sv_type=None,
pos=None, end=None):
query = {}
if chromosome:
query['chrom'] = chromosome
if end_chromosome:
query['end_chrom'] = end_chromosome
if sv_type:
query['sv_type'] = sv_type
if pos:
if not '$and' in query:
query['$and'] = []
query['$and'].append({'pos_left': {'$lte': pos}})
query['$and'].append({'pos_right': {'$gte': pos}})
if end:
if not '$and' in query:
query['$and'] = []
query['$and'].append({'end_left': {'$lte': end}})
query['$and'].append({'end_right': {'$gte': end}})
LOG.info("Find all sv variants {}".format(query))
return self.db.structural_variant.find(query).sort([('chrom', ASCENDING), ('pos_left', ASCENDING)]) | Return all structural variants in the database
Args:
chromosome (str)
end_chromosome (str)
sv_type (str)
pos (int): Left position of SV
end (int): Right position of SV
Returns:
variants (Iterable(Variant)) | juraj-google-style |
class IntLayerNorm(nn.Module):
def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant='none'):
super().__init__()
self.normalized_shape = normalized_shape
self.eps = eps
self.weight = nn.Parameter(torch.zeros(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.quant_mode = quant_mode
if force_dequant in ['nonlinear', 'layernorm']:
logger.info('Force dequantize layernorm')
self.quant_mode = False
self.register_buffer('shift', torch.zeros(1))
self.output_bit = output_bit
self.max_bit = 32
self.dim_sqrt = None
self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)
def set_shift(self, y_int):
with torch.no_grad():
y_sq_int = y_int ** 2
var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
shift = torch.log2(torch.sqrt(var_int / 2 ** self.max_bit)).ceil().max()
shift_old = self.shift
self.shift = torch.max(self.shift, shift)
logger.info(f'Dynamic shift adjustment: {int(shift_old)} -> {int(self.shift)}')
def overflow_fallback(self, y_int):
self.set_shift(y_int)
y_int_shifted = floor_ste.apply(y_int / 2 ** self.shift)
y_sq_int = y_int_shifted ** 2
var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
return var_int
def forward(self, x, scaling_factor=None):
if not self.quant_mode:
mean = x.mean(axis=2, keepdim=True)
y = x - mean
var = torch.mean(y ** 2, axis=2, keepdim=True)
x = y / torch.sqrt(self.eps + var)
x = x * self.weight + self.bias
return (x, None)
if self.dim_sqrt is None:
n = torch.tensor(x.shape[2], dtype=torch.float)
self.dim_sqrt = torch.sqrt(n).to(x.device)
x_int = x / scaling_factor
mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))
y_int = x_int - mean_int
y_int_shifted = floor_ste.apply(y_int / 2 ** self.shift)
y_sq_int = y_int_shifted ** 2
var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
if self.training:
if var_int.max() >= 2 ** self.max_bit:
var_int = self.overflow_fallback(y_int)
assert var_int.max() < 2 ** self.max_bit + 0.1, 'Error detected in overflow handling: `var_int` exceeds `self.max_bit` (the maximum possible bit width)'
std_int = floor_ste.apply(torch.sqrt(var_int)) * 2 ** self.shift
factor = floor_ste.apply(2 ** 31 / std_int)
y_int = floor_ste.apply(y_int * factor / 2)
scaling_factor = self.dim_sqrt / 2 ** 30
bias = self.bias.data.detach() / self.weight.data.detach()
bias_int = floor_ste.apply(bias / scaling_factor)
y_int = y_int + bias_int
scaling_factor = scaling_factor * self.weight
x = y_int * scaling_factor
return (x, scaling_factor) | Quantized version of `torch.nn.LayerNorm`. Adds quantization-specific arguments on top of `torch.nn.LayerNorm`.
Args:
output_bit (`int`, *optional*, defaults to `8`):
Bitwidth for the layer output activation.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize the layer if either "layernorm" or "nonlinear" is given. | github-repos |
def add_graph(self, run_key, device_name, graph_def, debug=False):
graph_dict = (self._run_key_to_debug_graphs if debug else self._run_key_to_original_graphs)
if (not (run_key in graph_dict)):
graph_dict[run_key] = dict()
graph_dict[run_key][tf.compat.as_str(device_name)] = debug_graphs_helper.DebugGraphWrapper(graph_def) | Add a GraphDef.
Args:
run_key: A key for the run, containing information about the feeds,
fetches, and targets.
device_name: The name of the device that the `GraphDef` is for.
graph_def: An instance of the `GraphDef` proto.
debug: Whether `graph_def` consists of the debug ops. | codesearchnet |
def _determine_best_metric(self, metrics, trial):
is_new_best_metric = False
if self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith('eval_'):
metric_to_check = f'eval_{metric_to_check}'
try:
metric_value = metrics[metric_to_check]
except KeyError as exc:
raise KeyError(f"The `metric_for_best_model` training argument is set to '{metric_to_check}', which is not found in the evaluation metrics. The available evaluation metrics are: {list(metrics.keys())}. Consider changing the `metric_for_best_model` via the TrainingArguments.") from exc
operator = np.greater if self.args.greater_is_better else np.less
if self.state.best_metric is None:
self.state.best_metric = float('-inf') if self.args.greater_is_better else float('inf')
if operator(metric_value, self.state.best_metric):
self.state.best_metric = metric_value
if self.args.save_strategy in [SaveStrategy.STEPS, SaveStrategy.EPOCH]:
self.state.best_global_step = self.state.global_step
is_new_best_metric = True
return is_new_best_metric | Determine if the model should be saved based on the evaluation metrics.
Returns:
bool: True if a new best metric was found, else False | github-repos |
def _ParseAndComputePenalties(self, code, dumptree=False):
tree = pytree_utils.ParseCodeToTree(code)
split_penalty.ComputeSplitPenalties(tree)
if dumptree:
pytree_visitor.DumpPyTree(tree, target_stream=sys.stderr)
return tree | Parses the code and computes split penalties.
Arguments:
code: code to parse as a string
dumptree: if True, the parsed pytree (after penalty assignment) is dumped
to stderr. Useful for debugging.
Returns:
Parse tree. | github-repos |
def t0(self):
return self._t0 | Absolute timestamp of the first dumped tensor across all devices.
Returns:
(`int`) absolute timestamp of the first dumped tensor, in microseconds. | github-repos |
def write(self, default: bool=False):
none_type = type(None)
if default:
ordered_vals = ['query', 'subject', 'identity', 'length',
'mismatches', 'gaps', 'query_start', 'query_end',
'subject_start', 'subject_end', 'evalue',
'bitscore']
else:
try:
ordered_vals = [self.custom_fs[i] if i in self.custom_fs
else getattr(self, i) for i in self.fs_order]
except TypeError:
ordered_vals = [getattr(self, i) for i in self.fs_order]
fstr = "\t".join(['-' if type(i) == none_type else str(i) for i in
ordered_vals])
return '{}{}'.format(fstr, os.linesep) | Restore B6/M8 entry to original format
Args:
default (bool): output entry in default BLAST+ B6 format
Returns:
str: properly formatted string containing the B6/M8 entry | juraj-google-style |
def last_metric_eval(multiplexer, session_name, metric_name):
try:
(run, tag) = run_tag_from_session_and_metric(session_name, metric_name)
tensor_events = multiplexer.Tensors(run=run, tag=tag)
except KeyError as e:
raise KeyError(("Can't find metric %s for session: %s. Underlying error message: %s" % (metric_name, session_name, e)))
last_event = tensor_events[(- 1)]
return (last_event.wall_time, last_event.step, tf.make_ndarray(last_event.tensor_proto).item()) | Returns the last evaluations of the given metric at the given session.
Args:
multiplexer: The EventMultiplexer instance allowing access to
the exported summary data.
session_name: String. The session name for which to get the metric
evaluations.
metric_name: api_pb2.MetricName proto. The name of the metric to use.
Returns:
A 3-tuple of the form [wall-time, step, value], denoting
the last evaluation of the metric, where wall-time denotes the wall time
in seconds since UNIX epoch of the time of the evaluation, step denotes
the training step at which the model is evaluated, and value denotes the
(scalar real) value of the metric.
Raises:
KeyError if the given session does not have the metric. | codesearchnet |
def call_api(self, method_type, method_name, valid_status_codes, resource, data, uid, **kwargs):
url = resource.get_resource_url(resource, base_url=self.Meta.base_url)
if (method_type in SINGLE_RESOURCE_METHODS):
if ((not uid) and (not kwargs)):
raise MissingUidException
url = resource.get_url(url=url, uid=uid, **kwargs)
params = {'headers': self.get_http_headers(self.Meta.name, method_name, **kwargs), 'url': url}
if ((method_type in ['POST', 'PUT', 'PATCH']) and isinstance(data, dict)):
params.update(json=data)
prepared_request = self.prepare_http_request(method_type, params, **kwargs)
response = self.session.send(prepared_request)
return self._handle_response(response, valid_status_codes, resource) | Make HTTP calls.
Args:
method_type: The HTTP method
method_name: The name of the python method making the HTTP call
valid_status_codes: A tuple of integer status codes
deemed acceptable as response statuses
resource: The resource class that will be generated
data: The post data being sent.
uid: The unique identifier of the resource.
Returns:
The generated resource built from the HTTP response (via `_handle_response`).
Note:
`kwargs` accepts additional custom keyword arguments that are passed
through to the subclass hooks:
- get_url
- prepare_http_request
- get_http_headers
def fit_cosine_function(wind):
wind_daily = wind.groupby(wind.index.date).mean()
wind_daily_hourly = pd.Series(index=wind.index, data=wind_daily.loc[wind.index.date].values)
df = pd.DataFrame(data=dict(daily=wind_daily_hourly, hourly=wind)).dropna(how='any')
x = np.array([df.daily, df.index.hour])
(popt, pcov) = scipy.optimize.curve_fit(_cosine_function, x, df.hourly)
return popt | fits a cosine function to observed hourly windspeed data
Args:
wind: observed hourly windspeed data
Returns:
parameters needed to generate diurnal features of windspeed using a cosine function | codesearchnet |
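A rough sketch of driving the fit above, assuming `fit_cosine_function` and its private `_cosine_function` helper live in the same module; the synthetic hourly wind series is purely illustrative:
import numpy as np
import pandas as pd

idx = pd.date_range("2020-06-01", periods=24 * 30, freq="H")
wind = pd.Series(3.0 + np.cos((idx.hour - 14) / 24 * 2 * np.pi), index=idx)  # diurnal bump peaking mid-afternoon
popt = fit_cosine_function(wind)      # fitted parameters of the diurnal cosine model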
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str='input_ids') -> None:
tf.debugging.assert_less(tensor, tf.cast(embed_dim, dtype=tensor.dtype), message=f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time.") | `tf.gather`, on which TF embedding layers are based, won't check positive out of bound indices on GPU, returning
zeros instead. This function adds a check against that dangerous silent behavior.
Args:
tensor (`tf.Tensor`): The tensor of indices to check.
embed_dim (`int`): The embedding dimension.
tensor_name (`str`, *optional*): The name of the tensor to use in the error message. | github-repos |
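A small sketch of the guard above: in-range token ids pass silently, while an out-of-range id raises `tf.errors.InvalidArgumentError` when the assertion is evaluated.
import tensorflow as tf

input_ids = tf.constant([[1, 5, 9]])
check_embeddings_within_bounds(input_ids, embed_dim=10)    # ok: every id < 10
# check_embeddings_within_bounds(input_ids, embed_dim=5)   # would fail: 9 >= 5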
def _list_certs(certificate_store='My'):
ret = dict()
blacklist_keys = ['DnsNameList', 'Thumbprint']
ps_cmd = ['Get-ChildItem',
'-Path', r"'Cert:\LocalMachine\{0}'".format(certificate_store),
'|',
'Select-Object DnsNameList, SerialNumber, Subject, Thumbprint, Version']
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
for item in items:
cert_info = dict()
for key in item:
if key not in blacklist_keys:
cert_info[key.lower()] = item[key]
cert_info['dnsnames'] = []
if item['DnsNameList']:
cert_info['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']]
ret[item['Thumbprint']] = cert_info
return ret | List details of available certificates in the LocalMachine certificate
store.
Args:
certificate_store (str): The name of the certificate store on the local
machine.
Returns:
dict: A dictionary of certificates found in the store | juraj-google-style |
def set_render_option(self, render_option):
self._render_option = render_option | Sets the rendering option.
Args:
render_option: (str) this parameter decides how the pipeline graph is
rendered. See display.pipeline_graph_renderer for available options. | github-repos |
def module_selected(self, module_name, module_ui):
if self.current_button == self.module_buttons[module_name]:
return
self.module_buttons[module_name].config(bg="#cacaca")  # highlight colour; original hex value was truncated in extraction, this is a placeholder
if self.current_button is not None:
self.current_button.config(bg="white")
self.current_button = self.module_buttons[module_name]
self.clear_ui()
try:
module_ui_frame = ModuleUIBaseFrame(self.module_ui, module_name, module_ui)
module_ui_frame.grid(column=0, row=0, sticky="W E N S")
except Exception as e:
logger.error("Could not load UI for {}".format(module_name))
logger.exception(e)
tk.Label(self.module_ui, text="Could not load UI for {}".format(module_name)).grid(
column=0, row=0, padx=0, pady=0, sticky="W E N S") | Called when a module is selected
Args:
module_name (str): The name of the module
module_ui: The function to call to create the module's UI | juraj-google-style |
def set_attr_text(self, attr_key, attr_val, el_idx=0):
self.get_element_by_attr_key(attr_key, el_idx).attrib[attr_key] = attr_val | Set the value of the selected attribute of the selected element.
Args:
attr_key : str
Name of attribute for which to search
attr_val : str
Text to set for the attribute.
el_idx : int
Index of element to use in the event that there are multiple sibling
elements with the same name. | codesearchnet |
async def wait_done(self) -> int:
(await self._done_running_evt.wait())
if (self._exit_code is None):
raise SublemonLifetimeError('Subprocess exited abnormally with `None` exit code')
return self._exit_code | Coroutine to wait for subprocess run completion.
Returns:
The exit code of the subprocess. | codesearchnet |
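A minimal asyncio sketch, assuming `proc` is a running Sublemon subprocess handle that exposes the coroutine above:
async def report_exit(proc):
    code = await proc.wait_done()     # suspends until the subprocess completes
    print(f"subprocess exited with code {code}")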
def set_notify_dispatch_request(self, notify_dispatch_request, *args):
self._notify_dispatch_request = notify_dispatch_request
self._notify_args = args | Set function to call just before requests are dispatched
Args:
notify_dispatch_request (callable): function will be called
with request as single arg just before request is dispatched | juraj-google-style |
def Write(self, schedule, output_file):
root = ET.Element('kml')
root.attrib['xmlns'] = 'http://earth.google.com/kml/2.1'  # namespace URL was truncated in extraction; standard KML 2.1 namespace assumed
doc = ET.SubElement(root, 'Document')
open_tag = ET.SubElement(doc, 'open')
open_tag.text = '1'
self._CreateStopsFolder(schedule, doc)
if self.split_routes:
route_types = set()
for route in schedule.GetRouteList():
route_types.add(route.route_type)
route_types = list(route_types)
route_types.sort()
for route_type in route_types:
self._CreateRoutesFolder(schedule, doc, route_type)
else:
self._CreateRoutesFolder(schedule, doc)
self._CreateShapesFolder(schedule, doc)
self._SetIndentation(root)
if isinstance(output_file, file):
output = output_file
else:
output = open(output_file, 'w')
output.write("<?xml version='1.0' encoding='UTF-8'?>\n")  # XML declaration argument was stripped in extraction; restored as an assumption
ET.ElementTree(root).write(output, 'utf-8') | Writes out a feed as KML.
Args:
schedule: A transitfeed.Schedule object containing the feed to write.
output_file: The name of the output KML file, or file object to use. | juraj-google-style |
def cumulative_distribution(self, X):
self.check_fit()
return norm.cdf(X, loc=self.mean, scale=self.std) | Cumulative distribution function for gaussian distribution.
Arguments:
X: `np.ndarray` of shape (n, 1).
Returns:
np.ndarray: Cumulative density for X. | codesearchnet |
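Numerically this is just the normal CDF with the fitted parameters; a quick check with scipy, assuming the model was fitted with `mean=0.0` and `std=1.0`:
import numpy as np
from scipy.stats import norm

X = np.array([[-1.0], [0.0], [1.0]])
norm.cdf(X, loc=0.0, scale=1.0)       # ~[[0.159], [0.5], [0.841]], matching cumulative_distribution(X)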
def update_dynamic_gene_list(self, case, hgnc_symbols=None, hgnc_ids=None, phenotype_ids=None, build='37'):
dynamic_gene_list = []
res = []
if hgnc_ids:
LOG.info('Fetching genes by hgnc id')
res = self.hgnc_collection.find({'hgnc_id': {'$in': hgnc_ids}, 'build': build})
elif hgnc_symbols:
LOG.info('Fetching genes by hgnc symbols')
res = []
for symbol in hgnc_symbols:
for gene_obj in self.gene_by_alias(symbol=symbol, build=build):
res.append(gene_obj)
for gene_obj in res:
dynamic_gene_list.append({'hgnc_symbol': gene_obj['hgnc_symbol'], 'hgnc_id': gene_obj['hgnc_id'], 'description': gene_obj['description']})
LOG.info('Update dynamic gene panel for: %s', case['display_name'])
updated_case = self.case_collection.find_one_and_update({'_id': case['_id']}, {'$set': {'dynamic_gene_list': dynamic_gene_list, 'dynamic_panel_phenotypes': (phenotype_ids or [])}}, return_document=pymongo.ReturnDocument.AFTER)
LOG.debug('Case updated')
return updated_case | Update the dynamic gene list for a case
Adds a list of dictionaries to case['dynamic_gene_list'] that looks like
{
hgnc_symbol: str,
hgnc_id: int,
description: str
}
Arguments:
case (dict): The case that should be updated
hgnc_symbols (iterable): A list of hgnc_symbols
hgnc_ids (iterable): A list of hgnc_ids
Returns:
updated_case(dict) | codesearchnet |
def set_properties(self, property_dict):
self.properties.update(property_dict) | Sets a dictionary of properties on this entity.
Args:
property_dict: A map from property name to value. See
:class:`google.cloud.datastore.entity.Entity` documentation for allowed
values. | github-repos |
def int64_counter(urn, metric, ptransform=None, pcollection=None, labels=None) -> metrics_pb2.MonitoringInfo:
labels = labels or {}
labels.update(create_labels(ptransform=ptransform, pcollection=pcollection))
if isinstance(metric, int):
metric = coders.VarIntCoder().encode(metric)
return create_monitoring_info(urn, SUM_INT64_TYPE, metric, labels) | Return the counter monitoring info for the specified URN, metric and labels.
Args:
urn: The URN of the monitoring info/metric.
metric: The payload field to use in the monitoring info or an int value.
ptransform: The ptransform id used as a label.
pcollection: The pcollection id used as a label. | github-repos |
def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates, padding, use_gpu):
assert image_shape[3] == kernel_shape[2]
np.random.seed(1)
image = np.random.random_sample(image_shape).astype(np.float32)
kernel = np.random.random_sample(kernel_shape).astype(np.float32)
strides = [1] + strides + [1]
rates = [1] + rates + [1]
image_tensor = constant_op.constant(image, shape=image_shape, name='input')
kernel_tensor = constant_op.constant(kernel, shape=kernel_shape, name='filter')
def compute_erosion2d(image_tensor, kernel_tensor):
return nn_ops.erosion2d(image_tensor, kernel_tensor, strides=strides, rates=rates, padding=padding, name='erosion2d')
with test_util.device(use_gpu=use_gpu):
with self.cached_session():
err1 = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(lambda x: compute_erosion2d(x, kernel_tensor), [image_tensor]))
err2 = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(lambda x: compute_erosion2d(image_tensor, x), [kernel_tensor]))
err = max(err1, err2)
print('Erosion gradient error = %f' % err)
self.assertLess(err, 0.0001) | Verifies the gradients of the erosion function.
Args:
image_shape: Input shape, [batch, in_height, in_width, channels].
kernel_shape: Filter shape, [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
use_gpu: Whether we are running on GPU. | github-repos |
def prune_candidates(candidates):
pruned = []
for (first, second) in candidates:
if (first.__class__ is Linearization):
nodes1 = first.curve.nodes
else:
nodes1 = first.nodes
if (second.__class__ is Linearization):
nodes2 = second.curve.nodes
else:
nodes2 = second.nodes
if convex_hull_collide(nodes1, nodes2):
pruned.append((first, second))
return pruned | Reduce number of candidate intersection pairs.
.. note::
This is a helper for :func:`_all_intersections`.
Uses more strict bounding box intersection predicate by forming the
actual convex hull of each candidate curve segment and then checking
if those convex hulls collide.
Args:
candidates (List): An iterable of pairs of curves (or
linearized curves).
Returns:
List: A pruned list of curve pairs. | codesearchnet |
def get_plot(self, ylim=None, units='thz'):
u = freq_units(units)
plt = pretty_plot(12, 8)
band_linewidth = 1
data = self.bs_plot_data()
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][d], [(data['frequency'][d][i][j] * u.factor) for j in range(len(data['distances'][d]))], 'b-', linewidth=band_linewidth)
self._maketicks(plt)
plt.axhline(0, linewidth=1, color='k')
plt.xlabel('$\\mathrm{Wave\\ Vector}$', fontsize=30)
ylabel = '$\\mathrm{{Frequencies\\ ({})}}$'.format(u.label)
plt.ylabel(ylabel, fontsize=30)
x_max = data['distances'][(- 1)][(- 1)]
plt.xlim(0, x_max)
if (ylim is not None):
plt.ylim(ylim)
plt.tight_layout()
return plt | Get a matplotlib object for the bandstructure plot.
Args:
ylim: Specify the y-axis (frequency) limits; by default None let
the code choose.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1. | codesearchnet |
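A hedged usage sketch, assuming this method belongs to a pymatgen-style PhononBSPlotter and that `bs` is an already-built PhononBandStructureSymmLine:
plotter = PhononBSPlotter(bs)          # hypothetical wiring; plotter class and band structure assumed
plt = plotter.get_plot(units="cm-1")   # frequencies reported in cm^-1
plt.savefig("phonon_bands.png")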
def parse_args(cmd_args, is_script=False):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=NewlinesHelpFormatter,
epilog=CODES_TABLE
)
if is_script:
parser.add_argument(
"files",
metavar="FILES",
nargs="*",
default=sys.stdin,
help="A whitespace separated list of STIX files or directories of "
"STIX files to validate. If none given, stdin will be used."
)
parser.add_argument(
"-r",
"--recursive",
dest="recursive",
action="store_true",
default=True,
help="Recursively descend into input directories."
)
parser.add_argument(
"-s",
"--schemas",
dest="schema_dir",
help="Custom schema directory. If provided, input will be validated "
"against these schemas in addition to the STIX schemas bundled "
"with this script."
)
parser.add_argument(
"--version",
dest="version",
default=DEFAULT_VER,
help="The version of the STIX specification to validate against (e.g. "
"\"2.0\")."
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
default=False,
help="Print informational notes and more verbose error messages."
)
parser.add_argument(
"-q",
"--silent",
dest="silent",
action="store_true",
default=False,
help="Silence all output to stdout."
)
parser.add_argument(
"-d",
"--disable",
"--ignore",
dest="disabled",
default="",
help="A comma-separated list of recommended best practice checks to "
"skip. By default, no checks are disabled. \n\n"
"Example: --disable 202,210"
)
parser.add_argument(
"-e",
"--enable",
"--select",
dest="enabled",
default="",
help="A comma-separated list of recommended best practice checks to "
"enable. If the --disable option is not used, no other checks "
"will be run. By default, all checks are enabled.\n\n"
"Example: --enable 218"
)
parser.add_argument(
"--strict",
dest="strict",
action="store_true",
default=False,
help="Treat warnings as errors and fail validation if any are found."
)
parser.add_argument(
"--strict-types",
dest="strict_types",
action="store_true",
default=False,
help="Ensure that no custom object types are used, only those defined"
" in the STIX specification."
)
parser.add_argument(
"--strict-properties",
dest="strict_properties",
action="store_true",
default=False,
help="Ensure that no custom properties are used, only those defined"
" in the STIX specification."
)
parser.add_argument(
"--no-cache",
dest="no_cache",
action="store_true",
default=False,
help="Disable the caching of external source values."
)
parser.add_argument(
"--refresh-cache",
dest="refresh_cache",
action="store_true",
default=False,
help="Clears the cache of external source values, then "
"during validation downloads them again."
)
parser.add_argument(
"--clear-cache",
dest="clear_cache",
action="store_true",
default=False,
help="Clear the cache of external source values after validation."
)
parser.add_argument(
"--enforce-refs",
dest="enforce_refs",
action="store_true",
default=False,
help="Ensures that all SDOs being referenced by SROs are contained "
"within the same bundle."
)
args = parser.parse_args(cmd_args)
if not is_script:
args.files = ""
if not args.version:
args.version = DEFAULT_VER
return ValidationOptions(args) | Parses a list of command line arguments into a ValidationOptions object.
Args:
cmd_args (list of str): The list of command line arguments to be parsed.
is_script: Whether the arguments are intended for use in a stand-alone
script or imported into another tool.
Returns:
Instance of ``ValidationOptions`` | juraj-google-style |
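A quick sketch of calling the parser above in script mode; "bundle.json" is a placeholder file name:
options = parse_args(["bundle.json", "--version", "2.0", "--strict"], is_script=True)
# options is a ValidationOptions instance carrying files, version, strict, etc.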
def __str__(self, talker='GP'):
if not len(talker) == 2:
raise ValueError('Talker ID must be two characters %r' % talker)
data = ['%sGLL' % talker]
data.extend(nmea_latitude(self.latitude))
data.extend(nmea_longitude(self.longitude))
data.append('%s.%02i' % (self.time.strftime('%H%M%S'),
self.time.microsecond / 1000000))
data.append('A' if self.status else 'V')
if self.mode:
data.append(self.mode)
data = ','.join(data)
return '$%s*%02X\r' % (data, calc_checksum(data)) | Pretty printed position string.
Args:
talker (str): Talker ID
Returns:
str: Human readable string representation of ``Position`` object | juraj-google-style |
def putfile(self, filepath, buildroot, metahash):
def gen_obj_path(filename):
filehash = util.hash_file(filepath).hexdigest()
return filehash, os.path.join(self.obj_cachedir, filehash[0:2],
filehash[2:4], filehash)
filepath_relative = filepath.split(buildroot)[1][1:]
incachepath = self._genpath(filepath_relative, metahash)
filehash, obj_path = gen_obj_path(filepath)
if not os.path.exists(obj_path):
obj_dir = os.path.dirname(obj_path)
if not os.path.exists(obj_dir):
os.makedirs(obj_dir)
log.debug('Adding to obj cache: %s -> %s', filepath, obj_path)
os.link(filepath, obj_path)
if os.path.exists(incachepath):
existingfile_hash = util.hash_file(incachepath).hexdigest()
if filehash != existingfile_hash:
log.warn('File found in mh cache, but checksum differs. '
'Replacing with this new version. (File: %s)',
filepath)
log.warn('Possible reasons for this:')
log.warn(' 1. This build is not hermetic, and something '
'differs about the build environment compared to the '
'previous build.')
log.warn(' 2. This file has a timestamp or other build-time '
'related data encoded into it, which will always '
'cause the checksum to differ when built.')
log.warn(' 3. Everything is terrible and nothing works.')
os.unlink(incachepath)
if not os.path.exists(incachepath):
log.debug('Adding to mh cache: %s -> %s', filepath, incachepath)
if not os.path.exists(os.path.dirname(incachepath)):
os.makedirs(os.path.dirname(incachepath))
os.link(obj_path, incachepath) | Put a file in the cache.
Args:
filepath: Path to file on disk.
buildroot: Path to buildroot
buildrule: The rule that generated this file.
metahash: hash object | juraj-google-style |
def SignMessage(self, message, script_hash):
keypair = self.GetKeyByScriptHash(script_hash)
prikey = bytes(keypair.PrivateKey)
res = Crypto.Default().Sign(message, prikey)
return res, keypair.PublicKey | Sign a message with a specified script_hash.
Args:
message (str): a hex encoded message to sign
script_hash (UInt160): a bytearray (len 20).
Returns:
str: the signed message | juraj-google-style |
def attention_lm_decoder(decoder_input, decoder_self_attention_bias, hparams, name='decoder'):
x = decoder_input
with tf.variable_scope(name):
for layer in range(hparams.num_hidden_layers):
with tf.variable_scope(('layer_%d' % layer)):
with tf.variable_scope('self_attention'):
y = common_attention.multihead_attention(common_layers.layer_preprocess(x, hparams), None, decoder_self_attention_bias, (hparams.attention_key_channels or hparams.hidden_size), (hparams.attention_value_channels or hparams.hidden_size), hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)
x = common_layers.layer_postprocess(x, y, hparams)
with tf.variable_scope('ffn'):
y = common_layers.conv_hidden_relu(common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout)
x = common_layers.layer_postprocess(x, y, hparams)
return common_layers.layer_preprocess(x, hparams) | A stack of attention_lm layers.
Args:
decoder_input: a Tensor
decoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
Returns:
y: a Tensor | codesearchnet
def nrows(self):
if self._nrows is not None:
return self._nrows
nsplits = tensor_shape.dimension_at_index(self._row_splits.shape, 0)
if nsplits.value is None:
return array_ops.shape(self._row_splits, out_type=self.dtype)[0] - 1
else:
return constant_op.constant(nsplits.value - 1, dtype=self.dtype) | Returns the number of rows created by this `RowPartition`.
Returns:
scalar integer Tensor | github-repos |
def __init__(self, site1, site2):
self.site1 = site1
self.site2 = site2 | Initializes a covalent bond between two sites.
Args:
site1 (Site): First site.
site2 (Site): Second site. | juraj-google-style |
def dumps(self, with_defaults=False):
return self._rw.dump_config_to_string(self._config, with_defaults=with_defaults) | Generate a string representing all the configuration values.
Args:
with_defaults (bool): if ``True``, values of items with no custom values will be included in the output
if they have a default value set. | juraj-google-style |
def ReconcileShadow(self, store_type):
for k, v in iteritems(self.entry):
if v.pw_entry.store == store_type:
shadow_entry = self.shadow.get(k)
if shadow_entry is not None:
v.pw_entry = shadow_entry
else:
v.pw_entry.store = "UNKNOWN" | Verify that entries that claim to use shadow files have a shadow entry.
If the entries of the non-shadowed file indicate that a shadow file is used,
check that there is actually an entry for that file in shadow.
Args:
store_type: The type of password store that should be used (e.g.
/etc/shadow or /etc/gshadow) | juraj-google-style |
def register_recipe(cls, recipe):
recipe_name = recipe.contents['name']
cls._recipe_classes[recipe_name] = (
recipe.contents, recipe.args, recipe.__doc__) | Registers a dftimewolf recipe.
Args:
recipe: imported python module representing the recipe. | juraj-google-style |
def call(self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, training: Optional[bool]=False, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
residual = hidden_states
if self.do_layer_norm_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
if not self.do_layer_norm_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
if self.do_layer_norm_before:
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
if not self.do_layer_norm_before:
hidden_states = self.final_layer_norm(hidden_states)
return (hidden_states, self_attn_weights, present_key_value) | Args:
hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`tf.Tensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`tf.Tensor`, *optional*): mask for attention heads in a given layer of size
`(decoder_attention_heads,)`
past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation). | github-repos |
def read_bit(self, registeraddress, functioncode=2):
_checkFunctioncode(functioncode, [1, 2])
return self._genericCommand(functioncode, registeraddress) | Read one bit from the slave.
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* functioncode (int): Modbus function code. Can be 1 or 2.
Returns:
The bit value 0 or 1 (int).
Raises:
ValueError, TypeError, IOError | juraj-google-style |
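A minimal sketch for the Modbus call above (minimalmodbus-style API); the serial port and slave address are assumptions about the target setup:
import minimalmodbus

instrument = minimalmodbus.Instrument("/dev/ttyUSB0", 1)   # assumed wiring: slave 1 on /dev/ttyUSB0
coil_state = instrument.read_bit(61, functioncode=2)       # returns 0 or 1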
def linear_quantize(input, scale, zero_point, inplace=False):
if len(input.shape) == 4:
scale = scale.view(-1, 1, 1, 1)
zero_point = zero_point.view(-1, 1, 1, 1)
elif len(input.shape) == 2:
scale = scale.view(-1, 1)
zero_point = zero_point.view(-1, 1)
else:
scale = scale.view(-1)
zero_point = zero_point.view(-1)
if inplace:
input.mul_(1.0 / scale).add_(zero_point).round_()
return input
return torch.round(1.0 / scale * input + zero_point) | Quantize single-precision input tensor to integers with the given scaling factor and zeropoint.
Args:
input (`torch.Tensor`):
Single-precision input tensor to be quantized.
scale (`torch.Tensor`):
Scaling factor for quantization.
zero_point (`torch.Tensor`):
Shift for quantization.
inplace (`bool`, *optional*, defaults to `False`):
Whether to compute inplace or not.
Returns:
`torch.Tensor`: Linearly quantized value of *input* according to *scale* and *zero_point*. | github-repos |
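A small worked example of the quantization above; the scale and zero-point values are arbitrary illustrations:
import torch

x = torch.tensor([[0.04, -0.12], [0.30, 0.01]])
scale = torch.tensor([0.1])            # one scaling factor, broadcast over the 2-D input
zero_point = torch.tensor([0.0])
linear_quantize(x, scale, zero_point)  # round(x / 0.1) -> [[0., -1.], [3., 0.]]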
def automatic_density_by_vol(structure, kppvol, force_gamma=False):
vol = structure.lattice.reciprocal_lattice.volume
kppa = kppvol * vol * structure.num_sites
return Kpoints.automatic_density(structure, kppa,
force_gamma=force_gamma) | Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom^3 of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
Returns:
Kpoints | juraj-google-style |
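A hedged pymatgen-style sketch, assuming `Kpoints` is pymatgen's Kpoints class; the fcc Cu cell is just a convenient example structure:
from pymatgen.core import Lattice, Structure

structure = Structure(Lattice.cubic(3.6), ["Cu"] * 4,
                      [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]])
kpoints = Kpoints.automatic_density_by_vol(structure, kppvol=100)   # ~100 k-points per A^-3 of reciprocal cell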