code | docstring | source
---|---|---|
def sas_interconnects(self):
if (not self.__sas_interconnects):
self.__sas_interconnects = SasInterconnects(self.__connection)
return self.__sas_interconnects
|
Gets the SAS Interconnects API client.
Returns:
SasInterconnects: The SAS Interconnects API client.
|
codesearchnet
|
def keras_model_summary(name, data, step=None):
import warnings
import tensorflow.summary as summary
from tensorflow.compat.v1 import SummaryMetadata
summary_metadata = SummaryMetadata()
summary_metadata.plugin_data.plugin_name = 'graph_keras_model'
summary_metadata.plugin_data.content = b'1'
try:
json_string = data.to_json()
except Exception as exc:
warnings.warn(f'Model failed to serialize as JSON. Ignoring... {exc}')
return False
with summary.experimental.summary_scope(name, 'graph_keras_model', [data, step]) as (tag, _):
return summary.write(tag=tag, tensor=json_string, step=step, metadata=summary_metadata)
|
Writes a Keras model as JSON to a Summary.
Writing the Keras model configuration allows the TensorBoard graph plugin to
render a conceptual graph, as opposed to a graph of ops. If the model
fails to serialize as JSON, it is ignored and False is returned.
Args:
name: A name for this summary. The summary tag used for TensorBoard will
be this name prefixed by any active name scopes.
data: A Keras Model to write.
step: Explicit `int64`-castable monotonic step value for this summary.
If omitted, this defaults to `tf.summary.experimental.get_step()`,
which must not be `None`.
Returns:
True on success, or False if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is `None`.
|
github-repos
|
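A minimal usage sketch for keras_model_summary above, assuming TensorFlow 2.x with Keras available; the model and log directory below are illustrative:
import tensorflow as tf

# A trivial Keras model; to_json() only needs the model configuration.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
writer = tf.summary.create_file_writer('/tmp/keras_graph_logs')
with writer.as_default():
    # Returns True if the JSON summary was written, False otherwise.
    wrote = keras_model_summary('conceptual_graph', model, step=0)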
def to_soft(self, path_or_handle, as_gzip=False):
if isinstance(path_or_handle, str):
if as_gzip:
with gzip.open(path_or_handle, 'wt') as outfile:
outfile.write(self._get_object_as_soft())
else:
with open(path_or_handle, 'w') as outfile:
outfile.write(self._get_object_as_soft())
else:
path_or_handle.write(self._get_object_as_soft())
|
Save the object in a SOFT format.
Args:
path_or_handle (:obj:`str` or :obj:`file`): Path or handle to
output file
as_gzip (:obj:`bool`): Save as gzip
|
juraj-google-style
|
def do_labels_update(self, info, labels):
if self.update_label_func:
self.update_label_func(self.label_name, info, labels)
|
Updates a dictionary of labels using the assigned update_op_func
Args:
info (:class:`endpoints_management.control.report_request.Info`): the
info instance to update
labels (dict[string[string]]): the labels dictionary
Return:
None
|
juraj-google-style
|
def set_organization(self, organization):
if isinstance(organization, hdx.data.organization.Organization) or isinstance(organization, dict):
if 'id' not in organization:
organization = hdx.data.organization.Organization.read_from_hdx(organization['name'], configuration=self.configuration)
organization = organization['id']
elif not isinstance(organization, str):
raise HDXError('Type %s cannot be added as a organization!' % type(organization).__name__)
if is_valid_uuid(organization) is False and organization != 'hdx':
raise HDXError('%s is not a valid organization id!' % organization)
self.data['owner_org'] = organization
|
Set the dataset's organization.
Args:
organization (Union[Organization,Dict,str]): Either an Organization id or Organization metadata from an Organization object or dictionary.
Returns:
None
|
juraj-google-style
|
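A short sketch of how set_organization above might be called, assuming `dataset` is an existing HDX Dataset object with its configuration set up; the UUID is purely illustrative:
# Passing a plain string: must be 'hdx' or a valid organization UUID.
dataset.set_organization('hdx')
dataset.set_organization('12345678-1234-5678-1234-567812345678')

# Passing a dict with an 'id' key skips the lookup by name.
dataset.set_organization({'id': '12345678-1234-5678-1234-567812345678'})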
def namespace(self, mid: ModuleId) -> YangIdentifier:
try:
mdata = self.modules[mid]
except KeyError:
raise ModuleNotRegistered(*mid) from None
return mdata.main_module[0]
|
Return the namespace corresponding to a module or submodule.
Args:
mid: Module identifier.
Raises:
ModuleNotRegistered: If `mid` is not registered in the data model.
|
juraj-google-style
|
def __init__(self, src, raw_traces, trace_factory, filename):
self.text = src
self.traces = _collect_traces(raw_traces, trace_factory)
self.filename = filename
self._lines = src.split('\n')
self._offsets = []
self._init_byte_offsets()
|
Initializer.
Args:
src: The source text.
raw_traces: Raw (opcode, symbol, types) values.
trace_factory: A subclass of source.AbstractTrace that will be used to
instantiate traces from raw values.
filename: The filename.
|
github-repos
|
def to_html(self):
if (self.items is None):
return
else:
html = ('<ol%s>\n' % self.html_attributes())
for item in self.items:
html += ('<li>%s</li>\n' % item.to_html())
html += '</ol>'
return html
|
Render a Text MessageElement as html
Args:
None
Returns:
str: the html representation of the Text MessageElement
Raises:
Errors are propagated
|
codesearchnet
|
def call_rpc(self, address, rpc_id, payload=b""):
return self.emulator.call_rpc_external(address, rpc_id, payload)
|
Call an RPC by its address and ID.
This will send the RPC to the background rpc dispatch thread and
synchronously wait for the response.
Args:
address (int): The address of the mock tile this RPC is for
rpc_id (int): The number of the RPC
payload (bytes): A byte string of payload parameters up to 20 bytes
Returns:
bytes: The response payload from the RPC
|
juraj-google-style
|
def getitem_column_array(self, key):
numeric_indices = list(self.columns.get_indexer_for(key))
def getitem(df, internal_indices=[]):
return df.iloc[:, internal_indices]
result = self.data.apply_func_to_select_indices(
0, getitem, numeric_indices, keep_remaining=False
)
new_columns = self.columns[numeric_indices]
new_dtypes = self.dtypes[numeric_indices]
return self.__constructor__(result, self.index, new_columns, new_dtypes)
|
Get column data for target labels.
Args:
key: Target labels by which to retrieve data.
Returns:
A new QueryCompiler.
|
juraj-google-style
|
def compile_files(raw_dir, raw_files, tag):
tf.logging.info(('Compiling files with tag %s.' % tag))
filename = ('%s-%s' % (_PREFIX, tag))
input_compiled_file = os.path.join(raw_dir, (filename + '.lang1'))
target_compiled_file = os.path.join(raw_dir, (filename + '.lang2'))
with tf.gfile.Open(input_compiled_file, mode='w') as input_writer:
with tf.gfile.Open(target_compiled_file, mode='w') as target_writer:
for i in range(len(raw_files['inputs'])):
input_file = raw_files['inputs'][i]
target_file = raw_files['targets'][i]
tf.logging.info(('Reading files %s and %s.' % (input_file, target_file)))
write_file(input_writer, input_file)
write_file(target_writer, target_file)
return (input_compiled_file, target_compiled_file)
|
Compile raw files into a single file for each language.
Args:
raw_dir: Directory containing downloaded raw files.
raw_files: Dict containing filenames of input and target data.
{"inputs": list of files containing data in input language
"targets": list of files containing corresponding data in target language
}
tag: String to append to the compiled filename.
Returns:
Full path of compiled input and target files.
|
codesearchnet
|
def _represent_argument(directive_location, context, argument, inferred_type):
argument_name = argument[1:]
validate_safe_string(argument_name)
if is_variable_argument(argument):
existing_type = context['inputs'].get(argument_name, inferred_type)
if (not inferred_type.is_same_type(existing_type)):
raise GraphQLCompilationError(u'Incompatible types inferred for argument {}. The argument cannot simultaneously be {} and {}.'.format(argument, existing_type, inferred_type))
context['inputs'][argument_name] = inferred_type
return (expressions.Variable(argument, inferred_type), None)
elif is_tag_argument(argument):
argument_context = context['tags'].get(argument_name, None)
if (argument_context is None):
raise GraphQLCompilationError(u'Undeclared argument used: {}'.format(argument))
location = argument_context['location']
optional = argument_context['optional']
tag_inferred_type = argument_context['type']
if (location is None):
raise AssertionError(u'Argument declared without location: {}'.format(argument_name))
if (location.field is None):
raise AssertionError(u'Argument location is not a property field: {}'.format(location))
if (not inferred_type.is_same_type(tag_inferred_type)):
raise GraphQLCompilationError(u'The inferred type of the matching @tag directive does not match the inferred required type for this filter: {} vs {}'.format(tag_inferred_type, inferred_type))
field_is_local = (directive_location.at_vertex() == location.at_vertex())
non_existence_expression = None
if optional:
if field_is_local:
non_existence_expression = expressions.FalseLiteral
else:
non_existence_expression = expressions.BinaryComposition(u'=', expressions.ContextFieldExistence(location.at_vertex()), expressions.FalseLiteral)
if field_is_local:
representation = expressions.LocalField(argument_name)
else:
representation = expressions.ContextField(location, tag_inferred_type)
return (representation, non_existence_expression)
else:
raise GraphQLCompilationError(u'Non-argument type found: {}'.format(argument))
|
Return a two-element tuple that represents the argument to the directive being processed.
Args:
directive_location: Location where the directive is used.
context: dict, various per-compilation data (e.g. declared tags, whether the current block
is optional, etc.). May be mutated in-place in this function!
argument: string, the name of the argument to the directive
inferred_type: GraphQL type object specifying the inferred type of the argument
Returns:
(argument_expression, non_existence_expression)
- argument_expression: an Expression object that captures the semantics of the argument
- non_existence_expression: None or Expression object;
If the current block is not optional, this is set to None. Otherwise, it is an
expression that will evaluate to True if the argument is skipped as optional and
therefore not present, and False otherwise.
|
codesearchnet
|
def _get_example_from_basic_type(type):
if type == 'integer':
return [42, 24]
elif type == 'number':
return [5.5, 5.5]
elif type == 'string':
return ['string', 'string2']
elif type == 'datetime':
return ['2015-08-28T09:02:57.481Z', '2015-08-28T09:02:57.481Z']
elif type == 'boolean':
return [False, True]
elif type == 'null':
return ['null', 'null']
|
Get example from the given type.
Args:
type: the type you want an example of.
Returns:
An array with two example values of the given type.
|
juraj-google-style
|
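The mapping implemented by _get_example_from_basic_type above is easiest to see with a couple of direct calls; types not covered by the if/elif chain fall through and return None:
print(_get_example_from_basic_type('integer'))   # [42, 24]
print(_get_example_from_basic_type('boolean'))   # [False, True]
print(_get_example_from_basic_type('unknown'))   # None (no matching branch)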
def add_function(self, call_fn, name, match_layer_training_arg):
fn = LayerCall(self, self._maybe_wrap_with_training_arg(call_fn, match_layer_training_arg), name, input_signature=self.fn_input_signature)
self._functions[name] = fn.wrapped_call
return fn
|
Adds a layer call function to the collection.
Args:
call_fn: a python function
name: Name of call function
match_layer_training_arg: If True, removes the `training` from the
function arguments when calling `call_fn`.
Returns:
LayerCall (tf.function)
|
github-repos
|
def non_fluents_scope(self) -> Dict[(str, TensorFluent)]:
if (self.__dict__.get('non_fluents') is None):
self._initialize_non_fluents()
return dict(self.non_fluents)
|
Returns a partial scope with non-fluents.
Returns:
A mapping from non-fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
|
codesearchnet
|
def sg_all(tensor, opt):
return tf.reduce_all(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
|
r"""Computes the "logical and" of elements across axis of a tensor.
See `tf.reduce_all()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
|
juraj-google-style
|
def _urllib_post(self, url, json='', data='', username='', password='', headers=None, timeout=30):
if (headers is None):
headers = {}
raw_store = json
raw_request = (json_lib.dumps(json) if json else urlencode(data))
url_request = Request(url, data=raw_request.encode('utf8'))
if json:
url_request.add_header('Content-Type', 'application/json')
elif (not data):
raise ValueError('Please provide either a json or a data field.')
headers['User-Agent'] = self.user_agent
raw_request = raw_store
if (username and password):
if (sys.version_info[0] >= 3):
basic_authstring = base64.encodebytes(('%s:%s' % (username, password)).encode()).decode().replace('\n', '')
else:
basic_authstring = base64.encodestring(('%s:%s' % (username, password))).replace('\n', '')
url_request.add_header('Authorization', ('Basic %s' % basic_authstring))
for (key, value) in headers.items():
url_request.add_header(key, str(value))
try:
response = urlopen(url_request, timeout=timeout)
except HTTPError as e:
raw_response = e.read()
return (raw_response, raw_request, e.getcode(), e.headers)
else:
raw_response = response.read()
response.close()
return (raw_response, raw_request, response.getcode(), dict(response.info()))
|
This function will POST to the url endpoint using urllib2, returning
the raw response on a 200 HTTP response. Either json or data has to
be provided. If username and password are provided, basic auth will be
used.
Args:
url (str): url to send the POST
json (dict, optional): Dict of the JSON to POST
data (dict, optional): Dict, presumed flat structure of
key/value of request to place as
www-form
username (str, optional): Username for basic auth. Must be
included together with password.
password (str, optional): Password for basic auth. Must be
included together with username.
headers (dict, optional): Key/Value pairs of headers to include
timeout (int, optional): Default 30. Timeout for the request.
Returns:
str: Raw response received
str: Raw request placed
int: HTTP status code, eg 200,404,401
dict: Key/Value pairs of the headers received.
|
codesearchnet
|
def diff(self, other):
similar_param = {}
different_param = {}
for (k1, v1) in self.items():
if (k1 not in other):
different_param[k1] = {'INCAR1': v1, 'INCAR2': None}
elif (v1 != other[k1]):
different_param[k1] = {'INCAR1': v1, 'INCAR2': other[k1]}
else:
similar_param[k1] = v1
for (k2, v2) in other.items():
if ((k2 not in similar_param) and (k2 not in different_param)):
if (k2 not in self):
different_param[k2] = {'INCAR1': None, 'INCAR2': v2}
return {'Same': similar_param, 'Different': different_param}
|
Diff function for Incar. Compares two Incars and indicates which
parameters are the same and which are not. Useful for checking whether
two runs were done using the same parameters.
Args:
other (Incar): The other Incar object to compare to.
Returns:
Dict of the following format:
{"Same" : parameters_that_are_the_same,
"Different": parameters_that_are_different}
Note that the parameters are returned as full dictionaries of values.
E.g. {"ISIF":3}
|
codesearchnet
|
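A small sketch of Incar.diff above; this assumes the pymatgen Incar class, which accepts a plain dict of INCAR tags (the tag values here are illustrative):
from pymatgen.io.vasp.inputs import Incar

incar1 = Incar({"ISIF": 3, "ENCUT": 520})
incar2 = Incar({"ISIF": 2, "ENCUT": 520})
print(incar1.diff(incar2))
# {'Same': {'ENCUT': 520},
#  'Different': {'ISIF': {'INCAR1': 3, 'INCAR2': 2}}}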
def destination(self, bearing, distance):
return (segment.destination(bearing, distance) for segment in self)
|
Calculate destination locations for given distance and bearings.
Args:
bearing (float): Bearing to move on in degrees
distance (float): Distance in kilometres
Returns:
list of list of Point: Groups of points shifted by ``distance``
and ``bearing``
|
juraj-google-style
|
def matches(self, desc):
desc_value_type = desc.valueType or ValueType.STRING
return (self.label_name == desc.key and
self.value_type == desc_value_type)
|
Determines if a given label descriptor matches this enum instance
Args:
desc (:class:`endpoints_management.gen.servicemanagement_v1_messages.LabelDescriptor`):
the instance to test
Return:
`True` if desc is supported, otherwise `False`
|
juraj-google-style
|
def compute_metrics(self, previous):
delta_t = self.time_difference(previous)
delta_x = self.distance(previous)
vel = 0
delta_v = 0
acc = 0
if (delta_t != 0):
vel = (delta_x / delta_t)
delta_v = (vel - previous.vel)
acc = (delta_v / delta_t)
self.dt = delta_t
self.dx = delta_x
self.acc = acc
self.vel = vel
return self
|
Computes the metrics of this point
Computes and updates the dt, vel and acc attributes.
Args:
previous (:obj:`Point`): Point before
Returns:
:obj:`Point`: Self
|
codesearchnet
|
def without_operations_touching(self, qubits: Iterable[raw_types.Qid]):
qubits = frozenset(qubits)
if (not self.operates_on(qubits)):
return self
return Moment((operation for operation in self.operations if qubits.isdisjoint(frozenset(operation.qubits))))
|
Returns an equal moment, but without ops on the given qubits.
Args:
qubits: Operations that touch these will be removed.
Returns:
The new moment.
|
codesearchnet
|
def get_authorization_url(self, client_id=None, instance_id=None, redirect_uri=None, region=None, scope=None, state=None):
client_id = (client_id or self.client_id)
instance_id = (instance_id or self.instance_id)
redirect_uri = (redirect_uri or self.redirect_uri)
region = (region or self.region)
scope = (scope or self.scope)
state = (state or str(uuid.uuid4()))
self.state = state
return (Request('GET', self.auth_base_url, params={'client_id': client_id, 'instance_id': instance_id, 'redirect_uri': redirect_uri, 'region': region, 'response_type': 'code', 'scope': scope, 'state': state}).prepare().url, state)
|
Generate authorization URL.
Args:
client_id (str): OAuth2 client ID. Defaults to ``None``.
instance_id (str): App Instance ID. Defaults to ``None``.
redirect_uri (str): Redirect URI. Defaults to ``None``.
region (str): App Region. Defaults to ``None``.
scope (str): Permissions. Defaults to ``None``.
state (str): UUID to detect CSRF. Defaults to ``None``.
Returns:
str, str: Auth URL, state
|
codesearchnet
|
def from_api_repr(cls, resource):
this = cls(None)
resource = copy.deepcopy(resource)
for training_run in resource.get("trainingRuns", ()):
start_time = training_run.get("startTime")
if not start_time or "-" in start_time:
continue
start_time = datetime_helpers.from_microseconds(1e3 * float(start_time))
training_run["startTime"] = datetime_helpers.to_rfc3339(start_time)
this._proto = json_format.ParseDict(resource, types.Model())
for key in six.itervalues(cls._PROPERTY_TO_API_FIELD):
if key in resource:
this._properties[key] = resource[key]
return this
|
Factory: construct a model resource given its API representation
Args:
resource (Dict[str, object]):
Model resource representation from the API
Returns:
google.cloud.bigquery.model.Model: Model parsed from ``resource``.
|
juraj-google-style
|
def create_in_hdx(self):
capacity = self.data.get('capacity')
if (capacity is not None):
del self.data['capacity']
self._create_in_hdx('user', 'id', 'name')
if (capacity is not None):
self.data['capacity'] = capacity
|
Check if user exists in HDX and if so, update it, otherwise create user
Returns:
None
|
codesearchnet
|
def execute(self, sensor_graph, scope_stack):
parent = scope_stack[-1]
try:
slot = parent.resolve_identifier('current_slot', SlotIdentifier)
except UnresolvedIdentifierError:
raise SensorGraphSemanticError("set config statement used outside of config block")
if self.explicit_type is None or not isinstance(self.identifier, int):
raise SensorGraphSemanticError("Config variable type definitions are not yet supported")
if isinstance(self.value, (bytes, bytearray)) and not self.explicit_type == 'binary':
raise SensorGraphSemanticError("You must pass the binary variable type when using encoded binary data")
if not isinstance(self.value, (bytes, bytearray)) and self.explicit_type == 'binary':
raise SensorGraphSemanticError("You must pass an encoded binary value with binary type config variables")
sensor_graph.add_config(slot, self.identifier, self.explicit_type, self.value)
|
Execute this statement on the sensor_graph given the current scope tree.
This adds a single config variable assignment to the current sensor graph
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources.
|
juraj-google-style
|
def checkAndRaise(pageNum, itemsPerPage):
if pageNum < 1:
raise ErrPaginationLimits(ErrPaginationLimits.ERR_PAGE_NUM)
if itemsPerPage < Settings.itemsPerPageMin or itemsPerPage > Settings.itemsPerPageMax:
raise ErrPaginationLimits(ErrPaginationLimits.ERR_ITEMS_PER_PAGE)
|
Check and Raise an Exception if needed
Args:
pageNum (int): Page number
itemsPerPage (int): Number of items per Page
Raises:
ErrPaginationLimits: If we are out of limits
|
juraj-google-style
|
def get(self, key, default_value=__NoDefaultSpecified__):
os_env_string = ConfigReader.ENV_PREFIX + key
os_env_string = os_env_string.replace(".", "_")
if type(os.getenv(os_env_string)) != NoneType:
return os.getenv(os_env_string)
for data_map in self._dataMaps:
try:
if "." in key:
namespaces = key.split(".")
temp_var = data_map
for name in namespaces:
temp_var = temp_var[name]
return temp_var
else:
value = data_map[key]
return value
except (AttributeError, TypeError, KeyError):
pass
if default_value == self.__NoDefaultSpecified__:
raise KeyError(u("Key '{0}' does not exist").format(key))
else:
return default_value
|
Gets the value from the yaml config based on the key.
No type casting is performed, any type casting should be
performed by the caller.
Args:
key (str) - Config setting key.
Kwargs:
default_value - Default value to return if config is not specified.
Returns:
Returns value stored in config file.
|
juraj-google-style
|
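A usage sketch for the get method above, assuming `reader` is an instance of the surrounding ConfigReader class backed by a YAML map such as {"db": {"host": "localhost"}}; the key names are illustrative:
reader.get("db.host")                  # dotted keys walk nested maps -> "localhost"
reader.get("missing_key", "fallback")  # unknown key with a default -> "fallback"
# An environment variable named <ENV_PREFIX>db_host, if set, takes precedence over the file.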
def create_from_wkt(self, wkt, item_type, ingest_source, **attributes):
geojson = load_wkt(wkt).__geo_interface__
vector = {'type': 'Feature', 'geometry': geojson, 'properties': {'item_type': item_type, 'ingest_source': ingest_source, 'attributes': attributes}}
return self.create(vector)[0]
|
Create a single vector in the vector service
Args:
wkt (str): wkt representation of the geometry
item_type (str): item_type of the vector
ingest_source (str): source of the vector
attributes: a set of key-value pairs of attributes
Returns:
id (str): string identifier of the vector created
|
codesearchnet
|
def __init__(self, input_circuit: circuit.QuantumCircuit, name: Union[None, str]=None):
super().__init__(name=name)
input_circuit.build([])
self._circuit = input_circuit
|
Initializes a generic QuantumInference layer.
Args:
input_circuit: The parameterized quantum circuit on which to do inference.
name: Identifier for this inference engine.
|
github-repos
|
def load_file_to_base64_str(f_path):
path = abs_path(f_path)
with io.open(path, 'rb') as f:
f_bytes = f.read()
base64_str = base64.b64encode(f_bytes).decode("utf-8")
return base64_str
|
Loads the content of a file into a base64 string.
Args:
f_path: full path to the file including the file name.
Returns:
A base64 string representing the content of the file in utf-8 encoding.
|
juraj-google-style
|
def overwrite_view_source(project, dir_path):
project_html_location = ((dir_path / project) / HTML_LOCATION)
if (not project_html_location.exists()):
return
files_to_overwrite = [f for f in project_html_location.iterdir() if ('html' in f.suffix)]
for html_file in files_to_overwrite:
with open(html_file, 'r') as f:
html = f.readlines()
for (i, l) in enumerate(html):
if (TO_REPLACE_WITH_HOME in l):
html[i] = NEW_HOME_LINK
break
with open(html_file, 'w') as f:
f.writelines(html)
|
In the project's index.html built file, replace the top "source"
link with a link to the documentation's home, which is mkdoc's home
Args:
project (str): project to update
dir_path (pathlib.Path): this file's path
|
codesearchnet
|
def draw_text(img, pos, text, color, font_scale=0.4):
img = img.astype(np.uint8)
x0, y0 = int(pos[0]), int(pos[1])
font = cv2.FONT_HERSHEY_SIMPLEX
((text_w, text_h), _) = cv2.getTextSize(text, font, font_scale, 1)
if x0 + text_w > img.shape[1]:
x0 = img.shape[1] - text_w
if y0 - int(1.15 * text_h) < 0:
y0 = int(1.15 * text_h)
back_topleft = x0, y0 - int(1.3 * text_h)
back_bottomright = x0 + text_w, y0
cv2.rectangle(img, back_topleft, back_bottomright, color, -1)
text_bottomleft = x0, y0 - int(0.25 * text_h)
cv2.putText(img, text, text_bottomleft, font, font_scale, (222, 222, 222), lineType=cv2.LINE_AA)
return img
|
Draw text on an image.
Args:
img (np.ndarray): the image to draw on
pos (tuple): x, y; the position of the text
text (str): the text to draw
font_scale (float): OpenCV font scale
color (tuple): a 3-tuple BGR color in [0, 255]
|
juraj-google-style
|
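A minimal sketch of draw_text above, assuming the numpy and OpenCV modules used by the function are importable in its module:
import numpy as np

img = np.zeros((120, 320, 3), dtype=np.uint8)              # blank black image
out = draw_text(img, (10, 60), 'hello', color=(0, 200, 0))
# 'out' now contains a filled green box with light-grey text drawn over it.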
def add_value(self, line):
if line.strip():
self._empty = False
if self.current_key in self.known_keys:
self.known_keys[self.current_key].append(line)
else:
self.unknown_keys[self.current_key].append(line)
|
Adds unstructured or multi-line value output to the current parsed
instrumentation block for outputting later.
Usually, this will add extra lines to the value list for the current
key-value pair. However, sometimes, such as when instrumentation
failed to start, output does not follow the structured prefix format.
In this case, adding all of the output is still useful so that a user
can debug the issue.
Args:
line: string, the raw instrumentation line to append to the value
list.
|
github-repos
|
def _ParseRecordString(
self, record_strings_data, record_strings_data_offset, string_offset):
if string_offset == 0:
return None
if string_offset & self._STRING_OFFSET_MSB:
if (string_offset >> 60) != 8:
raise errors.ParseError('Invalid inline record string flag.')
string_size = (string_offset >> 56) & 0x0f
if string_size >= 8:
raise errors.ParseError('Invalid inline record string size.')
string_data = bytes(bytearray([
string_offset >> (8 * byte_index) & 0xff
for byte_index in range(6, -1, -1)]))
try:
return string_data[:string_size].decode('utf-8')
except UnicodeDecodeError as exception:
raise errors.ParseError(
'Unable to decode inline record string with error: {0!s}.'.format(
exception))
data_offset = string_offset - record_strings_data_offset
record_string_map = self._GetDataTypeMap('asl_record_string')
try:
record_string = self._ReadStructureFromByteStream(
record_strings_data[data_offset:], string_offset, record_string_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse record string at offset: 0x{0:08x} with error: '
'{1!s}').format(string_offset, exception))
return record_string.string.rstrip('\x00')
|
Parses a record string.
Args:
record_strings_data (bytes): record strings data.
record_strings_data_offset (int): offset of the record strings data
relative to the start of the file.
string_offset (int): offset of the string relative to the start of
the file.
Returns:
str: record string or None if string offset is 0.
Raises:
ParseError: if the record string cannot be parsed.
|
juraj-google-style
|
def fftn(x):
out = x
for axis in reversed(range(x.ndim)[1:]):
out = torch.fft.fft(out, axis=axis)
return out
|
Applies n-dimensional Fast Fourier Transform (FFT) to input array.
Args:
x: Input n-dimensional array.
Returns:
n-dimensional Fourier transform of input n-dimensional array.
|
github-repos
|
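A short sketch of fftn above; it assumes PyTorch is installed and shows that the transform is applied over every axis except the first (batch) axis:
import torch

x = torch.randn(2, 8, 8)       # batch of 2 two-dimensional arrays
y = fftn(x)                    # FFT over the last two axes only
print(y.shape, y.dtype)        # torch.Size([2, 8, 8]) and a complex dtype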
def get_image_features(self, pixel_values: torch.FloatTensor, **kwargs):
patch_embeddings = [self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype)).squeeze(0) for patch in pixel_values]
return patch_embeddings
|
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
|
github-repos
|
def DownloadReportToFile(self, report_job_id, export_format, outfile, include_report_properties=False, include_totals_row=None, use_gzip_compression=True):
service = self._GetReportService()
if (include_totals_row is None):
include_totals_row = (True if (export_format != 'CSV_DUMP') else False)
opts = {'exportFormat': export_format, 'includeReportProperties': include_report_properties, 'includeTotalsRow': include_totals_row, 'useGzipCompression': use_gzip_compression}
report_url = service.getReportDownloadUrlWithOptions(report_job_id, opts)
_data_downloader_logger.info('Request Summary: Report job ID: %s, %s', report_job_id, opts)
response = self.url_opener.open(report_url)
_data_downloader_logger.debug('Incoming response: %s %s REDACTED REPORT DATA', response.code, response.msg)
while True:
chunk = response.read(_CHUNK_SIZE)
if (not chunk):
break
outfile.write(chunk)
|
Downloads report data and writes it to a file.
The report job must be completed before calling this function.
Args:
report_job_id: The ID of the report job to wait for, as a string.
export_format: The export format for the report file, as a string.
outfile: A writeable, file-like object to write to.
include_report_properties: Whether or not to include the report
properties (e.g. network, user, date generated...)
in the generated report.
include_totals_row: Whether or not to include the totals row.
use_gzip_compression: Whether or not to use gzip compression.
|
codesearchnet
|
def show_qouts(self, nids=None, stream=sys.stdout):
lines = []
for task in self.iflat_tasks(status=self.S_QCRITICAL, nids=nids):
header = (('=== ' + task.qout_file.path) + '===')
lines.append(header)
if task.qout_file.exists:
with open(task.qout_file.path, 'rt') as fh:
lines += fh.readlines()
else:
lines.append('File does not exist!')
lines.append((('=' * len(header)) + (2 * '\n')))
return stream.writelines(lines)
|
Write to the given stream the content of the queue output file for all tasks whose status is S_QCRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout
|
codesearchnet
|
def _should_stop(state, stopping_policy_fn):
return tf.convert_to_tensor(stopping_policy_fn(state.finished), name='should_stop', dtype=tf.bool)
|
Indicates whether the overall Brent search should continue.
Args:
state: A Python `_BrentSearchState` namedtuple.
stopping_policy_fn: Python `callable` controlling the algorithm termination.
Returns:
A boolean value indicating whether the overall search should continue.
|
github-repos
|
def restore_server_connection(self, port=None):
|
Reconnects to the server after the device was disconnected.
Instead of creating a new instance of the client:
- Uses the given port (or finds a new available host port if 0 or None is
given).
- Tries to connect to the remote server with the selected port.
Args:
port: int, if given, this is the host port from which to connect to the
remote device port. Otherwise, finds a new available port as host
port.
Raises:
errors.ServerRestoreConnectionError: when failed to restore the connection
to the snippet server.
|
github-repos
|
def build_position_encoding(position_encoding_type, out_channels=None, project_pos_dim=-1, trainable_position_encoding_kwargs=None, fourier_position_encoding_kwargs=None):
if position_encoding_type == 'trainable':
if not trainable_position_encoding_kwargs:
raise ValueError('Make sure to pass trainable_position_encoding_kwargs')
output_pos_enc = PerceiverTrainablePositionEncoding(**trainable_position_encoding_kwargs)
elif position_encoding_type == 'fourier':
if not fourier_position_encoding_kwargs:
raise ValueError('Make sure to pass fourier_position_encoding_kwargs')
output_pos_enc = PerceiverFourierPositionEncoding(**fourier_position_encoding_kwargs)
else:
raise ValueError(f'Unknown position encoding type: {position_encoding_type}.')
positions_projection = nn.Linear(out_channels, project_pos_dim) if project_pos_dim > 0 else nn.Identity()
return (output_pos_enc, positions_projection)
|
Builds the position encoding.
Args:
- out_channels: refers to the number of channels of the position encodings.
- project_pos_dim: if specified, will project the position encodings to this dimension.
|
github-repos
|
def get_nn(self, structure, n):
return [e['site'] for e in self.get_nn_info(structure, n)]
|
Get near neighbors of site with index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site in structure for which to determine
neighbors.
Returns:
sites (list of Site objects): near neighbors.
|
codesearchnet
|
def has_axis(self, axis):
if self.type != EventType.POINTER_AXIS:
raise AttributeError(_wrong_meth.format(self.type))
return self._libinput.libinput_event_pointer_has_axis(
self._handle, axis)
|
Check if the event has a valid value for the given axis.
If this method returns True for an axis and :meth:`get_axis_value`
returns a value of 0, the event is a scroll stop event.
For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_AXIS`, this method raises
:exc:`AttributeError`.
Args:
axis (~libinput.constant.PointerAxis): The axis to check.
Returns:
bool: True if this event contains a value for this axis.
Raises:
AttributeError
|
juraj-google-style
|
def move_to_destination(source, destination, job_name, sagemaker_session):
parsed_uri = urlparse(destination)
if (parsed_uri.scheme == 'file'):
recursive_copy(source, parsed_uri.path)
final_uri = destination
elif (parsed_uri.scheme == 's3'):
bucket = parsed_uri.netloc
path = ('%s%s' % (parsed_uri.path.lstrip('/'), job_name))
final_uri = ('s3://%s/%s' % (bucket, path))
sagemaker_session.upload_data(source, bucket, path)
else:
raise ValueError(('Invalid destination URI, must be s3:// or file:// URI: %s' % destination))
shutil.rmtree(source)
return final_uri
|
Move source to destination. Can handle uploading to S3.
Args:
source (str): root directory to move
destination (str): file:// or s3:// URI that source will be moved to.
job_name (str): SageMaker job name.
sagemaker_session (sagemaker.Session): a sagemaker_session to interact with S3 if needed
Returns:
(str): destination URI
|
codesearchnet
|
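A hypothetical call to move_to_destination above; the bucket, prefix and job name are made up, and `sagemaker_session` is assumed to be an existing sagemaker.Session with S3 access:
final_uri = move_to_destination(
    source='/tmp/local_job_output',
    destination='s3://my-bucket/outputs/',
    job_name='training-job-1',
    sagemaker_session=sagemaker_session,
)
# final_uri -> 's3://my-bucket/outputs/training-job-1'; the local directory is removed afterwards.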
def Open(self):
if (not self._filename):
raise ValueError('Missing filename.')
if os.path.isfile(self._filename):
raise IOError('Unable to use an already existing file for output [{0:s}]'.format(self._filename))
options = {'constant_memory': True, 'strings_to_urls': False, 'strings_to_formulas': False, 'default_date_format': self._timestamp_format}
self._workbook = xlsxwriter.Workbook(self._filename, options)
self._sheet = self._workbook.add_worksheet('Sheet')
self._current_row = 0
|
Creates a new workbook.
Raises:
IOError: if the specified output file already exists.
OSError: if the specified output file already exists.
ValueError: if the filename is not set.
|
codesearchnet
|
def alias_tool(self, context_name, tool_name, tool_alias):
data = self._context(context_name)
aliases = data["tool_aliases"]
if tool_name in aliases:
raise SuiteError("Tool %r in context %r is already aliased to %r"
% (tool_name, context_name, aliases[tool_name]))
self._validate_tool(context_name, tool_name)
aliases[tool_name] = tool_alias
self._flush_tools()
|
Register an alias for a specific tool.
Note that a tool alias takes precedence over a context prefix/suffix.
Args:
context_name (str): Context containing the tool.
tool_name (str): Name of tool to alias.
tool_alias (str): Alias to give the tool.
|
juraj-google-style
|
def add_snippet_client(self, name, package, config=None):
if name in self._snippet_clients:
raise Error(self, 'Name "%s" is already registered with package "%s", it cannot be used again.' % (name, self._snippet_clients[name].client.package))
for snippet_name, client in self._snippet_clients.items():
if package == client.package:
raise Error(self, 'Snippet package "%s" has already been loaded under name "%s".' % (package, snippet_name))
client = snippet_client_v2.SnippetClientV2(package=package, ad=self._device, config=config)
client.initialize()
self._snippet_clients[name] = client
|
Adds a snippet client to the management.
Args:
name: string, the attribute name to which to attach the snippet
client. E.g. `name='maps'` attaches the snippet client to
`ad.maps`.
package: string, the package name of the snippet apk to connect to.
config: snippet_client_v2.Config, the configuration object for
controlling the snippet behaviors. See the docstring of the `Config`
class for supported configurations.
Raises:
Error, if a duplicated name or package is passed in.
|
github-repos
|
def _orthogonal_kernel(self, ksize, cin, cout):
if cin > cout:
raise ValueError(f'The number of input channels (cin={cin}) cannot exceed the number of output channels (cout={cout}).')
orth = self._orthogonal_matrix(cout)[0:cin, :]
if ksize == 1:
return array_ops.expand_dims(orth, 0)
p = self._block_orth(self._symmetric_projection(cout))
for _ in range(ksize - 2):
temp = self._block_orth(self._symmetric_projection(cout))
p = self._matrix_conv(p, temp)
for i in range(ksize):
p[i] = math_ops.matmul(orth, p[i])
return self._dict_to_tensor(p, ksize)
|
Construct orthogonal kernel for convolution.
Args:
ksize: Kernel size.
cin: Number of input channels.
cout: Number of output channels.
Returns:
An [ksize, ksize, cin, cout] orthogonal kernel.
Raises:
ValueError: If cin > cout.
|
github-repos
|
def monte_carlo_standard_error(chain, batch_size_generator=None, compute_method=None):
batch_size_generator = (batch_size_generator or SquareRootSingleBatch())
compute_method = (compute_method or BatchMeansMCSE())
batch_sizes = batch_size_generator.get_univariate_ess_batch_sizes(len(chain))
return np.min(list((compute_method.compute_standard_error(chain, b) for b in batch_sizes)))
|
Compute Monte Carlo standard errors for the expectations.
This is a convenience function that calls the compute method for each batch size and returns the lowest
standard error over the used batch sizes.
Args:
chain (ndarray): the Markov chain
batch_size_generator (UniVariateESSBatchSizeGenerator): the method that generates that batch sizes
we will use. Per default it uses the :class:`SquareRootSingleBatch` method.
compute_method (ComputeMonteCarloStandardError): the method used to compute the standard error.
By default we will use the :class:`BatchMeansMCSE` method
|
codesearchnet
|
def __init__(self, port_no=PortNo.OFPP_ANY):
super().__init__()
self.port_no = port_no
|
Create a PortStatsRequest with the optional parameters below.
Args:
port_no (:class:`int`, :class:`~pyof.v0x04.common.port.PortNo`):
:attr:`StatsType.OFPST_PORT` message must request statistics
either for a single port (specified in ``port_no``) or for all
ports (if ``port_no`` == :attr:`.PortNo.OFPP_ANY`).
|
juraj-google-style
|
def sh(self, cmd, ignore_error=False, cwd=None, shell=False, **kwargs):
kwargs.update({
'shell': shell,
'cwd': cwd or self.fpath,
'stderr': subprocess.STDOUT,
'stdout': subprocess.PIPE,
'ignore_error': ignore_error})
log.debug((('cmd', cmd), ('kwargs', kwargs)))
return sh(cmd, **kwargs)
|
Run a command with the current working directory set to self.fpath
Args:
cmd (str or tuple): cmdstring or listlike
Keyword Arguments:
ignore_error (bool): if False, raise an Exception if p.returncode is
not 0
cwd (str): current working dir to run cmd with
shell (bool): subprocess.Popen ``shell`` kwarg
Returns:
str: stdout output of wrapped call to ``sh`` (``subprocess.Popen``)
|
juraj-google-style
|
def auto_convert_string_cell(flagable, cell_str, position, worksheet, flags,
units, parens_as_neg=True):
conversion = cell_str.strip()
if re.search(allregex.control_wrapping_regex, cell_str):
stripped_cell = cell_str.strip()
mod_cell_str = stripped_cell[1:][:-1].strip()
neg_mult = False
if (stripped_cell[0] == '(' and stripped_cell[-1] == ')' and
re.search(allregex.contains_numerical_regex, mod_cell_str)):
neg_mult = True
flagable.flag_change(flags, 'interpreted', position, worksheet,
flagable.FLAGS['removed-wrapping'])
converted_value = auto_convert_cell(flagable, mod_cell_str, position,
worksheet, flags, units)
neg_mult = neg_mult and check_cell_type(converted_value, get_cell_type(0))
if neg_mult and parens_as_neg:
flagable.flag_change(flags, 'interpreted', position, worksheet,
flagable.FLAGS['converted-wrapping-to-neg'])
return -converted_value if neg_mult else converted_value
elif re.search(allregex.contains_numerical_regex, cell_str):
conversion = auto_convert_numeric_string_cell(flagable, conversion, position,
worksheet, flags, units)
elif re.search(allregex.bool_regex, cell_str):
flagable.flag_change(flags, 'interpreted', position, worksheet,
flagable.FLAGS['bool-to-int'])
conversion = 1 if re.search(allregex.true_bool_regex, cell_str) else 0
return conversion
|
Handles the string case of cell and attempts auto-conversion
for auto_convert_cell.
Args:
parens_as_neg: Converts numerics surrounded by parens to negative values
|
juraj-google-style
|
def deserialize_skycoord(d):
if 'distance' in d:
args = (d['lon'], d['lat'], d['distance'])
else:
args = (d['lon'], d['lat'])
return coords.SkyCoord(
*args,
frame=d['frame'],
representation='spherical')
|
Deserializes a JSONified :obj:`astropy.coordinates.SkyCoord`.
Args:
d (:obj:`dict`): A dictionary representation of a :obj:`SkyCoord` object.
Returns:
A :obj:`SkyCoord` object.
|
juraj-google-style
|
def reset_sequence(self, topic):
if (topic in self.queues):
self.queues[topic].reset()
|
Reset the expected sequence number for a topic
If the topic is unknown, this does nothing. This behaviour is
useful when you have wildcard topics that only create queues
once they receive the first message matching the topic.
Args:
topic (string): The topic to reset the packet queue on
|
codesearchnet
|
def describe(self, req=None, resp=None, **kwargs):
description = {'params': OrderedDict([(name, param.describe()) for (name, param) in self.params.items()]), 'details': inspect.cleandoc((self.__class__.__doc__ or 'This resource does not have description yet')), 'name': self.__class__.__name__, 'methods': self.allowed_methods()}
if req:
description['path'] = req.path
description.update(**kwargs)
return description
|
Describe API resource using resource introspection.
Additional description on a derived resource class can be added using
keyword arguments and calling the ``super().describe()`` method,
like the following:
.. code-block:: python
class SomeResource(BaseResource):
def describe(req, resp, **kwargs):
return super().describe(
req, resp, type='list', **kwargs
)
Args:
req (falcon.Request): request object
resp (falcon.Response): response object
kwargs (dict): dictionary of values created from resource url
template
Returns:
dict: dictionary with resource description information
.. versionchanged:: 0.2.0
The `req` and `resp` parameters became optional to ease the
implementation of application-level documentation generators.
|
codesearchnet
|
def call(self, inputs, state, **kwargs):
return self._call_wrapped_cell(inputs, state, cell_call_fn=self.cell.call, **kwargs)
|
Runs the RNN cell step computation.
When `call` is being used, we assume that the wrapper object has been built,
and therefore the wrapped cells has been built via its `build` method and
its `call` method can be used directly.
This allows to use the wrapped cell and the non-wrapped cell equivalently
when using `call` and `build`.
Args:
inputs: A tensor with wrapped cell's input.
state: A tensor or tuple of tensors with wrapped cell's state.
**kwargs: Additional arguments passed to the wrapped cell's `call`.
Returns:
A pair containing:
- Output: A tensor with cell's output.
- New state: A tensor or tuple of tensors with new wrapped cell's state.
|
github-repos
|
def convert_to_beam_type(typ):
if (sys.version_info.major == 3 and sys.version_info.minor >= 10) and isinstance(typ, types.UnionType):
typ = typing.Union[typ]
if getattr(typ, '__module__', None) == 'typing':
typ = convert_typing_to_builtin(typ)
typ_module = getattr(typ, '__module__', None)
if isinstance(typ, typing.TypeVar):
if id(typ) not in _type_var_cache:
new_type_variable = typehints.TypeVariable(typ.__name__)
_type_var_cache[id(typ)] = new_type_variable
_type_var_cache[id(new_type_variable)] = typ
return _type_var_cache[id(typ)]
elif isinstance(typ, str):
_LOGGER.info('Converting string literal type hint to Any: "%s"', typ)
return typehints.Any
elif sys.version_info >= (3, 10) and isinstance(typ, typing.NewType):
_LOGGER.info('Converting NewType type hint to Any: "%s"', typ)
return typehints.Any
elif typ_module == 'apache_beam.typehints.native_type_compatibility' and getattr(typ, '__name__', typ.__origin__.__name__) == 'TypedWindowedValue':
pass
elif is_typeddict(typ):
return typehints.Dict[str, typehints.Any]
elif typ_module not in _CONVERTED_MODULES and (not is_builtin(typ)):
return typ
if typ_module == 'collections.abc' and getattr(typ, '__origin__', typ) not in _CONVERTED_COLLECTIONS:
return typ
type_map = [_TypeMapEntry(match=is_new_type, arity=0, beam_type=typehints.Any), _TypeMapEntry(match=is_forward_ref, arity=0, beam_type=typehints.Any), _TypeMapEntry(match=is_any, arity=0, beam_type=typehints.Any), _TypeMapEntry(match=_match_is_dict, arity=2, beam_type=typehints.Dict), _TypeMapEntry(match=_match_is_exactly_iterable, arity=1, beam_type=typehints.Iterable), _TypeMapEntry(match=_match_is_primitive(list), arity=1, beam_type=typehints.List), _TypeMapEntry(match=_match_is_primitive(frozenset), arity=1, beam_type=typehints.FrozenSet), _TypeMapEntry(match=_match_is_set, arity=1, beam_type=typehints.Set), _TypeMapEntry(match=match_is_named_tuple, arity=0, beam_type=typehints.Any), _TypeMapEntry(match=_match_is_primitive(tuple), arity=-1, beam_type=typehints.Tuple), _TypeMapEntry(match=_match_is_union, arity=-1, beam_type=typehints.Union), _TypeMapEntry(match=_match_issubclass(collections.abc.Generator), arity=3, beam_type=typehints.Generator), _TypeMapEntry(match=_match_issubclass(collections.abc.Iterator), arity=1, beam_type=typehints.Iterator), _TypeMapEntry(match=_match_is_exactly_collection, arity=1, beam_type=typehints.Collection), _TypeMapEntry(match=_match_issubclass(TypedWindowedValue), arity=1, beam_type=typehints.WindowedValue), _TypeMapEntry(match=_match_is_exactly_sequence, arity=1, beam_type=typehints.Sequence), _TypeMapEntry(match=_match_is_exactly_mapping, arity=2, beam_type=typehints.Mapping)]
matched_entry = next((entry for entry in type_map if entry.match(typ)), None)
if not matched_entry:
_LOGGER.info('Using Any for unsupported type: %s', typ)
return typehints.Any
args = _get_args(typ)
len_args = len(args)
if len_args == 0 and len_args != matched_entry.arity:
arity = matched_entry.arity
if _match_issubclass(typing.Tuple)(typ):
args = (typehints.TypeVariable('T'), Ellipsis)
elif _match_is_union(typ):
raise ValueError('Unsupported Union with no arguments.')
elif _match_issubclass(typing.Generator)(typ):
args = (typehints.TypeVariable('T_co'), type(None), type(None))
elif _match_issubclass(typing.Dict)(typ):
args = (typehints.TypeVariable('KT'), typehints.TypeVariable('VT'))
elif _match_issubclass(typing.Iterator)(typ) or _match_is_exactly_iterable(typ):
args = (typehints.TypeVariable('T_co'),)
else:
args = (typehints.TypeVariable('T'),) * arity
elif matched_entry.arity == -1:
arity = len_args
elif len_args == 1 and _safe_issubclass(getattr(typ, '__origin__', typ), collections.Counter):
args = (args[0], int)
len_args = 2
arity = matched_entry.arity
else:
arity = matched_entry.arity
if len_args != arity:
raise ValueError('expecting type %s to have arity %d, had arity %d instead' % (str(typ), arity, len_args))
typs = convert_to_beam_types(args)
if arity == 0:
return matched_entry.beam_type
elif arity == 1:
return matched_entry.beam_type[typs[0]]
else:
return matched_entry.beam_type[tuple(typs)]
|
Convert a given typing type to a Beam type.
Args:
typ (`typing.Union[type, str]`): typing type or string literal representing
a type.
Returns:
type: The given type converted to a Beam type as far as we can do the
conversion.
Raises:
ValueError: The type was malformed.
|
github-repos
|
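A few illustrative calls to convert_to_beam_type above, assuming the Apache Beam typehints module is importable; the exact reprs may differ between Beam versions:
from typing import Dict, List, Optional

print(convert_to_beam_type(List[int]))         # a Beam List[int] type hint
print(convert_to_beam_type(Dict[str, float]))  # a Beam Dict[str, float] type hint
print(convert_to_beam_type(Optional[str]))     # a Union of str and None
print(convert_to_beam_type('SomeForwardRef'))  # string literals become Any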
def vmstat(stat):
out = subprocess.check_output(["vmstat", "-s"])
stat = stat.encode("ascii")
for line in out.split(b"\n"):
line = line.strip()
if stat in line:
return int(line.split(b" ")[0])
raise ValueError("Can't find {} in 'vmstat' output.".format(stat))
|
Run vmstat and get a particular statistic.
Args:
stat: The statistic that we are interested in retrieving.
Returns:
The parsed output.
|
juraj-google-style
|
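A quick sketch of vmstat above; it assumes a Linux host with the procps `vmstat` binary, whose `vmstat -s` output contains lines such as "  812344 K free memory" and "  12345 pages swapped in":
free_kb = vmstat("K free memory")       # parses the leading number from the matching line
swapped_in = vmstat("pages swapped in")
print(free_kb, swapped_in)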
def get_factors_iterative1(n):
todo, res = [(n, 2, [])], []
while todo:
n, i, combi = todo.pop()
while i * i <= n:
if n % i == 0:
res += combi + [i, n // i],
todo.append((n // i, i, combi + [i]))
i += 1
return res
|
Computes all factor combinations of n.
Translated the recursive function get_factors(...) into
an iterative call-stack model.
Arguments:
n {int} -- number to be analysed
Returns:
list of lists -- all factor combinations
|
juraj-google-style
|
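A quick call to get_factors_iterative1 above shows the kind of output it produces; each inner list is one factor combination of n:
print(get_factors_iterative1(12))   # [[2, 6], [3, 4], [2, 2, 3]]
print(get_factors_iterative1(8))    # [[2, 4], [2, 2, 2]]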
def _create_make_unique(inputs):
if inputs.shape.ndims != 2:
raise ValueError("Input of top_k_with_unique must be rank-2 "
"but got: %s" % inputs.shape)
height = inputs.shape[0]
width = inputs.shape[1]
zeros = tf.zeros([height, width], dtype=tf.int32)
log2_ceiling = int(math.ceil(math.log(int(width), 2)))
next_power_of_two = 1 << log2_ceiling
count_mask = ~(next_power_of_two - 1)
count_mask_r0 = tf.constant(count_mask)
count_mask_r2 = tf.fill([height, width], count_mask_r0)
smallest_normal = 1 << 23
smallest_normal_r0 = tf.constant(smallest_normal, dtype=tf.int32)
smallest_normal_r2 = tf.fill([height, width], smallest_normal_r0)
low_bit_mask = ~(1 << 31)
low_bit_mask_r0 = tf.constant(low_bit_mask, dtype=tf.int32)
low_bit_mask_r2 = tf.fill([height, width], low_bit_mask_r0)
iota = tf.tile(tf.expand_dims(tf.range(width, dtype=tf.int32), 0),
[height, 1])
input_r2 = tf.bitcast(inputs, tf.int32)
abs_r2 = tf.bitwise.bitwise_and(input_r2, low_bit_mask_r2)
if_zero_r2 = tf.equal(abs_r2, zeros)
smallest_normal_preserving_sign_r2 = tf.bitwise.bitwise_or(
input_r2, smallest_normal_r2)
input_no_zeros_r2 = tf.where(
if_zero_r2, smallest_normal_preserving_sign_r2, input_r2)
and_r2 = tf.bitwise.bitwise_and(input_no_zeros_r2, count_mask_r2)
or_r2 = tf.bitwise.bitwise_or(and_r2, iota)
return tf.bitcast(or_r2, tf.float32)
|
Replaces the lower bits of each element with iota.
The iota is used to derive the index, and also serves the purpose to
make each element unique to break ties.
Args:
inputs: A tensor with rank of 2 and dtype of tf.float32.
[batch_size, original_size].
Returns:
A tensor after element wise transformation, with dtype the same as inputs.
[batch_size, original_size].
Raises:
ValueError: If the rank of the input tensor does not equal 2.
|
juraj-google-style
|
def from_rfc3339(rfc3339_text, with_nanos=False):
timestamp = strict_rfc3339.rfc3339_to_timestamp(rfc3339_text)
result = datetime.datetime.utcfromtimestamp(timestamp)
if with_nanos:
return (result, int(((timestamp - int(timestamp)) * 1000000000.0)))
else:
return result
|
Parse a RFC 3339 date string format to datetime.date.
Example of accepted format: '1972-01-01T10:00:20.021-05:00'
- By default, the result is a datetime.datetime
- If with_nanos is true, the result is a 2-tuple, (datetime.datetime,
nanos), where the second field represents the possible nanosecond
resolution component of the second field.
Args:
rfc3339_text (string): An rfc3339 formatted date string
with_nanos (bool): Determines if nanoseconds should be parsed from the
string
Raises:
ValueError: if ``rfc3339_text`` is invalid
Returns:
:class:`datetime.datetime`: when with_nanos is False
tuple(:class:`datetime.datetime`, int): when with_nanos is True
|
codesearchnet
|
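A short sketch of from_rfc3339 above, assuming the strict_rfc3339 package used by the function is installed:
dt = from_rfc3339('1972-01-01T10:00:20.021-05:00')
print(dt)                     # naive UTC datetime: 1972-01-01 15:00:20.021000
dt, nanos = from_rfc3339('1972-01-01T10:00:20.021-05:00', with_nanos=True)
print(nanos)                  # fractional-second part in nanoseconds, roughly 21000000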
def fn(x: int) -> None:
pass
|
Test function
Args:
x: The first input
|
github-repos
|
def assert_present(self, selector, testid=None, **kwargs):
self.info_log(
"Assert present selector(%s) testid(%s)" % (selector, testid)
)
wait_until_present = kwargs.get(
'wait_until_present',
BROME_CONFIG['proxy_driver']['wait_until_present_before_assert_present']
)
self.debug_log(
"effective wait_until_present: %s" % wait_until_present
)
if wait_until_present:
element = self.wait_until_present(selector, raise_exception=False)
else:
element = self.is_present(selector)
if element:
if testid is not None:
self.create_test_result(testid, True)
return True
else:
if testid is not None:
self.create_test_result(testid, False)
return False
|
Assert that the element is present in the dom
Args:
selector (str): the selector used to find the element
test_id (str): the test_id or a str
Kwargs:
wait_until_present (bool)
Returns:
bool: True if the assertion succeeds; False otherwise.
|
juraj-google-style
|
def bootstrap_results(self, state):
def loss():
q = self._flattened_variational_distribution()
samples = q.sample(self.train_batch_size)
return tf.reduce_mean(input_tensor=(q.log_prob(samples) - self._flattened_target_log_prob(samples)), axis=(- 1))
lr = tf.convert_to_tensor(value=self.learning_rate, dtype=self._dtype)
dtype = lr.dtype
learning_rate = tf.compat.v2.optimizers.schedules.PiecewiseConstantDecay(list((self.num_train_steps * np.array([0.2, 0.8]).astype(dtype.as_numpy_dtype()))), [lr, (lr * 0.1), (lr * 0.01)])
opt = tf.compat.v2.optimizers.Adam(learning_rate)
@tf.function(autograph=False)
def train_step():
with tf.GradientTape() as tape:
loss_val = loss()
vals = tape.watched_variables()
grads = tape.gradient(loss_val, vals)
grads_and_vals = list(zip(grads, vals))
opt.apply_gradients(grads_and_vals)
return loss_val
for step in range(self.num_train_steps):
loss_val = train_step()
tf.debugging.assert_all_finite(loss_val, 'NeuTra loss is NaN at step {}'.format(step))
if self.train_debug_fn:
self.train_debug_fn(self, step, loss_val)
state_parts = tf.nest.flatten(state)
flat_state_shapes = tf.nest.flatten(self.state_shape)
batch_shape = tf.shape(input=state_parts[0])[:(- flat_state_shapes[0].ndims)]
return self._kernel.bootstrap_results(self._flattened_variational_distribution().sample(batch_shape, seed=self.seed))
|
Trains the bijector and creates initial `previous_kernel_results`.
The supplied `state` is only used to determine the number of chains to run
in parallel.
Args:
state: `Tensor` or Python `list` of `Tensor`s representing the initial
state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*state))`.
Returns:
kernel_results: Instance of
`UncalibratedHamiltonianMonteCarloKernelResults` inside
`MetropolisHastingsResults` inside `TransformedTransitionKernelResults`
inside `SimpleStepSizeAdaptationResults`.
|
codesearchnet
|
def __init__(self, tokenizer=None, trie=None):
pyee.EventEmitter.__init__(self)
self.tokenizer = tokenizer or EnglishTokenizer()
self.trie = trie or Trie()
self.regular_expressions_entities = []
self._regex_strings = set()
self.tagger = EntityTagger(self.trie, self.tokenizer, self.regular_expressions_entities)
self.intent_parsers = []
|
Initialize the IntentDeterminationEngine
Args:
tokenizer(tokenizer) : tokenizer used to break up spoken text
example EnglishTokenizer()
trie(Trie): tree of matches to Entities
|
juraj-google-style
|
def _GetNetworkInfo(self, signatures_key):
network_info = {}
for category in signatures_key.GetSubkeys():
for signature in category.GetSubkeys():
profile_guid_value = signature.GetValueByName('ProfileGuid')
if profile_guid_value:
profile_guid = profile_guid_value.GetDataAsObject()
else:
continue
default_gateway_mac_value = signature.GetValueByName('DefaultGatewayMac')
if default_gateway_mac_value:
default_gateway_mac = ':'.join(['{0:02x}'.format(octet) for octet in bytearray(default_gateway_mac_value.data)])
else:
default_gateway_mac = None
dns_suffix_value = signature.GetValueByName('DnsSuffix')
if dns_suffix_value:
dns_suffix = dns_suffix_value.GetDataAsObject()
else:
dns_suffix = None
network_info[profile_guid] = (default_gateway_mac, dns_suffix)
return network_info
|
Retrieves the network info within the signatures subkey.
Args:
signatures_key (dfwinreg.WinRegistryKey): a Windows Registry key.
Returns:
dict[str, tuple]: a tuple of default_gateway_mac and dns_suffix per
profile identifier (GUID).
|
codesearchnet
|
def get_op_consumers(self, src_op_name):
return self._op_consumers[src_op_name]
|
Get all the downstream consumers of this op.
Only data (non-control) edges are tracked.
Args:
src_op_name: Name of the op providing the tensor being consumed.
Returns:
A list of (src_slot, dst_op_name, dst_slot) tuples. In each item of
the list:
src_slot: 0-based output slot of the op of which the output tensor
is being consumed.
dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")
dst_slot: 0-based input slot of the consuming op that receives
the tensor from this op.
|
github-repos
|
def loads(string, triples=False, cls=PENMANCodec, **kwargs):
codec = cls(**kwargs)
return list(codec.iterdecode(string, triples=triples))
|
Deserialize a list of PENMAN-encoded graphs from *string*.
Args:
string: a string containing graph data
triples: if True, read graphs as triples instead of as PENMAN
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
a list of Graph objects
|
juraj-google-style
|
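A small sketch of loads above using an inline PENMAN string; this assumes the penman package's PENMANCodec, which is the default cls:
graphs = loads('(b / bark-01 :ARG0 (d / dog))')
print(len(graphs))        # 1 graph decoded from the string
graph = graphs[0]         # a penman Graph object holding the decoded triples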
def _build(self, ids):
if (self._existing_vocab is None):
if (self.EMBEDDINGS not in self._initializers):
self._initializers[self.EMBEDDINGS] = tf.initializers.random_normal()
self._embeddings = tf.get_variable('embeddings', shape=[self._vocab_size, self._embed_dim], dtype=tf.float32, initializer=self._initializers[self.EMBEDDINGS], partitioner=self._partitioners.get(self.EMBEDDINGS, None), regularizer=self._regularizers.get(self.EMBEDDINGS, None), trainable=self._trainable)
else:
self._embeddings = tf.get_variable('embeddings', dtype=tf.float32, initializer=self._existing_vocab, regularizer=self._regularizers.get(self.EMBEDDINGS, None), trainable=self._trainable)
if self._densify_gradients:
embeddings = util.convert_gradient_to_tensor(self._embeddings)
else:
embeddings = self._embeddings
return tf.nn.embedding_lookup(embeddings, ids, name='embedding_lookup')
|
Lookup embeddings.
Looks up an embedding vector for each value in `ids`. All ids must be within
[0, vocab_size), else an `InvalidArgumentError` is raised at runtime.
Args:
ids: Tensor of dtype int64.
Returns:
Tensor of tf.shape(ids) + [embedding_dim] and dtype float32.
|
codesearchnet
|
def Detect(self, str_in):
components = SplitIntoComponents(str_in)
extracted_paths = set()
for extractor in self.extractors:
extracted_paths.update(extractor.Extract(components))
results = set(extracted_paths)
for post_processor in self.post_processors:
processed_results = set()
for result in results:
processed_results.update(post_processor.Process(result))
results = processed_results
return results
|
Detects paths in a given string.
Args:
str_in: String where the paths should be detected.
Returns:
A set of paths (as strings) detected inside the given string.
|
juraj-google-style
|
def get_target(self, target):
if (target not in self._target_cache):
self._target_cache[target] = self._get_target(target)
return self._target_cache[target]
|
Get the result of _get_target, cache it and return it.
Args:
target (str): target to find.
Returns:
Package/Module: package containing target or corresponding module.
|
codesearchnet
|
def GetUserinfo(credentials, http=None):
http = (http or httplib2.Http())
url = _GetUserinfoUrl(credentials)
(response, content) = http.request(url)
if (response.status == http_client.BAD_REQUEST):
credentials.refresh(http)
url = _GetUserinfoUrl(credentials)
(response, content) = http.request(url)
return json.loads((content or '{}'))
|
Get the userinfo associated with the given credentials.
This is dependent on the token having either the userinfo.email or
userinfo.profile scope for the given token.
Args:
credentials: (oauth2client.client.Credentials) incoming credentials
http: (httplib2.Http, optional) http instance to use
Returns:
The userinfo for this token as a dict; the email address is only
present if the required scopes are available.
|
codesearchnet
|
def moment_by_moment_schedule(device: Device, circuit: Circuit):
schedule = Schedule(device)
t = Timestamp()
for moment in circuit:
if (not moment.operations):
continue
for op in moment.operations:
scheduled_op = ScheduledOperation.op_at_on(op, t, device)
schedule.include(scheduled_operation=scheduled_op)
device.validate_scheduled_operation(schedule, scheduled_op)
max_duration = max((device.duration_of(op) for op in moment.operations))
t += max_duration
return schedule
|
Returns a schedule aligned with the moment structure of the Circuit.
This method attempts to create a schedule in which each moment of a circuit
is scheduled starting at the same time. Given the constraints of the
given device, such a schedule may not be possible; in this case the
method will raise a ValueError with a description of the conflict.
The schedule that is produced will take each moment and schedule the
operations in this moment in a time slice of length equal to the maximum
time of an operation in the moment.
Returns:
A Schedule for the circuit.
Raises:
ValueError: if the scheduling cannot be done.
|
codesearchnet
|
def retry_api_check(exception):
if isinstance(exception, apiclient.errors.HttpError):
if (exception.resp.status in TRANSIENT_HTTP_ERROR_CODES):
_print_error('Retrying...')
return True
if isinstance(exception, socket.error):
if (exception.errno in TRANSIENT_SOCKET_ERROR_CODES):
_print_error('Retrying...')
return True
if isinstance(exception, oauth2client.client.AccessTokenRefreshError):
_print_error('Retrying...')
return True
if isinstance(exception, SSLError):
_print_error('Retrying...')
return True
if isinstance(exception, ServerNotFoundError):
_print_error('Retrying...')
return True
return False
|
Return True if we should retry. False otherwise.
Args:
exception: An exception to test for transience.
Returns:
True if we should retry. False otherwise.
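A predicate like this is typically handed to a retry decorator; a hedged sketch using the `retrying` package (the wrapped function and its request object are hypothetical):
from retrying import retry   # assumption: the 'retrying' package provides the decorator

@retry(retry_on_exception=retry_api_check,
       wait_exponential_multiplier=1000,   # exponential backoff starting at 1 second
       stop_max_attempt_number=5)
def execute_api_call(request):             # hypothetical API call wrapper
    return request.execute()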
|
codesearchnet
|
def pickle_load(cls, filepath):
if os.path.isdir(filepath):
for dirpath, dirnames, filenames in os.walk(filepath):
fnames = [f for f in filenames if f == cls.PICKLE_FNAME]
if fnames:
if len(fnames) == 1:
filepath = os.path.join(dirpath, fnames[0])
break
else:
err_msg = "Found multiple databases:\n %s" % str(fnames)
raise RuntimeError(err_msg)
else:
err_msg = "Cannot find %s inside directory %s" % (cls.PICKLE_FNAME, filepath)
raise ValueError(err_msg)
with open(filepath, "rb") as fh:
new = pickle.load(fh)
from .flows import Flow
flow_workdirs, new.flows = new.flows, []
for flow in map(Flow.pickle_load, flow_workdirs):
new.add_flow(flow)
return new
|
Loads the object from a pickle file.
Args:
filepath: Filename or directory name. If filepath is a directory, we
scan the directory tree starting from filepath and we
read the first pickle database. Raise RuntimeError if multiple
databases are found.
|
juraj-google-style
|
def PublishEvent(cls, event_name, msg, token=None):
cls.PublishMultipleEvents({event_name: [msg]}, token=token)
|
Publish the message into all listeners of the event.
We send the message to all event handlers which contain this
string in their EVENT static member. This allows the event to be
sent to multiple interested listeners.
Args:
event_name: An event name.
msg: The message to send to the event handler.
token: ACL token.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage.
|
codesearchnet
|
def reshard(self, checkpoint_values: List[tensor.Tensor], shape_and_slice_spec: List[str]) -> tensor.Tensor:
del shape_and_slice_spec
if len(checkpoint_values) != 1:
raise ValueError('Default reshard expects a single checkpoint value.')
return checkpoint_values[0]
|
Reshards the checkpoint values as read from the checkpoint file.
Override this to reshard/modify the restored values
Args:
checkpoint_values: The values returned by the restore op, as read from
file.
shape_and_slice_spec: The shape and slice spec required by the caller.
Returns:
The restored `Tensor` value after being resharded.
|
github-repos
|
def consume(self, msg):
msg['body'] = crypto.sign(msg['body'], **self.hub.config)
super(SigningRelayConsumer, self).consume(msg)
|
Sign the message prior to sending the message.
Args:
msg (dict): The message to sign and relay.
|
codesearchnet
|
def Validate(self, value):
if (value is None):
return
if (not isinstance(value, self.rdfclass)):
try:
return self.rdfclass(value)
except rdfvalue.InitializeError:
raise TypeValueError(('Value for arg %s should be an %s' % (self.name, self.rdfclass.__name__)))
return value
|
Validate an RDFValue instance.
Args:
value: An RDFValue instance or something which may be used to instantiate
the correct instance.
Raises:
TypeValueError: If the value is not a valid RDFValue instance or the
required type.
Returns:
A Valid RDFValue instance.
|
codesearchnet
|
def compile_state_invariants(self,
state: Sequence[tf.Tensor]) -> List[TensorFluent]:
scope = self.state_invariant_scope(state)
invariants = []
with self.graph.as_default():
with tf.name_scope('state_invariants'):
for p in self.rddl.domain.invariants:
fluent = self._compile_expression(p, scope)
invariants.append(fluent)
return invariants
|
Compiles the state invariants given the current `state` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
|
juraj-google-style
|
def labels2onehot(labels: [List[str], List[List[str]], np.ndarray], classes: [list, np.ndarray]) -> np.ndarray:
n_classes = len(classes)
y = []
for sample in labels:
curr = np.zeros(n_classes)
if isinstance(sample, list):
for intent in sample:
if (intent not in classes):
log.warning('Unknown intent {} detected. Assigning no class'.format(intent))
else:
curr[np.where((np.array(classes) == intent))[0]] = 1
else:
curr[np.where((np.array(classes) == sample))[0]] = 1
y.append(curr)
y = np.asarray(y)
return y
|
Convert labels to one-hot vectors for multi-class multi-label classification
Args:
labels: list of samples where each sample is a class or a list of classes to which the sample belongs
classes: array of classes' names
Returns:
2d array with one-hot representation of given samples
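A small worked example (labels and class names are made up; the expected output follows from the function above):
import numpy as np

classes = np.array(['greet', 'bye', 'thanks'])
samples = [['greet', 'thanks'], 'bye']    # one multi-label and one single-label sample
onehot = labels2onehot(samples, classes)
# onehot -> array([[1., 0., 1.],
#                  [0., 1., 0.]])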
|
codesearchnet
|
def db_wb020(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `db_wb020`'.format(value))
self._db_wb020 = value
|
Corresponds to IDD Field `db_wb020`
mean dry-bulb temperature coincident with the wet-bulb temperature
corresponding to 2.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `db_wb020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def _log_normalization(self, name='log_normalization'):
with tf.name_scope((name or 'log_normalization_lkj')):
logpi = np.log(np.pi)
ans = tf.zeros_like(self.concentration)
for k in range(1, self.dimension):
ans += (logpi * (k / 2.0))
ans += tf.math.lgamma((self.concentration + (((self.dimension - 1) - k) / 2.0)))
ans -= tf.math.lgamma((self.concentration + ((self.dimension - 1) / 2.0)))
return ans
|
Returns the log normalization of an LKJ distribution.
Args:
name: Python `str` name prefixed to Ops created by this function.
Returns:
log_z: A Tensor of the same shape and dtype as `concentration`, containing
the corresponding log normalizers.
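Written out, the loop above accumulates (with d the dimension and c the concentration) the normalizer
\log Z(c) = \sum_{k=1}^{d-1} \left[ \tfrac{k}{2}\log\pi
    + \log\Gamma\!\left(c + \tfrac{d-1-k}{2}\right)
    - \log\Gamma\!\left(c + \tfrac{d-1}{2}\right) \right]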
|
codesearchnet
|
def add_behaviour(self, behaviour, template=None):
behaviour.set_agent(self)
if issubclass(type(behaviour), FSMBehaviour):
for _, state in behaviour.get_states().items():
state.set_agent(self)
behaviour.set_template(template)
self.behaviours.append(behaviour)
if self.is_alive():
behaviour.start()
|
Adds and starts a behaviour to the agent.
If template is not None it is used to match
new messages and deliver them to the behaviour.
Args:
behaviour (spade.behaviour.CyclicBehaviour): the behaviour to be started
template (spade.template.Template, optional): the template to match messages with (Default value = None)
|
juraj-google-style
|
def train_on_batch(self, data: List[Iterable], labels: Iterable[list]) -> None:
X, Y = self._transform_batch(data, labels)
self.model_.train_on_batch(X, Y)
|
Trains model on a single batch
Args:
data: a batch of word sequences
labels: a batch of correct tag sequences
Returns:
None; the model is updated in place.
|
juraj-google-style
|
def qubo_circuit(
graph: nx.Graph,
steps: int,
beta: Sequence,
gamma: Sequence) -> Circuit:
qubits = list(graph.nodes())
circ = Circuit()
for q0 in qubits:
circ += H(q0)
for p in range(0, steps):
for q0, q1 in graph.edges():
weight = graph[q0][q1].get('weight', 1.0)
circ += ZZ(-weight * gamma[p] / np.pi, q0, q1)
for q0 in qubits:
node_weight = graph.nodes[q0].get('weight', None)
if node_weight is not None:
circ += RZ(node_weight, q0)
for q0 in qubits:
circ += RX(beta[p], q0)
return circ
|
A QAOA circuit for the Quadratic Unconstrained Binary Optimization
problem (i.e. an Ising model).
Args:
graph : a networkx graph instance with optional edge and node weights
steps : number of QAOA steps
beta : driver parameters (One per step)
gamma : cost parameters (One per step)
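A minimal construction sketch (assuming the same Circuit/gate imports used by the function; the graph, weights, and angles below are arbitrary):
import networkx as nx
import numpy as np

graph = nx.Graph()
graph.add_edge(0, 1, weight=0.5)   # weighted coupling
graph.add_edge(1, 2)               # defaults to weight 1.0
steps = 2
beta = np.full(steps, 0.3)         # one driver angle per step
gamma = np.full(steps, 0.7)        # one cost angle per step
circ = qubo_circuit(graph, steps, beta, gamma)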
|
juraj-google-style
|
def support_set(self):
roots = set()
if self.has_attr():
roots.update(self.parent.support_set)
elif self.has_subscript():
roots.update(self.parent.support_set)
roots.update(self.qn[1].support_set)
else:
roots.add(self)
return roots
|
Returns the set of simple symbols that this QN relies on.
This would be the smallest set of symbols necessary for the QN to
statically resolve (assuming properties and index ranges are verified
at runtime).
Examples:
'a.b' has only one support symbol, 'a'
'a[i]' has two support symbols, 'a' and 'i'
|
github-repos
|
def set_sleep_timer(self, sleep_time_seconds):
try:
if sleep_time_seconds is None:
sleep_time = ''
else:
sleep_time = format(
datetime.timedelta(seconds=int(sleep_time_seconds))
)
self.avTransport.ConfigureSleepTimer([
('InstanceID', 0),
('NewSleepTimerDuration', sleep_time),
])
except SoCoUPnPException as err:
if 'Error 402 received' in str(err):
raise ValueError('invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None')
raise
except ValueError:
raise ValueError('invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None')
|
Sets the sleep timer.
Args:
sleep_time_seconds (int or NoneType): How long to wait before
turning off speaker in seconds, None to cancel a sleep timer.
Maximum value of 86399
Raises:
SoCoException: Upon errors interacting with Sonos controller
ValueError: Argument/Syntax errors
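A hedged usage example with the SoCo library (the speaker IP address is hypothetical):
from soco import SoCo              # assumption: this method is exposed on SoCo speaker objects

speaker = SoCo('192.168.1.42')     # hypothetical speaker address
speaker.set_sleep_timer(1800)      # turn off after 30 minutes
speaker.set_sleep_timer(None)      # cancel any active sleep timer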
|
juraj-google-style
|
def save(self, path):
data = self.encode()
with open(path, 'wb') as out:
out.write(data)
|
Save a binary copy of this report
Args:
path (string): The path where we should save the binary copy of the report
|
codesearchnet
|
def debase64(byte_str):
if isinstance(byte_str, str) and not PYTHON2:
byte_str = bytes(byte_str, 'utf-8')
return base64.b64decode(byte_str)
|
Decode base64 encoded bytes/strings.
Args:
- ``byte_str``: The string or bytes to base64 decode.
Returns:
- decoded data as type str on Python 2 and type bytes on Python 3.
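A quick round trip using only the standard library:
import base64

encoded = base64.b64encode(b'hello world')             # b'aGVsbG8gd29ybGQ='
assert debase64(encoded) == b'hello world'
assert debase64('aGVsbG8gd29ybGQ=') == b'hello world'  # str input is accepted on Python 3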
|
juraj-google-style
|
def validate_seeded_answers_simple(answers, options, algo):
seen_options = {}
for answer in answers:
if answer:
key = options[answer['answer']].get('text')
if options[answer['answer']].get('image_url'):
key += options[answer['answer']].get('image_url')
seen_options.setdefault(key, 0)
seen_options[key] += 1
missing_options = []
index = 1
for option in options:
key = option.get('text') + option.get('image_url') if option.get('image_url') else option.get('text')
if option.get('text') != 'n/a':
if seen_options.get(key, 0) == 0:
missing_options.append(_('Option ') + str(index))
index += 1
if missing_options:
return {'seed_error': _('Missing option seed(s): ') + ', '.join(missing_options)}
return None
|
This validator checks that the answers include all possible options
Args:
answers (str): the answers to be checked
options (dict): all options that should exist in the answers
algo (str): selection algorithm
Returns:
None if everything is good. Otherwise, the missing option error message.
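A worked example with the data shapes implied above (option dicts keyed by 'text', answers referencing option indices; the exact error string depends on the `_` translation function):
options = [{'text': 'Red'}, {'text': 'Blue'}]
answers = [{'answer': 0}]                      # only the first option was seeded
err = validate_seeded_answers_simple(answers, options, algo='simple')
# err -> {'seed_error': 'Missing option seed(s): Option 2'}

both = [{'answer': 0}, {'answer': 1}]
assert validate_seeded_answers_simple(both, options, algo='simple') is None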
|
juraj-google-style
|
def block_matrix(A, B, C, D):
return vstackm((hstackm((A, B)), hstackm((C, D))))
|
r"""Generate the operator matrix with quadrants
.. math::
\begin{pmatrix} A & B \\ C & D \end{pmatrix}
Args:
A (Matrix): Matrix of shape ``(n, m)``
B (Matrix): Matrix of shape ``(n, k)``
C (Matrix): Matrix of shape ``(l, m)``
D (Matrix): Matrix of shape ``(l, k)``
Returns:
Matrix: The combined block matrix ``[[A, B], [C, D]]``.
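For intuition, the NumPy analogue of the same layout (illustrative shapes only; the real function operates on the codebase's Matrix type):
import numpy as np

A = np.ones((2, 3)); B = np.zeros((2, 1))
C = np.zeros((4, 3)); D = np.ones((4, 1))
M = np.block([[A, B], [C, D]])     # shape (6, 4), blocks arranged as [[A, B], [C, D]]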
|
codesearchnet
|
def _get_elmt_amt_in_rxt(self, rxt):
return sum([rxt.get_el_amount(e) for e in self.pd.elements])
|
Computes total number of atoms in a reaction formula for elements
not in external reservoir. This method is used in the calculation
of reaction energy per mol of reaction formula.
Args:
rxt (Reaction): a reaction.
Returns:
Total number of atoms for non_reservoir elements.
|
codesearchnet
|
def set_size(a, validate_indices=True):
a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name='a')
if not isinstance(a, sparse_tensor.SparseTensor):
raise TypeError('Expected `SparseTensor`, got %s.' % a)
if a.values.dtype.base_dtype not in _VALID_DTYPES:
raise TypeError(f'Invalid dtype `{a.values.dtype}` not in supported dtypes: `{_VALID_DTYPES}`.')
return gen_set_ops.set_size(a.indices, a.values, a.dense_shape, validate_indices)
|
Compute number of unique elements along last dimension of `a`.
Args:
a: `SparseTensor`, with indices sorted in row-major order.
validate_indices: Whether to validate the order and range of sparse indices
in `a`. Note that setting this to `false` allows for undefined behavior
when calling this function with invalid indices.
Returns:
`int32` `Tensor` of set sizes. For `a` ranked `n`, this is a `Tensor` with
rank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is the
number of unique elements in the corresponding `[0...n-1]` dimension of `a`.
Raises:
TypeError: If `a` is of an invalid type.
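A hedged sketch of calling it on a row-major `SparseTensor` built from a dense tensor (zeros become implicit entries):
import tensorflow as tf

dense = tf.constant([[1, 2, 2, 0],
                     [3, 3, 3, 0]])
sparse = tf.sparse.from_dense(dense)   # indices come out in row-major order
sizes = set_size(sparse)               # -> [2, 1] unique elements per row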
|
github-repos
|
def search(self, terms):
messages = self._connection.get("search/%s" % urllib.quote_plus(terms), key="messages")
if messages:
messages = [Message(self, message) for message in messages]
return messages
|
Search transcripts.
Args:
terms (str): Terms for search
Returns:
array. Messages
|
juraj-google-style
|
def iuptri(items, diago=True, with_inds=False):
for (ii, item1) in enumerate(items):
for (jj, item2) in enumerate(items):
do_yield = (jj >= ii) if diago else (jj > ii)
if do_yield:
if with_inds:
yield (ii, jj), (item1, item2)
else:
yield item1, item2
|
A generator that yields the upper triangle of the matrix (items x items)
Args:
items: Iterable object with elements [e0, e1, ...]
diago: False if diagonal matrix elements should be excluded
with_inds: If True, ((i, j), (e_i, e_j)) is returned, else (e_i, e_j)
>>> for (ij, mate) in iuptri([0,1], with_inds=True):
... print("ij:", ij, "mate:", mate)
ij: (0, 0) mate: (0, 0)
ij: (0, 1) mate: (0, 1)
ij: (1, 1) mate: (1, 1)
|
juraj-google-style
|
def __init__(self, hostname, auth=AnonymousAuth()):
self._hostname = self._construct_full_hostname(hostname)
_logger.debug("Hostname is %s" % self._hostname)
self._auth_info = auth
|
Initializer for the base class.
Save the hostname to use for all requests as well as any
authentication info needed.
Args:
hostname: The host for the requests.
auth: The authentication info needed for any requests.
|
juraj-google-style
|
def power(maf=0.5,beta=0.1, N=100, cutoff=5e-8):
assert maf>=0.0 and maf<=0.5, "maf needs to be between 0.0 and 0.5, got %f" % maf
if beta<0.0:
beta=-beta
std_beta = 1.0/np.sqrt(N*(2.0 * maf*(1.0-maf)))
non_centrality = beta
beta_samples = np.random.normal(loc=non_centrality, scale=std_beta)
n_grid = 100000
beta_in = np.arange(0.5/(n_grid+1.0),(n_grid-0.5)/(n_grid+1.0),1.0/(n_grid+1.0))
beta_theoretical = ((st.norm.isf(beta_in)* std_beta) + non_centrality)
pvals = st.chi2.sf( (beta_theoretical/std_beta)*(beta_theoretical/std_beta) ,1.0)
power = (pvals<cutoff).mean()
return power, pvals
|
estimate power for a given allele frequency, effect size beta and sample size N
Assumption:
z-score = beta_ML distributed as p(0) = N(0, 1.0/(maf*(1-maf)*N)) under the null hypothesis
the actual beta_ML is distributed as p(alt) = N(beta, 1.0/(maf*(1-maf)*N))
Arguments:
maf: minor allele frequency of the SNP
beta: effect size of the SNP
N: sample size (number of individuals)
cutoff: significance threshold applied to the theoretical p-values
Returns:
power: probability to detect a SNP in that study with the given parameters
pvals: the grid of theoretical p-values used to estimate the power
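A quick call showing both return values (parameter values are arbitrary; the exact numbers depend on the internal grid approximation):
pwr, pvals = power(maf=0.3, beta=0.15, N=2000, cutoff=5e-8)
print(pwr)           # estimated detection probability at the genome-wide cutoff
print(pvals.min())   # smallest theoretical p-value on the grid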
|
juraj-google-style
|
def price(self, valuation_date, market, model=None, pricing_context=None, name=None):
model = model or rc.InterestRateModelType.LOGNORMAL_RATE
name = name or self._name + '_price'
with tf.name_scope(name):
swap_annuity = self._swap.annuity(valuation_date, market, model)
forward_swap_rate = self._swap.par_rate(valuation_date, market, model)
strike = self._swap.fixed_rate
expiry_time = dates.daycount_actual_365_fixed(start_date=valuation_date, end_date=self._expiry_date, dtype=self._dtype)
if model == rc.InterestRateModelType.LOGNORMAL_RATE:
option_value = self._price_lognormal_rate(market, pricing_context, forward_swap_rate, strike, expiry_time)
else:
raise ValueError('Unsupported model.')
return self._swap.notional[-1] * swap_annuity * option_value
|
Returns the present value of the swaption on the valuation date.
Args:
valuation_date: A scalar `DateTensor` specifying the date on which
valuation is being desired.
market: A namedtuple of type `InterestRateMarket` which contains the
necessary information for pricing the swaption instrument.
model: An optional input of type `InterestRateModelType` to specify which
model to use for pricing.
Default value: `None` in which case LOGNORMAL_RATE model is used.
pricing_context: An optional input to provide additional parameters (such
as model parameters) relevant for pricing.
name: Python str. The name to give to the ops created by this function.
Default value: `None` which maps to 'price'.
Returns:
A Rank 1 `Tensor` of real type containing the modeled price of each
swaption contract based on the input market data.
Raises:
ValueError: If an unsupported model is supplied to the function.
|
github-repos
|