code | docstring | source
---|---|---|
def wait_for_prompt(self, timeout_s=None):
with self._cond:
if self._prompt:
if timeout_s is None:
self._cond.wait(3600 * 24 * 365)
else:
self._cond.wait(timeout_s)
if self._response is None:
raise PromptUnansweredError
return self._response
|
Wait for the user to respond to the current prompt.
Args:
timeout_s: Seconds to wait before raising a PromptUnansweredError.
Returns:
A string response, or the empty string if text_input was False.
Raises:
PromptUnansweredError: Timed out waiting for the user to respond.
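A minimal usage sketch; `prompts` is an assumed handle to the object exposing this method:
# Hypothetical caller: wait up to 30 s for the operator's answer.
try:
    answer = prompts.wait_for_prompt(timeout_s=30)
except PromptUnansweredError:
    answer = None  # operator did not respond in time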
|
juraj-google-style
|
def _calc_rms(mol1, mol2, clabel1, clabel2):
obmol1 = BabelMolAdaptor(mol1).openbabel_mol
obmol2 = BabelMolAdaptor(mol2).openbabel_mol
cmol1 = ob.OBMol()
for i in clabel1:
oa1 = obmol1.GetAtom(i)
a1 = cmol1.NewAtom()
a1.SetAtomicNum(oa1.GetAtomicNum())
a1.SetVector(oa1.GetVector())
cmol2 = ob.OBMol()
for i in clabel2:
oa2 = obmol2.GetAtom(i)
a2 = cmol2.NewAtom()
a2.SetAtomicNum(oa2.GetAtomicNum())
a2.SetVector(oa2.GetVector())
aligner = ob.OBAlign(True, False)
aligner.SetRefMol(cmol1)
aligner.SetTargetMol(cmol2)
aligner.Align()
return aligner.GetRMSD()
|
Calculate the RMSD.
Args:
mol1: The first molecule. OpenBabel OBMol or pymatgen Molecule
object
mol2: The second molecule. OpenBabel OBMol or pymatgen Molecule
object
clabel1: The atom indices that can reorder the first molecule to
uniform atom order
clabel2: The atom indices that can reorder the second molecule to
uniform atom order
Returns:
The RMSD.
|
juraj-google-style
|
def to_json_file(self, json_file_path: Union[str, os.PathLike]):
with open(json_file_path, 'w', encoding='utf-8') as writer:
config_dict = self.to_dict()
json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\n'
writer.write(json_string)
|
Save this instance to a JSON file.
Args:
json_file_path (Union[str, os.PathLike]): Path to the JSON file in which this configuration instance's parameters will be saved.
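A one-line usage sketch; `config` is an assumed instance of the class defining this method:
config.to_json_file('config.json')  # writes sorted, 2-space-indented JSON followed by a newline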
|
github-repos
|
def quota(self):
response = self._call(mm_calls.ClientState, self.uploader_id)
client_state = response.body.clientstate_response
return (client_state.total_track_count, client_state.locker_track_limit)
|
Get the uploaded track count and allowance.
Returns:
tuple: Number of uploaded tracks, number of tracks allowed.
|
codesearchnet
|
def Get(self, request, global_params=None):
config = self.GetMethodConfig('Get')
return self._RunMethod(config, request, global_params=global_params)
|
Gets information about a snapshot.
Args:
request: (DataflowProjectsSnapshotsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Snapshot) The response message.
|
github-repos
|
def make_message(self, data):
data = self.codec.loads(data)
msg = Message(data.get('data'), *data.get('args', []), **data.get('kwargs', {}))
msg.meta.update(data.get('meta'))
self.trigger('make_message', data, msg)
return msg
|
Create a Message instance from data. The data will be loaded via munge
according to the codec specified in the transport_content_type attribute.
Returns:
Message: message object
|
codesearchnet
|
def _replace_tensors_by_numpy_ndarrays(repr_ds_map: rd.RepresentativeDatasetMapping) -> None:
with session.Session() as sess:
for signature_def_key in repr_ds_map:
ds = repr_ds_map[signature_def_key]
repr_ds_map[signature_def_key] = rd.replace_tensors_by_numpy_ndarrays(ds, sess)
|
Replaces tf.Tensors by their evaluated numpy arrays.
This assumes that tf.Tensors in representative samples are created in the
default Graph. It will raise an error if tensors are created in a different
graph.
Args:
repr_ds_map: SignatureDef key -> RepresentativeDataset mapping.
|
github-repos
|
def calc_crc16(buf):
crc_table = [0x0000, 0xc0c1, 0xc181, 0x0140, 0xc301, 0x03c0, 0x0280, 0xc241,
0xc601, 0x06c0, 0x0780, 0xc741, 0x0500, 0xc5c1, 0xc481, 0x0440,
0xcc01, 0x0cc0, 0x0d80, 0xcd41, 0x0f00, 0xcfc1, 0xce81, 0x0e40,
0x0a00, 0xcac1, 0xcb81, 0x0b40, 0xc901, 0x09c0, 0x0880, 0xc841,
0xd801, 0x18c0, 0x1980, 0xd941, 0x1b00, 0xdbc1, 0xda81, 0x1a40,
0x1e00, 0xdec1, 0xdf81, 0x1f40, 0xdd01, 0x1dc0, 0x1c80, 0xdc41,
0x1400, 0xd4c1, 0xd581, 0x1540, 0xd701, 0x17c0, 0x1680, 0xd641,
0xd201, 0x12c0, 0x1380, 0xd341, 0x1100, 0xd1c1, 0xd081, 0x1040,
0xf001, 0x30c0, 0x3180, 0xf141, 0x3300, 0xf3c1, 0xf281, 0x3240,
0x3600, 0xf6c1, 0xf781, 0x3740, 0xf501, 0x35c0, 0x3480, 0xf441,
0x3c00, 0xfcc1, 0xfd81, 0x3d40, 0xff01, 0x3fc0, 0x3e80, 0xfe41,
0xfa01, 0x3ac0, 0x3b80, 0xfb41, 0x3900, 0xf9c1, 0xf881, 0x3840,
0x2800, 0xe8c1, 0xe981, 0x2940, 0xeb01, 0x2bc0, 0x2a80, 0xea41,
0xee01, 0x2ec0, 0x2f80, 0xef41, 0x2d00, 0xedc1, 0xec81, 0x2c40,
0xe401, 0x24c0, 0x2580, 0xe541, 0x2700, 0xe7c1, 0xe681, 0x2640,
0x2200, 0xe2c1, 0xe381, 0x2340, 0xe101, 0x21c0, 0x2080, 0xe041,
0xa001, 0x60c0, 0x6180, 0xa141, 0x6300, 0xa3c1, 0xa281, 0x6240,
0x6600, 0xa6c1, 0xa781, 0x6740, 0xa501, 0x65c0, 0x6480, 0xa441,
0x6c00, 0xacc1, 0xad81, 0x6d40, 0xaf01, 0x6fc0, 0x6e80, 0xae41,
0xaa01, 0x6ac0, 0x6b80, 0xab41, 0x6900, 0xa9c1, 0xa881, 0x6840,
0x7800, 0xb8c1, 0xb981, 0x7940, 0xbb01, 0x7bc0, 0x7a80, 0xba41,
0xbe01, 0x7ec0, 0x7f80, 0xbf41, 0x7d00, 0xbdc1, 0xbc81, 0x7c40,
0xb401, 0x74c0, 0x7580, 0xb541, 0x7700, 0xb7c1, 0xb681, 0x7640,
0x7200, 0xb2c1, 0xb381, 0x7340, 0xb101, 0x71c0, 0x7080, 0xb041,
0x5000, 0x90c1, 0x9181, 0x5140, 0x9301, 0x53c0, 0x5280, 0x9241,
0x9601, 0x56c0, 0x5780, 0x9741, 0x5500, 0x95c1, 0x9481, 0x5440,
0x9c01, 0x5cc0, 0x5d80, 0x9d41, 0x5f00, 0x9fc1, 0x9e81, 0x5e40,
0x5a00, 0x9ac1, 0x9b81, 0x5b40, 0x9901, 0x59c0, 0x5880, 0x9841,
0x8801, 0x48c0, 0x4980, 0x8941, 0x4b00, 0x8bc1, 0x8a81, 0x4a40,
0x4e00, 0x8ec1, 0x8f81, 0x4f40, 0x8d01, 0x4dc0, 0x4c80, 0x8c41,
0x4400, 0x84c1, 0x8581, 0x4540, 0x8701, 0x47c0, 0x4680, 0x8641,
0x8201, 0x42c0, 0x4380, 0x8341, 0x4100, 0x81c1, 0x8081, 0x4040]
crc = 0xffff
for c in buf:
index = (crc ^ ord(c)) & 0xff
crct = crc_table[index]
crc = (crc >> 8) ^ crct
crc = (crc << 8) | (crc >> 8)
crc &= 0x7F7F
return "%04x" % crc
|
Drop-in pure Python replacement for the ekmcrc.c extension.
Args:
buf (bytes): String or byte array (implicit Python 2.7 cast)
Returns:
str: 16 bit CRC per EKM Omnimeters formatted as hex string.
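A usage sketch; the input string below is illustrative only:
crc_hex = calc_crc16("0123456789")  # CRC of the request string as a 4-character lowercase hex string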
|
juraj-google-style
|
def _get_course_content(course_id, course_url, sailthru_client, site_code, config):
cache_key = "{}:{}".format(site_code, course_url)
response = cache.get(cache_key)
if not response:
try:
sailthru_response = sailthru_client.api_get("content", {"id": course_url})
if not sailthru_response.is_ok():
response = {}
else:
response = sailthru_response.json
cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))
except SailthruClientError:
response = {}
if not response:
logger.error('Could not get course data from Sailthru on enroll/purchase event. '
'Calling Ecommerce Course API to get course info for enrollment confirmation email')
response = _get_course_content_from_ecommerce(course_id, site_code=site_code)
if response:
cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))
return response
|
Get course information using the Sailthru content api or from cache.
If there is an error, just return with an empty response.
Arguments:
course_id (str): course key of the course
course_url (str): LMS url for course info page.
sailthru_client (object): SailthruClient
site_code (str): site code
config (dict): config options
Returns:
course information from Sailthru
|
juraj-google-style
|
def asdict_with_event(self):
event = threading.Event()
with self._lock:
self._update_events.add(event)
return (self._asdict(), event)
|
Get a dict representation of this object and an update event.
Returns:
state: Dict representation of this object.
update_event: An event that is guaranteed to be set if an update has been
triggered since the returned dict was generated.
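A polling sketch; `obj` is an assumed instance exposing this method:
state, update_event = obj.asdict_with_event()
if update_event.wait(timeout=5.0):  # True once an update has been triggered
    state, update_event = obj.asdict_with_event()  # take a fresh snapshot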
|
codesearchnet
|
def make_pool3d_tests(pool_op):
def f(options, expected_tf_failures=0):
test_parameters = [{'ksize': [[1, 1, 1, 1, 1], [1, 2, 2, 2, 1], [1, 2, 3, 4, 1]], 'strides': [[1, 1, 1, 1, 1], [1, 2, 1, 2, 1], [1, 2, 2, 4, 1]], 'input_shape': [[1, 1, 1, 1, 1], [1, 16, 15, 14, 1], [3, 16, 15, 14, 3]], 'padding': ['SAME', 'VALID'], 'data_format': ['NDHWC']}]
def build_graph(parameters):
input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, name='input', shape=parameters['input_shape'])
out = pool_op(input_tensor, ksize=parameters['ksize'], strides=parameters['strides'], data_format=parameters['data_format'], padding=parameters['padding'])
return ([input_tensor], [out])
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(tf.float32, parameters['input_shape'])
return ([input_values], sess.run(outputs, feed_dict=dict(zip(inputs, [input_values]))))
extra_convert_options = ExtraConvertOptions()
extra_convert_options.allow_custom_ops = True
make_zip_of_tests(options, test_parameters, build_graph, build_inputs, extra_convert_options, expected_tf_failures=expected_tf_failures)
return f
|
Make a set of tests to do pooling.
Args:
pool_op: TensorFlow pooling operation to test i.e. `tf.nn.max_pool3d`.
Returns:
A function representing the true generator (after curried pool_op).
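A usage sketch based on the op named in the docstring; the resulting generators are then invoked by the surrounding zip-test harness (an assumption), with TensorFlow imported as in this module:
make_max_pool3d_tests = make_pool3d_tests(tf.nn.max_pool3d)
make_avg_pool3d_tests = make_pool3d_tests(tf.nn.avg_pool3d)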
|
github-repos
|
def add_string_pairs_from_text_view_element(xib_file, results, text_view, special_ui_components_prefix):
text_view_entry_comment = extract_element_internationalized_comment(text_view)
if text_view_entry_comment is None:
return
if text_view.hasAttribute('usesAttributedText') and text_view.attributes['usesAttributedText'].value == 'YES':
add_string_pairs_from_attributed_ui_element(results, text_view, text_view_entry_comment)
else:
try:
text_view_entry_key = text_view.attributes['text'].value
results.append((text_view_entry_key, text_view_entry_comment + ' default text value'))
except KeyError:
pass
warn_if_element_not_of_class(text_view, 'TextView', special_ui_components_prefix)
|
Adds string pairs from a textview element.
Args:
xib_file (str): Path to the xib file.
results (list): The list to add the results to.
text_view(element): The textview element from the xib, to extract the string pairs from.
special_ui_components_prefix (str): A custom prefix for internationalized components to allow (by default only the JT prefix is allowed).
|
juraj-google-style
|
def acquire(self, uuid_path, subnet=None):
try:
with self._create_lock():
if subnet:
LOGGER.debug('Trying to acquire subnet {}'.format(subnet))
acquired_subnet = self._acquire_given_subnet(uuid_path, subnet)
else:
LOGGER.debug('Trying to acquire a free subnet')
acquired_subnet = self._acquire(uuid_path)
return acquired_subnet
except (utils.TimerException, IOError):
raise LagoSubnetLeaseLockException(self.path)
|
Lease a free subnet for the given uuid path.
If subnet is given, try to lease that subnet, otherwise try to lease a
free subnet.
Args:
uuid_path (str): Path to the uuid file of a :class:`lago.Prefix`
subnet (str): A subnet to lease.
Returns:
netaddr.IPAddress: An object which represents the subnet.
Raises:
LagoSubnetLeaseException:
1. If this store is full
2. If the requested subnet is already taken.
LagoSubnetLeaseLockException:
If the lock to self.path can't be acquired.
|
codesearchnet
|
def interpolate(features, hparams, decode_hp):
(inputs, targets) = (features['inputs'], features['targets'])
inputs = tf.unstack(inputs, axis=1)
targets = tf.unstack(targets, axis=1)
coeffs = np.linspace(0.0, 1.0, decode_hp.num_interp)
(first_frame, last_frame) = (inputs[0], targets[(- 1)])
(first_top_z, first_level_eps) = frame_to_latents(first_frame, hparams)
(last_top_z, last_level_eps) = frame_to_latents(last_frame, hparams)
first_lats = (first_level_eps + [first_top_z])
last_lats = (last_level_eps + [last_top_z])
interp_lats = []
lat_iterator = enumerate(zip(first_lats, last_lats))
for (level_ind, (first_lat, last_lat)) in lat_iterator:
if (level_ind in decode_hp.level_interp):
if (decode_hp.channel_interp == 'all'):
interp_lat = glow_ops.linear_interpolate(first_lat, last_lat, coeffs)
else:
interp_lat = glow_ops.linear_interpolate_rank(first_lat, last_lat, coeffs, decode_hp.rank_interp)
else:
interp_lat = tf.tile(first_lat, [decode_hp.num_interp, 1, 1, 1])
interp_lats.append(interp_lat)
level_eps_interp = interp_lats[:(hparams.n_levels - 1)]
z_top_interp = interp_lats[(- 1)]
images = latents_to_frames(z_top_interp, level_eps_interp, hparams)
return (images, first_frame, last_frame)
|
Interpolate between the first input frame and last target frame.
Args:
features: dict of tensors
hparams: HParams, training hparams.
decode_hp: HParams, decode hparams.
Returns:
images: interpolated images, 4-D Tensor, shape=(num_interp, H, W, C)
first_frame: image, 3-D Tensor, shape=(1, H, W, C)
last_frame: image, 3-D Tensor, shape=(1, H, W, C)
|
codesearchnet
|
def write(self, obj: BioCDocument or BioCPassage or BioCSentence):
if self.level == DOCUMENT and not isinstance(obj, BioCDocument):
raise ValueError
if self.level == PASSAGE and not isinstance(obj, BioCPassage):
raise ValueError
if self.level == SENTENCE and not isinstance(obj, BioCSentence):
raise ValueError
self.writer.write(BioCJSONEncoder().default(obj))
|
Encode and write a single object.
Args:
obj: an instance of BioCDocument, BioCPassage, or BioCSentence
Returns:
|
juraj-google-style
|
def draw(vertexes, edges):
Xs = []
Ys = []
sug = _build_sugiyama_layout(vertexes, edges)
for vertex in sug.g.sV:
Xs.append(vertex.view.xy[0] - vertex.view.w / 2.0)
Xs.append(vertex.view.xy[0] + vertex.view.w / 2.0)
Ys.append(vertex.view.xy[1])
Ys.append(vertex.view.xy[1] + vertex.view.h)
for edge in sug.g.sE:
for x, y in edge.view._pts:
Xs.append(x)
Ys.append(y)
minx = min(Xs)
miny = min(Ys)
maxx = max(Xs)
maxy = max(Ys)
canvas_cols = int(math.ceil(math.ceil(maxx) - math.floor(minx))) + 1
canvas_lines = int(round(maxy - miny))
canvas = AsciiCanvas(canvas_cols, canvas_lines)
for edge in sug.g.sE:
assert len(edge.view._pts) > 1
for index in range(1, len(edge.view._pts)):
start = edge.view._pts[index - 1]
end = edge.view._pts[index]
start_x = int(round(start[0] - minx))
start_y = int(round(start[1] - miny))
end_x = int(round(end[0] - minx))
end_y = int(round(end[1] - miny))
assert start_x >= 0
assert start_y >= 0
assert end_x >= 0
assert end_y >= 0
canvas.line(start_x, start_y, end_x, end_y, "*")
for vertex in sug.g.sV:
x = vertex.view.xy[0] - vertex.view.w / 2.0
y = vertex.view.xy[1]
canvas.box(
int(round(x - minx)),
int(round(y - miny)),
vertex.view.w,
vertex.view.h,
)
canvas.text(
int(round(x - minx)) + 1, int(round(y - miny)) + 1, vertex.data
)
canvas.draw()
|
Build a DAG and draw it in ASCII.
Args:
vertexes (list): list of graph vertexes.
edges (list): list of graph edges.
|
juraj-google-style
|
def cardinal(self, to):
return sum((m.cardinal(to) for m in self.submodules))
|
Return the number of dependencies of this package to the given node.
Args:
to (Package/Module): target node.
Returns:
int: number of dependencies.
|
codesearchnet
|
def _ReadCharacterDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):
return self._ReadFixedSizeDataTypeDefinition(definitions_registry, definition_values, data_types.CharacterDefinition, definition_name, self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE, is_member=is_member, supported_size_values=(1, 2, 4))
|
Reads a character data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
CharacterDataTypeDefinition: character data type definition.
|
codesearchnet
|
def __init__(self,
unique_identifier=None):
super(ActivateRequestPayload, self).__init__(
tag=enums.Tags.REQUEST_PAYLOAD)
self.unique_identifier = unique_identifier
self.validate()
|
Construct an ActivateRequestPayload object.
Args:
unique_identifier (UniqueIdentifier): The UUID of a managed
cryptographic object.
|
juraj-google-style
|
def greater_equal(x, y):
return math_ops.greater_equal(x, y)
|
Element-wise truth value of (x >= y).
Args:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
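A usage sketch; creating the operands with tf.constant is an assumption, not part of this backend module:
import tensorflow as tf
x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([2.0, 2.0, 2.0])
mask = greater_equal(x, y)  # [False, True, True]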
|
github-repos
|
def save(obj, filename, protocol=4):
with open(filename, 'wb') as f:
pickle.dump(obj, f, protocol=protocol)
|
Serialize an object to disk using pickle protocol.
Args:
obj: The object to serialize.
filename: Path to the output file.
protocol: Version of the pickle protocol.
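A round-trip sketch; loading uses the standard library directly:
import pickle
save({'weights': [1, 2, 3]}, 'state.pkl')
with open('state.pkl', 'rb') as f:
    restored = pickle.load(f)  # == {'weights': [1, 2, 3]}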
|
codesearchnet
|
def xresnet50_2(pretrained=False, **kwargs):
model = XResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['xresnet50']))
return model
|
Constructs a XResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
|
juraj-google-style
|
def CheckHashes(hash_ids):
return {
k: bool(v)
for k, v in data_store.REL_DB.ReadHashBlobReferences(hash_ids).items()
}
|
Checks if files with given hashes are present in the file store.
Args:
hash_ids: A list of SHA256HashID objects.
Returns:
A dict where SHA256HashID objects are keys. Corresponding values
may be False (if the hash id is not present) or True (if it is present).
|
juraj-google-style
|
def is_expired(self):
expiration_time = (self.created_at + datetime.timedelta(days=1))
return (timezone.now() > expiration_time)
|
Determine if the confirmation has expired.
Returns:
bool:
``True`` if the confirmation has expired and ``False``
otherwise.
|
codesearchnet
|
def revnet_step(name, x, hparams, reverse=True):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if hparams.coupling == "additive":
coupling_layer = functools.partial(
additive_coupling, name="additive", reverse=reverse,
mid_channels=hparams.coupling_width,
activation=hparams.activation, dropout=hparams.coupling_dropout)
else:
coupling_layer = functools.partial(
affine_coupling, name="affine", reverse=reverse,
mid_channels=hparams.coupling_width,
activation=hparams.activation, dropout=hparams.coupling_dropout)
ops = [
functools.partial(actnorm, name="actnorm", reverse=reverse),
functools.partial(invertible_1x1_conv, name="invertible",
reverse=reverse), coupling_layer]
if reverse:
ops = ops[::-1]
objective = 0.0
for op in ops:
x, curr_obj = op(x=x)
objective += curr_obj
return x, objective
|
One step of glow generative flow.
Actnorm + invertible 1X1 conv + affine_coupling.
Args:
name: used for variable scope.
x: input
hparams: coupling_width is the only hparam that is being used in
this function.
reverse: forward or reverse pass.
Returns:
z: Output of one step of reversible flow.
|
juraj-google-style
|
def from_json(cls, json):
params = dict(((str(k), v) for (k, v) in json.iteritems() if (k in cls._PARAMS)))
if (cls._OFFSET_PARAM in params):
params[cls._OFFSET_PARAM] = base64.b64decode(params[cls._OFFSET_PARAM])
return cls(**params)
|
Creates an instance of the InputReader for the given input shard's state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the given JSON parameters.
|
codesearchnet
|
def write_build_info(filename, key_value_list):
build_info = {}
if cuda_config:
build_info.update(cuda_config.config)
if tensorrt_config:
build_info.update(tensorrt_config.config)
for arg in key_value_list:
key, value = arg.split('=')
if value.lower() == 'true':
build_info[key] = True
elif value.lower() == 'false':
build_info[key] = False
else:
build_info[key] = value.format(**build_info)
sorted_build_info_pairs = sorted(build_info.items())
contents = '\n
open(filename, 'w').write(contents)
|
Writes a Python file that describes the build.
Args:
filename: filename to write to.
key_value_list: A list of "key=value" strings that will be added to the
module's "build_info" dictionary as additional entries.
|
github-repos
|
def _send_socket_request(self, xml_request):
def to_variant(number):
buff = []
while number:
byte = number % 128
number = number // 128  # advance to the next 7-bit group
if number > 0:
byte |= 0x80
buff.append(chr(byte))
return ''.join(buff)
def from_variant(stream):
used = 0
number = 0
q = 1
while True:
byte = ord(stream[used])
used += 1
number += q * (byte & 0x7F)
q *= 128
if byte&0x80==0:
break
return (number, used)
def encode_fields(fields):
chunks = []
for field_id, message in fields.items():
chunks.append(to_variant((field_id << 3) | 2))
chunks.append(to_variant(len(message)))
chunks.append(message)
return ''.join(chunks)
def decode_fields(stream):
fields = {}
offset = 0
stream_lenght = len(stream)
while offset<stream_lenght:
field_header, used = from_variant(stream[offset:])
offset += used
wire_type = field_header & 0x07
field_id = field_header >> 3
if wire_type==2:
message_lenght, used = from_variant(stream[offset:])
offset += used
fields[field_id] = stream[offset:offset+message_lenght]
offset += message_lenght
elif wire_type==0:
fields[field_id], used = from_variant(stream[offset:])
offset += used
elif wire_type==1:
fields[field_id] = stream[offset:offset+8]
offset += 8
elif wire_type==3:
raise ConnectionError()
elif wire_type==4:
raise ConnectionError()
elif wire_type==5:
fields[field_id] = stream[offset:offset+4]
offset += 4
else:
raise ConnectionError()
return fields
def make_header(lenght):
result = []
result.append(chr((lenght & 0x000000FF)))
result.append(chr((lenght & 0x0000FF00) >> 8))
result.append(chr((lenght & 0x00FF0000) >> 16))
result.append(chr((lenght & 0xFF000000) >> 24))
return '\t\t\x00\x00' + ''.join(result)
def parse_header(header):
if len(header) == 8 and header[0] == '\t' and header[1] == '\t' and\
header[2] == '\00' and header[3] == '\00':
return ord(header[4]) | (ord(header[5]) << 8) |\
(ord(header[6]) << 16) | (ord(header[7]) << 24)
else:
raise ConnectionError()
def socket_send(data):
sent_bytes = 0
failures = 0
total_bytes = len(data)
while sent_bytes < total_bytes:
sent = self._connection.send(data[sent_bytes:])
if sent == 0:
failures += 1
if failures > 5:
raise ConnectionError()
continue
sent_bytes += sent
def socket_recieve(lenght):
total_recieved = 0
failures = 5
recieved_chunks = []
while total_recieved<lenght:
chunk = self._connection.recv(lenght-total_recieved)
if not chunk:
failures += 1
if failures > 5:
raise ConnectionError()
continue
recieved_chunks.append(chunk)
total_recieved += len(chunk)
return ''.join(recieved_chunks)
encoded_message = encode_fields({1: xml_request,
2: self._storage if self._storage else "special:detect-storage"})
header = make_header(len(encoded_message))
try:
socket_send(header+encoded_message)
except (ConnectionError, socket.error):
self._connection.close()
self._open_connection()
socket_send(header+encoded_message)
header = socket_recieve(8)
lenght = parse_header(header)
encoded_response = socket_recieve(lenght)
response = decode_fields(encoded_response)
return response[1]
|
Send a request via protobuf.
Args:
xml_request -- A fully formed xml request string for the CPS.
Returns:
The raw xml response string.
|
juraj-google-style
|
def checkTUN(self):
packet = self._TUN._tun.read(self._TUN._tun.mtu)
return packet
|
Checks the TUN adapter for data and returns any that is found.
Returns:
packet: Data read from the TUN adapter
|
codesearchnet
|
def with_contextual_override(func: Callable[..., Any]) -> Callable[..., Any]:
with contextual_override() as current_context:
pass
def _func(*args, **kwargs) -> Any:
with contextual_override(**current_context):
return func(*args, **kwargs)
return _func
|
Wraps a user function with the access to the current contextual override.
The wrapped function can be called from another thread.
Args:
func: The user function to be wrapped.
Returns:
A wrapper function that has access to the current contextual override,
which can be called from another thread.
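A cross-thread usage sketch; the override key `user` and the worker body are assumptions:
import threading
def worker():
    return do_work()  # hypothetical function that reads the contextual override
with contextual_override(user='alice'):
    wrapped = with_contextual_override(worker)
threading.Thread(target=wrapped).start()  # runs with user='alice' applied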
|
github-repos
|
def __init__(self, auth, api='/logs/search', **kwargs):
self.api = api
self.log = auth.log
try:
self.url = '%s%s' % (auth.get_url(), self.api)
except AttributeError:
self.url = 'https:
try:
self.auth = auth.get_auth()
except AttributeError:
self.auth = auth
|
Search the logs.
Args:
auth (Client): Authentication object
api (str): API endpoint path
|
juraj-google-style
|
def read(cls, data):
if isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring)\
and data.startswith(("http:
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
|
Reads data from URL or OrderedDict.
Args:
data: can be a URL pointing to a JSONstat file, a JSON string
or an OrderedDict.
Returns:
An object of class Collection populated with data.
|
juraj-google-style
|
def netmiko_send_config(
task: Task,
config_commands: Optional[List[str]] = None,
config_file: Optional[str] = None,
**kwargs: Any
) -> Result:
net_connect = task.host.get_connection("netmiko", task.nornir.config)
net_connect.enable()
if config_commands:
result = net_connect.send_config_set(config_commands=config_commands, **kwargs)
elif config_file:
result = net_connect.send_config_from_file(config_file=config_file, **kwargs)
else:
raise ValueError("Must specify either config_commands or config_file")
return Result(host=task.host, result=result, changed=True)
|
Execute Netmiko send_config_set method (or send_config_from_file)
Arguments:
config_commands: Commands to configure on the remote network device.
config_file: File to read configuration commands from.
kwargs: Additional arguments to pass to method.
Returns:
Result object with the following attributes set:
* result (``str``): string showing the CLI from the configuration changes.
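A typical Nornir invocation sketch (inventory/config file names are assumptions):
from nornir import InitNornir
nr = InitNornir(config_file='config.yaml')
result = nr.run(
    task=netmiko_send_config,
    config_commands=['interface Loopback0', 'description managed-by-nornir'],
)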
|
juraj-google-style
|
def __type_to_tag(self, type_: Type) -> str:
if type_ in scalar_type_to_tag:
return scalar_type_to_tag[type_]
if is_generic_list(type_):
return 'tag:yaml.org,2002:seq'
if is_generic_dict(type_):
return 'tag:yaml.org,2002:map'
if type_ in self._registered_classes.values():
return '!{}'.format(type_.__name__)
raise RuntimeError((
'Unknown type {} in type_to_tag,'
' please report a YAtiML bug.').format(type_))
|
Convert a type to the corresponding YAML tag.
Args:
type_: The type to convert
Returns:
A string containing the YAML tag.
|
juraj-google-style
|
def nhapDaiHan(self, cucSo, gioiTinh):
for cung in self.thapNhiCung:
khoangCach = khoangCachCung(cung.cungSo, self.cungMenh, gioiTinh)
cung.daiHan((cucSo + (khoangCach * 10)))
return self
|
Assign đại hạn (the major fortune periods) to the twelve palaces.
Args:
cucSo (TYPE): Description
gioiTinh (TYPE): Description
Returns:
TYPE: Description
|
codesearchnet
|
def setup(logdir='log'):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logdir = os.path.normpath(logdir)
if (not os.path.exists(logdir)):
os.makedirs(logdir)
t = datetime.datetime.now()
logfile = '{year:04d}{mon:02d}{day:02d}-{hour:02d}{min:02d}{sec:02d}.log'.format(year=t.year, mon=t.month, day=t.day, hour=t.hour, min=t.minute, sec=t.second)
logfile = os.path.join(logdir, logfile)
filehandler = logging.handlers.RotatingFileHandler(filename=logfile, maxBytes=((10 * 1024) * 1024), backupCount=100)
filehandler.setLevel(logging.DEBUG)
fileformatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')
filehandler.setFormatter(fileformatter)
logger.addHandler(filehandler)
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.WARNING)
streamformatter = logging.Formatter('%(levelname)s: %(message)s')
streamhandler.setFormatter(streamformatter)
logger.addHandler(streamhandler)
|
Set up dual logging to console and to logfile.
When this function is called, it first creates the given directory. It then
creates a logfile and passes all log messages to come to it. The logfile
name encodes the date and time when it was created, for example
"20181115-153559.txt". All messages with a log level of at least "WARNING"
are also forwarded to the console.
Args:
logdir: path of the directory where to store the log files. Both a
relative or an absolute path may be specified. If a relative path is
specified, it is interpreted relative to the working directory.
If no directory is given, the logs are written to a folder called
"log" in the working directory.
|
codesearchnet
|
def remove_sonos_playlist(self, sonos_playlist):
object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
return self.contentDirectory.DestroyObject([('ObjectID', object_id)])
|
Remove a Sonos playlist.
Args:
sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove
or the item_id (str).
Returns:
bool: True if successful, False otherwise
Raises:
SoCoUPnPException: If sonos_playlist does not point to a valid
object.
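A SoCo-style usage sketch; the discovery and playlist-listing calls are assumptions about the surrounding library:
import soco
device = soco.discovery.any_soco()
playlist = device.get_sonos_playlists()[0]
device.remove_sonos_playlist(playlist)  # or pass playlist.item_id directly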
|
juraj-google-style
|
def _ParseValueData(self, knowledge_base, value_data):
if not isinstance(value_data, py2to3.UNICODE_TYPE):
raise errors.PreProcessFail(
'Unsupported Windows Registry value type: {0:s} for '
'artifact: {1:s}.'.format(
type(value_data), self.ARTIFACT_DEFINITION_NAME))
if not knowledge_base.GetHostname():
hostname_artifact = artifacts.HostnameArtifact(name=value_data)
knowledge_base.SetHostname(hostname_artifact)
|
Parses Windows Registry value data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
value_data (object): Windows Registry value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
|
juraj-google-style
|
def add_backend_policy(self, json_data):
env = boto3.session.Session(profile_name=self.env, region_name=self.region)
elbclient = env.client('elb')
for job in json.loads(json_data)['job']:
for listener in job['listeners']:
instance_port = listener['internalPort']
backend_policy_list = listener['backendPolicies']
if backend_policy_list:
LOG.info('Adding backend server policies: %s', backend_policy_list)
elbclient.set_load_balancer_policies_for_backend_server(
LoadBalancerName=self.app, InstancePort=instance_port, PolicyNames=backend_policy_list)
|
Attaches backend server policies to an ELB
Args:
json_data (json): return data from ELB upsert
|
juraj-google-style
|
def click_exists(self, timeout=0):
e = self.get(timeout=timeout, raise_error=False)
if e is None:
return False
e.click()
return True
|
Wait for the element and perform a click.
Args:
timeout (float): timeout for wait
Returns:
bool: if successfully clicked
|
juraj-google-style
|
def countriesdata(cls, use_live=True):
if cls._countriesdata is None:
countries = None
if use_live:
try:
countries = hxl.data(cls._ochaurl)
except IOError:
logger.exception('Download from OCHA feed failed! Falling back to stored file.')
if countries is None:
countries = hxl.data(
script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
Country), allow_local=True)
cls.set_countriesdata(countries)
return cls._countriesdata
|
Read countries data from OCHA countries feed (falling back to file)
Args:
use_live (bool): Try to get the latest data from the web rather than the file in the package. Defaults to True.
Returns:
List[Dict[Dict]]: Countries dictionaries
|
juraj-google-style
|
def as_saver_def(self):
return self.saver_def
|
Generates a `SaverDef` representation of this saver.
Returns:
A `SaverDef` proto.
|
github-repos
|
def is_mobile_number_portable_region(region_code):
metadata = PhoneMetadata.metadata_for_region(region_code, None)
if metadata is None:
return False
return metadata.mobile_number_portable_region
|
Returns true if the supplied region supports mobile number portability.
Returns False for invalid or unknown regions, and for regions that don't support
mobile number portability.
Arguments:
region_code -- the region for which we want to know whether it supports mobile number
portability or not.
|
juraj-google-style
|
def update_with_zero_body(self, uri=None, timeout=-1, custom_headers=None):
if not uri:
uri = self.data['uri']
logger.debug('Update with zero length body (uri = %s)' % uri)
resource_data = self._helper.do_put(uri, None, timeout, custom_headers)
return resource_data
|
Makes a PUT request to update a resource when no request body is required.
Args:
uri: Allows to use a different URI other than resource URI
timeout: Timeout in seconds. Wait for task completion by default.
The timeout does not abort the operation in OneView; it just stops waiting for its completion.
custom_headers: Allows to set custom HTTP headers.
Returns:
A dict with updated resource data.
|
juraj-google-style
|
def make_state_space_model(self, num_timesteps, param_vals=None, initial_state_prior=None, initial_step=0):
return self._make_state_space_model(num_timesteps=num_timesteps, param_map=self._canonicalize_param_vals_as_map(param_vals), initial_state_prior=initial_state_prior, initial_step=initial_step)
|
Instantiate this model as a Distribution over specified `num_timesteps`.
Args:
num_timesteps: Python `int` number of timesteps to model.
param_vals: a list of `Tensor` parameter values in order corresponding to
`self.parameters`, or a dict mapping from parameter names to values.
initial_state_prior: an optional `Distribution` instance overriding the
default prior on the model's initial state. This is used in forecasting
("today's prior is yesterday's posterior").
initial_step: optional `int` specifying the initial timestep to model.
This is relevant when the model contains time-varying components,
e.g., holidays or seasonality.
Returns:
dist: a `LinearGaussianStateSpaceModel` Distribution object.
|
codesearchnet
|
class AlphaDropout(Layer):
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(f'Invalid value received for argument `rate`. Expected a float value between 0 and 1. Received: rate={rate}')
self.rate = rate
self.seed = seed
self.noise_shape = noise_shape
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self._build_at_init()
def call(self, inputs, training=False):
if training and self.rate > 0:
noise_shape = self._get_concrete_noise_shape(inputs, self.noise_shape)
alpha = 1.6732632423543772
scale = 1.0507009873554805
alpha_p = -alpha * scale
kept_idx = ops.greater_equal(ops.random.uniform(noise_shape, seed=self.seed_generator), self.rate)
kept_idx = ops.cast(kept_idx, inputs.dtype)
a = ((1 - self.rate) * (1 + self.rate * alpha_p ** 2)) ** (-0.5)
b = -a * alpha_p * self.rate
x = inputs * kept_idx + alpha_p * (1 - kept_idx)
return a * x + b
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def _get_concrete_noise_shape(self, inputs, noise_shape):
if noise_shape is None:
return ops.shape(inputs)
concrete_inputs_shape = ops.shape(inputs)
concrete_noise_shape = []
for i, value in enumerate(noise_shape):
concrete_noise_shape.append(concrete_inputs_shape[i] if value is None else value)
return concrete_noise_shape
def get_config(self):
base_config = super().get_config()
config = {'rate': self.rate, 'seed': self.seed, 'noise_shape': self.noise_shape}
return {**base_config, **config}
|
Applies Alpha Dropout to the input.
Alpha Dropout is a `Dropout` that keeps mean and variance of inputs
to their original values, in order to ensure the self-normalizing property
even after this dropout.
Alpha Dropout fits well to Scaled Exponential Linear Units (SELU) by
randomly setting activations to the negative saturation value.
Args:
rate: Float between 0 and 1. The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
noise_shape: 1D integer tensor representing the shape of the
binary alpha dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the alpha dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding alpha dropout) or in inference mode
(doing nothing).
|
github-repos
|
def __init__(self, default: typing.Optional[float]=MISSING_VALUE, min_value: typing.Optional[float]=None, max_value: typing.Optional[float]=None, is_noneable: bool=False, frozen: bool=False):
super().__init__(float, default, min_value, max_value, is_noneable, frozen)
|
Constructor.
Args:
default: (Optional) default value for this spec.
min_value: (Optional) minimum value of acceptable values.
max_value: (Optional) maximum value of acceptable values.
is_noneable: If True, None is acceptable.
frozen: If True, values other than the default value are not acceptable.
|
github-repos
|
def _construct_forward_backward(self, num_doutputs):
trainable_outputs = [output for output in self._func_graph.outputs[:num_doutputs] if backprop_util.IsTrainable(output)]
signature = []
for t in trainable_outputs:
signature.append(tensor_lib.TensorSpec(*default_gradient.shape_and_dtype(t)))
def _backprop_function(*grad_ys):
with ops.device(None):
return gradients_util._GradientsHelper(trainable_outputs, self._func_graph.inputs, grad_ys=grad_ys, src_graph=self._func_graph)
with self._func_graph.as_default():
backwards_graph = func_graph_module.FuncGraph(_backward_name(self._func_graph.name))
func_graph_module.func_graph_from_py_func(name=backwards_graph.name, python_func=_backprop_function, args=[], kwargs={}, signature=signature, func_graph=backwards_graph)
backwards_graph_captures = backwards_graph.external_captures
captures_from_forward = [c for c in backwards_graph_captures if not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph]
existing_outputs = object_identity.ObjectIdentitySet(self._func_graph.outputs)
for capture in captures_from_forward:
if capture not in existing_outputs:
existing_outputs.add(capture)
self._func_graph.outputs.append(capture)
forward_function, backward_function = _create_forward_backward_with_graph(self._attrs, self._func_graph, backwards_graph)
return (forward_function, backward_function)
|
Constructs a pair of forward and backward functions.
Args:
num_doutputs: The constructed backprop function will take output gradients
for the first `num_doutputs` outputs of the forward function. Defaults
to the number of outputs for the inference function, but when
higher-order gradients are computed this will increase to include side
outputs.
Returns:
A pair of (forward_function, backward_function):
forward_function: A re-generated inference function (an
AtomicFunction) to account for new side outputs, if any extra
were required when building the backward pass.
backward_function: A ConcreteFunction that takes `num_doutputs`
arguments and returns gradients with respect to inputs of the forward
function.
|
github-repos
|
def _FormatValue(self, value, level=0):
def FormatDictItem(key_value):
'Formats single dictionary item.'
(key, value) = key_value
return ((self._FormatValue(key, (level + 1)) + ': ') + self._FormatValue(value, (level + 1)))
def LimitedEnumerate(items, formatter, level=0):
'Returns items in the specified enumerable enforcing threshold.'
count = 0
limit = (self.max_sublist_items if (level > 0) else self.max_list_items)
for item in items:
if (count == limit):
(yield '...')
break
(yield formatter(item))
count += 1
def FormatList(items, formatter, level=0):
'Formats a list using a custom item formatter enforcing threshold.'
return ', '.join(LimitedEnumerate(items, formatter, level=level))
if isinstance(value, _PRIMITIVE_TYPES):
return _TrimString(repr(value), self.max_value_len)
if isinstance(value, _DATE_TYPES):
return str(value)
if (level > self.max_depth):
return str(type(value))
if isinstance(value, dict):
return (('{' + FormatList(six.iteritems(value), FormatDictItem)) + '}')
if isinstance(value, _VECTOR_TYPES):
return _ListTypeFormatString(value).format(FormatList(value, (lambda item: self._FormatValue(item, (level + 1))), level=level))
if isinstance(value, types.FunctionType):
return ('function ' + value.__name__)
if (hasattr(value, '__dict__') and value.__dict__):
return self._FormatValue(value.__dict__, level)
return str(type(value))
|
Pretty-prints an object for a logger.
This function is very similar to the standard pprint. The main difference
is that it enforces limits to make sure we never produce an extremely long
string or take too much time.
Args:
value: Python object to print.
level: current recursion level.
Returns:
Formatted string.
|
codesearchnet
|
def catch(func, *args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
return e
|
Call the supplied function with the supplied arguments,
catching and returning any exception that it throws.
Arguments:
func: the function to run.
*args: positional arguments to pass into the function.
**kwargs: keyword arguments to pass into the function.
Returns:
If the function throws an exception, return the exception.
If the function does not throw an exception, return None.
|
codesearchnet
|
def cancel(self, invoice_id, **kwargs):
url = "{}/{}/cancel".format(self.base_url, invoice_id)
return self.post_url(url, {}, **kwargs)
|
Cancel an unpaid Invoice with given ID via API
It can only be called on an invoice that is not in the paid state.
Args:
invoice_id : Id for cancel the invoice
Returns:
The response for the API will be the invoice entity, similar to create/update API response, with status attribute's value as cancelled
|
juraj-google-style
|
def _get_example(filepath: str, filename: str, tag: Tag, sdk: int) -> Example:
context_line = tag.context_line if tag.context_line <= tag.line_start else tag.context_line - (tag.line_finish - tag.line_start)
return Example(sdk=SdkEnum(sdk), tag=tag, filepath=filepath, status=STATUS_UNSPECIFIED, type=_get_object_type(filename, filepath), code=_get_content(filepath, tag.line_start, tag.line_finish), url_vcs=_get_url_vcs(filepath), context_line=context_line)
|
Return an Example by filepath and filename.
Args:
filepath: path of the example's file.
filename: name of the example's file.
tag: tag of the example.
sdk: SDK of the example (numeric value converted to SdkEnum).
Returns:
Parsed Example object.
|
github-repos
|
def read(self, domain, type_name, search_command, body=None):
return self._request(domain, type_name, search_command, 'GET', body)
|
Read entry in ThreatConnect Data Store
Args:
domain (string): One of 'local', 'organization', or 'system'.
type_name (string): This is a free form index type name. The ThreatConnect API will use
this resource verbatim.
search_command (string): Search command to pass to ES.
body (str): JSON body
|
juraj-google-style
|
def _build(self, inputs, prev_state):
input_size = inputs.get_shape()[1]
weight_shape = (input_size, self._hidden_size)
u_shape = (self._hidden_size, self._hidden_size)
bias_shape = (self._hidden_size,)
def _get_variable(name, shape):
return tf.get_variable(name, shape, dtype=inputs.dtype, initializer=self._initializers.get(name), partitioner=self._partitioners.get(name), regularizer=self._regularizers.get(name))
pre_highway_wt = _get_variable(self.WT, weight_shape)
pre_highway_wh = _get_variable(self.WH, weight_shape)
state = prev_state
for layer_index in xrange(self._num_layers):
layer_str = str(layer_index)
layer_wt = _get_variable((self.WT + layer_str), u_shape)
layer_bt = _get_variable((self.BT + layer_str), bias_shape)
layer_wh = _get_variable((self.WH + layer_str), u_shape)
layer_bh = _get_variable((self.BH + layer_str), bias_shape)
linear_t = (tf.matmul(state, layer_wt) + layer_bt)
linear_h = (tf.matmul(state, layer_wh) + layer_bh)
if (layer_index == 0):
linear_t += tf.matmul(inputs, pre_highway_wt)
linear_h += tf.matmul(inputs, pre_highway_wh)
output_t = tf.sigmoid(linear_t)
output_h = tf.tanh(linear_h)
state = ((state * (1 - output_t)) + (output_h * output_t))
return (state, state)
|
Connects the highway core module into the graph.
Args:
inputs: Tensor of size `[batch_size, input_size]`.
prev_state: Tensor of size `[batch_size, hidden_size]`.
Returns:
A tuple (output, next_state) where `output` is a Tensor of size
`[batch_size, hidden_size]` and `next_state` is a Tensor of size
`[batch_size, hidden_size]`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
|
codesearchnet
|
def get_block_details(self, block_ids):
if (not hasattr(block_ids, '__iter__')):
block_ids = [block_ids]
for _id in block_ids:
block_key = self._db.get_block(_id)[0]
block_data = self._db.get_all_field_value(block_key)
for key in block_data:
for char in ['[', '{']:
if (char in block_data[key]):
block_data[key] = ast.literal_eval(str(block_data[key]))
(yield block_data)
|
Get details of scheduling or processing block
Args:
block_ids (list): List of block IDs
|
codesearchnet
|
def weak_scaling(timing_stats, scaling_var, data_points):
timing_data = dict()
proc_counts = []
bench_means = []
bench_mins = []
bench_maxs = []
model_means = []
model_mins = []
model_maxs = []
for point in data_points:
size = point[0]
proc = point[1]
try:
model_data = timing_stats[size][proc]['model'][scaling_var]
bench_data = timing_stats[size][proc]['bench'][scaling_var]
except KeyError:
continue
proc_counts.append(proc)
model_means.append(model_data['mean'])
model_mins.append(model_data['min'])
model_maxs.append(model_data['max'])
bench_means.append(bench_data['mean'])
bench_mins.append(bench_data['min'])
bench_maxs.append(bench_data['max'])
timing_data['bench'] = dict(mins=bench_mins, means=bench_means, maxs=bench_maxs)
timing_data['model'] = dict(mins=model_mins, means=model_means, maxs=model_maxs)
timing_data['proc_counts'] = [int(pc[1:]) for pc in proc_counts]
return timing_data
|
Generate data for plotting weak scaling. The data points keep
a constant amount of work per processor for each data point.
Args:
timing_stats: the result of the generate_timing_stats function
scaling_var: the variable to select from the timing_stats dictionary
(can be provided in configurations via the 'scaling_var' key)
data_points: the list of size and processor counts to use as data
(can be provided in configurations via the 'weak_scaling_points' key)
Returns:
A dict of the form:
{'bench' : {'mins' : [], 'means' : [], 'maxs' : []},
'model' : {'mins' : [], 'means' : [], 'maxs' : []},
'proc_counts' : []}
|
codesearchnet
|
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
|
Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
|
juraj-google-style
|
def download_image(self, device_label, image_id, file_name):
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
|
Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
|
juraj-google-style
|
def get_eligible_features(examples, num_mutants):
features_dict = (
get_numeric_features_to_observed_range(
examples))
features_dict.update(
get_categorical_features_to_sampling(
examples, num_mutants))
features_list = []
for k, v in sorted(features_dict.items()):
v['name'] = k
features_list.append(v)
return features_list
|
Returns a list of JSON objects for each feature in the examples.
This list is used to drive partial dependence plots in the plugin.
Args:
examples: Examples to examine to determine the eligible features.
num_mutants: The number of mutations to make over each feature.
Returns:
A list with a JSON object for each feature.
Numeric features are represented as {name: observedMin: observedMax:}.
Categorical features are represented as {name: samples:[]}.
|
juraj-google-style
|
def _parse_compound_info(self, line):
for (k, regexes) in six.iteritems(self.compound_regex):
for reg in regexes:
if self.compound_info[k]:
continue
m = re.search(reg, line, re.IGNORECASE)
if m:
self.compound_info[k] = m.group(1).strip()
self._get_other_names(line)
|
Parse and extract all compound data by looping through the dictionary of compound_info regexes.
Updates self.compound_info.
Args:
line (str): line of the msp file
|
codesearchnet
|
def _make_intermediates_match(branch_graphs, branch_optionals):
new_branch_optionals = []
intermediates_size = max((len(o) for o in branch_optionals))
for i, branch_graph in enumerate(branch_graphs):
other_optionals = _create_none_optionals(branch_graph, intermediates_size - len(branch_optionals[i]))
new_branch_optionals.append(branch_optionals[i] + other_optionals)
return new_branch_optionals
|
Returns new optionals lists that have matching signatures.
This is done by mirroring each list in the other using none optionals.
There is no merging of like optionals.
Args:
branch_graphs: `list` of `FuncGraph`.
branch_optionals: `list` of `list`s of optional `Tensor`s from other
branch_graphs
Returns:
A `list` of `list`s of `Tensor`s for each branch_graph. Each list has the
same number of `Tensor`s, all of which will be optionals of the same
shape/type.
|
github-repos
|
def _check_for_definition(iface, cls, tag, defines):
attributes = (attr for attr in iface.__abstractmethods__ if hasattr(getattr(iface, attr), tag))
for attribute in attributes:
for node in cls.__mro__:
if (hasattr(node, attribute) and defines(getattr(node, attribute))):
return True
try:
attribute
return False
except NameError:
return True
|
Check for a valid definition of a value.
Args:
iface (Iface): An Iface specification.
cls (type): Some type to check for a definition.
tag (str): The name of the tag attribute used to mark the abstract
methods.
defines (callable): A callable that accepts an attribute and returns
True if the attribute is a valid definition.
Returns:
bool: Whether or not the definition is found.
|
codesearchnet
|
def inject_argument_info_in_traceback(fn, object_name=None):
if backend.backend() == 'tensorflow':
from tensorflow import errors as tf_errors
else:
tf_errors = None
@wraps(fn)
def error_handler(*args, **kwargs):
if not is_traceback_filtering_enabled():
return fn(*args, **kwargs)
signature = None
bound_signature = None
try:
return fn(*args, **kwargs)
except Exception as e:
if hasattr(e, '_keras_call_info_injected'):
raise e
signature = inspect.signature(fn)
try:
bound_signature = signature.bind(*args, **kwargs)
except TypeError:
raise e
arguments_context = []
for arg in list(signature.parameters.values()):
if arg.name in bound_signature.arguments:
value = tree.map_structure(format_argument_value, bound_signature.arguments[arg.name])
else:
value = arg.default
arguments_context.append(f' • {arg.name}={value}')
if arguments_context:
arguments_context = '\n'.join(arguments_context)
if tf_errors is not None and isinstance(e, tf_errors.OpError):
message = e.message
elif e.args:
message = e.args[0]
else:
message = ''
display_name = f'{(object_name if object_name else fn.__name__)}'
message = f'Exception encountered when calling {display_name}.\n\n\x1b[1m{message}\x1b[0m\n\nArguments received by {display_name}:\n{arguments_context}'
if tf_errors is not None and isinstance(e, tf_errors.OpError):
new_e = e.__class__(e.node_def, e.op, message, e.error_code)
else:
try:
new_e = e.__class__(message)
except TypeError:
new_e = RuntimeError(message)
new_e._keras_call_info_injected = True
else:
new_e = e
raise new_e.with_traceback(e.__traceback__) from None
finally:
del signature
del bound_signature
return error_handler
|
Add information about call argument values to an error message.
Arguments:
fn: Function to wrap. Exceptions raised by this function will be
re-raised with additional information added to the error message,
displaying the values of the different arguments that the function
was called with.
object_name: String, display name of the class/function being called,
e.g. `'layer "layer_name" (LayerClass)'`.
Returns:
A wrapped version of `fn`.
|
github-repos
|
def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Any, Any]:
hidden_states_2, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions)
hidden_states_2 = nn.functional.dropout(hidden_states_2, p=self.dropout, training=self.training)
hidden_states = hidden_states + hidden_states_2
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
cross_attn_weights = None
hidden_states = hidden_states if position_embeddings is None else hidden_states + position_embeddings
hidden_states_2, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list)
hidden_states_2 = nn.functional.dropout(hidden_states_2, p=self.dropout, training=self.training)
hidden_states = self.gateway(residual, hidden_states_2)
hidden_states_2 = self.activation_fn(self.fc1(hidden_states))
hidden_states_2 = nn.functional.dropout(hidden_states_2, p=self.activation_dropout, training=self.training)
hidden_states_2 = self.fc2(hidden_states_2)
hidden_states_2 = nn.functional.dropout(hidden_states_2, p=self.dropout, training=self.training)
hidden_states = hidden_states + hidden_states_2
hidden_states = self.final_layer_norm(hidden_states.clamp(min=-65504, max=65504))
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
|
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(seq_len, batch, embed_dim)`.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings that are added to the queries and keys in the self-attention layer.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes (`torch.LongTensor`, *optional*):
Spatial shapes.
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
|
github-repos
|
def pseudo_with_symbol(self, symbol, allow_multi=False):
pseudos = self.select_symbols(symbol, ret_list=True)
if ((not pseudos) or ((len(pseudos) > 1) and (not allow_multi))):
raise ValueError(('Found %d occurrences of symbol %s' % (len(pseudos), symbol)))
if (not allow_multi):
return pseudos[0]
else:
return pseudos
|
Return the pseudo with the given chemical symbol.
Args:
symbol: String with the chemical symbol of the element
allow_multi: By default, the method raises ValueError
if multiple occurrences are found. Use allow_multi to prevent this.
Raises:
ValueError if symbol is not found or multiple occurrences are present and not allow_multi
|
codesearchnet
|
def __init__(self, formatter, object_representer):
super().__init__(formatter)
self.object_representer = object_representer
logger.debug('obj loader set')
|
Initialize formatter and object representer.
Args:
formatter: Callable object/function that will format object loaded
from in file. Formatter signature:
iterable = formatter(iterable)
object_representer: An ObjectRepresenter instance.
|
juraj-google-style
|
def stop_server(self, grace=1.0):
self._server_lock.acquire()
try:
if not self._server_started:
raise ValueError('Server has not started running')
if self._stop_requested:
raise ValueError('Server has already stopped')
self._stop_requested = True
return self.server.stop(grace=grace)
finally:
self._server_lock.release()
|
Request server stopping.
Once stopped, server cannot be stopped or started again. This method is
non-blocking. Call `wait()` on the returned event to block until the server
has completely stopped.
Args:
grace: Grace period in seconds to be used when calling `server.stop()`.
Raises:
ValueError: If server stop has already been requested, or if the server
has not started running yet.
Returns:
A threading.Event that will be set when the server has completely stopped.
|
github-repos
|
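A self-contained sketch of the stop-then-wait pattern described in the stop_server entry above, using a plain threading.Event in place of the real gRPC server; the names are illustrative only.

import threading

stopped = threading.Event()

def fake_stop(grace):
    # Pretend the server needs `grace` seconds to drain, then signal completion.
    threading.Timer(grace, stopped.set).start()
    return stopped

event = fake_stop(grace=0.5)
event.wait()          # blocks until the "server" reports it has fully stopped
print("server stopped")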
def _AddEdge(self, start_node, end_node):
self.graph[start_node].outgoing.append(end_node)
if (end_node in self.graph):
self.graph[end_node].incoming.append(start_node)
|
Add a directed edge to the graph.
Add the end to the list of outgoing nodes of the start and the start to the
list of incoming nodes of the end node.
Args:
start_node: name of the start node
end_node: name of the end node
|
codesearchnet
|
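A minimal, self-contained sketch of the adjacency bookkeeping in the _AddEdge entry above; the Node and Graph classes here are illustrative stand-ins.

import collections

class Node:
    def __init__(self):
        self.incoming = []
        self.outgoing = []

class Graph:
    def __init__(self):
        self.graph = collections.defaultdict(Node)

    def _AddEdge(self, start_node, end_node):
        self.graph[start_node].outgoing.append(end_node)
        if end_node in self.graph:
            self.graph[end_node].incoming.append(start_node)

g = Graph()
g.graph["a"]
g.graph["b"]                      # materialize both nodes first
g._AddEdge("a", "b")
print(g.graph["a"].outgoing)      # ['b']
print(g.graph["b"].incoming)      # ['a']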
def put(self, key, value):
if value is None:
self.delete(key)
else:
self._collection(key)[key] = value
|
Stores the object `value` named by `key`.
Stores the object in the collection corresponding to ``key.path``.
Args:
key: Key naming `value`
value: the object to store.
|
juraj-google-style
|
def aggregate(self):
(_, indices, inverse) = np.unique(self.record.sample, axis=0, return_index=True, return_inverse=True)
order = np.argsort(indices)
indices = indices[order]
record = self.record[indices]
record.num_occurrences = 0
for (old_idx, new_idx) in enumerate(inverse):
new_idx = order[new_idx]
record[new_idx].num_occurrences += self.record[old_idx].num_occurrences
return type(self)(record, self.variables, copy.deepcopy(self.info), self.vartype)
|
Create a new SampleSet with repeated samples aggregated.
Returns:
:obj:`.SampleSet`
Note:
:attr:`.SampleSet.record.num_occurrences` are accumulated but no
other fields are.
|
codesearchnet
|
def get_task_scfcycles(self, nids=None, wslice=None, task_class=None, exclude_ok_tasks=False):
select_status = [self.S_RUN] if exclude_ok_tasks else [self.S_RUN, self.S_OK]
tasks_cycles = []
for task in self.select_tasks(nids=nids, wslice=wslice):
if task.status not in select_status or task.cycle_class is None:
continue
if task_class is not None and not task.isinstance(task_class):
continue
try:
cycle = task.cycle_class.from_file(task.output_file.path)
if cycle is not None:
tasks_cycles.append((task, cycle))
except Exception:
pass
return tasks_cycles
|
Return a list of (task, scfcycle) tuples for all the tasks in the flow with an SCF algorithm,
e.g. electronic GS-SCF iterations, DFPT-SCF iterations, etc.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
task_class: String or class used to select tasks. Ignored if None.
exclude_ok_tasks: True if only running tasks should be considered.
Returns:
List of `ScfCycle` subclass instances.
|
juraj-google-style
|
def get_country_by_name(self, country_name: str) -> typing.Optional['Country']:
VALID_STR.validate(country_name, 'get_country_by_name', exc=ValueError)
if country_name not in self._countries_by_name.keys():
for country in self.countries:
if country.country_name == country_name:
self._countries_by_name[country_name] = country
return country
raise ValueError(country_name)
else:
return self._countries_by_name[country_name]
|
Gets a country from its name
Args:
country_name: country name
Returns: Country
|
juraj-google-style
|
def sampling_query(sql, fields=None, count=5, sampling=None):
if (sampling is None):
sampling = Sampling.default(count=count, fields=fields)
return sampling(sql)
|
Returns a sampling query for the SQL object.
Args:
sql: the SQL object to sample
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
Returns:
A SQL query string for sampling the input sql.
|
codesearchnet
|
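An illustrative stand-in for the default strategy used by sampling_query above when sampling is None: a callable that wraps the SQL with a column projection and a LIMIT. The real Sampling class may build different SQL; this is only a sketch.

class Sampling:
    @staticmethod
    def default(count=5, fields=None):
        cols = ", ".join(fields) if fields else "*"
        return lambda sql: "SELECT %s FROM (%s) LIMIT %d" % (cols, sql, count)

print(Sampling.default(count=3, fields=["ts", "msg"])("SELECT * FROM logs"))
# SELECT ts, msg FROM (SELECT * FROM logs) LIMIT 3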
async def update(
self,
service_id: str,
version: str,
*,
image: str = None,
rollback: bool = False
) -> bool:
if image is None and rollback is False:
raise ValueError("You need to specify an image.")
inspect_service = await self.inspect(service_id)
spec = inspect_service["Spec"]
if image is not None:
spec["TaskTemplate"]["ContainerSpec"]["Image"] = image
params = {"version": version}
if rollback is True:
params["rollback"] = "previous"
data = json.dumps(clean_map(spec))
await self.docker._query_json(
"services/{service_id}/update".format(service_id=service_id),
method="POST",
data=data,
params=params,
)
return True
|
Update a service.
If rollback is True image will be ignored.
Args:
service_id: ID or name of the service.
version: Version of the service that you want to update.
image: Image to update the service to. Required unless rollback is True.
rollback: Rollback the service to the previous service spec.
Returns:
True if successful.
|
juraj-google-style
|
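A hypothetical calling pattern for the update coroutine above, assuming an aiodocker-style client whose `services` helper exposes inspect()/update() with the signatures shown, plus a reachable Docker daemon; it will not run without one, and the service and image names are made up.

import asyncio

async def roll_out(docker):
    svc = await docker.services.inspect("web")
    version = svc["Version"]["Index"]            # current spec version from the inspect payload
    await docker.services.update("web", str(version), image="nginx:1.25")

# asyncio.run(roll_out(docker_client))           # requires a running Docker daemon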
def issue_closed(issue_key, server=None, username=None, password=None):
if (not issue_key):
return None
jira_ = _get_jira(server=server, username=username, password=password)
try:
ticket = jira_.issue(issue_key)
except jira.exceptions.JIRAError:
return None
return (ticket.fields().status.name == 'Closed')
|
Check if the issue is closed.
issue_key
The JIRA ID of the ticket to check.
Returns:
- ``True``: the ticket exists and it is closed.
- ``False``: the ticket exists and it has not been closed.
- ``None``: the ticket does not exist.
CLI Example:
.. code-block:: bash
salt '*' jira.issue_closed NE-123
|
codesearchnet
|
def fn_with_code_in_docstring():
return True
|
This has code in the docstring.
Example:
x = fn_with_code_in_docstring()
indentation_matters = True
Returns:
True.
|
github-repos
|
def update_scores(self, scores: torch.FloatTensor, g_values: torch.FloatTensor) -> torch.FloatTensor:
_, _, depth = g_values.shape
probs = torch.softmax(scores, dim=1)
for i in range(depth):
g_values_at_depth = g_values[:, :, i]
g_mass_at_depth = (g_values_at_depth * probs).sum(axis=1, keepdims=True)
probs = probs * (1 + g_values_at_depth - g_mass_at_depth)
log_probs = torch.log(probs)
log_probs = torch.where(torch.isfinite(log_probs), log_probs, torch.finfo(log_probs.dtype).min)
return log_probs
|
Updates scores using the g values.
We assume that the scores are in the log space.
Args:
scores (`torch.FloatTensor`): Scores (batch_size, vocab_size).
g_values (`torch.FloatTensor`): G values (batch_size, vocab_size, depth).
Returns:
Updated scores (batch_size, vocab_size).
|
github-repos
|
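A tiny numerical sketch of the reweighting in update_scores above, with illustrative values: tokens whose g value exceeds the probability-weighted mean g gain probability mass, the others lose it, and the total still sums to 1.

import torch

scores = torch.log(torch.tensor([[0.5, 0.3, 0.2]]))      # (batch=1, vocab=3)
g_values = torch.tensor([[[1.0], [0.0], [1.0]]])          # (1, 3, depth=1)

probs = torch.softmax(scores, dim=1)                      # recovers [0.5, 0.3, 0.2]
g = g_values[:, :, 0]
g_mass = (g * probs).sum(dim=1, keepdim=True)             # 0.5*1 + 0.3*0 + 0.2*1 = 0.7
probs = probs * (1 + g - g_mass)
print(probs)          # tensor([[0.65, 0.09, 0.26]])
print(probs.sum())    # 1.0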
def num_samples(self, dataset_split):
return {problem.DatasetSplit.TRAIN: 1000000, problem.DatasetSplit.EVAL: 10000, problem.DatasetSplit.TEST: 10000}[dataset_split]
|
Determine the dataset size given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The desired number of samples for this dataset_split.
|
codesearchnet
|
def delete(self, interface, vrid):
vrrp_str = ('no vrrp %d' % vrid)
return self.configure_interface(interface, vrrp_str)
|
Deletes a vrrp instance from an interface
Note:
This method will attempt to delete the vrrp from the node's
operational config. If the vrrp does not exist on the
interface then this method will not perform any changes
but still return True
Args:
interface (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be deleted.
Returns:
True if the vrrp could be deleted otherwise False (see Node)
|
codesearchnet
|
def assert_rank_in(x, ranks, data=None, summarize=None, message=None, name=None):
with ops.name_scope(name, 'assert_rank_in', (x,) + tuple(ranks) + tuple(data or [])):
if not isinstance(x, sparse_tensor.SparseTensor):
x = ops.convert_to_tensor(x, name='x')
ranks = tuple([ops.convert_to_tensor(rank, name='rank') for rank in ranks])
message = _message_prefix(message)
if context.executing_eagerly() or isinstance(x, sparse_tensor.SparseTensor):
name = ''
else:
name = x.name
if data is None:
data = [message, 'Tensor %s must have rank in' % name] + list(ranks) + ['Received shape: ', array_ops.shape(x)]
try:
assert_op = _assert_ranks_condition(x, ranks, _static_rank_in, _dynamic_rank_in, data, summarize)
except ValueError as e:
if e.args[0] == 'Static rank condition failed':
raise ValueError('%sTensor %s must have rank in %s. Received rank %d, shape %s' % (message, name, tuple((r.item() for r in e.args[2])), e.args[1], x.get_shape()))
else:
raise
return assert_op
|
Assert `x` has rank in `ranks`.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.compat.v1.assert_rank_in(x, (2, 4))]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
ranks: Iterable of scalar `Tensor` objects.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_rank_in".
Returns:
Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`.
If static checks determine `x` has matching rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has mismatched rank.
|
github-repos
|
def select_serial_number_row(self, serial_number):
sheet = self.table
col = self.db_sheet_cols.id
    rows = (sheet.loc[:, col] == serial_number)
    return sheet.loc[rows, :]
|
Select row for identification number serial_number
Args:
serial_number: serial number
Returns:
pandas.DataFrame
|
codesearchnet
|
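A self-contained sketch of the row selection in select_serial_number_row above using plain pandas; the column names here are illustrative.

import pandas as pd

sheet = pd.DataFrame({"id": [101, 102, 103], "label": ["a", "b", "c"]})
rows = sheet.loc[:, "id"] == 102        # boolean mask over the id column
print(sheet.loc[rows, :])               # the single matching row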
def add(self, arg, options=None):
fut = tasklets.Future(('%s.add(%s, %s)' % (self, arg, options)))
todo = self._queues.get(options)
if (todo is None):
utils.logging_debug('AutoBatcher(%s): creating new queue for %r', self._todo_tasklet.__name__, options)
if (not self._queues):
eventloop.add_idle(self._on_idle)
todo = self._queues[options] = []
todo.append((fut, arg))
if (len(todo) >= self._limit):
del self._queues[options]
self.run_queue(options, todo)
return fut
|
Adds an arg and gets back a future.
Args:
arg: one argument for _todo_tasklet.
options: rpc options.
Return:
An instance of future, representing the result of running
_todo_tasklet without batching.
|
codesearchnet
|
def check_config_attributes_being_used(config_class):
signature = dict(inspect.signature(config_class.__init__).parameters)
parameter_names = [x for x in list(signature.keys()) if x not in ['self', 'kwargs']]
parameter_defaults = [signature[param].default for param in parameter_names]
reversed_attribute_map = {}
if len(config_class.attribute_map) > 0:
reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
config_source_file = inspect.getsourcefile(config_class)
model_dir = os.path.dirname(config_source_file)
modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith('modeling_')]
modeling_sources = []
for path in modeling_paths:
if os.path.isfile(path):
with open(path, encoding='utf8') as fp:
modeling_sources.append(fp.read())
unused_attributes = []
for config_param, default_value in zip(parameter_names, parameter_defaults):
attributes = [config_param]
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param])
if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
unused_attributes.append(attributes[0])
return sorted(unused_attributes)
|
Check the arguments in `__init__` of `config_class` are used in the modeling files in the same directory
Args:
config_class (`type`):
The configuration class for which the arguments in its `__init__` will be checked.
|
github-repos
|
def create(cls, session, attributes=None, relationships=None):
resource_type = cls._resource_type()
resource_path = cls._resource_path()
url = session._build_url(resource_path)
json = build_request_body(resource_type, None, attributes=attributes, relationships=relationships)
process = cls._mk_one(session)
return session.post(url, CB.json(201, process), json=json)
|
Create a resource of this type.
This should only be called from sub-classes
Args:
session(Session): The session to create the resource in.
attributes(dict): Any attributes that are valid for the
given resource type.
relationships(dict): Any relationships that are valid for the
given resource type.
Returns:
Resource: An instance of a resource.
|
codesearchnet
|
def start(self, name: str, increment_count: bool = True) -> None:
if not self._timing:
return
now = get_now_utc_pendulum()
if self._stack:
last = self._stack[-1]
self._totaldurations[last] += now - self._starttimes[last]
if name not in self._starttimes:
self._totaldurations[name] = datetime.timedelta()
self._count[name] = 0
self._starttimes[name] = now
if increment_count:
self._count[name] += 1
self._stack.append(name)
|
Start a named timer.
Args:
name: name of the timer
increment_count: increment the start count for this timer
|
juraj-google-style
|
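A condensed, self-contained analog of the timer bookkeeping in the start entry above: starting a new timer pauses the one on top of the stack and accumulates its elapsed time. The dict-based layout is illustrative, not the real class.

import datetime

starttimes, totals, stack = {}, {}, []

def start(name):
    now = datetime.datetime.now()
    if stack:
        last = stack[-1]
        totals[last] += now - starttimes[last]
    totals.setdefault(name, datetime.timedelta())
    starttimes[name] = now
    stack.append(name)

start("outer")
start("inner")          # "outer" stops accumulating while "inner" runs
print(sorted(totals))   # ['inner', 'outer']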
def get(cls, keyval, key='id', user_id=None):
if (keyval is None):
return None
if ((key in cls.__table__.columns) and cls.__table__.columns[key].primary_key):
return cls.query.get(keyval)
else:
result = cls.query.filter((getattr(cls, key) == keyval))
return result.first()
|
Fetches a single instance which has value `keyval`
for the attribute `key`.
Args:
keyval: The value of the attribute.
key (str, optional): The attribute to search by. By default,
it is 'id'.
Returns:
A model instance if found. Else None.
Examples:
>>> User.get(35)
user35@i.com
>>> User.get('user35@i.com', key='email')
user35@i.com
|
codesearchnet
|
def safe_rt(resource_type, lower=False):
if (resource_type is not None):
resource_type = resource_type.replace(' ', '_')
if lower:
resource_type = resource_type.lower()
return resource_type
|
Format the Resource Type.
Takes Custom Indicator types with a space character and returns a *safe* string.
(e.g. *User Agent* is converted to User_Agent or user_agent.)
Args:
resource_type (string): The resource type to format.
lower (boolean): Return type in all lower case
Returns:
(string): The formatted resource type.
|
codesearchnet
|
def start(self, hostname=None, port=None, templates_path=None):
self.hostname = (hostname if hostname else 'localhost')
if port:
self.port = port
elif (not self.port):
self.port = unused_port(self.hostname)
if templates_path:
self.loaders.insert(0, jinja2.FileSystemLoader(templates_path))
self._set_loaders()
self.setup_routes()
self.runner = aioweb.AppRunner(self.app)
return self.agent.submit(start_server_in_loop(self.runner, self.hostname, self.port, self.agent))
|
Starts the web interface.
Args:
hostname (str, optional): host name to listen from. (Default value = None)
port (int, optional): port to listen from. (Default value = None)
templates_path (str, optional): path to look for templates. (Default value = None)
|
codesearchnet
|
def makefile(self):
return self.env.get_template('Makefile.j2').render(metadata=self.metadata, package=self.package)
|
Generate the documentation Makefile.
Returns:
(str): the contents of the `Makefile`.
|
codesearchnet
|
def get_what_follows_raw(s: str, prefix: str, onlyatstart: bool=True, stripwhitespace: bool=True) -> Tuple[(bool, str)]:
prefixstart = s.find(prefix)
if (((prefixstart == 0) and onlyatstart) or ((prefixstart != (- 1)) and (not onlyatstart))):
resultstart = (prefixstart + len(prefix))
result = s[resultstart:]
if stripwhitespace:
result = result.strip()
return (True, result)
return (False, '')
|
Find the part of ``s`` that is after ``prefix``.
Args:
s: string to analyse
prefix: prefix to find
onlyatstart: only accept the prefix if it is right at the start of
``s``
stripwhitespace: remove whitespace from the result
Returns:
tuple: ``(found, result)``
|
codesearchnet
|
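A short usage sketch for get_what_follows_raw above, assuming the function as defined there is in scope.

found, rest = get_what_follows_raw("Name:  Alice", "Name:")
print(found, repr(rest))        # True 'Alice'
found, rest = get_what_follows_raw("x Name: Bob", "Name:", onlyatstart=True)
print(found, repr(rest))        # False ''  (prefix present but not at the start)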
def prepend(self, key, value, expire=0, noreply=None):
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'prepend', {key: value}, expire, noreply)[key]
|
The memcached "prepend" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True.
|
juraj-google-style
|
def tanh(x):
return ops.tanh(x)
|
Hyperbolic tangent activation function.
It is defined as:
`tanh(x) = sinh(x) / cosh(x)`, i.e.
`tanh(x) = ((exp(x) - exp(-x)) / (exp(x) + exp(-x)))`.
Args:
x: Input tensor.
|
github-repos
|
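A quick numerical check of the identity quoted in the tanh docstring above, using NumPy in place of the Keras ops backend.

import numpy as np

x = np.array([-2.0, 0.0, 2.0])
lhs = np.tanh(x)
rhs = (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))
print(np.allclose(lhs, rhs))    # True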
def disconnect_sync(self, connection_handle):
self.bable.disconnect(connection_handle=connection_handle, sync=True)
|
Synchronously disconnect from whoever has connected to us
Args:
connection_handle (int): The handle of the connection we wish to disconnect.
|
juraj-google-style
|
def predict_on_batch(self, x):
self._check_call_args('predict_on_batch')
if self._distribution_strategy and distribute_lib.in_cross_replica_context():
raise NotImplementedError('`predict_on_batch` is not supported for models distributed with tf.distribute.Strategy.')
inputs, _, _ = self._standardize_user_data(x, extract_tensors_from_dataset=True)
if self.run_eagerly or self._distribution_strategy:
inputs = training_utils_v1.cast_if_floating_dtype(inputs)
if isinstance(inputs, collections.abc.Sequence):
if len(inputs) == 1:
inputs = inputs[0]
return self(inputs)
self._make_predict_function()
outputs = self.predict_function(inputs)
if len(outputs) == 1:
return outputs[0]
return outputs
|
Returns predictions for a single batch of samples.
Args:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
|
github-repos
|
def replace(self, pattern, replacement):
for (i, line) in enumerate(self):
if (pattern in line):
self[i] = line.replace(pattern, replacement)
|
Replace all instances of a pattern with a replacement.
Args:
pattern (str): Pattern to replace
replacement (str): Text to insert
|
codesearchnet
|
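A self-contained sketch of the in-place replace above, applied to a list subclass; the LineList name is illustrative.

class LineList(list):
    def replace(self, pattern, replacement):
        for i, line in enumerate(self):
            if pattern in line:
                self[i] = line.replace(pattern, replacement)

lines = LineList(["foo = 1", "bar = foo + 1"])
lines.replace("foo", "baz")
print(lines)    # ['baz = 1', 'bar = baz + 1']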
def _insert_layers(self, layers, relevant_nodes=None):
layers = nest.flatten(layers)
tf_utils.assert_no_legacy_layers(layers)
node_to_depth = {}
for depth, nodes in self._nodes_by_depth.items():
node_to_depth.update({node: depth for node in nodes})
if not relevant_nodes:
relevant_nodes = nest.flatten([layer._inbound_nodes for layer in layers])
network_nodes = set(relevant_nodes + list(node_to_depth.keys()))
def _get_min_depth(node):
min_depth = 0
for layer, node_id, _, _ in node.iterate_inbound():
inbound_node = layer._inbound_nodes[node_id]
if inbound_node in node_to_depth:
min_depth = min(min_depth, node_to_depth[inbound_node])
elif inbound_node not in network_nodes:
continue
else:
return None
return min_depth - 1
unprocessed_nodes = copy.copy(relevant_nodes)
i = 0
while unprocessed_nodes:
i += 1
if i > 10000:
raise ValueError('Layers could not be added due to missing dependencies.')
node = unprocessed_nodes.pop(0)
depth = _get_min_depth(node)
if depth is None:
unprocessed_nodes.append(node)
continue
node_key = _make_node_key(node.layer.name, node.layer._inbound_nodes.index(node))
if node_key not in self._network_nodes:
node_to_depth[node] = depth
self._network_nodes.add(node_key)
self._nodes_by_depth[depth].append(node)
layer_set = set(self._self_tracked_trackables)
deferred_layers = []
for layer in layers:
if layer not in layer_set:
self._self_tracked_trackables.append(layer)
deferred_layers.append(layer)
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
layer_set.add(layer)
self._handle_deferred_layer_dependencies(deferred_layers)
self._compute_tensor_usage_count()
|
Inserts Layers into the Network after Network creation.
This is only valid for Keras Graph Networks. Layers added via this function
will be included in the `call` computation and `get_config` of this Network.
They will not be added to the Network's outputs.
Args:
layers: Arbitrary nested structure of Layers. Layers must be reachable
from one or more of the `keras.Input` Tensors that correspond to this
Network's inputs.
relevant_nodes: Nodes from the Layers that should be considered part of
this Network. If `None`, all Nodes will be considered part of this
Network.
Raises:
ValueError: If the layers depend on `Input`s not found in this Model.
|
github-repos
|
def CheckAltTokens(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
  if Match(r'^\s*#', line):
    return
  if line.find('/*') >= 0 or line.find('*/') >= 0:
    return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
|
Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
juraj-google-style
|
def parse_args(self, arglist=None):
args = self._parser.parse_args(args=arglist)
sub_cmd = args.loam_sub_name
if (sub_cmd is None):
for (opt, sct) in self._opt_bare.items():
self._conf[sct][opt] = getattr(args, opt, None)
else:
for (opt, sct) in self._opt_cmds[sub_cmd].items():
self._conf[sct][opt] = getattr(args, opt, None)
return args
|
Parse arguments and update options accordingly.
Args:
arglist (list of str): list of arguments to parse. If set to None,
``sys.argv[1:]`` is used.
Returns:
:class:`Namespace`: the argument namespace returned by the
:class:`argparse.ArgumentParser`.
|
codesearchnet
|
def convert_to_rgb(video: np.array, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array:
if not isinstance(video, np.ndarray):
raise ValueError(f'Video has to be a numpy array to convert to RGB format, but found {type(video)}')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(video)
video = to_channel_dimension_format(video, ChannelDimension.FIRST, input_channel_dim=input_data_format)
if video.shape[-3] == 3:
return video
if video.shape[-3] == 1:
return video.repeat(3, -3)
if not (video[..., 3, :, :] < 255).any():
return video
alpha = video[..., 3, :, :] / 255.0
    video = (1 - alpha[..., None, :, :]) * 255 + alpha[..., None, :, :] * video[..., :3, :, :]
return video
|
Convert video to RGB by blending the transparency layer if it's in RGBA format, otherwise simply returns it.
Args:
video (`np.array`):
The video to convert.
data_format (`ChannelDimension`, *optional*):
The channel dimension format of the output video. If unset, will use the inferred format from the input.
input_data_format (`ChannelDimension`, *optional*):
The channel dimension format of the input video. If unset, will use the inferred format from the input.
|
github-repos
|
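A tiny numerical sketch of the alpha blend in convert_to_rgb above against a white background, out = (1 - alpha) * 255 + alpha * rgb, applied channels-first; the frame values are made up.

import numpy as np

frame = np.zeros((4, 2, 2), dtype=np.float64)   # RGBA, channels-first
frame[0] = 200                                   # red channel
frame[3] = 128                                   # ~50% alpha

alpha = frame[3] / 255.0
rgb = (1 - alpha)[None] * 255 + alpha[None] * frame[:3]
print(rgb[0, 0, 0])   # ~227.4: half-transparent red blended onto white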
def compose(*coros):
coros = list(coros)
@asyncio.coroutine
def reducer(acc, coro):
return (yield from coro(acc))
@asyncio.coroutine
def wrapper(acc):
return (yield from reduce(reducer, coros, initializer=acc, right=True))
return wrapper
|
Creates a coroutine function based on the composition of the passed
coroutine functions.
Each function consumes the yielded result of the coroutine that follows.
Composing coroutine functions f(), g(), and h() would produce
the result of f(g(h())).
Arguments:
*coros (coroutinefunction): variadic coroutine functions to compose.
Raises:
RuntimeError: if cannot execute a coroutine function.
Returns:
coroutinefunction
Usage::
async def sum_1(num):
return num + 1
async def mul_2(num):
return num * 2
coro = paco.compose(sum_1, mul_2, sum_1)
await coro(2)
# => 7
|
codesearchnet
|