code | docstring | source |
---|---|---|
def labels(self):
if (not self.__labels):
self.__labels = Labels(self.__connection)
return self.__labels | Gets the Labels API client.
Returns:
Labels: | codesearchnet |
def predict(fqdn, result, *argl, **argd):
out = None
if len(argl) > 0:
machine = argl[0]
if isclassifier(machine):
out = classify_predict(fqdn, result, None, *argl, **argd)
elif isregressor(machine):
out = regress_predict(fqdn, result, None, *argl, **argd)
return out | Analyzes the result of a generic predict operation performed by
`sklearn`.
Args:
fqdn (str): fully-qualified name of the method that was called.
result: result of calling the method with `fqdn`.
argl (tuple): positional arguments passed to the method call.
argd (dict): keyword arguments passed to the method call. | juraj-google-style |
def reqHeadTimeStamp(
self, contract: Contract, whatToShow: str,
useRTH: bool, formatDate: int = 1) -> datetime.datetime:
return self._run(
self.reqHeadTimeStampAsync(
contract, whatToShow, useRTH, formatDate)) | Get the datetime of earliest available historical data
for the contract.
Args:
contract: Contract of interest.
whatToShow: Type of data to return (same values as for reqHistoricalData).
useRTH: If True then only show data from within Regular
Trading Hours, if False then show all data.
formatDate: If set to 2 then the result is returned as a
timezone-aware datetime.datetime with UTC timezone. | juraj-google-style |
def get_studies_by_regions(dataset, masks, threshold=0.08, remove_overlap=True, studies=None, features=None, regularization='scale'):
import os
from functools import reduce
import nibabel as nib
import numpy as np
try:
loaded_masks = [nib.load(os.path.relpath(m)) for m in masks]
except OSError:
print('Error loading masks. Check the path')
grouped_ids = [dataset.get_studies(mask=m, activation_threshold=threshold) for m in loaded_masks]
flat_ids = reduce((lambda a, b: (a + b)), grouped_ids)
if remove_overlap:
import collections
flat_ids = [id for (id, count) in collections.Counter(flat_ids).items() if (count == 1)]
grouped_ids = [[x for x in m if (x in flat_ids)] for m in grouped_ids]
y = [([idx] * len(ids)) for (idx, ids) in enumerate(grouped_ids)]
y = reduce((lambda a, b: (a + b)), y)
y = np.array(y)
X = [dataset.get_feature_data(ids=group_ids, features=features) for group_ids in grouped_ids]
X = np.vstack(tuple(X))
if regularization:
X = regularize(X, method=regularization)
return (X, y) | Set up data for a classification task given a set of masks
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features, and returns studies by feature matrix
(X) and class labels (y)
Args:
dataset: a Neurosynth dataset
masks: a list of paths to Nifti masks
threshold: percentage of voxels active within the mask for study
to be included
remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all studies in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
regularization: Optional regularization method applied to X (e.g. 'scale'). If falsy, X is not regularized.
Returns:
A tuple (X, y) of np arrays.
X is a studies-by-features matrix and y is a vector of class labels | codesearchnet |
def _create_dummy_input(func_graph, template_tensor):
with func_graph.as_default():
return array_ops.placeholder(template_tensor.dtype, shape=template_tensor.shape) | Creates tensors in func_graph to represent template_tensors.
Args:
func_graph: FuncGraph.
template_tensor: a tensor in the outer graph.
Returns:
A tensor in func_graph. | github-repos |
def transform(self, input_df):
_df = input_df.copy(deep=False)
for column in self.cat_columns:
if column not in _df:
raise RuntimeError('Required column {:s} not found'.format(column))
if _df[column].dtype == 'object':
print('Changing column {:s} to category'.format(column))
_df[column] = pd.Categorical(_df[column])
_df = _df.select_dtypes(include=['bool', 'int', 'float', 'category'])
if self.normalize:
for column in list(_df.select_dtypes(include=[np.number]).columns.values):
print('Normalizing column {:s}...'.format(column))
smin, smax = self.norm_map[column]
_df[column] = (_df[column] - smin) / (smax - smin)
return self.dummy_encoder.transform(_df) | Convert the dataframe to a matrix (numpy ndarray)
Args:
input_df (dataframe): The dataframe to convert | juraj-google-style |
def get_sanger_unevaluated(store, institute_id, user_id):
sanger_ordered_by_case = store.sanger_ordered(institute_id, user_id)
unevaluated = []
for item in sanger_ordered_by_case:
case_id = item['_id']
case_obj = store.case(case_id=case_id)
if (not case_obj):
continue
case_display_name = case_obj.get('display_name')
varid_list = item['vars']
unevaluated_by_case = {}
unevaluated_by_case[case_display_name] = []
for var_id in varid_list:
variant_obj = store.variant(document_id=var_id, case_id=case_id)
if ((variant_obj is None) or (variant_obj.get('sanger_ordered') is None) or (variant_obj.get('sanger_ordered') is False)):
continue
validation = variant_obj.get('validation', 'not_evaluated')
if (validation in ['True positive', 'False positive']):
continue
unevaluated_by_case[case_display_name].append(variant_obj['_id'])
if (len(unevaluated_by_case[case_display_name]) > 0):
unevaluated.append(unevaluated_by_case)
return unevaluated | Get all variants for an institute having Sanger validations ordered but still not evaluated
Args:
store(scout.adapter.MongoAdapter)
institute_id(str)
user_id(str)
Returns:
unevaluated: a list that looks like this: [ {'case1': [varID_1, varID_2, .., varID_n]}, {'case2' : [varID_1, varID_2, .., varID_n]} ],
where the keys are case_ids and the values are lists of variants with Sanger ordered but not yet validated | codesearchnet |
def GetSecurityDescriptor(self):
fwnt_security_descriptor = pyfwnt.security_descriptor()
fwnt_security_descriptor.copy_from_byte_stream(self._fsntfs_file_entry.security_descriptor_data)
return fwnt_security_descriptor | Retrieves the security descriptor.
Returns:
pyfwnt.security_descriptor: security descriptor. | codesearchnet |
def get_request_data(self, path, action, body=None):
body = (body or '')
(path_name, path_spec) = self.get_path_spec(path)
response = {}
if ((path_spec is not None) and (action in path_spec.keys())):
for status_code in path_spec[action]['responses'].keys():
resp = path_spec[action]['responses'][status_code]
try:
response[int(status_code)] = self.get_response_example(resp)
except ValueError:
response[status_code] = self.get_response_example(resp)
if (response == {}):
response[400] = ''
return response | Get the default data and status code of the given path + action request.
Args:
path: path of the request.
action: action of the request(get, post, delete...)
body: body sent, used to sent it back for post request.
Returns:
A tuple with the default response data and status code
In case of default status_code, use 0 | codesearchnet |
def list_vmss_sub(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/virtualMachineScaleSets', '?api-version=', COMP_API])
return do_get_next(endpoint, access_token) | List VM Scale Sets in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of VM scale sets. | codesearchnet |
def add_applicator(self, table, cols, function):
if table not in self.relations:
raise ItsdbError('Cannot add applicator; table "{}" is not '
'defined by the relations file.'
.format(table))
if cols is None:
raise ItsdbError('Cannot add applicator; columns not specified.')
fields = set(f.name for f in self.relations[table])
for col in cols:
if col not in fields:
raise ItsdbError('Cannot add applicator; column "{}" not '
'defined by the relations file.'
.format(col))
self.applicators[table].append((cols, function)) | Add an applicator. When reading *table*, rows in *table* will be
modified by apply_rows().
Args:
table: The table to apply the function to.
cols: The columns in *table* to apply the function on.
function: The applicator function. | juraj-google-style |
def _get_help_for_modules(self, modules, prefix, include_special_flags):
output_lines = []
for module in modules:
self._render_our_module_flags(module, output_lines, prefix)
if include_special_flags:
self._render_module_flags('absl.flags', six.itervalues(_helpers.SPECIAL_FLAGS._flags()), output_lines, prefix)
return '\n'.join(output_lines) | Returns the help string for a list of modules.
Private to absl.flags package.
Args:
modules: List[str], a list of modules to get the help string for.
prefix: str, a string that is prepended to each generated help line.
include_special_flags: bool, whether to include description of
SPECIAL_FLAGS, i.e. --flagfile and --undefok. | codesearchnet |
def time_estimate(self, duration, **kwargs):
path = ('%s/%s/time_estimate' % (self.manager.path, self.get_id()))
data = {'duration': duration}
return self.manager.gitlab.http_post(path, post_data=data, **kwargs) | Set an estimated time of work for the object.
Args:
duration (str): Duration in human format (e.g. 3h30)
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done | codesearchnet |
def load_metascenario(self, scenario_list):
for scenario in scenario_list:
name = scenario.get('name')
if (name is None):
raise DataError('Scenario in scenario list is missing a name parameter', scenario=scenario)
tile_address = scenario.get('tile')
args = scenario.get('args', {})
dest = self
if (tile_address is not None):
dest = self._tiles.get(tile_address)
if (dest is None):
raise DataError('Attempted to load a scenario into a tile address that does not exist', address=tile_address, valid_addresses=list(self._tiles))
dest.load_scenario(name, **args) | Load one or more scenarios from a list.
Each entry in scenario_list should be a dict containing at least a
name key and an optional tile key and args key. If tile is present
and its value is not None, the scenario specified will be loaded into
the given tile only. Otherwise it will be loaded into the entire
device.
If the args key is specified it will be passed as keyword arguments
to load_scenario.
Args:
scenario_list (list): A list of dicts for each scenario that should
be loaded. | codesearchnet |
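A hypothetical scenario list matching the shape described above (the scenario names, tile address, and argument values are made up for illustration):

```python
scenario_list = [
    {"name": "calibrated"},                      # loaded into the whole device
    {"name": "stream_data",
     "tile": 11,                                 # loaded only into the tile at this address
     "args": {"reading_value": 100}},            # forwarded as load_scenario(**args)
]
device.load_metascenario(scenario_list)          # `device` is assumed to expose the method above
```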
def save(self, savefile):
with open(str(savefile), 'wb') as f:
self.write_to_fp(f)
log.debug('Saved to %s', savefile) | Do the TTS API request and write result to file.
Args:
savefile (string): The path and file name to save the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request. | codesearchnet |
def is_subset(self, other):
if isinstance(other, _basebag):
for elem, count in self.counts():
if not count <= other.count(elem):
return False
else:
for elem in self:
if self.count(elem) > 1 or elem not in other:
return False
return True | Check that every element in self has a count <= in other.
Args:
other (Set) | juraj-google-style |
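A standalone sketch of the same count-wise check using `collections.Counter`, independent of the `_basebag` class above:

```python
from collections import Counter

a, b = Counter("aab"), Counter("aaabc")
assert all(count <= b[elem] for elem, count in a.items())      # "aab" is a sub-bag of "aaabc"

c = Counter("aabb")
assert not all(count <= b[elem] for elem, count in c.items())  # two 'b's exceed the single 'b' in b
```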
def setCTRatio(self, new_ct, password="00000000"):
ret = False
self.setContext("setCTRatio")
try:
self.clearCmdMsg()
if ((new_ct != CTRatio.Amps_100) and (new_ct != CTRatio.Amps_200) and
(new_ct != CTRatio.Amps_400) and (new_ct != CTRatio.Amps_600) and
(new_ct != CTRatio.Amps_800) and (new_ct != CTRatio.Amps_1000) and
(new_ct != CTRatio.Amps_1200) and (new_ct != CTRatio.Amps_1500) and
(new_ct != CTRatio.Amps_2000) and (new_ct != CTRatio.Amps_3000) and
(new_ct != CTRatio.Amps_4000) and (new_ct != CTRatio.Amps_5000)):
self.writeCmdMsg("Legal CT Ratios: 100, 200, 400, 600, " +
"800, 1000, 1200, 1500, 2000, 3000, 4000 and 5000")
self.setContext("")
return ret
if len(password) != 8:
self.writeCmdMsg("Invalid password length.")
self.setContext("")
return ret
if not self.request(False):
self.writeCmdMsg("Bad read CRC on setting")
else:
if not self.serialCmdPwdAuth(password):
self.writeCmdMsg("Password failure")
else:
req_str = "015731023030443028" + binascii.hexlify(str(new_ct).zfill(4)) + "2903"
req_str += self.calc_crc16(req_str[2:].decode("hex"))
self.m_serial_port.write(req_str.decode("hex"))
if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
self.writeCmdMsg("Success(setCTRatio): 06 returned.")
ret = True
self.serialPostEnd()
except:
ekm_log(traceback.format_exc(sys.exc_info()))
self.setContext("")
return ret | Serial call to set CT ratio for attached inductive pickup.
Args:
new_ct (int): A :class:`~ekmmeters.CTRatio` value, a legal amperage setting.
password (str): Optional password.
Returns:
bool: True on completion with ACK. | juraj-google-style |
def _force_edge_active_move(self, state: _STATE) -> _STATE:
seqs, edges = state
unused_edges = edges.copy()
for seq in seqs:
for i in range(1, len(seq)):
unused_edges.remove(self._normalize_edge((seq[i - 1], seq[i])))
edge = self._choose_random_edge(unused_edges)
if not edge:
return seqs, edges
return (
self._force_edge_active(seqs,
edge,
lambda: bool(self._rand.randint(2))),
edges) | Move which forces a random edge to appear on some sequence.
This move chooses random edge from the edges which do not belong to any
sequence and modifies state in such a way, that this chosen edge
appears on some sequence of the search state.
Args:
state: Search state, not mutated.
Returns:
New search state with one of the unused edges appearing in some
sequence. | juraj-google-style |
def disconnect_sync(self, connection_handle):
self.bable.disconnect(connection_handle=connection_handle, sync=True) | Synchronously disconnect from whoever has connected to us
Args:
connection_handle (int): The handle of the connection we wish to disconnect. | codesearchnet |
def update_work_as_completed(self, worker_id, work_id, other_values=None, error=None):
client = self._datastore_client
try:
with client.transaction() as transaction:
work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id, KIND_WORK, work_id)
work_entity = client.get(work_key, transaction=transaction)
if (work_entity['claimed_worker_id'] != worker_id):
return False
work_entity['is_completed'] = True
if other_values:
work_entity.update(other_values)
if error:
work_entity['error'] = text_type(error)
transaction.put(work_entity)
except Exception:
return False
return True | Updates work piece in datastore as completed.
Args:
worker_id: ID of the worker which did the work
work_id: ID of the work which was done
other_values: dictionary with additional values which should be saved
with the work piece
error: if not None then error occurred during computation of the work
piece. In such case work will be marked as completed with error.
Returns:
whether work was successfully updated | codesearchnet |
def check_required_tags_compliance(self, resource):
missing_tags = []
notes = []
resource_tags = {tag.key.lower(): tag.value for tag in resource.tags}
if (resource.resource_type in self.alert_schedule):
target_accounts = self.alert_schedule[resource.resource_type]['scope']
else:
target_accounts = self.alert_schedule['*']['scope']
if (not ((resource.account.account_name in target_accounts) or ('*' in target_accounts))):
return (missing_tags, notes)
if (self.audit_ignore_tag.lower() in resource_tags):
return (missing_tags, notes)
required_tags = list(self.required_tags)
if (self.gdpr_enabled and (resource.account.account_name in self.gdpr_accounts)):
required_tags.append(self.gdpr_tag)
for key in [tag.lower() for tag in required_tags]:
if (key not in resource_tags):
missing_tags.append(key)
elif (not self.validate_tag(key, resource_tags[key])):
missing_tags.append(key)
notes.append('{} tag is not valid'.format(key))
return (missing_tags, notes) | Check whether a resource is compliant
Args:
resource: A single resource
Returns:
`(list, list)`
A tuple containing the missing tags (if there were any) and notes | codesearchnet |
def call_replica_local_fn(fn, *args, **kwargs):
strategy = None
if 'strategy' in kwargs:
strategy = kwargs.pop('strategy')
elif distribute_lib.has_strategy():
strategy = distribute_lib.get_strategy()
is_tpu = backend.is_tpu_strategy(strategy)
if not is_tpu and strategy and distribute_lib.in_cross_replica_context():
with strategy.scope():
return strategy.extended.call_for_each_replica(fn, args, kwargs)
return fn(*args, **kwargs) | Call a function that uses replica-local variables.
This function correctly handles calling `fn` in a cross-replica
context.
Args:
fn: The function to call.
*args: Positional arguments to the `fn`.
**kwargs: Keyword argument to `fn`.
Returns:
The result of calling `fn`. | github-repos |
def query(self, coords, order=1):
out = np.full(len(coords.l.deg), np.nan, dtype='f4')
for pole in self.poles:
m = (coords.b.deg >= 0) if pole == 'ngp' else (coords.b.deg < 0)
if np.any(m):
data, w = self._data[pole]
x, y = w.wcs_world2pix(coords.l.deg[m], coords.b.deg[m], 0)
out[m] = map_coordinates(data, [y, x], order=order, mode='nearest')
return out | Returns the map value at the specified location(s) on the sky.
Args:
coords (`astropy.coordinates.SkyCoord`): The coordinates to query.
order (Optional[int]): Interpolation order to use. Defaults to `1`,
for linear interpolation.
Returns:
A float array containing the map value at every input coordinate.
The shape of the output will be the same as the shape of the
coordinates stored by `coords`. | juraj-google-style |
def put(self, key, value):
value = self.serializedValue(value)
self.child_datastore.put(key, value) | Stores the object `value` named by `key`.
Serializes values on the way in, and stores the serialized data into the
``child_datastore``.
Args:
key: Key naming `value`
value: the object to store. | juraj-google-style |
async def send(self, metric):
message = json.dumps(metric).encode('utf-8')
(await self.loop.create_datagram_endpoint((lambda : UDPClientProtocol(message)), remote_addr=(self.ip, self.port))) | Transform metric to JSON bytestring and send to server.
Args:
metric (dict): Complete metric to send as JSON. | codesearchnet |
def execute(api):
try:
return api.execute()
except Exception as exception:
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
_print_error('%s: Exception %s: %s' % (now, type(exception).__name__,
str(exception)))
raise exception | Executes operation.
Args:
api: The base API object
Returns:
A response body object | juraj-google-style |
def lf_summary(L, Y=None, lf_names=None, est_accs=None):
n, m = L.shape
if lf_names is not None:
col_names = ["j"]
d = {"j": list(range(m))}
else:
lf_names = list(range(m))
col_names = []
d = {}
col_names.extend(["Polarity", "Coverage", "Overlaps", "Conflicts"])
d["Polarity"] = Series(data=lf_polarities(L), index=lf_names)
d["Coverage"] = Series(data=lf_coverages(L), index=lf_names)
d["Overlaps"] = Series(data=lf_overlaps(L), index=lf_names)
d["Conflicts"] = Series(data=lf_conflicts(L), index=lf_names)
if Y is not None:
col_names.extend(["Correct", "Incorrect", "Emp. Acc."])
confusions = [
confusion_matrix(Y, L[:, i], pretty_print=False) for i in range(m)
]
corrects = [np.diagonal(conf).sum() for conf in confusions]
incorrects = [
conf.sum() - correct for conf, correct in zip(confusions, corrects)
]
accs = lf_empirical_accuracies(L, Y)
d["Correct"] = Series(data=corrects, index=lf_names)
d["Incorrect"] = Series(data=incorrects, index=lf_names)
d["Emp. Acc."] = Series(data=accs, index=lf_names)
if est_accs is not None:
col_names.append("Learned Acc.")
d["Learned Acc."] = Series(est_accs, index=lf_names)
return DataFrame(data=d, index=lf_names)[col_names] | Returns a pandas DataFrame with the various per-LF statistics.
Args:
L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
jth LF to the ith candidate
Y: an [n] or [n, 1] np.ndarray of gold labels.
If provided, the empirical accuracy for each LF will be calculated | juraj-google-style |
def operator(name=None, operators=None, aliases=None, kind=None):
def delegator(assertion, subject, expected, *args, **kw):
return assertion.test(subject, expected, *args, **kw)
def decorator(fn):
operator = Operator(fn=fn, aliases=aliases, kind=kind)
_name = name if isinstance(name, six.string_types) else fn.__name__
operator.operators = (_name,)
_operators = operators
if isinstance(_operators, list):
_operators = tuple(_operators)
if isinstance(_operators, tuple):
operator.operators += _operators
Engine.register(operator)
return functools.partial(delegator, operator)
return decorator(name) if inspect.isfunction(name) else decorator | Registers a new operator function in the test engine.
Arguments:
name (str|function): operator name, or the operator function itself
when used as a bare decorator.
operators (list|tuple): optional additional operator names to register.
aliases (list|tuple): optional operator aliases.
kind (str): optional operator kind.
Returns:
function | juraj-google-style |
def _copy_fn(fn):
if not callable(fn):
raise TypeError('fn is not callable: %s' % fn)
return types.FunctionType(code=fn.__code__, globals=fn.__globals__, name=fn.__name__, argdefs=fn.__defaults__, closure=fn.__closure__) | Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable. | github-repos |
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
local_stream = BytearrayStream()
if (self._device_serial_number is not None):
self._device_serial_number.write(local_stream, kmip_version=kmip_version)
if (self._password is not None):
self._password.write(local_stream, kmip_version=kmip_version)
if (self._device_identifier is not None):
self._device_identifier.write(local_stream, kmip_version=kmip_version)
if (self._network_identifier is not None):
self._network_identifier.write(local_stream, kmip_version=kmip_version)
if (self._machine_identifier is not None):
self._machine_identifier.write(local_stream, kmip_version=kmip_version)
if (self._media_identifier is not None):
self._media_identifier.write(local_stream, kmip_version=kmip_version)
self.length = local_stream.length()
super(DeviceCredential, self).write(output_stream, kmip_version=kmip_version)
output_stream.write(local_stream.buffer) | Write the data encoding the DeviceCredential struct to a stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0. | codesearchnet |
def convert(isbn, code='978'):
isbn = _isbn_cleanse(isbn)
if (len(isbn) == 10):
isbn = (code + isbn[:(- 1)])
return (isbn + calculate_checksum(isbn))
elif isbn.startswith('978'):
return (isbn[3:(- 1)] + calculate_checksum(isbn[3:(- 1)]))
else:
raise IsbnError('Only ISBN-13s with 978 Bookland code can be converted to ISBN-10.') | Convert ISBNs between ISBN-10 and ISBN-13.
Note:
No attempt to hyphenate converted ISBNs is made, because the
specification requires that *any* hyphenation must be correct but
allows ISBNs without hyphenation.
Args:
isbn (str): SBN, ISBN-10 or ISBN-13
code (str): EAN Bookland code
Returns:
``str``: Converted ISBN-10 or ISBN-13
Raise:
IsbnError: When ISBN-13 isn't convertible to an ISBN-10 | codesearchnet |
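A hedged usage sketch, assuming `convert` (and its `calculate_checksum` helper) from this module are importable; as the note above says, the result carries no hyphens:

```python
assert convert("0306406152") == "9780306406157"   # ISBN-10 -> ISBN-13 (978 Bookland code)
assert convert("9780306406157") == "0306406152"   # ISBN-13 starting with 978 -> ISBN-10
```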
def _GetFieldByName(message_descriptor, field_name):
try:
return message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message %s has no "%s" field.' %
(message_descriptor.name, field_name)) | Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name. | juraj-google-style |
def write_input(self, output_dir=".", make_dir_if_not_present=True):
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
feff = self.all_input()
feff_input = "\n\n".join(str(feff[k]) for k in
["HEADER", "PARAMETERS", "POTENTIALS", "ATOMS"]
if k in feff)
for k, v in feff.items():
with open(os.path.join(output_dir, k), "w") as f:
f.write(str(v))
with open(os.path.join(output_dir, "feff.inp"), "w") as f:
f.write(feff_input)
if "ATOMS" not in feff:
self.atoms.struct.to(fmt="cif",
filename=os.path.join(
output_dir, feff["PARAMETERS"]["CIF"])) | Writes a set of FEFF input to a directory.
Args:
output_dir: Directory to output the FEFF input files
make_dir_if_not_present: Set to True if you want the directory (
and the whole path) to be created if it is not present. | juraj-google-style |
def propose(self):
candidates = self._get_candidates()
if (candidates is None):
return None
predictions = self.predict(candidates)
idx = self._acquire(predictions)
return candidates[idx] | Use the trained model to propose a new pipeline.
Returns:
int: Index corresponding to pipeline to try in ``dpp_matrix``. | codesearchnet |
def stack_and_pad_tensors(batch, padding_index=DEFAULT_PADDING_INDEX, dim=0):
lengths = [tensor.shape[0] for tensor in batch]
max_len = max(lengths)
padded = [pad_tensor(tensor, max_len, padding_index) for tensor in batch]
lengths = torch.tensor(lengths)
padded = torch.stack(padded, dim=dim).contiguous()
for _ in range(dim):
lengths = lengths.unsqueeze(0)
return padded, lengths | Pad a :class:`list` of ``tensors`` (``batch``) with ``padding_index``.
Args:
batch (:class:`list` of :class:`torch.Tensor`): Batch of tensors to pad.
padding_index (int, optional): Index to pad tensors with.
dim (int, optional): Dimension on to which to concatenate the batch of tensors.
Returns
torch.Tensor, torch.Tensor: Padded tensors and original lengths of tensors. | juraj-google-style |
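A usage sketch, assuming `stack_and_pad_tensors` (and its `pad_tensor` helper) from the snippet above are in scope; `torch.nn.utils.rnn.pad_sequence(batch, batch_first=True)` would produce the same padded tensor:

```python
import torch

batch = [torch.tensor([1, 2, 3]), torch.tensor([4, 5]), torch.tensor([6])]
padded, lengths = stack_and_pad_tensors(batch, padding_index=0)
# padded  -> tensor([[1, 2, 3],
#                    [4, 5, 0],
#                    [6, 0, 0]])
# lengths -> tensor([3, 2, 1])
```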
def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
mask = np.zeros(output_size, dtype=np.int64)
mask[:input_height, :input_width] = 1
return mask | Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
Args:
image (`np.ndarray`):
Image to make the pixel mask for.
output_size (`Tuple[int, int]`):
Output size of the mask. | github-repos |
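A standalone illustration of the masking pattern (plain NumPy, not the library call itself): a 2x3 image padded to 4x4 gets 1s over the valid region and 0s over the padding.

```python
import numpy as np

input_height, input_width = 2, 3
mask = np.zeros((4, 4), dtype=np.int64)
mask[:input_height, :input_width] = 1
# mask ->
# [[1 1 1 0]
#  [1 1 1 0]
#  [0 0 0 0]
#  [0 0 0 0]]
```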
def get_all(cls, include_disabled=True):
if (cls == BaseAccount):
raise InquisitorError('get_all on BaseAccount is not supported')
account_type_id = db.AccountType.find_one(account_type=cls.account_type).account_type_id
qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)
if (not include_disabled):
qry = qry.filter((Account.enabled == 1))
accounts = qry.find((Account.account_type_id == account_type_id))
return {res.account_id: cls(res) for res in accounts} | Returns a list of all accounts of a given type
Args:
include_disabled (`bool`): Include disabled accounts. Default: `True`
Returns:
list of account objects | codesearchnet |
def get_mealy_conjecture(self):
mma = MealyMachine()
for s in self.observation_table.sm_vector:
for i in self.alphabet:
dst = self.observation_table.equiv_classes[s + i]
if dst is None:
logging.debug('Conjecture attempt on non closed table.')
return None
o = self.observation_table[s, i]
src_id = self.observation_table.sm_vector.index(s)
dst_id = self.observation_table.sm_vector.index(dst)
mma.add_arc(src_id, dst_id, i, o)
for s in mma.states:
s.final = True
return mma | Utilize the observation table to construct a Mealy Machine.
The library used for representing the Mealy Machine is the python
bindings of the openFST library (pyFST).
Args:
None
Returns:
MealyMachine: A mealy machine build based on a closed and consistent
observation table. | juraj-google-style |
def _central_crop(image, crop_height, crop_width):
shape = tf.shape(image)
height, width = shape[0], shape[1]
mlperf_log.resnet_print(key=mlperf_log.INPUT_CENTRAL_CROP,
value=[crop_height, crop_width])
amount_to_be_cropped_h = (height - crop_height)
crop_top = amount_to_be_cropped_h // 2
amount_to_be_cropped_w = (width - crop_width)
crop_left = amount_to_be_cropped_w // 2
return tf.slice(
image, [crop_top, crop_left, 0], [crop_height, crop_width, -1]) | Performs central crops of the given image list.
Args:
image: a 3-D image tensor
crop_height: the height of the image following the crop.
crop_width: the width of the image following the crop.
Returns:
3-D tensor with cropped image. | juraj-google-style |
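A plain-number sketch of the crop offsets computed above (the image and crop sizes are assumed values):

```python
height, width = 256, 300
crop_height, crop_width = 224, 224
crop_top = (height - crop_height) // 2    # 16
crop_left = (width - crop_width) // 2     # 38
# tf.slice then keeps rows 16..239 and columns 38..261 -- the centre patch.
```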
def _request(self, path, key, data, method, key_is_cik, extra_headers={}):
if (method == 'GET'):
if (len(data) > 0):
url = ((path + '?') + data)
else:
url = path
body = None
else:
url = path
body = data
headers = {}
if key_is_cik:
headers['X-Exosite-CIK'] = key
else:
headers['X-Exosite-Token'] = key
if (method == 'POST'):
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8'
headers['Accept'] = 'text/plain, text/csv, application/x-www-form-urlencoded'
headers.update(extra_headers)
(body, response) = self._onephttp.request(method, url, body, headers)
pr = ProvisionResponse(body, response)
if (self._raise_api_exceptions and (not pr.isok)):
raise ProvisionException(pr)
return pr | Generically shared HTTP request method.
Args:
path: The API endpoint to interact with.
key: A string for the key used by the device for the API. Either a CIK or token.
data: A string for the pre-encoded data to be sent with this request.
method: A string denoting the HTTP verb to use for the request (e.g. 'GET', 'POST')
key_is_cik: Whether or not the device key used is a CIK or token.
extra_headers: A dictionary of extra headers to include with the request.
Returns:
A ProvisionResponse containing the result of the HTTP request. | codesearchnet |
def to_representation(self, instance):
if self.id_only():
return instance.pk
pk = getattr(instance, 'pk', None)
if not settings.ENABLE_SERIALIZER_OBJECT_CACHE or pk is None:
return self._to_representation(instance)
else:
if pk not in self.obj_cache:
self.obj_cache[pk] = self._to_representation(instance)
return self.obj_cache[pk] | Modified to_representation method. Optionally may cache objects.
Arguments:
instance: A model instance or data object.
Returns:
Instance ID if the serializer is meant to represent its ID.
Otherwise, a tagged data dict representation. | juraj-google-style |
def ssh_reachable(self, tries=None, propagate_fail=True):
if not self.running():
return False
try:
ssh.get_ssh_client(
ip_addr=self.ip(),
host_name=self.name(),
ssh_tries=tries,
propagate_fail=propagate_fail,
ssh_key=self.virt_env.prefix.paths.ssh_id_rsa(),
username=self._spec.get('ssh-user'),
password=self._spec.get('ssh-password'),
)
except ssh.LagoSSHTimeoutException:
return False
return True | Check if the VM is reachable with ssh
Args:
tries(int): Number of tries to try connecting to the host
propagate_fail(bool): If set to true, this event will appear
in the log and fail the outer stage. Otherwise, it will be
discarded.
Returns:
bool: True if the VM is reachable. | juraj-google-style |
def from_celery(cls, name, worker_dict, queues):
return WorkerStats(
name=name,
broker=BrokerStats.from_celery(worker_dict['broker']),
pid=worker_dict['pid'],
process_pids=worker_dict['pool']['processes'],
concurrency=worker_dict['pool']['max-concurrency'],
job_count=worker_dict['pool']['writes']['total'],
queues=queues
) | Create a WorkerStats object from the dictionary returned by celery.
Args:
name (str): The name of the worker.
worker_dict (dict): The dictionary as returned by celery.
queues (list): A list of QueueStats objects that represent the queues this
worker is listening on.
Returns:
WorkerStats: A fully initialized WorkerStats object. | juraj-google-style |
def nodes_on_wire(self, wire, only_ops=False):
current_node = self.input_map.get(wire, None)
if not current_node:
raise DAGCircuitError('The given wire %s is not present in the circuit'
% str(wire))
more_nodes = True
while more_nodes:
more_nodes = False
if current_node.type == 'op' or not only_ops:
yield current_node
for node, edges in self._multi_graph.adj[current_node].items():
if any(wire == edge['wire'] for edge in edges.values()):
current_node = node
more_nodes = True
break | Iterator for nodes that affect a given wire
Args:
wire (tuple(Register, index)): the wire to be looked at.
only_ops (bool): True if only the ops nodes are wanted
otherwise all nodes are returned.
Yield:
DAGNode: the successive ops on the given wire
Raises:
DAGCircuitError: if the given wire doesn't exist in the DAG | juraj-google-style |
def update_batch(self, loss_per_instance):
if self.batch_indices is None:
raise TensorForceError("Need to call get_batch before each update_batch call.")
for index, loss in zip(self.batch_indices, loss_per_instance):
new_priority = (np.abs(loss) + self.prioritization_constant) ** self.prioritization_weight
self.observations._move(index, new_priority)
self.none_priority_index += 1 | Computes priorities according to loss.
Args:
loss_per_instance: | juraj-google-style |
def MakePartialStat(self, fd):
is_dir = "Container" in fd.behaviours
return {
"pathspec": fd.Get(fd.Schema.PATHSPEC, ""),
"st_atime": fd.Get(fd.Schema.LAST, 0),
"st_blksize": 0,
"st_blocks": 0,
"st_ctime": 0,
"st_dev": 0,
"st_gid": 0,
"st_ino": 0,
"st_mode": self.default_dir_mode if is_dir else self.default_file_mode,
"st_mtime": 0,
"st_nlink": 0,
"st_rdev": 0,
"st_size": fd.Get(fd.Schema.SIZE, 0),
"st_uid": 0
} | Try and give a 'stat' for something not in the data store.
Args:
fd: The object with no stat.
Returns:
A dictionary corresponding to what we'll say the 'stat' is
for objects which are not actually files, so have no OS level stat. | juraj-google-style |
def _compute_linear_scaling_rope_parameters(config: Optional[PretrainedConfig]=None, device: Optional['torch.device']=None, seq_len: Optional[int]=None, **rope_kwargs) -> tuple['torch.Tensor', float]:
if config is not None and len(rope_kwargs) > 0:
raise ValueError(f'Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in `_compute_linear_scaling_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}')
if len(rope_kwargs) > 0:
factor = rope_kwargs['factor']
elif config is not None:
factor = config.rope_scaling['factor']
inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs)
inv_freq /= factor
return (inv_freq, attention_factor) | Computes the inverse frequencies with linear scaling. Credits to the Reddit user /u/kaiokendev
Args:
config ([`~transformers.PretrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
rope_kwargs (`Dict`, *optional*):
BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). | github-repos |
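A minimal standalone sketch of the linear-scaling step (base, dim and factor are assumed values; the real code reads them from the model config):

```python
import numpy as np

base, dim, factor = 10000.0, 8, 4.0
inv_freq = 1.0 / base ** (np.arange(0, dim, 2) / dim)  # default RoPE inverse frequencies
inv_freq_scaled = inv_freq / factor                    # the linear-scaling step above
# Dividing the frequencies by `factor` is equivalent to evaluating position m
# at m / factor, which stretches the usable context window by `factor`.
position = 8.0
assert np.allclose(inv_freq_scaled * position, inv_freq * (position / factor))
```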
def _wait_all_creative_activation(self, feed_item, timeout=128):
for association in feed_item['creative_assignment']:
creative = self._creative_dao.get(association, required=True)
self._wait_creative_activation(creative['id'], timeout) | Waits for activation of all creatives that should be associated to the feed item that represents an ad.
Args:
feed_item: Feed item representing an Ad from the Bulkdozer feed.
timeout: Optional parameter identifying how long to wait for all creatives
to be activated in seconds.
Raises:
Exception: In case one or more creatives do not get activated within the
specified timeout. | github-repos |
def __init__(self, vfs_object):
super(ObjectsCacheValue, self).__init__()
self._reference_count = 0
self.vfs_object = vfs_object | Initializes the resolver objects cache value object.
Args:
vfs_object (object): VFS object to cache. | juraj-google-style |
def supported_language(lang):
try:
self.get_collection(lang=lang)
return True
except LanguageNotSupported as e:
return False | Return True if polyglot supports the language.
Args:
lang (string): Language code. | juraj-google-style |
def by_phone(self, phone, cc=None):
header, content = self._http_request(self.BASE_URL, phone=phone, cc=cc)
return json.loads(content) | Perform a Yelp Phone API Search based on phone number given.
Args:
phone - Phone number to search by
cc - ISO 3166-1 alpha-2 country code. (Optional) | juraj-google-style |
def get_dump_sizes_bytes(self, node_name, output_slot, debug_op, device_name=None):
device_name = self._infer_device_name(device_name, node_name)
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum[device_name]:
raise WatchKeyDoesNotExistInDebugDumpDirError('Watch key "%s" does not exist in the debug dump of device %s' % (watch_key, device_name))
return self._watch_key_to_dump_size_bytes[device_name][watch_key] | Get the sizes of the dump files for a debug-dumped tensor.
Unit of the file size: byte.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
(`list` of `int`): list of dump file sizes in bytes.
Raises:
WatchKeyDoesNotExistInDebugDumpDirError: If the tensor watch key does not
exist in the debug dump data. | github-repos |
def lines_from_file(path, as_interned=False, encoding=None):
lines = None
with io.open(path, encoding=encoding) as f:
if as_interned:
lines = [sys.intern(line) for line in f.read().splitlines()]
else:
lines = f.read().splitlines()
return lines | Create a list of file lines from a given filepath.
Args:
path (str): File path
as_interned (bool): List of "interned" strings (default False)
Returns:
strings (list): File line list | codesearchnet |
def traverse(self, index=0):
if (index < len(self.nodes)):
for entity in self.nodes[index]:
for next_result in self.traverse(index=(index + 1)):
if isinstance(entity, list):
(yield (entity + next_result))
else:
(yield ([entity] + next_result))
else:
(yield []) | This is used to produce a list of lists where each item
in that list is a different combination of items from the inner lists,
covering every combination of such values.
Args:
index (int): the index at which to start. Note: this is only used
internally while processing.
Returns:
list: every combination. | codesearchnet |
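A standalone illustration of the same expansion with `itertools.product` (ignoring the special flattening of list-valued entities):

```python
from itertools import product

nodes = [["a", "b"], ["x"], [1, 2]]
combos = [list(c) for c in product(*nodes)]
# combos -> [['a', 'x', 1], ['a', 'x', 2], ['b', 'x', 1], ['b', 'x', 2]]
```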
def molecule(lines):
count_line = lines[3]
num_atoms = int(count_line[0:3])
num_bonds = int(count_line[3:6])
compound = Compound()
compound.graph._node = atoms(lines[4: num_atoms+4])
compound.graph._adj = bonds(lines[num_atoms+4: num_atoms+num_bonds+4],
compound.graph._node.keys())
props = properties(lines[num_atoms+num_bonds+4:])
add_properties(props, compound)
return compound | Parse molfile part into molecule object
Args:
lines (list): lines of molfile part
Raises:
ValueError: Symbol not defined in periodictable.yaml
(Polymer expression not supported yet) | juraj-google-style |
def depth_december_average_ground_temperature(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `depth_december_average_ground_temperature`'.format(value))
self._depth_december_average_ground_temperature = value | Corresponds to IDD Field `depth_december_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_december_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | juraj-google-style |
def _checkSetpointValue( setpointvalue, maxvalue ):
if maxvalue is None:
raise TypeError('The maxvalue (for the setpoint) must not be None!')
minimalmodbus._checkNumerical(setpointvalue, minvalue=0, maxvalue=maxvalue, description='setpoint value') | Check that the given setpointvalue is valid.
Args:
* setpointvalue (numerical): The setpoint value to be checked. Must be positive.
* maxvalue (numerical): Upper limit for setpoint value. Must be positive.
Raises:
TypeError, ValueError | juraj-google-style |
def _extract_all_responses(self, resources, api_endpoint, api_name):
(all_responses, resources) = self._bulk_cache_lookup(api_name, resources)
resource_chunks = self._prepare_resource_chunks(resources)
response_chunks = self._request_reports('resource', resource_chunks, api_endpoint)
self._extract_response_chunks(all_responses, response_chunks, api_name)
return all_responses | Aux function to extract all the API endpoint responses.
Args:
resources: list of string hashes.
api_endpoint: endpoint path
api_name: endpoint name
Returns:
A dict with the hash as key and the VT report as value. | codesearchnet |
def add_individual(self, genotype):
logger.debug('Adding genotype {0} to variant {1}'.format(genotype, self['variant_id']))
self['individuals'].append(genotype) | Add the information for a individual
This adds a genotype dict to variant['individuals']
Args:
genotype (dict): A genotype dictionary | codesearchnet |
def conditionally_inline_policies(role_name, sr_entry):
service_type = sr_entry['type']
if not (service_type in SERVICE_TYPE_ROLE and "policies" in sr_entry):
print_if_verbose("not eligible for policies; service_type: {} is not valid for policies "
"or no 'policies' key in service registry for this role".format(service_type))
return
for policy_name in sr_entry['policies']:
print_if_verbose("loading policy: {} for role: {}".format(policy_name, role_name))
try:
policy_document = resolve_policy_document(policy_name)
except:
fail("Exception loading policy: {} for role: {}".format(policy_name, role_name), sys.exc_info())
if CONTEXT.commit:
try:
CLIENTS["iam"].put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy_document)
except:
fail("Exception putting policy: {} onto role: {}".format(policy_name, role_name), sys.exc_info()) | If 'policies' key lists the filename prefixes of policies to bind to the role,
load them from the expected path and inline them onto the role
Args:
role_name: name of the role to attach the policies to
sr_entry: service registry entry | juraj-google-style |
def diff_levenshtein(self, diffs):
levenshtein = 0
insertions = 0
deletions = 0
for (op, data) in diffs:
if op == self.DIFF_INSERT:
insertions += len(data)
elif op == self.DIFF_DELETE:
deletions += len(data)
elif op == self.DIFF_EQUAL:
levenshtein += max(insertions, deletions)
insertions = 0
deletions = 0
levenshtein += max(insertions, deletions)
return levenshtein | Compute the Levenshtein distance; the number of inserted, deleted or
substituted characters.
Args:
diffs: Array of diff tuples.
Returns:
Number of changes. | juraj-google-style |
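A standalone re-implementation of the same counting loop, to make the accumulation concrete (the op constants mirror diff-match-patch's usual values and are treated here as an assumption):

```python
DIFF_DELETE, DIFF_INSERT, DIFF_EQUAL = -1, 1, 0

def levenshtein_from_diffs(diffs):
    distance = insertions = deletions = 0
    for op, data in diffs:
        if op == DIFF_INSERT:
            insertions += len(data)
        elif op == DIFF_DELETE:
            deletions += len(data)
        else:  # a DIFF_EQUAL run closes the current edit block
            distance += max(insertions, deletions)
            insertions = deletions = 0
    return distance + max(insertions, deletions)

diffs = [(DIFF_DELETE, "abc"), (DIFF_INSERT, "xy"),
         (DIFF_EQUAL, "1234"), (DIFF_INSERT, "q")]
assert levenshtein_from_diffs(diffs) == 4  # max(3, 2) for the first block, +1 after the equality
```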
def ne(left: Any, right: Any) -> bool:
return not eq(left, right) | Compares if two values are not equal. Use symbolic equality if possible.
Example::
@pg.members([
('x', pg.typing.Any())
])
class A(pg.Object):
def sym_eq(self, right):
if super().sym_eq(right):
return True
return pg.eq(self.x, right)
class B:
pass
assert pg.ne(1, 2)
assert pg.ne(A(1), A(2))
# A has override `sym_eq`.
assert not pg.ne(A(1), 1)
# Objects of B are compared by references.
assert pg.ne(A(B()), A(B()))
Args:
left: The left-hand value to compare.
right: The right-hand value to compare.
Returns:
True if left and right are not equal (using symbolic equality when available); otherwise False. | github-repos |
def add_module(self, module_name, module_ui):
m_button = tk.Label(self.module_selection, text=module_name, bg='white', anchor='w')
m_button.grid(column=0, row=len(self.module_selection.winfo_children()), padx=0, pady=0, sticky='W E N S')
self.module_buttons[module_name] = m_button
m_button.bind('<Button-1>', (lambda e: self.module_selected(module_name, module_ui))) | Adds a module to the list
Args:
module_name (str): The name of the module
module_ui: The function to call to create the module's UI | codesearchnet |
def export(self, top=True):
out = []
if top:
out.append(self._internal_name)
out.append(self._to_str(self.comments_2))
return ",".join(out) | Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non-list objects should be exported with top=True;
all list objects that are embedded as fields in list objects
should be exported with `top`=False.
Returns:
str: The objects string representation | juraj-google-style |
def convert_placeholder_to_const(input_graph_def, nodes_to_convert=None):
input_node_map = {}
for node in input_graph_def.node:
if node.name not in input_node_map:
input_node_map[node.name] = node
else:
raise ValueError('Duplicate node names detected for ', node.name)
dict_to_change = {}
for key in PLACEHOLDER_WITH_DEFAULT_LIST:
dict_to_change[key] = PLACEHOLDER_WITH_DEFAULT_LIST[key]
if nodes_to_convert is not None and len(nodes_to_convert) > 0:
dict_list = parse_nodes_dict(nodes_to_convert)
dict_to_change.update(dict_list)
ph_node_list = []
for ph_node in dict_to_change:
if not ph_node and ph_node not in input_node_map:
continue
ph_node_list.append(ph_node)
if not ph_node_list:
tf_logging.warning('No PlaceholderWithDefault nodes found to convert to Constant. Maybe check the spellings')
return input_graph_def
result_graph_def = graph_pb2.GraphDef()
for node in input_graph_def.node:
is_replaced = False
new_node = node_def_pb2.NodeDef()
if node.op == 'PlaceholderWithDefault' or node.op == 'Placeholder':
match_key = [find_key for find_key in dict_to_change.keys() if find_key in node.name]
if len(match_key) > 0:
if dtypes.bool.as_datatype_enum == node.attr['dtype'].type:
new_val_str = dict_to_change[match_key[0]]
new_node.op = 'Const'
new_node.name = node.name
new_node.attr['dtype'].CopyFrom(node.attr['dtype'])
new_node.attr['value'].CopyFrom(attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(strtobool(new_val_str), dtype=dtypes.bool, shape=[])))
is_replaced = True
else:
tf_logging.warning('Not converting to Const. Currently only bool PlaceholderWithDefault or Placeholder can be converted to const. current dtype = ', node.attr['dtype'])
if not is_replaced:
new_node.CopyFrom(node)
result_graph_def.node.extend([new_node])
return result_graph_def | Rename the PlaceHolderWithDefault node to constant
In a frozen graph, PlaceholderWithDefault nodes can be converted to
Constant op nodes with same value. This will help simplify the graph.
Args:
input_graph_def: A GraphDef containing a model.
nodes_to_convert: A list of PlaceholderWithDefault or Placeholder nodes to
be converted to Constants with their new value.
Returns:
modified graph with PlaceholderWithDefault node converted to Constant node | github-repos |
def _ParseRecordExtraField(self, byte_stream, file_offset):
extra_field_map = self._GetDataTypeMap('asl_record_extra_field')
try:
record_extra_field = self._ReadStructureFromByteStream(byte_stream, file_offset, extra_field_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to parse record extra field at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))
return record_extra_field | Parses a record extra field.
Args:
byte_stream (bytes): byte stream.
file_offset (int): offset of the record extra field relative to
the start of the file.
Returns:
asl_record_extra_field: record extra field.
Raises:
ParseError: if the record extra field cannot be parsed. | codesearchnet |
def __init__(
self,
path: str,
query_string: bytes,
scheme: str,
headers: CIMultiDict,
subprotocols: List[str],
receive: Callable,
send: Callable,
accept: Callable,
) -> None:
super().__init__('GET', scheme, path, query_string, headers)
self._accept = accept
self._receive = receive
self._send = send
self._subprotocols = subprotocols | Create a request object.
Arguments:
path: The full unquoted path of the request.
query_string: The raw bytes for the query string part.
scheme: The scheme used for the request.
headers: The request headers.
subprotocols: The subprotocols requested.
receive: Returns an awaitable of the current data
accept: Idempotent callable to accept the websocket connection. | juraj-google-style |
def popn(self, buffer_type, count):
buffer_type = str(buffer_type)
if buffer_type == u'streaming':
chosen_buffer = self.streaming_data
else:
chosen_buffer = self.storage_data
if count > len(chosen_buffer):
raise StreamEmptyError("Not enough data in buffer for popn command", requested=count, stored=len(chosen_buffer), buffer=buffer_type)
popped = chosen_buffer[:count]
remaining = chosen_buffer[count:]
if buffer_type == u'streaming':
self.streaming_data = remaining
else:
self.storage_data = remaining
return popped | Remove and return the oldest count values from the named buffer
Args:
buffer_type (str): The buffer to pop from (either u"storage" or u"streaming")
count (int): The number of readings to pop
Returns:
list(IOTileReading): The values popped from the buffer | juraj-google-style |
def ValidateCertificateHostname(cert, hostname):
hosts = GetValidHostsForCert(cert)
boto.log.debug('validating server certificate: hostname=%s, certificate hosts=%s', hostname, hosts)
for host in hosts:
host_re = host.replace('.', '\\.').replace('*', '[^.]*')
if re.search(('^%s$' % (host_re,)), hostname, re.I):
return True
return False | Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate. | codesearchnet |
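A standalone sketch of the wildcard translation used above (the certificate hosts are made up):

```python
import re

def host_matches(cert_host, hostname):
    host_re = cert_host.replace('.', r'\.').replace('*', '[^.]*')
    return re.search('^%s$' % host_re, hostname, re.I) is not None

assert host_matches('*.example.com', 'www.example.com')
assert not host_matches('*.example.com', 'a.b.example.com')  # '*' does not cross label boundaries
```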
def incr(self, key, value, noreply=False):
key = self.check_key(key)
cmd = (((b'incr ' + key) + b' ') + six.text_type(value).encode('ascii'))
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'incr', noreply)
if noreply:
return None
if (results[0] == b'NOT_FOUND'):
return None
return int(results[0]) | The memcached "incr" command.
Args:
key: str, see class docs for details.
value: int, the amount by which to increment the value.
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns None. Otherwise returns the new
value of the key, or None if the key wasn't found. | codesearchnet |
def index_all(self, index_name):
oks = 0
notoks = 0
for ok, item in streaming_bulk(
self.es_client,
self._iter_documents(index_name)
):
if ok:
oks += 1
else:
notoks += 1
logging.info(
"Import results: %d ok, %d not ok",
oks,
notoks
) | Index all available documents, using streaming_bulk for speed
Args:
index_name (string): The index | juraj-google-style |
def pad_image(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
output_height, output_width = (size['height'], size['width'])
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
delta_width = output_width - input_width
delta_height = output_height - input_height
pad_top = delta_height // 2
pad_left = delta_width // 2
pad_bottom = delta_height - pad_top
pad_right = delta_width - pad_left
padding = ((pad_top, pad_bottom), (pad_left, pad_right))
return pad(image, padding, data_format=data_format, input_data_format=input_data_format) | Pad the image to the specified size at the top, bottom, left and right.
Args:
image (`np.ndarray`):
The image to be padded.
size (`Dict[str, int]`):
The size `{"height": h, "width": w}` to pad the image to.
data_format (`str` or `ChannelDimension`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred. | github-repos |
def AsDict(self, dt=True):
data = {}
if self.body:
data['body'] = self.body
if self.posted_at:
data['posted_at'] = self.posted_at
if self.user:
data['user'] = self.user.AsDict()
return data | A dict representation of this Comment instance.
The return value uses the same key names as the JSON representation.
Args:
dt (bool): If True, return dates as python datetime objects. If
False, return dates as ISO strings.
Return:
A dict representing this Comment instance | juraj-google-style |
def sanitize(vpc_config):
if (vpc_config is None):
return vpc_config
elif (type(vpc_config) is not dict):
raise ValueError('vpc_config is not a dict: {}'.format(vpc_config))
elif (not vpc_config):
raise ValueError('vpc_config is empty')
subnets = vpc_config.get(SUBNETS_KEY)
if (subnets is None):
raise ValueError('vpc_config is missing key: {}'.format(SUBNETS_KEY))
if (type(subnets) is not list):
raise ValueError('vpc_config value for {} is not a list: {}'.format(SUBNETS_KEY, subnets))
elif (not subnets):
raise ValueError('vpc_config value for {} is empty'.format(SUBNETS_KEY))
security_group_ids = vpc_config.get(SECURITY_GROUP_IDS_KEY)
if (security_group_ids is None):
raise ValueError('vpc_config is missing key: {}'.format(SECURITY_GROUP_IDS_KEY))
if (type(security_group_ids) is not list):
raise ValueError('vpc_config value for {} is not a list: {}'.format(SECURITY_GROUP_IDS_KEY, security_group_ids))
elif (not security_group_ids):
raise ValueError('vpc_config value for {} is empty'.format(SECURITY_GROUP_IDS_KEY))
return to_dict(subnets, security_group_ids) | Checks that an instance of VpcConfig has the expected keys and values, removes unexpected keys,
and raises ValueErrors if any expectations are violated
Args:
vpc_config (dict): a VpcConfig dict containing 'Subnets' and 'SecurityGroupIds'
Returns:
A valid VpcConfig dict containing only 'Subnets' and 'SecurityGroupIds' from the vpc_config parameter
If vpc_config parameter is None, returns None
Raises:
ValueError if any expectations are violated:
* vpc_config must be a non-empty dict
* vpc_config must have key `Subnets` and the value must be a non-empty list
* vpc_config must have key `SecurityGroupIds` and the value must be a non-empty list | codesearchnet |
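A hypothetical input showing the shape the sanitizer expects, assuming `sanitize` and its `to_dict` helper from the snippet above are importable (the subnet and security-group IDs are made up); unexpected keys are dropped:

```python
vpc_config = {
    "Subnets": ["subnet-0abc1234", "subnet-0def5678"],
    "SecurityGroupIds": ["sg-0123456789abcdef0"],
    "UnexpectedKey": "dropped by sanitize()",
}
clean = sanitize(vpc_config)
# clean -> {"Subnets": [...], "SecurityGroupIds": [...]} with nothing else
```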
def _get_mutation_to_unknown(self, node: cfg.CFGNode, values: list[_base.BaseValue]) -> list[function.Mutation]:
mutations = []
for v in values:
if isinstance(v, _instance_base.SimpleValue):
for name in v.instance_type_parameters:
if name in self._mutated_type_parameters:
mutations.append(function.Mutation(v, name, self.ctx.convert.create_new_unknown(node, action='type_param_' + name)))
return mutations | Mutation for making all type parameters in a list of instances "unknown".
This is used if we call a function that has mutable parameters and
multiple signatures with unknown parameters.
Args:
node: The current CFG node.
values: A list of instances of BaseValue.
Returns:
A list of function.Mutation instances. | github-repos |
def get_general_case_info(adapter, institute_id=None, slice_query=None):
general = {}
name_query = slice_query
cases = adapter.cases(owner=institute_id, name_query=name_query)
phenotype_cases = 0
causative_cases = 0
pinned_cases = 0
cohort_cases = 0
pedigree = {
1: {
'title': 'Single',
'count': 0
},
2: {
'title': 'Duo',
'count': 0
},
3: {
'title': 'Trio',
'count': 0
},
'many': {
'title': 'Many',
'count': 0
},
}
case_ids = set()
total_cases = 0
for total_cases,case in enumerate(cases,1):
if institute_id:
case_ids.add(case['_id'])
if case.get('phenotype_terms'):
phenotype_cases += 1
if case.get('causatives'):
causative_cases += 1
if case.get('suspects'):
pinned_cases += 1
if case.get('cohorts'):
cohort_cases += 1
nr_individuals = len(case.get('individuals',[]))
if nr_individuals == 0:
continue
if nr_individuals > 3:
pedigree['many']['count'] += 1
else:
pedigree[nr_individuals]['count'] += 1
general['total_cases'] = total_cases
general['phenotype_cases'] = phenotype_cases
general['causative_cases'] = causative_cases
general['pinned_cases'] = pinned_cases
general['cohort_cases'] = cohort_cases
general['pedigree'] = pedigree
general['case_ids'] = case_ids
return general | Return general information about cases
Args:
adapter(adapter.MongoAdapter)
institute_id(str)
slice_query(str): Query to filter cases to obtain statistics for.
Returns:
general(dict) | juraj-google-style |
def vcf_records(self, format_tags=None, qualified=False):
if qualified:
sample_names = self.qualified_sample_names
else:
sample_names = self.sample_names
for line in self._file_reader.read_lines():
if line.startswith("#"):
continue
vcf_record = vcf.VcfRecord.parse_record(line, sample_names)
if format_tags:
vcf_record = self.modify_format_tag(vcf_record, format_tags)
yield vcf_record | Generates parsed VcfRecord objects.
Typically called in a for loop to process each vcf record in a
VcfReader. VcfReader must be opened in advanced and closed when
complete. Skips all headers.
Args:
qualified: When True, sample names are prefixed with file name
Returns:
Parsed VcfRecord
Raises:
StopIteration: when reader is exhausted.
TypeError: if reader is closed. | juraj-google-style |
def export_node(self, n) -> Dict[(str, Union[(str, List[str])])]:
node_dict = {'name': n[0], 'units': _get_units(n[0]), 'dtype': _get_dtype(n[0]), 'arguments': list(self.predecessors(n[0]))}
if (not (n[1].get('indicators') is None)):
for indicator in n[1]['indicators'].values():
if ('dataset' in indicator.__dict__):
del indicator.__dict__['dataset']
node_dict['indicators'] = [_process_datetime(indicator.__dict__) for indicator in n[1]['indicators'].values()]
else:
node_dict['indicators'] = None
return node_dict | Return dict suitable for exporting to JSON.
Args:
n: A dict representing the data in a networkx AnalysisGraph node.
Returns:
The node dict with additional fields for name, units, dtype, and
arguments. | codesearchnet |
def launch_simulation(self, parameter):
return next(SimulationRunner.run_simulations(self, [parameter],
self.data_folder)) | Launch a single simulation, using SimulationRunner's facilities.
This function is used by ParallelRunner's run_simulations to map
simulation running over the parameter list.
Args:
parameter (dict): the parameter combination to simulate. | juraj-google-style |
def is_frozen_graph(sess):
for op in sess.graph.get_operations():
if op.type.startswith('Variable') or op.type.endswith('VariableOp'):
return False
return True | Determines if the graph is frozen.
Determines if a graph has previously been frozen by checking for any
operations of type Variable*. If variables are found, the graph is not frozen.
Args:
sess: TensorFlow Session.
Returns:
Bool. | github-repos |
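A usage sketch, assuming TF1 graph mode and reference variables (resource variables use different op names and are only partially caught by the check above).
import tensorflow as tf

tf.compat.v1.disable_eager_execution()
tf.compat.v1.disable_resource_variables()   # so variables show up as Variable* ops

with tf.Graph().as_default() as graph:
    v = tf.compat.v1.get_variable("v", shape=[1])
    out = v * 2.0
    with tf.compat.v1.Session(graph=graph) as sess:
        print(is_frozen_graph(sess))        # False: the graph still holds a variable op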
def _AssertAtLeast3DImage(image):
return control_flow_ops.with_dependencies(_CheckAtLeast3DImage(image, require_static=False), image) | Assert that we are working with a properly shaped image.
Performs the check statically if possible (i.e. if the shape
is statically known). Otherwise adds a control dependency
to an assert op that checks the dynamic shape.
Args:
image: >= 3-D Tensor of size [*, height, width, depth]
Raises:
ValueError: if image.shape is not a [>= 3] vector.
Returns:
If the shape of `image` could be verified statically, `image` is
returned unchanged, otherwise there will be a control dependency
added that asserts the correct dynamic shape. | github-repos |
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu) | Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score. | juraj-google-style |
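An illustrative call, assuming the module-level helpers used above (_get_ngrams_with_counter, np, math, xrange) are available as in the original file; each corpus entry is a single tokenized reference or translation.
references = [["the", "cat", "sat", "on", "the", "mat"]]
translations = [["the", "cat", "sat", "on", "mat"]]
print(compute_bleu(references, translations, max_order=4, use_bp=True))
# prints a float32 in [0, 1]; higher means more n-gram overlap with the reference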
def _update_service_current_state(service: ServiceState):
LOG.debug("Setting current state from target state for %s", service.id)
service.update_current_state(service.target_state) | Update the current state of a service.
Updates the current state of services after their target state has changed.
Args:
service (ServiceState): Service state object to update | juraj-google-style |
def save(self, data: dict):
with open(self.output_path, 'w') as f:
json.dump(data, f) | Save the provided data object in a json file.
Args:
data (`dict`): The data to store. | github-repos |
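A minimal usage sketch; the enclosing object (called writer here) and its output_path are assumed to have been configured elsewhere.
writer.save({"run": 1, "metrics": {"loss": 0.12}})
# afterwards, json.load(open(writer.output_path)) returns the same dict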
def init(self, basedir, config, sourcedir, targetdir, cwd='', commit=True):
if (not basedir):
basedir = '.'
(abs_basedir, abs_config, abs_sourcedir, abs_targetdir) = self.expand(basedir, config, sourcedir, targetdir, cwd)
self.valid_paths(abs_config, abs_sourcedir, abs_targetdir)
if commit:
self.commit(sourcedir, targetdir, abs_config, abs_sourcedir, abs_targetdir)
return {'basedir': abs_basedir, 'config': abs_config, 'sourcedir': abs_sourcedir, 'targetdir': abs_targetdir} | Init project structure and configuration from given arguments
Args:
basedir (string): Project base directory used to prepend relative
paths. If empty or equal to '.', it will be filled with current
directory path.
config (string): Settings file path.
sourcedir (string): Source directory path.
targetdir (string): Compiled files target directory path.
Keyword Arguments:
cwd (string): Current directory path to prepend base dir if empty.
commit (bool): If ``False``, directory structure and settings file
won't be created.
Returns:
dict: A dict containing expanded given paths. | codesearchnet |
def _ParseFieldsMetadata(self, structure):
fields = structure.fields.split(' ')
log_line_structure = pyparsing.Empty()
if fields[0] == 'date' and fields[1] == 'time':
log_line_structure += self.DATE_TIME.setResultsName('date_time')
fields = fields[2:]
for member in fields:
log_line_structure += self._LOG_LINE_STRUCTURES.get(member, self.URI)
updated_structures = []
for line_structure in self._line_structures:
if line_structure[0] != 'logline':
updated_structures.append(line_structure)
updated_structures.append(('logline', log_line_structure))
self._line_structures = updated_structures | Parses the fields metadata and updates the log line definition to match.
Args:
structure (pyparsing.ParseResults): structure parsed from the log file. | juraj-google-style |
def _project_dict(self, **kwargs: Dict[str, Any]) -> Dict[str, Hist]:
get_hist_args = copy.deepcopy(kwargs)
projection_name_args = copy.deepcopy(kwargs)
for key, input_observable in self.observable_to_project_from.items():
output_hist, projection_name, projection_name_args, = self._project_observable(
input_key = key,
input_observable = input_observable,
get_hist_args = get_hist_args,
projection_name_args = projection_name_args,
**kwargs,
)
output_hist_args = projection_name_args
output_hist_args.update({
"output_hist": output_hist,
"projection_name": projection_name
})
output_key_name = self.output_key_name(**output_hist_args)
self.output_observable[output_key_name] = self.output_hist(**output_hist_args)
return self.output_observable | Driver function for projecting and storing a dictionary of observables.
Args:
kwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...)
Returns:
The projected histograms. The projected histograms are also stored in ``output_observable``. | juraj-google-style |
def LateBind(self, target=None):
if (not issubclass(target, RDFProtoStruct)):
raise TypeError(('Field %s expects a protobuf, but target is %s' % (self, target)))
self.late_bound = False
self.type = target
self.owner.AddDescriptor(self) | Late binding callback.
This method is called on this field descriptor when the target RDFValue
class is finally defined. It gives the field descriptor an opportunity to
initialize after the point of definition.
Args:
target: The target nested class.
Raises:
TypeError: If the target class is not of the expected type. | codesearchnet |
def get_full_alias(self, query):
if (query in self.alias_table.sections()):
return query
return next((section for section in self.alias_table.sections() if (section.split()[0] == query)), '') | Get the full alias given a search query.
Args:
query: The query string to search the alias table for.
Returns:
The full alias (with the placeholders, if any). | codesearchnet |
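A self-contained sketch of the same prefix lookup, modelling the alias table as an INI file read with configparser; the section names are made up for illustration.
import configparser

table = configparser.ConfigParser()
table.read_string("[ls]\ncommand = list\n\n[grp {{ args }}]\ncommand = group\n")

def full_alias(query):
    if query in table.sections():
        return query
    return next((s for s in table.sections() if s.split()[0] == query), "")

print(full_alias("grp"))       # -> "grp {{ args }}"
print(full_alias("missing"))   # -> ""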
def delete_branch(profile, name):
ref = ('heads/' + name)
data = refs.delete_ref(profile, ref)
return data | Delete a branch.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
name
The name of the branch to delete.
Returns:
The response of the DELETE request. | codesearchnet |
def run(self, text):
for regex in self.regexes:
text = regex.sub(self.repl, text)
return text | Run each regex substitution on ``text``.
Args:
text (string): the input text.
Returns:
string: text after all substitutions have been sequentially
applied. | juraj-google-style |
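A self-contained sketch of the same idea: compile a list of patterns and apply them to the text one after another with a single replacement string (names are illustrative).
import re

class SequentialReplacer:
    def __init__(self, patterns, repl):
        self.regexes = [re.compile(p) for p in patterns]
        self.repl = repl

    def run(self, text):
        for regex in self.regexes:
            text = regex.sub(self.repl, text)
        return text

print(SequentialReplacer([r"\d+", r"\s{2,}"], " ").run("a  1  b 22"))   # digits, then runs of spaces, collapse to " "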
def get_all_artifacts_per_task_id(chain, upstream_artifacts):
all_artifacts_per_task_id = {}
for link in chain.links:
if (link.task_type in PARENT_TASK_TYPES):
add_enumerable_item_to_dict(dict_=all_artifacts_per_task_id, key=link.task_id, item='public/task-graph.json')
if (link.task_type in DECISION_TASK_TYPES):
add_enumerable_item_to_dict(dict_=all_artifacts_per_task_id, key=link.task_id, item='public/actions.json')
add_enumerable_item_to_dict(dict_=all_artifacts_per_task_id, key=link.task_id, item='public/parameters.yml')
if upstream_artifacts:
for upstream_dict in upstream_artifacts:
add_enumerable_item_to_dict(dict_=all_artifacts_per_task_id, key=upstream_dict['taskId'], item=upstream_dict['paths'])
for (task_id, paths) in all_artifacts_per_task_id.items():
all_artifacts_per_task_id[task_id] = sorted(set(paths))
return all_artifacts_per_task_id | Return every artifact to download, including the Chain Of Trust Artifacts.
Args:
chain (ChainOfTrust): the chain of trust object
upstream_artifacts: the list of upstream artifact definitions
Returns:
dict: sorted list of paths to downloaded artifacts ordered by taskId | codesearchnet |
def serialize_ndarray_b64(o):
if o.flags['C_CONTIGUOUS']:
o_data = o.data
else:
o_data = np.ascontiguousarray(o).data
data_b64 = base64.b64encode(o_data)
return dict(
_type='np.ndarray',
data=data_b64.decode('utf-8'),
dtype=o.dtype,
shape=o.shape) | Serializes a :obj:`numpy.ndarray` in a format where the datatype and shape are
human-readable, but the array data itself is base64-encoded.
Args:
o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.
Returns:
A dictionary that can be passed to :obj:`json.dumps`. | juraj-google-style |
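A round-trip sketch; the decoding side below is an assumed counterpart reconstructed from the serialized fields (dtype, shape, base64 data), not taken from the source library.
import base64
import numpy as np

arr = np.arange(6, dtype=np.float64).reshape(2, 3)
d = serialize_ndarray_b64(arr)

restored = np.frombuffer(base64.b64decode(d["data"]), dtype=d["dtype"]).reshape(d["shape"])
np.testing.assert_array_equal(arr, restored)   # the array survives the encode/decode cycle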
def rsub(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"rsub", other, axis=axis, level=level, fill_value=fill_value
) | Subtract this DataFrame from a DataFrame/Series/scalar (reversed subtraction, i.e. `other - self`).
Args:
other: The object to subtract this DataFrame from.
axis: The axis to apply the subtraction over.
level: Multilevel index level to subtract over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the subtraction applied. | juraj-google-style
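A semantics sketch using pandas, whose DataFrame.rsub shares this signature; the point of rsub is that it computes other - df rather than df - other.
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
print(df.rsub(10))                  # elementwise 10 - df
print(df.rsub(df["a"], axis=0))     # column 'a' minus each column, aligned on rows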
def get_tensor_mtf_dimension_names(self, tensor_name):
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, mtf.Tensor):
return tensor.shape.dimension_names
else:
return [] | The Mesh TensorFlow dimensions associated with a tensor.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a [string], the names of Mesh TensorFlow dimensions. | juraj-google-style |
def prune_unused_nodes(meta_graph, signature_def):
graph = tf_v1.Graph()
with graph.as_default():
tf_v1.train.import_meta_graph(meta_graph, input_map={}, import_scope="")
used_node_names = set()
for _, tensor_def in signature_def.outputs.items():
output_tensor = graph.get_tensor_by_name(tensor_def.name)
mark_backward(output_tensor, used_node_names)
node_filter_in_list = []
for node in meta_graph.graph_def.node:
if node.name in used_node_names or node.op == "VarHandleOp":
node_filter_in_list.append(node)
del meta_graph.graph_def.node[:]
meta_graph.graph_def.node.extend(node_filter_in_list)
del graph | Function to prune unused ops given a signature def.
This function does a graph traversal through from all outputs as
defined in the signature_def to collect all used nodes. Then, any
nodes which are unused can be discarded. This is useful for graphs which are
executed eagerly or on TPUs.
Args:
meta_graph: The input/output MetaGraphDef for which we wish to prune.
signature_def: A SignatureDef which specifies the outputs from which we wish
to start graph traversal. | juraj-google-style |
def __init__(self, dev_id, address, local_key=None, dev_type=None, connection_timeout=10):
self.id = dev_id
self.address = address
self.local_key = local_key.encode('latin1') if local_key is not None else None
self.dev_type = dev_type
self.connection_timeout = connection_timeout
self.port = 6668 | Represents a Tuya device.
Args:
dev_id (str): The device id.
address (str): The network address.
local_key (str, optional): The encryption key. Defaults to None.
dev_type (str, optional): The device type.
It will be used as key for lookups in payload_dict.
Defaults to None.
Attributes:
port (int): The port to connect to. | juraj-google-style |
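A hypothetical instantiation; the enclosing class name is not visible in this snippet, so Device below is only a stand-in for it.
device = Device(
    dev_id="0123456789abcdef",
    address="192.168.1.42",
    local_key="0123456789abcdef",   # local encryption key
    dev_type="device",
)
print(device.port)                  # 6668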
def NormalizePath(path, sep='/'):
if (not path):
return sep
path = SmartUnicode(path)
path_list = path.split(sep)
if (path_list[0] in ['.', '..', '']):
path_list.pop(0)
i = 0
while True:
list_len = len(path_list)
for i in range(i, len(path_list)):
if ((path_list[i] == '.') or (not path_list[i])):
path_list.pop(i)
break
elif (path_list[i] == '..'):
path_list.pop(i)
if (((i == 1) and path_list[0]) or (i > 1)):
i -= 1
path_list.pop(i)
break
if (len(path_list) == list_len):
return (sep + sep.join(path_list)) | A sane implementation of os.path.normpath.
The standard implementation treats a leading / and // as different, leading to
incorrect normal forms.
NOTE: It's OK to use a relative path here (without a leading /), but any /../ will
still be removed, anchoring the path at the top level (e.g. foo/../../../../bar
=> /bar).
Args:
path: The path to normalize.
sep: Separator used.
Returns:
A normalized path. In this context normalized means that all input paths
that would result in the system opening the same physical file will produce
the same normalized path. | codesearchnet |
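A behaviour sketch; the expected outputs below are inferred by tracing the code above (leading "/" anchoring, ".." clamped at the root), not taken from the original test suite.
print(NormalizePath("/a/b/../c"))             # -> "/a/c"
print(NormalizePath("//a/b"))                 # -> "/a/b": leading "//" collapses to "/"
print(NormalizePath("foo/../../../../bar"))   # -> "/bar": ".." never escapes the root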
def _get_structured_grad_output(outputs, grads, body_grad_graph):
result = []
outputs_idx = 3
structured_outputs_idx = 3
for g in grads:
if g is None:
result.append(None)
continue
output = body_grad_graph.structured_outputs[structured_outputs_idx]
structured_outputs_idx += 1
if isinstance(output, indexed_slices.IndexedSlices):
result.append(indexed_slices.IndexedSlices(values=outputs[outputs_idx], indices=outputs[outputs_idx + 1], dense_shape=outputs[outputs_idx + 2]))
outputs_idx += 3
else:
assert isinstance(output, tensor_lib.Tensor)
result.append(outputs[outputs_idx])
outputs_idx += 1
return result | Returns the values that should be returned from the while grad function.
Args:
outputs: the raw Tensor outputs of the grad While op.
grads: the input gradients to the gradient function.
body_grad_graph: _WhileBodyGradFuncGraph.
Returns:
A list of gradient values. May include Nones. | github-repos |
def is_compatible_with(self, spec_or_tensor):
return (self._dtype.is_compatible_with(spec_or_tensor.dtype) and
self._shape.is_compatible_with(spec_or_tensor.shape)) | Returns True if spec_or_tensor is compatible with this TensorSpec.
Two tensors are considered compatible if they have the same dtype
and their shapes are compatible (see `tf.TensorShape.is_compatible_with`).
Args:
spec_or_tensor: A tf.TensorSpec or a tf.Tensor
Returns:
True if spec_or_tensor is compatible with self. | juraj-google-style |
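A usage sketch with the public tf.TensorSpec API, which exposes this method.
import tensorflow as tf

spec = tf.TensorSpec(shape=[None, 3], dtype=tf.float32)
print(spec.is_compatible_with(tf.constant([[1.0, 2.0, 3.0]])))      # True: dtype matches, (1, 3) fits (None, 3)
print(spec.is_compatible_with(tf.TensorSpec([2, 4], tf.float32)))   # False: last dimension differs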