code (stringlengths 20 to 4.93k) | docstring (stringlengths 33 to 1.27k) | source (stringclasses 3 values) |
---|---|---|
def _ImportPythonModule(module_name):
try:
module_object = list(map(__import__, [module_name]))[0]
except ImportError:
return None
if ('.' in module_name):
for submodule_name in module_name.split('.')[1:]:
module_object = getattr(module_object, submodule_name, None)
return module_object
|
Imports a Python module.
Args:
module_name (str): name of the module.
Returns:
module: Python module or None if the module cannot be imported.
|
codesearchnet
|
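A quick sketch of the same behavior with the standard library: `importlib.import_module` resolves dotted module names directly, which is what the helper above approximates with `__import__` plus `getattr`. The module names below are only examples.
import importlib

def import_python_module(module_name):
    """Return the imported module, or None if it cannot be imported."""
    try:
        return importlib.import_module(module_name)
    except ImportError:
        return None

print(import_python_module('os.path'))      # <module 'posixpath' ...> (ntpath on Windows)
print(import_python_module('no.such.mod'))  # None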
def get_layer(self, name=None, index=None):
if index is not None and name is not None:
raise ValueError('Provide only a layer name or a layer index.')
if index is not None:
if len(self.layers) <= index:
raise ValueError('Was asked to retrieve layer at index ' + str(index) + ' but model only has ' + str(len(self.layers)) + ' layers.')
else:
return self.layers[index]
if name is not None:
for layer in self.layers:
if layer.name == name:
return layer
raise ValueError('No such layer: ' + name + '.')
raise ValueError('Provide either a layer name or layer index.')
|
Retrieves a layer based on either its name (unique) or index.
If both `name` and `index` are provided, a `ValueError` is raised.
Indices are based on order of horizontal graph traversal (bottom-up).
Args:
name: String, name of layer.
index: Integer, index of layer.
Returns:
A layer instance.
Raises:
ValueError: In case of invalid layer name or index.
|
github-repos
|
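A brief usage sketch for the lookup above, assuming TensorFlow/Keras is installed; the model and layer names are illustrative only.
import tensorflow as tf

model = tf.keras.Sequential(name='toy')
model.add(tf.keras.layers.Dense(4, name='hidden'))
model.add(tf.keras.layers.Dense(1, name='out'))

print(model.get_layer(name='out').name)   # 'out'
print(model.get_layer(index=0).name)      # 'hidden' (indices follow the order layers were added)
# model.get_layer(name='out', index=0) raises ValueError: only one selector may be given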
async def _grab_connection(self, url):
(scheme, host, _, _, _, _) = urlparse(url)
host_loc = urlunparse((scheme, host, '', '', '', ''))
sock = self._checkout_connection(host_loc)
if (sock is None):
sock = (await self._make_connection(host_loc))
return sock
|
The connection pool handler. Returns a connection
to the caller. If there are no connections ready, and
as many connections checked out as there are available total,
we yield control to the event loop.
If there is a connection ready or space to create a new one, we
pop/create it, register it as checked out, and return it.
Args:
url (str): the requested URL; only its scheme and host are used to
check whether a connection to that location is already available.
|
codesearchnet
|
def execute(self, asm_instr):
self.ir_emulator.registers[self.ip] = asm_instr.address + asm_instr.size
if self.arch_info.instr_is_syscall(asm_instr):
raise Syscall()
return self.__execute(asm_instr)
|
Execute an assembler instruction.
Args:
asm_instr (X86Instruction): An instruction to execute.
Returns:
An int. The address of the next instruction to execute.
|
juraj-google-style
|
def get_device_locations(mesh: layout_lib.Mesh, client_id: Optional[int]=None) -> List[Dict[str, int]]:
if mesh.device_type() != _TPU_DEVICE_TYPE:
raise ValueError('The mesh must be a TPU mesh')
if client_id is None or client_id == config.client_id():
return mesh.local_device_locations()
raise NotImplementedError("Looking up other clients' device locations is not supported")
|
Returns the device locations of all TPU cores local to the given client.
A device location is a dictionary from dimension names to indices on those
dimensions. For example, for a 2x2 mesh ('x', 'y'), this function returns a
permutation of this list:
[{'x': 0, 'y': 0},
{'x': 0, 'y': 1},
{'x': 1, 'y': 0},
{'x': 1, 'y': 1}].
Note that device IDs and device locations are equivalent. The former is a
linearization of the latter along mesh dimensions.
Args:
mesh: A TPU mesh.
client_id: Optional; A DTensor client ID. If empty, query this client.
|
github-repos
|
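The ID/location equivalence described above can be illustrated without TPU hardware; this sketch simply enumerates a hypothetical 2x2 ('x', 'y') mesh in row-major order.
from itertools import product

mesh_shape = {'x': 2, 'y': 2}  # illustrative mesh only
locations = [dict(zip(mesh_shape, coords))
             for coords in product(*(range(n) for n in mesh_shape.values()))]
print(locations)      # [{'x': 0, 'y': 0}, {'x': 0, 'y': 1}, {'x': 1, 'y': 0}, {'x': 1, 'y': 1}]
print(locations[3])   # device ID 3 linearizes to {'x': 1, 'y': 1}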
def _get_typed_list_value(self, key, target_type, type_convert, is_optional=False, is_secret=False, is_local=False, default=None, options=None):
value = self._get_typed_value(key=key, target_type=list, type_convert=json.loads, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options)
if (not value):
return default
raise_type = ('dict' if (target_type == Mapping) else target_type)
if (not isinstance(value, list)):
raise RheaError('Cannot convert value `{}` (key: `{}`) to `{}`'.format(value, key, raise_type))
result = []
for v in value:
if isinstance(v, six.string_types):
try:
result.append(type_convert(v))
except ValueError:
raise RheaError('Cannot convert value `{}` (found in list key: `{}`) to `{}`'.format(v, key, raise_type))
elif isinstance(v, target_type):
result.append(v)
else:
raise RheaError('Cannot convert value `{}` (found in list key: `{}`) to `{}`'.format(v, key, raise_type))
return result
|
Return the value corresponding to the key, converted first to a list
and then each element to the given type.
Args:
key: the dict key.
target_type: The type we expect the variable or key to be in.
type_convert: A lambda expression that converts the key to the desired type.
is_optional: To raise an error if key was not found.
is_secret: If the key is a secret.
is_local: If the key is a local to this service.
default: default value if is_optional is True.
options: list/tuple if provided, the value must be one of these values.
|
codesearchnet
|
def nCr(n, r):
f = math.factorial
return f(n) // (f(r) * f(n - r))  # integer division avoids float precision loss for large n
|
Calculates nCr.
Args:
n (int): total number of items.
r (int): items to choose
Returns:
nCr.
|
juraj-google-style
|
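As a sanity check on the helper above (and the integer-division fix), `math.comb` computes the same binomial coefficients exactly on Python 3.8+.
import math

assert math.comb(5, 2) == 10
assert math.comb(52, 5) == 2598960   # number of 5-card poker hands
# math.comb avoids the precision loss that float division can introduce for large n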
def prepare_adiabatic_limit(slh, k=None):
if k is None:
k = symbols('k', positive=True)
Ld = slh.L.dag()
LdL = (Ld * slh.L)[0, 0]
K = (-LdL / 2 + I * slh.H).expand().simplify_scalar()
N = slh.S.dag()
B, A, Y = K.series_expand(k, 0, 2)
G, F = Ld.series_expand(k, 0, 1)
return Y, A, B, F, G, N
|
Prepare the adiabatic elimination on an SLH object
Args:
slh: The SLH object to take the limit for
k: The scaling parameter $k \rightarrow \infty$. The default is a
positive symbol 'k'
Returns:
tuple: The objects ``Y, A, B, F, G, N``
necessary to compute the limiting system.
|
juraj-google-style
|
def assert_stmt(expression1, expression2):
if not callable(expression2):
raise ValueError('{} must be a callable'.format(expression2))
args, _, keywords, _ = tf_inspect.getargspec(expression2)
if args or keywords:
raise ValueError('{} may not have any arguments'.format(expression2))
if tensor_util.is_tf_type(expression1):
return _tf_assert_stmt(expression1, expression2)
else:
return _py_assert_stmt(expression1, expression2)
|
Functional form of an assert statement.
This follows the semantics of the Python assert statement, however the
concrete implementations may deviate from it. See the respective
implementation for details.
In general, the assert statement should not be used for control flow.
Furthermore, it is encouraged that the assertion expressions should not have
side effects.
Args:
expression1: Any
expression2: Callable[[], Any], returns the expression to include in the
error message when expression1 evaluates to False. When expression1 is
True, the result of expression2 will not be evaluated, however,
expression2 itself may be evaluated in some implementations.
Returns:
Any, implementation-dependent.
Raises:
ValueError: if any arguments are illegal.
|
github-repos
|
def find_library_linux(cls):
dll = Library.JLINK_SDK_NAME
root = os.path.join('/', 'opt', 'SEGGER')
for (directory_name, subdirs, files) in os.walk(root):
fnames = []
x86_found = False
for f in files:
path = os.path.join(directory_name, f)
if os.path.isfile(path) and f.startswith(dll):
fnames.append(f)
if '_x86' in path:
x86_found = True
for fname in fnames:
fpath = os.path.join(directory_name, fname)
if util.is_os_64bit():
if '_x86' not in fname:
yield fpath
elif x86_found:
if '_x86' in fname:
yield fpath
else:
yield fpath
|
Loads the SEGGER DLL from the root directory.
On Linux, the SEGGER tools are installed under the ``/opt/SEGGER``
directory with versioned directories having the suffix ``_VERSION``.
Args:
cls (Library): the ``Library`` class
Yields:
str: path to each J-Link library file, in the order in which it is
found.
|
juraj-google-style
|
def generate_nb_state_data(means, weights, R):
cells = weights.shape[1]
x_true = np.dot(means, weights)
R_ = np.tile(R, (cells, 1)).T
P_true = x_true/(R_ + x_true)
sample = np.random.negative_binomial(np.tile(R, (cells, 1)).T, P_true)
return sample.astype(float)
|
Generates data according to the Negative Binomial Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
R (array): dispersion parameter - 1 x genes
Returns:
data matrix - genes x cells
|
juraj-google-style
|
def validate_arg_values(ast, bo):
if not bo.api_url:
log.info("No API endpoint defined")
return bo
log.debug(f"AST: {ast}")
if isinstance(ast, NSArg):
term_id = "{}:{}".format(ast.namespace, ast.value)
value_types = ast.value_types
log.debug(f"Value types: {value_types} AST value: {ast.value}")
if ast.namespace == "DEFAULT":
for value_type in value_types:
default_namespace = [
ns["name"] for ns in bo.spec["namespaces"][value_type]["info"]
] + [
ns["abbreviation"]
for ns in bo.spec["namespaces"][value_type]["info"]
]
if ast.value in default_namespace:
log.debug("Default namespace valid term: {}".format(term_id))
break
else:
log.debug("Default namespace invalid term: {}".format(term_id))
bo.validation_messages.append(
("WARNING", f"Default Term: {term_id} not found")
)
else:
request_url = bo.api_url + "/terms/{}".format(
url_path_param_quoting(term_id)
)
log.info(f"Validate Arg Values url {request_url}")
r = get_url(request_url)
if r and r.status_code == 200:
result = r.json()
log.debug(
f'AST.value_types {ast.value_types} Entity types {result.get("entity_types", [])}'
)
if (
len(
set(ast.value_types).intersection(
result.get("entity_types", [])
)
)
== 0
):
log.debug(
"Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format(
term_id, ast.value_types, result.get("entity_types", [])
)
)
bo.validation_messages.append(
(
"WARNING",
"Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format(
term_id, ast.value_types, result.get("entity_types", [])
),
)
)
if term_id in result.get("obsolete_ids", []):
bo.validation_messages.append(
(
"WARNING",
f'Obsolete term: {term_id} Current term: {result["id"]}',
)
)
elif r.status_code == 404:
bo.validation_messages.append(
("WARNING", f"Term: {term_id} not found in namespace")
)
else:
log.error(f"Status {r.status_code} - Bad URL: {request_url}")
if isinstance(ast, StrArg):
log.debug(f" Check String Arg: {ast.value} {ast.value_types}")
for value_type in ast.value_types:
if re.match("/", value_type):
value_type = re.sub("^/", "", value_type)
value_type = re.sub("/$", "", value_type)
match = re.match(value_type, ast.value)
if match:
break
if value_type in bo.spec["namespaces"]:
default_namespace = [
ns["name"] for ns in bo.spec["namespaces"][value_type]["info"]
] + [
ns["abbreviation"]
for ns in bo.spec["namespaces"][value_type]["info"]
]
if ast.value in default_namespace:
break
else:
bo.validation_messages.append(
(
"WARNING",
f"String value {ast.value} does not match default namespace value or regex pattern: {ast.value_types}",
)
)
if hasattr(ast, "args"):
for arg in ast.args:
validate_arg_values(arg, bo)
return bo
|
Recursively validate arg (NSArg and StrArg) values
Check that NSArgs are found in BELbio API and match appropriate entity_type.
Check that StrArgs match their value - either default namespace or regex string
Generate a WARNING if not.
Args:
ast: AST node (e.g. NSArg or StrArg) or parent object to validate
bo: bel object
Returns:
bel object
|
juraj-google-style
|
def sspro8_summary(self):
summary = {}
records = ssbio.protein.sequence.utils.fasta.load_fasta_file(self.out_sspro8)
for r in records:
seq_summary = {}
seq_summary['percent_H-sspro8'] = (r.seq.count('H') / float(len(r)))
seq_summary['percent_G-sspro8'] = (r.seq.count('G') / float(len(r)))
seq_summary['percent_I-sspro8'] = (r.seq.count('I') / float(len(r)))
seq_summary['percent_E-sspro8'] = (r.seq.count('E') / float(len(r)))
seq_summary['percent_B-sspro8'] = (r.seq.count('B') / float(len(r)))
seq_summary['percent_T-sspro8'] = (r.seq.count('T') / float(len(r)))
seq_summary['percent_S-sspro8'] = (r.seq.count('S') / float(len(r)))
seq_summary['percent_C-sspro8'] = (r.seq.count('C') / float(len(r)))
summary[r.id] = seq_summary
return summary
|
Parse the SSpro8 output file and return a summary of secondary structure composition.
The output file is just a FASTA formatted file, so you can get residue level
information by parsing it like a normal sequence file.
Returns:
dict: Percentage of:
H: alpha-helix
G: 310-helix
I: pi-helix (extremely rare)
E: extended strand
B: beta-bridge
T: turn
S: bend
C: the rest
|
codesearchnet
|
def _sort_scores_and_boxes(scores, boxes):
with ops.name_scope('sort_scores_and_boxes'):
sorted_scores_indices = sort_ops.argsort(scores, axis=1, direction='DESCENDING')
sorted_scores = array_ops.gather(scores, sorted_scores_indices, axis=1, batch_dims=1)
sorted_boxes = array_ops.gather(boxes, sorted_scores_indices, axis=1, batch_dims=1)
return (sorted_scores, sorted_boxes, sorted_scores_indices)
|
Sort boxes based on their score from highest to lowest.
Args:
scores: a tensor with a shape of [batch_size, num_boxes] representing
the scores of boxes.
boxes: a tensor with a shape of [batch_size, num_boxes, 4] representing
the boxes.
Returns:
sorted_scores: a tensor with a shape of [batch_size, num_boxes]
representing the sorted scores.
sorted_boxes: a tensor representing the sorted boxes.
sorted_scores_indices: a tensor with a shape of [batch_size, num_boxes]
representing the index of the scores in a sorted descending order.
|
github-repos
|
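A minimal sketch of the same per-batch sort using public TensorFlow ops (`tf.argsort` plus `tf.gather` with `batch_dims=1`); the tensors are toy values and TensorFlow is assumed to be installed.
import tensorflow as tf

scores = tf.constant([[0.1, 0.9, 0.5]])
boxes = tf.constant([[[0., 0., 1., 1.], [1., 1., 2., 2.], [2., 2., 3., 3.]]])
idx = tf.argsort(scores, axis=1, direction='DESCENDING')
sorted_scores = tf.gather(scores, idx, axis=1, batch_dims=1)
sorted_boxes = tf.gather(boxes, idx, axis=1, batch_dims=1)
print(sorted_scores.numpy())       # [[0.9 0.5 0.1]]
print(sorted_boxes.numpy()[0, 0])  # [1. 1. 2. 2.], the box with the highest score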
def update(self, **kwargs):
kwargs = self._preprocess_params(kwargs)
kwargs = self.preprocess_kwargs_before_update(kwargs)
for (key, value) in kwargs.items():
cls = type(self)
if ((not hasattr(cls, key)) or isinstance(getattr(cls, key), property)):
continue
if (key not in self._no_overwrite_):
setattr(self, key, value)
if isinstance(getattr(self, key), OrderingList):
getattr(self, key).reorder()
elif isinstance(getattr(cls, key), AssociationProxyInstance):
target_name = getattr(cls, key).target_collection
target_rel = getattr(self, target_name)
if isinstance(target_rel, OrderingList):
target_rel.reorder()
try:
self.session.commit()
return self
except Exception as e:
self.session.rollback()
raise e
|
Updates an instance.
Args:
**kwargs : Arbitrary keyword arguments. Column names are
keywords and their new values are the values.
Examples:
>>> customer.update(email="newemail@x.com", name="new")
|
codesearchnet
|
def get_voltage(self, cycle=None, dataset_number=None, full=True):
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
voltage_header = self.headers_normal.voltage_txt
test = self.datasets[dataset_number].dfdata
if cycle:
self.logger.debug("getting voltage curve for cycle")
c = test[(test[cycle_index_header] == cycle)]
if not self.is_empty(c):
v = c[voltage_header]
return v
else:
if not full:
self.logger.debug(
"getting list of voltage-curves for all cycles"
)
v = []
no_cycles = np.amax(test[cycle_index_header])
for j in range(1, no_cycles + 1):
txt = "Cycle %i: " % j
self.logger.debug(txt)
c = test[(test[cycle_index_header] == j)]
v.append(c[voltage_header])
else:
self.logger.debug("getting frame of all voltage-curves")
v = test[voltage_header]
return v
|
Returns voltage (in V).
Args:
cycle: cycle number (all cycles if None)
dataset_number: first dataset if None
full: valid only for cycle=None (i.e. all cycles), returns the full
pandas.Series if True, else a list of pandas.Series
Returns:
pandas.Series (or list of pandas.Series if cycle=None og full=False)
|
juraj-google-style
|
def library_line(self, file_name):
gulplib_set = (lambda : ('GULP_LIB' in os.environ.keys()))
readable = (lambda f: (os.path.isfile(f) and os.access(f, os.R_OK)))
gin = ''
(dirpath, fname) = os.path.split(file_name)
if (dirpath and readable(file_name)):
gin = ('library ' + file_name)
else:
fpath = os.path.join(os.getcwd(), file_name)
if readable(fpath):
gin = ('library ' + fpath)
elif gulplib_set():
fpath = os.path.join(os.environ['GULP_LIB'], file_name)
if readable(fpath):
gin = ('library ' + file_name)
if gin:
return (gin + '\n')
else:
raise GulpError('GULP Library not found')
|
Specifies GULP library file to read species and potential parameters.
If using library don't specify species and potential
in the input file and vice versa. Make sure the elements of
structure are in the library file.
Args:
file_name: Name of GULP library file
Returns:
GULP input string specifying library option
|
codesearchnet
|
def worker(url_key, property_name, function, function_arguments):
error_msg = None
try:
data = function(*function_arguments)
except Exception as e:
data = []
error_msg = ('Error: ' + traceback.format_exc().strip())
error_msg += ('\n' + str(e.message))
if error_msg:
logger.error(error_msg)
error_msg = None
func_name = str(function.__name__)
logger.info(('Attempting to save output from `%s`.' % func_name))
return _save_to_database(url=url_key, property_name=property_name, data=data)
|
This function usually runs as process on the background.
It runs ``function(*function_arguments)`` and then stores them in REST API
storage.
Warning:
This function puts the data into the DB instead of returning it.
Args:
url_key (str): Key which will be used for database lookup.
property_name (str): Name of the property used to store data.
function (obj): Function used to load the data.
function_arguments (list): List of parameters for function which will
be called to retrieve data.
error_log_path (str): If set, log errors into this file, otherwise
stderr.
|
codesearchnet
|
def keywords_special_characters(keywords):
invalid_chars = '!\"'  # NOTE: the remaining disallowed characters are truncated in this dump
if any(char in invalid_chars for char in keywords):
raise ValidationError(MESSAGE_KEYWORD_SPECIAL_CHARS)
|
Confirms that the keywords don't contain special characters
Args:
keywords (str)
Raises:
django.forms.ValidationError
|
juraj-google-style
|
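Because the character set above is truncated in this dump, here is a standalone sketch of the same `any(...)` membership check with an explicit, purely illustrative set of disallowed characters.
INVALID_CHARS = set('!"#$%&*')  # illustrative only; the real application defines its own set

def has_special_characters(keywords: str) -> bool:
    """Return True if any disallowed character appears in the keywords string."""
    return any(char in INVALID_CHARS for char in keywords)

assert has_special_characters('python!tutorial')
assert not has_special_characters('python tutorial')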
def _TerminateProcessByPid(self, pid):
self._RaiseIfNotRegistered(pid)
process = self._processes_per_pid[pid]
self._TerminateProcess(process)
self._StopMonitoringProcess(process)
|
Terminate a process that's monitored by the engine.
Args:
pid (int): process identifier (PID).
Raises:
KeyError: if the process is not registered with and monitored by the
engine.
|
codesearchnet
|
def group_pairs(blocks, layout_blocks_list):
image_dict={}
for block_id in layout_blocks_list:
image_seq=blocks[block_id].ec_hdr.image_seq
if image_seq not in image_dict:
image_dict[image_seq]=[block_id]
else:
image_dict[image_seq].append(block_id)
log(group_pairs, 'Layout blocks found at PEBs: %s' % list(image_dict.values()))
return list(image_dict.values())
|
Sort a list of layout blocks into pairs
Arguments:
List:blocks -- List of block objects
List:layout_blocks -- List of layout block indexes
Returns:
List -- Layout block pair indexes grouped in a list
|
juraj-google-style
|
def scheduler(self, sleep_time=0.2):
while self.listening:
if self.scheduled_calls:
timestamp = time.time()
self.scheduled_calls[:] = [item for item in self.scheduled_calls if (not self.time_reached(timestamp, item))]
time.sleep(sleep_time)
logger.info('Shutting down the call scheduler...')
|
Starts the scheduler to check for scheduled calls and execute them
at the correct time.
Args:
sleep_time (float): The amount of time to wait in seconds between
each loop iteration. This prevents the scheduler from consuming
100% of the host's CPU. Defaults to 0.2 seconds.
Returns:
None
|
codesearchnet
|
def get_values(self, field_name: str) -> List[object]:
result = list()
if self.validate_field(field_name):
for value_key in self._kg.get(field_name):
result.append(value_key['value'])
return result
|
Get a list of all the values of a field.
Args:
field_name:
Returns: the list of values (not the keys)
|
codesearchnet
|
def split_field_path(path):
if not path:
return []
elements = []
want_dot = False
for element in _tokenize_field_path(path):
if want_dot:
if element != ".":
raise ValueError("Invalid path: {}".format(path))
else:
want_dot = False
else:
if element == ".":
raise ValueError("Invalid path: {}".format(path))
elements.append(element)
want_dot = True
if not want_dot or not elements:
raise ValueError("Invalid path: {}".format(path))
return elements
|
Split a field path into valid elements (without dots).
Args:
path (str): field path to be lexed.
Returns:
List(str): tokens
Raises:
ValueError: if the path does not match the elements-interspersed-
with-dots pattern.
|
juraj-google-style
|
def predict_task(self, X, t=0, break_ties='random', **kwargs):
Y_tp = self.predict_task_proba(X, t=t, **kwargs)
Y_tph = self._break_ties(Y_tp, break_ties)
return Y_tph
|
Predicts int labels for an input X on task t
Args:
X: The input for the predict_task_proba method
t: The task index to predict
Returns:
An n-dim tensor of int predictions for the specified task
|
codesearchnet
|
def export_artifacts(self, processed_artifacts, sketch_id):
for (timeline_name, artifact_path) in processed_artifacts:
print('Uploading {0:s} to timeline {1:s}'.format(artifact_path, timeline_name))
new_timeline_id = self.upload_timeline(timeline_name, artifact_path)
self.add_timeline_to_sketch(sketch_id, new_timeline_id)
return sketch_id
|
Upload provided artifacts to specified, or new if non-existent, sketch.
Args:
processed_artifacts: List of (timeline_name, artifact_path) tuples
sketch_id: ID of sketch to append the timeline to
Returns:
int: ID of sketch.
|
codesearchnet
|
async def skip(self, query='1'):
if (not (self.state == 'ready')):
logger.debug("Trying to skip from wrong state '{}'".format(self.state))
return
if (query == ''):
query = '1'
elif (query == 'all'):
query = str((len(self.queue) + 1))
try:
num = int(query)
except TypeError:
self.statuslog.error('Skip argument must be a number')
except ValueError:
self.statuslog.error('Skip argument must be a number')
else:
self.statuslog.info('Skipping')
for i in range((num - 1)):
if (len(self.queue) > 0):
self.prev_queue.append(self.queue.pop(0))
try:
self.streamer.stop()
except Exception as e:
logger.exception(e)
|
The skip command
Args:
query (str): The number of items to skip
|
codesearchnet
|
def __init__(self, win_registry):
if not win_registry:
raise ValueError('Missing Windows Registry value.')
super(WinRegistrySearcher, self).__init__()
self._win_registry = win_registry
|
Initializes a Windows Registry searcher.
Args:
win_registry (WinRegistry): Windows Registry.
Raises:
ValueError: when Windows Registry is not set.
|
juraj-google-style
|
def update_plot_limits(ax, white_space):
if hasattr(ax, 'zz_dataLim'):
bounds = ax.xy_dataLim.bounds
ax.set_xlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)
ax.set_ylim(bounds[1] - white_space, bounds[1] + bounds[3] + white_space)
bounds = ax.zz_dataLim.bounds
ax.set_zlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)
else:
bounds = ax.dataLim.bounds
assert not any(map(np.isinf, bounds)), 'Cannot set bounds if dataLim has infinite elements'
ax.set_xlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)
ax.set_ylim(bounds[1] - white_space, bounds[1] + bounds[3] + white_space)
|
Sets the limit options of a matplotlib plot.
Args:
ax: matplotlib axes
white_space(float): whitespace added to surround the tight limit of the data
Note: This relies on ax.dataLim (in 2d) and ax.[xy, zz]_dataLim being set in 3d
|
juraj-google-style
|
def order_nodes(nodes: Sequence[_OrderableNode]) -> list[_OrderableNode]:
if not nodes:
return []
root = nodes[0]
predecessor_map = compute_predecessors(nodes)
dead = {node for node, predecessors in predecessor_map.items() if root not in predecessors}
queue = {root: predecessor_map[root]}
order = []
seen = set()
while queue:
_, _, node = min(((len(predecessors), node.id, node) for node, predecessors in queue.items()))
del queue[node]
if node in seen:
continue
order.append(node)
seen.add(node)
for _, predecessors in queue.items():
predecessors.discard(node)
for n in node.outgoing:
if n not in queue:
queue[n] = predecessor_map[n] - seen
assert len(set(order) | dead) == len(set(nodes))
return order
|
Build an ancestors first traversal of CFG nodes.
This guarantees that at least one predecessor of a block is scheduled before
the block itself, and it also tries to schedule as many of them before the
block as possible (so e.g. if two branches merge in a node, it prefers to
process both the branches before that node).
Args:
nodes: A list of nodes or blocks. They have two attributes: "id" (an int to
enable deterministic sorting) and "outgoing" (a list of nodes).
Returns:
A list of nodes in the proper order.
|
github-repos
|
def list_vms(access_token, subscription_id, resource_group):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachines',
'?api-version=', COMP_API])
return do_get(endpoint, access_token)
|
List VMs in a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
Returns:
HTTP response. JSON body of a list of VM model views.
|
juraj-google-style
|
def play(env, transpose=True, fps=30, nop_=0):
assert isinstance(env.observation_space, gym.spaces.box.Box)
obs_s = env.observation_space
is_bw = (len(obs_s.shape) == 2)
is_rgb = ((len(obs_s.shape) == 3) and (obs_s.shape[2] in [1, 3]))
assert (is_bw or is_rgb)
if hasattr(env, 'get_keys_to_action'):
keys_to_action = env.get_keys_to_action()
elif hasattr(env.unwrapped, 'get_keys_to_action'):
keys_to_action = env.unwrapped.get_keys_to_action()
else:
raise ValueError('env has no get_keys_to_action method')
relevant_keys = set(sum(map(list, keys_to_action.keys()), []))
video_size = (env.observation_space.shape[0], env.observation_space.shape[1])
if transpose:
video_size = tuple(reversed(video_size))
pressed_keys = []
running = True
env_done = True
flags = ((pygame.RESIZABLE | pygame.HWSURFACE) | pygame.DOUBLEBUF)
screen = pygame.display.set_mode(video_size, flags)
pygame.event.set_blocked(pygame.MOUSEMOTION)
if (env.spec is not None):
pygame.display.set_caption(env.spec.id)
else:
pygame.display.set_caption('nes-py')
clock = pygame.time.Clock()
while running:
if env_done:
env_done = False
obs = env.reset()
else:
action = keys_to_action.get(tuple(sorted(pressed_keys)), nop_)
(obs, rew, env_done, info) = env.step(action)
if (obs is not None):
if (len(obs.shape) == 2):
obs = obs[:, :, None]
if (obs.shape[2] == 1):
obs = obs.repeat(3, axis=2)
display_arr(screen, obs, video_size, transpose)
for event in pygame.event.get():
if (event.type == pygame.KEYDOWN):
if (event.key in relevant_keys):
pressed_keys.append(event.key)
elif (event.key == 27):
running = False
elif (event.key == ord('e')):
env.unwrapped._backup()
elif (event.key == ord('r')):
env.unwrapped._restore()
elif (event.type == pygame.KEYUP):
if (event.key in relevant_keys):
pressed_keys.remove(event.key)
elif (event.type == pygame.QUIT):
running = False
pygame.display.flip()
clock.tick(fps)
pygame.quit()
|
Play the game using the keyboard as a human.
Args:
env (gym.Env): the environment to use for playing
transpose (bool): whether to transpose frame before viewing them
fps (int): number of steps of the environment to execute every second
nop_ (any): the object to use as a null op action for the environment
Returns:
None
|
codesearchnet
|
class PerceiverAudioPreprocessor(AbstractPreprocessor):
def __init__(self, config, prep_type: str='patches', samples_per_patch: int=96, position_encoding_type: str='fourier', concat_or_add_pos: str='concat', out_channels=64, project_pos_dim=-1, **position_encoding_kwargs):
super().__init__()
self.config = config
if prep_type not in ('patches',):
raise ValueError(f"Prep_type {prep_type} is invalid, can only be 'patches'.")
if concat_or_add_pos not in ['concat', 'add']:
raise ValueError(f"Concat_or_pos {concat_or_add_pos} is invalid, can only be 'concat' or 'add'.")
self.samples_per_patch = samples_per_patch
self.position_encoding_type = position_encoding_type
self.concat_or_add_pos = concat_or_add_pos
self.project_pos_dim = project_pos_dim
self.position_embeddings, self.positions_projection = build_position_encoding(position_encoding_type=position_encoding_type, out_channels=out_channels, project_pos_dim=project_pos_dim, **position_encoding_kwargs)
@property
def num_channels(self) -> int:
if self.project_pos_dim > 0:
pos_dim = self.project_pos_dim
else:
pos_dim = self.position_embeddings.output_size()
if self.concat_or_add_pos == 'add':
return pos_dim
return self.samples_per_patch + pos_dim
def _build_network_inputs(self, inputs):
batch_size = inputs.shape[0]
index_dims = inputs.shape[1:-1]
if self.position_encoding_type == 'trainable':
pos_enc = self.position_embeddings(batch_size)
elif self.position_encoding_type == 'fourier':
pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype)
pos_enc = self.positions_projection(pos_enc)
if self.concat_or_add_pos == 'concat':
inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1)
elif self.concat_or_add_pos == 'add':
inputs_with_pos = inputs + pos_enc
return (inputs_with_pos, inputs)
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, network_input_is_1d: bool=True, interpolate_pos_encoding: bool=False):
inputs = torch.reshape(inputs, [inputs.shape[0], -1, self.samples_per_patch])
inputs, inputs_without_pos = self._build_network_inputs(inputs)
modality_sizes = None
return (inputs, modality_sizes, inputs_without_pos)
|
Audio preprocessing for Perceiver Encoder.
Args:
config ([*PerceiverConfig*]):
Model configuration.
prep_type (`str`, *optional*, defaults to `"patches"`):
Preprocessor type to use. Only "patches" is supported.
samples_per_patch (`int`, *optional*, defaults to 96):
Number of samples per patch.
position_encoding_type (`str`, *optional*, defaults to `"fourier"`):
Type of position encoding to use. Can be "trainable" or "fourier".
concat_or_add_pos (`str`, *optional*, defaults to `"concat"`):
How to concatenate the position encoding to the input. Can be "concat" or "add".
out_channels (`int`, *optional*, defaults to 64):
Number of channels in the output.
project_pos_dim (`int`, *optional*, defaults to -1):
Dimension of the position encoding to project to. If -1, no projection is applied.
**position_encoding_kwargs (`Dict`, *optional*):
Keyword arguments for the position encoding.
|
github-repos
|
def get_node_sum(self, age=None):
if (age is None):
age = self.age
return (age if (self.comp == 1) else int(((pow(self.comp, (age + 1)) - 1) / (self.comp - 1))))
|
Get sum of all branches in the tree.
Args:
age (int): Age up to which nodes are counted; if None, the tree's own age is used.
Returns:
int: The sum of all nodes grown until the age.
|
codesearchnet
|
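The closed form above is the geometric series sum; a quick check for a hypothetical full binary tree (comp=2) of age 3:
comp, age = 2, 3
node_count = sum(comp ** k for k in range(age + 1))    # 1 + 2 + 4 + 8
closed_form = (comp ** (age + 1) - 1) // (comp - 1)    # (2**4 - 1) / (2 - 1)
assert node_count == closed_form == 15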
def expand(self, *args, **kwargs):
if args:
if (len(args) == 1):
data_dict = args[0]
trace = kwargs.get('trace')
style = kwargs.get('style')
else:
raise TypeError(('expand() only takes 1 positional argument (got %s)' % args))
else:
data_dict = kwargs
trace = None
style = None
tokens = []
group = _MakeGroupFromRootSection(self._program, self.undefined_str)
if style:
style.execute(data_dict, tokens.append, group=group, trace=trace)
else:
self.execute(data_dict, tokens.append, group=group, trace=trace)
return JoinTokens(tokens)
|
Expands the template with the given data dictionary, returning a string.
This is a small wrapper around execute(), and is the most convenient
interface.
Args:
data_dict: The JSON data dictionary. Like the builtin dict() constructor,
it can take a single dictionary as a positional argument, or arbitrary
keyword arguments.
trace: Trace object for debugging
style: Template instance to be treated as a style for this template (the
"outside")
Returns:
The return value could be a str() or unicode() instance, depending on the
the type of the template string passed in, and what the types the strings
in the dictionary are.
|
codesearchnet
|
def _close_rpc_interface(self, connection_id, callback):
try:
context = self.connections.get_context(connection_id)
except ArgumentError:
callback(connection_id, self.id, False, 'Could not find connection information')
return
self.connections.begin_operation(connection_id, 'close_interface', callback, self.get_config('default_timeout'))
try:
service = context['services'][TileBusService]
header_characteristic = service[ReceiveHeaderChar]
payload_characteristic = service[ReceivePayloadChar]
except KeyError:
self.connections.finish_operation(connection_id, False, "Can't find characteristics to open rpc interface")
return
self.bable.set_notification(enabled=False, connection_handle=context['connection_handle'], characteristic=header_characteristic, on_notification_set=[self._on_interface_closed, context, payload_characteristic], timeout=1.0)
|
Disable RPC interface for this IOTile device
Args:
connection_id (int): The unique identifier for the connection
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason)
|
codesearchnet
|
def set_invite_only(self, invite_only):
join_rule = ('invite' if invite_only else 'public')
try:
self.client.api.set_join_rule(self.room_id, join_rule)
self.invite_only = invite_only
return True
except MatrixRequestError:
return False
|
Set how the room can be joined.
Args:
invite_only(bool): If True, users will have to be invited to join
the room. If False, anyone who knows the room link can join.
Returns:
True if successful, False if not
|
codesearchnet
|
def add_data(self, data):
if (self.state == self.ErrorState):
return
self.raw_data += bytearray(data)
still_processing = True
while still_processing:
still_processing = self.process_data()
|
Add data to our stream, emitting reports as each new one is seen
Args:
data (bytearray): A chunk of new data to add
|
codesearchnet
|
def occurrence(self, indicator=None):
self._request_entity = 'fileOccurrence'
self._request_uri = '{}/fileOccurrences'.format(self._request_uri)
if (indicator is not None):
self._request_uri = '{}/{}/fileOccurrences'.format(self._api_uri, indicator)
|
Update the URI to retrieve file occurrences for the provided indicator.
Args:
indicator (string): The indicator to retrieve file occurrences.
|
codesearchnet
|
def export(self, filepath, encoding="utf-8", gzipped=True):
data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
write_file(filepath, encoding, gzipped, data)
|
Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not
|
juraj-google-style
|
def set_query_parameter(url, param_name, param_value):
(scheme, netloc, path, query_string, fragment) = urlsplit(url)
query_params = parse_qs(query_string)
query_params[param_name] = [param_value]
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
|
Given a URL, set or replace a query parameter and return the modified URL.
Args:
url: a given URL
param_name: the parameter name to add
param_value: the parameter value
Returns:
URL with the added parameter
|
codesearchnet
|
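A quick usage example of the function above (assuming it is importable); the URL is illustrative.
url = 'https://example.com/search?q=old&page=2'
print(set_query_parameter(url, 'q', 'new'))
# https://example.com/search?q=new&page=2
print(set_query_parameter(url, 'lang', 'en'))
# https://example.com/search?q=old&page=2&lang=en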
def create(self, *args, **kwargs):
data = self.get_data('floating_ips/',
type=POST,
params={'droplet_id': self.droplet_id})
if data:
self.ip = data['floating_ip']['ip']
self.region = data['floating_ip']['region']
return self
|
Creates a FloatingIP and assigns it to a Droplet.
Note: Every argument and parameter given to this method will be
assigned to the object.
Args:
droplet_id: int - droplet id
|
juraj-google-style
|
def thumbnail(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = (size['height'], size['width'])
height = min(input_height, output_height)
width = min(input_width, output_width)
if height == input_height and width == input_width:
return image
if input_height > input_width:
width = int(input_width * height / input_height)
elif input_width > input_height:
height = int(input_height * width / input_width)
return resize(image, size=(height, width), resample=resample, reducing_gap=2.0, data_format=data_format, input_data_format=input_data_format, **kwargs)
|
Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any
corresponding dimension of the specified size.
Args:
image (`np.ndarray`):
The image to be resized.
size (`Dict[str, int]`):
The size `{"height": h, "width": w}` to resize the image to.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
The resampling filter to use.
data_format (`Optional[Union[str, ChannelDimension]]`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
|
github-repos
|
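The same aspect-preserving shrink can be sketched with Pillow (assumed installed); the processor above works on numpy arrays instead, but the sizing rule is identical: no output dimension exceeds the requested size.
from PIL import Image

img = Image.new('RGB', (1920, 1080))
img.thumbnail((640, 640))   # in-place and aspect-preserving
print(img.size)             # (640, 360): the longer side is capped at 640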
def _read_transitions(self):
states = []
i = 0
regex = re.compile('[ \t\n\r:,]+')
found = 0
state = 0
substate = 0
mapping = []
cur_line = None
with open(self.outfile) as flex_file:
for cur_line in flex_file:
if cur_line[0:35] == "static yyconst flex_int16_t yy_nxt[" or cur_line[0:33] == "static const flex_int16_t yy_nxt[":
found = 1
continue
if found == 1:
if state == 0 and cur_line[0:5] == " {":
state = 1
continue
if state == 1 and cur_line[0:7] == " } ;":
state = 0
break
if substate == 0 and cur_line[0:5] == " {":
mapping = []
substate = 1
continue
if substate == 1:
if cur_line[0:6] != " },":
cur_line = "".join(cur_line.split())
if cur_line == '':
continue
if cur_line[-1] == ',':
splitted_line = regex.split(cur_line[:-1])
else:
splitted_line = regex.split(cur_line)
mapping = mapping + splitted_line
continue
else:
cleared = []
for j in mapping:
cleared.append(int(j))
states.append(cleared)
mapping = []
substate = 0
return states
|
Read DFA transitions from flex compiled file
Args:
None
Returns:
list: The list of states and the destination for a character
|
juraj-google-style
|
def cctop_save_xml(jobid, outpath):
status = cctop_check_status(jobid=jobid)
if (status == 'Finished'):
result = 'http:'  # NOTE: the full CCTOP results URL is truncated in this dump
result_text = requests.post(result)
with open(outpath, 'w') as f:
f.write(result_text.text)
return outpath
else:
raise ConnectionRefusedError('CCTOP job incomplete, status is "{}"'.format(status))
|
Save the CCTOP results file in XML format.
Args:
jobid (str): Job ID obtained when job was submitted
outpath (str): Path to output filename
Returns:
str: Path to output filename
|
codesearchnet
|
def enable_beacon(name, **kwargs):
ret = {'comment': [],
'result': True}
if not name:
ret['comment'] = 'Beacon name is required.'
ret['result'] = False
return ret
if 'test' in kwargs and kwargs['test']:
ret['comment'] = 'Beacon {0} would be enabled.'.format(name)
else:
_beacons = list_(return_yaml=False, **kwargs)
if name not in _beacons:
ret['comment'] = 'Beacon {0} is not currently configured.' \
''.format(name)
ret['result'] = False
return ret
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire']({'func': 'enable_beacon',
'name': name},
'manage_beacons')
if res:
event_ret = eventer.get_event(
tag='/salt/minion/minion_beacon_enabled_complete',
wait=kwargs.get('timeout', 30))
if event_ret and event_ret['complete']:
beacons = event_ret['beacons']
beacon_config_dict = _get_beacon_config_dict(beacons[name])
if 'enabled' in beacon_config_dict and beacon_config_dict['enabled']:
ret['result'] = True
ret['comment'] = 'Enabled beacon {0} on minion.' \
''.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to enable beacon {0} on ' \
'minion.'.format(name)
elif event_ret:
ret['result'] = False
ret['comment'] = event_ret['comment']
else:
ret['result'] = False
ret['comment'] = 'Did not receive the manage event ' \
'before the timeout of {0}s' \
''.format(kwargs.get('timeout', 30))
return ret
except KeyError:
ret['result'] = False
ret['comment'] = 'Event module not available. Beacon enable job ' \
'failed.'
return ret
|
Enable a beacon on the minion.
Args:
name (str): Name of the beacon to enable.
Returns:
dict: Boolean and status message on success or failure of enable.
CLI Example:
.. code-block:: bash
salt '*' beacons.enable_beacon ps
|
juraj-google-style
|
def _set_input(el, value):
if isinstance(value, dict):
el.value = value['val']
elif (type(value) in [list, tuple]):
el.value = ', '.join((item['val'] for item in value))
else:
el.value = value
|
Set content of given `el` to `value`.
Args:
el (obj): El reference to input you wish to set.
value (obj/list): Value to which the `el` will be set.
|
codesearchnet
|
def city(self, value=None):
if value is not None:
try:
value = str(value)
except ValueError:
raise ValueError('value {} need to be of type str '
'for field `city`'.format(value))
if ',' in value:
raise ValueError('value should not contain a comma '
'for field `city`')
self._city = value
|
Corresponds to IDD Field `city`
Args:
value (str): value for IDD Field `city`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def add_grad(left, right):
assert ((left is not None) and (right is not None))
left_type = type(left)
right_type = type(right)
if (left_type is ZeroGradient):
return right
if (right_type is ZeroGradient):
return left
return grad_adders[(left_type, right_type)](left, right)
|
Recursively add the gradient of two objects.
Args:
left: The left value to add. Can be either an array, a number, list or
dictionary.
right: The right value. Must be of the same type (recursively) as the left.
Returns:
The sum of the two gradients, which will of the same type.
|
codesearchnet
|
def singleprint_from_saved_model(export_dir: str) -> str:
try:
return singleprint_from_fingerprint_proto(export_dir)
except ValueError:
pass
try:
write_fingerprint(export_dir)
return singleprint_from_fingerprint_proto(export_dir)
except ValueError:
pass
try:
return singleprint_from_saved_model_proto(export_dir)
except ValueError as e:
raise ValueError(e) from None
|
Returns the singleprint of the SavedModel in `export_dir`.
First tries to construct the singleprint from `fingerprint.pb`, then from
`saved_model.pb`. Attempts to write the `fingerprint.pb` if not found, but
doesn't return an error if it isn't writeable.
Args:
export_dir: The directory that contains the SavedModel.
Returns:
A string containing the singleprint of the SavedModel in `export_dir`.
Raises:
ValueError: If a valid singleprint cannot be constructed from the
SavedModel.
|
github-repos
|
def try_checkpoint_metadata(self, trial):
if (trial._checkpoint.storage == Checkpoint.MEMORY):
logger.debug('Not saving data for trial w/ memory checkpoint.')
return
try:
logger.debug('Saving trial metadata.')
self._cached_trial_state[trial.trial_id] = trial.__getstate__()
except Exception:
logger.exception('Error checkpointing trial metadata.')
|
Checkpoints metadata.
Args:
trial (Trial): Trial to checkpoint.
|
codesearchnet
|
def __init__(self,
text_encoder_config=None,
language_pair=(None, None),
**kwargs):
encoder_name = (
text_encoder_config.name if text_encoder_config else "plain_text")
name = "%s%s_%s" % (language_pair[0], language_pair[1], encoder_name)
description = (
"Translation dataset from %s to %s, uses encoder %s.") % (
language_pair[0], language_pair[1], encoder_name)
super(FloresConfig, self).__init__(
name=name, description=description, **kwargs)
self.text_encoder_config = (
text_encoder_config or tfds.features.text.TextEncoderConfig())
assert "en" in language_pair, (
"Config language pair must contain `en`, got: %s",
self.builder_config.language_pair)
source, target = language_pair
non_en = source if target == "en" else target
assert non_en in ["ne", "si"], (
"Invalid non-en language in pair: %s", non_en)
self.language_pair = language_pair
|
BuilderConfig for FLoRes.
Args:
text_encoder_config: `tfds.features.text.TextEncoderConfig`, configuration
for the `tfds.features.text.TextEncoder` used for the features feature.
language_pair: pair of languages that will be used for translation. Should
contain 2-letter coded strings. First will be used at source and second
as target in supervised mode. For example: ("ne", "en").
**kwargs: keyword arguments forwarded to super.
|
juraj-google-style
|
def python_value(self, value):
value = super(PendulumDateTimeField, self).python_value(value)
if isinstance(value, datetime.datetime):
value = pendulum.instance(value)
elif isinstance(value, datetime.date):
value = pendulum.instance(datetime.datetime.combine(value, datetime.datetime.min.time()))
elif isinstance(value, string_types):
value = pendulum.parse(value)
return value
|
Return the value in the database as a Pendulum object.
Returns:
pendulum.Pendulum:
An instance of Pendulum with the field filled in.
|
codesearchnet
|
def inspect_volume(self, name):
url = self._url('/volumes/{0}', name)
return self._result(self._get(url), True)
|
Retrieve volume info by name.
Args:
name (str): volume name
Returns:
(dict): Volume information dictionary
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> cli.inspect_volume('foobar')
{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'}
|
codesearchnet
|
def get_chief_queue_runner(self):
if self._gradients_applied is False:
raise ValueError('Should be called after apply_gradients().')
return self._chief_queue_runner
|
Returns the QueueRunner for the chief to execute.
This includes the operations to synchronize replicas: aggregate gradients,
apply to variables, increment global step, insert tokens to token queue.
Note that this can only be called after calling apply_gradients() which
actually generates this queuerunner.
Returns:
A `QueueRunner` for chief to execute.
Raises:
ValueError: If this is called before apply_gradients().
|
github-repos
|
def recipe_url(config, auth, status, read, dataset, table):
url(config, {'auth': auth, 'status': status, 'read': read, 'urls': {'bigquery': {'dataset': dataset, 'query': table, 'legacy': False}}, 'to': {'bigquery': {'dataset': dataset, 'table': table}}})
|
Pull URL list from a table, fetch them, and write the results to another table.
Args:
auth (authentication) - Credentials used for reading and writing data.
status (boolean) - Pull status of HTTP request.
read (boolean) - Pull data from HTTP request.
dataset (string) - Name of Google BigQuery dataset to write.
table (string) - Name of Google BigQuery table to write.
|
github-repos
|
def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
vision_data = {}
if image_sizes is not None:
images_kwargs = LlavaOnevisionProcessorKwargs._defaults.get('images_kwargs', {})
images_kwargs.update(kwargs)
size = images_kwargs.get('size', None) or self.image_processor.size
size = (size['shortest_edge'], size['shortest_edge']) if 'shortest_edge' in size else (min(size['height'], size['width']), min(size['height'], size['width']))
processed_height, processed_width = size
batch_num_image_tokens = []
num_image_patches = [1] * len(image_sizes)
for image_size in image_sizes:
orig_height, orig_width = image_size
num_image_tokens = self._get_number_of_features(orig_height, orig_width, processed_height, processed_width)
if self.vision_feature_select_strategy == 'default':
num_image_tokens -= 1
batch_num_image_tokens.append(num_image_tokens)
vision_data.update({'num_image_tokens': batch_num_image_tokens, 'num_image_patches': num_image_patches})
return MultiModalData(**vision_data)
|
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (List[List[str]], *optional*):
The input sizes formatted as (height, width) per each image.
video_sizes (List[List[str]], *optional*):
The input sizes formatted as (num_frames, height, width) per each video.
audio_lengths (List[int], *optional*):
The input length formatted as per each audio.
Returns:
Dict[str, List[int]]: A dictionary mapping each modality ("image", "video", "audio")
to a list containing the number of placeholder tokens required. If the model doesn't accept
a certain modality or no input sizes are provided, the dict value is set to an empty list.
|
github-repos
|
def align_up(offset, align):
remain = offset % align
if remain == 0:
return offset
else:
return offset + (align - remain)
|
Align ``offset`` up to ``align`` boundary.
Args:
offset (int): value to be aligned.
align (int): alignment boundary.
Returns:
int: aligned offset.
>>> align_up(3, 2)
4
>>> align_up(3, 1)
3
|
juraj-google-style
|
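For the common case of a power-of-two boundary, the same alignment can be done with a bitmask; a small sketch for comparison with the modulo form above.
def align_up_pow2(offset: int, align: int) -> int:
    """Align offset up to a power-of-two boundary using a bitmask."""
    assert align > 0 and (align & (align - 1)) == 0, 'align must be a power of two'
    return (offset + align - 1) & ~(align - 1)

assert align_up_pow2(3, 2) == 4
assert align_up_pow2(8, 8) == 8
assert align_up_pow2(9, 8) == 16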
def get_pe(self):
return PE(self.rest_client.make_request(self.pe), self.rest_client)
|
Get the Streams processing element this operator is executing in.
Returns:
PE: Processing element for this operator.
.. versionadded:: 1.9
|
codesearchnet
|
def ParseOptions(cls, options, output_module):
if not isinstance(output_module, sqlite_4n6time.SQLite4n6TimeOutputModule):
raise errors.BadConfigObject(
'Output module is not an instance of SQLite4n6TimeOutputModule')
shared_4n6time_output.Shared4n6TimeOutputArgumentsHelper.ParseOptions(
options, output_module)
filename = getattr(options, 'write', None)
if not filename:
raise errors.BadConfigOption(
'Output filename was not provided use "-w filename" to specify.')
output_module.SetFilename(filename)
|
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when the output filename was not provided.
|
juraj-google-style
|
def recommend(self, limit=10):
expected_list = [(arm_id, beta_dist.expected_value()) for arm_id, beta_dist in self.__beta_dist_dict.items()]
expected_list = sorted(expected_list, key=lambda x: x[1], reverse=True)
return expected_list[:limit]
|
Listup arms and expected value.
Args:
limit: Length of the list.
Returns:
[Tuple(`Arms master id`, `expected value`)]
|
juraj-google-style
|
def get_parameter_dict(self, include_frozen=False):
return OrderedDict(zip(
self.get_parameter_names(include_frozen=include_frozen),
self.get_parameter_vector(include_frozen=include_frozen),
))
|
Get an ordered dictionary of the parameters
Args:
include_frozen (Optional[bool]): Should the frozen parameters be
included in the returned value? (default: ``False``)
|
juraj-google-style
|
def get_contact(self, jid):
try:
return self.get_contacts()[jid.bare()]
except KeyError:
raise ContactNotFound
except AttributeError:
raise AttributeError('jid must be an aioxmpp.JID object')
|
Returns a contact
Args:
jid (aioxmpp.JID): jid of the contact
Returns:
dict: the roster entry for the given contact
|
codesearchnet
|
def pad_sparse_embedding_lookup_indices(sparse_indices, padded_size):
batch_size = sparse_indices.dense_shape[0]
sparse_indices = sparse_ops.sparse_slice(sparse_indices, [0, 0], [batch_size, padded_size])
indices, values = (sparse_indices.indices, sparse_indices.values)
padded_values = array_ops.scatter_nd(indices, math_ops.cast(values, dtypes.int32), shape=(batch_size, padded_size))
weights = array_ops.ones_like(values, dtype=dtypes.float32)
padded_mask = array_ops.scatter_nd(indices, weights, shape=(batch_size, padded_size))
return (padded_values, padded_mask)
|
Creates statically-sized Tensors containing indices and weights.
From third_party/cloud_tpu/models/movielens/tpu_embedding.py
Also computes sparse_indices.values % embedding_table_size, for equivalent
functionality to sparse_column_with_integerized_feature. The returned
padded weight Tensor also doubles as a mask indicating which values in
the returned padded indices Tensor are indices versus padded zeros.
Args:
sparse_indices: SparseTensor of embedding lookup indices.
padded_size: Number of columns of the returned Tensors. Indices which fall
out of bounds will be truncated to the padded size.
Returns:
(sparse_indices.values padded to the specified size,
a mask the same size as the returned padded values in which 0s
indicate padded locations and 1s (or values from sparse_weights)
indicate actual values)
|
github-repos
|
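A small sketch of the padding idea with public TensorFlow ops (`tf.sparse.slice` and `tf.scatter_nd`); the shapes and values are toy data and TensorFlow is assumed to be installed.
import tensorflow as tf

sparse_indices = tf.sparse.SparseTensor(
    indices=[[0, 0], [0, 1], [1, 0]], values=[7, 8, 9], dense_shape=[2, 5])
padded_size = 3
sliced = tf.sparse.slice(sparse_indices, [0, 0], [2, padded_size])
padded_values = tf.scatter_nd(sliced.indices, tf.cast(sliced.values, tf.int32),
                              shape=(2, padded_size))
padded_mask = tf.scatter_nd(sliced.indices, tf.ones_like(sliced.values, dtype=tf.float32),
                            shape=(2, padded_size))
print(padded_values.numpy())  # [[7 8 0] [9 0 0]]
print(padded_mask.numpy())    # [[1. 1. 0.] [1. 0. 0.]]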
def dumps(ms, single=False, properties=False, pretty_print=True, show_status=False, predicate_modifiers=False, **kwargs):
if ((not pretty_print) and kwargs.get('indent')):
pretty_print = True
if single:
ms = [ms]
return serialize(ms, properties=properties, pretty_print=pretty_print, show_status=show_status, predicate_modifiers=predicate_modifiers, **kwargs)
|
Serialize an Xmrs object to an Eds representation
Args:
ms: an iterator of :class:`~delphin.mrs.xmrs.Xmrs` objects to
serialize (unless the *single* option is `True`)
single (bool): if `True`, treat *ms* as a single
:class:`~delphin.mrs.xmrs.Xmrs` object instead of as an
iterator
properties (bool): if `False`, suppress variable properties
pretty_print (bool): if `True`, add newlines and indentation
show_status (bool): if `True`, annotate disconnected graphs and
nodes
Returns:
an :class:`Eds` string representation of a corpus of Xmrs
|
codesearchnet
|
def _confirm_overwrite(filename):
message = '{}Would you like to overwrite the contents of {} (y/[n])? '.format(
c.Fore.MAGENTA, filename
)
response = raw_input(message)
response = response.lower()
if response in ['y', 'yes']:
return True
return False
|
Confirm overwrite of template files.
Make sure the user would like to continue downloading a file which will overwrite a file
in the current directory.
Args:
filename (str): The name of the file to overwrite.
Returns:
bool: True if the user specifies a "yes" response.
|
juraj-google-style
|
def parse_date_range(date, alt_end_date=None):
NOT_ENDED = '9999'
all_years = re.findall('\\d{4}', date)
if alt_end_date:
NOT_ENDED = alt_end_date
if (not all_years):
return ('****', NOT_ENDED)
elif (len(all_years) == 1):
return (all_years[0], NOT_ENDED)
return (all_years[0], all_years[1])
|
Parse input `date` string in free-text format for four-digit long groups.
Args:
date (str): Input containing years.
Returns:
tuple: ``(from, to)`` as four-digit strings.
|
codesearchnet
|
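A quick demonstration of the year extraction above: the regex collects every four-digit group, so free-text inputs behave as follows (values are illustrative).
import re

def years(text):
    return re.findall(r'\d{4}', text)

print(years('published 1990, reissued 2005'))  # ['1990', '2005'] -> range ('1990', '2005')
print(years('circa 1984'))                     # ['1984']         -> range ('1984', '9999')
print(years('undated'))                        # []               -> range ('****', '9999')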
def int_to_str_digit(n):
if n < 10:
return str(n)
elif n < 36:
return chr(n + 55)
else:
return chr(n + 61)
|
Converts a positive integer, to a single string character.
Where: 9 -> "9", 10 -> "A", 11 -> "B", 12 -> "C", ...etc
Args:
n(int): A positive integer number.
Returns:
The character representation of the input digit of value n (str).
|
juraj-google-style
|
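The branch thresholds above map onto the usual 0-9, A-Z, a-z digit alphabet; a small sketch of the same mapping via string constants.
import string

ALPHABET = string.digits + string.ascii_uppercase + string.ascii_lowercase

def int_to_str_digit_alt(n: int) -> str:
    return ALPHABET[n]

assert int_to_str_digit_alt(9) == '9'
assert int_to_str_digit_alt(10) == 'A'   # chr(10 + 55)
assert int_to_str_digit_alt(36) == 'a'   # chr(36 + 61)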
def validate_list(self, value):
if (len(value) > self.max_items):
raise ValidationError(u'list must not contain more than {max_items} items.'.format(max_items=self.max_items))
if (all((isinstance(item, six.string_types) for item in value)) is False):
raise ValidationError(u'list must only contain strings.')
return value
|
Validate data before saving to database.
Arguments:
value(list): list to be validated
Returns:
list if validation is successful
Raises:
ValidationError
|
codesearchnet
|
def __init__(self, resolver_context, encoding='utf-8'):
super(TARFileSystem, self).__init__(resolver_context)
self._file_object = None
self._tar_file = None
self.encoding = encoding
|
Initializes a file system.
Args:
resolver_context (Context): resolver context.
encoding (Optional[str]): file entry name encoding.
|
juraj-google-style
|
def replace_list(items, match, replacement):
return [replace(item, match, replacement) for item in items]
|
Replaces occurrences of a match string in a given list of strings and returns
a list of new strings. The match string can be a regex expression.
Args:
items (list): the list of strings to modify.
match (str): the search expression.
replacement (str): the string to replace with.
|
codesearchnet
|
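Since the match string may be a regex, here is a minimal illustration of the same element-wise replacement with `re.sub` (the `replace` helper used above is not shown in this dump).
import re

items = ['build-123.log', 'build-456.log']
print([re.sub(r'\d+', 'N', item) for item in items])  # ['build-N.log', 'build-N.log']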
def get_metric_fns(metric_names, labels, outputs):
metric_fns = {}
for metric_name in metric_names:
metric_fn_name = metric_name.split('/')[(- 1)]
if hasattr(metrics, metric_fn_name):
metric_fn = getattr(metrics, metric_fn_name)
metric_fns[metric_name] = metric_fn(labels, outputs)
else:
raise ValueError('Metric {} is not implemented'.format(metric_fn_name))
return metric_fns
|
Generate a dictionary of metric name to metric function.
Args:
metric_names: list of strings in the format "prefix/metric_function_name".
metric_function_name should refer to a function name in metrics.py. The
prefix will be included in the key in the returned dict.
labels: a tensor where batch is the first dimension.
outputs: a tensor of model predictions, same dimensionality as labels.
Returns:
metric_fns: dict of metric functions keyed by their name.
|
codesearchnet
|
def get_plot_frame(map_obj, key_map, cached=False):
if (map_obj.kdims and (len(map_obj.kdims) == 1) and (map_obj.kdims[0] == 'Frame')):
return map_obj.last
key = tuple((key_map[kd.name] for kd in map_obj.kdims if (kd.name in key_map)))
if ((key in map_obj.data) and cached):
return map_obj.data[key]
else:
try:
return map_obj[key]
except KeyError:
return None
except StopIteration as e:
raise e
except Exception:
print(traceback.format_exc())
return None
|
Returns the current frame in a mapping given a key mapping.
Args:
map_obj: Nested Dimensioned object
key_map: Dictionary mapping between dimensions and key value
cached: Whether to allow looking up key in cache
Returns:
The item in the mapping corresponding to the supplied key.
|
codesearchnet
|
def assemble(ops, target=None):
target = get_py_internals(target)
opmap = target['opmap']
hasjrel = target['hasjrel']
hasjabs = target['hasjabs']
hasjump = set(hasjrel) | set(hasjabs)
have_argument = target['have_argument']
extended_arg = target['extended_arg']
wordcode = target['wordcode']
if not wordcode:
def encode_op(output, op_code, op_arg=None):
n = 1
if op_arg is None:
output.append(op_code)
else:
n += 2
ext_arg = op_arg >> 16
if ext_arg:
n += 3
output.extend([extended_arg, ext_arg & 255, ext_arg >> 8])
op_arg &= 65535
output.extend([op_code, op_arg & 255, op_arg >> 8])
return n
else:
def encode_op(output, op_code, op_arg=None):
n = 2
if op_arg is None:
output.extend([op_code, 0])
else:
ext_arg = op_arg >> 8
if ext_arg:
                    n += encode_op(output, extended_arg, ext_arg)
output.extend([op_code, op_arg & 255])
return n
label_address = {}
while True:
retry = False
output = bytearray()
address = 0
for op in ops:
if isinstance(op, Label):
if label_address.get(op) != address:
retry = True
label_address[op] = address
continue
op_code = opmap[op.name]
op_arg = op.arg
if op_code >= have_argument and op_arg is None:
raise ValueError('Opcode %s requires argument.' % op)
elif op_code < have_argument and op_arg is not None:
raise ValueError('Opcode %s should not have an argument.' % op)
elif isinstance(op_arg, Label):
if op_code not in hasjump:
raise ValueError('Did not expect label as argument for opcode %s.' % op)
if op_arg not in ops:
raise ValueError('Label is not part of this op list.')
op_arg = label_address.get(op_arg)
if op_arg is None:
address += encode_op(output, op_code, 0)
continue
if op_code in hasjrel:
op_arg -= address
elif op_code in hasjump:
raise ValueError('Expected label as argument for opcode %s.' % op)
n = encode_op(output, op_code, op_arg)
address += n
if op_code in hasjrel:
if not wordcode:
op_arg = output[-2] + (output[-1] << 8)
if op_arg < n:
ext_arg = output[-5] + (output[-4] << 8) - 1
output[-5], output[-4] = ext_arg & 255, ext_arg >> 8
op_arg += 65536
op_arg -= n
output[-2], output[-1] = op_arg & 255, op_arg >> 8
else:
for i in itertools.count(1, 2):
if n <= output[-i]:
output[-i] -= n
break
output[-i] += 256 - n
n = 1
if not retry:
return bytes(output)
|
Assemble a set of :class:`Op` and :class:`Label` instance back into
bytecode.
Arguments:
ops(list): A list of opcodes and labels (as returned by
:func:`disassemble`).
target: The opcode specification of the targeted python
version. If this is ``None`` the specification of the currently
running python version will be used.
Returns:
bytes: The assembled bytecode.
|
juraj-google-style
|
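A hedged round-trip sketch: it assumes the companion disassemble() mentioned in the docstring accepts raw bytecode bytes and returns the Op/Label list that assemble() consumes for the running interpreter.
def add(a, b):
    return a + b

ops = disassemble(add.__code__.co_code)   # assumed counterpart from the same module
assert assemble(ops) == add.__code__.co_code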
def response_data_to_model_instance(self, response_data):
response_data["datetime_created"] = dateutil.parser.parse(
response_data["datetime_created"]
)
return super(
BaseTaskTypeManager, self
).response_data_to_model_instance(response_data)
|
Convert response data to a task type model.
Args:
response_data (dict): The data from the request's response.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A model instance representing the task type from the
response data.
|
juraj-google-style
|
def _CreateUserIdentifier(identifier_type=None, value=None):
if identifier_type in _HASHED_IDENTIFIER_TYPES:
value = hashlib.sha256(value.strip().lower()).hexdigest()
user_identifier = {
'userIdentifierType': identifier_type,
'value': value
}
return user_identifier
|
Creates a user identifier from the specified type and value.
Args:
identifier_type: a str specifying the type of user identifier.
value: a str value of the identifier; to be hashed using SHA-256 if needed.
Returns:
A dict specifying a user identifier, with a value hashed using SHA-256 if
needed.
|
juraj-google-style
|
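A Python 3 sketch of the same hashing step (the snippet above is Python 2 style, and 'HASHED_EMAIL' is assumed here to be one of the hashed identifier types):
import hashlib

email = ' User@Example.COM '
hashed = hashlib.sha256(email.strip().lower().encode('utf-8')).hexdigest()
user_identifier = {'userIdentifierType': 'HASHED_EMAIL', 'value': hashed}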
def _BiasAddGradGrad(op: ops.Operation, received_grad):
try:
data_format = op.get_attr('data_format')
except ValueError:
data_format = None
shape = array_ops.shape(op.inputs[0])
bias_shape = array_ops.shape(received_grad)
if data_format == b'NCHW':
expanded_shape = array_ops.concat([array_ops.ones_like(shape[:1]), bias_shape, array_ops.ones_like(shape[2:])], 0)
tile_mults = array_ops.concat([shape[:1], [1], shape[2:]], 0)
else:
expanded_shape = array_ops.concat([array_ops.ones_like(shape[:-1]), bias_shape], 0)
tile_mults = array_ops.concat([shape[:-1], [1]], 0)
expanded_grad = array_ops.reshape(received_grad, expanded_shape)
return array_ops.tile(expanded_grad, tile_mults)
|
Gradient for the BiasAddGrad op.
Args:
op: BiasAddGrad op for which we are calculating gradients.
received_grad: The gradients passed to the BiasAddGrad op.
Returns:
A single gradient Tensor for the input to BiasAddGrad (which
is the gradient of the bias term in BiasAdd)
|
github-repos
|
def post_content(url, headers={}, post_data={}, decoded=True, **kwargs):
if kwargs.get('post_data_raw'):
logging.debug(('post_content: %s\npost_data_raw: %s' % (url, kwargs['post_data_raw'])))
else:
logging.debug(('post_content: %s\npost_data: %s' % (url, post_data)))
req = request.Request(url, headers=headers)
if cookies:
cookies.add_cookie_header(req)
req.headers.update(req.unredirected_hdrs)
if kwargs.get('post_data_raw'):
post_data_enc = bytes(kwargs['post_data_raw'], 'utf-8')
else:
post_data_enc = bytes(parse.urlencode(post_data), 'utf-8')
response = urlopen_with_retry(req, data=post_data_enc)
data = response.read()
content_encoding = response.getheader('Content-Encoding')
if (content_encoding == 'gzip'):
data = ungzip(data)
elif (content_encoding == 'deflate'):
data = undeflate(data)
if decoded:
charset = match1(response.getheader('Content-Type'), 'charset=([\\w-]+)')
if (charset is not None):
data = data.decode(charset)
else:
data = data.decode('utf-8')
return data
|
Send an HTTP POST request to a URL and return the response content.
Args:
url: A URL.
headers: Request headers used by the client.
post_data: Dictionary of POST fields, URL-encoded before sending (ignored when ``post_data_raw`` is passed).
decoded: Whether to decode the response body using UTF-8 or the charset specified in Content-Type.
Returns:
The content as a string.
|
codesearchnet
|
def mahalanobis_distances(df, axis=0):
df = (df.transpose() if (axis == 1) else df)
means = df.mean()
try:
inv_cov = np.linalg.inv(df.cov())
except LinAlgError:
return pd.Series(([np.NAN] * len(df.index)), df.index, name='Mahalanobis')
dists = []
for (i, sample) in df.iterrows():
dists.append(mahalanobis(sample, means, inv_cov))
return pd.Series(dists, df.index, name='Mahalanobis')
|
Returns a pandas Series with Mahalanobis distances for each sample on the
axis.
Note: does not work well when the number of observations is smaller than the
number of dimensions; the result will either contain NaN values or, in the
extreme case, the covariance matrix is singular and a Series of NaN is returned.
Args:
df: pandas DataFrame with columns to run diagnostics on
axis: 0 to find outlier rows, 1 to find outlier columns
Returns:
pandas.Series: the Mahalanobis distance of each sample, named 'Mahalanobis'.
|
codesearchnet
|
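A self-contained sketch of the same computation on a hypothetical toy DataFrame (requires numpy, pandas and scipy):
import numpy as np
import pandas as pd
from scipy.spatial.distance import mahalanobis

df = pd.DataFrame({'x': [1.0, 2.0, 3.0, 4.0, 10.0],
                   'y': [2.1, 3.9, 6.2, 8.1, 1.0]})
inv_cov = np.linalg.inv(df.cov())
means = df.mean()
dists = pd.Series([mahalanobis(row, means, inv_cov) for _, row in df.iterrows()],
                  index=df.index, name='Mahalanobis')
print(dists.idxmax())   # the last row stands out as the outlier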
def get_course_current_grades(self, course_id):
resp = self.requester.get(
urljoin(
self.base_url,
'/api/grades/v1/courses/{course_key}/'.format(course_key=course_id)
)
)
resp.raise_for_status()
resp_json = resp.json()
if 'results' in resp_json:
grade_entries = [CurrentGrade(entry) for entry in resp_json["results"]]
while resp_json['next'] is not None:
resp = self.requester.get(resp_json['next'])
resp.raise_for_status()
resp_json = resp.json()
grade_entries.extend((CurrentGrade(entry) for entry in resp_json["results"]))
else:
grade_entries = [CurrentGrade(entry) for entry in resp_json]
return CurrentGradesByCourse(grade_entries)
|
Returns a CurrentGradesByCourse object for all users in the specified course.
Args:
course_id (str): an edX course ID.
Returns:
CurrentGradesByCourse: object representing the student current grades
Authorization:
The authenticated user must have staff permissions to see grades for all users
in a course.
|
juraj-google-style
|
def _form_output(span_doc: span, output_format: str, relations: Dict, patterns: List) -> str:
format_value = []
output_inf = [a_pattern.in_output for a_pattern in patterns]
for i in range(len(output_inf)):
token_range = relations[i]
if token_range and output_inf[i]:
format_value.append(span_doc[token_range[0]:token_range[1]].text)
if not output_format:
return " ".join(format_value)
result_str = re.sub("{}", " ".join(format_value), output_format)
positions = re.findall("{[0-9]+}", result_str)
if not positions:
return result_str
position_indices = [int(x[1:-1]) for x in positions]
if max(position_indices) < len(format_value):
result_str = result_str.format(*format_value)
else:
try:
result_str = result_str.format("", *format_value)
except:
positions = [x for x in positions if int(x[1:-1]) > len(format_value)-1 or int(x[1:-1]) < 0]
for pos in positions:
result_str = result_str.replace(pos, "")
result_str = result_str.format(*format_value)
return result_str
|
Form an output value according to user input of output_format
Args:
span_doc: span
output_format: str
relations: Dict
patterns: List
Returns: str
|
juraj-google-style
|
def deserialize(config, custom_objects=None):
return serialization_lib.deserialize_keras_object(config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects)
|
Deserializes a serialized metric class/function instance.
Args:
config: Metric configuration.
custom_objects: Optional dictionary mapping names (strings)
to custom objects (classes and functions) to be
considered during deserialization.
Returns:
A Keras `Metric` instance or a metric function.
|
github-repos
|
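A usage sketch, assuming TensorFlow/Keras is installed; serialize() is the inverse helper from the same module:
import tensorflow as tf

config = tf.keras.metrics.serialize(tf.keras.metrics.AUC(name='my_auc'))
metric = tf.keras.metrics.deserialize(config)
print(type(metric).__name__, metric.name)   # AUC my_auc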
def extract_header_comment_key_value_tuples_from_file(file_descriptor):
file_data = file_descriptor.read()
findall_result = re.findall(HEADER_COMMENT_KEY_VALUE_TUPLES_REGEX, file_data, re.MULTILINE | re.DOTALL)
returned_list = []
for header_comment, _ignored, raw_comments, key, value in findall_result:
comments = re.findall("/\* (.*?) \*/", raw_comments)
if len(comments) == 0:
comments = [u""]
returned_list.append((header_comment, comments, key, value))
return returned_list
|
Extracts tuples representing comments and localization entries from a strings file.
Args:
file_descriptor (file): The file to read the tuples from
Returns:
list : List of tuples representing the headers and localization entries.
|
juraj-google-style
|
def _ParseRecord(
self, parser_mediator, record_index, evtx_record, recovered=False):
event_data = self._GetEventData(
parser_mediator, record_index, evtx_record, recovered=recovered)
try:
written_time = evtx_record.get_written_time_as_integer()
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read written time from event record: {0:d} '
'with error: {1!s}').format(record_index, exception))
written_time = None
if not written_time:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
else:
date_time = dfdatetime_filetime.Filetime(timestamp=written_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extract data from a Windows XML EventLog (EVTX) record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
record_index (int): event record index.
evtx_record (pyevtx.record): event record.
recovered (Optional[bool]): True if the record was recovered.
|
juraj-google-style
|
def set_filetype(self, filetype, bufnr=None):
if bufnr:
self._vim.command(((str(bufnr) + 'bufdo set filetype=') + filetype))
else:
self._vim.command(('set filetype=' + filetype))
|
Set filetype for a buffer.
Note: it's a quirk of Vim's Python API that using the buffer.options
dictionary to set filetype does not trigger ``FileType`` autocommands,
hence this implementation executes as a command instead.
Args:
filetype (str): The filetype to set.
bufnr (Optional[int]): A Vim buffer number, current if ``None``.
|
codesearchnet
|
def _pre_action(self, action):
assert len(action) == self.dof, "environment got invalid action dimension"
low, high = self.action_spec
action = np.clip(action, low, high)
if self.has_gripper:
arm_action = action[: self.mujoco_robot.dof]
gripper_action_in = action[
self.mujoco_robot.dof : self.mujoco_robot.dof + self.gripper.dof
]
gripper_action_actual = self.gripper.format_action(gripper_action_in)
action = np.concatenate([arm_action, gripper_action_actual])
ctrl_range = self.sim.model.actuator_ctrlrange
bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])
weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])
applied_action = bias + weight * action
self.sim.data.ctrl[:] = applied_action
self.sim.data.qfrc_applied[
self._ref_joint_vel_indexes
] = self.sim.data.qfrc_bias[self._ref_joint_vel_indexes]
if self.use_indicator_object:
self.sim.data.qfrc_applied[
self._ref_indicator_vel_low : self._ref_indicator_vel_high
] = self.sim.data.qfrc_bias[
self._ref_indicator_vel_low : self._ref_indicator_vel_high
]
|
Overrides the superclass method to actuate the robot with the
passed joint velocities and gripper control.
Args:
action (numpy array): The control to apply to the robot. The first
@self.mujoco_robot.dof dimensions should be the desired
normalized joint velocities and if the robot has
a gripper, the next @self.gripper.dof dimensions should be
actuation controls for the gripper.
|
juraj-google-style
|
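A standalone sketch of the scaling step above: a normalized action in [-1, 1] is mapped into each actuator's control range via bias + weight * action (the limits below are hypothetical).
import numpy as np

ctrl_range = np.array([[-0.5, 0.5], [0.0, 2.0]])     # hypothetical actuator limits
action = np.clip(np.array([0.25, -1.0]), -1.0, 1.0)  # normalized command
bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])
weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])
print(bias + weight * action)                        # first actuator 0.125, second 0.0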
def delete_group(self, name):
self.project_service.set_auth(self._token_project)
return self.project_service.delete_group(name)
|
Delete given group.
Args:
name (string): Name of group.
Returns:
(bool): True on success.
Raises:
requests.HTTPError on failure.
|
juraj-google-style
|
def latex_sanitize_command_name(_cmdname):
import utool as ut
command_name = _cmdname
try:
def subroman(match):
import roman
try:
groupdict = match.groupdict()
num = int(groupdict['num'])
if num == 0:
return ''
return roman.toRoman(num)
except Exception as ex:
ut.printex(ex, keys=['groupdict'])
raise
command_name = re.sub(ut.named_field('num', r'\d+'), subroman, command_name)
except ImportError as ex:
if ut.SUPER_STRICT:
ut.printex(ex)
raise
    # NOTE: the character-set literal below was truncated in this dump (it starts with '#');
    # this is a best-guess reconstruction that strips digits and punctuation such as '#',
    # brackets and '.' before the name is camel-cased.
    command_name = re.sub(r'[\d' + re.escape('#()[]{}.') + ']', '', command_name)
str_list = re.split('[_ ]', command_name)
command_name = ut.to_camel_case('_'.join(str_list), mixed=True)
return command_name
|
Args:
_cmdname (?):
Returns:
?: command_name
CommandLine:
python -m utool.util_latex --exec-latex_sanitize_command_name
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_latex import * # NOQA
>>> _cmdname = '#foo bar.'
>>> command_name = latex_sanitize_command_name(_cmdname)
>>> result = ('command_name = %s' % (str(command_name),))
>>> print(result)
FooBar
|
juraj-google-style
|
def get_ratio(self, max_denominator=5, index_none=None):
structure = self.initial_structure
lat_type = self.lat_type
if lat_type == 't' or lat_type == 'h':
a, c = (structure.lattice.a, structure.lattice.c)
if c > a:
frac = Fraction(c ** 2 / a ** 2).limit_denominator(max_denominator)
ratio = [frac.numerator, frac.denominator]
else:
frac = Fraction(a ** 2 / c ** 2).limit_denominator(max_denominator)
ratio = [frac.denominator, frac.numerator]
elif lat_type == 'r':
cos_alpha = cos(structure.lattice.alpha / 180 * np.pi)
frac = Fraction((1 + 2 * cos_alpha) / cos_alpha).limit_denominator(max_denominator)
ratio = [frac.numerator, frac.denominator]
elif lat_type == 'o':
ratio = [None] * 3
lat = (structure.lattice.c, structure.lattice.b, structure.lattice.a)
index = [0, 1, 2]
if index_none is None:
min_index = np.argmin(lat)
index.pop(min_index)
frac1 = Fraction(lat[index[0]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)
frac2 = Fraction(lat[index[1]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)
com_lcm = lcm(frac1.denominator, frac2.denominator)
ratio[min_index] = com_lcm
ratio[index[0]] = frac1.numerator * int(round((com_lcm / frac1.denominator)))
ratio[index[1]] = frac2.numerator * int(round((com_lcm / frac2.denominator)))
else:
index.pop(index_none)
if (lat[index[0]] > lat[index[1]]):
frac = Fraction(lat[index[0]] ** 2 / lat[index[1]] ** 2).limit_denominator(max_denominator)
ratio[index[0]] = frac.numerator
ratio[index[1]] = frac.denominator
else:
frac = Fraction(lat[index[1]] ** 2 / lat[index[0]] ** 2).limit_denominator(max_denominator)
ratio[index[1]] = frac.numerator
ratio[index[0]] = frac.denominator
elif lat_type == 'c':
raise RuntimeError('Cubic system does not need axial ratio.')
else:
raise RuntimeError('Lattice type not implemented.')
return ratio
|
Find the axial ratio needed for the GB generator input.
Args:
max_denominator (int): the maximum denominator for
the computed ratio, default to be 5.
index_none (int): specify the irrational axis.
0-a, 1-b, 2-c. May only be needed for the orthorhombic system.
Returns:
axial ratio needed for GB generator (list of integers).
|
juraj-google-style
|
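A minimal sketch of the core trick used above: Fraction.limit_denominator turns a (near-)irrational axial ratio into small integers (the c**2/a**2 value below is hypothetical).
from fractions import Fraction

c_over_a_sq = 2.6667            # hypothetical c**2 / a**2 for a tetragonal cell
frac = Fraction(c_over_a_sq).limit_denominator(5)
print([frac.numerator, frac.denominator])   # [8, 3]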
def stop_tuning_job(self, name):
try:
LOGGER.info('Stopping tuning job: {}'.format(name))
self.sagemaker_client.stop_hyper_parameter_tuning_job(HyperParameterTuningJobName=name)
except ClientError as e:
error_code = e.response['Error']['Code']
if error_code == 'ValidationException':
LOGGER.info('Tuning job: {} is already stopped or not running.'.format(name))
else:
LOGGER.error('Error occurred while attempting to stop tuning job: {}. Please try again.'.format(name))
raise
|
Stop the Amazon SageMaker hyperparameter tuning job with the specified name.
Args:
name (str): Name of the Amazon SageMaker hyperparameter tuning job.
Raises:
ClientError: If an error occurs while trying to stop the hyperparameter tuning job.
|
juraj-google-style
|
def clean_df(df, header=None, **read_csv_kwargs):
df = read_csv(df, header=header, **read_csv_kwargs)
df = df.fillna(' ')
for col in df.columns:
df[col] = df[col].apply(unicode2ascii)
return df
|
Convert UTF8 characters in a CSV file or dataframe into ASCII
Args:
df (DataFrame or str): DataFrame or path or url to CSV
|
codesearchnet
|
def sanitize_filename(filename):
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
basename = _sanitize_windows_filename(basename)
basename = _truncate_filename(basename, LINUX_MAX_FILENAME_LENGTH)
basename = basename.replace(' ', '_')
return os.path.join(dirname, basename)
|
Sanitizes a filename for various operating systems.
Args:
filename: string, the filename to sanitize.
Returns:
A string that is safe to use as a filename on various operating systems.
|
github-repos
|
def AddConnectedPeer(self, peer):
self.RemoveFromQueue(peer.address)
self.AddKnownAddress(peer.address)
if len(self.Peers) > settings.CONNECTED_PEER_MAX:
peer.Disconnect("Max connected peers reached", isDead=False)
if peer not in self.Peers:
self.Peers.append(peer)
else:
self.RemoveKnownAddress(peer.address)
peer.Disconnect()
|
Add a new connect peer to the known peers list.
Args:
peer (NeoNode): instance.
|
juraj-google-style
|
def add_time(data):
payload = data['data']
updated = data['updated'].date()
if (updated == date.today()):
payload['last_updated'] = data['updated'].strftime('today at %H:%M:%S')
elif (updated >= (date.today() - timedelta(days=1))):
payload['last_updated'] = 'yesterday'
elif (updated >= (date.today() - timedelta(days=7))):
payload['last_updated'] = updated.strftime('on %A')
else:
payload['last_updated'] = updated.strftime('%Y-%m-%d')
return payload
|
Add a friendly update time to the supplied data.
Arguments:
data (:py:class:`dict`): The response data and its update time.
Returns:
:py:class:`dict`: The data with a friendly update time.
|
codesearchnet
|
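A quick usage sketch with a hypothetical payload:
from datetime import datetime

sample = {'data': {'value': 42}, 'updated': datetime.now()}
print(add_time(sample)['last_updated'])   # e.g. "today at 14:03:59"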
def to_pandas(self):
dataframe = self.get().to_pandas()
assert ((type(dataframe) is pandas.DataFrame) or (type(dataframe) is pandas.Series))
return dataframe
|
Convert the object stored in this partition to a Pandas DataFrame.
Returns:
A Pandas DataFrame.
|
codesearchnet
|
def remove_by_threshold(self, threshold=5):
keys = [x for x in self._dictionary.keys()]
for key in keys:
if (self._dictionary[key] <= threshold):
self._dictionary.pop(key)
self._update_dictionary()
|
Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at or below which a word is removed
|
codesearchnet
|
def download(self, url, destination_path):
self._pbar_url.update_total(1)
future = self._executor.submit(self._sync_download, url, destination_path)
return promise.Promise.resolve(future)
|
Download url to given path.
Returns Promise -> sha256 of downloaded file.
Args:
url: address of resource to download.
destination_path: `str`, path to the directory where the resource will be downloaded.
Returns:
Promise obj -> (`str`, int): (downloaded object checksum, size in bytes).
|
codesearchnet
|
def CreateSmartShoppingAd(client, ad_group_id):
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201809')
adgroup_ad = {
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'GoalOptimizedShoppingAd'
}
}
ad_operation = {
'operator': 'ADD',
'operand': adgroup_ad
}
ad_result = ad_group_ad_service.mutate([ad_operation])
for adgroup_ad in ad_result['value']:
print 'Smart Shopping ad with ID "%s" was added.' % adgroup_ad['ad']['id']
|
Adds a new Smart Shopping ad.
Args:
client: an AdWordsClient instance.
ad_group_id: an integer ID for an ad group.
|
juraj-google-style
|
def Validate(self, problems, validate_children=True):
self.ValidateRouteId(problems)
self.ValidateServicePeriod(problems)
self.ValidateDirectionId(problems)
self.ValidateTripId(problems)
self.ValidateShapeIdsExistInShapeList(problems)
self.ValidateRouteIdExistsInRouteList(problems)
self.ValidateServiceIdExistsInServiceList(problems)
self.ValidateBikesAllowed(problems)
self.ValidateWheelchairAccessible(problems)
if self._schedule and validate_children:
self.ValidateChildren(problems)
|
Validate attributes of this object.
Check that this object has all required values set to a valid value without
reference to the rest of the schedule. If the _schedule attribute is set
then check that references such as route_id and service_id are correct.
Args:
problems: A ProblemReporter object
validate_children: if True and the _schedule attribute is set then call
ValidateChildren
|
juraj-google-style
|
def poll(self, query_id=None, sequence_no=None, params=None, **kwargs):
path = '/logging-service/v1/queries/{}/{}'.format(query_id, sequence_no)
r = self._httpclient.request(method='GET', url=self.url, params=params, path=path, **kwargs)
return r
|
Poll for asynchronous query results.
Continue to poll for results until this endpoint reports
JOB_FINISHED or JOB_FAILED. The results of queries can be
returned in multiple pages, each of which may contain many log
records. Use this endpoint to poll for query result batches, as
well as to track query result status.
Args:
params (dict): Payload/request dictionary.
query_id (str): Specifies the ID of the query job.
sequence_no (int): Specifies the sequenceNo.
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``logging_query.py`` example.
|
codesearchnet
|