code | docstring | source
---|---|---|
def find_priority_list(py_files):
dependencies = map_dependencies(py_files)
ordered_files = topological_sort(dependencies)
return (ordered_files, dependencies)
|
Given a list of modular files, sorts them by topological order. Modular models that DON'T depend on other modular
models will be higher in the topological order.
Args:
py_files: List of paths to the modular files
Returns:
A tuple with the ordered files (list) and their dependencies (dict)
|
github-repos
|
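A minimal usage sketch (illustration only): the file names below are hypothetical, and the actual ordering comes from the map_dependencies and topological_sort helpers that find_priority_list calls.

modular_files = [
    "modular_llama.py",   # hypothetical file assumed to depend on no other modular model
    "modular_gemma.py",   # hypothetical file assumed to import from modular_llama.py
]
ordered_files, dependencies = find_priority_list(modular_files)
print(ordered_files)   # files without modular dependencies come first
print(dependencies)    # dict mapping each file to the modular files it depends on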
def _assert_same_base_type(items, expected_type=None):
original_expected_type = expected_type
mismatch = False
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
elif expected_type != item_type:
mismatch = True
break
if mismatch:
expected_type = original_expected_type
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (item.name if hasattr(item, 'name') else str(item), item_type, expected_type, ' as %s' % original_item_str if original_item_str else ''))
return expected_type
else:
return expected_type
|
Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or None if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
|
github-repos
|
def get(self, blocking=True):
if self.closed:
raise PoolAlreadyClosedError("Connection pool is already closed.")
if not self.limiter.acquire(blocking=blocking):
return None
c = None
try:
c = self.idle_conns.pop()
except IndexError:
try:
c = self.connect_func()
except Exception:
self.limiter.release()
raise
return _ConnectionProxy(self, c)
|
Gets a connection.
Args:
blocking: Whether to block when max_size connections are already in use.
If false, may return None.
Returns:
A connection to the database.
Raises:
PoolAlreadyClosedError: if close() method was already called on
this pool.
|
juraj-google-style
|
def ts_to_str(jwt_dict):
d = ts_to_dt(jwt_dict)
for (k, v) in list(d.items()):
if isinstance(v, datetime.datetime):
d[k] = v.isoformat().replace('T', ' ')
return d
|
Convert timestamps in JWT to human readable dates.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with human readable
dates.
|
codesearchnet
|
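Illustrative call, assuming ts_to_dt converts the usual integer JWT claims (iat, exp, ...) into datetime.datetime objects; the claim values below are made up and the exact output depends on ts_to_dt and its timezone handling.

jwt_claims = {"sub": "user-1", "iat": 1550000000, "exp": 1550003600}
print(ts_to_str(jwt_claims))
# e.g. {'sub': 'user-1', 'iat': '2019-02-12 19:33:20', 'exp': '2019-02-12 20:33:20'}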
def _RegisterCredentialsMethod(method, position=None):
if (position is None):
position = len(_CREDENTIALS_METHODS)
else:
position = min(position, len(_CREDENTIALS_METHODS))
_CREDENTIALS_METHODS.insert(position, method)
return method
|
Register a new method for fetching credentials.
This new method should be a function with signature:
client_info, **kwds -> Credentials or None
This method can be used as a decorator, unless position needs to
be supplied.
Note that method must *always* accept arbitrary keyword arguments.
Args:
method: New credential-fetching method.
position: (default: None) Where in the list of methods to
add this; if None, we append. In all but rare cases,
this should be either 0 or None.
Returns:
method, for use as a decorator.
|
codesearchnet
|
def find_backend(line: str) -> Optional[str]:
if _re_test_backend.search(line) is None:
return None
backends = [b[0] for b in _re_backend.findall(line)]
backends.sort()
return '_and_'.join(backends)
|
Find one (or multiple) backend in a code line of the init.
Args:
line (`str`): A code line in an init file.
Returns:
Optional[`str`]: If one (or several) backend is found, returns it. In the case of multiple backends (the line
contains `if is_xxx_available() and is_yyy_available()`) returns all backends joined on `_and_` (so
`xxx_and_yyy` for instance).
|
github-repos
|
def create(self, reference, document_data):
write_pbs = _helpers.pbs_for_create(reference._document_path, document_data)
self._add_write_pbs(write_pbs)
|
Add a "change" to this batch to create a document.
If the document given by ``reference`` already exists, then this
batch will fail when :meth:`commit`-ed.
Args:
reference (~.firestore_v1beta1.document.DocumentReference): A
document reference to be created in this batch.
document_data (dict): Property names and values to use for
creating a document.
|
juraj-google-style
|
def gray2bgr(img):
img = (img[(..., None)] if (img.ndim == 2) else img)
out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
return out_img
|
Convert a grayscale image to BGR image.
Args:
img (ndarray or str): The input image.
Returns:
ndarray: The converted BGR image.
|
codesearchnet
|
def associated_stream(self):
if (not self.important):
raise InternalError('You may only call associated_stream when DataStream.important is True', stream=self)
if (self.stream_id >= DataStream.ImportantSystemStorageStart):
stream_type = DataStream.BufferedType
else:
stream_type = DataStream.OutputType
return DataStream(stream_type, self.stream_id, True)
|
Return the corresponding output or storage stream for an important system input.
Certain system inputs are designated as important and automatically
copied to output streams without requiring any manual interaction.
This method returns the corresponding stream for an important system
input. It will raise an InternalError unless the self.important
property is True.
Returns:
DataStream: The corresponding output or storage stream.
Raises:
InternalError: If this stream is not marked as an important system input.
|
codesearchnet
|
def from_bytes(b):
if (len(b) != 64):
raise ValueError('from_bytes: Signature length != 64.')
r = int.from_bytes(b[0:32], 'big')
s = int.from_bytes(b[32:64], 'big')
return Signature(r, s)
|
Extracts the r and s components from a byte string.
Args:
b (bytes): A 64-byte long string. The first 32 bytes are
extracted as the r component and the second 32 bytes
are extracted as the s component.
Returns:
Signature: A Signature object.
Raises:
ValueError: If signature is incorrect length
|
codesearchnet
|
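A self-contained sketch of the same byte layout, using a namedtuple as a stand-in for the library's Signature class (an assumption for illustration only):

import collections

Signature = collections.namedtuple("Signature", ["r", "s"])  # stand-in, not the real class

b = bytes(range(64))                    # any 64-byte string
r = int.from_bytes(b[0:32], "big")      # first 32 bytes -> r
s = int.from_bytes(b[32:64], "big")     # last 32 bytes  -> s
sig = Signature(r, s)
print(sig.r, sig.s)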
def remote_command(task: Task, command: str) -> Result:
client = task.host.get_connection("paramiko", task.nornir.config)
connection_state = task.host.get_connection_state("paramiko")
chan = client.get_transport().open_session()
if connection_state["ssh_forward_agent"]:
AgentRequestHandler(chan)
chan.exec_command(command)
with chan.makefile() as f:
stdout = f.read().decode()
with chan.makefile_stderr() as f:
stderr = f.read().decode()
exit_status_code = chan.recv_exit_status()
if exit_status_code:
raise CommandError(command, exit_status_code, stdout, stderr)
result = stderr if stderr else stdout
return Result(result=result, host=task.host, stderr=stderr, stdout=stdout)
|
Executes a command remotely on the host
Arguments:
command (``str``): command to execute
Returns:
Result object with the following attributes set:
* result (``str``): stderr or stdout
* stdout (``str``): stdout
* stderr (``str``): stderr
Raises:
:obj:`nornir.core.exceptions.CommandError`: when there is a command error
|
juraj-google-style
|
def pretty_plot_two_axis(x, y1, y2, xlabel=None, y1label=None, y2label=None, width=8, height=None, dpi=300):
import palettable.colorbrewer.diverging
colors = palettable.colorbrewer.diverging.RdYlBu_4.mpl_colors
c1 = colors[0]
c2 = colors[(- 1)]
golden_ratio = ((math.sqrt(5) - 1) / 2)
if (not height):
height = int((width * golden_ratio))
import matplotlib.pyplot as plt
width = 12
labelsize = int((width * 3))
ticksize = int((width * 2.5))
styles = ['-', '--', '-.', '.']
(fig, ax1) = plt.subplots()
fig.set_size_inches((width, height))
if dpi:
fig.set_dpi(dpi)
if isinstance(y1, dict):
for (i, (k, v)) in enumerate(y1.items()):
ax1.plot(x, v, c=c1, marker='s', ls=styles[(i % len(styles))], label=k)
ax1.legend(fontsize=labelsize)
else:
ax1.plot(x, y1, c=c1, marker='s', ls='-')
if xlabel:
ax1.set_xlabel(xlabel, fontsize=labelsize)
if y1label:
ax1.set_ylabel(y1label, color=c1, fontsize=labelsize)
ax1.tick_params('x', labelsize=ticksize)
ax1.tick_params('y', colors=c1, labelsize=ticksize)
ax2 = ax1.twinx()
if isinstance(y2, dict):
for (i, (k, v)) in enumerate(y2.items()):
ax2.plot(x, v, c=c2, marker='o', ls=styles[(i % len(styles))], label=k)
ax2.legend(fontsize=labelsize)
else:
ax2.plot(x, y2, c=c2, marker='o', ls='-')
if y2label:
ax2.set_ylabel(y2label, color=c2, fontsize=labelsize)
ax2.tick_params('y', colors=c2, labelsize=ticksize)
return plt
|
Variant of pretty_plot that does a dual axis plot. Adapted from matplotlib
examples. Makes it easier to create plots with different axes.
Args:
x (np.ndarray/list): Data for x-axis.
y1 (dict/np.ndarray/list): Data for y1 axis (left). If a dict, it will
be interpreted as a {label: sequence}.
y2 (dict/np.ndarray/list): Data for y2 axis (right). If a dict, it will
be interpreted as a {label: sequence}.
xlabel (str): If not None, this will be the label for the x-axis.
y1label (str): If not None, this will be the label for the y1-axis.
y2label (str): If not None, this will be the label for the y2-axis.
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width * golden
ratio.
dpi (int): Sets dot per inch for figure. Defaults to 300.
Returns:
matplotlib.pyplot
|
codesearchnet
|
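A hedged usage sketch; it assumes matplotlib and palettable are installed and that pretty_plot_two_axis is importable from the surrounding plotting module (import path not shown in the snippet).

import numpy as np

x = np.linspace(0, 10, 50)
y1 = {"signal A": np.sin(x), "signal B": np.cos(x)}   # left axis: dict -> labelled series
y2 = x ** 2                                           # right axis: single series
plt = pretty_plot_two_axis(x, y1, y2, xlabel="t", y1label="amplitude", y2label="t squared")
plt.show()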
def FromTXOutputsConfirmed(outputs):
uns = UnspentCoinState()
uns.Items = ([0] * len(outputs))
for i in range(0, len(outputs)):
uns.Items[i] = int(CoinState.Confirmed)
return uns
|
Get unspent outputs from a list of transaction outputs.
Args:
outputs (list): of neo.Core.TX.Transaction.TransactionOutput items.
Returns:
UnspentCoinState:
|
codesearchnet
|
def helper_add(access_token, ck_id, path, body):
full_path = ''.join([path, "('", ck_id, "')"])
full_path_encoded = urllib.parse.quote(full_path, safe='')
endpoint = ''.join([ams_rest_endpoint, full_path_encoded])
return do_ams_put(endpoint, full_path_encoded, body, access_token, "json_only", "1.0;NetFx")
|
Helper Function to add strings to a URL path.
Args:
access_token (str): A valid Azure authentication token.
ck_id (str): A CK ID.
path (str): A URL Path.
body (str): A Body.
Returns:
HTTP response. JSON body.
|
juraj-google-style
|
def set_status(self, name: str = None):
game = None
if name:
game = {
'name': name
}
payload = {
'op': WebSocketEvent.STATUS_UPDATE.value,
'd': {
'game': game,
'status': 'online',
'afk': False,
'since': 0.0
}
}
data = json.dumps(payload, indent=2)
self.logger.debug(f'Sending status update payload: {data}')
self._ws.send(data)
|
Updates the bot's status
This is used to set the game that the bot is "playing" or to clear it.
If you want to set a game, pass a name; if you want to clear it, either
call this method without the optional ``name`` parameter or explicitly
pass ``None``.
Args:
name: the game's name, or None
|
juraj-google-style
|
def convert_image_to_example_proto(tensor: tf.Tensor) -> tf.train.Example:
serialized_non_scalar = tf.io.serialize_tensor(tensor)
feature_of_bytes = tf.train.Feature(bytes_list=tf.train.BytesList(value=[serialized_non_scalar.numpy()]))
features_for_example = {'image': feature_of_bytes}
example_proto = tf.train.Example(features=tf.train.Features(feature=features_for_example))
return example_proto
|
This method performs the following:
1. Accepts the tensor as input
2. Serializes the tensor into bytes and passes it through
tf.train.Feature
3. Passes the serialized tensor feature in a tf.train.Example
proto to the RunInference transform.
Args:
tensor: A TF tensor.
Returns:
example_proto: A tf.train.Example containing serialized tensor.
|
github-repos
|
def lint(self, content, **kwargs):
post_data = {'content': content}
data = self.http_post('/ci/lint', post_data=post_data, **kwargs)
return ((data['status'] == 'valid'), data['errors'])
|
Validate a gitlab CI configuration.
Args:
content (str): The .gitlab-ci.yml content
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabVerifyError: If the validation could not be done
Returns:
tuple: (True, []) if the file is valid, (False, errors(list))
otherwise
|
codesearchnet
|
def indent(self, space=4):
if (not isinstance(space, int)):
raise TypeError('space must be an int')
if (space < 0):
raise ValueError('space must be a non-negative integer')
space = (' ' * space)
o = []
l = 0
for c in self.newick():
if (c == '('):
o.append('(\n')
l += 1
o.append((space * l))
elif (c == ')'):
o.append('\n')
l -= 1
o.append((space * l))
o.append(')')
elif (c == ','):
o.append(',\n')
o.append((space * l))
else:
o.append(c)
return ''.join(o)
|
Return an indented Newick string, just like ``nw_indent`` in Newick Utilities
Args:
``space`` (``int``): The number of spaces a tab should equal
Returns:
``str``: An indented Newick string
|
codesearchnet
|
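A self-contained sketch of the same indenting loop applied to a plain Newick string, so the effect can be seen without constructing a tree object (indent_newick is a hypothetical standalone helper mirroring the method above):

def indent_newick(newick, space=4):
    pad = " " * space
    out, level = [], 0
    for c in newick:
        if c == "(":
            out.append("(\n"); level += 1; out.append(pad * level)
        elif c == ")":
            out.append("\n"); level -= 1; out.append(pad * level); out.append(")")
        elif c == ",":
            out.append(",\n"); out.append(pad * level)
        else:
            out.append(c)
    return "".join(out)

print(indent_newick("(A,(B,C));"))
# (
#     A,
#     (
#         B,
#         C
#     )
# );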
def __init__(self, file_pattern, batch_size=1, buffer_size=1, parallelism=1, shift_ratio=0, seed=0, name=None, batches=None, compression_type=None):
self._batch_size = batch_size
if batches is not None:
self._batch_size *= batches
self._batches = batches
self._file_pattern = file_pattern
self._buffer_size = buffer_size
self._parallelism = parallelism
self._shift_ratio = shift_ratio
self._seed = seed
self._name = name
self._compression_type = python_io.TFRecordCompressionType.NONE
if compression_type is not None:
self._compression_type = compression_type
|
Constructs a RecordInput Op.
Args:
file_pattern: File path to the dataset, possibly containing wildcards.
All matching files will be iterated over each epoch.
batch_size: How many records to return at a time.
buffer_size: The maximum number of records the buffer will contain.
parallelism: How many reader threads to use for reading from files.
shift_ratio: What percentage of the total number files to move the start
file forward by each epoch.
seed: Specify the random number seed used by generator that randomizes
records.
name: Optional name for the operation.
batches: None by default, creating a single batch op. Otherwise specifies
how many batches to create, which are returned as a list when
`get_yield_op()` is called. An example use case is to split processing
between devices on one computer.
compression_type: The type of compression for the file. Currently ZLIB and
GZIP are supported. Defaults to none.
Raises:
ValueError: If one of the arguments is invalid.
|
github-repos
|
def write_json(self, path, contents, message):
log.debug(message.format(path=path))
makedirs(os.path.dirname(path))
with open(path, "w") as fh:
json.dump(contents, fh, indent=2, sort_keys=True)
|
Write json to disk.
Args:
path (str): the path to write to
contents (dict): the contents of the json blob
message (str): the message to log
|
juraj-google-style
|
def start_app(self, bundle_id):
idevicedebug = must_look_exec('idevicedebug')
kwargs = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE}
if sys.platform != 'darwin':
kwargs['close_fds'] = True
return subprocess.Popen([idevicedebug, "--udid", self.udid, 'run', bundle_id], **kwargs)
|
Start app by bundle_id
Args:
- bundle_id(string): ex com.netease.my
Returns:
idevicedebug subprocess instance
|
juraj-google-style
|
def get_student_enrollments(self):
resp = self.requester.get(urljoin(self.base_url, self.enrollment_url))
resp.raise_for_status()
return Enrollments(resp.json())
|
Returns an Enrollments object with the user enrollments
Returns:
Enrollments: object representing the student enrollments
|
codesearchnet
|
def read(self, vals):
i = 0
if len(vals[i]) == 0:
self.typical_or_extreme_period_name = None
else:
self.typical_or_extreme_period_name = vals[i]
i += 1
if len(vals[i]) == 0:
self.typical_or_extreme_period_type = None
else:
self.typical_or_extreme_period_type = vals[i]
i += 1
if len(vals[i]) == 0:
self.period_start_day = None
else:
self.period_start_day = vals[i]
i += 1
if len(vals[i]) == 0:
self.period_end_day = None
else:
self.period_end_day = vals[i]
i += 1
|
Read values.
Args:
vals (list): list of strings representing values
|
juraj-google-style
|
def play_random(env, steps):
try:
done = True
progress = tqdm(range(steps))
for _ in progress:
if done:
_ = env.reset()
action = env.action_space.sample()
_, reward, done, info = env.step(action)
progress.set_postfix(reward=reward, info=info)
env.render()
except KeyboardInterrupt:
pass
env.close()
|
Play the environment making uniformly random decisions.
Args:
env (gym.Env): the initialized gym environment to play
steps (int): the number of random steps to take
Returns:
None
|
juraj-google-style
|
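A possible way to drive it (a sketch, assuming a pre-0.26 gym install whose step() returns the 4-tuple that play_random expects, and an environment that supports render()):

import gym

env = gym.make("CartPole-v1")
play_random(env, steps=200)   # 200 uniformly random actions, rendering each frame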
def ToTsvExcel(self, columns_order=None, order_by=()):
csv_result = self.ToCsv(columns_order, order_by, separator='\t')
if (not isinstance(csv_result, six.text_type)):
csv_result = csv_result.decode('utf-8')
return csv_result.encode('UTF-16LE')
|
Returns a file in tab-separated-format readable by MS Excel.
Returns a file in UTF-16 little endian encoding, with tabs separating the
values.
Args:
columns_order: Delegated to ToCsv.
order_by: Delegated to ToCsv.
Returns:
A tab-separated little endian UTF16 file representing the table.
|
codesearchnet
|
def require_debian_packages(packages: List[str]) -> None:
present = are_debian_packages_installed(packages)
missing_packages = [k for k, v in present.items() if not v]
if missing_packages:
missing_packages.sort()
msg = (
"Debian packages are missing, as follows. Suggest:\n\n"
"sudo apt install {}".format(" ".join(missing_packages))
)
log.critical(msg)
raise ValueError(msg)
|
Ensure specific packages are installed under Debian.
Args:
packages: list of packages
Raises:
ValueError: if any are missing
|
juraj-google-style
|
def dispatch_event(event):
try:
if event.http_verb == enums.HTTPVerbs.GET:
requests.get(event.url, params=event.params, timeout=REQUEST_TIMEOUT).raise_for_status()
elif event.http_verb == enums.HTTPVerbs.POST:
requests.post(
event.url, data=json.dumps(event.params), headers=event.headers, timeout=REQUEST_TIMEOUT
).raise_for_status()
except request_exception.RequestException as error:
logging.error('Dispatch event failed. Error: %s' % str(error))
|
Dispatch the event being represented by the Event object.
Args:
event: Object holding information about the request to be dispatched to the Optimizely backend.
|
juraj-google-style
|
def __init__(self, linter_name, path, msg, line_nr=None, col=None):
if line_nr:
line_nr = int(line_nr)
if col:
col = int(col)
self._linter_name = linter_name
self.path = path
self.line_nr = line_nr
self.msg = msg
self.col = col
|
Optionally set all attributes.
Args:
linter_name (str): Name of the linter that produced the message.
path (str): Relative file path.
line_nr (int): Line number.
msg (str): Explanation of what is wrong.
col (int): Column where the problem begins.
|
juraj-google-style
|
def get_configuration(head, update, head_source=None):
head_source = (head_source or get_head_source(head))
update_source = get_acquisition_source(update)
if ((not is_arxiv_and_publisher(head_source, update_source)) and is_manual_merge(head, update)):
return ManualMergeOperations
if (head_source == 'arxiv'):
if (update_source == 'arxiv'):
return ArxivOnArxivOperations
else:
return PublisherOnArxivOperations
elif (update_source == 'arxiv'):
return ArxivOnPublisherOperations
else:
return PublisherOnPublisherOperations
|
This function returns the right configuration for the inspire_merge
function according to the given sources. Both parameters cannot be None.
Params:
head(dict): the HEAD record
update(dict): the UPDATE record
head_source(string): the source of the HEAD record
Returns:
MergerConfigurationOperations: an object containing
the rules needed to merge HEAD and UPDATE
|
codesearchnet
|
def _get_segments(self, start, request_size):
if (not request_size):
return []
end = (start + request_size)
futures = []
while (request_size > self._max_request_size):
futures.append(self._get_segment(start, self._max_request_size))
request_size -= self._max_request_size
start += self._max_request_size
if (start < end):
futures.append(self._get_segment(start, (end - start)))
return [fut.get_result() for fut in futures]
|
Get segments of the file from Google Storage as a list.
A large request is broken into segments to avoid hitting urlfetch
response size limit. Each segment is returned from a separate urlfetch.
Args:
start: start offset to request. Inclusive. Has to be within the
range of the file.
request_size: number of bytes to request.
Returns:
A list of file segments in order
|
codesearchnet
|
def _serve_plugins_listing(self, request):
response = {}
for plugin in self._plugins:
start = time.time()
response[plugin.plugin_name] = plugin.is_active()
elapsed = (time.time() - start)
logger.info('Plugin listing: is_active() for %s took %0.3f seconds', plugin.plugin_name, elapsed)
return http_util.Respond(request, response, 'application/json')
|
Serves an object mapping plugin name to whether it is enabled.
Args:
request: The werkzeug.Request object.
Returns:
A werkzeug.Response object.
|
codesearchnet
|
def review_score(self, reviewer, product):
return self._g.retrieve_review(reviewer, product).score
|
Find a review score from a given reviewer to a product.
Args:
reviewer: Reviewer i.e. an instance of :class:`ria.bipartite.Reviewer`.
product: Product i.e. an instance of :class:`ria.bipartite.Product`.
Returns:
The score of the review from the reviewer to the product.
|
juraj-google-style
|
def create_attachment(cls, session, attachment):
return super(Conversations, cls).create(session, attachment, endpoint_override='/attachments.json', out_type=Attachment)
|
Create an attachment.
An attachment must be sent to the API before it can be used in a
thread. Use this method to create the attachment, then use the
resulting hash when creating a thread.
Note that HelpScout only supports attachments of 10MB or lower.
Args:
session (requests.sessions.Session): Authenticated session.
attachment (helpscout.models.Attachment): The attachment to be
created.
Returns:
helpscout.models.Attachment: The newly created attachment (hash
property only). Use this hash when associating the attachment with
a new thread.
|
codesearchnet
|
def forward(self, hidden_states):
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.pointwise_conv1(hidden_states)
hidden_states = nn.functional.glu(hidden_states, dim=1)
hidden_states = self.depthwise_conv(hidden_states)
hidden_states = self.norm(hidden_states)
hidden_states = hidden_states * torch.sigmoid(hidden_states)
hidden_states = self.pointwise_conv2(hidden_states)
return hidden_states.transpose(1, 2)
|
Compute convolution module.
Args:
hidden_states (`torch.Tensor` of shape `(batch, time, channels)`): Input tensor.
Returns:
`torch.Tensor`: Output tensor of shape `(batch, time, channels)`.
|
github-repos
|
def are_debian_packages_installed(packages: List[str]) -> Dict[(str, bool)]:
assert (len(packages) >= 1)
require_executable(DPKG_QUERY)
args = ([DPKG_QUERY, '-W', '-f=${Package} ${Status}\n'] + packages)
completed_process = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
encoding = sys.getdefaultencoding()
stdout = completed_process.stdout.decode(encoding)
stderr = completed_process.stderr.decode(encoding)
present = OrderedDict()
for line in stdout.split('\n'):
if line:
words = line.split()
assert (len(words) >= 2)
package = words[0]
present[package] = ('installed' in words[1:])
for line in stderr.split('\n'):
if line:
words = line.split()
assert (len(words) >= 2)
package = words[(- 1)]
present[package] = False
log.debug('Debian package presence: {}', present)
return present
|
Check which of a list of Debian packages are installed, via ``dpkg-query``.
Args:
packages: list of Debian package names
Returns:
dict: mapping from package name to boolean ("present?")
|
codesearchnet
|
@dataclass
class PatchingSpec:
o: Any
name: str
custom_op: Callable
orig_op: Optional[Callable] = None
op_wrapper: Optional[Callable] = None
|
Data class that holds patching specifications.
Args:
o: Module / object where the op to patch is located
name: Name of the op to monkey patch
custom_op: Custom op that patches the original op
orig_op: Original op that is being patched
op_wrapper: Wrapper (optional) that wraps both the original and custom ops.
It is useful for ops that are class or static methods for instance.
|
github-repos
|
def make_noise_surface(dims=DEFAULT_DIMS, blur=10, seed=None):
if seed is not None:
np.random.seed(seed)
return gaussian_filter(np.random.normal(size=dims), blur)
|
Makes a surface by generating random noise and blurring it.
Args:
dims (pair): the dimensions of the surface to create
blur (float): the amount of Gaussian blur to apply
seed (int): a random seed to use (optional)
Returns:
surface (np.ndarray): the blurred noise surface with shape dims.
|
juraj-google-style
|
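Usage sketch; it assumes numpy and scipy are available and that gaussian_filter in the snippet comes from scipy.ndimage.

surface = make_noise_surface(dims=(128, 128), blur=10, seed=42)
print(surface.shape)                                # (128, 128)
print(float(surface.min()), float(surface.max()))   # smooth, roughly zero-centred values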
def _open_tracing_interface(self, conn_id, callback):
try:
handle = self._find_handle(conn_id)
services = self._connections[handle]['services']
except (ValueError, KeyError):
callback(conn_id, self.id, False, 'Connection closed unexpectedly before we could open the tracing interface')
return
self._command_task.async_command(['_enable_tracing', handle, services], self._on_interface_finished, {'connection_id': conn_id, 'callback': callback})
|
Enable the debug tracing interface for this IOTile device
Args:
conn_id (int): the unique identifier for the connection
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason)
|
codesearchnet
|
def get_sequence_properties(self, clean_seq=False, representatives_only=True):
for g in tqdm(self.genes):
g.protein.get_sequence_properties(clean_seq=clean_seq, representative_only=representatives_only)
|
Run Biopython ProteinAnalysis and EMBOSS pepstats to summarize basic statistics of all protein sequences.
Results are stored in the protein's respective SeqProp objects at ``.annotations``
Args:
representatives_only (bool): If analysis should only be run on the representative sequences
|
juraj-google-style
|
def members(self):
resp = self._rtm_client.get('v1/current_team.members?all=true')
if resp.is_fail():
raise RTMServiceError('Failed to get members of current team', resp)
return resp.data['result']
|
Gets members of current team
Returns:
list of User
Throws:
RTMServiceError when request failed
|
codesearchnet
|
def _find_docstring_line_for_no_body(self, start):
tracked = sorted(list(self._tokenized_triple_quotes.keys()))
for i in tracked:
if min(start, i) == start:
return i
return None
|
Find the docstring associated with a definition with no body
in the node.
In these cases, the provided start and end line number for that
element are the same, so we must get the docstring based on the
sequential position of known docstrings.
Args:
start: the row where the class / function starts.
Returns:
int: the row number where the docstring is found.
|
juraj-google-style
|
def ParseOptions(cls, options, output_module):
if not hasattr(output_module, 'SetServerInformation'):
raise errors.BadConfigObject('Unable to set server information.')
server = cls._ParseStringOption(
options, 'server', default_value=cls._DEFAULT_SERVER)
port = cls._ParseNumericOption(
options, 'port', default_value=cls._DEFAULT_PORT)
output_module.SetServerInformation(server, port)
|
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object does not have the
SetServerInformation method.
|
juraj-google-style
|
def _split_ir_into_match_steps(pruned_ir_blocks):
output = []
current_tuple = None
for block in pruned_ir_blocks:
if isinstance(block, OutputSource):
continue
elif isinstance(block, root_block_types):
if (current_tuple is not None):
output.append(current_tuple)
current_tuple = (block,)
elif isinstance(block, (CoerceType, Filter, MarkLocation)):
current_tuple += (block,)
else:
raise AssertionError(u'Unexpected block type when converting to MATCH query: {} {}'.format(block, pruned_ir_blocks))
if (current_tuple is None):
raise AssertionError(u'current_tuple was unexpectedly None: {}'.format(pruned_ir_blocks))
output.append(current_tuple)
return [_per_location_tuple_to_step(x) for x in output]
|
Split a list of IR blocks into per-location MATCH steps.
Args:
pruned_ir_blocks: list of IR basic block objects that have gone through a lowering step.
Returns:
list of MatchStep namedtuples, each of which contains all basic blocks that correspond
to a single MATCH step.
|
codesearchnet
|
def setup(self, disk_name, project, turbinia_zone):
if ((project is None) or (turbinia_zone is None)):
self.state.add_error('project or turbinia_zone are not all specified, bailing out', critical=True)
return
self.disk_name = disk_name
self.project = project
self.turbinia_zone = turbinia_zone
try:
turbinia_config.LoadConfig()
self.turbinia_region = turbinia_config.TURBINIA_REGION
self.instance = turbinia_config.PUBSUB_TOPIC
if (turbinia_config.PROJECT != self.project):
self.state.add_error('Specified project {0:s} does not match Turbinia configured project {1:s}. Use gcp_turbinia_import recipe to copy the disk into the same project.'.format(self.project, turbinia_config.PROJECT), critical=True)
return
self._output_path = tempfile.mkdtemp()
self.client = turbinia_client.TurbiniaClient()
except TurbiniaException as e:
self.state.add_error(e, critical=True)
return
|
Sets up the object attributes.
Args:
disk_name (string): Name of the disk to process
project (string): The project containing the disk to process
turbinia_zone (string): The zone containing the disk to process
|
codesearchnet
|
def PyParseIntCast(string, location, tokens):
for index, token in enumerate(tokens):
try:
tokens[index] = int(token)
except ValueError:
logger.error('Unable to cast [{0:s}] to an int, setting to 0'.format(
token))
tokens[index] = 0
for key in tokens.keys():
try:
tokens[key] = int(tokens[key], 10)
except ValueError:
logger.error(
'Unable to cast [{0:s} = {1!s}] to an int, setting to 0'.format(
key, tokens[key]))
tokens[key] = 0
|
Return an integer from a string.
This is a pyparsing callback method that converts the matched
string into an integer.
The method modifies the content of the tokens list and converts
them all to an integer value.
Args:
string (str): original string.
location (int): location in the string where the match was made.
tokens (list[str]): extracted tokens, where the string to be converted
is stored.
|
juraj-google-style
|
def encode_chunk(dataframe):
csv_buffer = six.StringIO()
dataframe.to_csv(
csv_buffer,
index=False,
header=False,
encoding="utf-8",
float_format="%.15g",
date_format="%Y-%m-%d %H:%M:%S.%f",
)
body = csv_buffer.getvalue()
if isinstance(body, bytes):
body = body.decode("utf-8")
body = body.encode("utf-8")
return six.BytesIO(body)
|
Return a file-like object of CSV-encoded rows.
Args:
dataframe (pandas.DataFrame): A chunk of a dataframe to encode
|
juraj-google-style
|
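Usage sketch (requires pandas and six, which the function already depends on):

import pandas as pd

chunk = pd.DataFrame({"name": ["a", "b"], "value": [1.5, 2.5]})
body = encode_chunk(chunk)           # six.BytesIO of UTF-8 CSV rows, no header or index
print(body.read().decode("utf-8"))   # 'a,1.5\nb,2.5\n'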
def __init__(self, level, message, message_id, timestamp=None, now_reference=None):
self.level = level
self.message = message
self.count = 1
self.id = message_id
if timestamp is None:
self.created = monotonic()
elif now_reference is None:
self.created = timestamp
else:
now = monotonic()
adj = now - now_reference
self.created = timestamp + adj
if self.created > now:
self.created = now
|
Constructor.
Args:
level (int): The message importance
message (string): The message contents
message_id (int): A unique id for the message
timestamp (float): An optional monotonic value in seconds for when the message was created
now_reference (float): If timestamp is not relative to monotonic() as called from this
module then this should be now() as seen by whoever created the timestamp.
|
juraj-google-style
|
def complete_acquaintance_strategy(qubit_order: Sequence[ops.Qid],
acquaintance_size: int=0,
) -> circuits.Circuit:
if acquaintance_size < 0:
raise ValueError('acquaintance_size must be non-negative.')
elif acquaintance_size == 0:
return circuits.Circuit(device=UnconstrainedAcquaintanceDevice)
if acquaintance_size > len(qubit_order):
return circuits.Circuit(device=UnconstrainedAcquaintanceDevice)
if acquaintance_size == len(qubit_order):
return circuits.Circuit.from_ops(
acquaint(*qubit_order), device=UnconstrainedAcquaintanceDevice)
strategy = circuits.Circuit.from_ops(
(acquaint(q) for q in qubit_order),
device=UnconstrainedAcquaintanceDevice)
for size_to_acquaint in range(2, acquaintance_size + 1):
expose_acquaintance_gates(strategy)
replace_acquaintance_with_swap_network(
strategy, qubit_order, size_to_acquaint)
return strategy
|
Returns an acquaintance strategy capable of executing a gate corresponding
to any set of at most acquaintance_size qubits.
Args:
qubit_order: The qubits on which the strategy should be defined.
acquaintance_size: The maximum number of qubits to be acted on by
an operation.
Returns:
A circuit capable of implementing any set of k-local
operations.
|
juraj-google-style
|
def softplus(x):
return ops.softplus(x)
|
Softplus activation function.
It is defined as: `softplus(x) = log(exp(x) + 1)`.
Args:
x: Input tensor.
|
github-repos
|
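A small worked example of the definition using numpy as a standalone check (values rounded):

import numpy as np

x = np.array([-2.0, 0.0, 2.0])
print(np.log(np.exp(x) + 1.0))   # approx [0.127, 0.693, 2.127], i.e. log(exp(x) + 1)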
def get_dag(nodes, downstream_fn) -> Tuple[Dict, Dict]:
dag = {}
node_by_ids = {}
for node in nodes:
downstream_ops = downstream_fn(node)
dag[node.id] = set(downstream_ops)
node_by_ids[node.id] = node
return dag, node_by_ids
|
Return a dag representation of the nodes passed.
This is equally used for pipelines and pipeline runs.
Params:
nodes: instances of `Operation` | `OperationRun`; the nodes to represent as a dag.
downstream_fn: a function that returns the downstream nodes of a node.
Returns:
tuple: (dag, dict(node_id: node))
|
juraj-google-style
|
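A self-contained sketch with namedtuple stand-ins for Operation / OperationRun (an assumption for illustration; only .id and a downstream_fn are needed):

import collections

Node = collections.namedtuple("Node", ["id", "downstream"])

a = Node(id=1, downstream=[2, 3])
b = Node(id=2, downstream=[3])
c = Node(id=3, downstream=[])

dag, node_by_ids = get_dag([a, b, c], downstream_fn=lambda n: n.downstream)
print(dag)           # {1: {2, 3}, 2: {3}, 3: set()}
print(node_by_ids)   # {1: Node(id=1, ...), 2: Node(id=2, ...), 3: Node(id=3, ...)}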
def GetDisplayName(self, file_entry=None):
if file_entry is None:
file_entry = self._file_entry
if file_entry is None:
raise ValueError('Missing file entry')
path_spec = getattr(file_entry, 'path_spec', None)
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(
path_spec, mount_path=self._mount_path)
if not relative_path:
return file_entry.name
return self.GetDisplayNameForPathSpec(path_spec)
|
Retrieves the display name for a file entry.
Args:
file_entry (Optional[dfvfs.FileEntry]): file entry object, where None
will return the display name of self._file_entry.
Returns:
str: human readable string that describes the path to the file entry.
Raises:
ValueError: if the file entry is missing.
|
juraj-google-style
|
def create_frames(until=None):
now = Date.now()
if until:
get_orbit(until, now)
else:
for body in list_bodies():
get_orbit(body.name, now)
|
Create frames available in the JPL files
Args:
until (str): Name of the body you want to create the frame of, and all frames in between.
If ``None`` all the frames available in the .bsp files will be created
Example:
.. code-block:: python
# All frames between Earth and Mars are created (Earth, EarthBarycenter,
# SolarSystemBarycenter, MarsBarycenter and Mars)
create_frames(until='Mars')
# All frames between Earth and Phobos are created (Earth, EarthBarycenter,
# SolarSystemBarycenter, MarsBarycenter and Phobos)
create_frames(until='Phobos')
# All frames available in the .bsp files are created
create_frames()
|
codesearchnet
|
def uniprot_reviewed_checker_batch(uniprot_ids):
uniprot_ids = ssbio.utils.force_list(uniprot_ids)
invalid_ids = [i for i in uniprot_ids if not is_valid_uniprot_id(i)]
uniprot_ids = [i for i in uniprot_ids if is_valid_uniprot_id(i)]
if invalid_ids:
warnings.warn("Invalid UniProt IDs {} will be ignored".format(invalid_ids))
Nmax = 200
N, rest = divmod(len(uniprot_ids), Nmax)
uni_rev_dict = {}
if rest > 0:
N += 1
for i in range(0, N):
i1 = i * Nmax
i2 = (i + 1) * Nmax
if i2 > len(uniprot_ids):
i2 = len(uniprot_ids)
query = uniprot_ids[i1:i2]
query_string = ''
for x in query:
query_string += 'id:' + x + '+OR+'
query_string = query_string.strip('+OR+')
uni_rev_raw = StringIO(bsup.search(query_string, columns='id,reviewed', frmt='tab'))
uni_rev_df = pd.read_table(uni_rev_raw, sep='\t', index_col=0)
uni_rev_df = uni_rev_df.fillna(False)
uni_rev_df = uni_rev_df[pd.notnull(uni_rev_df.Status)]
uni_rev_df = uni_rev_df.replace(to_replace="reviewed", value=True)
uni_rev_df = uni_rev_df.replace(to_replace="unreviewed", value=False)
uni_rev_dict_adder = uni_rev_df.to_dict()['Status']
uni_rev_dict.update(uni_rev_dict_adder)
return uni_rev_dict
|
Batch check if uniprot IDs are reviewed or not
Args:
uniprot_ids: UniProt ID or list of UniProt IDs
Returns:
A dictionary of {UniProtID: Boolean}
|
juraj-google-style
|
def probability_density(self, X):
self.check_fit()
return norm.pdf(X, loc=self.mean, scale=self.std)
|
Compute probability density.
Arguments:
X: `np.ndarray` of shape (n, 1).
Returns:
np.ndarray
|
codesearchnet
|
def get_reference(root):
reference = {}
elem = root.find('bibliographyLink')
if elem is None:
raise MissingElementError('bibliographyLink')
ref_doi = elem.get('doi', None)
ref_key = elem.get('preferredKey', None)
if ref_doi is not None:
try:
ref = crossref_api.works(ids=ref_doi)['message']
except (HTTPError, habanero.RequestError, ConnectionError):
if ref_key is None:
raise KeywordError('DOI not found and preferredKey attribute not set')
else:
warn('Missing doi attribute in bibliographyLink or lookup failed. '
'Setting "detail" key as a fallback; please update to the appropriate fields.'
)
reference['detail'] = ref_key
if reference['detail'][-1] != '.':
reference['detail'] += '.'
else:
if ref_key is not None:
warn('Using DOI to obtain reference information, rather than preferredKey.')
reference['doi'] = elem.attrib['doi']
reference['journal'] = ref.get('container-title')[0]
ref_year = ref.get('published-print') or ref.get('published-online')
reference['year'] = int(ref_year['date-parts'][0][0])
reference['volume'] = int(ref.get('volume'))
reference['pages'] = ref.get('page')
reference['authors'] = []
for author in ref['author']:
auth = {}
auth['name'] = ' '.join([author['given'], author['family']])
orcid = author.get('ORCID')
if orcid:
auth['ORCID'] = orcid.lstrip('http://orcid.org/')
reference['authors'].append(auth)
elif ref_key is not None:
warn('Missing doi attribute in bibliographyLink. '
'Setting "detail" key as a fallback; please update to the appropriate fields.'
)
reference['detail'] = ref_key
if reference['detail'][-1] != '.':
reference['detail'] += '.'
else:
raise MissingAttributeError('preferredKey', 'bibliographyLink')
return reference
|
Read reference info from root of ReSpecTh XML file.
Args:
root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file
Returns:
properties (`dict`): Dictionary with reference information
|
juraj-google-style
|
def __valueKeyWithHeaderIndex(self, values):
machingIndexes = {}
for (index, name) in enumerate(self.header):
if (name in values):
machingIndexes[index] = values[name]
return machingIndexes
|
This is a helper function, so that we can match decision values with the row index
as represented in the header.
Args:
values (dict): Normally this maps header names to values from the decision
Returns:
dict: mapping of header index to decision value, i.e.
{
int(index of headerName in header array): values[headerName],
...
}
|
codesearchnet
|
def set_record(self, name, record_id, record):
if name not in self._cache:
self._cache[name] = {}
self._cache[name][record_id] = record
|
Save a record into the cache.
Args:
name (string): The name to save the model under.
record_id (int): The record id.
record (:class:`cinder_data.model.CinderModel`): The model
|
juraj-google-style
|
def cost(self, t_node, branch_length, multiplicity=2.0):
merger_time = (t_node + branch_length)
return ((self.integral_merger_rate(merger_time) - self.integral_merger_rate(t_node)) - ((np.log(self.total_merger_rate(merger_time)) * (multiplicity - 1.0)) / multiplicity))
|
returns the cost associated with a branch starting at t_node
t_node is time before present, the branch goes back in time
Args:
- t_node: time of the node
- branch_length: branch length, determines when this branch merges with sister
- multiplicity: 2 if merger is binary, higher if this is a polytomy
|
codesearchnet
|
def has_implicit_access_to_catalog(user, obj):
request = get_request_or_stub()
decoded_jwt = get_decoded_jwt_from_request(request)
return request_user_has_implicit_access_via_jwt(decoded_jwt, ENTERPRISE_CATALOG_ADMIN_ROLE, obj)
|
Check if the request user has implicit access to the `ENTERPRISE_CATALOG_ADMIN_ROLE` feature role.
Returns:
boolean: whether the request user has access or not
|
codesearchnet
|
def dispatch(self, inp):
inp = tf.reshape(inp, [self._batch * self._length, -1])
ret = tf.gather(inp, self._flat_indices)
return ret
|
Send the inputs to the experts.
Args:
inp: a `Tensor` of shape `[batch, length, depth]`
Returns:
a tensor with shape [batch, num_experts, expert_capacity, depth]
|
juraj-google-style
|
def __rmtree(path):
logger.info("rmtree: %s" % path)
try:
shutil.rmtree(path)
return True
except Exception as e:
logger.error("rmtree: %s failed! Error: %s" % (path, e))
return False
|
Recursively delete a directory tree.
Args:
path (str): Path to the directory that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
|
juraj-google-style
|
def pop_events(self, regex_pattern, timeout):
if (not self.started):
raise IllegalStateError('Dispatcher needs to be started before popping.')
deadline = (time.time() + timeout)
while True:
results = self._match_and_pop(regex_pattern)
if ((len(results) != 0) or (time.time() > deadline)):
break
time.sleep(1)
if (len(results) == 0):
raise queue.Empty('Timeout after {}s waiting for event: {}'.format(timeout, regex_pattern))
return sorted(results, key=(lambda event: event['time']))
|
Pop events whose names match a regex pattern.
If such event(s) exist, pop one event from each event queue that
satisfies the condition. Otherwise, wait for an event that satisfies
the condition to occur, with timeout.
Results are sorted by timestamp in ascending order.
Args:
regex_pattern: The regular expression pattern that an event name
should match in order to be popped.
timeout: Number of seconds to wait for events in case no event
matching the condition exits when the function is called.
Returns:
Events whose names match a regex pattern.
Empty if none exist and the wait timed out.
Raises:
IllegalStateError: Raised if pop is called before the dispatcher
starts polling.
queue.Empty: Raised if no event was found before time out.
|
codesearchnet
|
def __init__(self, cronfile):
options = Options()
options.day_of_week_start_index_zero = False
options.use_24hour_time_format = True
with open(cronfile) as f:
for line in f.readlines():
parsed_line = self.parse_cron_line(line)
if parsed_line:
print("{} -> {}".format(parsed_line, ExpressionDescriptor(parsed_line, options)))
|
Initialize CrontabReader
Args:
cronfile: Path to cronfile
Returns:
None
|
juraj-google-style
|
def is_variable_initialized(variable):
from tensorflow.python.ops import state_ops
return state_ops.is_variable_initialized(variable)
|
Tests if a variable has been initialized.
Args:
variable: A `Variable`.
Returns:
Returns a scalar boolean Tensor, `True` if the variable has been
initialized, `False` otherwise.
|
github-repos
|
def get_region_vcf(self, case_obj, chrom=None, start=None, end=None, gene_obj=None, variant_type='clinical', category='snv', rank_threshold=None):
rank_threshold = (rank_threshold or (- 100))
variant_file = None
if (variant_type == 'clinical'):
if (category == 'snv'):
variant_file = case_obj['vcf_files'].get('vcf_snv')
elif (category == 'sv'):
variant_file = case_obj['vcf_files'].get('vcf_sv')
elif (category == 'str'):
variant_file = case_obj['vcf_files'].get('vcf_str')
elif (variant_type == 'research'):
if (category == 'snv'):
variant_file = case_obj['vcf_files'].get('vcf_snv_research')
elif (category == 'sv'):
variant_file = case_obj['vcf_files'].get('vcf_sv_research')
if (not variant_file):
raise SyntaxError('Vcf file does not seem to exist')
vcf_obj = VCF(variant_file)
region = ''
if gene_obj:
chrom = gene_obj['chromosome']
start = gene_obj['start']
end = gene_obj['end']
if chrom:
if (start and end):
region = '{0}:{1}-{2}'.format(chrom, start, end)
else:
region = '{0}'.format(chrom)
else:
rank_threshold = (rank_threshold or 5)
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp:
file_name = str(pathlib.Path(temp.name))
for header_line in vcf_obj.raw_header.split('\n'):
if (len(header_line) > 3):
temp.write((header_line + '\n'))
for variant in vcf_obj(region):
temp.write(str(variant))
return file_name
|
Produce a reduced vcf with variants from the specified coordinates
This is used for the alignment viewer.
Args:
case_obj(dict): A case from the scout database
variant_type(str): 'clinical' or 'research'. Default: 'clinical'
category(str): 'snv' or 'sv'. Default: 'snv'
rank_threshold(float): Only load variants above this score. Default: 5
chrom(str): Load variants from a certain chromosome
start(int): Specify the start position
end(int): Specify the end position
gene_obj(dict): A gene object from the database
Returns:
file_name(str): Path to the temporary file
|
codesearchnet
|
def parse_text(text):
span_dict = collections.defaultdict(list)
for match in _NUMBER_PATTERN.finditer(text):
span_text = text[match.start():match.end()]
number = _parse_number(span_text)
if number is not None:
span_dict[match.span()].append(_get_numeric_value_from_float(number))
for begin_index, end_index in get_all_spans(text, max_ngram_length=1):
if (begin_index, end_index) in span_dict:
continue
span_text = text[begin_index:end_index]
number = _parse_number(span_text)
if number is not None:
span_dict[begin_index, end_index].append(_get_numeric_value_from_float(number))
for number, word in enumerate(_NUMBER_WORDS):
if span_text == word:
span_dict[begin_index, end_index].append(_get_numeric_value_from_float(float(number)))
break
for number, word in enumerate(_ORDINAL_WORDS):
if span_text == word:
span_dict[begin_index, end_index].append(_get_numeric_value_from_float(float(number)))
break
for begin_index, end_index in get_all_spans(text, max_ngram_length=_MAX_DATE_NGRAM_SIZE):
span_text = text[begin_index:end_index]
date = _parse_date(span_text)
if date is not None:
span_dict[begin_index, end_index].append(date)
spans = sorted(span_dict.items(), key=lambda span_value: _get_span_length_key(span_value[0]), reverse=True)
selected_spans = []
for span, value in spans:
for selected_span, _ in selected_spans:
if selected_span[0] <= span[0] and span[1] <= selected_span[1]:
break
else:
selected_spans.append((span, value))
selected_spans.sort(key=lambda span_value: span_value[0][0])
numeric_value_spans = []
for span, values in selected_spans:
numeric_value_spans.append(NumericValueSpan(begin_index=span[0], end_index=span[1], values=values))
return numeric_value_spans
|
Extracts longest number and date spans.
Args:
text: text to annotate
Returns:
List of longest numeric value spans.
|
github-repos
|
def cos(duration: int, amp: complex, freq: float = None,
phase: float = 0, name: str = None) -> SamplePulse:
if freq is None:
freq = 1/duration
return _sampled_cos_pulse(duration, amp, freq, phase=phase, name=name)
|
Generates cosine wave `SamplePulse`.
Applies `left` sampling strategy to generate discrete pulse from continuous function.
Args:
duration: Duration of pulse. Must be greater than zero.
amp: Pulse amplitude.
freq: Pulse frequency, units of 1/dt. If `None` defaults to single cycle.
phase: Pulse phase.
name: Name of pulse.
|
juraj-google-style
|
def parse_key(key):
hkey, lkey = struct.unpack('<II',key[0:UBIFS_SK_LEN])
ino_num = hkey & UBIFS_S_KEY_HASH_MASK
key_type = lkey >> UBIFS_S_KEY_BLOCK_BITS
khash = lkey
return {'type':key_type, 'ino_num':ino_num, 'khash': khash}
|
Parse node key
Arguments:
Str:key -- Hex string literal of node key.
Returns:
Int:key_type -- Type of key, data, ino, dent, etc.
Int:ino_num -- Inode number.
Int:khash -- Key hash.
|
juraj-google-style
|
def fetch(self, subscription_id, data={}, **kwargs):
return super(Subscription, self).fetch(subscription_id, data, **kwargs)
|
Fetch Subscription for given Id
Args:
subscription_id : Id for which subscription object is retrieved
Returns:
Subscription dict for given subscription Id
|
juraj-google-style
|
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
hasher = resolve_hasher(algorithm, file_hash)
if str(hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
|
Validates a file against a sha256 or md5 hash.
Args:
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of `"auto"`, `"sha256"`, or `"md5"`.
The default `"auto"` detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
Boolean, whether the file is valid.
|
github-repos
|
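Usage sketch; the expected hash is computed here with hashlib purely for the example, while hash_file and resolve_hasher are the library helpers used internally:

import hashlib
import tempfile

with tempfile.NamedTemporaryFile(delete=False, suffix=".bin") as f:
    f.write(b"hello world")
    fpath = f.name

expected = hashlib.sha256(b"hello world").hexdigest()
print(validate_file(fpath, expected))                      # True ("auto" detects sha256)
print(validate_file(fpath, expected, algorithm="sha256"))  # True
print(validate_file(fpath, "0" * 64))                      # False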
def group_systems(self, group_name, systems):
api_group_id = None
headers = {'Content-Type': 'application/json'}
group_path = self.api_url + '/v1/groups'
group_get_path = group_path + ('?display_name=%s' % quote(group_name))
logger.debug("GET group: %s", group_get_path)
net_logger.info("GET %s", group_get_path)
get_group = self.session.get(group_get_path)
logger.debug("GET group status: %s", get_group.status_code)
if get_group.status_code == 200:
api_group_id = get_group.json()['id']
if get_group.status_code == 404:
logger.debug("POST group")
data = json.dumps({'display_name': group_name})
net_logger.info("POST %s", group_path)
post_group = self.session.post(group_path,
headers=headers,
data=data)
logger.debug("POST group status: %s", post_group.status_code)
logger.debug("POST Group: %s", post_group.json())
self.handle_fail_rcs(post_group)
api_group_id = post_group.json()['id']
logger.debug("PUT group")
data = json.dumps(systems)
net_logger.info("PUT %s", group_path + ('/%s/systems' % api_group_id))
put_group = self.session.put(group_path +
('/%s/systems' % api_group_id),
headers=headers,
data=data)
logger.debug("PUT group status: %d", put_group.status_code)
logger.debug("PUT Group: %s", put_group.json())
|
Adds an array of systems to specified group
Args:
group_name: Display name of group
systems: Array of {'machine_id': machine_id}
|
juraj-google-style
|
def setDataFrame(self, dataFrame, copyDataFrame=False, filePath=None):
if (not isinstance(dataFrame, pandas.core.frame.DataFrame)):
raise TypeError('not of type pandas.core.frame.DataFrame')
self.layoutAboutToBeChanged.emit()
if copyDataFrame:
self._dataFrame = dataFrame.copy()
else:
self._dataFrame = dataFrame
self._columnDtypeModel = ColumnDtypeModel(dataFrame)
self._columnDtypeModel.dtypeChanged.connect(self.propagateDtypeChanges)
self._columnDtypeModel.changeFailed.connect((lambda columnName, index, dtype: self.changingDtypeFailed.emit(columnName, index, dtype)))
if (filePath is not None):
self._filePath = filePath
self.layoutChanged.emit()
self.dataChanged.emit()
self.dataFrameChanged.emit()
|
Setter function to _dataFrame. Holds all data.
Note:
It's not implemented with python properties to keep Qt conventions.
Raises:
TypeError: if dataFrame is not of type pandas.core.frame.DataFrame.
Args:
dataFrame (pandas.core.frame.DataFrame): assign dataFrame to _dataFrame. Holds all the data displayed.
copyDataFrame (bool, optional): create a copy of dataFrame or use it as is. defaults to False.
If you use it as is, you can change it from outside otherwise you have to reset the dataFrame
after external changes.
|
codesearchnet
|
def make_serializable(json):
new_dict = dict()
for (key, value) in iteritems(json):
if is_valid_json(value):
new_dict[key] = value
return new_dict
|
This function ensures that the dictionary is JSON serializable. If not,
keys with non-serializable values are removed from the return value.
Args:
json (dict): Dictionary to convert to serializable
Returns:
new_dict (dict): New dictionary with non JSON serializable values removed
|
codesearchnet
|
def predictPhenos(self,use_fixed=None,use_random=None):
assert self.noisPos is not None, 'No noise element'
assert self.init, 'GP not initialised'
assert self.Ntest is not None, 'VarianceDecomposition:: specify Ntest for predictions (method VarianceDecomposition::setTestSampleSize)'
if use_fixed is None: use_fixed = list(range(self.n_fixedEffs))
if use_random is None: use_random = list(range(self.n_randEffs))
KiY = self.gp.agetKEffInvYCache()
if self.fast==False:
KiY = KiY.reshape(self.P,self.N).T
Ypred = sp.zeros((self.Ntest,self.P))
for term_i in use_random:
if term_i!=self.noisPos:
Kstar = self.Kstar[term_i]
if Kstar is None:
warnings.warn('warning: random effect term %d not used for predictions as it has None cross covariance'%term_i)
continue
term = sp.dot(Kstar.T,KiY)
if self.P>1:
C = self.getTraitCovar(term_i)
term = sp.dot(term,C)
else:
term *= self.getVarianceComps()[0,term_i]
Ypred += term
weights = self.getWeights()
w_i = 0
for term_i in use_fixed:
Fstar = self.Fstar[term_i]
if Fstar is None:
warnings.warn('warning: fixed effect term %d not used for predictions as it has None test sample design'%term_i)
continue
if self.P==1: A = sp.eye(1)
else: A = self.vd.getDesign(term_i)
Fstar = self.Fstar[term_i]
W = weights[w_i:w_i+A.shape[0],0:1].T
term = sp.dot(Fstar,sp.dot(W,A))
w_i += A.shape[0]
Ypred += term
return Ypred
|
predict the conditional mean (BLUP)
Args:
use_fixed: list of fixed effect indices to use for predictions
use_random: list of random effect indices to use for predictions
Returns:
predictions (BLUP)
|
juraj-google-style
|
def score_one(self, x: beam.Row) -> Optional[float]:
if len(x.__dict__) != 1:
raise ValueError('ZScore.score_one expected univariate input, but got %s' % str(x))
v = next(iter(x))
if v is None or math.isnan(v):
return None
sub_stat = self._sub_stat_tracker.get()
stdev = self._stdev_tracker.get()
if math.isnan(stdev) or math.isnan(sub_stat):
return float('NaN')
if abs(stdev) < EPSILON:
return 0.0
return abs((v - sub_stat) / stdev)
|
Scores a data point using the Z-Score.
Args:
x: A `beam.Row` containing a single numerical value.
Returns:
float | None: The Z-Score.
|
github-repos
|
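A worked example of the scoring arithmetic only; the real sub_stat and stdev come from trackers fed by the surrounding Beam pipeline, so the numbers below are assumptions:

sub_stat, stdev, v = 10.0, 2.0, 16.0     # assumed running mean, running stdev, new value
score = abs((v - sub_stat) / stdev)
print(score)   # 3.0 -> the point sits three standard deviations from the running estimate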
def set_logfile(self, filename, max_bytes=0, backup_count=0):
_logger = logging.getLogger("neo-python")
if not filename and self.rotating_filehandler:
_logger.removeHandler(self.rotating_filehandler)
else:
self.rotating_filehandler = RotatingFileHandler(filename, mode='a', maxBytes=max_bytes, backupCount=backup_count, encoding=None)
self.rotating_filehandler.setLevel(logging.DEBUG)
self.rotating_filehandler.setFormatter(LogFormatter(color=False))
_logger.addHandler(self.rotating_filehandler)
|
Setup logging to a (rotating) logfile.
Args:
filename (str): Logfile. If filename is None, disable file logging
max_bytes (int): Maximum number of bytes per logfile. If used together with backup_count,
logfile will be rotated when it reaches this amount of bytes.
backup_count (int): Number of rotated logfiles to keep
|
juraj-google-style
|
def enum_to_yaml(cls: Type[T_EnumToYAML], representer: Representer, data: T_EnumToYAML) -> ruamel.yaml.nodes.ScalarNode:
return representer.represent_scalar(f'!{cls.__name__}', f'{str(data)}')
|
Encodes YAML representation.
This is a mixin method for writing enum values to YAML. It needs to be added to the enum
as a classmethod. See the module docstring for further information on this approach and how
to implement it.
This method writes whatever is used in the string representation of the YAML value.
Usually, this will be the unique name of the enumeration value. If the name is used,
the corresponding ``EnumFromYAML`` mixin can be used to recreate the value. If the name
isn't used, more care may be necessary, so a ``from_yaml`` method for that particular
enumeration may be necessary.
Note:
This method assumes that the name of the enumeration value should be stored as a scalar node.
Args:
representer: Representation from YAML.
data: Enumeration value to be encoded.
Returns:
Scalar representation of the name of the enumeration value.
|
codesearchnet
|
def children(cls, obj, save_type=base.SaveType.CHECKPOINT, **kwargs):
obj._maybe_initialize_trackable()
children = {}
for name, ref in obj._trackable_children(save_type, **kwargs).items():
ref = converter.convert_to_trackable(ref, parent=obj)
children[name] = ref
return children
|
Returns all child trackables attached to obj.
Args:
obj: A `Trackable` object.
save_type: A string, can be 'savedmodel' or 'checkpoint'.
**kwargs: kwargs to use when retrieving the object's children.
Returns:
Dictionary of all children attached to the object with name to trackable.
|
github-repos
|
def kill_raylet_monitor(self, check_alive=True):
self._kill_process_type(
ray_constants.PROCESS_TYPE_RAYLET_MONITOR, check_alive=check_alive)
|
Kill the raylet monitor.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
|
juraj-google-style
|
def get_general_case_info(adapter, institute_id=None, slice_query=None):
general = {}
name_query = slice_query
cases = adapter.cases(owner=institute_id, name_query=name_query)
phenotype_cases = 0
causative_cases = 0
pinned_cases = 0
cohort_cases = 0
pedigree = {1: {'title': 'Single', 'count': 0}, 2: {'title': 'Duo', 'count': 0}, 3: {'title': 'Trio', 'count': 0}, 'many': {'title': 'Many', 'count': 0}}
case_ids = set()
total_cases = 0
for (total_cases, case) in enumerate(cases, 1):
if institute_id:
case_ids.add(case['_id'])
if case.get('phenotype_terms'):
phenotype_cases += 1
if case.get('causatives'):
causative_cases += 1
if case.get('suspects'):
pinned_cases += 1
if case.get('cohorts'):
cohort_cases += 1
nr_individuals = len(case.get('individuals', []))
if (nr_individuals == 0):
continue
if (nr_individuals > 3):
pedigree['many']['count'] += 1
else:
pedigree[nr_individuals]['count'] += 1
general['total_cases'] = total_cases
general['phenotype_cases'] = phenotype_cases
general['causative_cases'] = causative_cases
general['pinned_cases'] = pinned_cases
general['cohort_cases'] = cohort_cases
general['pedigree'] = pedigree
general['case_ids'] = case_ids
return general
|
Return general information about cases
Args:
adapter(adapter.MongoAdapter)
institute_id(str)
slice_query(str): Query to filter cases to obtain statistics for.
Returns:
general(dict)
|
codesearchnet
|
def merge(self, other):
if ((self.m != other.m) or (self.p != other.p)):
raise ValueError('Cannot merge HyperLogLog with different precisions.')
self.reg = np.maximum(self.reg, other.reg)
|
Merge the other HyperLogLog with this one, making this the union of the
two.
Args:
other (datasketch.HyperLogLog):
|
codesearchnet
|
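The union in the merge entry above is just an element-wise maximum of the two register arrays, valid only when both sketches share the same precision. A tiny NumPy illustration of that invariant (the register values are made up):

import numpy as np

# Two hypothetical 8-register sketches with the same precision.
reg_a = np.array([0, 3, 1, 5, 0, 2, 4, 1], dtype=np.int8)
reg_b = np.array([2, 1, 1, 6, 0, 0, 4, 3], dtype=np.int8)

# Union of the underlying sets: keep the larger register value per position.
merged = np.maximum(reg_a, reg_b)
print(merged)  # [2 3 1 6 0 2 4 3]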
def SelectComponent(ds, idxs):
return MapData(ds, lambda dp: [dp[i] for i in idxs])
|
Select / reorder components from datapoints.
Args:
ds (DataFlow): input DataFlow.
idxs (list[int]): a list of component indices.
Example:
.. code-block:: none
original df produces: [c1, c2, c3]
idxs: [2,1]
this df: [c3, c2]
|
juraj-google-style
|
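The SelectComponent entry is a one-line MapData over datapoint indices; the reordering itself is plain list indexing. A sketch without the DataFlow machinery:

def select_components(datapoint, idxs):
    # Pick (and reorder) the requested components from one datapoint.
    return [datapoint[i] for i in idxs]

print(select_components(['c1', 'c2', 'c3'], [2, 1]))  # ['c3', 'c2']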
def __contains__(self, temp_ver):
return os.path.exists(self._prefixed(temp_ver.name))
|
Checks if a given version is in this store
Args:
temp_ver (TemplateVersion): Version to look for
Returns:
bool: ``True`` if the version is in this store
|
juraj-google-style
|
def append(self, *values):
for value in values:
list.append(self, value)
return self
|
Append values at the end of the list
Allow chaining.
Args:
values: values to be appended at the end.
Example:
>>> from ww import l
>>> lst = l([])
>>> lst.append(1)
[1]
>>> lst
[1]
>>> lst.append(2, 3).append(4,5)
[1, 2, 3, 4, 5]
>>> lst
[1, 2, 3, 4, 5]
|
juraj-google-style
|
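The chaining behaviour in the append entry comes from returning self after delegating to list.append. A minimal subclass showing the same pattern (the class name is an assumption for illustration, mirroring the ww.l behaviour shown above):

class ChainList(list):  # hypothetical stand-in for ww.l
    def append(self, *values):
        for value in values:
            list.append(self, value)
        return self  # returning self is what makes .append(...).append(...) chain

lst = ChainList()
print(lst.append(1))                  # [1]
print(lst.append(2, 3).append(4, 5))  # [1, 2, 3, 4, 5]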
def propagate(cls, date):
date = date.change_scale('TDB')
t_tdb = date.julian_century
def cos(angle):
'cosine in degrees'
return np.cos(np.radians(angle))
def sin(angle):
'sine in degrees'
return np.sin(np.radians(angle))
lambda_el = (218.32 + 481267.8813 * t_tdb
             + 6.29 * sin(134.9 + 477198.85 * t_tdb)
             - 1.27 * sin(259.2 - 413335.38 * t_tdb)
             + 0.66 * sin(235.7 + 890534.23 * t_tdb)
             + 0.21 * sin(269.9 + 954397.7 * t_tdb)
             - 0.19 * sin(357.5 + 35999.05 * t_tdb)
             - 0.11 * sin(186.6 + 966404.05 * t_tdb))
phi_el = (5.13 * sin(93.3 + 483202.03 * t_tdb)
          + 0.28 * sin(228.2 + 960400.87 * t_tdb)
          - 0.28 * sin(318.3 + 6003.18 * t_tdb)
          - 0.17 * sin(217.6 - 407332.2 * t_tdb))
p = (0.9508 + 0.0518 * cos(134.9 + 477198.85 * t_tdb)
     + 0.0095 * cos(259.2 - 413335.38 * t_tdb)
     + 0.0078 * cos(235.7 + 890534.23 * t_tdb)
     + 0.0028 * cos(269.9 + 954397.7 * t_tdb))
e_bar = 23.439291 - 0.0130042 * t_tdb - 1.64e-07 * t_tdb ** 2 + 5.04e-07 * t_tdb ** 3
r_moon = Earth.r / sin(p)
state_vector = r_moon * np.array([
    cos(phi_el) * cos(lambda_el),
    cos(e_bar) * cos(phi_el) * sin(lambda_el) - sin(e_bar) * sin(phi_el),
    sin(e_bar) * cos(phi_el) * sin(lambda_el) + cos(e_bar) * sin(phi_el),
    0, 0, 0])
return Orbit(date, state_vector, 'cartesian', 'EME2000', cls())
|
Compute the Moon position at a given date
Args:
date (~beyond.utils.date.Date)
Return:
~beyond.orbits.orbit.Orbit: Position of the Moon in EME2000 frame
Example:
.. code-block:: python
from beyond.utils.date import Date
MoonPropagator.propagate(Date(1994, 4, 28))
# Orbit =
# date = 1994-04-28T00:00:00 UTC
# form = Cartesian
# frame = EME2000
# propag = MoonPropagator
# coord =
# x = -134181157.317
# y = -311598171.54
# z = -126699062.437
# vx = 0.0
# vy = 0.0
# vz = 0.0
|
codesearchnet
|
def get_path_str(self, sep=os.path.sep, type_str=None):
return sep.join(
list(
reversed(
[
v.label_str
for v in self.parent_gen
if type_str in (None, v.type_str)
]
)
)
)
|
Get path from root to this node.
Args:
sep: str
One or more characters to insert between each element in the path.
Defaults to "/" on Unix and "\" on Windows.
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
str: String describing the path from the root to this node.
|
juraj-google-style
|
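get_path_str walks parent links from the node to the root, optionally filters by node type, reverses the labels, and joins them with the separator. An equivalent sketch over a plain linked structure (the Node class here is hypothetical):

import os

class Node:  # hypothetical stand-in for the tree nodes used above
    def __init__(self, label_str, type_str, parent=None):
        self.label_str = label_str
        self.type_str = type_str
        self.parent = parent

    @property
    def parent_gen(self):
        # Yield self, then each ancestor up to the root.
        node = self
        while node is not None:
            yield node
            node = node.parent

    def get_path_str(self, sep=os.path.sep, type_str=None):
        labels = [v.label_str for v in self.parent_gen
                  if type_str in (None, v.type_str)]
        return sep.join(reversed(labels))

root = Node('root', 'subject')
leaf = Node('leaf', 'type', parent=Node('mid', 'type', parent=root))
print(leaf.get_path_str(sep='/'))  # root/mid/leaf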
def shannon_entropy(pvec, base=2):
if base == 2:
def logfn(x):
return - x * np.log2(x)
elif base == np.e:
def logfn(x):
return - x * np.log(x)
else:
def logfn(x):
return -x * np.log(x) / np.log(base)
h = 0.
for x in pvec:
if 0 < x < 1:
h += logfn(x)
return h
|
Compute the Shannon entropy of a probability vector.
The shannon entropy of a probability vector pv is defined as
$H(pv) = - \\sum_j pv[j] log_b (pv[j])$ where $0 log_b 0 = 0$.
Args:
pvec (array_like): a probability vector.
base (int): the base of the logarithm.
Returns:
float: The Shannon entropy H(pvec).
|
juraj-google-style
|
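A worked check of the entropy entry above: for a fair coin pvec = [0.5, 0.5] in base 2, each term contributes -0.5 * log2(0.5) = 0.5, so H = 1 bit. A compact NumPy sketch of the same formula (vectorised rather than looping, but numerically equivalent):

import numpy as np

def shannon_entropy(pvec, base=2):
    # -sum p*log_b(p), skipping entries that are exactly 0 or 1.
    pvec = np.asarray(pvec, dtype=float)
    mask = (pvec > 0) & (pvec < 1)
    return float(-np.sum(pvec[mask] * np.log(pvec[mask]) / np.log(base)))

print(shannon_entropy([0.5, 0.5]))  # 1.0
print(shannon_entropy([0.25] * 4))  # 2.0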
def ShowUnspentCoins(wallet, asset_id=None, from_addr=None, watch_only=False, do_count=False):
if (wallet is None):
print('Please open a wallet.')
return
watch_only_flag = (64 if watch_only else 0)
if asset_id:
unspents = wallet.FindUnspentCoinsByAsset(asset_id, from_addr=from_addr, watch_only_val=watch_only_flag)
else:
unspents = wallet.FindUnspentCoins(from_addr=from_addr, watch_only_val=watch_only_flag)
if do_count:
print('\n-----------------------------------------------')
print(('Total Unspent: %s' % len(unspents)))
return unspents
for unspent in unspents:
print('\n-----------------------------------------------')
print(json.dumps(unspent.ToJson(), indent=4))
if (not unspents):
print('No unspent assets matching the arguments.')
return unspents
|
Show unspent coin objects in the wallet.
Args:
wallet (neo.Wallet): wallet to show unspent coins from.
asset_id (UInt256): a bytearray (len 32) representing an asset on the blockchain.
from_addr (UInt160): a bytearray (len 20) representing an address.
watch_only (bool): indicate if this shows coins that are in 'watch only' addresses.
do_count (bool): if True only show a count of unspent assets.
Returns:
list: a list of unspent ``neo.Wallet.Coin`` in the wallet
|
codesearchnet
|
def _encode_value(self, value):
if isinstance(value, (int, float, str, bool, datetime)):
return value
elif isinstance(value, list):
return [self._encode_value(item) for item in value]
elif isinstance(value, dict):
result = {}
for key, item in value.items():
result[key] = self._encode_value(item)
return result
else:
return self._gridfs.put(Binary(pickle.dumps(value)),
workflow_id=self._workflow_id)
|
Encodes the value such that it can be stored into MongoDB.
Any primitive types are stored directly into MongoDB, while non-primitive types
are pickled and stored as GridFS objects. The id pointing to a GridFS object
replaces the original value.
Args:
value (object): The object that should be encoded for storing in MongoDB.
Returns:
object: The encoded value ready to be stored in MongoDB.
|
juraj-google-style
|
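The encoder above keeps MongoDB-friendly primitives as-is, recurses through lists and dicts, and falls back to pickling anything else (stored via GridFS in the source). A sketch of the same dispatch without the MongoDB dependency; the pickle fallback here simply returns bytes instead of a GridFS id:

import pickle
from datetime import datetime

def encode_value(value):
    # Primitives pass through; containers are encoded element by element;
    # anything else is pickled (the source stores the pickle in GridFS instead).
    if isinstance(value, (int, float, str, bool, datetime)):
        return value
    if isinstance(value, list):
        return [encode_value(item) for item in value]
    if isinstance(value, dict):
        return {key: encode_value(item) for key, item in value.items()}
    return pickle.dumps(value)

print(encode_value({'n': 3, 'items': [1, 'a'], 'blob': {1, 2}}))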
def update_metadata(self, resource, keys_vals):
self.metadata_service.set_auth(self._token_metadata)
self.metadata_service.update(resource, keys_vals)
|
Updates key-value pairs with the given resource.
Will attempt to update all key-value pairs even if some fail.
Keys must already exist.
Args:
resource (intern.resource.boss.BossResource)
keys_vals (dictionary): Collection of key-value pairs to update on
the given resource.
Raises:
HTTPErrorList on failure.
|
codesearchnet
|
def set_device_state(self, device, state, id_override=None, type_override=None):
_LOGGER.info('Setting state via online API')
object_id = (id_override or device.object_id())
object_type = (type_override or device.object_type())
url_string = '{}/{}s/{}'.format(self.BASE_URL, object_type, object_id)
if ((state is None) or (object_type == 'group')):
url_string += '/activate'
if (state is None):
arequest = requests.post(url_string, headers=API_HEADERS)
else:
arequest = requests.post(url_string, data=json.dumps(state), headers=API_HEADERS)
else:
arequest = requests.put(url_string, data=json.dumps(state), headers=API_HEADERS)
if (arequest.status_code == 401):
new_token = refresh_access_token()
if new_token:
arequest = requests.put(url_string, data=json.dumps(state), headers=API_HEADERS)
else:
raise WinkAPIException('Failed to refresh access token.')
response_json = arequest.json()
_LOGGER.debug('%s', response_json)
return response_json
|
Set device state via online API.
Args:
device (WinkDevice): The device the change is being requested for.
state (Dict): The state being requested.
id_override (String, optional): A device ID used to override the
passed in device's ID. Used to make changes on sub-devices.
i.e. Outlet in a Powerstrip. The Parent device's ID.
type_override (String, optional): Used to override the device type
when a device inherits from a device other than WinkDevice.
Returns:
response_json (Dict): The API's response in dictionary format
|
codesearchnet
|
def is_initialized(self, name=None):
if values_util.is_saving_non_distributed():
return self._primary.is_initialized()
if self._use_packed_variable():
return self._packed_var.is_initialized()
result = self._primary.is_initialized()
for v in self._values[1:-1]:
result = math_ops.logical_and(result, v.is_initialized())
result = math_ops.logical_and(result, self._values[-1].is_initialized(), name=name)
return result
|
Identifies if all the component variables are initialized.
Args:
name: Name of the final `logical_and` op.
Returns:
The op that evaluates to True or False depending on if all the
component variables are initialized.
|
github-repos
|
def __init__(self, pred=None, pivot=None, branch=None, name='cond_text', context_def=None, import_scope=None):
self._name = ops.get_default_graph().unique_name(name)
if context_def:
self._init_from_proto(context_def, import_scope=import_scope)
else:
ControlFlowContext.__init__(self)
self._pred = pred
self._pivot = pivot
self._branch = branch
self._values.add(pred.name)
self._external_values[pred.name] = pred
self._values.add(pivot.name)
pivot.op._set_control_flow_context(self)
|
Creates a `CondContext`.
Args:
pred: The `boolean` tensor for the conditional predicate.
pivot: The predicate tensor in this branch.
branch: 0 or 1 representing this branch.
name: Name of the `CondContext` python object.
context_def: Optional `ContextDef` protocol buffer to initialize the
`CondContext` object from.
import_scope: Optional `string`. Name scope to add. Only used when
initialing from protocol buffer.
|
github-repos
|
def get_states(self, n):
return self.states[len(self.new_states):(len(self.new_states) + n)]
|
Get the next n recurrent states.
Called by layers in "incremental" mode.
Args:
n: an integer
Returns:
a list of n Tensors
|
codesearchnet
|
def _unflatten_dict(flat_dict, prefixes):
original_dict = {}
for key, value in flat_dict.items():
prefix_found = False
for prefix in prefixes:
full_prefix = "__" + prefix + "_"
if key.startswith(full_prefix):
if prefix not in original_dict:
original_dict[prefix] = {}
original_dict[prefix][key[len(full_prefix):]] = value
prefix_found = True
break
if not prefix_found:
original_dict[key] = value
return original_dict
|
Returns a dict of dicts if any prefixes match keys in the flat dict.
The function handles the case where the prefix may not be a dict.
Args:
flat_dict: A dict without any nesting.
prefixes: A list of strings which may have been dicts in the
original structure.
|
juraj-google-style
|
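A quick illustration of the unflattening entry above: keys shaped like "__<prefix>_<name>" are regrouped under their prefix, everything else stays at the top level. The input values here are made up:

def unflatten_dict(flat_dict, prefixes):
    original_dict = {}
    for key, value in flat_dict.items():
        for prefix in prefixes:
            full_prefix = "__" + prefix + "_"
            if key.startswith(full_prefix):
                original_dict.setdefault(prefix, {})[key[len(full_prefix):]] = value
                break
        else:
            # No prefix matched; keep the key at the top level.
            original_dict[key] = value
    return original_dict

flat = {'__opt_lr': 0.1, '__opt_beta': 0.9, 'steps': 100}
print(unflatten_dict(flat, ['opt']))  # {'opt': {'lr': 0.1, 'beta': 0.9}, 'steps': 100}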
def __init__(self, filter=None):
self._filter = filter
self._context = datalab.Context.default()
self._api = discovery.build('ml', 'v1', credentials=self._context.credentials)
self._page_size = 0
|
Initializes an instance of a CloudML Job list that is iteratable ("for job in jobs()").
Args:
filter: filter string for retrieving jobs, such as "state=FAILED"
context: an optional Context object providing project_id and credentials.
api: an optional CloudML API client.
|
juraj-google-style
|
def _fulfillment_to_details(fulfillment):
if (fulfillment.type_name == 'ed25519-sha-256'):
return {'type': 'ed25519-sha-256', 'public_key': base58.b58encode(fulfillment.public_key).decode()}
if (fulfillment.type_name == 'threshold-sha-256'):
subconditions = [_fulfillment_to_details(cond['body']) for cond in fulfillment.subconditions]
return {'type': 'threshold-sha-256', 'threshold': fulfillment.threshold, 'subconditions': subconditions}
raise UnsupportedTypeError(fulfillment.type_name)
|
Encode a fulfillment as a details dictionary
Args:
fulfillment: Crypto-conditions Fulfillment object
|
codesearchnet
|
def __init__(self, report_type):
self._report_type = report_type
self.categories = dict([(x, {}) for x in self.active_days])
|
Constructor.
Args:
report_type: rdf_stats.ClientGraphSeries.ReportType for the client stats
to track.
|
juraj-google-style
|
def __init__(self, port=2223, task_type=None, task_id=None, rpc_layer=None, environment=None):
self._task_type = task_type
self._task_id = task_id
self._rpc_layer = rpc_layer
self._environment = environment
self._port = str(port)
|
Creates a new SageMakerClusterResolver.
Args:
port: (integer, optional) Override default port usage of 2223
task_type: (String, optional) Overrides the task type.
task_id: (Integer, optional) Overrides the task index.
rpc_layer: (String, optional) Overrides the rpc layer TensorFlow uses.
environment: (String, optional) Overrides the environment TensorFlow
operates in.
|
github-repos
|
def do_batch(args):
if (args.subcommand == 'list'):
do_batch_list(args)
if (args.subcommand == 'show'):
do_batch_show(args)
if (args.subcommand == 'status'):
do_batch_status(args)
if (args.subcommand == 'submit'):
do_batch_submit(args)
|
Runs the batch list, batch show, batch status or batch submit command, printing output
to the console
Args:
args: The parsed arguments sent to the command at runtime
|
codesearchnet
|