code (string, lengths 20-4.93k) | docstring (string, lengths 33-1.27k) | source (3 classes) |
---|---|---|
def change_numbering(self, new_index=None):
if (new_index is None):
new_index = range(len(self))
elif (len(new_index) != len(self)):
raise ValueError('len(new_index) has to be the same as len(self)')
c_table = self.loc[:, ['b', 'a', 'd']]
c_table = c_table.replace(constants.int_label)
try:
c_table = c_table.astype('i8')
except ValueError:
raise ValueError('Due to a bug in pandas it is necessary to have integer columns')
c_table = c_table.replace(self.index, new_index)
c_table = c_table.replace({v: k for (k, v) in constants.int_label.items()})
out = self.copy()
out.unsafe_loc[:, ['b', 'a', 'd']] = c_table
out._frame.index = new_index
return out | Change numbering to a new index.
Changes the numbering of index and all dependent numbering
(bond_with...) to a new_index.
The user has to make sure that the new_index consists of distinct
elements.
Args:
new_index (list): If None the new_index is taken from 1 to the
number of atoms.
Returns:
Zmat: Reindexed version of the zmatrix. | codesearchnet |
def enable(self, key_id, **kwargs):
path = '%s/%s/enable' % (self.path, key_id)
self.gitlab.http_post(path, **kwargs) | Enable a deploy key for a project.
Args:
key_id (int): The ID of the key to enable
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabProjectDeployKeyError: If the key could not be enabled | juraj-google-style |
def verifymessage(self, address, signature, message):
verified = self.rpc.call('verifymessage', address, signature, message)
self.logger.debug(('Signature verified: %s' % str(verified)))
return verified | Verifies that a message has been signed by an address.
Args:
address (str): address claiming to have signed the message
signature (str): ECDSA signature
message (str): plaintext message which was signed
Returns:
bool: True if the address signed the message, False otherwise | codesearchnet |
def request_server_info(self):
if (self._server_info is None):
self._server_info = self._send_request_server_info()
return self._server_info | Ask for information about the server.
Returns:
A dictionary of server attributes. | codesearchnet |
def fetch(clobber=False):
dest_dir = fname_pattern = os.path.join(data_dir(), 'iphas')
url_pattern = 'http:
fname_pattern = os.path.join(dest_dir, 'A_samp_') + '{:03d}.tar.gz'
if not clobber:
h5_fname = os.path.join(dest_dir, 'iphas.h5')
h5_size = 227817543
h5_dsets = {
'samples': (61130,)
}
if fetch_utils.h5_file_exists(h5_fname, h5_size, dsets=h5_dsets):
print('File appears to exist already. Call `fetch(clobber=True)` '
'to force overwriting of existing file.')
return
file_md5sum = {
30: 'dd531e397622bc97d4ff92b6c7863ade',
40: 'b0f925eb3e46b77876e4054a26ad5b52',
50: 'ea3b9500f0419d66dd92d9f9c127c2b5',
60: 'cccf136f4e2306a6038e8093499216fd',
70: 'a05fe2f815086686056c18087cc5410b',
80: '799bf618c8827b3d7250c884ec66ec49',
90: 'd2a302d917da768bacf6ea74cb9dcfad',
100: '2c75e31ad9320818556c4c9964b6af65',
110: '742ea8de6f5f8a7e549f6c56b0088789',
120: '9beabfa2c9634f953adadb5016eab072',
130: '7cd7313f466eb60e8318d0f1bd32e035',
140: 'fb6d09e4d939081b891e245c30b791f1',
150: '8e9b6dc1561183aeadc64f41c85a64a8',
160: '8a35828457b7b1d53d06998114553674',
170: '7ffb29ec23e2f625dcfaaa84c293821d',
180: 'c737da479d132b88483d6ddab5b25fc8',
190: '9bc5fc7f7ba55f36a167473bb3679601',
200: '7d8ffc4aa2f7c7026d8aa3ffb670d48e',
210: 'e31b04964b7970b81fc90c120b4ebc24'
}
for key in file_md5sum:
url = url_pattern.format(key)
print('Downloading {}'.format(url))
fetch_utils.download_and_verify(
url,
file_md5sum[key],
fname_pattern.format(key))
print('Repacking files...')
ascii2h5(dest_dir, os.path.join(dest_dir, 'iphas.h5'))
print('Removing original files...')
for key in file_md5sum:
os.remove(fname_pattern.format(key)) | Downloads the IPHAS 3D dust map of Sale et al. (2014).
Args:
clobber (Optional[bool]): If ``True``, any existing file will be
overwritten, even if it appears to match. If ``False`` (the
default), ``fetch()`` will attempt to determine if the dataset
already exists. This determination is not 100\% robust against data
corruption. | juraj-google-style |
def __init__(self, dump_root, tfdbg_run_id, circular_buffer_size=DEFAULT_CIRCULAR_BUFFER_SIZE):
if not dump_root:
raise ValueError('Empty or None dump root')
self._dump_root = dump_root
self._tfdbg_run_id = tfdbg_run_id
_pywrap_debug_events_writer.Init(self._dump_root, self._tfdbg_run_id, circular_buffer_size) | Construct a DebugEventsWriter object.
NOTE: Given the same `dump_root`, all objects from this constructor
will point to the same underlying set of writers. In other words, they
will write to the same set of debug events files in the `dump_root`
folder.
Args:
dump_root: The root directory for dumping debug data. If `dump_root` does
not exist as a directory, it will be created.
tfdbg_run_id: Debugger Run ID.
circular_buffer_size: Size of the circular buffer for each of the two
execution-related debug events files, with the suffixes .execution and
.graph_execution_traces. If <= 0, the circular-buffer behavior will be
abolished in the constructed object. | github-repos |
def make_spiral_texture(spirals=6.0, ccw=False, offset=0.0, resolution=1000):
dist = np.sqrt(np.linspace(0.0, 1.0, resolution))
if ccw:
direction = 1.0
else:
direction = (- 1.0)
angle = ((((dist * spirals) * np.pi) * 2.0) * direction)
spiral_texture = ((((np.cos(angle) * dist) / 2.0) + 0.5), (((np.sin(angle) * dist) / 2.0) + 0.5))
return spiral_texture | Makes a texture consisting of a spiral from the origin.
Args:
spirals (float): the number of rotations to make
ccw (bool): make spirals counter-clockwise (default is clockwise)
offset (float): if non-zero, spirals start offset by this amount
resolution (int): number of midpoints along the spiral
Returns:
A texture. | codesearchnet |
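A quick way to see the shape of the returned texture, assuming the function above and NumPy are importable; the argument values are chosen only for illustration:
```python
import numpy as np

# Evaluate a small clockwise spiral: the result is a pair of arrays of
# x/y midpoints in [0, 1], starting at the centre (0.5, 0.5).
x, y = make_spiral_texture(spirals=2.0, resolution=5)
print(np.round(x, 3), np.round(y, 3))
# The first point is the centre (0.5, 0.5); the last lies on the outer turn at (1.0, 0.5).
```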
def get_largest_schedule_within_budget(self, budget, proportion_discard):
valid_schedules_and_costs = []
for R in range(1, budget):
schedule = self.generate_hyperband_schedule(R, proportion_discard)
cost = self.compute_schedule_cost(schedule)
if cost <= budget:
valid_schedules_and_costs.append((schedule, cost))
valid_schedules_and_costs.sort(key=lambda x: x[1], reverse=True)
return valid_schedules_and_costs[0][0] | Gets the largest hyperband schedule within target_budget.
This is required since the original hyperband algorithm uses R,
the maximum number of resources per configuration.
TODO(maxlam): Possibly binary search it if this becomes a bottleneck.
Args:
budget: total budget of the schedule.
proportion_discard: hyperband parameter that specifies
the proportion of configurations to discard per iteration. | juraj-google-style |
def convert_to_rgb(image: np.ndarray, palette: Optional[PIL.ImagePalette.ImagePalette]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> ImageInput:
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image, num_channels=(1, 3, 4))
data_format = input_data_format if data_format is None else data_format
mode = 'P' if palette is not None else None
image = to_pil_image(image, image_mode=mode, input_data_format=input_data_format)
if image.mode == 'P' and palette is not None:
image.putpalette(palette)
image_rgba = image.convert('RGBA')
background = Image.new('RGBA', image_rgba.size, (255, 255, 255))
alpha_composite = Image.alpha_composite(background, image_rgba)
alpha_composite = alpha_composite.convert('RGB')
output_array = np.array(alpha_composite)
output_array = to_channel_dimension_format(output_array, data_format, input_channel_dim=ChannelDimension.LAST)
return output_array | Converts an image to RGB format.
Args:
image (`np.ndarray`):
The image to convert.
palette (List[int], *optional*):
The palette to use if given.
data_format (ChannelDimension or str, *optional*):
The channel dimension format for the output image. If not provided, it will be the same as the input image.
input_data_format (ChannelDimension or str, *optional*):
The channel dimension format of the input image. | github-repos |
def clip_action(action, space):
if isinstance(space, gym.spaces.Box):
return np.clip(action, space.low, space.high)
elif isinstance(space, gym.spaces.Tuple):
if type(action) not in (tuple, list):
raise ValueError("Expected tuple space for actions {}: {}".format(
action, space))
out = []
for a, s in zip(action, space.spaces):
out.append(clip_action(a, s))
return out
else:
return action | Called to clip actions to the specified range of this policy.
Arguments:
action: Single action.
space: Action space the actions should be present in.
Returns:
Clipped batch of actions. | juraj-google-style |
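A hypothetical usage sketch; the spaces and action values below are made up, and it assumes `gym` is installed and `clip_action` above is in scope:
```python
import numpy as np
import gym

box = gym.spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)
print(clip_action(np.array([1.7, -3.0]), box))      # clipped to [ 1. -1.]

tup = gym.spaces.Tuple((box, gym.spaces.Discrete(4)))
print(clip_action((np.array([0.5, 2.0]), 3), tup))  # Box part clipped, Discrete part passed through
```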
def mme_match(case_obj, match_type, mme_base_url, mme_token, nodes=None, mme_accepts=None):
query_patients = []
server_responses = []
url = None
query_patients = case_obj['mme_submission']['patients']
if (match_type == 'internal'):
url = ''.join([mme_base_url, '/match'])
for patient in query_patients:
json_resp = matchmaker_request(url=url, token=mme_token, method='POST', content_type=mme_accepts, accept=mme_accepts, data={'patient': patient})
resp_obj = {'server': 'Local MatchMaker node', 'patient_id': patient['id'], 'results': json_resp.get('results'), 'status_code': json_resp.get('status_code'), 'message': json_resp.get('message')}
server_responses.append(resp_obj)
else:
query_patients = [patient['id'] for patient in query_patients]
node_ids = [node['id'] for node in nodes]
if (match_type in node_ids):
node_ids = [match_type]
for patient in query_patients:
for node in node_ids:
url = ''.join([mme_base_url, '/match/external/', patient, '?node=', node])
json_resp = matchmaker_request(url=url, token=mme_token, method='POST')
resp_obj = {'server': node, 'patient_id': patient, 'results': json_resp.get('results'), 'status_code': json_resp.get('status_code'), 'message': json_resp.get('message')}
server_responses.append(resp_obj)
return server_responses | Initiate a MatchMaker match against either other Scout patients or external nodes
Args:
case_obj(dict): a scout case object already submitted to MME
match_type(str): 'internal' or 'external'
mme_base_url(str): base url of the MME server
mme_token(str): auth token of the MME server
mme_accepts(str): request content accepted by MME server (only for internal matches)
Returns:
matches(list): a list of eventual matches | codesearchnet |
def scopes_as(self, new_scopes):
old_scopes, self.scopes = self.scopes, new_scopes
yield
self.scopes = old_scopes | Replace my :attr:`scopes` for the duration of the with block.
My global scope is not replaced.
Args:
new_scopes (list of dict-likes): The new :attr:`scopes` to use. | juraj-google-style |
def on(self, event):
def decorator(f):
self.add_event_handler(f, event)
return f
return decorator | Decorator helper method around `add_event_handler`. Example:
>>> from telethon import TelegramClient, events
>>> client = TelegramClient(...)
>>>
>>> @client.on(events.NewMessage)
... async def handler(event):
... ...
...
>>>
Args:
event (`_EventBuilder` | `type`):
The event builder class or instance to be used,
for instance ``events.NewMessage``. | juraj-google-style |
def serialize_many_sparse(sp_input, name=None, out_type=dtypes.string):
return serialize_many_sparse_v2(sp_input, out_type, name) | Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the `SparseTensor`
must be sorted in increasing order of this first dimension. The serialized
`SparseTensor` objects going into each row of the output `Tensor` will have
rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
Args:
sp_input: The input rank `R` `SparseTensor`.
name: A name prefix for the returned tensors (optional).
out_type: The `dtype` to use for serialization.
Returns:
A matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column
represents serialized `SparseTensor`'s indices, values, and shape
(respectively).
Raises:
TypeError: If `sp_input` is not a `SparseTensor`. | github-repos |
def slice(self, start, end):
reverse = False
if (start > end):
temp = start
start = end
end = temp
reverse = True
seg = self.copy()
seg.points = seg.points[start:(end + 1)]
if reverse:
seg.points = list(reversed(seg.points))
return seg | Creates a copy of the current segment between indexes. If end > start,
points are reverted
Args:
start (int): Start index
end (int): End index
Returns:
:obj:`Segment` | codesearchnet |
def cancel(self, nids=None):
if self.has_chrooted:
warnings.warn('Cannot cancel the flow via sshfs!')
return (- 1)
if os.path.exists(self.pid_file):
cprint('Found scheduler attached to this flow.', 'yellow')
cprint('Sending SIGKILL to the scheduler before cancelling the tasks!', 'yellow')
with open(self.pid_file, 'rt') as fh:
pid = int(fh.readline())
retcode = os.system(('kill -9 %d' % pid))
self.history.info(('Sent SIGKILL to the scheduler, retcode: %s' % retcode))
try:
os.remove(self.pid_file)
except IOError:
pass
num_cancelled = 0
for task in self.iflat_tasks(nids=nids):
num_cancelled += task.cancel()
return num_cancelled | Cancel all the tasks that are in the queue.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
Number of jobs cancelled, negative value if error | codesearchnet |
def _send_success_response(self, response, start_response):
headers = [('Content-Type', 'application/json; charset=UTF-8')]
return util.send_wsgi_response('200 OK', headers, response, start_response) | Sends an HTTP 200 json success response.
This calls start_response and returns the response body.
Args:
response: A string containing the response body to return.
start_response: A function with semantics defined in PEP-333.
Returns:
A string, the response body. | juraj-google-style |
def _publish_status(self, slug, data):
status_topic = (self.topics.prefix + 'devices/{}/data/status'.format(slug))
self._logger.debug('Publishing status message: (topic=%s) (message=%s)', status_topic, str(data))
self.client.publish(status_topic, data) | Publish a status message for a device
Args:
slug (string): The device slug that we are publishing on behalf of
data (dict): The status message data to be sent back to the caller | codesearchnet |
def epoch_rates_to_pmf(problems, epoch_rates=None):
if (epoch_rates is None):
epoch_rates = ([1.0] * len(problems))
example_rates = [(epoch_rate * p.num_training_examples) for (p, epoch_rate) in zip(problems, epoch_rates)]
return example_rates_to_pmf(example_rates) | Create a probability-mass-function based on relative epoch rates.
if epoch_rates=None, then we use uniform epoch rates [1.0] * len(problems)
i.e. it takes each problem the same time to go through one epoch.
If epoch_rates is given, then these are the relative numbers of epochs
of each problem to go through in a given amount of time.
Each must have problem.num_training_examples implemented.
Args:
problems: a list of Problem instances.
epoch_rates: an optional list of float
Returns:
a list of floating point values. | codesearchnet |
def get_linear_interpolated_value(x_values, y_values, x):
a = np.array(sorted(zip(x_values, y_values), key=(lambda d: d[0])))
ind = np.where((a[:, 0] >= x))[0]
if ((len(ind) == 0) or (ind[0] == 0)):
raise ValueError('x is out of range of provided x_values')
i = ind[0]
(x1, x2) = (a[(i - 1)][0], a[i][0])
(y1, y2) = (a[(i - 1)][1], a[i][1])
return (y1 + (((y2 - y1) / (x2 - x1)) * (x - x1))) | Returns an interpolated value by linear interpolation between two values.
This method is written to avoid dependency on scipy, which causes issues on
threading servers.
Args:
x_values: Sequence of x values.
y_values: Corresponding sequence of y values
x: Get value at particular x
Returns:
Value at x. | codesearchnet |
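For reference, the two-point formula applied above reduces to simple arithmetic; the sample values here are invented for illustration:
```python
# Interpolate at x = 1.5 between (x1, y1) = (1.0, 10.0) and (x2, y2) = (2.0, 40.0):
# y = y1 + (y2 - y1) / (x2 - x1) * (x - x1)
y = 10.0 + (40.0 - 10.0) / (2.0 - 1.0) * (1.5 - 1.0)
print(y)  # 25.0
```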
def Write(self, schedule, output_file):
root = ET.Element('kml')
root.attrib['xmlns'] = 'http:
doc = ET.SubElement(root, 'Document')
open_tag = ET.SubElement(doc, 'open')
open_tag.text = '1'
self._CreateStopsFolder(schedule, doc)
if self.split_routes:
route_types = set()
for route in schedule.GetRouteList():
route_types.add(route.route_type)
route_types = list(route_types)
route_types.sort()
for route_type in route_types:
self._CreateRoutesFolder(schedule, doc, route_type)
else:
self._CreateRoutesFolder(schedule, doc)
self._CreateShapesFolder(schedule, doc)
self._SetIndentation(root)
if isinstance(output_file, file):
output = output_file
else:
output = open(output_file, 'w')
output.write('<?xml version="1.0" encoding="UTF-8"?>\n')
ET.ElementTree(root).write(output, 'utf-8') | Writes out a feed as KML.
Args:
schedule: A transitfeed.Schedule object containing the feed to write.
output_file: The name of the output KML file, or file object to use. | codesearchnet |
def heightmap_has_land_on_border(hm: np.ndarray, waterlevel: float) -> bool:
return bool(lib.TCOD_heightmap_has_land_on_border(_heightmap_cdata(hm), waterlevel)) | Returns True if the map edges are below ``waterlevel``, otherwise False.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
waterlevel (float): The water level to use.
Returns:
bool: True if the map edges are below ``waterlevel``, otherwise False. | codesearchnet |
def get_stored_content_length(headers):
length = headers.get('x-goog-stored-content-length')
if length is None:
length = headers.get('content-length')
return length | Return the content length (in bytes) of the object as stored in GCS.
x-goog-stored-content-length should always be present except when called via
the local dev_appserver. Therefore if it is not present we default to the
standard content-length header.
Args:
headers: a dict of headers from the http response.
Returns:
the stored content length. | juraj-google-style |
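The fallback behaviour in a nutshell, using plain dicts to stand in for real response headers:
```python
print(get_stored_content_length({'x-goog-stored-content-length': '1024'}))  # '1024'
print(get_stored_content_length({'content-length': '2048'}))                # falls back to '2048'
print(get_stored_content_length({}))                                        # None
```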
def get_value(value_proto):
field = value_proto.WhichOneof('value_type')
if field in __native_value_types:
return getattr(value_proto, field)
if field == 'timestamp_value':
return from_timestamp(value_proto.timestamp_value)
if field == 'array_value':
return [get_value(sub_value)
for sub_value in value_proto.array_value.values]
return None | Gets the python object equivalent for the given value proto.
Args:
value_proto: datastore.Value proto message.
Returns:
the corresponding python object value. timestamps are converted to
datetime, and datastore.Value is returned for blob_key_value. | juraj-google-style |
def describe_message(message_definition):
message_descriptor = MessageDescriptor()
message_descriptor.name = message_definition.definition_name().split(
'.')[-1]
fields = sorted(message_definition.all_fields(),
key=lambda v: v.number)
if fields:
message_descriptor.fields = [describe_field(field) for field in fields]
try:
nested_messages = message_definition.__messages__
except AttributeError:
pass
else:
message_descriptors = []
for name in nested_messages:
value = getattr(message_definition, name)
message_descriptors.append(describe_message(value))
message_descriptor.message_types = message_descriptors
try:
nested_enums = message_definition.__enums__
except AttributeError:
pass
else:
enum_descriptors = []
for name in nested_enums:
value = getattr(message_definition, name)
enum_descriptors.append(describe_enum(value))
message_descriptor.enum_types = enum_descriptors
return message_descriptor | Build descriptor for Message class.
Args:
message_definition: Message class to provide descriptor for.
Returns:
Initialized MessageDescriptor instance describing the Message class. | juraj-google-style |
def emit(self, value):
if (not self._tstate.output_writer):
logging.error('emit is called, but no output writer is set.')
return
self._tstate.output_writer.write(value) | Emits a value to output writer.
Args:
value: a value of type expected by the output writer. | codesearchnet |
def register_list(self):
num_items = self.MAX_NUM_CPU_REGISTERS
buf = (ctypes.c_uint32 * num_items)()
num_regs = self._dll.JLINKARM_GetRegisterList(buf, num_items)
return buf[:num_regs] | Returns a list of the indices for the CPU registers.
The returned indices can be used to read the register content or grab
the register name.
Args:
self (JLink): the ``JLink`` instance
Returns:
List of registers. | juraj-google-style |
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: torch.LongTensor=None):
batch_size, num_images, num_channels, height, width = pixel_values.shape
pixel_values = pixel_values.to(dtype=self.dtype)
pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])
nb_values_per_image = pixel_values.shape[1:].numel()
real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image
pixel_values = pixel_values[real_images_inds].contiguous()
if pixel_attention_mask is None:
pixel_attention_mask = torch.ones(size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)), dtype=torch.bool, device=pixel_values.device)
else:
pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:])
pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()
patch_size = self.config.vision_config.patch_size
patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()
image_hidden_states = self.vision_model(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
image_hidden_states.last_hidden_state
image_hidden_states = self.connector(image_hidden_states.last_hidden_state)
return image_hidden_states | Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
pixel_attention_mask (`torch.LongTensor`, *optional*):
The attention mask indicating padded regions in the image. | github-repos |
def create(self, ospf_process_id, vrf=None):
value = int(ospf_process_id)
if (not (0 < value < 65536)):
raise ValueError('ospf as must be between 1 and 65535')
command = 'router ospf {}'.format(ospf_process_id)
if vrf:
command += (' vrf %s' % vrf)
return self.configure(command) | Creates a OSPF process in the specified VRF or the default VRF.
Args:
ospf_process_id (str): The OSPF process Id value
vrf (str): The VRF to apply this OSPF process to
Returns:
bool: True if the command completed successfully
Exception:
ValueError: If the ospf_process_id passed in is less
than 1 or greater than 65535
def add_material(self, x_min, x_max, n, angle=0):
self._mat_params.append([x_min, x_max, n, angle])
if (not callable(n)):
n_mat = (lambda wl: n)
else:
n_mat = n
Structure._add_material(self, x_min, self.y_min, x_max, self.y_max, n_mat(self._wl), angle)
return self.n | Add a refractive index between two x-points.
Args:
x_min (float): The start x-point.
x_max (float): The stop x-point.
n (float, function): Refractive index between
`x_min` and `x_max`. Either a constant (`float`), or
a function that accepts one parameters, the
wavelength, and returns a float of the refractive
index. This is useful when doing wavelength
sweeps and solving for the group velocity. The
function provided could be a Sellmeier equation.
angle (float): Angle in degrees of the slope of the
sidewalls at `x_min` and `x_max`. This is useful
for defining a ridge with angled sidewalls. | codesearchnet |
def resize(self, size_gigabytes, region):
return self.get_data(('volumes/%s/actions/' % self.id), type=POST, params={'type': 'resize', 'size_gigabytes': size_gigabytes, 'region': region}) | Resize the Block Storage volume.
Args:
size_gigabytes: int - size of the Block Storage volume in GiB
region: string - slug identifier for the region | codesearchnet |
def create_default_views(self, create_datastore_views=False):
package = deepcopy(self.data)
if self.resources:
package['resources'] = self._convert_hdxobjects(self.resources)
data = {'package': package, 'create_datastore_views': create_datastore_views}
self._write_to_hdx('create_default_views', data, 'package') | Create default resource views for all resources in dataset
Args:
create_datastore_views (bool): Whether to try to create resource views that point to the datastore
Returns:
None | codesearchnet |
def plot_helmholtz_free_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$\Delta F$ (kJ/mol)"
else:
ylabel = r"$\Delta F$ (kJ/mol-c)"
fig = self._plot_thermo(self.dos.helmholtz_free_energy, temperatures, ylabel=ylabel, ylim=ylim,
factor=1e-3, **kwargs)
return fig | Plots the vibrational contribution to the Helmoltz free energy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure | juraj-google-style |
def shape2d(a):
if type(a) == int:
return [a, a]
if isinstance(a, (list, tuple)):
assert len(a) == 2
return list(a)
raise RuntimeError("Illegal shape: {}".format(a)) | Ensure a 2D shape.
Args:
a: a int or tuple/list of length 2
Returns:
list: of length 2. if ``a`` is a int, return ``[a, a]``. | juraj-google-style |
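Its behaviour for the two accepted input forms:
```python
print(shape2d(3))       # [3, 3]
print(shape2d((4, 5)))  # [4, 5]
```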
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):
if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]):
if (num_lower < 0):
num_lower = (rows - 1)
if (num_upper < 0):
num_upper = (cols - 1)
lower_mask = np.tri(cols, rows, num_lower).T
upper_mask = np.tri(rows, cols, num_upper)
band = ((np.ones((rows, cols)) * lower_mask) * upper_mask)
if out_shape:
band = band.reshape(out_shape)
band = tf.constant(band, tf.float32)
else:
band = tf.matrix_band_part(tf.ones([rows, cols]), tf.cast(num_lower, tf.int64), tf.cast(num_upper, tf.int64))
if out_shape:
band = tf.reshape(band, out_shape)
return band | Matrix band part of ones.
Args:
rows: int determining number of rows in output
cols: int
num_lower: int, maximum distance backward. Negative values indicate
unlimited.
num_upper: int, maximum distance forward. Negative values indicate
unlimited.
out_shape: shape to reshape output by.
Returns:
Tensor of size rows * cols reshaped into shape out_shape. | codesearchnet |
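A NumPy-only sketch of the band construction taken by the integer-argument branch above; the sizes are illustrative:
```python
import numpy as np

rows, cols, num_lower, num_upper = 4, 4, 1, 1
lower_mask = np.tri(cols, rows, num_lower).T  # keep entries at most num_lower below the diagonal
upper_mask = np.tri(rows, cols, num_upper)    # keep entries at most num_upper above the diagonal
band = np.ones((rows, cols)) * lower_mask * upper_mask
print(band)
# [[1. 1. 0. 0.]
#  [1. 1. 1. 0.]
#  [0. 1. 1. 1.]
#  [0. 0. 1. 1.]]
```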
def Copy(From, To):
from benchbuild.utils.cmd import cp
cp('-ar', '--reflink=auto', From, To) | Small copy wrapper.
Args:
From (str): Path to the SOURCE.
To (str): Path to the TARGET. | codesearchnet |
def str2dict_values(str_in):
tmp_dict = str2dict(str_in)
if tmp_dict is None:
return None
return [tmp_dict[key] for key in sorted(k for k in tmp_dict)] | Extracts the values from a string that represents a dict and returns them
sorted by key.
Args:
str_in (string) that contains python dict
Returns:
(list) with values or None if no valid dict was found
Raises:
- | juraj-google-style |
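A standalone sketch of the same idea; since `str2dict` itself is not shown above, `ast.literal_eval` is used here as a stand-in parser:
```python
import ast

def str2dict_values_sketch(str_in):
    try:
        parsed = ast.literal_eval(str_in)
    except (ValueError, SyntaxError):
        return None
    if not isinstance(parsed, dict):
        return None
    return [parsed[key] for key in sorted(parsed)]

print(str2dict_values_sketch("{'b': 2, 'a': 1}"))  # [1, 2]
```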
def teleport(self, agent_name, location=None, rotation=None):
self.agents[agent_name].teleport((location * 100), rotation)
self.tick() | Teleports the target agent to any given location, and applies a specific rotation.
Args:
agent_name (str): The name of the agent to teleport.
location (np.ndarray or list): XYZ coordinates (in meters) for the agent to be teleported to.
If no location is given, it isn't teleported, but may still be rotated. Defaults to None.
rotation (np.ndarray or list): A new rotation target for the agent.
If no rotation is given, it isn't rotated, but may still be teleported. Defaults to None. | codesearchnet |
def _format(name, arr):
title = '
tlen = len(title)
print('-' * tlen)
print(title)
print('-' * tlen)
print(' Total
if arr:
for item in arr:
detail = ''
if isinstance(item[1], list):
for itm in item[1]:
detail += str(itm) + ', '
detail = detail[:-2]
else:
detail = str(item[1])
print(" %s ('%s')\n" % (str(item[0]), detail))
else:
print(' No %s' % name)
print('\n') | Prints compatibility check results with a format.
Args:
name: String that is the title representing list `arr`.
arr: List of items to be printed in a certain format. | github-repos |
def get_dopants_from_substitution_probabilities(structure, num_dopants=5, threshold=0.001, match_oxi_sign=False):
els_have_oxi_states = [hasattr(s, 'oxi_state') for s in structure.species]
if (not all(els_have_oxi_states)):
raise ValueError('All sites in structure must have oxidation states to predict dopants.')
sp = SubstitutionPredictor(threshold=threshold)
subs = [sp.list_prediction([s]) for s in set(structure.species)]
subs = [{'probability': pred['probability'], 'dopant_species': list(pred['substitutions'].keys())[0], 'original_species': list(pred['substitutions'].values())[0]} for species_preds in subs for pred in species_preds]
subs.sort(key=(lambda x: x['probability']), reverse=True)
return _get_dopants(subs, num_dopants, match_oxi_sign) | Get dopant suggestions based on substitution probabilities.
Args:
structure (Structure): A pymatgen structure decorated with
oxidation states.
num_dopants (int): The number of suggestions to return for
n- and p-type dopants.
threshold (float): Probability threshold for substitutions.
match_oxi_sign (bool): Whether to force the dopant and original species
to have the same sign of oxidation state. E.g. If the original site
is in a negative charge state, then only negative dopants will be
returned.
Returns:
(dict): Dopant suggestions, given as a dictionary with keys "n_type" and
"p_type". The suggestions for each doping type are given as a list of
dictionaries, each with they keys:
- "probability": The probability of substitution.
- "dopant_species": The dopant species.
- "original_species": The substituted species. | codesearchnet |
def view(self, viewer=None, use_curr_dir=False):
if (viewer is None):
viewer = settings['defaults']['viewer']
if use_curr_dir:
TEMP_DIR = os.path.curdir
else:
TEMP_DIR = tempfile.gettempdir()
def give_filename(i):
filename = (('ChemCoord_' + str(i)) + '.xyz')
return os.path.join(TEMP_DIR, filename)
i = 1
while os.path.exists(give_filename(i)):
i = (i + 1)
self.to_xyz(give_filename(i))
def open_file(i):
'Open file and close after being finished.'
try:
subprocess.check_call([viewer, give_filename(i)])
except (subprocess.CalledProcessError, FileNotFoundError):
raise
finally:
if use_curr_dir:
pass
else:
os.remove(give_filename(i))
Thread(target=open_file, args=(i,)).start() | View your molecule.
.. note:: This function writes a temporary file and opens it with
an external viewer.
If you modify your molecule afterwards you have to recall view
in order to see the changes.
Args:
viewer (str): The external viewer to use. If it is None,
the default as specified in cc.settings['defaults']['viewer']
is used.
use_curr_dir (bool): If True, the temporary file is written to
the current diretory. Otherwise it gets written to the
OS dependendent temporary directory.
Returns:
None: | codesearchnet |
def update(self, rec=None, drop=None, tables=None, install=None, materialize=None, indexes=None, joins=0, views=0):
if (not drop):
drop = []
if (not tables):
tables = set()
if (not install):
install = set()
if (not materialize):
materialize = set()
if (not indexes):
indexes = set()
if rec:
self.update(drop=rec.drop, tables=rec.tables, install=rec.install, materialize=rec.materialize, indexes=rec.indexes, joins=rec.joins)
self.drop += drop
self.tables |= set(tables)
self.install |= set(install)
self.materialize |= set(materialize)
self.indexes |= set(indexes)
self.joins += joins
self.views += views
if ((self.joins > 0) or (self.views > 0)):
self.materialize |= self.install
self.install = set() | Updates current record.
Args:
rec (FIMRecord): | codesearchnet |
def __cloudflare_list_zones(self, *, account, **kwargs):
done = False
zones = []
page = 1
while not done:
kwargs['page'] = page
response = self.__cloudflare_request(account=account, path='/zones', args=kwargs)
info = response['result_info']
if 'total_pages' not in info or page == info['total_pages']:
done = True
else:
page += 1
zones += response['result']
return zones | Helper function to list all zones registered in the CloudFlare system. Returns a `list` of the zones
Args:
account (:obj:`CloudFlareAccount`): A CloudFlare Account object
**kwargs (`dict`): Extra arguments to pass to the API endpoint
Returns:
`list` of `dict` | juraj-google-style |
def get_or_search(self) -> List[GridQubit]:
if (not self._sequence):
self._sequence = self._find_sequence()
return self._sequence | Starts the search or gives previously calculated sequence.
Returns:
The linear qubit sequence found. | codesearchnet |
async def _catch_response(self, h11_connection):
response = (await self._recv_event(h11_connection))
resp_data = {'encoding': self.encoding, 'method': self.method, 'status_code': response.status_code, 'reason_phrase': str(response.reason, 'utf-8'), 'http_version': str(response.http_version, 'utf-8'), 'headers': c_i_dict([(str(name, 'utf-8'), str(value, 'utf-8')) for (name, value) in response.headers]), 'body': b'', 'url': self.req_url}
for header in response.headers:
if (header[0] == b'set-cookie'):
try:
resp_data['headers']['set-cookie'].append(str(header[1], 'utf-8'))
except (KeyError, AttributeError):
resp_data['headers']['set-cookie'] = [str(header[1], 'utf-8')]
get_body = False
try:
if (int(resp_data['headers']['content-length']) > 0):
get_body = True
except KeyError:
try:
if ('chunked' in resp_data['headers']['transfer-encoding'].lower()):
get_body = True
except KeyError:
if (resp_data['headers'].get('connection', '').lower() == 'close'):
get_body = True
if get_body:
if (self.callback is not None):
endof = (await self._body_callback(h11_connection))
elif self.stream:
if (not (((self.scheme == self.initial_scheme) and (self.host == self.initial_netloc)) or (resp_data['headers']['connection'].lower() == 'close'))):
self.sock._active = False
resp_data['body'] = StreamBody(h11_connection, self.sock, resp_data['headers'].get('content-encoding', None), resp_data['encoding'])
self.streaming = True
else:
while True:
data = (await self._recv_event(h11_connection))
if isinstance(data, h11.Data):
resp_data['body'] += data.data
elif isinstance(data, h11.EndOfMessage):
break
else:
endof = (await self._recv_event(h11_connection))
assert isinstance(endof, h11.EndOfMessage)
if self.streaming:
return StreamResponse(**resp_data)
return Response(**resp_data) | Instantiates the parser which manages incoming data, first getting
the headers, storing cookies, and then parsing the response's body,
if any.
This function also instances the Response class in which the response
status line, headers, cookies, and body is stored.
It should be noted that in order to remain performant, if the user
wishes to do any file IO it should use async files, or risk long wait
times and connection issues server-side when using callbacks.
If a callback is used, the response's body will be None.
Returns:
The most recent response object. | codesearchnet |
def sort_objects(objects: List[Any], key: Optional[Callable[[Any], str]]=None) -> List[Any]:
def noop(x):
return x
if key is None:
key = noop
constants = [obj for obj in objects if key(obj).isupper()]
classes = [obj for obj in objects if key(obj)[0].isupper() and (not key(obj).isupper())]
functions = [obj for obj in objects if not key(obj)[0].isupper()]
key1 = ignore_underscore_and_lowercase(key)
return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1) | Sort a list of objects following the rules of isort (all uppercased first, camel-cased second and lower-cased
last).
Args:
objects (`List[Any]`):
The list of objects to sort.
key (`Callable[[Any], str]`, *optional*):
A function taking an object as input and returning a string, used to sort them by alphabetical order.
If not provided, will default to noop (so a `key` must be provided if the `objects` are not of type string).
Returns:
`List[Any]`: The sorted list with the same elements as in the inputs | github-repos |
def ConvertDateTimeToOffset(date_time_value):
date_time_obj = datetime(int(date_time_value['date']['year']),
int(date_time_value['date']['month']),
int(date_time_value['date']['day']),
int(date_time_value['hour']),
int(date_time_value['minute']),
int(date_time_value['second']))
date_time_str = pytz.timezone(
date_time_value['timeZoneId']).localize(date_time_obj).isoformat()
if date_time_str[-5:] == '00:00':
return date_time_str[:-6] + 'Z'
else:
return date_time_str | Converts the PQL formatted response for a dateTime object.
Output conforms to ISO 8061 format, e.g. 'YYYY-MM-DDTHH:MM:SSz.'
Args:
date_time_value: dict The date time value from the PQL response.
Returns:
str: A string representation of the date time value uniform to
ReportService. | juraj-google-style |
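The core of the conversion, isolated (assuming `pytz` is installed; the date is arbitrary):
```python
from datetime import datetime
import pytz

dt_str = pytz.timezone('UTC').localize(datetime(2024, 1, 2, 3, 4, 5)).isoformat()
print(dt_str)                                                   # 2024-01-02T03:04:05+00:00
print(dt_str[:-6] + 'Z' if dt_str[-5:] == '00:00' else dt_str)  # 2024-01-02T03:04:05Z
```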
def read_double(self, little_endian=True):
if little_endian:
endian = "<"
else:
endian = ">"
return self.unpack("%sd" % endian, 8) | Read 8 bytes as a double value from the stream.
Args:
little_endian (bool): specify the endianness. (Default) Little endian.
Returns:
float: | juraj-google-style |
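What the 8-byte read boils down to, independent of the stream class:
```python
import struct

data = struct.pack("<d", 3.14)       # little-endian double, 8 bytes
print(struct.unpack("<d", data)[0])  # 3.14
```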
def create_summary_metadata(display_name, description, num_thresholds):
pr_curve_plugin_data = plugin_data_pb2.PrCurvePluginData(version=PROTO_VERSION, num_thresholds=num_thresholds)
content = pr_curve_plugin_data.SerializeToString()
return summary_pb2.SummaryMetadata(display_name=display_name, summary_description=description, plugin_data=summary_pb2.SummaryMetadata.PluginData(plugin_name=PLUGIN_NAME, content=content)) | Create a `summary_pb2.SummaryMetadata` proto for pr_curves plugin data.
Arguments:
display_name: The display name used in TensorBoard.
description: The description to show in TensorBoard.
num_thresholds: The number of thresholds to use for PR curves.
Returns:
A `summary_pb2.SummaryMetadata` protobuf object. | codesearchnet |
def process_tag(self, tag_proc_name, tag):
tag_processor = self.tag_procs[tag_proc_name]
db_entry = (tag_processor.get_name(tag),
tag_processor.get_entry_type(tag),
tag_processor.get_filename(tag))
self.zeal_db.insert(*db_entry)
self.entry_count += 1 | Process a tag with a tag processor and insert a DB entry.
Args:
tag_proc_name: A string key that maps to the TagProcessor to use.
tag: A BeautifulSoup Tag to process. | juraj-google-style |
def _InverseDistanceWeighted(self, latitude, longitude, radius=1):
if (radius == 1):
offsetmatrix = (None, (0, 1), None, ((- 1), 0), (0, 0), (1, 0), None, (0, (- 1)), None)
elif (radius == 2):
offsetmatrix = (None, None, (0, 2), None, None, None, ((- 1), 1), (0, 1), (1, 1), None, ((- 2), 0), ((- 1), 0), (0, 0), (1, 0), (2, 0), None, ((- 1), (- 1)), (0, (- 1)), (1, (- 1)), None, None, None, (0, (- 2)), None, None)
else:
raise ValueError('Radius {} invalid, expected 1 or 2'.format(radius))
(row, column) = self.get_row_and_column(latitude, longitude)
(center_lat, center_long) = self.get_lat_and_long(row, column)
if ((latitude == center_lat) and (longitude == center_long)):
return self.get_elevation_from_row_and_column(int(row), int(column))
weights = 0
elevation = 0
for offset in offsetmatrix:
if ((offset is not None) and (0 <= (row + offset[0]) < self.square_side) and (0 <= (column + offset[1]) < self.square_side)):
cell = self.get_elevation_from_row_and_column(int((row + offset[0])), int((column + offset[1])))
if (cell is not None):
distance = mod_utils.distance(latitude, longitude, (center_lat + (float(offset[0]) / (self.square_side - 1))), (center_long + (float(offset[1]) / (self.square_side - 1))))
weights += (1 / distance)
elevation += (cell / distance)
return (elevation / weights) | Return the Inverse Distance Weighted Elevation.
Interpolate the elevation of the given point using the inverse
distance weigthing algorithm (exp of 1) in the form:
sum((1/distance) * elevation)/sum(1/distance)
for each point in the matrix.
The matrix size is determined by the radius. A radius of 1 uses
5 points and a radius of 2 uses 13 points. The matrices are set
up to use cells adjacent to and including the one that contains
the given point. Any cells referenced by the matrix that are on
neighboring tiles are ignored.
Args:
latitude: float of the latitude in decimal degrees
longitude: float of the longitude in decimal degrees
radius: int of 1 or 2 indicating the size of the matrix
Returns:
a float of the interpolated elevation in the same units as
the underlying .hgt file (meters)
Exceptions:
raises a ValueError if an invalid radius is supplied | codesearchnet |
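A minimal standalone illustration of the weighting formula described above, sum((1/d) * elevation) / sum(1/d); the distances and cell elevations are invented:
```python
cells = [(0.5, 120.0), (1.0, 150.0), (2.0, 90.0)]  # (distance, elevation) pairs
weights = sum(1.0 / d for d, _ in cells)
elevation = sum(e / d for d, e in cells) / weights
print(round(elevation, 2))  # 124.29 -- closer cells dominate the estimate
```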
class RunOfflineDetector(beam.PTransform[beam.PCollection[NestedKeyedInputT], beam.PCollection[NestedKeyedOutputT]]):
def __init__(self, offline_detector: OfflineDetector):
self._offline_detector = offline_detector
def _restore_and_convert(self, elem: tuple[tuple[Any, Any, beam.Row], Any]) -> NestedKeyedOutputT:
(orig_key, temp_key, row), prediction = elem
assert isinstance(prediction, AnomalyPrediction), 'Wrong model handler output type.' + f"Expected: 'AnomalyPrediction', but got '{type(prediction).__name__}'. " + 'Consider adding a post-processing function via `with_postprocess_fn` ' + f"to convert from '{type(prediction).__name__}' to 'AnomalyPrediction', " + 'or use `score_prediction_adapter` or `label_prediction_adapter` to ' + 'perform the conversion.'
result = AnomalyResult(example=row, predictions=[dataclasses.replace(prediction, model_id=self._offline_detector._model_id)])
return (orig_key, (temp_key, result))
def _select_features(self, elem: tuple[Any, beam.Row]) -> tuple[Any, beam.Row]:
assert self._offline_detector._features is not None
k, v = elem
row_dict = v._asdict()
return (k, beam.Row(**{k: row_dict[k] for k in self._offline_detector._features}))
def expand(self, input: beam.PCollection[NestedKeyedInputT]) -> beam.PCollection[NestedKeyedOutputT]:
model_uuid = f'{self._offline_detector._model_id}:{uuid.uuid4().hex[:6]}'
run_inference = RunInference(self._offline_detector._keyed_model_handler, **self._offline_detector._run_inference_args)
rekeyed_model_input = input | 'Rekey' >> beam.Map(lambda x: ((x[0], x[1][0], x[1][1]), x[1][1]))
if self._offline_detector._features is not None:
rekeyed_model_input = rekeyed_model_input | 'Select Features' >> beam.Map(self._select_features)
rekeyed_model_output = rekeyed_model_input | f'Call RunInference ({model_uuid})' >> run_inference
ret = rekeyed_model_output | 'Restore keys and convert model output' >> beam.Map(self._restore_and_convert)
if self._offline_detector._threshold_criterion:
ret = ret | f'Run Threshold Criterion ({model_uuid})' >> RunThresholdCriterion(self._offline_detector._threshold_criterion)
return ret | Runs an offline anomaly detector on a PCollection of data.
This PTransform applies a `OfflineDetector` to the input data, handling
custom input/output conversion and inference.
Args:
offline_detector: The `OfflineDetector` to run. | github-repos |
async def get_matches(self, force_update=False) -> list:
if force_update or self.matches is None:
res = await self.connection('GET',
'tournaments/{}/matches'.format(self._id),
include_attachments=1)
self._refresh_matches_from_json(res)
return self.matches or [] | get all matches (once the tournament is started)
|methcoro|
Args:
force_update (default=False): True to force an update to the Challonge API
Returns:
list[Match]:
Raises:
APIException | juraj-google-style |
def __init__(self, package, ad, config=None):
super().__init__(package=package, device=ad)
self.host_port = None
self.device_port = None
self.uid = UNKNOWN_UID
self._adb = ad.adb
self._user_id = None if config is None else config.user_id
self._proc = None
self._client = None
self._conn = None
self._event_client = None
self._config = config or Config()
self._server_start_stdout = [] | Initializes the instance of Snippet Client V2.
Args:
package: str, see base class.
ad: AndroidDevice, the android device object associated with this client.
config: Config, the configuration object. See the docstring of the
`Config` class for supported configurations. | github-repos |
def predict(self, text):
pred = self.predict_proba(text)
tags = self._get_tags(pred)
return tags | Predict using the model.
Args:
text: string, the input text.
Returns:
tags: list, shape = (num_words,)
Returns predicted values. | codesearchnet |
def write_to_filterbank(self, filename_out):
print('[Filterbank] Warning: Non-standard function to write in filterbank (.fil) format. Please use Waterfall.')
n_bytes = int((self.header[b'nbits'] / 8))
with open(filename_out, 'wb') as fileh:
fileh.write(generate_sigproc_header(self))
j = self.data
if (n_bytes == 4):
np.float32(j.ravel()).tofile(fileh)
elif (n_bytes == 2):
np.int16(j.ravel()).tofile(fileh)
elif (n_bytes == 1):
np.int8(j.ravel()).tofile(fileh) | Write data to blimpy file.
Args:
filename_out (str): Name of output file | codesearchnet |
def ParseUserEngagedRow(
self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = WindowsTimelineUserEngagedEventData()
event_data.package_identifier = self._GetRowValue(
query_hash, row, 'PackageName')
payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload'))
payload_json_string = payload_json_bytes.decode('utf-8')
payload = json.loads(payload_json_string)
if 'reportingApp' in payload:
event_data.reporting_app = payload['reportingApp']
if 'activeDurationSeconds' in payload:
event_data.active_duration_seconds = int(payload['activeDurationSeconds'])
timestamp = self._GetRowValue(query_hash, row, 'StartTime')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a timeline row that describes a user interacting with an app.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row. | juraj-google-style |
def allreduce_grads_hierarchical(all_grads, devices, average=False):
num_gpu = len(devices)
assert (num_gpu == 8), num_gpu
assert (len(all_grads) == num_gpu), len(all_grads)
group_size = (num_gpu // 2)
agg_all_grads = []
for (varid, grads) in enumerate(zip(*all_grads)):
g0_main_gpu = (varid % num_gpu)
g1_main_gpu = ((g0_main_gpu + group_size) % num_gpu)
g0_start = (0 if (g0_main_gpu < group_size) else group_size)
g1_start = (0 if (g1_main_gpu < group_size) else group_size)
assert (g0_start != g1_start)
g0_grads = grads[g0_start:(g0_start + group_size)]
g1_grads = grads[g1_start:(g1_start + group_size)]
with tf.device(devices[g0_main_gpu]):
g0_agg = tf.add_n(g0_grads, name='group0_agg')
with tf.device(devices[g1_main_gpu]):
g1_agg = tf.add_n(g1_grads, name='group1_agg')
g1_total_agg = tf.add(g0_agg, g1_agg, name='group1_total_agg')
with tf.device(devices[g0_main_gpu]):
g0_total_agg = tf.identity(g1_total_agg, name='group0_total_agg')
agg_grads = []
for k in range(num_gpu):
if ((k < group_size) == (g0_main_gpu < group_size)):
main_gpu = g0_total_agg
else:
main_gpu = g1_total_agg
with tf.device(devices[k]):
if (not average):
device_total_agg = tf.identity(main_gpu, name='device{}_total_agg'.format(k))
else:
device_total_agg = tf.multiply(main_gpu, (1.0 / num_gpu), name='device{}_total_agg'.format(k))
agg_grads.append(device_total_agg)
agg_all_grads.append(agg_grads)
agg_all_grads = list(zip(*agg_all_grads))
return agg_all_grads | Hierarchical allreduce for DGX-1 system.
Args:
all_grads (K x N): List of list of gradients. N is the number of variables.
devices ([str]): K str for the K devices.
average (bool): average gradients or not.
Returns:
(K x N): same as input, but each grad is replaced by the average over K lists. | codesearchnet |
def convert_outlook_msg(msg_bytes):
if not is_outlook_msg(msg_bytes):
raise ValueError("The supplied bytes are not an Outlook MSG file")
orig_dir = os.getcwd()
tmp_dir = tempfile.mkdtemp()
os.chdir(tmp_dir)
with open("sample.msg", "wb") as msg_file:
msg_file.write(msg_bytes)
try:
subprocess.check_call(["msgconvert", "sample.msg"],
stdout=null_file, stderr=null_file)
eml_path = "sample.eml"
with open(eml_path, "rb") as eml_file:
rfc822 = eml_file.read()
except FileNotFoundError:
raise EmailParserError(
"Failed to convert Outlook MSG: msgconvert utility not found")
finally:
os.chdir(orig_dir)
shutil.rmtree(tmp_dir)
return rfc822 | Uses the ``msgconvert`` Perl utility to convert an Outlook MSG file to
standard RFC 822 format
Args:
msg_bytes (bytes): the content of the .msg file
Returns:
A RFC 822 string | juraj-google-style |
def use_color(setting):
if (setting not in COLOR_CHOICES):
raise InvalidColorSetting(setting)
return ((setting == 'always') or ((setting == 'auto') and sys.stdout.isatty() and terminal_supports_color)) | Choose whether to use color based on the command argument.
Args:
setting - Either `auto`, `always`, or `never` | codesearchnet |
def init_pool_generator(gens, random_seed=None, id_queue=None):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = gens
worker_proc = multiprocessing.current_process()
worker_proc.name = f'Keras_worker_{worker_proc.name}'
if random_seed is not None:
np.random.seed(random_seed + worker_proc.ident)
if id_queue is not None:
id_queue.put(worker_proc.ident, block=True, timeout=0.1) | Initializer function for pool workers.
Args:
gens: State which should be made available to worker processes.
random_seed: An optional value with which to seed child processes.
id_queue: A multiprocessing Queue of worker ids.
This is used to indicate that a worker process
was created by Keras. | github-repos |
def get_all(self, seq_set: SequenceSet) \
-> Sequence[Tuple[int, CachedMessage]]:
if seq_set.uid:
all_uids = seq_set.flatten(self.max_uid) & self._uids
return [(seq, self._cache[uid])
for seq, uid in enumerate(self._sorted, 1)
if uid in all_uids]
else:
all_seqs = seq_set.flatten(self.exists)
return [(seq, self._cache[uid])
for seq, uid in enumerate(self._sorted, 1)
if seq in all_seqs] | Return the cached messages, and their sequence numbers, for the
given sequence set.
Args:
seq_set: The message sequence set. | juraj-google-style |
def angle_to_distance(angle, units='metric'):
distance = math.radians(angle) * BODY_RADIUS
if units in ('km', 'metric'):
return distance
elif units in ('sm', 'imperial', 'US customary'):
return distance / STATUTE_MILE
elif units in ('nm', 'nautical'):
return distance / NAUTICAL_MILE
else:
raise ValueError('Unknown units type %r' % units) | Convert angle in to distance along a great circle.
Args:
angle (float): Angle in degrees to convert to distance
units (str): Unit type to be used for distances
Returns:
float: Distance in ``units``
Raises:
ValueError: Unknown value for ``units`` | juraj-google-style |
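The conversion is just arc length on a sphere, distance = radians(angle) * BODY_RADIUS; the snippet below uses a mean Earth radius of roughly 6371 km as an assumed value for BODY_RADIUS:
```python
import math

BODY_RADIUS = 6371  # km, assumed mean Earth radius for illustration
print(math.radians(1) * BODY_RADIUS)  # ~111.2 km of great-circle distance per degree
```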
def update_dtype(self, attr_name, index, dtype):
attr = self._node.attr[attr_name]
num_types = 0
if attr.HasField('list'):
types = attr.list.type
num_types = len(types)
if num_types > index:
types[index] = dtype
return
elif attr.HasField('type'):
num_types = 1
if index == 0:
attr.type = dtype
return
raise ValueError(f'`index` {index:d} is out of range for node({self._node.name}).attr({attr_name}), which has {num_types:d} elements.') | Changes the type of a given input.
Args:
attr_name: The NodeDef attribute containing the type to change.
index: The index of the input type to change.
dtype: The type to change to. | github-repos |
def getPageType(name,number=False):
if not name in pageNames():
return None
pageType=PyOrigin.Pages(name).GetType()
if number:
return str(pageType)
if pageType==1:
return "matrix"
if pageType==2:
return "book"
if pageType==3:
return "graph"
if pageType==4:
return "layout"
if pageType==5:
return "notes" | Returns the type of the page with that name.
If that name doesn't exist, None is returned.
Args:
name (str): name of the page to get the folder from
number (bool): if True, return numbers (i.e., a graph will be 3)
if False, return words where appropriate (i.e., "graph")
Returns:
string of the type of object the page is | juraj-google-style |
def add_options(cls, parser):
kwargs = {'action': 'store', 'default': '', 'parse_from_config': True, 'comma_separated_list': True}
for num in range(cls.min_check, cls.max_check):
parser.add_option(None, '--filename_check{}'.format(num), **kwargs) | Required by flake8
add the possible options, called first
Args:
parser (OptionsManager): | codesearchnet |
def giant_text_sqltype(dialect: Dialect) -> str:
if (dialect.name == SqlaDialectName.SQLSERVER):
return 'NVARCHAR(MAX)'
elif (dialect.name == SqlaDialectName.MYSQL):
return 'LONGTEXT'
else:
raise ValueError('Unknown dialect: {}'.format(dialect.name)) | Returns the SQL column type used to make very large text columns for a
given dialect.
Args:
dialect: a SQLAlchemy :class:`Dialect`
Returns:
the SQL data type of "giant text", typically 'LONGTEXT' for MySQL
and 'NVARCHAR(MAX)' for SQL Server. | codesearchnet |
def read_molden(inputfile, start_index=0, get_bonds=True):
from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian
with open(inputfile, 'r') as f:
found = False
while not found:
line = f.readline()
if '[N_GEO]' in line:
found = True
number_of_molecules = int(f.readline().strip())
energies = []
found = False
while not found:
line = f.readline()
if 'energy' in line:
found = True
for _ in range(number_of_molecules):
energies.append(float(f.readline().strip()))
found = False
while not found:
line = f.readline()
if '[GEOMETRIES] (XYZ)' in line:
found = True
current_line = f.tell()
number_of_atoms = int(f.readline().strip())
f.seek(current_line)
cartesians = []
for energy in energies:
cartesian = Cartesian.read_xyz(
f, start_index=start_index, get_bonds=get_bonds,
nrows=number_of_atoms, engine='python')
cartesian.metadata['energy'] = energy
cartesians.append(cartesian)
return cartesians | Read a molden file.
Args:
inputfile (str):
start_index (int):
Returns:
list: A list containing :class:`~chemcoord.Cartesian` is returned. | juraj-google-style |
def most_uncertain_by_mask(self, mask, y):
idxs = np.where(mask)[0]
return idxs[np.argsort(np.abs(self.probs[idxs,y]-(1/self.num_classes)))[:4]] | Extracts the first 4 most uncertain indexes from the ordered list of probabilities
Arguments:
mask (numpy.ndarray): the mask of probabilities specific to the selected class; a boolean array with shape (num_of_samples,) which contains True where class==selected_class, and False everywhere else
y (int): the selected class
Returns:
idxs (ndarray): An array of indexes of length 4 | juraj-google-style |
def _write_to_command_buffer(self, to_write):
np.copyto(self._command_bool_ptr, True)
to_write += '0'
input_bytes = str.encode(to_write)
for (index, val) in enumerate(input_bytes):
self._command_buffer_ptr[index] = val | Write input to the command buffer. Reformat input string to the correct format.
Args:
to_write (str): The string to write to the command buffer. | codesearchnet |
def get_path(name, *default):
global g_config
value = get(name, *default)
if (value is None):
return None
return proj_path(value) | Get config value as path relative to the project directory.
This allows easily defining the project configuration within the fabfile
as always relative to that fabfile.
Args:
name (str):
The name of the config value containing the path.
*default (Any):
If given and the key does not exist, this will be returned
instead. If it's not given and the config value does not exist,
AttributeError will be raised
Returns:
The requested config value. This is one of the global values defined
in this file. If the value does not exist it will return `default` if
given, or raise `AttributeError`.
Raises:
AttributeError: If the value does not exist and `default` was not given. | codesearchnet |
def get_tensors(graph):
if not isinstance(graph, ops.Graph):
raise TypeError('Expected a graph, got: {}'.format(type(graph)))
ts = []
for op in graph.get_operations():
ts += op.outputs
return ts | get all the tensors which are input or output of an op in the graph.
Args:
graph: a `tf.Graph`.
Returns:
A list of `tf.Tensor`.
Raises:
TypeError: if graph is not a `tf.Graph`. | github-repos |
def _ParseLogLine(self, parser_mediator, structure):
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=structure.date_time)
date_time.is_local_time = True
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
event_data = SophosAVLogEventData()
event_data.text = structure.text
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED,
time_zone=parser_mediator.timezone)
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a log line.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file. | juraj-google-style |
def with_headers(self, headers):
copy = headers.copy()
copy.update(self._headers)
return self.__copy_and_set('headers', copy) | Adds headers to the request
Args:
headers (dict): The headers to add to the request headers
Returns:
The request builder instance in order to chain calls | codesearchnet |
def get(self):
raise NotImplementedError('Must be implemented in subclasses.') | Wait for the result of `RemoteValue` and return the tensor result.
This makes the value concrete by copying the remote tensor to local.
Returns:
The actual output (in the form of `tf.Tensor`s) of the `tf.function`
associated with this `RemoteValue`, previously returned by a
`tf.distribute.experimental.coordinator.ClusterCoordinator.schedule` call.
This can be a single Tensor, or a structure of Tensors, depending on the
output of the `tf.function`.
Raises:
tf.errors.CancelledError: If the function that produces this `RemoteValue`
is aborted or cancelled due to failure. | github-repos |
def size_str(size_in_bytes):
if not size_in_bytes:
return "?? GiB"
size_in_bytes = float(size_in_bytes)
for (name, size_bytes) in _NAME_LIST:
value = size_in_bytes / size_bytes
if value >= 1.0:
return "{:.2f} {}".format(value, name)
return "{} {}".format(int(size_in_bytes), "bytes") | Returns a human readable size string.
If size_in_bytes is None, then returns "?? GiB".
For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`.
Args:
size_in_bytes: `int` or `None`, the size, in bytes, that we want to
format as a human-readable size string. | juraj-google-style |
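A self-contained sketch of the same formatting logic; the `_NAME_LIST` constant below is an assumption about the module-level table the function iterates (largest unit first):

# Assumed layout of the module-level constant (largest unit first).
_NAME_LIST = [("TiB", 2**40), ("GiB", 2**30), ("MiB", 2**20), ("KiB", 2**10)]

def size_str(size_in_bytes):
    if not size_in_bytes:
        return "?? GiB"
    size_in_bytes = float(size_in_bytes)
    for name, size_bytes in _NAME_LIST:
        value = size_in_bytes / size_bytes
        if value >= 1.0:
            return "{:.2f} {}".format(value, name)
    return "{} {}".format(int(size_in_bytes), "bytes")

print(size_str(1.5 * 2**30))  # 1.50 GiB
print(size_str(None))         # ?? GiB
print(size_str(512))          # 512 bytes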
def actnorm_center(name, x, reverse=False, init=False):
shape = common_layers.shape_list(x)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
assert ((len(shape) == 2) or (len(shape) == 4))
if (len(shape) == 2):
x_mean = tf.reduce_mean(x, [0], keepdims=True)
b = get_variable_ddi('b', (1, shape[1]), initial_value=(- x_mean), init=init)
elif (len(shape) == 4):
x_mean = tf.reduce_mean(x, [0, 1, 2], keepdims=True)
b = get_variable_ddi('b', (1, 1, 1, shape[3]), initial_value=(- x_mean), init=init)
if (not reverse):
x += b
else:
x -= b
return x | Add a bias to x.
Initialize such that the output of the first minibatch is zero centered
per channel.
Args:
name: scope
x: 2-D or 4-D Tensor.
reverse: Forward or backward operation.
init: data-dependent initialization.
Returns:
x_center: (x + b), if reverse is True and (x - b) otherwise. | codesearchnet |
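The data-dependent initialization above boils down to subtracting the first minibatch's per-channel mean; a NumPy illustration of that idea (without the TF variable-scope machinery):

import numpy as np

x = np.random.randn(8, 4) * 3.0 + 5.0        # toy 2-D minibatch [batch, channels]
b = -x.mean(axis=0, keepdims=True)           # init: negative per-channel mean
forward = x + b                              # forward output is zero-centered
print(np.allclose(forward.mean(axis=0), 0))  # True
restored = forward - b                       # reverse=True undoes the shift
print(np.allclose(restored, x))              # True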
def query(self, time_indices):
if self._disposed:
raise ValueError(
'Cannot query: this _WatchStore instance is already disposed')
if not isinstance(time_indices, (tuple, list)):
time_indices = [time_indices]
output = []
for time_index in time_indices:
if isinstance(self._data[time_index], _TensorValueDiscarded):
output.append(None)
else:
data_item = self._data[time_index]
if (hasattr(data_item, 'dtype') and
tensor_helper.translate_dtype(data_item.dtype) == 'string'):
_, _, data_item = tensor_helper.array_view(data_item)
data_item = np.array(
tensor_helper.process_buffers_for_display(data_item),
dtype=np.object)
output.append(data_item)
return output | Query the values at given time indices.
Args:
time_indices: 0-based time indices to query, as a `list` of `int`.
Returns:
Values as a list of `numpy.ndarray` (for time indices in memory) or
`None` (for time indices discarded). | juraj-google-style |
def create_socket(self):
socket_path = os.path.join(self.config_dir, 'pueue.sock')
try:
if os.path.exists(socket_path):
os.remove(socket_path)
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(socket_path)
self.socket.setblocking(0)
self.socket.listen(0)
os.chmod(socket_path, stat.S_IRWXU)
except Exception:
self.logger.error("Daemon couldn't create socket. Aborting")
self.logger.exception("Socket creation failed")
sys.exit(1)
return self.socket | Create a socket for the daemon, depending on the directory location.
The socket path is built from `self.config_dir`, so no arguments are needed.
Returns:
socket.socket: The daemon socket. Clients connect to this socket. | codesearchnet |
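A hedged client-side sketch connecting to the Unix socket created above; the config path and payload are illustrative assumptions, not part of the daemon's documented protocol:

import os
import socket

config_dir = os.path.expanduser("~/.config/pueue")      # assumed location
socket_path = os.path.join(config_dir, "pueue.sock")

client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
    client.connect(socket_path)      # only succeeds if the daemon is running
    client.sendall(b"status")        # hypothetical payload
except OSError as exc:
    print("daemon not reachable:", exc)
finally:
    client.close()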
def _print_primitive_field(self, field_name: str, field: descriptor.FieldDescriptor, value: Any) -> None:
if proto_utils.field_is_repeated(field):
string_values = []
elements = []
extensions_found = False
nonnull_values_found = False
for primitive in value:
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)
string_values.append(wrapper.json_value())
elements.append(wrapper.get_element())
nonnull_values_found = nonnull_values_found or wrapper.has_value()
extensions_found = extensions_found or wrapper.has_element()
if nonnull_values_found:
self.generator.add_field(field_name)
self._print_list(string_values, self.generator.push)
if extensions_found:
if nonnull_values_found:
self.generator.push(',')
self.generator.add_newline()
self.generator.add_field(f'_{field_name}')
self._print_list(elements, self._print)
elif self.json_format == _FhirJsonFormat.ANALYTIC and field.message_type.name == 'ReferenceId':
str_value = proto_utils.get_value_at_field(value, 'value')
self.generator.add_field(field_name, f'"{str_value}"')
else:
wrapper = self.primitive_handler.primitive_wrapper_from_primitive(value)
if wrapper.has_value():
self.generator.add_field(field_name, wrapper.json_value())
if wrapper.has_element() and self.json_format == _FhirJsonFormat.PURE:
if wrapper.has_value():
self.generator.push(',')
self.generator.add_newline()
self.generator.add_field(f'_{field_name}')
self._print(wrapper.get_element()) | Prints the primitive field.
Args:
field_name: The name of the field.
field: The FieldDescriptor whose contents to print.
value: The value present at field to print. | github-repos |
def one_of(self, chset: str) -> str:
res = self.peek()
if (res in chset):
self.offset += 1
return res
raise UnexpectedInput(self, ('one of ' + chset)) | Parse one character from the specified set.
Args:
chset: string of characters to try as alternatives.
Returns:
The character that was actually matched.
Raises:
UnexpectedInput: If the next character is not in `chset`. | codesearchnet |
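A stand-alone sketch of the same peek/advance pattern; `TinyParser` is a made-up stand-in for the original parser class:

class TinyParser:
    def __init__(self, text):
        self.input = text
        self.offset = 0

    def peek(self):
        if self.offset >= len(self.input):
            raise ValueError("unexpected end of input")
        return self.input[self.offset]

    def one_of(self, chset):
        res = self.peek()
        if res in chset:
            self.offset += 1            # consume the matched character
            return res
        raise ValueError("expected one of " + chset)

p = TinyParser("+42")
print(p.one_of("+-"))          # '+'
print(p.one_of("0123456789"))  # '4'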
def gather_continuous_embeddings(self, word_embeddings: torch.Tensor, continuous_embeddings: List[torch.Tensor], image_patch_input_indices: torch.Tensor) -> torch.Tensor:
if not word_embeddings.shape[0] == len(continuous_embeddings):
raise ValueError(f'Batch sizes must match! Got len(continuous_embeddings)={len(continuous_embeddings)!r} and word_embeddings.shape[0]={word_embeddings.shape[0]!r}')
output_embeddings = word_embeddings.clone()
for batch_idx in range(word_embeddings.shape[0]):
dst_indices = torch.nonzero(image_patch_input_indices[batch_idx] >= 0, as_tuple=True)[0]
src_indices = image_patch_input_indices[batch_idx][dst_indices]
if src_indices.shape[0] > continuous_embeddings[batch_idx].shape[0]:
raise ValueError(f'Number of continuous embeddings continuous_embeddings[batch_idx].shape={continuous_embeddings[batch_idx].shape!r} does not match number of continuous token ids src_indices.shape={src_indices.shape!r} in batch element {batch_idx}.')
output_embeddings[batch_idx, dst_indices] = continuous_embeddings[batch_idx][src_indices].to(output_embeddings.device)
return output_embeddings | This function places the continuous_embeddings into the word_embeddings at the locations
indicated by image_patch_input_indices. Different batch elements can have different numbers of continuous
embeddings.
Args:
word_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Tensor of word embeddings.
continuous_embeddings (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
Tensor of continuous embeddings. The length of the list is the batch size. Each entry is shape
[num_image_embeddings, hidden], and num_image_embeddings needs to match the number of non-negative
indices in image_patch_input_indices for that batch element.
image_patch_input_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Tensor of indices of the image patches in the input_ids tensor. | github-repos |
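A toy tensor walk-through of the scatter above: negative entries in image_patch_input_indices keep the word embedding, non-negative entries pull in the matching continuous embedding. Shapes are made up for illustration.

import torch

batch, seq_len, hidden = 1, 5, 2
word_embeddings = torch.zeros(batch, seq_len, hidden)
continuous_embeddings = [torch.arange(6, dtype=torch.float32).reshape(3, hidden)]
# -1 keeps the word embedding; >= 0 indexes into continuous_embeddings[b].
image_patch_input_indices = torch.tensor([[0, 1, -1, 2, -1]])

output = word_embeddings.clone()
for b in range(batch):
    dst = torch.nonzero(image_patch_input_indices[b] >= 0, as_tuple=True)[0]
    src = image_patch_input_indices[b][dst]
    output[b, dst] = continuous_embeddings[b][src]
print(output)   # positions 0, 1 and 3 now hold the patch embeddings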
def get_if_set(self, addresses):
with self._lock:
results = []
for add in addresses:
results.append(self._get_if_set(add))
return results | Returns the value set in this context, or None, for each address in
addresses.
Args:
addresses (list of str): The addresses to return values for, if set
within this context.
Returns:
(list): bytes set at the address or None | codesearchnet |
def for_all_test_methods(decorator: Callable[..., Any], *args, **kwargs) -> Callable[[_TC], _TC]:
def all_test_methods_impl(cls: _TC) -> _TC:
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith('test') and (name != 'test_session'):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl | Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given class's test methods with the
decorator. | github-repos |
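A hedged usage sketch with a trivial method-level decorator; `tag` and `MyTest` are hypothetical, and `for_all_test_methods` from the entry above is assumed to be importable:

def tag(label):
    # Toy method-level decorator factory.
    def decorator(fn):
        def wrapper(*args, **kwargs):
            print("running", fn.__name__, "tagged", label)
            return fn(*args, **kwargs)
        return wrapper
    return decorator

@for_all_test_methods(tag, "smoke")
class MyTest:
    def test_addition(self):
        assert 1 + 1 == 2

    def test_session(self):   # skipped by name, as in the implementation above
        pass

MyTest().test_addition()      # prints the tag line, then runs the assertion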
def space_to_batch_direct(input_array, block_shape, paddings):
input_array = np.array(input_array)
block_shape = np.array(block_shape)
num_block_dims = len(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
padded = np.pad(input_array, pad_width=[[0, 0]] + list(paddings) + [[0, 0]] * (input_array.ndim - 1 - num_block_dims), mode='constant')
reshaped_padded_shape = [input_array.shape[0]]
output_shape = [input_array.shape[0] * np.prod(block_shape)]
for block_dim, block_shape_value in enumerate(block_shape):
reduced_size = padded.shape[block_dim + 1] // block_shape_value
reshaped_padded_shape.append(reduced_size)
output_shape.append(reduced_size)
reshaped_padded_shape.append(block_shape_value)
reshaped_padded_shape.extend(input_array.shape[num_block_dims + 1:])
output_shape.extend(input_array.shape[num_block_dims + 1:])
reshaped_padded = padded.reshape(reshaped_padded_shape)
permuted_reshaped_padded = np.transpose(reshaped_padded, list(np.arange(num_block_dims) * 2 + 2) + [0] + list(np.arange(num_block_dims) * 2 + 1) + list(np.arange(input_array.ndim - num_block_dims - 1) + 1 + num_block_dims * 2))
return permuted_reshaped_padded.reshape(output_shape) | Direct Python implementation of space-to-batch conversion.
This is used for tests only.
Args:
input_array: N-D array
block_shape: 1-D array of shape [num_block_dims].
paddings: 2-D array of shape [num_block_dims, 2].
Returns:
Converted tensor. | github-repos |
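A small shape check, assuming numpy is imported as np and the helper above is in scope: a 1x4x4x1 input with block_shape [2, 2] and zero padding becomes 4 batches of 2x2x1.

import numpy as np

x = np.arange(16).reshape(1, 4, 4, 1)
out = space_to_batch_direct(x, block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
print(out.shape)   # (4, 2, 2, 1)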
def load(self, binary: pyquil.Program) -> 'QuantumFlowQVM':
assert (self.status in ['connected', 'done'])
prog = quil_to_program(str(binary))
self._prog = prog
self.program = binary
self.status = 'loaded'
return self | Load a pyQuil program, and initialize QVM into a fresh state.
Args:
binary: A pyQuil program | codesearchnet |
def summary(self, v):
if hasattr(v, '__iter__'):
self._summary = self._summary_cls(v)
else:
self._summary = self._summary_cls(float(v)) | Set summary.
Args:
v: A new summary. It can be a single number or a list of numbers. | codesearchnet |
def get_variable_name_from_bird(bird_conf):
bird_variable_pattern = re.compile('\n ^\\s*\n define\\s+\n (?P<name>\\S+\\b)\n \\s+\n =\n ', re.VERBOSE)
with open(bird_conf, 'r') as content:
for line in content.readlines():
variable_match = bird_variable_pattern.search(line)
if variable_match:
return variable_match.group('name')
return None | Return the variable name set in Bird configuration.
The variable name in Bird configuration is set with the keyword 'define',
here is an example:
define ACAST_PS_ADVERTISE =
and we extract the string between the word 'define' and the equals sign.
Arguments:
bird_conf (str): The absolute file name path of Bird configuration.
Returns:
The variable name as a string or None if it isn't found. | codesearchnet |
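A quick stand-alone check of the regex on an in-memory snippet (no Bird configuration file needed):

import re

bird_variable_pattern = re.compile(r"""
    ^\s*
    define\s+
    (?P<name>\S+\b)
    \s+
    =
    """, re.VERBOSE)

sample = "# anycast prefixes\ndefine ACAST_PS_ADVERTISE =\n  [ 10.189.200.255/32 ];\n"
for line in sample.splitlines():
    match = bird_variable_pattern.search(line)
    if match:
        print(match.group("name"))   # ACAST_PS_ADVERTISE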
def update_headers(self, headers):
check_type(headers, dict, may_be_none=False)
self._req_session.headers.update(headers) | Update the HTTP headers used for requests in this session.
Note: Updates provided by the dictionary passed as the `headers`
parameter to this method are merged into the session headers by adding
new key-value pairs and/or updating the values of existing keys. The
session headers are not replaced by the provided dictionary.
Args:
headers(dict): Updates to the current session headers. | juraj-google-style |
def map_(function, *structures, **kwargs):
flatten = kwargs.pop('flatten', False)
assert (not kwargs), 'map() got unexpected keyword arguments.'
def impl(function, *structures):
if (len(structures) == 0):
return structures
if all((isinstance(s, (tuple, list)) for s in structures)):
if (len(set((len(x) for x in structures))) > 1):
raise ValueError('Cannot merge tuples or lists of different length.')
args = tuple((impl(function, *x) for x in _builtin_zip(*structures)))
if hasattr(structures[0], '_fields'):
return type(structures[0])(*args)
else:
return type(structures[0])(args)
if all((isinstance(s, dict) for s in structures)):
if (len(set((frozenset(x.keys()) for x in structures))) > 1):
raise ValueError('Cannot merge dicts with different keys.')
merged = {k: impl(function, *(s[k] for s in structures)) for k in structures[0]}
return type(structures[0])(merged)
return function(*structures)
result = impl(function, *structures)
if flatten:
result = flatten_(result)
return result | Apply a function to every element in a nested structure.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The nested
structure can consist of any combination of lists, tuples, and dicts.
Args:
function: The function to apply to the elements of the structure. Receives
one argument for every structure that is provided.
*structures: One or more nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure. | codesearchnet |
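Assuming the `map_` above is importable, a short usage sketch on mixed nested structures:

doubled = map_(lambda x: x * 2, {"a": (1, 2), "b": [3, {"c": 4}]})
print(doubled)   # {'a': (2, 4), 'b': [6, {'c': 8}]}

summed = map_(lambda x, y: x + y, [1, (2, 3)], [10, (20, 30)])
print(summed)    # [11, (22, 33)]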
def plot_kurtosis(self, f_start=None, f_stop=None, if_id=0, **kwargs):
ax = plt.gca()
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
if self.header[b'foff'] < 0:
plot_data = plot_data[..., ::-1]
plot_f = plot_f[::-1]
try:
plot_kurtosis = scipy.stats.kurtosis(plot_data, axis=0, nan_policy='omit')
except:
plot_kurtosis = plot_data*0.0
plt.plot(plot_f, plot_kurtosis, **kwargs)
plt.ylabel("Kurtosis")
plt.xlabel("Frequency [MHz]")
plt.xlim(plot_f[0], plot_f[-1]) | Plot kurtosis
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
if_id (int): IF index to plot
kwargs: keyword args to be passed to matplotlib plot() | juraj-google-style |
def find_wells_without_curve(self, mnemonic, alias=None):
return Project([w for w in self if w.get_curve(mnemonic, alias=alias) is None]) | Returns a new Project with only the wells which DO NOT have the named curve.
Args:
mnemonic (str): the name of the curve to look for.
alias (dict): a welly alias dictionary.
Returns:
Project: a new Project containing only the wells that lack the named curve. | juraj-google-style |
def parse_genes(transcripts):
genes_to_transcripts = {}
genes = []
hgvs_identifier = None
canonical_transcript = None
exon = None
for transcript in transcripts:
hgnc_id = transcript['hgnc_id']
hgnc_symbol = transcript['hgnc_symbol']
if (transcript['is_canonical'] and transcript.get('coding_sequence_name')):
hgvs_identifier = transcript.get('coding_sequence_name')
canonical_transcript = transcript['transcript_id']
exon = transcript['exon']
if hgnc_id:
if hgnc_id in genes_to_transcripts:
genes_to_transcripts[hgnc_id].append(transcript)
else:
genes_to_transcripts[hgnc_id] = [transcript]
else:
if hgnc_symbol:
if hgnc_symbol in genes_to_transcripts:
genes_to_transcripts[hgnc_symbol].append(transcript)
else:
genes_to_transcripts[hgnc_symbol] = [transcript]
for gene_id in genes_to_transcripts:
gene_transcripts = genes_to_transcripts[gene_id]
most_severe_consequence = None
most_severe_rank = float('inf')
most_severe_transcript = None
most_severe_region = None
most_severe_sift = None
most_severe_polyphen = None
for transcript in gene_transcripts:
hgnc_id = transcript['hgnc_id']
hgnc_symbol = transcript['hgnc_symbol']
for consequence in transcript['functional_annotations']:
new_rank = SO_TERMS[consequence]['rank']
if new_rank < most_severe_rank:
most_severe_rank = new_rank
most_severe_consequence = consequence
most_severe_transcript = transcript
most_severe_sift = transcript['sift_prediction']
most_severe_polyphen = transcript['polyphen_prediction']
most_severe_region = SO_TERMS[consequence]['region']
gene = {
'transcripts': gene_transcripts,
'most_severe_transcript': most_severe_transcript,
'most_severe_consequence': most_severe_consequence,
'most_severe_sift': most_severe_sift,
'most_severe_polyphen': most_severe_polyphen,
'hgnc_id': hgnc_id,
'hgnc_symbol': hgnc_symbol,
'region_annotation': most_severe_region,
'hgvs_identifier': hgvs_identifier,
'canonical_transcript': canonical_transcript,
'exon': exon,
}
genes.append(gene)
return genes | Parse transcript information and get the gene information from there.
Use hgnc_id as identifier for genes and ensembl transcript id to identify transcripts
Args:
transcripts(iterable(dict))
Returns:
genes (list(dict)): A list with dictionaries that represents genes | juraj-google-style |
def forall(self, vars_list: List[str]) -> 'TensorFluent':
return self._aggregation_op(tf.reduce_all, self, vars_list) | Returns the TensorFluent for the forall aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the forall aggregation function. | codesearchnet |
def _create_hash_from_doc(doc: Mapping[str, Any]) -> str:
doc_string = json.dumps(doc, sort_keys=True)
return _create_hash(doc_string) | Create hash Id from edge record
Args:
edge (Mapping[str, Any]): edge record to create hash from
Returns:
str: Murmur3 128 bit hash | juraj-google-style |
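The stable part of the behaviour is the canonical `json.dumps(sort_keys=True)` serialization; a sketch using hashlib as a stand-in for the project's Murmur3 helper (an assumption, since `_create_hash` is not shown here):

import hashlib
import json

def doc_digest(doc):
    # Canonical serialization: key order no longer affects the hash.
    doc_string = json.dumps(doc, sort_keys=True)
    return hashlib.md5(doc_string.encode("utf-8")).hexdigest()

a = {"subject": "HGNC:391", "object": "GO:0006915", "relation": "increases"}
b = {"relation": "increases", "object": "GO:0006915", "subject": "HGNC:391"}
print(doc_digest(a) == doc_digest(b))   # True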
def add_trace(self, *args, **kwargs):
args = list(args)
kwargs = kwargs.copy()
for fn in self._functions.values():
if self._expects_training_arg:
def trace_with_training(value, fn=fn):
utils.set_training_arg(value, self._training_arg_index, args, kwargs)
add_trace_to_queue(fn, args, kwargs, value)
trace_with_training(True)
trace_with_training(False)
else:
add_trace_to_queue(fn, args, kwargs) | Traces all functions with the same args and kwargs.
Args:
*args: Positional args passed to the original function.
**kwargs: Keyword args passed to the original function. | github-repos |
def compute_context_repetition_mask(self, input_ids: torch.LongTensor) -> torch.LongTensor:
self._check_input_ids_shape(input_ids)
batch_size, _ = input_ids.shape
state = SynthIDTextWatermarkState(batch_size=batch_size, ngram_len=self.ngram_len, context_history_size=self.context_history_size, device=self.device)
contexts = input_ids[:, :-1].unfold(dimension=1, size=self.ngram_len - 1, step=1)
_, num_contexts, _ = contexts.shape
are_repeated_contexts = []
for i in range(num_contexts):
context = contexts[:, i, :]
hash_result = torch.ones(batch_size, device=self.device, dtype=torch.long)
context_hash = self.accumulate_hash(hash_result, context)[:, None]
is_repeated_context = (state.context_history == context_hash).any(dim=1, keepdim=True)
are_repeated_contexts.append(is_repeated_context)
state.context_history = torch.concat((context_hash, state.context_history), dim=1)[:, :-1]
are_repeated_contexts = torch.concat(are_repeated_contexts, dim=1)
return torch.logical_not(are_repeated_contexts) | Computes repetition mask.
0 and 1 stand for repeated and not repeated context n-1 grams respectively.
Args:
input_ids (`torch.LongTensor`):
Input token ids (batch_size, input_len).
Returns:
Repetitions mask (batch_size, input_len - (ngram_len - 1)). | github-repos |
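A small torch demo of the unfold call that builds the (ngram_len - 1)-token contexts above; the token values are arbitrary:

import torch

input_ids = torch.tensor([[11, 12, 13, 14, 15]])
ngram_len = 3
# Sliding windows of length ngram_len - 1 over all but the last token.
contexts = input_ids[:, :-1].unfold(dimension=1, size=ngram_len - 1, step=1)
print(contexts)
# tensor([[[11, 12],
#          [12, 13],
#          [13, 14]]])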
def clear_db(self):
self.data_store.clear_db()
self.plugin_manager.load_all_plugins()
self._store_information() | Clear the Main Database of all samples and worker output.
Args:
None
Returns:
Nothing | juraj-google-style |
def change_disk_usage(self, usage_change, file_path, st_dev):
mount_point = self._mount_point_for_device(st_dev)
if mount_point:
total_size = mount_point['total_size']
if (total_size is not None):
if ((total_size - mount_point['used_size']) < usage_change):
self.raise_io_error(errno.ENOSPC, file_path)
mount_point['used_size'] += usage_change | Change the used disk space by the given amount.
Args:
usage_change: Number of bytes added to the used space.
If negative, the used space will be decreased.
file_path: The path of the object needing the disk space.
st_dev: The device ID for the respective file system.
Raises:
IOError: if usage_change exceeds the free file system space | codesearchnet |
def __init__(self, max_meter=None, band_types=None, capabilities=None,
max_bands=None, max_color=None):
super().__init__()
self.max_meter = max_meter
self.band_types = band_types
self.capabilities = capabilities
self.max_bands = max_bands
self.max_color = max_color | Create a MeterFeatures with the optional parameters below.
Args:
max_meter(int): Maximum number of meters.
band_types (|MeterBandType_v0x04|):
Bitmaps of OFPMBT_* values supported.
capabilities (|MeterFlags_v0x04|): Bitmaps of "ofp_meter_flags".
max_bands(int): Maximum number of bands per meter
max_color(int): Maximum color value | juraj-google-style |