code | docstring | source |
---|---|---|
def get(self, key, default=None):
if (key.count('.') == 0):
return super(DotDict, self).get(key, default)
value = default
(first, remainder) = key.split('.', 1)
if (first in self):
value = super(DotDict, self).get(first, default)
if isinstance(value, (dict, DotDict)):
return DotDict(value).get(remainder, default)
return value | Get a value from the `DotDict`.
The `key` parameter can either be a regular string key,
e.g. "foo", or it can be a string key with dot notation,
e.g. "foo.bar.baz", to signify a nested lookup.
The default value is returned if any level of the key's
components are not found.
Args:
key (str): The key to get the value for.
default: The return value should the given key
not exist in the `DotDict`. | codesearchnet |
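A minimal usage sketch of the dot-notation lookup described above (assuming `DotDict` is the dict subclass this method belongs to):

```python
d = DotDict({'foo': {'bar': {'baz': 42}}})

d.get('foo.bar.baz')         # 42 -- nested lookup via dot notation
d.get('foo.missing', 'n/a')  # 'n/a' -- default returned when any level is absent
d.get('foo')                 # {'bar': {'baz': 42}} -- plain key, no dot notation
```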
def create_interconnect(self, location_entries, timeout=(- 1)):
return self._helper.create(location_entries, uri=self.locations_uri, timeout=timeout) | Creates an interconnect at the given location.
Warning:
It does not create the LOGICAL INTERCONNECT itself.
It will fail if no interconnect is already present at the specified position.
Args:
location_entries (dict): Dictionary with location entries.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Created interconnect. | codesearchnet |
def identify(self, token):
payload = {
'op': 2,
'd': {
'token': self.token,
'properties': {
'$os': sys.platform,
'$browser': 'legobot',
'$device': 'legobot'
},
'compress': False,
'large_threshold': 250
}
}
payload['d']['synced_guilds'] = []
logger.info("Identifying with the following message: \
{}".format(payload))
self.ws.send(json.dumps(payload))
return | Identifies to the websocket endpoint
Args:
token (string): Discord bot token | juraj-google-style |
def assert_split_at_fraction_exhaustive(source, start_position=None, stop_position=None, perform_multi_threaded_test=True):
expected_items = read_from_source(source, start_position, stop_position)
if not expected_items:
raise ValueError('Source %r is empty.' % source)
if len(expected_items) == 1:
raise ValueError('Source %r only reads a single item.' % source)
all_non_trivial_fractions = []
any_successful_fractions = False
any_non_trivial_fractions = False
for i in range(len(expected_items)):
stats = SplitFractionStatistics([], [])
assert_split_at_fraction_binary(source, expected_items, i, 0.0, None, 1.0, None, stats)
if stats.successful_fractions:
any_successful_fractions = True
if stats.non_trivial_fractions:
any_non_trivial_fractions = True
all_non_trivial_fractions.append(stats.non_trivial_fractions)
if not any_successful_fractions:
raise ValueError('SplitAtFraction test completed vacuously: no successful split fractions found')
if not any_non_trivial_fractions:
raise ValueError('SplitAtFraction test completed vacuously: no non-trivial split fractions found')
if not perform_multi_threaded_test:
return
num_total_trials = 0
for i in range(len(expected_items)):
non_trivial_fractions = [2.0]
non_trivial_fractions.extend(all_non_trivial_fractions[i])
min_non_trivial_fraction = min(non_trivial_fractions)
if min_non_trivial_fraction == 2.0:
continue
num_trials = 0
have_success = False
have_failure = False
thread_pool = _ThreadPool(2)
try:
while True:
num_trials += 1
if num_trials > MAX_CONCURRENT_SPLITTING_TRIALS_PER_ITEM:
_LOGGER.warning('After %d concurrent splitting trials at item #%d, observed only %s, giving up on this item', num_trials, i, 'success' if have_success else 'failure')
break
if _assert_split_at_fraction_concurrent(source, expected_items, i, min_non_trivial_fraction, thread_pool):
have_success = True
else:
have_failure = True
if have_success and have_failure:
_LOGGER.info('%d trials to observe both success and failure of concurrent splitting at item #%d', num_trials, i)
break
finally:
thread_pool.close()
num_total_trials += num_trials
if num_total_trials > MAX_CONCURRENT_SPLITTING_TRIALS_TOTAL:
_LOGGER.warning('After %d total concurrent splitting trials, considered only %d items, giving up.', num_total_trials, i)
break
_LOGGER.info('%d total concurrent splitting trials for %d items', num_total_trials, len(expected_items)) | Performs and tests dynamic work rebalancing exhaustively.
Asserts that for each possible start position, a source can be split at
every interesting fraction (halfway between two fractions that differ by at
least one item) and the results are consistent if a split succeeds.
Verifies multi-threaded splitting as well.
Args:
source (~apache_beam.io.iobase.BoundedSource): the source to perform
dynamic splitting on.
perform_multi_threaded_test (bool): if :data:`True` performs a
multi-threaded test, otherwise this test is skipped.
Raises:
ValueError: if the exhaustive splitting test fails. | github-repos |
def search(cls, session, queries, out_type):
cls._check_implements('search')
domain = cls.get_search_domain(queries)
return cls(('/search/%s.json' % cls.__endpoint__), data={'query': str(domain)}, session=session, out_type=out_type) | Search for a record given a domain.
Args:
session (requests.sessions.Session): Authenticated session.
queries (helpscout.models.Domain or iter): The queries for the
domain. If a ``Domain`` object is provided, it will simply be
returned. Otherwise, a ``Domain`` object will be generated
from the complex queries. In this case, the queries should
conform to the interface in
:func:`helpscout.domain.Domain.from_tuple`.
out_type (helpscout.BaseModel): The type of record to output. This
should be provided by child classes, by calling super.
Returns:
RequestPaginator(output_type=helpscout.BaseModel): Results
iterator of the ``out_type`` that is defined. | codesearchnet |
def _pack_images(images, rows, cols):
shape = onp.shape(images)
(width, height, depth) = shape[(- 3):]
images = onp.reshape(images, ((- 1), width, height, depth))
batch = onp.shape(images)[0]
rows = onp.minimum(rows, batch)
cols = onp.minimum((batch // rows), cols)
images = images[:(rows * cols)]
images = onp.reshape(images, (rows, cols, width, height, depth))
images = onp.transpose(images, [0, 2, 1, 3, 4])
images = onp.reshape(images, [(rows * width), (cols * height), depth])
return images | Helper utility to make a tiled field of images from numpy arrays.
Args:
images: Image tensor in shape [N, W, H, C].
rows: Number of images per row in tiled image.
cols: Number of images per column in tiled image.
Returns:
A tiled image of shape [W * rows, H * cols, C].
Truncates incomplete rows. | codesearchnet |
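A shape sketch for the tiling helper above (hypothetical input; `onp` is the numpy alias used in the function):

```python
import numpy as onp

batch = onp.zeros((6, 8, 8, 3))        # [N, W, H, C]: 6 images, 8x8 pixels, 3 channels
tiled = _pack_images(batch, rows=2, cols=3)
print(tiled.shape)                     # (16, 24, 3) == [rows * W, cols * H, C]
```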
def remove_pos_arg_placeholders(alias_command):
split_command = shlex.split(alias_command)
boundary_index = len(split_command)
for (i, subcommand) in enumerate(split_command):
if ((not re.match('^[a-z]', subcommand.lower())) or (i > COLLISION_CHECK_LEVEL_DEPTH)):
boundary_index = i
break
return ' '.join(split_command[:boundary_index]).lower() | Remove positional argument placeholders from alias_command.
Args:
alias_command: The alias command to remove from.
Returns:
The alias command string without positional argument placeholder. | codesearchnet |
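An illustrative call, assuming `{0}`-style placeholders are the positional-argument markers the function strips (the `COLLISION_CHECK_LEVEL_DEPTH` constant comes from the surrounding module):

```python
remove_pos_arg_placeholders('group show {0} --query {{ name }}')
# -> 'group show'
```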
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0) + [1]
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] | Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. | github-repos |
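A small sketch of the expected mask layout, assuming `tokenizer` is an instance of a class that defines this method:

```python
mask = tokenizer.get_special_tokens_mask([5, 6, 7], [8, 9])
# -> [1, 0, 0, 0, 1, 0, 0, 1]   (1 marks the added special tokens)
```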
def get_key(self, match_key, num_results=1, best=True, **dfilter):
return get_key(match_key, self.keys(), num_results=num_results, best=best, **dfilter) | Get multiple fully-specified keys that match the provided query.
Args:
key (DatasetID): DatasetID of query parameters to use for
searching. Any parameter that is `None`
is considered a wild card and any match is
accepted. Can also be a string representing the
dataset name or a number representing the dataset
wavelength.
num_results (int): Number of results to return. If `0` return all,
if `1` return only that element, otherwise
return a list of matching keys.
**dfilter (dict): See `get_key` function for more information. | codesearchnet |
def CheckPath(self, path, path_segment_separator=None):
if (not self._case_sensitive):
path = path.lower()
if (path_segment_separator is None):
path_segment_separator = self._path_segment_separator
path_segments = path.split(path_segment_separator)
number_of_path_segments = len(path_segments)
scan_object = self._root_node
while scan_object:
if isinstance(scan_object, py2to3.STRING_TYPES):
break
if (scan_object.path_segment_index >= number_of_path_segments):
scan_object = scan_object.default_value
continue
path_segment = path_segments[scan_object.path_segment_index]
scan_object = scan_object.GetScanObject(path_segment)
if (not isinstance(scan_object, py2to3.STRING_TYPES)):
return False
filter_path_segments = scan_object.split(self._path_segment_separator)
return (filter_path_segments == path_segments) | Checks if a path matches the scan tree-based path filter.
Args:
path: a string containing the path.
path_segment_separator: optional string containing the path segment
separator. None defaults to the path segment
separator that was set when the path filter
scan tree was initialized.
Returns:
A boolean indicating if the path matches the filter. | codesearchnet |
def post(self, service, data):
url = self._url_format(service)
data = Base._data_to_json(data)
headers = {'content-type': 'application/json'}
return self.rest_action(self._session.post, url, data=data, headers=headers) | Generic POST operation for sending data to Learning Modules API.
Data should be a JSON string or a dict. If it is not a string,
it is turned into a JSON string for the POST body.
Args:
service (str): The endpoint service to use, i.e. gradebook
data (json or dict): the data payload
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
list: the json-encoded content of the response | codesearchnet |
def save_state_regularly(self, fname, frequency=600):
self.save_state(fname)
loop = asyncio.get_event_loop()
self.save_state_loop = loop.call_later(frequency,
self.save_state_regularly,
fname,
frequency) | Save the state of node with a given regularity to the given
filename.
Args:
fname: File name to save regularly to
frequency: Frequency in seconds that the state should be saved.
By default, 10 minutes. | juraj-google-style |
def latch_config_variables(self):
return {desc.name: desc.latch() for desc in self._config_variables.values()} | Latch the current value of all config variables as python objects.
This function will capture the current value of all config variables
at the time that this method is called. It must be called after
start() has been called so that any default values in the config
variables have been properly set otherwise DataError will be thrown.
Conceptually this method performs the operation that happens just
before a tile executive hands control to the tile application
firmware. It latches in the value of all config variables at that
point in time.
For convenience, this method does all necessary binary -> python
native object conversion so that you just get python objects back.
Returns:
dict: A dict of str -> object with the config variable values.
The keys in the dict will be the name passed to
`declare_config_variable`.
The values will be the python objects that result from calling
latch() on each config variable. Consult ConfigDescriptor.latch()
for documentation on how that method works. | codesearchnet |
def is_end_node(node):
return (isinstance(node, ast.Expr) and
isinstance(node.value, ast.Name) and
node.value.id == 'end') | Checks if a node is the "end" keyword.
Args:
node: AST node.
Returns:
True if the node is the "end" keyword, otherwise False. | juraj-google-style |
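A quick check of the helper above using the standard `ast` module:

```python
import ast

is_end_node(ast.parse('end').body[0])    # True  -- bare `end` expression statement
is_end_node(ast.parse('x = 1').body[0])  # False -- an assignment, not `end`
```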
def _next_layer_gather_index(bc, original_rp, broadcast_rp):
old_value_rowids = array_ops.gather(bc.gather_index, broadcast_rp.value_rowids())
def gi_no_broadcast():
old_row_starts = array_ops.gather(original_rp.row_splits(), old_value_rowids)
expected_row_lengths = array_ops.gather(params=original_rp.row_lengths(), indices=bc.gather_index)
actual_row_lengths = broadcast_rp.row_lengths()
check_valid = check_ops.assert_equal(expected_row_lengths, actual_row_lengths, message='Cannot broadcast')
gather_index = old_row_starts + broadcast_rp.offsets_in_rows()
return control_flow_ops.with_dependencies([check_valid], gather_index)
def gi_broadcast():
return old_value_rowids
if not original_rp.is_uniform():
return gi_no_broadcast()
do_broadcast = math_ops.equal(original_rp.uniform_row_length(), constant_op.constant(1, original_rp.dtype))
gather_index = cond.cond(do_broadcast, true_fn=gi_broadcast, false_fn=gi_no_broadcast)
return gather_index | Create the next layer gather_index whether or not a broadcast happens.
*----------bc-------->*
|                     |
original_rp           broadcast_rp
|                     |
\|/                   \|/
*--next_broadcaster-->*
Args:
bc: the old broadcaster.
original_rp: the original row partition.
broadcast_rp: the target row partition.
Returns:
the gather_index for next_broadcaster.
Raises:
InvalidArgumentError if the shapes are incompatible. | github-repos |
def set(self, *args):
assert (len(args) in (1, 2))
if (len(args) == 1):
value = args[0]
self._impl.set(value)
else:
(index, value) = args
if isinstance(value, Real):
self._impl.setTplDbl(Tuple(index)._impl, value)
elif isinstance(value, basestring):
self._impl.setTplStr(Tuple(index)._impl, value)
else:
raise TypeError | Set the value of a single instance of this parameter.
Args:
args: value if the parameter is scalar, index and value
otherwise.
Raises:
RuntimeError: If the entity has been deleted in the underlying
AMPL.
TypeError: If the parameter is not scalar and the index is not
provided. | codesearchnet |
def has_title(self, title, **kwargs):
try:
self.assert_title(title, **kwargs)
return True
except ExpectationNotMet:
return False | Checks if the page has the given title.
Args:
title (str | RegexObject): The string or regex that the title should match.
**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.
Returns:
bool: Whether it matches. | juraj-google-style |
def draw_text(img, pos, text, color, font_scale=0.4):
img = img.astype(np.uint8)
(x0, y0) = (int(pos[0]), int(pos[1]))
font = cv2.FONT_HERSHEY_SIMPLEX
((text_w, text_h), _) = cv2.getTextSize(text, font, font_scale, 1)
if ((x0 + text_w) > img.shape[1]):
x0 = (img.shape[1] - text_w)
if ((y0 - int((1.15 * text_h))) < 0):
y0 = int((1.15 * text_h))
back_topleft = (x0, (y0 - int((1.3 * text_h))))
back_bottomright = ((x0 + text_w), y0)
cv2.rectangle(img, back_topleft, back_bottomright, color, (- 1))
text_bottomleft = (x0, (y0 - int((0.25 * text_h))))
cv2.putText(img, text, text_bottomleft, font, font_scale, (222, 222, 222), lineType=cv2.LINE_AA)
return img | Draw text on an image.
Args:
pos (tuple): x, y; the position of the text
text (str):
font_scale (float):
color (tuple): a 3-tuple BGR color in [0, 255] | codesearchnet |
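A minimal sketch of annotating an image with the helper above (requires OpenCV; the canvas here is a hypothetical blank image):

```python
import numpy as np

canvas = np.zeros((100, 200, 3), dtype=np.uint8)                    # blank BGR image
canvas = draw_text(canvas, (10, 30), 'hello', color=(0, 128, 255))  # returns the annotated copy
```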
def run(self, gin):
with ScratchDir("."):
p = subprocess.Popen(
self._gulp_cmd, stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=subprocess.PIPE
)
out, err = p.communicate(bytearray(gin, "utf-8"))
out = out.decode("utf-8")
err = err.decode("utf-8")
if "Error" in err or "error" in err:
print(gin)
print("----output_0---------")
print(out)
print("----End of output_0------\n\n\n")
print("----output_1--------")
print(out)
print("----End of output_1------")
raise GulpError(err)
if "ERROR" in out:
raise GulpError(out)
conv_err_string = "Conditions for a minimum have not been satisfied"
if conv_err_string in out:
raise GulpConvergenceError()
gout = ""
for line in out.split("\n"):
gout = gout + line + "\n"
return gout | Run GULP using the gin as input
Args:
gin: GULP input string
Returns:
gout: GULP output string | juraj-google-style |
async def init(self, name, conf=None):
tank = self.tanks.get(name)
if (tank is not None):
return tank
iden = s_common.guid()
logger.info('Creating new tank: %s', name)
path = s_common.genpath(self.dirn, 'tanks', iden)
tank = (await CryoTank.anit(path, conf))
node = (await self.names.open((name,)))
(await node.set((iden, conf)))
self.tanks.put(name, tank)
return tank | Generate a new CryoTank with a given name or get an reference to an existing CryoTank.
Args:
name (str): Name of the CryoTank.
Returns:
CryoTank: A CryoTank instance. | codesearchnet |
def load_disease_terms(adapter, genemap_lines, genes=None, hpo_disease_lines=None):
if (not genes):
genes = adapter.genes_by_alias()
disease_terms = get_mim_phenotypes(genemap_lines=genemap_lines)
if (not hpo_disease_lines):
hpo_disease_lines = fetch_hpo_phenotype_to_terms()
hpo_diseases = parse_hpo_diseases(hpo_disease_lines)
start_time = datetime.now()
nr_diseases = None
LOG.info('Loading the hpo disease...')
for (nr_diseases, disease_number) in enumerate(disease_terms):
disease_info = disease_terms[disease_number]
disease_id = 'OMIM:{0}'.format(disease_number)
if (disease_id in hpo_diseases):
hpo_terms = hpo_diseases[disease_id]['hpo_terms']
if hpo_terms:
disease_info['hpo_terms'] = hpo_terms
disease_obj = build_disease_term(disease_info, genes)
adapter.load_disease_term(disease_obj)
LOG.info('Loading done. Nr of diseases loaded {0}'.format(nr_diseases))
LOG.info('Time to load diseases: {0}'.format((datetime.now() - start_time))) | Load the omim phenotypes into the database
Parse the phenotypes from genemap2.txt and find the associated hpo terms
from ALL_SOURCES_ALL_FREQUENCIES_diseases_to_genes_to_phenotypes.txt.
Args:
adapter(MongoAdapter)
genemap_lines(iterable(str))
genes(dict): Dictionary with all genes found in database
hpo_disease_lines(iterable(str)) | codesearchnet |
def contains(self, key):
try:
self._api.objects_get(self._bucket, key)
except datalab.utils.RequestException as e:
if (e.status == 404):
return False
raise e
except Exception as e:
raise e
return True | Checks if the specified item exists.
Args:
key: the key of the item to lookup.
Returns:
True if the item exists; False otherwise.
Raises:
Exception if there was an error requesting information about the item. | codesearchnet |
def is_compatible(self, other: 'ValueSpec') -> bool: | Returns True if values acceptable to `other` are acceptable to this spec.
Args:
other: Other value spec.
Returns:
True if values that are applicable to the other value spec can be applied
to current spec. Otherwise False. | github-repos |
def list_(return_yaml=True, include_pillar=True, include_opts=True, **kwargs):
beacons = None
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire']({'func': 'list', 'include_pillar': include_pillar, 'include_opts': include_opts}, 'manage_beacons')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_list_complete', wait=kwargs.get('timeout', 30))
log.debug('event_ret %s', event_ret)
if (event_ret and event_ret['complete']):
beacons = event_ret['beacons']
except KeyError:
ret = {'comment': 'Event module not available. Beacon list failed.', 'result': False}
return ret
if beacons:
if return_yaml:
tmp = {'beacons': beacons}
return salt.utils.yaml.safe_dump(tmp, default_flow_style=False)
else:
return beacons
else:
return {'beacons': {}} | List the beacons currently configured on the minion.
Args:
return_yaml (bool):
Whether to return YAML formatted output, default ``True``.
include_pillar (bool):
Whether to include beacons that are configured in pillar, default
is ``True``.
include_opts (bool):
Whether to include beacons that are configured in opts, default is
``True``.
Returns:
list: List of currently configured Beacons.
CLI Example:
.. code-block:: bash
salt '*' beacons.list | codesearchnet |
class AutoformerFeatureEmbedder(nn.Module):
def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None:
super().__init__()
self.num_features = len(cardinalities)
self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])
def forward(self, features: torch.Tensor) -> torch.Tensor:
if self.num_features > 1:
cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)
else:
cat_feature_slices = [features]
return torch.cat([embed(cat_feature_slice.squeeze(-1)) for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)], dim=-1) | Embed a sequence of categorical features.
Args:
cardinalities (`list[int]`):
List of cardinalities of the categorical features.
embedding_dims (`list[int]`):
List of embedding dimensions of the categorical features. | github-repos |
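A shape sketch for the embedder above, with made-up cardinalities and embedding sizes:

```python
import torch

embedder = AutoformerFeatureEmbedder(cardinalities=[10, 4], embedding_dims=[3, 2])
features = torch.tensor([[1, 3], [7, 0]])   # (batch_size=2, num_features=2)
out = embedder(features)
print(out.shape)                            # torch.Size([2, 5]) -- concatenated embeddings
```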
def _ContainsNone(self, fail_verb, excluded):
present = []
if len(excluded) == 1:
if excluded[0] in self._actual:
present.extend(excluded)
elif excluded:
try:
actual_set = set(self._actual)
except TypeError:
actual_set = self._actual
for i in excluded:
if i in actual_set:
present.append(i)
if present:
self._FailWithBadResults(fail_verb, excluded, 'contains', present) | Determines if the subject contains none of the excluded elements.
Helper function for ContainsNoneIn() and ContainsNoneOf().
Args:
fail_verb: string describing how the excluded elements should be excluded.
excluded: iterable of objects that should not be contained in the subject.
Returns:
None if the subject contains none of the expected elements.
Raises:
TruthAssertionError: the subject contains any of the excluded elements. | github-repos |
def read(in_path):
assert os.path.exists(in_path), "The following GRP file can't be found. in_path: {}".format(in_path)
with open(in_path, 'r') as f:
lines = f.readlines()
grp = [line.strip() for line in lines if (line and (not re.match('^#', line)))]  # lines starting with '#' are treated as comments
return grp | Read a grp file at the path specified by in_path.
Args:
in_path (string): path to GRP file
Returns:
grp (list) | codesearchnet |
def validate(self, proxy_scanner, expected_num=20, queue_timeout=3, val_timeout=5):
while (self.proxy_num() < expected_num):
try:
candidate_proxy = proxy_scanner.proxy_queue.get(timeout=queue_timeout)
except queue.Empty:
if proxy_scanner.is_scanning():
continue
else:
break
addr = candidate_proxy['addr']
protocol = candidate_proxy['protocol']
ret = self.is_valid(addr, protocol, val_timeout)
if (self.proxy_num() >= expected_num):
self.logger.info('Enough valid proxies, thread {} exit.'.format(threading.current_thread().name))
break
if ret['valid']:
self.add_proxy(Proxy(addr, protocol))
self.logger.info('{} ok, {:.2f}s'.format(addr, ret['response_time']))
else:
self.logger.info('{} invalid, {}'.format(addr, ret['msg'])) | Target function of validation threads
Args:
proxy_scanner: A ProxyScanner object.
expected_num: Max number of valid proxies to be scanned.
queue_timeout: Timeout for getting a proxy from the queue.
val_timeout: An integer passed to `is_valid` as argument `timeout`. | codesearchnet |
def __init__(self, task_queue, verbose=True):
multiprocessing.Process.__init__(self)
self._task_queue = task_queue
self.total_task = self._task_queue.qsize()
self.current_state = None
self.verbose = verbose | Construct an instance of TaskTracker
Args:
task_queue (multiprocessing.JoinableQueue): A queue of the
input data.
verbose (bool, optional): Set to False to disable verbose output. | juraj-google-style |
def _update_task(self, task):
self.task = task
self.task.data.update(self.task_data)
self.task_type = task.task_spec.__class__.__name__
self.spec = task.task_spec
self.task_name = task.get_name()
self.activity = getattr(self.spec, 'service_class', '')
self._set_lane_data() | Assigns current task step to self.task
then updates the task's data with self.task_data
Args:
task: Task object. | codesearchnet |
def check_partitioners(partitioners, keys):
if (partitioners is None):
return {}
_assert_is_dictlike(partitioners, valid_keys=keys)
keys = set(keys)
if (not (set(partitioners) <= keys)):
extra_keys = (set(partitioners) - keys)
raise KeyError('Invalid partitioner keys {}, partitioners can only be provided for {}'.format(', '.join(("'{}'".format(key) for key in extra_keys)), ', '.join(("'{}'".format(key) for key in keys))))
_check_nested_callables(partitioners, 'Partitioner')
return partitioners | Checks the given partitioners.
This checks that `partitioners` is a dictionary that only contains keys in
`keys`, and furthermore the entries in `partitioners` are functions or
further dictionaries (the latter used, for example, in passing partitioners
to modules inside modules) that must satisfy the same constraints.
Args:
partitioners: Dictionary of partitioners (allowing nested dictionaries) or
None.
keys: Iterable of valid keys for `partitioners`.
Returns:
Checked dictionary of partitioners. If `partitioners=None`, an empty
dictionary will be returned.
Raises:
KeyError: If an partitioner is provided for a key not in `keys`.
TypeError: If a provided partitioner is not a callable function, or
`partitioners` is not a Mapping. | codesearchnet |
def most_recent(path, startswith=None, endswith=None):
candidate_files = []
for filename in all_files_in_directory(path):
if startswith and not os.path.basename(filename).startswith(startswith):
continue
if endswith and not filename.endswith(endswith):
continue
candidate_files.append({'name': filename, 'modtime': os.path.getmtime(filename)})
most_recent = sorted(candidate_files, key=lambda k: k['modtime'], reverse=True)
return most_recent[0]['name'] if most_recent else None | Recursively inspect all files under a directory and return the most recent
Args:
path (str): the path of the directory to traverse
startswith (str): the file name start with (optional)
endswith (str): the file name ends with (optional)
Returns:
the most recent file within the subdirectory | juraj-google-style |
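An illustrative call with hypothetical paths:

```python
newest = most_recent('./logs', startswith='app', endswith='.log')
# -> path of the most recently modified ./logs/**/app*.log file, or None if none match
```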
def remove(self, keys, name=None):
if keys.dtype != self._key_dtype:
raise TypeError(f'Dtype of argument `keys` must be {self._key_dtype}, received: {keys.dtype}')
with ops.name_scope(name, '%s_lookup_table_remove' % self.name, (self.resource_handle, keys, self._default_value)):
op = gen_lookup_ops.lookup_table_remove_v2(self.resource_handle, keys)
return op | Removes `keys` and its associated values from the table.
If a key is not present in the table, it is silently ignored.
Args:
keys: Keys to remove. Can be a tensor of any shape. Must match the table's
key type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` do not match the table data types. | github-repos |
def copy_results(self, copy_to_dir, rename_model_to=None, force_rerun=False):
if (not rename_model_to):
rename_model_to = self.model_to_use
new_model_path = op.join(copy_to_dir, '{}.pdb'.format(rename_model_to))
if self.structure_path:
if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_model_path):
custom_clean = CleanPDB()
my_pdb = StructureIO(self.structure_path)
new_model_path = my_pdb.write_pdb(custom_selection=custom_clean, custom_name=rename_model_to, out_dir=copy_to_dir, force_rerun=force_rerun)
self.load_structure_path(structure_path=new_model_path, file_type='pdb')
dest_itasser_dir = op.join(copy_to_dir, '{}_itasser'.format(rename_model_to))
if (not op.exists(dest_itasser_dir)):
os.mkdir(dest_itasser_dir)
for attr in self._attrs_to_copy:
old_file_path = getattr(self, attr)
new_file_path = op.join(dest_itasser_dir, op.basename(old_file_path))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_file_path):
shutil.copy2(old_file_path, new_file_path)
log.debug('{}: copied from {}'.format(new_file_path, old_file_path))
else:
log.debug('{}: file already exists'.format(new_file_path))
setattr(self, attr, new_file_path) | Copy the raw information from I-TASSER modeling to a new folder.
Copies all files in the list _attrs_to_copy.
Args:
copy_to_dir (str): Directory to copy the minimal set of results per sequence.
rename_model_to (str): New file name (without extension)
force_rerun (bool): If existing models and results should be overwritten. | codesearchnet |
def __init__(self):
self._last_step_outputs = {}
self._last_step_outputs_reduce_ops = {}
self._non_tensor_outputs = {} | Initialize an output context.
Returns:
A context object. | github-repos |
async def is_try_or_pull_request(context, task):
if is_github_task(task):
return await is_pull_request(context, task)
else:
return is_try(task, context.config['source_env_prefix']) | Determine if a task is a try or a pull-request-like task (restricted privs).
Checks are the ones done in ``is_try`` and ``is_pull_request``
Args:
context (scriptworker.context.Context): the scriptworker context.
task (dict): the task definition to check.
Returns:
bool: True if it's a pull-request or a try task | juraj-google-style |
def _MergeIdenticalCaseInsensitive(self, a, b):
if (a.lower() != b.lower()):
raise MergeError(("values must be the same (case insensitive) ('%s' vs '%s')" % (transitfeed.EncodeUnicode(a), transitfeed.EncodeUnicode(b))))
return b | Tries to merge two strings.
The strings are required to be the same ignoring case. The second string is
always used as the merged value.
Args:
a: The first string.
b: The second string.
Returns:
The merged string. This is equal to the second string.
Raises:
MergeError: The strings were not the same ignoring case. | codesearchnet |
def reset_logger(name, level=None, handler=None):
if level is None:
level = logging.INFO
logger = logging.getLogger(name)
logger.setLevel(level)
handler = handler or logging.StreamHandler()
handler.setFormatter(logging.Formatter(_DEFAULT_LOG_FORMAT))
logger.handlers = [handler]
return logger | Make a standard python logger object with default formatter, handler, etc.
Defaults are:
- level == logging.INFO
- handler == logging.StreamHandler()
Args:
name: a logger name.
level: an optional initial log level for this logger.
handler: an optional initial handler for this logger.
Returns: a standard python logger with a single handler. | juraj-google-style |
def genes_by_alias(hgnc_genes):
alias_genes = {}
for hgnc_id in hgnc_genes:
gene = hgnc_genes[hgnc_id]
hgnc_symbol = gene['hgnc_symbol']
for alias in gene['previous_symbols']:
true_id = None
if (alias == hgnc_symbol):
true_id = hgnc_id
if (alias in alias_genes):
alias_genes[alias.upper()]['ids'].add(hgnc_id)
if true_id:
alias_genes[alias.upper()]['true_id'] = hgnc_id
else:
alias_genes[alias.upper()] = {'true': true_id, 'ids': set([hgnc_id])}
return alias_genes | Return a dictionary with hgnc symbols as keys
The values of the dictionary hold information about the hgnc ids for a symbol.
If the symbol is primary for a gene then 'true_id' will exist.
A list of hgnc ids that the symbol points to is in ids.
Args:
hgnc_genes(dict): a dictionary with hgnc_id as key and gene info as value
Returns:
alias_genes(dict):
{
'hgnc_symbol':{
'true_id': int,
'ids': list(int)
}
} | codesearchnet |
def is_stopped(self):
resp = self._client.send(Request(action='is_dag_stopped', payload={'dag_name': self._dag_name}))
return resp.payload['is_stopped'] | Check whether the task received a stop signal from the workflow.
Tasks can use the stop flag to gracefully terminate their work. This is
particularly important for long running tasks and tasks that employ an
infinite loop, such as trigger tasks.
Returns:
bool: True if the task should be stopped. | codesearchnet |
def dispatch(self, message):
for validator, callback in self.validators:
if not validator.matches(message):
continue
callback(message)
return
raise ArgumentError("No handler was registered for message", message=message) | Dispatch a message to a callback based on its schema.
Args:
message (dict): The message to dispatch | juraj-google-style |
def register(self, cmd: Type[Command]) -> None:
self.commands[cmd.command] = cmd | Register a new IMAP command.
Args:
cmd: The new command type. | codesearchnet |
def editline_with_regex(self, regex_tgtline, to_replace):
for (idx, line) in enumerate(self._swp_lines):
mobj = re.match(regex_tgtline, line)
if mobj:
self._swp_lines[idx] = to_replace
return | find the first matched line, then replace
Args:
regex_tgtline (str): regular expression used to match the target line
to_replace (str): line you wanna use to replace | codesearchnet |
def exists(self, path):
self.__validate_storage_path(path)
try:
metadata = self.api_client.get_entity_by_query(path=path)
except StorageNotFoundException:
return False
return metadata and 'uuid' in metadata | Check if a certain path exists in the storage service.
Args:
path (str): The path to be checked
Returns:
True if the path exists, False otherwise
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes | juraj-google-style |
def get_lock_state_transaction(self, transaction_id):
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) | Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state | juraj-google-style |
def ping(hostname: str, timeout_s: int = 5) -> bool:
if sys.platform == "win32":
timeout_ms = timeout_s * 1000
args = [
"ping",
hostname,
"-n", "1",
"-w", str(timeout_ms),
]
elif sys.platform.startswith('linux'):
args = [
"ping",
hostname,
"-c", "1",
"-w", str(timeout_s),
]
else:
raise AssertionError("Don't know how to ping on this operating system")
proc = subprocess.Popen(args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.communicate()
retcode = proc.returncode
return retcode == 0 | Pings a host, using OS tools.
Args:
hostname: host name or IP address
timeout_s: timeout in seconds
Returns:
was the ping successful? | juraj-google-style |
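A minimal usage sketch (needs the system `ping` binary; the address is a documentation-range placeholder):

```python
if ping('192.0.2.1', timeout_s=2):
    print('host is reachable')
```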
def setup_config(cfg, config_filenames=None, env_var_name=None):
if (env_var_name is None):
env_var_name = 'BB_CONFIG_FILE'
config_path = os.getenv(env_var_name, None)
if (not config_path):
config_path = find_config(defaults=config_filenames)
if config_path:
cfg.load(config_path)
cfg['config_file'] = os.path.abspath(config_path)
cfg.init_from_env() | This will initialize the given configuration object.
The following resources are available in the same order:
1) Default settings.
2) Config file.
3) Environment variables.
WARNING: Environment variables do _not_ take precedence over the config
file right now. (init_from_env will refuse to update the
value, if there is already one.)
Args:
config_filenames: list of possible config filenames
env_var_name: name of the environment variable holding the config path | codesearchnet |
def get_diff(repo: Repo, base_commit: str, commits: List[str]) -> List[str]:
print('\n### DIFF ###\n')
code_diff = []
for commit in commits:
for diff_obj in commit.diff(base_commit):
if diff_obj.change_type == 'A' and diff_obj.b_path.endswith('.py'):
code_diff.append(diff_obj.b_path)
elif diff_obj.change_type == 'D' and diff_obj.a_path.endswith('.py'):
code_diff.append(diff_obj.a_path)
elif diff_obj.change_type in ['M', 'R'] and diff_obj.b_path.endswith('.py'):
if diff_obj.a_path != diff_obj.b_path:
code_diff.extend([diff_obj.a_path, diff_obj.b_path])
elif diff_is_docstring_only(repo, commit, diff_obj.b_path):
print(f'Ignoring diff in {diff_obj.b_path} as it only concerns docstrings or comments.')
else:
code_diff.append(diff_obj.a_path)
return code_diff | Get the diff between a base commit and one or several commits.
Args:
repo (`git.Repo`):
A git repository (for instance the Transformers repo).
base_commit (`str`):
The commit reference of where to compare for the diff. This is the current commit, not the branching point!
commits (`List[str]`):
The list of commits with which to compare the repo at `base_commit` (so the branching point).
Returns:
`List[str]`: The list of Python files with a diff (files added, renamed or deleted are always returned, files
modified are returned if the diff in the file is not only in docstrings or comments, see
`diff_is_docstring_only`). | github-repos |
def wait_until_final(self, poll_interval=1, timeout=60):
start_time = time.time()
elapsed = 0
while ((self.status != 'complete') and ((timeout <= 0) or (elapsed < timeout))):
time.sleep(poll_interval)
self.refresh()
elapsed = (time.time() - start_time) | Polls the URL at the given interval to refresh the status resource until it
is complete or the timeout is reached.
Args:
poll_interval (int): how often to poll the status service.
timeout (int): how long to poll the URL until giving up. Use <= 0
to wait forever | codesearchnet |
def create_feed_dict_from_input_data(input_data: RepresentativeSample, signature_def: meta_graph_pb2.SignatureDef) -> Mapping[str, np.ndarray]:
feed_dict = {}
for input_key, input_value in input_data.items():
input_tensor_name = signature_def.inputs[input_key].name
value = input_value
if isinstance(input_value, core.Tensor):
value = input_value.eval()
feed_dict[input_tensor_name] = value
return feed_dict | Constructs a feed_dict from input data.
Note: This function should only be used in graph mode.
This is a helper function that converts an 'input key -> input value' mapping
to a feed dict. A feed dict is an 'input tensor name -> input value' mapping
and can be directly passed to the `feed_dict` argument of `sess.run()`.
Args:
input_data: Input key -> input value mapping. The input keys should match
the input keys of `signature_def`.
signature_def: A SignatureDef representing the function that `input_data` is
an input to.
Returns:
Feed dict, which is intended to be used as input for `sess.run`. It is
essentially a mapping: input tensor name -> input value. Note that the input
value in the feed dict is not a `Tensor`. | github-repos |
def get_extra_args():
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_args
else:
return [] | Returns the corresponding function arguments for the captured inputs.
Returns:
If the default graph is being used to define a function, the
returned list of place holders are those used inside the function
body corresponding those returned by get_extra_inputs(). Otherwise,
returns an empty list. | github-repos |
def get(self, item, alt=None):
try:
val = self[item]
except ValueError:
return alt
return val if val is not None else alt | Standard dict-like .get() method.
Args:
item (str): See :meth:`.__getitem__` for details.
alt (default None): Alternative value, if item is not found.
Returns:
obj: `item` or `alt`, if item is not found. | juraj-google-style |
def get_train_examples(self, data_dir, filename=None):
if data_dir is None:
data_dir = ''
if self.train_file is None:
raise ValueError('SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor')
with open(os.path.join(data_dir, self.train_file if filename is None else filename), 'r', encoding='utf-8') as reader:
input_data = json.load(reader)['data']
return self._create_examples(input_data, 'train') | Returns the training examples from the data directory.
Args:
data_dir: Directory containing the data files used for training and evaluating.
filename: None by default, specify this if the training file has a different name than the original one
which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively. | github-repos |
def ExtractEvents(
self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
self._ParseSubKey(parser_mediator, registry_key, [], codepage=codepage) | Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage. | juraj-google-style |
def moments_v2(x, axes, shift=None, keepdims=False, name=None):
return moments(x=x, axes=axes, shift=shift, name=name, keep_dims=keepdims) | Calculates the mean and variance of `x`.
The mean and variance are calculated by aggregating the contents of `x`
across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean
and variance of a vector.
Note: shift is currently not used; the true mean is computed and used.
When using these moments for batch normalization (see
`tf.nn.batch_normalization`):
* for so-called "global normalization", used with convolutional filters with
shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
* for simple batch normalization pass `axes=[0]` (batch only).
Args:
x: A `Tensor`.
axes: Array of ints. Axes along which to compute mean and
variance.
shift: Not used in the current implementation.
keepdims: produce moments with the same dimensionality as the input.
name: Name used to scope the operations that compute the moments.
Returns:
Two `Tensor` objects: `mean` and `variance`. | github-repos |
def init_log(log_file):
log = None
try:
log = open(log_file, 'a')
sys.stdout = Tee(sys.stdout, log)
except:
pass
return log | Creates log file on disk and "Tees" :py:class:`sys.stdout` to console and disk
Args:
log_file (str): The path on disk to append or create the log file.
Returns:
file: The opened log file. | juraj-google-style |
def intersect(self, second_iterable, selector=identity):
if self.closed():
raise ValueError('Attempt to call intersect() on a closed Queryable.')
if (not is_iterable(second_iterable)):
raise TypeError('Cannot compute intersect() with second_iterable of non-iterable {0}'.format(str(type(second_iterable))[7:(- 1)]))
if (not is_callable(selector)):
raise TypeError('intersect() parameter selector={0} is not callable'.format(repr(selector)))
return self._create(self._generate_intersect_result(second_iterable, selector)) | Returns those elements which are both in the source sequence and in
the second_iterable.
Note: This method uses deferred execution.
Args:
second_iterable: Elements are returned if they are also in the
sequence.
selector: An optional single argument function which is used to
project the elements in the source and second_iterables prior
to comparing them. If omitted the identity function will be
used.
Returns:
A sequence containing all elements in the source sequence which
are also members of the second sequence.
Raises:
ValueError: If the Queryable has been closed.
TypeError: If the second_iterable is not in fact iterable.
TypeError: If the selector is not callable. | codesearchnet |
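A hypothetical usage sketch, assuming a `query()` initiator that wraps an iterable in a Queryable:

```python
query([1, 2, 3, 4]).intersect([3, 4, 5]).to_list()                 # [3, 4]
query(['A', 'b']).intersect(['a'], selector=str.lower).to_list()   # ['A'] -- source elements are returned
```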
def success(channel, image, hex_str):
hex_number = int(hex_str, 16)
gui = ui_embed.UI(
channel,
"",
"
modulename=modulename,
colour=hex_number,
thumbnail=image,
)
return gui | Creates an embed UI containing a hex color message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
image (str): The url of the image to add
hex_str (str): The hex value
Returns:
ui (ui_embed.UI): The embed UI object that was created | juraj-google-style |
def is_back_tracking(neurite):
def pair(segs):
' Pairs consecutive elements of the input list'
return zip(segs, segs[1:])
def coords(node):
' Returns the first three values of the tree that correspond to the x, y, z coordinates'
return node[COLS.XYZ]
def max_radius(seg):
' Returns maximum radius from the two segment endpoints'
return max(seg[0][COLS.R], seg[1][COLS.R])
def is_not_zero_seg(seg):
' Returns True if segment has non-zero length'
return (not np.allclose(coords(seg[0]), coords(seg[1])))
def is_in_the_same_verse(seg1, seg2):
' Checks if the vectors face the same direction. This\n is true if their dot product is greater than zero.\n '
v1 = (coords(seg2[1]) - coords(seg2[0]))
v2 = (coords(seg1[1]) - coords(seg1[0]))
return (np.dot(v1, v2) >= 0)
def is_seg2_within_seg1_radius(dist, seg1, seg2):
' Checks whether the orthogonal distance from the point at the end of\n seg1 to seg2 segment body is smaller than the sum of their radii\n '
return (dist <= (max_radius(seg1) + max_radius(seg2)))
def is_seg1_overlapping_with_seg2(seg1, seg2):
'Checks if a segment is in proximity of another one upstream'
s1 = coords(seg2[0])
s2 = coords(seg2[1])
C = (0.5 * (s1 + s2))
P = coords(seg1[1])
CP = (P - C)
S1S2 = (s2 - s1)
prj = mm.vector_projection(CP, S1S2)
if (not is_seg2_within_seg1_radius(np.linalg.norm((CP - prj)), seg1, seg2)):
return False
return (np.linalg.norm(prj) < (0.55 * np.linalg.norm(S1S2)))
def is_inside_cylinder(seg1, seg2):
' Checks if seg2 approximately lies within a cylindrical volume of seg1.\n Two conditions must be satisfied:\n 1. The two segments are not facing the same direction (seg2 comes back to seg1)\n 2. seg2 is overlaping with seg1\n '
return ((not is_in_the_same_verse(seg1, seg2)) and is_seg1_overlapping_with_seg2(seg1, seg2))
section_itr = (snode for snode in neurite.iter_sections() if (snode.points.shape[0] > 2))
for snode in section_itr:
segment_pairs = list(filter(is_not_zero_seg, pair(snode.points)))
for (i, seg1) in enumerate(segment_pairs[1:]):
for seg2 in segment_pairs[0:(i + 1)]:
if is_inside_cylinder(seg1, seg2):
return True
return False | Check if a neurite process backtracks to a previous node. Back-tracking takes place
when a daughter of a branching process goes back and either overlaps with a previous point, or
lies inside the cylindrical volume of the latter.
Args:
neurite(Neurite): neurite to operate on
Returns:
True under the following scenarios:
1. A segment endpoint falls back and overlaps with a previous segment's point
2. The geometry of a segment overlaps with a previous one in the section | codesearchnet |
def sheets_values_batch_update(config, auth, sheet_url_or_name, data):
sheet_id = sheets_id(config, auth, sheet_url_or_name)
API_Sheets(config, auth).spreadsheets().values().batchUpdate(spreadsheetId=sheet_id, body=data).execute() | Helper for performing batch value operations.
Args:
config - see starthinker/util/configuration.py
auth - user or service
sheet_url_or_name - one of: URL, document title, or id
data - JSON data for sending to batch request
No Return | github-repos |
def xresnet18(pretrained=False, **kwargs):
model = XResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['xresnet18']))
return model | Constructs a XResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | juraj-google-style |
def create_tree_from_string(line):
depth = 0
current_word = ""
root = None
current_node = root
for char in line:
if char == '(':
if current_node is not None and len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
depth += 1
if depth > 1:
child = LabeledTree(depth=depth)
current_node.add_child(child)
current_node = child
root.add_general_child(child)
else:
root = LabeledTree(depth=depth)
root.add_general_child(root)
current_node = root
elif char == ')':
if len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
depth -= 1
if current_node.parent != None:
current_node.parent.udepth = max(current_node.udepth+1, current_node.parent.udepth)
current_node = current_node.parent
else:
current_word += char
if depth != 0:
raise ParseError("Not an equal amount of closing and opening parentheses")
return root | Parse and convert a string representation
of an example into a LabeledTree datastructure.
Arguments:
----------
line : str, string version of the tree.
Returns:
--------
LabeledTree : parsed tree. | juraj-google-style |
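A short sketch of the parser above on a PTB-style sentiment string; unbalanced parentheses raise the ParseError from the final depth check:

```python
root = create_tree_from_string('(3 (2 Hello) (4 world))')  # LabeledTree with two children
create_tree_from_string('(3 (2 Hello)')                    # raises ParseError
```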
def sample_from_discretized_mix_logistic(pred, seed=None):
(logits, locs, log_scales, coeffs) = split_to_discretized_mix_logistic_params(pred)
num_mixtures = shape_list(logits)[(- 1)]
gumbel_noise = (- tf.log((- tf.log(tf.random_uniform(tf.shape(logits), minval=1e-05, maxval=(1.0 - 1e-05), seed=seed)))))
sel = tf.one_hot(tf.argmax((logits + gumbel_noise), (- 1)), depth=num_mixtures, dtype=tf.float32)
sel = tf.expand_dims(sel, (- 1))
locs = tf.reduce_sum((locs * sel), 3)
log_scales = tf.reduce_sum((log_scales * sel), 3)
coeffs = tf.reduce_sum((coeffs * sel), 3)
uniform_noise = tf.random_uniform(tf.shape(locs), minval=1e-05, maxval=(1.0 - 1e-05), seed=seed)
logistic_noise = (tf.log(uniform_noise) - tf.log1p((- uniform_noise)))
x = (locs + (tf.exp(log_scales) * logistic_noise))
x0 = x[(..., 0)]
x1 = (x[(..., 1)] + (coeffs[(..., 0)] * x0))
x2 = ((x[(..., 2)] + (coeffs[(..., 1)] * x0)) + (coeffs[(..., 2)] * x1))
x = tf.stack([x0, x1, x2], axis=(- 1))
x = tf.clip_by_value(x, (- 1.0), 1.0)
return x | Sampling from a discretized mixture of logistics.
Args:
pred: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
seed: Random seed.
Returns:
A tensor of shape [batch, height, width, 3] with real intensities scaled
between -1 and 1. | codesearchnet |
def match(self, message) -> bool:
if (self.to and (message.to != self.to)):
return False
if (self.sender and (message.sender != self.sender)):
return False
if (self.body and (message.body != self.body)):
return False
if (self.thread and (message.thread != self.thread)):
return False
for (key, value) in self.metadata.items():
if (message.get_metadata(key) != value):
return False
logger.debug(f'message matched {self} == {message}')
return True | Returns whether a message matches with this message or not.
The message can be a Message object or a Template object.
Args:
message (spade.message.Message): the message to match to
Returns:
bool: whether the message matches or not
def sample_g_values(self, ngram_keys: torch.LongTensor) -> torch.LongTensor:
sampling_table_size, = self.sampling_table.shape
sampling_table = self.sampling_table.reshape((1, 1, sampling_table_size))
ngram_keys = ngram_keys % sampling_table_size
return torch.take_along_dim(sampling_table, indices=ngram_keys, dim=2) | Samples g values from Bernoulli distribution.
It is not possible to pass random keys in a vectorized way in torch. Instead
we pre-compute a random sampling table, and use apply modulo table size to
map from ngram keys (int64) to g values.
Args:
ngram_keys (`torch.LongTensor`):
Random keys (batch_size, num_ngrams, depth).
Returns:
G values (batch_size, num_ngrams, depth). | github-repos |
def city(self, value=None):
if (value is not None):
try:
value = str(value)
except ValueError:
raise ValueError('value {} need to be of type str for field `city`'.format(value))
if (',' in value):
raise ValueError('value should not contain a comma for field `city`')
self._city = value | Corresponds to IDD Field `city`
Args:
value (str): value for IDD Field `city`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | codesearchnet |
def build_image(registry, image):
if ':' in image['name']:
_, tag = image['name'].split(':', 1)
else:
_, tag = image['name'], None
values = {
'registry': '' if registry is None else registry + '/',
'image': image['name'],
'tag': tag,
}
if tag is None:
args = [
'-t {registry}{image}'.format(**values),
'-t {registry}{image}:{version}'.format(
version=versioning.current(),
**values
),
]
else:
args = ['-t {registry}{image}'.format(**values)]
if 'file' in image:
args.append('-f {}'.format(conf.proj_path(image['file'])))
with conf.within_proj_dir(image.get('path', '.')):
log.info("Building <33>{registry}<35>/{image}", **values)
shell.run('docker build {args} .'.format(args=' '.join(args))) | Build docker image.
Args:
registry (str):
The name of the registry this image belongs to. If not given, the
resulting image will have a name without the registry.
image (dict[str, Any]):
The dict containing the information about the built image. This is
the same dictionary as defined in DOCKER_IMAGES variable. | juraj-google-style |
def save_as_json(total: list,
name='data.json',
sort_by: str = None,
no_duplicate=False,
order='asc'):
if sort_by:
reverse = order == 'desc'
total = sorted(total, key=itemgetter(sort_by), reverse=reverse)
if no_duplicate:
total = [key for key, _ in groupby(total)]
data = json.dumps(total, ensure_ascii=False)
Path(name).write_text(data, encoding='utf-8') | Save what you crawled as a json file.
Args:
total (list): Total of data you crawled.
name (str, optional): Defaults to 'data.json'. The name of the file.
sort_by (str, optional): Defaults to None. Sort items by a specific key.
no_duplicate (bool, optional): Defaults to False. If True, it will remove duplicated data.
order (str, optional): Defaults to 'asc'. The opposite option is 'desc'. | juraj-google-style |
def VerifyStructure(self, parser_mediator, line):
self._last_month = 0
self._year_use = parser_mediator.GetEstimatedYear()
key = 'header'
try:
structure = self._MAC_WIFI_HEADER.parseString(line)
except pyparsing.ParseException:
structure = None
if not structure:
key = 'turned_over_header'
try:
structure = self._MAC_WIFI_TURNED_OVER_HEADER.parseString(line)
except pyparsing.ParseException:
structure = None
if not structure:
logger.debug('Not a Mac Wifi log file')
return False
time_elements_tuple = self._GetTimeElementsTuple(key, structure)
try:
dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug(
'Not a Mac Wifi log file, invalid date and time: {0!s}'.format(
structure.date_time))
return False
self._last_month = time_elements_tuple[1]
return True | Verify that this file is a Mac Wifi log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not. | juraj-google-style |
def parse(source):
if isinstance(source, str):
return parse_stream(six.StringIO(source))
else:
return parse_stream(source) | Parses source code and returns an array of instructions suitable for
optimization and execution by a Machine.
Args:
source: A string or stream containing source code. | codesearchnet |
def _logspace_mean(log_values):
center = tf.stop_gradient(_sample_max(log_values))
centered_values = tf.math.exp(log_values - center)
log_mean_of_values = tf.math.log(_sample_mean(centered_values)) + center
return log_mean_of_values | Evaluate `Log[E[values]]` in a stable manner.
Args:
log_values: `Tensor` holding `Log[values]`.
Returns:
`Tensor` of same `dtype` as `log_values`, reduced across dim 0.
`Log[Mean[values]]`. | juraj-google-style |
def resolve(self, keys: Iterable[str]) -> Tuple[Dict[KeySpec, List[str]], List[str]]:
keys = list(keys)
input_keyset = set(keys)
nonconst_key_specs = [k for k in self._fields.keys() if not k.is_const]
nonconst_keys = {k: [] for k in nonconst_key_specs}
unmatched_keys = []
keys_by_key_spec = dict()
for key in keys:
if key not in self._fields:
matched_nonconst_keys = False
for key_spec in nonconst_key_specs:
if key_spec.match(key):
nonconst_keys[key_spec].append(key)
matched_nonconst_keys = True
break
if not matched_nonconst_keys:
unmatched_keys.append(key)
for key_spec in self._fields.keys():
keys = []
if not key_spec.is_const:
keys = nonconst_keys.get(key_spec, [])
elif key_spec in input_keyset:
keys.append(str(key_spec))
keys_by_key_spec[key_spec] = keys
return (keys_by_key_spec, unmatched_keys) | Resolve keys by grouping them by their matched fields.
Args:
keys: A list of string keys.
Returns:
A tuple of matched key results and unmatched keys.
Matched key results are an ordered dict of KeySpec to matched keys,
in field declaration order.
Unmatched keys are strings from input. | github-repos |
def extend(self, *iterables):
for value in iterables:
list.extend(self, value)
return self | Add all values of all iterables at the end of the list
Args:
iterables: iterable which content to add at the end
Example:
>>> from ww import l
>>> lst = l([])
>>> lst.extend([1, 2])
[1, 2]
>>> lst
[1, 2]
>>> lst.extend([3, 4]).extend([5, 6])
[1, 2, 3, 4, 5, 6]
>>> lst
[1, 2, 3, 4, 5, 6] | juraj-google-style |
def filter_values(cls, part_info):
filtered = []
for info_list in cls.filter_parts(part_info).values():
filtered += info_list
return filtered | Filter the part_info dict list looking for instances of our class
Args:
part_info (dict): {part_name: [Info] or None} as returned from
Controller.run_hook()
Returns:
list: [info] where info is a subclass of cls | codesearchnet |
def register_read_multiple(self, register_indices):
num_regs = len(register_indices)
buf = (ctypes.c_uint32 * num_regs)(*register_indices)
data = (ctypes.c_uint32 * num_regs)(0)
statuses = (ctypes.c_uint8 * num_regs)(0)
res = self._dll.JLINKARM_ReadRegs(buf, data, statuses, num_regs)
if (res < 0):
raise errors.JLinkException(res)
return list(data) | Retrieves the values from the registers specified.
Args:
self (JLink): the ``JLink`` instance
register_indices (list): list of registers to read
Returns:
A list of values corresponding one-to-one for each of the given
register indices. The returned list of values are the values in
order of which the indices were specified.
Raises:
JLinkException: if a given register is invalid or an error occurs. | codesearchnet |
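A hedged usage sketch; the probe setup and target device name are illustrative assumptions, and the `pylink` package providing `JLink` is assumed to be installed.
import pylink  # assumption: the pylink-square package

jlink = pylink.JLink()
jlink.open()                     # attach to the first connected J-Link probe
jlink.connect('STM32F407VG')     # hypothetical target MCU
r0, r1, r2 = jlink.register_read_multiple([0, 1, 2])
print(hex(r0), hex(r1), hex(r2))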
def _follow_link(self, link_path_components, link):
link_path = link.contents
sep = self._path_separator(link_path)
if (not self._starts_with_root_path(link_path)):
components = link_path_components[:(- 1)]
components.append(link_path)
link_path = sep.join(components)
return self.normpath(link_path) | Follow a link w.r.t. a path resolved so far.
The component is either a real file, which is a no-op, or a
symlink. In the case of a symlink, we have to modify the path
as built up so far
/a/b => ../c should yield /a/../c (which will normalize to /a/c)
/a/b => x should yield /a/x
/a/b => /x/y/z should yield /x/y/z
The modified path may land us in a new spot which is itself a
link, so we may repeat the process.
Args:
link_path_components: The resolved path built up to the link
so far.
link: The link object itself.
Returns:
(string) The updated path resolved after following the link.
Raises:
IOError: if there are too many levels of symbolic link | codesearchnet |
def headers(self, headers=None, **kw):
headers = kw if kw else headers
self._request.headers = headers
        self.add_matcher(matcher('HeadersMatcher', headers)) | Defines the request headers to match, passed as a dict or as keyword arguments.
Header keys are case insensitive.
Arguments:
headers (dict): headers to match.
**headers (dict): headers to match as variadic keyword arguments.
Returns:
self: current Mock instance. | juraj-google-style |
def _ReadStructure(self, file_object, file_offset, data_size, data_type_map, description):
data = self._ReadData(file_object, file_offset, data_size, description)
return self._ReadStructureFromByteStream(data, file_offset, data_type_map, description) | Reads a structure.
Args:
file_object (FileIO): file-like object.
file_offset (int): offset of the data relative from the start of
the file-like object.
data_size (int): data size of the structure.
data_type_map (dtfabric.DataTypeMap): data type map of the structure.
description (str): description of the structure.
Returns:
object: structure values object.
Raises:
FileFormatError: if the structure cannot be read.
ValueError: if file-like object or date type map are invalid. | codesearchnet |
def size(self):
gate_ops = 0
for (instr, _, _) in self.data:
if (instr.name not in ['barrier', 'snapshot']):
gate_ops += 1
return gate_ops | Returns total number of gate operations in circuit.
Returns:
int: Total number of gate operations. | codesearchnet |
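A hedged example of what the count includes (a Qiskit-style circuit is assumed): barriers and snapshots are skipped, but measurements are counted.
from qiskit import QuantumCircuit

qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.barrier()                 # not counted
qc.measure([0, 1], [0, 1])
print(qc.size())             # 4: h, cx and the two measure operations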
def commit_offsets_async(self, offsets, callback=None):
self._invoke_completed_offset_commit_callbacks()
if (not self.coordinator_unknown()):
future = self._do_commit_offsets_async(offsets, callback)
else:
future = self.lookup_coordinator()
future.add_callback((lambda r: functools.partial(self._do_commit_offsets_async, offsets, callback)()))
if callback:
future.add_errback((lambda e: self.completed_offset_commits.appendleft((callback, offsets, e))))
self._client.poll(timeout_ms=0)
return future | Commit specific offsets asynchronously.
Arguments:
offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit
callback (callable, optional): called as callback(offsets, response)
response will be either an Exception or a OffsetCommitResponse
struct. This callback can be used to trigger custom actions when
a commit request completes.
Returns:
kafka.future.Future | codesearchnet |
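The same pattern is reachable through the public consumer API as `commit_async`; a hedged sketch (broker address and topic are illustrative assumptions).
from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers='localhost:9092',   # assumed broker
                         group_id='demo-group',
                         enable_auto_commit=False)

def on_commit(offsets, response):
    # response is an OffsetCommitResponse struct on success, an exception on failure
    print('commit finished:', offsets, response)

consumer.assign([TopicPartition('demo-topic', 0)])
consumer.poll(timeout_ms=1000)
consumer.commit_async(callback=on_commit)   # commits the offsets consumed so far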
def modutf7_decode(data: bytes) -> str:
parts = []
is_usascii = True
buf = memoryview(data)
while buf:
byte = buf[0]
if is_usascii:
if (buf[0:2] == b'&-'):
parts.append('&')
buf = buf[2:]
elif (byte == 38):
is_usascii = False
buf = buf[1:]
else:
parts.append(chr(byte))
buf = buf[1:]
else:
for (i, byte) in enumerate(buf):
if (byte == 45):
to_decode = buf[:i].tobytes()
decoded = _modified_b64decode(to_decode)
parts.append(decoded)
buf = buf[(i + 1):]
is_usascii = True
break
if (not is_usascii):
to_decode = buf.tobytes()
decoded = _modified_b64decode(to_decode)
parts.append(decoded)
return ''.join(parts) | Decode the bytestring using modified UTF-7.
Args:
data: The encoded bytestring to decode. | codesearchnet |
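The `_modified_b64decode` helper is not shown above; a self-contained sketch of the underlying idea (modified BASE64 swaps ',' back to '/', re-adds padding, and decodes the payload as UTF-16BE), followed by the classic IMAP mailbox example. Both are assumptions about the helper, not its actual source.
import base64

def _modified_b64decode(data: bytes) -> str:
    std = data.replace(b',', b'/')          # modified BASE64 uses ',' instead of '/'
    std += b'=' * (-len(std) % 4)           # restore the stripped padding
    return base64.b64decode(std).decode('utf-16-be')

print(modutf7_decode(b'Entw&APw-rfe'))      # 'Entwürfe'
print(modutf7_decode(b'A &- B'))            # 'A & B'  ('&-' is the escaped ampersand)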
def implement(cls, implementations, for_type=None, for_types=None):
for type_ in cls.__get_type_args(for_type, for_types):
cls._implement_for_type(for_type=type_, implementations=implementations) | Provide protocol implementation for a type.
Register all implementations of multimethod functions in this
protocol and add the type into the abstract base class of the
protocol.
Arguments:
implementations: A dict of (function, implementation), where each
function is multimethod and each implementation is a callable.
for_type: The concrete type implementations apply to.
for_types: Same as for_type, but takes a tuple of types.
You may not supply both for_type and for_types for obvious reasons.
Raises:
    ValueError for invalid arguments (e.g. supplying both for_type and for_types).
TypeError if not all implementations are provided or if there
are issues related to polymorphism (e.g. attempting to
    implement a non-multimethod function). | codesearchnet
def static(self, root, path, media_type=None, charset='UTF-8'):
root = os.path.abspath(os.path.join(root, ''))
path = os.path.abspath(os.path.join(root, path.lstrip('/\\')))
self.response.state['filename'] = os.path.basename(path)
if (not path.startswith(root)):
return 403
elif (not os.path.isfile(path)):
return 404
if (media_type is not None):
self.response.media_type = media_type
else:
self.response.media_type = mimetypes.guess_type(path)[0]
self.response.charset = charset
with open(path, 'rb') as f:
return f.read() | Send content of a static file as response.
The path to the document root directory should be specified as
the root argument. This is very important to prevent directory
traversal attack. This method guarantees that only files within
the document root directory are served and no files outside this
directory can be accessed by a client.
The path to the actual file to be returned should be specified
as the path argument. This path must be relative to the document
directory.
The *media_type* and *charset* arguments are used to set the
Content-Type header of the HTTP response. If *media_type*
is not specified or specified as ``None`` (the default), then it
is guessed from the filename of the file to be returned.
Arguments:
root (str): Path to document root directory.
path (str): Path to file relative to document root directory.
media_type (str, optional): Media type of file.
charset (str, optional): Character set of file.
Returns:
bytes: Content of file to be returned in the HTTP response. | codesearchnet |
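A minimal sketch of the containment check that blocks directory traversal; paths are illustrative, and `os.path.commonpath` is used instead of the raw prefix test to sidestep the `/srv/www-evil` prefix pitfall.
import os

def is_inside_root(root, requested):
    root = os.path.abspath(root)
    target = os.path.abspath(os.path.join(root, requested.lstrip('/\\')))
    return os.path.commonpath([root, target]) == root

print(is_inside_root('/srv/www', 'css/site.css'))    # True
print(is_inside_root('/srv/www', '../etc/passwd'))   # False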
def shape(self):
return self._dense_shape_default | Get the `TensorShape` representing the shape of the dense tensor.
Returns:
A `TensorShape` object. | github-repos |
def file_modified_time(file_name) -> pd.Timestamp:
    return pd.to_datetime(time.ctime(os.path.getmtime(filename=file_name))) | Get a file's last modification time
Args:
file_name: file name
Returns:
pd.Timestamp | juraj-google-style |
def check_trace_mode(device_type, trace_mode):
if trace_mode == tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY:
if device_type != _DEVICE_TYPE_TPU:
raise ValueError('Device_type "%s" is not yet supported for trace mode "%s"' % (device_type, trace_mode)) | Checks if the given trace mode work on the given device type.
Args:
device_type: Device type, TPU, GPU, CPU.
trace_mode: Tensor tracer trace mode.
Raises:
ValueError: If the given trace mode is not supported for the device. | github-repos |
def state_estimation_ensemble(data, k, n_runs=10, M_list=[], **se_params):
if (len(M_list) == 0):
M_list = []
for i in range(n_runs):
(M, W, ll) = poisson_estimate_state(data, k, **se_params)
M_list.append(M)
M_stacked = np.hstack(M_list)
(M_new, W_new, ll) = poisson_estimate_state(M_stacked, k, **se_params)
W_new = np.dot(data.T, M_new)
W_new = (W_new / W_new.sum(0))
return (M_new, W_new, ll) | Runs an ensemble method on the list of M results...
Args:
data: genes x cells array
k: number of classes
n_runs (optional): number of random initializations of state estimation
M_list (optional): list of M arrays from state estimation
se_params (optional): optional poisson_estimate_state params
Returns:
M_new
W_new
ll | codesearchnet |
def __init__(self, certificate_type, value, masks=None,
name='Certificate'):
super(Certificate, self).__init__()
self._object_type = enums.ObjectType.CERTIFICATE
self.value = value
self.certificate_type = certificate_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
self._cryptographic_algorithm = None
self._cryptographic_length = None
self._certificate_length = None
self._cryptographic_parameters = list()
self._digital_signature_algorithm = list()
self.validate() | Create a Certificate.
Args:
certificate_type(CertificateType): An enumeration defining the
type of the certificate.
value(bytes): The bytes representing the certificate.
masks(list): A list of CryptographicUsageMask enumerations
defining how the certificate will be used.
name(string): The string name of the certificate. | juraj-google-style |
def count_up_to(ref, limit, name=None):
if ref.dtype._is_ref_dtype:
return gen_state_ops.count_up_to(ref, limit=limit, name=name)
return gen_state_ops.resource_count_up_to(ref.handle, limit, T=ref.dtype, name=name) | Increments 'ref' until it reaches 'limit'.
Args:
ref: A Variable. Must be one of the following types: `int32`, `int64`.
Should be from a scalar `Variable` node.
limit: An `int`.
If incrementing ref would bring it above limit, instead generates an
'OutOfRange' error.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `ref`.
A copy of the input before increment. If nothing else modifies the
input, the values produced will all be distinct. | github-repos |
def get_model_test_files() -> List[str]:
_ignore_files = ['test_modeling_common', 'test_modeling_encoder_decoder', 'test_modeling_flax_encoder_decoder', 'test_modeling_flax_speech_encoder_decoder', 'test_modeling_marian', 'test_modeling_tf_common', 'test_modeling_tf_encoder_decoder']
test_files = []
model_test_root = os.path.join(PATH_TO_TESTS, 'models')
model_test_dirs = []
for x in os.listdir(model_test_root):
x = os.path.join(model_test_root, x)
if os.path.isdir(x):
model_test_dirs.append(x)
for target_dir in [PATH_TO_TESTS] + model_test_dirs:
for file_or_dir in os.listdir(target_dir):
path = os.path.join(target_dir, file_or_dir)
if os.path.isfile(path):
filename = os.path.split(path)[-1]
if 'test_modeling' in filename and os.path.splitext(filename)[0] not in _ignore_files:
file = os.path.join(*path.split(os.sep)[1:])
test_files.append(file)
return test_files | Get the model test files.
Returns:
`List[str]`: The list of test files. The returned files will NOT contain the `tests` (i.e. `PATH_TO_TESTS`
defined in this script). They will be considered as paths relative to `tests`. A caller has to use
`os.path.join(PATH_TO_TESTS, ...)` to access the files. | github-repos |
def AddRoute(self, short_name, long_name, route_type, route_id=None):
if route_id is None:
route_id = util.FindUniqueId(self.routes)
route = self._gtfs_factory.Route(short_name=short_name, long_name=long_name,
route_type=route_type, route_id=route_id)
route.agency_id = self.GetDefaultAgency().agency_id
self.AddRouteObject(route)
return route | Add a route to this schedule.
Args:
short_name: Short name of the route, such as "71L"
long_name: Full name of the route, such as "NW 21st Ave/St Helens Rd"
route_type: A type such as "Tram", "Subway" or "Bus"
route_id: id of the route or None, in which case a unique id is picked
Returns:
A new Route object | juraj-google-style |
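A hedged usage sketch assuming the transitfeed package and an agency already registered (names and timezone are illustrative).
import transitfeed  # assumption: the transitfeed package

schedule = transitfeed.Schedule()
schedule.AddAgency('Demo Transit', 'http://example.com', 'America/Los_Angeles')
route = schedule.AddRoute(short_name='71L',
                          long_name='NW 21st Ave/St Helens Rd',
                          route_type='Bus')
print(route.route_id)   # a unique id is picked automatically when route_id is None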
def _group_sentences(total_nb_sentences, group_length):
sentences_groups = []
current_sentence_group = []
for i in range(0, total_nb_sentences):
if i % group_length == 0:
if len(current_sentence_group) > 0:
sentences_groups.append(current_sentence_group)
current_sentence_group = [i]
else:
current_sentence_group.append(i)
if len(current_sentence_group) > 0:
sentences_groups.append(current_sentence_group)
    return sentences_groups | Split sentence indices into groups of a given maximum length.
Args:
total_nb_sentences (int): Total available sentences.
group_length (int): Limit of length for each group.
Returns:
list: Contains groups (lists) of sentences. | juraj-google-style |
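For example, seven sentences split into groups of three leave a shorter final group:
print(_group_sentences(7, 3))
# [[0, 1, 2], [3, 4, 5], [6]]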
def is_table(engine, sql):
if engine.dialect.has_table(engine, sql):
return True
    return False | Check whether the given sql argument is a query or a table name
Args:
engine: SQLAlchemy connection engine
sql: SQL query or table name
Returns:
    True if sql refers to an existing table, False if not | juraj-google-style
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
pass | Returns a `TensorSequenceLengthPair`.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables. | github-repos |
def ParseOptions(self, options):
helpers_manager.ArgumentHelperManager.ParseOptions(options, self, names=['data_location'])
signature_identifiers = self.ParseStringOption(options, 'signature_identifiers')
if (signature_identifiers == 'list'):
self.list_signature_identifiers = True
if self.list_signature_identifiers:
return
self._ParseInformationalOptions(options)
self._ParseLogFileOptions(options)
self._ParseStorageMediaOptions(options)
self._destination_path = self.ParseStringOption(options, 'path', default_value='export')
if (not self._data_location):
logger.warning('Unable to automatically determine data location.')
argument_helper_names = ['artifact_definitions', 'process_resources']
helpers_manager.ArgumentHelperManager.ParseOptions(options, self, names=argument_helper_names)
self._ParseFilterOptions(options)
if (getattr(options, 'no_vss', False) or getattr(options, 'include_duplicates', False)):
self._skip_duplicates = False
self._EnforceProcessMemoryLimit(self._process_memory_limit) | Parses the options and initializes the front-end.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid. | codesearchnet |
def fig_to_svg(fig):
buf = io.StringIO()
fig.savefig(buf, format='svg')
buf.seek(0)
    return buf.getvalue() | Helper function to convert a matplotlib figure to an SVG string
Returns:
str: figure as SVG string | codesearchnet |
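A hedged usage sketch (the Agg backend is assumed so it runs headless):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
svg_text = fig_to_svg(fig)
print(svg_text.splitlines()[0])   # the XML declaration of the SVG document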
def _parse_schema_field(field):
schema = bigquery.TableFieldSchema()
schema.name = field['name']
schema.type = field['type']
if 'mode' in field:
schema.mode = field['mode']
else:
schema.mode = 'NULLABLE'
if 'description' in field:
schema.description = field['description']
if 'fields' in field:
schema.fields = [_parse_schema_field(x) for x in field['fields']]
return schema | Parse a single schema field from dictionary.
Args:
field: Dictionary object containing serialized schema.
Returns:
A TableFieldSchema for a single column in BigQuery. | github-repos |
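The dictionary shape the parser expects, shown with an illustrative nested RECORD field (all names are assumptions):
field = {
    'name': 'user',
    'type': 'RECORD',
    'mode': 'REPEATED',
    'description': 'Repeated nested record',
    'fields': [
        {'name': 'id', 'type': 'INTEGER'},                        # mode defaults to NULLABLE
        {'name': 'email', 'type': 'STRING', 'mode': 'NULLABLE'},
    ],
}
schema = _parse_schema_field(field)   # recurses into the nested 'fields'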
def ParseForwardedIps(self, forwarded_ips):
addresses = []
forwarded_ips = forwarded_ips or []
for ip in forwarded_ips:
if ip and (IP_REGEX.match(ip) or IP_ALIAS_REGEX.match(ip)):
addresses.extend([str(addr) for addr in list(netaddr.IPNetwork(ip))])
else:
self.logger.warning('Could not parse IP address: "%s".', ip)
return addresses | Parse and validate forwarded IP addresses.
Args:
forwarded_ips: list, the IP address strings to parse.
Returns:
list, the valid IP address strings. | juraj-google-style |
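What the netaddr expansion yields for an aliased range (addresses are illustrative):
import netaddr

print([str(addr) for addr in netaddr.IPNetwork('192.168.1.0/30')])
# ['192.168.1.0', '192.168.1.1', '192.168.1.2', '192.168.1.3']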
def get_checkpoint_state(checkpoint_dir, latest_filename=None):
if isinstance(checkpoint_dir, os.PathLike):
checkpoint_dir = os.fspath(checkpoint_dir)
ckpt = None
coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir, latest_filename)
f = None
try:
if file_io.file_exists(coord_checkpoint_filename):
file_content = file_io.read_file_to_string(coord_checkpoint_filename)
ckpt = CheckpointState()
text_format.Merge(file_content, ckpt)
if not ckpt.model_checkpoint_path:
raise ValueError('Invalid checkpoint state loaded from ' + checkpoint_dir)
if not os.path.isabs(ckpt.model_checkpoint_path):
ckpt.model_checkpoint_path = os.path.join(checkpoint_dir, ckpt.model_checkpoint_path)
for i, p in enumerate(ckpt.all_model_checkpoint_paths):
if not os.path.isabs(p):
ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)
except errors.OpError as e:
logging.warning('%s: %s', type(e).__name__, e)
logging.warning('%s: Checkpoint ignored', coord_checkpoint_filename)
return None
except text_format.ParseError as e:
logging.warning('%s: %s', type(e).__name__, e)
logging.warning('%s: Checkpoint ignored', coord_checkpoint_filename)
return None
finally:
if f:
f.close()
return ckpt | Returns CheckpointState proto from the "checkpoint" file.
If the "checkpoint" file contains a valid CheckpointState
proto, returns it.
Args:
checkpoint_dir: The directory of checkpoints.
latest_filename: Optional name of the checkpoint file. Default to
'checkpoint'.
Returns:
A CheckpointState if the state was available, None
otherwise.
Raises:
ValueError: if the checkpoint read doesn't have model_checkpoint_path set. | github-repos |
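A hedged usage sketch via the public `tf.train` entry point (the checkpoint directory is a hypothetical path):
import tensorflow as tf

ckpt = tf.train.get_checkpoint_state('/tmp/model_dir')   # hypothetical directory
if ckpt and ckpt.model_checkpoint_path:
    print('latest:', ckpt.model_checkpoint_path)
    print('all:', list(ckpt.all_model_checkpoint_paths))
else:
    print('no checkpoint found')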
def testHeatEquation_WithDefaultBoundaryCondtion(self, lower_bc_type, upper_bc_type):
def final_cond_fn(x):
return math.e * math.sin(x)
def expected_result_fn(x):
return tf.sin(x)
@neumann
def boundary_fn(t, x):
del x
return -tf.exp(t)
lower_boundary_fn = boundary_fn if lower_bc_type == 'Neumann' else None
upper_boundary_fn = boundary_fn if upper_bc_type == 'Neumann' else None
grid = grids.uniform_grid(minimums=[0.0], maximums=[5 * math.pi], sizes=[1000], dtype=np.float32)
self._testHeatEquation(grid, final_t=1, time_step=0.01, final_cond_fn=final_cond_fn, expected_result_fn=expected_result_fn, one_step_fn=crank_nicolson_step(), lower_boundary_fn=lower_boundary_fn, upper_boundary_fn=upper_boundary_fn, error_tolerance=0.001) | Test for Default boundary conditions.
Tests solving heat equation with the following boundary conditions involving
default boundary `u_xx(0, t) = 0` or `u_xx(5 pi, t) = 0`.
The exact solution is `u(x, t) = e^t sin(x)`, so the expected result at `t=0` is `sin(x)`.
Args:
lower_bc_type: Lower boundary condition type.
upper_bc_type: Upper boundary condition type. | github-repos |