code (stringlengths 20–4.93k) | docstring (stringlengths 33–1.27k) | source (stringclasses, 3 values)
---|---|---|
def sget_steptime(self, cycle, step, dataset_number=None):
dataset_number = self._validate_dataset_number(dataset_number)
if (dataset_number is None):
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
step_time_header = self.headers_normal.step_time_txt
step_index_header = self.headers_normal.step_index_txt
test = self.datasets[dataset_number].dfdata
if isinstance(step, (list, tuple)):
warnings.warn(f'The variable step is a list. It should be an integer: {step}')
step = step[0]
c = test.loc[(((test[cycle_index_header] == cycle) & (test[step_index_header] == step)), :)]
if (not self.is_empty(c)):
t = c[step_time_header]
return t
else:
return None
|
Returns step time for cycle, step.
Convenience function; same as issuing
dfdata[(dfdata[cycle_index_header] == cycle) &
(dfdata[step_index_header] == step)][step_time_header]
Args:
cycle: cycle number
step: step number
dataset_number: the dataset number (automatic selection if None)
Returns:
pandas.Series or None if empty
|
codesearchnet
|
def execute_command(self, command: str, read: bool = False) -> Union[bool, str]:
if self.debug:
print(command)
return 'SIMULATING VALUE' if read else True
else:
print('\nCOMMAND\n', command, '\n' + '-' * 40)
try:
cmd = subprocess.run(command, shell=True, capture_output=read, text=True, check=True)
if read:
return cmd.stdout.strip()
return True
except subprocess.CalledProcessError as e:
return False
|
Helper function that either executes or prints each command.
Args:
command - a command line command, typically a gcloud command.
read - if True, the command's output is passed back to the caller.
Returns:
Bool - if no command output is required, simply indicates success or failure.
String - if read is specified, the command output or error is returned.
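A minimal usage sketch for execute_command above, assuming a hypothetical host class that only defines the `debug` flag and reuses the function shown here as a method:
class _Deployer:  # hypothetical host class, not part of the original source
    debug = True  # debug mode prints the command instead of running it
    execute_command = execute_command
runner = _Deployer()
runner.execute_command('gcloud projects list')                        # prints the command, returns True
value = runner.execute_command('gcloud config get-value project', read=True)
# value == 'SIMULATING VALUE' because debug is True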
|
github-repos
|
def _parse_vars(self, tokens):
key_values = {}
for token in tokens:
if token.startswith('#'):  # comment prefix assumed to be '#'
break
else:
(k, v) = token.split('=', 1)
key = k.strip()
key_values[key] = v.strip()
return key_values
|
Given an iterable of tokens, returns variables and their values as a
dictionary.
For example:
['dtap=prod', 'comment=some comment']
Returns:
{'dtap': 'prod', 'comment': 'some comment'}
|
codesearchnet
|
def pot_string_from_file(filename='feff.inp'):
with zopen(filename, "rt") as f_object:
f = f_object.readlines()
ln = -1
pot_str = ["POTENTIALS\n"]
pot_tag = -1
pot_data = 0
pot_data_over = 1
sep_line_pattern = [re.compile('ipot.*Z.*tag.*lmax1.*lmax2.*spinph'),
re.compile('^[*]+.*[*]+$')]
for line in f:
if pot_data_over == 1:
ln += 1
if pot_tag == -1:
pot_tag = line.find("POTENTIALS")
ln = 0
if pot_tag >= 0 and ln > 0 and pot_data_over > 0:
try:
if len(sep_line_pattern[0].findall(line)) > 0 or \
len(sep_line_pattern[1].findall(line)) > 0:
pot_str.append(line)
elif int(line.split()[0]) == pot_data:
pot_data += 1
pot_str.append(line.replace("\r", ""))
except (ValueError, IndexError):
if pot_data > 0:
pot_data_over = 0
return ''.join(pot_str).rstrip('\n')
|
Reads Potential parameters from a feff.inp or FEFFPOT file.
The lines are arranged as follows:
ipot Z element lmax1 lmax2 stoichiometry spinph
Args:
filename: file name containing potential data.
Returns:
FEFFPOT string.
|
juraj-google-style
|
def patch_toText(self, patches):
text = []
for patch in patches:
text.append(str(patch))
return ''.join(text)
|
Take a list of patches and return a textual representation.
Args:
patches: Array of Patch objects.
Returns:
Text representation of patches.
|
codesearchnet
|
def list_attributes(self, name):
result = self.client.service.getListAttributes(name, self.proxy_id)
if isinstance(result, list) and len(result) == 1:
return result[0]
return result
|
Look up the attributes of a list.
Args:
name (str): The name of the list
Returns:
dict: attributes of the list
|
juraj-google-style
|
def create_token(self,
token_name,
project_name,
dataset_name,
is_public):
url = self.url() + '/nd/resource/dataset/{}'.format(
dataset_name) + '/project/{}'.format(project_name) + \
'/token/{}/'.format(token_name)
json = {
"token_name": token_name,
"public": is_public
}
req = self.remote_utils.post_url(url, json=json)
if req.status_code != 201:
raise RemoteDataUploadError('Could not upload {}:'.format(req.text))
if req.content == "" or req.content == b'':
return True
else:
return False
|
Creates a token with the given parameters.
Arguments:
project_name (str): Project name
dataset_name (str): Dataset name project is based on
token_name (str): Token name
is_public (int): 1 is public. 0 is not public
Returns:
bool: True if the token was created, False if not.
|
juraj-google-style
|
def Scan(self, scan_context, auto_recurse=True, scan_path_spec=None):
if not scan_context:
raise ValueError('Invalid scan context.')
scan_context.updated = False
if scan_path_spec:
scan_node = scan_context.GetScanNode(scan_path_spec)
else:
scan_node = scan_context.GetUnscannedScanNode()
if scan_node:
self._ScanNode(scan_context, scan_node, auto_recurse=auto_recurse)
|
Scans for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
auto_recurse (Optional[bool]): True if the scan should automatically
recurse as far as possible.
scan_path_spec (Optional[PathSpec]): path specification to indicate
where the source scanner should continue scanning, where None
indicates the scanner will start with the sources.
Raises:
ValueError: if the scan context is invalid.
|
juraj-google-style
|
def update_nanopubstore_start_dt(url: str, start_dt: str):
hostname = urllib.parse.urlsplit(url)[1]
start_dates_doc = state_mgmt.get(start_dates_doc_key)
if not start_dates_doc:
start_dates_doc = {
"_key": start_dates_doc_key,
"start_dates": [{"nanopubstore": hostname, "start_dt": start_dt}],
}
state_mgmt.insert(start_dates_doc)
else:
for idx, start_date in enumerate(start_dates_doc["start_dates"]):
if start_date["nanopubstore"] == hostname:
start_dates_doc["start_dates"][idx]["start_dt"] = start_dt
break
else:
start_dates_doc["start_dates"].append(
{"nanopubstore": hostname, "start_dt": start_dt}
)
state_mgmt.replace(start_dates_doc)
|
Add nanopubstore start_dt to belapi.state_mgmt collection
Args:
url: url of nanopubstore
start_dt: datetime of the last query against the nanopubstore for new IDs
|
juraj-google-style
|
def __init__(self, seed_fn, desc=None):
if desc is None:
desc = u'Query({})'.format(getattr(seed_fn, '__name__', ''))
self.seed_fn = seed_fn
self.transforms = []
self.desc_stack = []
self.desc = desc
|
Configure the `Query`.
Args:
seed_fn (callable): Callable with no arguments that produces a list of values.
Keyword Args:
desc (str): A description of the query, used in log messages.
If not provided, defaults to the name of the seed function.
Returns:
Query
|
juraj-google-style
|
def add_triple(self, p, o, auto_refresh=True):
self.rdf.graph.add((self.uri, p, self._handle_object(o)))
self._handle_triple_refresh(auto_refresh)
|
add triple by providing p,o, assumes s = subject
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: adds triple to self.rdf.graph
|
juraj-google-style
|
def get_associated_resource(self, task):
if (not task):
raise HPOneViewUnknownType(MSG_INVALID_TASK)
if ((task['category'] != 'tasks') and (task['category'] != 'backups')):
raise HPOneViewUnknownType(MSG_UNKNOWN_OBJECT_TYPE)
if (task['type'] == 'TaskResourceV2'):
resource_uri = task['associatedResource']['resourceUri']
if (resource_uri and resource_uri.startswith('/rest/appliance/support-dumps/')):
return (task, resource_uri)
elif (task['type'] == 'BACKUP'):
task = self._connection.get(task['taskUri'])
resource_uri = task['uri']
else:
raise HPOneViewInvalidResource((MSG_TASK_TYPE_UNRECONIZED % task['type']))
entity = {}
if resource_uri:
entity = self._connection.get(resource_uri)
return (task, entity)
|
Retrieve a resource associated with a task.
Args:
task: task dict
Returns:
tuple: task (updated), the entity found (dict)
|
codesearchnet
|
def plot_vec(axis, step, var):
xmesh, ymesh, vec1, vec2 = get_meshes_vec(step, var)
dipz = step.geom.nztot
if conf.field.shift:
vec1 = np.roll(vec1, conf.field.shift, axis=0)
vec2 = np.roll(vec2, conf.field.shift, axis=0)
if step.geom.spherical or conf.plot.ratio is None:
dipx = dipz
else:
dipx = step.geom.nytot if step.geom.twod_yz else step.geom.nxtot
dipx = int(dipx
axis.quiver(xmesh[::dipx, ::dipz], ymesh[::dipx, ::dipz],
vec1[::dipx, ::dipz], vec2[::dipx, ::dipz],
linewidths=1)
|
Plot vector field.
Args:
axis (:class:`matplotlib.axes.Axes`): the axis handler of an
existing matplotlib figure where the vector field should
be plotted.
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
var (str): the vector field name.
|
juraj-google-style
|
def set_session(self, headers=None):
if (headers is None):
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}
elif (not isinstance(headers, dict)):
raise TypeError('"headers" must be a dict object')
self.session = Session(self.proxy_pool)
self.session.headers.update(headers)
|
Init session with default or custom headers
Args:
headers: A dict of headers (default None, thus using the default
header to init the session)
|
codesearchnet
|
def StringEscape(self, string, match, **_):
precondition.AssertType(string, Text)
if match.group(1) in "'\"rnbt":
self.string += compatibility.UnescapeString(string)
else:
self.string += string
|
Escape backslashes found inside a string quote.
Backslashes followed by anything other than ['"rnbt] will just be included
in the string.
Args:
string: The string that matched.
match: The match object (m.group(1) is the escaped code)
|
juraj-google-style
|
def _get_bundles_by_type(self, type):
bundles = {}
bundle_definitions = self.config.get(type)
if bundle_definitions is None:
return bundles
for bundle_name, paths in bundle_definitions.items():
bundle_files = []
for path in paths:
pattern = abspath = os.path.join(self.basedir, path)
assetdir = os.path.dirname(abspath)
fnames = [os.path.join(assetdir, fname)
for fname in os.listdir(assetdir)]
expanded_fnames = fnmatch.filter(fnames, pattern)
bundle_files.extend(sorted(expanded_fnames))
bundles[bundle_name] = bundle_files
return bundles
|
Get a dictionary of bundles for requested type.
Args:
type: 'javascript' or 'css'
|
juraj-google-style
|
def __init__(self, streaming_buffer, writer_spec=None):
self._streaming_buffer = streaming_buffer
self._no_dup = False
if writer_spec:
self._no_dup = writer_spec.get(self._NO_DUPLICATE, False)
if self._no_dup:
self._seg_index = int(streaming_buffer.name.rsplit("-", 1)[1])
self._seg_valid_length = 0
|
Initialize a GoogleCloudStorageOutputWriter instance.
Args:
streaming_buffer: an instance of writable buffer from cloudstorage_api.
writer_spec: the specification for the writer.
|
juraj-google-style
|
def __init__(self, specification_store, signature_identifiers):
super(SignaturesFileEntryFilter, self).__init__()
self._file_scanner = None
self._signature_identifiers = []
self._file_scanner = self._GetScanner(
specification_store, signature_identifiers)
|
Initializes a signature-based file entry filter.
Args:
specification_store (FormatSpecificationStore): a specification store.
signature_identifiers (list[str]): signature identifiers.
|
juraj-google-style
|
def calculate_sun_from_hoy(self, hoy, is_solar_time=False):
datetime = DateTime.from_hoy(hoy, self.is_leap_year)
return self.calculate_sun_from_date_time(datetime, is_solar_time)
|
Get Sun data for an hour of the year.
Args:
hoy: hour of the year (float), used to construct the Ladybug datetime
is_solar_time: A boolean to indicate if the input hour is solar time
(Default: False).
Returns:
A sun object for this particular time
|
juraj-google-style
|
def preemphasis(signal, shift=1, cof=0.98):
rolled_signal = np.roll(signal, shift)
return signal - cof * rolled_signal
|
Apply pre-emphasis to the signal.
Args:
signal (array): The input signal.
shift (int): The shift step.
cof (float): The preemphasising coefficient. 0 equals to no filtering.
Returns:
array: The pre-emphasized signal.
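A short usage sketch of the pre-emphasis filter above:
import numpy as np
signal = np.array([1.0, 2.0, 3.0, 4.0])
emphasized = preemphasis(signal, shift=1, cof=0.98)
# each sample becomes signal[i] - 0.98 * signal[i - 1]; note that np.roll wraps
# around, so the first output sample uses the last input sample as its predecessor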
|
juraj-google-style
|
def __init__(self, operator, left, right):
super(BinaryComposition, self).__init__(operator, left, right)
self.operator = operator
self.left = left
self.right = right
self.validate()
|
Construct an expression that connects two expressions with an operator.
Args:
operator: unicode string specifying the binary operator to apply
left: Expression on the left side of the binary operator
right: Expression on the right side of the binary operator
Returns:
new BinaryComposition object
|
juraj-google-style
|
async def update(self, service_id: str, version: str, *, image: str=None, rollback: bool=False) -> bool:
if ((image is None) and (rollback is False)):
raise ValueError('You need to specify an image.')
inspect_service = (await self.inspect(service_id))
spec = inspect_service['Spec']
if (image is not None):
spec['TaskTemplate']['ContainerSpec']['Image'] = image
params = {'version': version}
if (rollback is True):
params['rollback'] = 'previous'
data = json.dumps(clean_map(spec))
(await self.docker._query_json('services/{service_id}/update'.format(service_id=service_id), method='POST', data=data, params=params))
return True
|
Update a service.
If rollback is True image will be ignored.
Args:
service_id: ID or name of the service.
version: Version of the service that you want to update.
rollback: Rollback the service to the previous service spec.
Returns:
True if successful.
|
codesearchnet
|
def _generate_G_points(self, kpoint):
gpoints = []
for i in range(2 * self._nbmax[2] + 1):
i3 = i - 2 * self._nbmax[2] - 1 if i > self._nbmax[2] else i
for j in range(2 * self._nbmax[1] + 1):
j2 = j - 2 * self._nbmax[1] - 1 if j > self._nbmax[1] else j
for k in range(2 * self._nbmax[0] + 1):
k1 = k - 2 * self._nbmax[0] - 1 if k > self._nbmax[0] else k
G = np.array([k1, j2, i3])
v = kpoint + G
g = np.linalg.norm(np.dot(v, self.b))
E = g ** 2 / self._C
if E < self.encut:
gpoints.append(G)
return np.array(gpoints, dtype=np.float64)
|
Helper function to generate G-points based on nbmax.
This function iterates over possible G-point values and determines
if the energy is less than G_{cut}. Valid values are appended to
the output array. This function should not be called outside of
initialization.
Args:
kpoint (np.array): the array containing the current k-point value
Returns:
a list containing valid G-points
|
juraj-google-style
|
def Normalize(self, fraction=1.0):
if self.log:
raise ValueError("Pmf is under a log transform")
total = self.Total()
if total == 0.0:
raise ValueError('Normalize: total probability is zero.')
factor = float(fraction) / total
for x in self.d:
self.d[x] *= factor
return total
|
Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
|
juraj-google-style
|
def make_pixel_mask(image: 'torch.Tensor', output_size: Tuple[int, int]) -> 'torch.Tensor':
input_height, input_width = image.shape[-2:]
batch_size = image.size(0)
mask = torch.zeros((batch_size, *output_size), dtype=torch.long)
mask[:, :input_height, :input_width] = 1  # keep the batch dimension intact when marking valid pixels
return mask
|
Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
Args:
image (`torch.Tensor`):
Image to make the pixel mask for.
output_size (`Tuple[int, int]`):
Output size of the mask.
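A minimal usage sketch for make_pixel_mask above, using a random batched tensor:
import torch
images = torch.rand(2, 3, 20, 30)                     # (batch, channels, height, width)
mask = make_pixel_mask(images, output_size=(32, 32))  # shape (2, 32, 32)
# the top-left 20x30 region of each mask is 1, the padded remainder stays 0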
|
github-repos
|
def get_audience(self, audience_id):
audience = self.audience_id_map.get(audience_id)
if audience:
return audience
self.logger.error('Audience ID "%s" is not in datafile.' % audience_id)
self.error_handler.handle_error(exceptions.InvalidAudienceException((enums.Errors.INVALID_AUDIENCE_ERROR)))
|
Get audience object for the provided audience ID.
Args:
audience_id: ID of the audience.
Returns:
Dict representing the audience.
|
juraj-google-style
|
def get(self, branch='master', filename=''):
file_contents = ''
if self.runway_dir:
file_contents = self.local_file(filename=filename)
else:
file_contents = self.remote_file(branch=branch, filename=filename)
return file_contents
|
Retrieve _filename_ from GitLab.
Args:
branch (str): Git Branch to find file.
filename (str): Name of file to retrieve relative to root of Git
repository, or _runway_dir_ if specified.
Returns:
str: Contents of file.
|
juraj-google-style
|
def load_strain(self, strain_id, strain_genome_file):
strain_gp = GEMPRO(gem_name=strain_id, genome_path=strain_genome_file, write_protein_fasta_files=False)
self.strains.append(strain_gp)
return self.strains.get_by_id(strain_id)
|
Load a strain as a new GEM-PRO by its ID and associated genome file. Stored in the ``strains`` attribute.
Args:
strain_id (str): Strain ID
strain_genome_file (str): Path to strain genome file
|
codesearchnet
|
def SignFile(self, in_filename, out_filename=None):
if out_filename is None:
out_filename = "%s.signed" % in_filename
args = [
"-certs", self.cert, "-key", self.key, "-n", self.application, "-t",
"http:
"-in", in_filename, "-out", out_filename
]
try:
output_log = io.StringIO()
ossl = pexpect.spawn("osslsigncode", args)
ossl.logfile_read = output_log
ossl.expect("Enter PEM pass phrase")
ossl.sendline(self.password)
ossl.wait()
except pexpect.ExceptionPexpect:
output_log.seek(0)
logging.exception(output_log.read())
raise
if not os.path.exists(out_filename):
raise SigningError("Expected output %s not created" % out_filename)
try:
subprocess.check_call(["osslsigncode", "verify", "-in", out_filename])
except subprocess.CalledProcessError:
logging.exception("Bad signature verification on %s", out_filename)
raise SigningError("Bad signature verification on %s" % out_filename)
return out_filename
|
Sign a file using osslsigncode.
Args:
in_filename: file to read from
out_filename: file to output to, if none we output to the same filename as
the input with a .signed suffix.
Returns:
output filename string
Raises:
pexpect.ExceptionPexpect: if the expect invocation of osslsigncode fails.
SigningError: for signing failures.
|
juraj-google-style
|
def save_checkpoint(model, filename, optimizer=None, meta=None):
if (meta is None):
meta = {}
elif (not isinstance(meta, dict)):
raise TypeError('meta must be a dict or None, but got {}'.format(type(meta)))
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
mmcv.mkdir_or_exist(osp.dirname(filename))
if hasattr(model, 'module'):
model = model.module
checkpoint = {'meta': meta, 'state_dict': weights_to_cpu(model.state_dict())}
if (optimizer is not None):
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, filename)
|
Save checkpoint to file.
The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
``optimizer``. By default ``meta`` will contain version and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
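A minimal usage sketch, assuming mmcv and torch are installed; the toy model and checkpoint path below are only examples:
import torch.nn as nn
import torch.optim as optim
model = nn.Linear(4, 2)
optimizer = optim.SGD(model.parameters(), lr=0.1)
save_checkpoint(model, '/tmp/epoch_1.pth', optimizer=optimizer, meta={'epoch': 1})
# the saved dict contains the 'meta', 'state_dict' and 'optimizer' fields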
|
codesearchnet
|
def _check_sensor_platform_consistency(self, sensor):
ref_sensor = SENSORS.get(self.platform, None)
if (ref_sensor and (not (sensor == ref_sensor))):
logger.error('Sensor-Platform mismatch: {} is not a payload of {}. Did you choose the correct reader?'.format(sensor, self.platform))
|
Make sure sensor and platform are consistent
Args:
sensor (str) : Sensor name from YAML dataset definition
Note:
Logs an error (rather than raising) if they don't match.
|
codesearchnet
|
def read_config(config_filepath, logger=logging.getLogger('ProsperCommon')):
config_parser = configparser.ConfigParser(interpolation=ExtendedInterpolation(), allow_no_value=True, delimiters='=', inline_comment_prefixes=('#'))  # inline comment prefix assumed to be '#'
logger.debug('config_filepath=%s', config_filepath)
with open(config_filepath, 'r') as filehandle:
config_parser.read_file(filehandle)
return config_parser
|
fetch and parse config file
Args:
config_filepath (str): path to config file. abspath > relpath
logger (:obj:`logging.Logger`): logger to catch error msgs
|
codesearchnet
|
def Check(self, error, filename, linenum):
if Match('T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = (base_trigger * (2 ** _VerboseLevel()))
if (self.lines_in_function > trigger):
error_level = int(math.log((self.lines_in_function / base_trigger), 2))
if (error_level > 5):
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level, ('Small and focused functions are preferred: %s has %d non-comment lines (error triggered by exceeding %d lines).' % (self.current_function, self.lines_in_function, trigger)))
|
Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
|
codesearchnet
|
def instance_default(self, obj):
return self.property.themed_default(obj.__class__, self.name, obj.themed_values())
|
Get the default value that will be used for a specific instance.
Args:
obj (HasProps) : The instance to get the default value for.
Returns:
object
|
juraj-google-style
|
def exists(self, path: str) -> bool:
raise NotImplementedError
|
Check if the provided path exists on the FileSystem.
Args:
path: string path that needs to be checked.
Returns: boolean flag indicating if path exists
|
github-repos
|
def _eligible_features_from_example_handler(self, request):
features_list = inference_utils.get_eligible_features(self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)
return http_util.Respond(request, features_list, 'application/json')
|
Returns a list of JSON objects for each feature in the example.
Args:
request: A request for features.
Returns:
A list with a JSON object for each feature.
Numeric features are represented as {name: observedMin: observedMax:}.
Categorical features are represented as {name: samples:[]}.
|
codesearchnet
|
def plot_stacked_hist(self, key="wall_time", nmax=5, ax=None, **kwargs):
ax, fig, plt = get_ax_fig_plt(ax=ax)
mpi_rank = "0"
timers = self.timers(mpi_rank=mpi_rank)
n = len(timers)
names, values = [], []
rest = np.zeros(n)
for idx, sname in enumerate(self.section_names(ordkey=key)):
sections = self.get_sections(sname)
svals = np.asarray([s.__dict__[key] for s in sections])
if idx < nmax:
names.append(sname)
values.append(svals)
else:
rest += svals
names.append("others (nmax=%d)" % nmax)
values.append(rest)
ind = np.arange(n)
width = 0.35
colors = nmax * ['r', 'g', 'b', 'c', 'k', 'y', 'm']
bars = []
bottom = np.zeros(n)
for idx, vals in enumerate(values):
color = colors[idx]
bar = ax.bar(ind, vals, width, color=color, bottom=bottom)
bars.append(bar)
bottom += vals
ax.set_ylabel(key)
ax.set_title("Stacked histogram with the %d most important sections" % nmax)
ticks = ind + width / 2.0
labels = ["MPI=%d, OMP=%d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
ax.set_xticks(ticks)
ax.set_xticklabels(labels, rotation=15)
ax.legend([bar[0] for bar in bars], names, loc="best")
return fig
|
Plot stacked histogram of the different timers.
Args:
key: Keyword used to extract data from the timers. Only the first `nmax`
sections with the largest values are shown.
nmax: Maximum number of sections to show. Other entries are grouped together
in the `others` section.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure
|
juraj-google-style
|
def array(self):
url = '{}/{}'.format(__endpoint__, self.type.RESOURCE)
return RestClient.get(url, self.params)[self.type.RESOURCE]
|
Get all resources and return the result as an array
Returns:
array of str: Array of resources
|
codesearchnet
|
def createCategoryFilter(self, retina_name, filter_name, body, ):
resourcePath = '/classify/create_category_filter'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['filter_name'] = filter_name
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return category_filter.CategoryFilter(**response.json())
|
Create a category filter for the classifier.
Args:
filter_name, str: A unique name for the filter. (required)
body, FilterTrainingObject: The list of positive and negative (optional) example items. (required)
retina_name, str: The retina name (required)
Returns: CategoryFilter
|
juraj-google-style
|
class AlignVisionBlock(nn.Module):
def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, expand_ratio: int, kernel_size: int, drop_rate: float, id_skip: bool, adjust_padding: bool):
super().__init__()
self.expand_ratio = expand_ratio
self.expand = True if self.expand_ratio != 1 else False
expand_in_dim = in_dim * expand_ratio
if self.expand:
self.expansion = AlignVisionExpansionLayer(config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride)
self.depthwise_conv = AlignVisionDepthwiseLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, stride=stride, kernel_size=kernel_size, adjust_padding=adjust_padding)
self.squeeze_excite = AlignVisionSqueezeExciteLayer(config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand)
self.projection = AlignVisionFinalBlockLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, out_dim=out_dim, stride=stride, drop_rate=drop_rate, id_skip=id_skip)
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
embeddings = hidden_states
if self.expand_ratio != 1:
hidden_states = self.expansion(hidden_states)
hidden_states = self.depthwise_conv(hidden_states)
hidden_states = self.squeeze_excite(hidden_states)
hidden_states = self.projection(embeddings, hidden_states)
return hidden_states
|
This corresponds to the block module of the original EfficientNet vision encoder implementation.
Args:
config ([`AlignVisionConfig`]):
Model configuration class.
in_dim (`int`):
Number of input channels.
out_dim (`int`):
Number of output channels.
stride (`int`):
Stride size to be used in convolution layers.
expand_ratio (`int`):
Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
kernel_size (`int`):
Kernel size for the depthwise convolution layer.
drop_rate (`float`):
Dropout rate to be used in the final phase of each block.
id_skip (`bool`):
Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
of each block. Set to `True` for the first block of each stage.
adjust_padding (`bool`):
Whether to apply padding to only right and bottom side of the input kernel before the depthwise convolution
operation, set to `True` for inputs with odd input sizes.
|
github-repos
|
def trace_buffer_capacity(self):
cmd = enums.JLinkTraceCommand.GET_CONF_CAPACITY
data = ctypes.c_uint32(0)
res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))
if (res == 1):
raise errors.JLinkException('Failed to get trace buffer size.')
return data.value
|
Retrieves the trace buffer's current capacity.
Args:
self (JLink): the ``JLink`` instance.
Returns:
The current capacity of the trace buffer. This is not necessarily
the maximum possible size the buffer could be configured with.
|
codesearchnet
|
def filter(self, filter_fn=None, desc=None, **kwargs):
if ((filter_fn is not None) and kwargs):
raise TypeError('Must supply either a filter_fn or attribute filter parameters to filter(), but not both.')
if ((filter_fn is None) and (not kwargs)):
raise TypeError('Must supply one of filter_fn or one or more attribute filter parameters to filter().')
if (desc is None):
if (filter_fn is not None):
desc = getattr(filter_fn, '__name__', '')
elif kwargs:
desc = u', '.join([u'{}={!r}'.format(key, value) for (key, value) in kwargs.items()])
desc = u'filter({})'.format(desc)
if kwargs:
def filter_fn(elem):
return all(((getattr(elem, filter_key) == filter_value) for (filter_key, filter_value) in kwargs.items()))
return self.transform((lambda xs: (x for x in xs if filter_fn(x))), desc=desc)
|
Return a copy of this query, with some values removed.
Example usages:
.. code:: python
# Returns a query that matches even numbers
q.filter(filter_fn=lambda x: x % 2)
# Returns a query that matches elements with el.description == "foo"
q.filter(description="foo")
Keyword Args:
filter_fn (callable): If specified, a function that accepts one argument (the element)
and returns a boolean indicating whether to include that element in the results.
kwargs: Specify attribute values that an element must have to be included in the results.
desc (str): A description of the filter, for use in log messages.
Defaults to the name of the filter function or attribute.
Raises:
TypeError: neither or both of `filter_fn` and `kwargs` are provided.
|
codesearchnet
|
def format_ascii(sensor_graph):
cmdfile = CommandFile('Sensor Graph', '1.0')
cmdfile.add('set_online', False)
cmdfile.add('clear')
cmdfile.add('reset')
for node in sensor_graph.dump_nodes():
cmdfile.add('add_node', node)
for streamer in sensor_graph.streamers:
other = 255
if (streamer.with_other is not None):
other = streamer.with_other
args = [streamer.selector, streamer.dest, streamer.automatic, streamer.format, streamer.report_type, other]
cmdfile.add('add_streamer', *args)
for (stream, value) in sorted(sensor_graph.constant_database.items(), key=(lambda x: x[0].encode())):
cmdfile.add('push_reading', stream, value)
cmdfile.add('persist')
cmdfile.add('set_online', True)
return cmdfile.dump()
|
Format this sensor graph as a loadable ascii file format.
This includes commands to reset and clear previously stored
sensor graphs.
NB. This format does not include any required configuration
variables that were specified in this sensor graph, so you
should also output that information separately in, e.g.
the config format.
Args:
sensor_graph (SensorGraph): the sensor graph that we want to format
Returns:
str: The ascii output lines concatenated as a single string
|
codesearchnet
|
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, data_type, use_gpu, grouped_conv=False, data_format='NHWC', dilations=None, tolerance=None):
input_size = 1
filter_size = 1
for s in tensor_in_sizes:
input_size *= s
for s in filter_in_sizes:
filter_size *= s
x1 = [f * 1.0 / input_size for f in range(1, input_size + 1)]
x1 = np.array(x1).reshape(tensor_in_sizes)
x2 = [f * 1.0 / filter_size for f in range(1, filter_size + 1)]
x2 = np.array(x2).reshape(filter_in_sizes)
strides = [1, stride, stride, 1]
if isinstance(padding, list):
padding = [(0, 0)] + padding + [(0, 0)]
np_result = _DepthwiseConv2dNumpy(x1, x2, strides, padding, 'NHWC', dilations)
ops.reset_default_graph()
graph = ops.get_default_graph()
with self.session(graph=graph, use_gpu=use_gpu) as sess:
tolerance = tolerance or {dtypes.float16: 0.04, dtypes.float32: 1e-05, dtypes.float64: 1e-12, dtypes.bfloat16: 0.01}[data_type]
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=data_type)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=data_type)
if data_format == 'NCHW':
t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
if isinstance(padding, list):
padding = [padding[0], padding[3], padding[1], padding[2]]
if dilations is None:
with sess.graph._kernel_label_map({'DepthwiseConv2dNative': 'cudnn_grouped_convolution'} if grouped_conv else {}):
conv_native = nn_ops.depthwise_conv2d_native(t1, t2, strides=strides, data_format=data_format, padding=padding)
if data_format == 'NCHW':
conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
try:
native_result = self.evaluate(conv_native)
except errors.InvalidArgumentError as e:
if "No OpKernel was registered to support Op 'DepthwiseConv2dNative'" in e.message:
tf_logging.warn('Skipping grouped convolution test')
return
raise e
conv_interface = nn_impl.depthwise_conv2d(t1, t2, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
if data_format == 'NCHW':
conv_interface = array_ops.transpose(conv_interface, [0, 2, 3, 1])
interface_result = self.evaluate(conv_interface)
if dilations is None:
self.assertAllClose(native_result, np_result, atol=tolerance, rtol=tolerance)
self.assertAllClose(interface_result, np_result, atol=tolerance, rtol=tolerance)
|
Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols,
input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
data_type: The data type to use.
use_gpu: Whether to use GPU.
grouped_conv: Whether to use cuDNN 7's grouped convolution.
data_format: The data_format of the input. "NHWC" or "NCHW".
dilations: A list of 2 elements, representing the dilations.
tolerance: The absolute and relative tolerance when verifying the output.
|
github-repos
|
def install_event_handlers(self, categories=None, handlers=None):
if categories is not None and handlers is not None:
raise ValueError("categories and handlers are mutually exclusive!")
from .events import get_event_handler_classes
if categories:
raise NotImplementedError()
handlers = [cls() for cls in get_event_handler_classes(categories=categories)]
else:
handlers = handlers or [cls() for cls in get_event_handler_classes()]
self._event_handlers = handlers
|
Install the `EventHandler`s for this `Node`. If no argument is provided
the default list of handlers is installed.
Args:
categories: List of categories to install e.g. base + can_change_physics
handlers: explicit list of :class:`EventHandler` instances.
This is the most flexible way to install handlers.
.. note::
categories and handlers are mutually exclusive.
|
juraj-google-style
|
def remove_bond(self, idx1, idx2):
for obbond in ob.OBMolBondIter(self._obmol):
if (((obbond.GetBeginAtomIdx() == idx1) and (obbond.GetEndAtomIdx() == idx2)) or ((obbond.GetBeginAtomIdx() == idx2) and (obbond.GetEndAtomIdx() == idx1))):
self._obmol.DeleteBond(obbond)
|
Remove a bond from an openbabel molecule
Args:
idx1: The atom index of one of the atoms participating the in bond
idx2: The atom index of the other atom participating in the bond
|
codesearchnet
|
def convert_videos_to_summaries(input_videos, output_videos, target_videos,
tag, decode_hparams,
display_ground_truth=False):
fps = decode_hparams.frames_per_second
border_percent = decode_hparams.border_percent
max_outputs = decode_hparams.max_display_outputs
target_steps = target_videos.shape[1]
all_summaries = []
input_videos = create_border(
input_videos, color="blue", border_percent=border_percent)
target_videos = create_border(
target_videos, color="red", border_percent=border_percent)
output_videos = create_border(
output_videos, color="red", border_percent=border_percent)
all_input = np.concatenate((input_videos, target_videos), axis=1)
all_output = np.concatenate((input_videos, output_videos), axis=1)
output_summ_vals, _ = common_video.py_gif_summary(
"%s/output" % tag, all_output, max_outputs=max_outputs, fps=fps,
return_summary_value=True)
all_summaries.extend(output_summ_vals)
if display_ground_truth:
input_summ_vals, _ = common_video.py_gif_summary(
"%s/input" % tag, all_input, max_outputs=max_outputs, fps=fps,
return_summary_value=True)
all_summaries.extend(input_summ_vals)
iterable = zip(output_videos[:max_outputs, :target_steps],
target_videos[:max_outputs])
for ind, (input_video, output_video) in enumerate(iterable):
t, h, w, c = input_video.shape
input_frames = np.reshape(input_video, (t*h, w, c))
output_frames = np.reshape(output_video, (t*h, w, c))
all_frames = np.concatenate((input_frames, output_frames), axis=1)
tag = "input/output/%s_sample_%d" % (tag, ind)
frame_by_frame_summ = image_utils.image_to_tf_summary_value(
all_frames, tag=tag)
all_summaries.append(frame_by_frame_summ)
return all_summaries
|
Converts input, output and target videos into video summaries.
Args:
input_videos: 5-D NumPy array, (NTHWC) conditioning frames.
output_videos: 5-D NumPy array, (NTHWC) model predictions.
target_videos: 5-D NumPy array, (NTHWC) target frames.
tag: tf summary tag.
decode_hparams: HParams.
display_ground_truth: Whether or not to display ground truth videos.
Returns:
summaries: a list of tf frame-by-frame and video summaries.
|
juraj-google-style
|
def generate_json_schema(cls, schema, context=DEFAULT_DICT):
schema = cls._get_schema(schema)
return cls(context=context).dump(schema).data
|
Generate a JSON Schema from a Marshmallow schema.
Args:
schema (marshmallow.Schema|str): The Marshmallow schema, or the
Python path to one, to create the JSON schema for.
Keyword Args:
context (dict, optional): The Marshmallow context to use when dumping
the schema. Defaults to an empty dict.
Returns:
dict: The JSON schema in dictionary form.
|
codesearchnet
|
def _clean_url(url):
if (url == 'default'):
url = DEFAULT_SERVER_HTTP_URL
if url.startswith('ws'):
raise ValueError('url should be the http or https URL for the server, not the websocket URL')
return url.rstrip('/')
|
Produce a canonical Bokeh server URL.
Args:
url (str)
A URL to clean, or "defatul". If "default" then the
``BOKEH_SERVER_HTTP_URL`` will be returned.
Returns:
str
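A few illustrative calls for _clean_url above:
_clean_url('http://localhost:5006/')   # -> 'http://localhost:5006'
_clean_url('default')                  # -> DEFAULT_SERVER_HTTP_URL
_clean_url('ws://localhost:5006')      # raises ValueError (websocket URLs are rejected)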
|
codesearchnet
|
def encode(self, tf_graph_predictions):
row = []
for col in self._header:
row.append(str(tf_graph_predictions[col]))
return ','.join(row)
|
Encodes the graph json prediction into csv.
Args:
tf_graph_predictions: python dict.
Returns:
csv string.
|
juraj-google-style
|
def lookup_value(self, api_name, key):
if api_name in self._cache:
return self._cache[api_name].get(key, None)
return None
|
Look up the cached value of an API call.
Args:
api_name: a string name of the API. Keys and values are segmented by api_name.
key: a string key for the specific call.
|
juraj-google-style
|
def make_triple(sub, pred, obj):
return "{s} {p} {o} .".format(s=sub, p=pred, o=obj)
|
Takes a subject predicate and object and joins them with a space
in between
Args:
sub -- Subject
pred -- Predicate
obj -- Object
Returns
str
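A short usage sketch of make_triple above, with example N-Triples terms:
triple = make_triple('<http://example.org/book>',
                     '<http://purl.org/dc/terms/title>',
                     '"A Title"')
# -> '<http://example.org/book> <http://purl.org/dc/terms/title> "A Title" .'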
|
juraj-google-style
|
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
|
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A ConvBERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
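A usage sketch for build_inputs_with_special_tokens above; the checkpoint name is an assumption and any ConvBERT tokenizer would do:
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('YituTech/conv-bert-base')
ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('hello world'))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('how are you'))
pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
# pair == [CLS] + ids_a + [SEP] + ids_b + [SEP], expressed as token IDs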
|
github-repos
|
def to_sql(self, view: views.View, limit: Optional[int]=None) -> str:
encoder = _spark_interpreter.SparkSqlInterpreter(value_set_codes_table='VALUESET_VIEW')
dataset = f'{self._fhir_dataset}'
sql_generator = runner_utils.RunnerSqlGenerator(view, encoder, dataset, self._snake_case_resource_tables)
sql_statement = sql_generator.build_sql_statement()
valuesets_clause = sql_generator.build_valueset_expression(self._value_set_codes_table)
if limit is not None and limit < 1:
raise ValueError('Query limits must be positive integers.')
limit_clause = '' if limit is None else f' LIMIT {limit}'
return f'{valuesets_clause}{sql_statement}{limit_clause}'
|
Returns the SQL used to run the given view in Spark.
Args:
view: the view used to generate the SQL.
limit: optional limit to attach to the generated SQL.
Returns:
The SQL used to run the given view.
|
github-repos
|
def copy_workspace(self, uri, new_name):
payload = {'isPublic': True, 'newName': new_name}
return self._api.request('post', (((('/api/documents/' + uri['did']) + '/workspaces/') + uri['wvm']) + '/copy'), body=payload)
|
Copy the current workspace.
Args:
- uri (dict): the uri of the workspace being copied. Needs to have a did and wid key.
- new_name (str): the new name of the copied workspace.
Returns:
- requests.Response: Onshape response data
|
codesearchnet
|
def compile_keywords(keywords):
mdt = []
cz_keywords = []
en_keywords = []
for keyword in keywords:
keyword = keyword_to_info(keyword.encode('utf-8'))
if (not keyword):
continue
cz_keywords.append({'uid': keyword['uid'], 'zahlavi': keyword['zahlavi'], 'zdroj': 'czenas'})
if keyword.get('mdt'):
mdt.append({'mdt': keyword['mdt'], 'mrf': keyword['mrf']})
angl_ekvivalent = keyword.get('angl_ekvivalent')
if angl_ekvivalent:
en_keywords.append({'zahlavi': angl_ekvivalent, 'zdroj': (keyword.get('zdroj_angl_ekvivalentu') or 'eczenas')})
return (mdt, cz_keywords, en_keywords)
|
Translate `keywords` to full keyword records as they are used in Aleph.
Returns tuple with three lists, each of which is later used in different
part of the MRC/MARC record.
Args:
keywords (list): List of keyword strings.
Returns:
tuple: (mdt_list, cz_keyword_list, en_keyword_list)
|
codesearchnet
|
def GetVShadowStoreByPathSpec(self, path_spec):
store_index = vshadow.VShadowPathSpecGetStoreIndex(path_spec)
if (store_index is None):
return None
return self._vshadow_volume.get_store(store_index)
|
Retrieves a VSS store for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pyvshadow.store: a VSS store or None if not available.
|
codesearchnet
|
def add_string_parameters(self, string):
if isinstance(string, list):
for x in string:
self.add_string_parameters(x)
return
self._parameters.append((('{ "value": "' + string) + '" }'))
|
Add given string parameters to the internal list.
Args:
string (list of str or str): A string or list of strings to add to the parameters.
|
codesearchnet
|
def file_digest(source):
hash_sha256 = hashlib.sha256()
should_close = False
if isinstance(source, six.string_types):
should_close = True
source = open(source, 'rb')
for chunk in iter((lambda : source.read(_BUFFER_SIZE)), b''):
hash_sha256.update(chunk)
if should_close:
source.close()
return hash_sha256.hexdigest()
|
Calculates SHA256 digest of a file.
Args:
source: either a file-like object or a path to file
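A short usage sketch of file_digest above; the file name is only an example:
digest_from_path = file_digest('model.tar.gz')
with open('model.tar.gz', 'rb') as fh:
    digest_from_handle = file_digest(fh)
assert digest_from_path == digest_from_handle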
|
codesearchnet
|
def _new_open_bin(self, remaining_rect):
factories_to_delete = set()
new_bin = None
for key, binfac in self._empty_bins.items():
a_rectangle_fits = False
for _, rect in remaining_rect.items():
if binfac.fits_inside(rect[0], rect[1]):
a_rectangle_fits = True
break
if not a_rectangle_fits:
factories_to_delete.add(key)
continue
new_bin = binfac.new_bin()
if new_bin is None:
continue
self._open_bins.append(new_bin)
if binfac.is_empty():
factories_to_delete.add(key)
break
for f in factories_to_delete:
del self._empty_bins[f]
return new_bin
|
Extract the next bin where at least one of the rectangles in
remaining_rect fits.
Arguments:
remaining_rect (dict): rectangles not placed yet
Returns:
PackingAlgorithm: Initialized empty packing bin.
None: No bin big enough for the rectangle was found
|
juraj-google-style
|
def find_element(driver, elem_path, by=CSS, timeout=TIMEOUT, poll_frequency=0.5):
wait = WebDriverWait(driver, timeout, poll_frequency)
return wait.until(EC.presence_of_element_located((by, elem_path)))
|
Find and return an element once located
find_element locates an element on the page, waiting
for up to timeout seconds. The element, when located,
is returned. If not located, a TimeoutException is raised.
Args:
driver (selenium webdriver or element): A driver or element
elem_path (str): String used to located the element
by (selenium By): Selenium By reference
timeout (int): Selenium Wait timeout, in seconds
poll_frequency (float): Selenium Wait polling frequency, in seconds
Returns:
element: Selenium element
Raises:
TimeoutException: Raised when target element isn't located
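A hedged usage sketch for find_element above; it assumes a local browser driver is available and uses selenium's By constants for the `by` argument:
from selenium import webdriver
from selenium.webdriver.common.by import By
driver = webdriver.Firefox()
driver.get('https://example.com')
heading = find_element(driver, 'h1', by=By.CSS_SELECTOR, timeout=10)
print(heading.text)
driver.quit()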
|
codesearchnet
|
def create_issues_report(self, timeout=(- 1)):
uri = '{}/issues/'.format(self.data['uri'])
return self._helper.create_report(uri, timeout)
|
Creates an unexpected zoning report for a SAN.
Args:
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
list: A list of FCIssueResponse dict.
|
codesearchnet
|
def save_features(self, train_features, test_features, feature_names, feature_list_id):
self.save_feature_names(feature_names, feature_list_id)
self.save_feature_list(train_features, 'train', feature_list_id)
self.save_feature_list(test_features, 'test', feature_list_id)
|
Save features for the training and test sets to disk, along with their metadata.
Args:
train_features: A NumPy array of features for the training set.
test_features: A NumPy array of features for the test set.
feature_names: A list containing the names of the feature columns.
feature_list_id: The name for this feature list.
|
juraj-google-style
|
def channel_interpolate(layer1, n_channel1, layer2, n_channel2):
def inner(T):
batch_n = T(layer1).get_shape().as_list()[0]
arr1 = T(layer1)[(..., n_channel1)]
arr2 = T(layer2)[(..., n_channel2)]
weights = (np.arange(batch_n) / float((batch_n - 1)))
S = 0
for n in range(batch_n):
S += ((1 - weights[n]) * tf.reduce_mean(arr1[n]))
S += (weights[n] * tf.reduce_mean(arr2[n]))
return S
return inner
|
Interpolate between layer1, n_channel1 and layer2, n_channel2.
Optimize for a convex combination of layer1, n_channel1 and
layer2, n_channel2, transitioning across the batch.
Args:
layer1: layer to optimize 100% at batch=0.
n_channel1: neuron index to optimize 100% at batch=0.
layer2: layer to optimize 100% at batch=N.
n_channel2: neuron index to optimize 100% at batch=N.
Returns:
Objective
|
codesearchnet
|
def add_cohp_dict(self, cohp_dict, key_sort_func=None):
if key_sort_func:
keys = sorted(cohp_dict.keys(), key=key_sort_func)
else:
keys = cohp_dict.keys()
for label in keys:
self.add_cohp(label, cohp_dict[label])
|
Adds a dictionary of COHPs with an optional sorting function
for the keys.
Args:
cohp_dict: dict of the form {label: Cohp}
key_sort_func: function used to sort the cohp_dict keys.
|
codesearchnet
|
def returns(desc=None, printer=None, data=True):
if data is False:
raise ArgumentError("Specifying non data return type in returns is no longer supported")
def _returns(func):
annotated(func)
func.custom_returnvalue(printer, desc)
return func
return _returns
|
Specify how the return value of this function should be handled.
Args:
desc (str): A deprecated description of the return value
printer (callable): A callable function that can format this return value
data (bool): A deprecated parameter for specifying that this function
returns data.
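A minimal usage sketch of the returns decorator above; it assumes the surrounding typedargs-style framework (annotated, custom_returnvalue) is importable:
@returns(desc='current battery voltage', printer=lambda v: '{:.2f} V'.format(v))
def read_voltage():
    return 3.3012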
|
juraj-google-style
|
def recursive_copy(source, destination):
if os.path.isdir(source):
copy_tree(source, destination)
|
A wrapper around distutils.dir_util.copy_tree but won't throw any exception when the source
directory does not exist.
Args:
source (str): source path
destination (str): destination path
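Two illustrative calls for recursive_copy above (the paths are only examples):
recursive_copy('build/assets', '/opt/output/assets')  # copies the tree if 'build/assets' exists
recursive_copy('does_not_exist', '/tmp/out')          # silently does nothing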
|
juraj-google-style
|
def update_ports(self, ports, id_or_uri, timeout=-1):
resources = merge_default_values(ports, {'type': 'port'})
uri = self._client.build_uri(id_or_uri) + "/update-ports"
return self._client.update(resources, uri, timeout)
|
Updates the interconnect ports.
Args:
id_or_uri: Can be either the interconnect id or the interconnect uri.
ports (list): Ports to update.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: The interconnect.
|
juraj-google-style
|
def google_maps_geoloc_link(data):
if isinstance(data, str):
lat_lon = ip_geoloc(data)
if (lat_lon is None):
return ''
(lat, lon) = lat_lon
else:
(lat, lon) = data
loc = ('%s,%s' % (lat, lon))
return ('https:
|
Get a link to google maps pointing on this IP's geolocation.
Args:
data (str/tuple): IP address or (latitude, longitude).
Returns:
str: a link to google maps pointing on this IP's geolocation.
|
codesearchnet
|
def put_content(self, url, content):
cache_path = self._url_to_path(url)
try:
dir = os.path.dirname(cache_path)
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise Error('Failed to create cache directories for %s' % cache_path)
try:
with open(cache_path, 'wb') as f:
f.write(content)
except IOError:
raise Error('Failed to cache content as %s for %s' % (cache_path, url))
|
Stores the content of a resource into the disk cache.
Args:
url: The url of the resource
content: The content of the resource
Raises:
CacheError: If the content cannot be put in cache
|
juraj-google-style
|
def renew(self, requested_timeout=None):
if self._has_been_unsubscribed:
raise SoCoException('Cannot renew subscription once unsubscribed')
if (not self.is_subscribed):
raise SoCoException('Cannot renew subscription before subscribing')
if (self.time_left == 0):
raise SoCoException('Cannot renew subscription after expiry')
headers = {'SID': self.sid}
if (requested_timeout is None):
requested_timeout = self.requested_timeout
if (requested_timeout is not None):
headers['TIMEOUT'] = 'Second-{}'.format(requested_timeout)
response = requests.request('SUBSCRIBE', (self.service.base_url + self.service.event_subscription_url), headers=headers)
response.raise_for_status()
timeout = response.headers['timeout']
if (timeout.lower() == 'infinite'):
self.timeout = None
else:
self.timeout = int(timeout.lstrip('Second-'))
self._timestamp = time.time()
self.is_subscribed = True
log.info('Renewed subscription to %s, sid: %s', (self.service.base_url + self.service.event_subscription_url), self.sid)
|
Renew the event subscription.
You should not try to renew a subscription which has been
unsubscribed, or once it has expired.
Args:
requested_timeout (int, optional): The period for which a renewal
request should be made. If None (the default), use the timeout
requested on subscription.
|
codesearchnet
|
def depth(script, iterations=3, viewpoint=(0, 0, 0), selected=False):
filter_xml = ''.join([' <filter name="Depth Smooth">\n', ' <Param name="stepSmoothNum" ', 'value="{:d}" '.format(iterations), 'description="Smoothing steps" ', 'type="RichInt" ', '/>\n', ' <Param name="viewPoint" ', 'x="{}" '.format(viewpoint[0]), 'y="{}" '.format(viewpoint[1]), 'z="{}" '.format(viewpoint[2]), 'description="Smoothing steps" ', 'type="RichPoint3f" ', '/>\n', ' <Param name="Selected" ', 'value="{}" '.format(str(selected).lower()), 'description="Affect only selected faces" ', 'type="RichBool" ', '/>\n', ' </filter>\n'])
util.write_filter(script, filter_xml)
return None
|
A laplacian smooth that is constrained to move vertices only along the
view direction.
Args:
script: the FilterScript object or script filename to write
the filter to.
iterations (int): The number of times that the whole algorithm (normal
smoothing + vertex fitting) is iterated.
viewpoint (vector tuple or list): The position of the view point that
is used to get the constraint direction.
selected (bool): If selected the filter is performed only on the
selected faces
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
|
codesearchnet
|
def format_search_results(self, search_results):
formatted_lines = []
for search_result in search_results:
lines = self._format_search_result(search_result)
formatted_lines.extend(lines)
return formatted_lines
|
Format search results.
Args:
search_results (list of `ResourceSearchResult`): Search to format.
Returns:
List of 2-tuple: Text and color to print in.
|
codesearchnet
|
def set_message(self, title, msg, typ, url=None):
return self.user.send_notification(title=title,
message=msg,
typ=typ,
url=url)
|
Sets user notification message.
Args:
title: Msg. title
msg: Msg. text
typ: Msg. type
url: Additional URL (if exists)
Returns:
Message ID.
|
juraj-google-style
|
def _AssertValidators(self, validators):
for validator in sorted(validators, key=(lambda validator: validator.insertion_index)):
try:
validator.verify(self)
except exceptions.ValidationError as e:
message = validator.print_flags_with_values(self)
raise exceptions.IllegalFlagValueError(('%s: %s' % (message, str(e))))
|
Assert if all validators in the list are satisfied.
Asserts validators in the order they were created.
Args:
validators: Iterable(validators.Validator), validators to be
verified
Raises:
AttributeError: if validators work with a non-existing flag.
IllegalFlagValueError: if validation fails for at least one validator
|
codesearchnet
|
def validate_to_schema(nanopub, schema) -> Tuple[(bool, List[Tuple[(str, str)]])]:
v = jsonschema.Draft4Validator(schema)
messages = []
errors = sorted(v.iter_errors(nanopub), key=(lambda e: e.path))
for error in errors:
for suberror in sorted(error.context, key=(lambda e: e.schema_path)):
print(list(suberror.schema_path), suberror.message, sep=', ')
messages.append(('ERROR', suberror.message))
is_valid = True
if errors:
is_valid = False
return (is_valid, messages)
|
Validate nanopub against jsonschema for nanopub
Args:
nanopub (Mapping[str, Any]): nanopub dict
schema (Mapping[str, Any]): nanopub schema
Returns:
Tuple[bool, List[str]]:
bool: Is valid? Yes = True, No = False
List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('Error|Warning', msg)
e.g. [('ERROR', "'subject' is a required property")]
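A hedged usage sketch for validate_to_schema above; the schema path and the toy nanopub dict are assumptions:
import json
with open('nanopub_schema.json') as fh:
    schema = json.load(fh)
nanopub = {'nanopub': {'assertions': []}}
is_valid, messages = validate_to_schema(nanopub, schema)
for level, msg in messages:
    print(level, msg)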
|
codesearchnet
|
def all_sum(tensors):
return _apply_all_reduce('sum', tensors)
|
Returns a list of tensors with the all-reduce sum across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to sum; must be assigned
to GPU devices.
Returns:
List of tensors, each with the sum of the input tensors, where tensor i has
the same device as `tensors[i]`.
|
github-repos
|
def on_predict_begin(self, logs=None):
|
Called at the beginning of prediction.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
|
github-repos
|
def optimize_for_inference(input_graph_def: graph_pb2.GraphDef, input_node_names: Sequence[str], output_node_names: Sequence[str], placeholder_type_enum: int, toco_compatible: bool=False, placeholder_to_const_names=None) -> graph_pb2.GraphDef:
ensure_graph_is_valid(input_graph_def)
optimized_graph_def = input_graph_def
optimized_graph_def = convert_placeholder_to_const(optimized_graph_def, placeholder_to_const_names)
optimized_graph_def = strip_unused_lib.strip_unused(optimized_graph_def, input_node_names, output_node_names, placeholder_type_enum)
optimized_graph_def = graph_util.remove_training_nodes(optimized_graph_def, output_node_names)
optimized_graph_def = fuse_decomposed_batch_norm(optimized_graph_def)
optimized_graph_def = fold_batch_norms(optimized_graph_def)
if not toco_compatible:
optimized_graph_def = fuse_resize_and_conv(optimized_graph_def, output_node_names)
ensure_graph_is_valid(optimized_graph_def)
return optimized_graph_def
|
Applies a series of inference optimizations on the input graph.
Args:
input_graph_def: A GraphDef containing a training model.
input_node_names: A list of names of the nodes that are fed inputs during
inference.
output_node_names: A list of names of the nodes that produce the final
results.
placeholder_type_enum: The AttrValue enum for the placeholder data type, or
a list that specifies one value per input node name.
toco_compatible: Boolean, if True, only runs optimizations that result in
TOCO compatible graph operations (default=False).
placeholder_to_const_names: A list of names of the PlaceholderWithDefault
nodes to be converted to Constant.
Returns:
An optimized version of the input graph.
|
github-repos
|
def _normalize_mlengine_job_id(job_id):
match = re.search('\\d|\\{{2}', job_id)
if (match and (match.start() == 0)):
job = 'z_{}'.format(job_id)
else:
job = job_id
tracker = 0
cleansed_job_id = ''
for m in re.finditer('\\{{2}.+?\\}{2}', job):
cleansed_job_id += re.sub('[^0-9a-zA-Z]+', '_', job[tracker:m.start()])
cleansed_job_id += job[m.start():m.end()]
tracker = m.end()
cleansed_job_id += re.sub('[^0-9a-zA-Z]+', '_', job[tracker:])
return cleansed_job_id
|
Replaces invalid MLEngine job_id characters with '_'.
This also adds a leading 'z' in case job_id starts with an invalid
character.
Args:
job_id: A job_id str that may have invalid characters.
Returns:
A valid job_id representation.
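Two illustrative calls for the normalizer above:
_normalize_mlengine_job_id('my-job.2018-01-01')  # -> 'my_job_2018_01_01'
_normalize_mlengine_job_id('2018_job')           # -> 'z_2018_job' (leading digit triggers the 'z_' prefix)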
|
codesearchnet
|
def get_array_from_hist2D(hist: Hist, set_zero_to_NaN: bool=True, return_bin_edges: bool=False) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]:
    shape = (hist.GetYaxis().GetNbins(), hist.GetXaxis().GetNbins())
    hist_array = np.array([hist.GetBinContent(x) for x in range(1, hist.GetNcells()) if ((not hist.IsBinUnderflow(x)) and (not hist.IsBinOverflow(x)))])
    hist_array = hist_array.reshape(shape)
    hist_array = hist_array.T
    if set_zero_to_NaN:
        hist_array[(hist_array == 0)] = np.nan
    if return_bin_edges:
        x_bin_edges = get_bin_edges_from_axis(hist.GetXaxis())
        y_bin_edges = get_bin_edges_from_axis(hist.GetYaxis())
        epsilon = 1e-09
        x_range = np.arange(np.amin(x_bin_edges), (np.amax(x_bin_edges) + epsilon), hist.GetXaxis().GetBinWidth(1))
        y_range = np.arange(np.amin(y_bin_edges), (np.amax(y_bin_edges) + epsilon), hist.GetYaxis().GetBinWidth(1))
    else:
        x_range = np.array([hist.GetXaxis().GetBinCenter(i) for i in range(1, (hist.GetXaxis().GetNbins() + 1))])
        y_range = np.array([hist.GetYaxis().GetBinCenter(i) for i in range(1, (hist.GetYaxis().GetNbins() + 1))])
    (X, Y) = np.meshgrid(x_range, y_range)
    return (X, Y, hist_array)
|
Extract x, y, and bin values from a 2D ROOT histogram.
Converts the histogram into a numpy array, and suitably processes it for a surface plot
by removing 0s (which can cause problems when taking logs), and returning a set of (x, y) mesh
values utilizing either the bin edges or bin centers.
Note:
This is a different format than the 1D version!
Args:
hist (ROOT.TH2): Histogram to be converted.
set_zero_to_NaN: If true, set 0 in the array to NaN. Useful with matplotlib so that it will
ignore the values when plotting. See comments in this function for more details. Default: True.
return_bin_edges: Return x and y using bin edges instead of bin centers.
Returns:
Contains (x values, y values, numpy array of hist data) where (x, y) are values on a
grid (from np.meshgrid) using the selected bin values.
|
codesearchnet
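A hedged usage sketch, assuming PyROOT is installed and that `get_array_from_hist2D` above (plus its helper `get_bin_edges_from_axis`) has been pasted into or imported by the same script; the toy histogram and its fill values are made up.

import ROOT

# Toy 2D histogram (illustrative binning and values only).
hist = ROOT.TH2F('h2', 'toy', 10, 0.0, 1.0, 20, 0.0, 2.0)
for _ in range(1000):
    hist.Fill(ROOT.gRandom.Uniform(0.0, 1.0), ROOT.gRandom.Uniform(0.0, 2.0))

# Assumes the function above is in scope; empty bins come back as NaN.
X, Y, hist_array = get_array_from_hist2D(hist, set_zero_to_NaN=True)
print(X.shape, Y.shape, hist_array.shape)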
|
def __init__(self, metagraph, ignore_colocation=True, ignore_user_placement=False):
    self._metagraph = metagraph
    self._item_graph = meta_graph_pb2.MetaGraphDef()
    self._item_graph.CopyFrom(metagraph)
    self._ignore_colocation = ignore_colocation
    self._ignore_user_placement = ignore_user_placement
    self._tf_item = None
    self._BuildTFItem()
|
Creates an Item.
Args:
metagraph: a TensorFlow metagraph.
ignore_colocation: if set, the tool will ignore all the colocation
constraints generated by TensorFlow.
ignore_user_placement: if set, all the placement annotations annotated in
the metagraph will be ignored.
Raises:
ValueError: the metagraph is incomplete or invalid.
|
github-repos
|
def register(self, token, regexp):
    self._tokens.append((token, re.compile(regexp)))
|
Register a token.
Args:
token (Token): the token class to register
regexp (str): the regexp for that token
|
juraj-google-style
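A minimal, self-contained sketch of the registration pattern above: a tiny lexer keeps an ordered `_tokens` list of `(token, compiled_regexp)` pairs and tries them in registration order. The `SimpleLexer` class, its `first_match` helper, and the token names are hypothetical, not part of the original.

import re

class SimpleLexer:
    def __init__(self):
        self._tokens = []

    def register(self, token, regexp):
        # Same pattern as the entry above: store the token with its compiled regexp.
        self._tokens.append((token, re.compile(regexp)))

    def first_match(self, text):
        # Try registered tokens in order; return the first that matches at position 0.
        for token, pattern in self._tokens:
            m = pattern.match(text)
            if m:
                return token, m.group(0)
        return None

lexer = SimpleLexer()
lexer.register('NUMBER', r'\d+')
lexer.register('NAME', r'[A-Za-z_]\w*')

print(lexer.first_match('42 apples'))   # ('NUMBER', '42')
print(lexer.first_match('apples 42'))   # ('NAME', 'apples')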
|
def __init__(self, value=None, length=0):
    super().__init__(value)
    self.length = length
    self._fmt = '!{}{}'.format(self.length, 's')
|
Create a Char with the optional parameters below.
Args:
value: The character to be build.
length (int): Character size.
|
juraj-google-style
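A short sketch of what the `'!{}s'` format built above does when packing: it produces a network-byte-order, fixed-length byte string, padded with NUL bytes. Pure standard-library `struct`, with made-up values.

import struct

length = 8
fmt = '!{}{}'.format(length, 's')  # same format string the constructor builds: '!8s'

packed = struct.pack(fmt, b'eth0')
print(packed)                      # b'eth0\x00\x00\x00\x00' -- padded to 8 bytes
print(struct.unpack(fmt, packed))  # (b'eth0\x00\x00\x00\x00',)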
|
def parse_vhdl_file(fname):
    with open(fname, 'rt') as fh:
        text = fh.read()
    return parse_vhdl(text)
|
Parse a named VHDL file
Args:
fname(str): Name of file to parse
Returns:
Parsed objects.
|
juraj-google-style
|
def heightmap_rain_erosion(hm: np.ndarray, nbDrops: int, erosionCoef: float, sedimentationCoef: float, rnd: Optional[tcod.random.Random]=None) -> None:
    lib.TCOD_heightmap_rain_erosion(_heightmap_cdata(hm), nbDrops, erosionCoef, sedimentationCoef, (rnd.random_c if rnd else ffi.NULL))
|
Simulate the effect of rain drops on the terrain, resulting in erosion.
``nbDrops`` should be at least hm.size.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
nbDrops (int): Number of rain drops to simulate.
erosionCoef (float): Amount of ground eroded on the drop's path.
sedimentationCoef (float): Amount of ground deposited when a drop
stops flowing.
rnd (Optional[Random]): A tcod.Random instance, or None.
|
codesearchnet
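A hedged usage sketch: heightmaps for these functions are 2D `float32` numpy arrays modified in place. The array size and coefficient values below are arbitrary, and it assumes `heightmap_rain_erosion` is reachable as `tcod.heightmap_rain_erosion` in a compatible python-tcod version.

import numpy as np
import tcod

# A noisy 64x64 heightmap (illustrative values only).
hm = np.random.rand(64, 64).astype(np.float32)

# nbDrops should be at least hm.size per the docstring above.
tcod.heightmap_rain_erosion(hm, nbDrops=hm.size, erosionCoef=0.05, sedimentationCoef=0.05, rnd=None)

print(hm.min(), hm.max())  # the terrain has been eroded in place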
|
def read_html_file(data_dir, fileroot, encoding=None):
    fname = os.path.join(data_dir, RAW_HTML_DIRNAME, (fileroot + RAW_HTML_EXT))
    encodings = ((encoding,) if encoding else ('utf-8', 'iso-8859-1'))
    for encoding in encodings:
        try:
            with io.open(fname, mode='rt', encoding=encoding) as f:
                raw_html = f.read()
            break
        except (UnicodeDecodeError, UnicodeError):
            raw_html = None
    return ftfy.fix_encoding(raw_html).strip()
|
Read the HTML file corresponding to identifier ``fileroot``
in the raw HTML directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
encoding (str)
Returns:
str
|
codesearchnet
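The interesting part of the entry above is the encoding-fallback loop; here is a small standalone sketch of that pattern (the function name, file name, and encodings are arbitrary). Note that in the original, if every candidate encoding fails, `raw_html` stays `None` and the final `ftfy.fix_encoding(...)` call would raise; the sketch below returns `None` instead.

import io

def read_text_with_fallback(fname, encodings=('utf-8', 'iso-8859-1')):
    # Try each encoding in turn; keep the first one that decodes cleanly.
    for encoding in encodings:
        try:
            with io.open(fname, mode='rt', encoding=encoding) as f:
                return f.read()
        except (UnicodeDecodeError, UnicodeError):
            continue
    return None  # every encoding failed

# text = read_text_with_fallback('page.html')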
|
def begin_scan(self, callback=None, interval=DEF_SCAN_INTERVAL, window=DEF_SCAN_WINDOW):
    logger.debug('configuring scan parameters')
    self.api.ble_cmd_gap_set_scan_parameters(interval, window, 1)
    self._set_state(self._STATE_CONFIGURE_SCAN)
    self.api.ble_cmd_gap_discover(1)
    self._wait_for_state(self._STATE_CONFIGURE_SCAN)
    logger.debug('starting async scan for devices')
    self.scan_targets = None
    self.scan_callback = callback
    self._set_state(self._STATE_SCANNING)
    return True
|
Begins a BLE scan and returns immediately.
Using this method you can begin a BLE scan and leave the dongle in scanning
mode in the background. It will remain in scanning mode until you call the
:meth:`end_scan` method or the :meth:`reset` method.
Args:
callback (callable): a callback that will be called for each new device
discovered by the scanning process. Will be passed a single argument,
a :class:`ScanResult` object. May be None if not needed.
interval (int): BLE scan interval, in units of 625us
window (int): BLE scan window, in units of 625us
Returns:
True on success, False otherwise.
|
codesearchnet
|
def split(input_file, file_1, file_2, no_in_first_file):
    with open(input_file) as f:
        feat_collection = geojson.load(f)
    features = feat_collection['features']
    feat_collection_1 = geojson.FeatureCollection(features[0:no_in_first_file])
    feat_collection_2 = geojson.FeatureCollection(features[no_in_first_file:])
    with open(file_1, 'w') as f:
        geojson.dump(feat_collection_1, f)
    with open(file_2, 'w') as f:
        geojson.dump(feat_collection_2, f)
|
Split a geojson in two separate files.
Args:
input_file (str): Input filename.
file_1 (str): Output file name 1.
file_2 (str): Output file name 2.
no_in_first_file (int): Number of features in input_file to go to file_1.
|
juraj-google-style
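A usage sketch for `split` above, building a throwaway three-feature GeoJSON file first; the file names and coordinates are made up, and it assumes `split` from the entry above is in scope.

import geojson

# Build a tiny FeatureCollection to split (coordinates are arbitrary).
features = [geojson.Feature(geometry=geojson.Point((lon, 0.0))) for lon in (10.0, 20.0, 30.0)]
with open('all.geojson', 'w') as f:
    geojson.dump(geojson.FeatureCollection(features), f)

# First output file gets 2 features, the second gets the remaining 1.
split('all.geojson', 'first.geojson', 'second.geojson', 2)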
|
def find_backend(line: str) -> Optional[str]:
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return '_and_'.join(backends)
|
Find one (or multiple) backend in a code line of the init.
Args:
line (`str`): A code line of the main init.
Returns:
Optional[`str`]: If one (or several) backend is found, returns it. In the case of multiple backends (the line
contains `if is_xxx_available() and is_yyy_available()`) returns all backends joined on `_and_` (so
`xxx_and_yyy` for instance).
|
github-repos
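A standalone sketch of the backend-detection idea: the two module-level regexes the entry relies on aren't shown above, so the ones below are plausible stand-ins (assumptions), matching `is_xxx_available()` calls in an `if` line.

import re
from typing import Optional

# Stand-in regexes; the real _re_test_backend / _re_backend live at module level upstream.
_re_test_backend = re.compile(r'^\s*if\s+is_[a-z_]+_available\(\)')
_re_backend = re.compile(r'is_([a-z_]+)_available\(\)')

def find_backend(line: str) -> Optional[str]:
    if _re_test_backend.search(line) is None:
        return None
    backends = _re_backend.findall(line)
    backends.sort()
    return '_and_'.join(backends)

print(find_backend('if is_torch_available() and is_vision_available():'))  # torch_and_vision
print(find_backend('x = 1'))                                               # None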
|
def dump(config):
    return yaml.safe_dump(
        config.to_primitive(),
        default_flow_style=False,
        encoding='utf-8',
        allow_unicode=True)
|
Dumps a stacker Config object as yaml.
Args:
config (:class:`Config`): the stacker Config object.
Returns:
str: the yaml formatted stacker Config.
|
juraj-google-style
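A small sketch of what the dump boils down to: `yaml.safe_dump` with block style, UTF-8 encoding, and unicode allowed. Since stacker's `Config.to_primitive()` isn't available here, a plain dict stands in for it (an assumption).

import yaml

# Stand-in for config.to_primitive(): a plain mapping of stacker-like settings.
primitive = {'namespace': 'example', 'stacks': [{'name': 'vpc', 'class_path': 'blueprints.VPC'}]}

dumped = yaml.safe_dump(primitive, default_flow_style=False, encoding='utf-8', allow_unicode=True)
print(dumped.decode('utf-8'))  # note: with encoding set, safe_dump returns bytes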
|
def read_nmr_efg_tensor(self):
    header_pattern = 'Electric field gradients \\(V/A\\^2\\)\\n-*\\n ion\\s+V_xx\\s+V_yy\\s+V_zz\\s+V_xy\\s+V_xz\\s+V_yz\\n-*\\n'
    row_pattern = '\\d+\\s+([-\\d\\.]+)\\s+([-\\d\\.]+)\\s+([-\\d\\.]+)\\s+([-\\d\\.]+)\\s+([-\\d\\.]+)\\s+([-\\d\\.]+)'
    footer_pattern = '-*\\n'
    data = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float)
    tensors = [make_symmetric_matrix_from_upper_tri(d) for d in data]
    self.data['unsym_efg_tensor'] = tensors
    return tensors
|
Parses the NMR Electric Field Gradient Raw Tensors
Returns:
A list of Electric Field Gradient Tensors in the order of Atoms from OUTCAR
|
codesearchnet
|
def copy(self, **override_parameters_kwargs):
    parameters = dict(self.parameters, **override_parameters_kwargs)
    return type(self)(**parameters)
|
Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
|
github-repos
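The copy-with-overrides pattern above only needs the object to remember its constructor arguments; here is a minimal standalone sketch with a made-up `ToyDistribution` class (not the actual distribution base class).

class ToyDistribution:
    def __init__(self, loc=0.0, scale=1.0):
        # Remember constructor args so copy() can rebuild the object.
        self.parameters = dict(loc=loc, scale=scale)
        self.loc = loc
        self.scale = scale

    def copy(self, **override_parameters_kwargs):
        # Same idea as the entry above: merge stored args with the overrides.
        parameters = dict(self.parameters, **override_parameters_kwargs)
        return type(self)(**parameters)

base = ToyDistribution(loc=0.0, scale=1.0)
shifted = base.copy(loc=3.0)
print(shifted.loc, shifted.scale)  # 3.0 1.0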
|
def with_stack(cls, stack, severity, message, **kwargs):
    stack = _dedup_opcodes(stack) if stack else None
    opcode = stack[-1].current_opcode if stack else None
    if opcode is None:
        return cls(severity, message, **kwargs)
    else:
        return cls(severity, message, filename=opcode.code.filename, line=opcode.line, endline=opcode.endline, col=opcode.col, endcol=opcode.endcol, methodname=opcode.code.name, opcode_name=opcode.__class__.__name__, traceback=_make_traceback_str(stack), **kwargs)
|
Return an error using a stack for position information.
Args:
stack: A list of state.Frame or state.SimpleFrame objects.
severity: The error level (error or warning), an integer.
message: The error message string.
**kwargs: Additional keyword args to pass onto the class ctor.
Returns:
An Error object.
|
github-repos
|
def exp(x):
    return math_ops.exp(x)
|
Element-wise exponential.
Args:
x: Tensor or variable.
Returns:
A tensor.
|
github-repos
|
def _GetDateTime(self, filetime):
    if filetime == 0:
        return dfdatetime_semantic_time.SemanticTime('Not set')
    return dfdatetime_filetime.Filetime(timestamp=filetime)
|
Retrieves the date and time from a FILETIME timestamp.
Args:
filetime (int): FILETIME timestamp.
Returns:
dfdatetime.DateTimeValues: date and time.
|
juraj-google-style
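For context, a FILETIME value counts 100-nanosecond intervals since 1601-01-01 00:00:00 UTC. Here is a quick standard-library sketch of the conversion, independent of the dfdatetime classes used above; the sample timestamp is the well-known value for the Unix epoch.

from datetime import datetime, timedelta, timezone

FILETIME_EPOCH = datetime(1601, 1, 1, tzinfo=timezone.utc)

def filetime_to_datetime(filetime):
    # FILETIME is in 100ns ticks; 10 ticks per microsecond.
    return FILETIME_EPOCH + timedelta(microseconds=filetime

print(filetime_to_datetime(116444736000000000))  # 1970-01-01 00:00:00+00:00 (the Unix epoch)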
|
def __init__(self, config_file=None, config_header=None):
    self.config_file = config_file or CONFIG
    self.config_header = config_header
    self.config = parser.Parser()
    self.config.read(self.config_file)
|
Constructor.
Args:
config_file: string, the location of the config file.
config_header: string, the message to write at the top of the config.
|
juraj-google-style
|
def GetEstimatedYear(self):
    if self._preferred_year:
        return self._preferred_year
    if self._knowledge_base.year:
        return self._knowledge_base.year
    year = self._GetEarliestYearFromFileEntry()
    if (not year):
        year = self._GetLatestYearFromFileEntry()
    if (not year):
        year = timelib.GetCurrentYear()
    return year
|
Retrieves an estimate of the year.
This function determines the year in the following manner:
* see if the user provided a preferred year;
* see if knowledge base defines a year e.g. derived from preprocessing;
* determine the year based on the file entry metadata;
* default to the current year;
Returns:
int: estimated year.
|
codesearchnet
|
def divide_to_patches(image: np.array, patch_size: int, input_data_format) -> List[np.array]:
    patches = []
    height, width = get_image_size(image, channel_dim=input_data_format)
    for i in range(0, height, patch_size):
        for j in range(0, width, patch_size):
            if input_data_format == ChannelDimension.LAST:
                patch = image[i:i + patch_size, j:j + patch_size]
            else:
                patch = image[:, i:i + patch_size, j:j + patch_size]
            patches.append(patch)
    return patches
|
Divides an image into patches of a specified size.
Args:
image (`np.array`):
The input image.
patch_size (`int`):
The size of each patch.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
list: A list of np.array representing the patches.
|
github-repos
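A standalone sketch of the same row-major patching loop on a channels-last array, without the `get_image_size`/`ChannelDimension` helpers from the entry (the helper name and sizes below are made up); note that edge patches are smaller when the image size is not a multiple of `patch_size`.

import numpy as np

def patches_channels_last(image, patch_size):
    # image is (height, width, channels); iterate row-major, same as the entry above.
    patches = []
    height, width = image.shape[:2]
    for i in range(0, height, patch_size):
        for j in range(0, width, patch_size):
            patches.append(image[i:i + patch_size, j:j + patch_size])
    return patches

image = np.zeros((6, 9, 3), dtype=np.uint8)
patches = patches_channels_last(image, patch_size=4)
print(len(patches), [p.shape for p in patches[:3]])
# 6 patches; edge patches are narrower or shorter, e.g. (4, 1, 3) at the right edge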
|
def _GetActualMessage(self):
    if six.PY2:
        return self._actual.message
    return self._actual.args[0] if self._actual.args else ''
|
Returns the "message" portion of an exception.
Many Python 2 exceptions have a "message" attribute, so return that directly
in Python 2. However, this attribute is never present in Python 3, so return
the first argument passed to the exception instance as the message.
Returns:
String
|
github-repos
|