code (stringlengths 20–4.93k) | docstring (stringlengths 33–1.27k) | source (stringclasses, 3 values) |
---|---|---|
def __init__(self, tokens, flags=re.MULTILINE):
self.tokens = {}
for state, patterns in tokens.items():
full_patterns = []
for p in patterns:
pat = re.compile(p[0], flags)
action = p[1]
new_state = p[2] if len(p) >= 3 else None
if new_state and new_state.startswith('#pop'):
try:
new_state = -int(new_state.split(':')[1])
except (IndexError, ValueError):
new_state = -1
full_patterns.append((pat, action, new_state))
self.tokens[state] = full_patterns
|
Create a new lexer
Args:
tokens (dict(match rules)): Hierarchical dict of states with a list of regex patterns and transitions
flags (int): Optional regex flags
|
juraj-google-style
|
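To make the constructor above concrete, here is a minimal, self-contained sketch of the kind of token table it consumes; the state names, actions, and the `'#pop'` convention are assumptions for illustration.

```python
import re

# Hypothetical token table: each state maps to (pattern, action, new_state) tuples.
TOKENS = {
    'root': [
        (r'\d+', 'NUMBER', None),
        (r'"', 'STRING_START', 'string'),   # push into the 'string' state
    ],
    'string': [
        (r'[^"]+', 'STRING_BODY', None),
        (r'"', 'STRING_END', '#pop:1'),     # pop one state off the stack
    ],
}

compiled = {
    state: [(re.compile(pat, re.MULTILINE), action, new_state)
            for pat, action, new_state in patterns]
    for state, patterns in TOKENS.items()
}

# The compiled table mirrors what the constructor above builds in self.tokens.
print(compiled['root'][0][0].match('42').group())  # -> '42'
```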
def extractTimes(self, inp):
def handleMatch(time):
relative = False
if (not time):
return None
elif (time.group(1) == 'morning'):
h = 8
m = 0
elif (time.group(1) == 'afternoon'):
h = 12
m = 0
elif (time.group(1) == 'evening'):
h = 19
m = 0
elif (time.group(4) and time.group(5)):
(h, m) = (0, 0)
converter = NumberService()
try:
diff = converter.parse(time.group(4))
except:
return None
if (time.group(5) == 'hours'):
h += diff
else:
m += diff
if time.group(6):
converter = NumberService()
try:
diff = converter.parse(time.group(7))
except:
return None
if (time.group(8) == 'hours'):
h += diff
else:
m += diff
relative = True
else:
t = time.group(2)
(h, m) = ((int(t.split(':')[0]) % 12), int(t.split(':')[1]))
try:
if (time.group(3) == 'pm'):
h += 12
except IndexError:
pass
if relative:
return (self.now + datetime.timedelta(hours=h, minutes=m))
else:
return datetime.datetime(self.now.year, self.now.month, self.now.day, h, m)
inp = self._preprocess(inp)
return [handleMatch(time) for time in self._timeRegex.finditer(inp)]
|
Extracts time-related information from an input string.
Ignores any information related to the specific date, focusing
on the time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted times from the
input snippet, or an empty list if none found.
|
codesearchnet
|
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(LocateResponsePayload, self).read(input_buffer, kmip_version=kmip_version)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.LOCATED_ITEMS, local_buffer):
self._located_items = primitives.Integer(tag=enums.Tags.LOCATED_ITEMS)
self._located_items.read(local_buffer, kmip_version=kmip_version)
self._unique_identifiers = []
while self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):
unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)
unique_identifier.read(local_buffer, kmip_version=kmip_version)
self._unique_identifiers.append(unique_identifier)
self.is_oversized(local_buffer)
|
Read the data encoding the Locate response payload and decode it
into its constituent parts.
Args:
input_buffer (stream): A data buffer containing encoded object
data, supporting a read method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
|
codesearchnet
|
def generate_batch(cls, strategy, size, **kwargs):
assert strategy in (enums.STUB_STRATEGY, enums.BUILD_STRATEGY, enums.CREATE_STRATEGY)
batch_action = getattr(cls, '%s_batch' % strategy)
return batch_action(size, **kwargs)
|
Generate a batch of instances.
The instances will be created with the given strategy (one of
BUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).
Args:
strategy (str): the strategy to use for generating the instance.
size (int): the number of instances to generate
Returns:
object list: the generated instances
|
juraj-google-style
|
def _GetDayOfYear(self, year, month, day_of_month):
if month not in range(1, 13):
raise ValueError('Month value out of bounds.')
days_per_month = self._GetDaysPerMonth(year, month)
if day_of_month < 1 or day_of_month > days_per_month:
raise ValueError('Day of month value out of bounds.')
day_of_year = day_of_month
for past_month in range(1, month):
day_of_year += self._GetDaysPerMonth(year, past_month)
return day_of_year
|
Retrieves the day of the year for a specific day of a month in a year.
Args:
year (int): year e.g. 1970.
month (int): month, where 1 represents January.
day_of_month (int): day of the month, where 1 represents the first day.
Returns:
int: day of year.
Raises:
ValueError: if the month or day of month value is out of bounds.
|
juraj-google-style
|
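For reference, the same day-of-year computation can be sketched with the standard library's `calendar` module in place of the class's `_GetDaysPerMonth` helper:

```python
import calendar

def day_of_year(year, month, day_of_month):
    # Validate inputs the same way the method above does.
    if month not in range(1, 13):
        raise ValueError('Month value out of bounds.')
    days_in_month = calendar.monthrange(year, month)[1]
    if day_of_month < 1 or day_of_month > days_in_month:
        raise ValueError('Day of month value out of bounds.')
    # Sum the lengths of all preceding months, then add the day of month.
    return day_of_month + sum(
        calendar.monthrange(year, m)[1] for m in range(1, month))

print(day_of_year(1970, 3, 1))  # 60 (31 + 28 + 1)
```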
def all_matches(pcoll, regex):
regex = Regex._regex_compile(regex)
def _process(element):
m = regex.match(element)
if m:
yield [m.group(ix) for ix in range(m.lastindex + 1)]
return pcoll | FlatMap(_process)
|
Returns all matches (groups) if zero or more characters at the beginning
of string match the regular expression.
Args:
regex: the regular expression string or (re.compile) pattern.
|
github-repos
|
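The group-extraction step can be illustrated with the `re` module alone (the Beam `FlatMap` plumbing is omitted); the pattern below is a made-up example:

```python
import re

pattern = re.compile(r'(\w+):(\d+)')

def groups_if_match(element):
    m = pattern.match(element)
    if m:
        # group(0) is the whole match, followed by each capturing group.
        return [m.group(ix) for ix in range(m.lastindex + 1)]
    return None

print(groups_if_match('port:8080'))      # ['port:8080', 'port', '8080']
print(groups_if_match('no match here'))  # None
```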
def dup(node, copy_map, field_name='___pyct_anno'):
for n in gast.walk(node):
for k in copy_map:
if hasanno(n, k, field_name):
setanno(n, copy_map[k], getanno(n, k, field_name), field_name)
|
Recursively copies annotations in an AST tree.
Args:
node: ast.AST
copy_map: Dict[Hashable, Hashable], maps a source anno key to a destination
key. All annotations with the source key will be copied to identical
annotations with the destination key.
field_name: str
|
github-repos
|
def read_pattern(text_str, patterns, terminate_on_match=False, postprocess=str):
compiled = {key: re.compile(pattern, (re.MULTILINE | re.DOTALL)) for (key, pattern) in patterns.items()}
matches = defaultdict(list)
for (key, pattern) in compiled.items():
for match in pattern.finditer(text_str):
matches[key].append([postprocess(i) for i in match.groups()])
if terminate_on_match:
break
return matches
|
General pattern reading on an input string
Args:
text_str (str): the input string to search for patterns
patterns (dict): A dict of patterns, e.g.,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
Renders accessible:
Any attribute in patterns. For example,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"} will set the
value of matches["energy"] = [[-1234], [-3453], ...], to the
results from regex and postprocess. Note that the returned values
are lists of lists, because you can grep multiple items on one line.
|
codesearchnet
|
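A minimal usage sketch of the same pattern-dictionary scan, with a hypothetical text and an energy pattern like the one in the docstring (here with `postprocess=float` instead of the default `str`):

```python
import re
from collections import defaultdict

text = """
energy(sigma->0) =   -1234.5
energy(sigma->0) =   -3453.0
"""

patterns = {'energy': r'energy\(sigma->0\)\s+=\s+([\d\-.]+)'}

compiled = {k: re.compile(p, re.MULTILINE | re.DOTALL) for k, p in patterns.items()}
matches = defaultdict(list)
for key, pattern in compiled.items():
    for match in pattern.finditer(text):
        matches[key].append([float(g) for g in match.groups()])

print(matches['energy'])  # [[-1234.5], [-3453.0]]
```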
def requires_open_handle(method):
@functools.wraps(method)
def wrapper_requiring_open_handle(self, *args, **kwargs):
if self.is_closed():
raise usb_exceptions.HandleClosedError()
return method(self, *args, **kwargs)
return wrapper_requiring_open_handle
|
Decorator to ensure a handle is open for certain methods.
Subclasses should decorate their Read() and Write() with this rather than
checking their own internal state, keeping all "is this handle open" logic
in is_closed().
Args:
method: A class method on a subclass of UsbHandle
Raises:
HandleClosedError: If this handle has been closed.
Returns:
A wrapper around method that ensures the handle is open before calling through
to the wrapped method.
|
juraj-google-style
|
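A self-contained toy version of this guard-decorator pattern; the `FakeHandle` class and exception name are hypothetical:

```python
import functools

class HandleClosedError(Exception):
    pass

def requires_open(method):
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if self.is_closed():
            raise HandleClosedError('handle is closed')
        return method(self, *args, **kwargs)
    return wrapper

class FakeHandle:
    def __init__(self):
        self._closed = False
    def is_closed(self):
        return self._closed
    def close(self):
        self._closed = True
    @requires_open
    def read(self):
        return b'data'

h = FakeHandle()
print(h.read())   # b'data'
h.close()
# h.read() would now raise HandleClosedError
```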
def potential_purviews(self, direction, mechanism, purviews=False):
system = self.system[direction]
return [
purview for purview in system.potential_purviews(
direction, mechanism, purviews)
if set(purview).issubset(self.purview_indices(direction))
]
|
Return all purviews that could belong to the |MIC|/|MIE|.
Filters out trivially-reducible purviews.
Args:
direction (str): Either |CAUSE| or |EFFECT|.
mechanism (tuple[int]): The mechanism of interest.
Keyword Args:
purviews (tuple[int]): Optional subset of purviews of interest.
|
juraj-google-style
|
def victim_email_assets(self, main_type, sub_type, unique_id, params=None):
params = params or {}
if not sub_type:
url = '/v2/{}/{}/victimAssets/emailAddresses'.format(main_type, unique_id)
else:
url = '/v2/{}/{}/{}/victimAssets/emailAddresses'.format(main_type, sub_type, unique_id)
for vea in self._iterate(url, params, 'victimEmail'):
yield vea
|
Args:
main_type:
sub_type:
unique_id:
params:
Return:
|
juraj-google-style
|
def _tokenize_wordpiece(self, text):
output_tokens = []
for token in self.basic_tokenizer._whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.vocab.unknown_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = ''.join(chars[start:end])
if start > 0:
substr = '##' + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.vocab.unknown_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
|
Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BERTBasicTokenizer`.
Returns:
A list of wordpiece tokens.
|
juraj-google-style
|
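A stripped-down, standalone sketch of the greedy longest-match-first algorithm with a toy vocabulary; `##` marks non-initial word pieces:

```python
def wordpiece(token, vocab, unk='[UNK]', max_chars=100):
    if len(token) > max_chars:
        return [unk]
    sub_tokens, start = [], 0
    while start < len(token):
        end = len(token)
        cur = None
        while start < end:
            piece = token[start:end]
            if start > 0:
                piece = '##' + piece   # continuation pieces are prefixed
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]               # no piece matched: whole token is unknown
        sub_tokens.append(cur)
        start = end
    return sub_tokens

vocab = {'un', '##aff', '##able'}
print(wordpiece('unaffable', vocab))   # ['un', '##aff', '##able']
```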
def write_representative_sequences_file(self, outname, outdir=None, set_ids_from_model=True):
if (not outdir):
outdir = self.data_dir
if (not outdir):
raise ValueError('Output directory must be specified')
outfile = op.join(outdir, (outname + '.faa'))
tmp = []
for x in self.genes_with_a_representative_sequence:
repseq = x.protein.representative_sequence
copied_seq_record = copy(repseq)
if set_ids_from_model:
copied_seq_record.id = x.id
tmp.append(copied_seq_record)
SeqIO.write(tmp, outfile, 'fasta')
log.info('{}: wrote all representative sequences to file'.format(outfile))
self.genome_path = outfile
return self.genome_path
|
Write all the model's sequences as a single FASTA file. By default, sets IDs to model gene IDs.
Args:
outname (str): Name of the output FASTA file without the extension
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
set_ids_from_model (bool): If the gene ID source should be the model gene IDs, not the original sequence ID
|
codesearchnet
|
def _get_args_and_defaults(args, defaults):
defaults = (defaults or [])
args_and_defaults = [(argument, default) for (argument, default) in zip_longest(args[::(- 1)], defaults[::(- 1)], fillvalue=NoDefault)]
return args_and_defaults[::(- 1)]
|
Return a list of 2-tuples - the argument name and its default value or
a special value that indicates there is no default value.
Args:
args: list of argument names
defaults: tuple of default values
|
codesearchnet
|
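A runnable sketch of the `zip_longest` trick used above, pairing names with defaults from the right; the `NoDefault` sentinel here is a stand-in for the original's:

```python
from itertools import zip_longest

class NoDefault:
    """Sentinel marking arguments without a default value."""

def args_and_defaults(args, defaults):
    defaults = defaults or []
    # Reverse both sequences so defaults align with the *last* arguments,
    # then reverse the result back into declaration order.
    paired = [(a, d) for a, d in zip_longest(args[::-1], defaults[::-1],
                                             fillvalue=NoDefault)]
    return paired[::-1]

print(args_and_defaults(['a', 'b', 'c'], (1, 2)))
# [('a', <class 'NoDefault'>), ('b', 1), ('c', 2)]
```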
def __init__(self, resolver_context, encoding='utf-8'):
super(CPIOFileSystem, self).__init__(resolver_context)
self._cpio_archive_file = None
self._file_object = None
self.encoding = encoding
|
Initializes a CPIO archive file system.
Args:
resolver_context (Context): resolver context.
encoding (Optional[str]): file entry name encoding.
|
juraj-google-style
|
def VerifyCipherSignature(self, remote_public_key):
if self.cipher_metadata.signature and remote_public_key:
stats_collector_instance.Get().IncrementCounter("grr_rsa_operations")
remote_public_key.Verify(self.serialized_cipher,
self.cipher_metadata.signature)
return True
|
Verifies the signature on the encrypted cipher block.
This method returns True if the signature verifies correctly with
the key given.
Args:
remote_public_key: The remote public key.
Returns:
None
Raises:
rdf_crypto.VerificationError: A signature and a key were both given but
verification fails.
|
juraj-google-style
|
def case_study_social_link_linkedin(value):
parsed = parse.urlparse(value.lower())
if not parsed.netloc.endswith('linkedin.com'):
raise ValidationError(MESSAGE_NOT_LINKEDIN)
|
Confirms that the social media url is pointed at the correct domain.
Args:
value (string): The url to check.
Raises:
django.forms.ValidationError
|
juraj-google-style
|
def _request(self, method, url, headers=None, **kwargs):
_headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
if headers:
_headers.update(headers)
if self.is_debug:
self.logger.debug('{} {} {} {}'.format(method, url, headers, kwargs))
return self._parse(requests.request(method, url, headers=_headers, timeout=60, **kwargs))
|
Normally the connection guarantees response times of 3 seconds on average;
if there is an abnormal situation, the maximum response time is 1 minute.
It is highly recommended that you set “timeouts” when you connect with PayU.
Args:
method:
url:
headers:
**kwargs:
Returns:
|
juraj-google-style
|
def _maybe_init_experiment(self, experiment_name):
user_id = self._maybe_init_user()
cursor = self._db.cursor()
cursor.execute(
    # SQL text assumed; the original query string was stripped from this sample.
    'SELECT experiment_id FROM Experiments '
    'WHERE user_id = ? AND experiment_name = ?',
(user_id, experiment_name))
row = cursor.fetchone()
if row:
return row[0]
experiment_id = self._create_id()
computed_time = 0
cursor.execute(
    # SQL text assumed; the original query string was stripped from this sample.
    'INSERT INTO Experiments (user_id, experiment_id, experiment_name, '
    'inserted_time, started_time, is_watching) VALUES (?, ?, ?, ?, ?, ?)',
(user_id, experiment_id, experiment_name, time.time(), computed_time,
False))
return experiment_id
|
Returns the ID for the given experiment, creating the row if needed.
Args:
experiment_name: name of experiment.
|
juraj-google-style
|
def __init__(self, value=None):
super(ApplicationNamespace, self).__init__(
value, Tags.APPLICATION_NAMESPACE)
|
Construct an ApplicationNamespace object.
Args:
value (str): A string representing a namespace. Optional, defaults
to None.
|
juraj-google-style
|
def encode(self, value: Any) -> geno.DNA:
children = []
def _encode(path: utils.KeyPath, template_value: Any, input_value: Any) -> Any:
if pg_typing.MISSING_VALUE == input_value and pg_typing.MISSING_VALUE != template_value:
raise ValueError(f"Value is missing from input. Path='{path}'.")
if isinstance(template_value, base.HyperValue) and (not self._where or self._where(template_value)):
children.append(template_value.encode(input_value))
elif isinstance(template_value, derived.DerivedValue):
if self._compute_derived:
referenced_values = [reference_path.query(value) for _, reference_path in template_value.resolve()]
derived_value = template_value.derive(*referenced_values)
if derived_value != input_value:
raise ValueError(f"Unmatched derived value between template and input. (Path='{path}', Template={template_value!r}, ComputedValue={derived_value!r}, Input={input_value!r})")
elif isinstance(template_value, symbolic.Object):
if type(input_value) is not type(template_value):
raise ValueError(f"Unmatched Object type between template and input: (Path='{path}', Template={template_value!r}, Input={input_value!r})")
template_keys = set(template_value.sym_keys())
value_keys = set(input_value.sym_keys())
if template_keys != value_keys:
raise ValueError(f"Unmatched Object keys between template value and input value. (Path='{path}', TemplateOnlyKeys={template_keys - value_keys}, InputOnlyKeys={value_keys - template_keys})")
for key in template_value.sym_keys():
utils.merge_tree(template_value.sym_getattr(key), input_value.sym_getattr(key), _encode, root_path=utils.KeyPath(key, path))
elif isinstance(template_value, symbolic.Dict):
if not isinstance(input_value, dict):
raise ValueError(f"Unmatched dict between template value and input value. (Path='{path}', Template={template_value!r}, Input={input_value!r})")
elif isinstance(template_value, symbolic.List):
if not isinstance(input_value, list) or len(input_value) != len(template_value):
raise ValueError(f"Unmatched list between template value and input value. (Path='{path}', Template={template_value!r}, Input={input_value!r})")
for i, template_item in enumerate(template_value):
utils.merge_tree(template_item, input_value[i], _encode, root_path=utils.KeyPath(i, path))
elif template_value != input_value:
raise ValueError(f"Unmatched value between template and input. (Path='{path}', Template={utils.quote_if_str(template_value)}, Input={utils.quote_if_str(input_value)})")
return template_value
utils.merge_tree(self._value, value, _encode, root_path=self._root_path)
return geno.DNA(None, children)
|
Encode a value into a DNA.
Example::
# DNA of a constant template:
template = pg.hyper.ObjectTemplate({'a': 0})
assert template.encode({'a': 0}) == pg.DNA(None)
# Raises: Unmatched value between template and input.
template.encode({'a': 1})
# DNA of a template containing only one pg.oneof.
template = pg.hyper.ObjectTemplate({'a': pg.oneof([1, 2])})
assert template.encode({'a': 1}) == pg.DNA(0)
# DNA of a template containing only one pg.oneof.
template = pg.hyper.ObjectTemplate({'a': pg.floatv(0.1, 1.0)})
assert template.encode({'a': 0.5}) == pg.DNA(0.5)
Args:
value: Value to encode.
Returns:
Encoded DNA.
Raises:
ValueError if value cannot be encoded by this template.
|
github-repos
|
def list(self):
return self._registry.keys()
|
Lists registered items.
Returns:
A list of names of registered objects.
|
github-repos
|
def find_wells_with_curve(self, mnemonic, alias=None):
return Project([w for w in self if (w.get_curve(mnemonic, alias=alias) is not None)])
|
Returns a new Project with only the wells which have the named curve.
Args:
mnemonic (str): the name of the curve to look for.
alias (dict): a welly alias dictionary.
Returns:
project.
|
codesearchnet
|
def _copy_attr(self, module, varname, cls, attrname=None):
if (not hasattr(module, varname)):
raise RuntimeError("Variable '{}' not found".format(varname))
obj = getattr(module, varname)
if (not isinstance(obj, cls)):
raise RuntimeError("Expecting fobj to be a {}, not a '{}'".format(cls.__name__, obj.__class__.__name__))
if (attrname is None):
attrname = varname
setattr(self, attrname, obj)
|
Copies attribute from module object to self. Raises if object not of expected class
Args:
module: module object
varname: variable name
cls: expected class of variable
attrname: attribute name of self. Falls back to varname
|
codesearchnet
|
def dispatch(self, state_change: StateChange) -> List[Event]:
assert isinstance(state_change, StateChange)
next_state = deepcopy(self.current_state)
iteration = self.state_transition(
next_state,
state_change,
)
assert isinstance(iteration, TransitionResult)
self.current_state = iteration.new_state
events = iteration.events
assert isinstance(self.current_state, (State, type(None)))
assert all(isinstance(e, Event) for e in events)
return events
|
Apply the `state_change` in the current machine and return the
resulting events.
Args:
state_change: An object representation of a state
change.
Return:
A list of events produced by the state transition.
It's the upper layer's responsibility to decide how to handle
these events.
|
juraj-google-style
|
def getPoly(rCut, nMax):
rCutVeryHard = rCut+5.0
rx = 0.5*rCutVeryHard*(x + 1)
basisFunctions = []
for i in range(1, nMax + 1):
basisFunctions.append(lambda rr, i=i, rCut=rCut: (rCut - np.clip(rr, 0, rCut))**(i+2))
S = np.zeros((nMax, nMax))
for i in range(1, nMax+1):
for j in range(1, nMax+1):
S[i-1, j-1] = (2*(rCut)**(7+i+j))/((5+i+j)*(6+i+j)*(7+i+j))
betas = sqrtm(np.linalg.inv(S))
if (betas.dtype == np.complex128):
raise ValueError(
"Could not calculate normalization factors for the polynomial basis"
" in the domain of real numbers. Lowering the number of radial "
"basis functions is advised."
)
fs = np.zeros([nMax, len(x)])
for n in range(1, nMax+1):
fs[n-1, :] = (rCut-np.clip(rx, 0, rCut))**(n+2)
gss = np.dot(betas, fs)
return nMax, rx, gss
|
Used to calculate discrete vectors for the polynomial basis functions.
Args:
rCut(float): Radial cutoff
nMax(int): Number of polynomial radial functions
|
juraj-google-style
|
def __init__(self, string):
self._raw_taf = None
self._taf_header = None
self._raw_weather_groups = []
self._weather_groups = []
self._maintenance = None
if isinstance(string, str) and string != "":
self._raw_taf = string
else:
raise MalformedTAF("TAF/METAR string expected")
self._raw_taf = self._raw_taf.strip()
self._taf_header = self._init_header(self._raw_taf)
if self._taf_header['form'] == 'metar':
self._weather_groups.append(self._parse_group(self._raw_taf))
else:
self._raw_weather_groups = self._init_groups(self._raw_taf)
for group in self._raw_weather_groups:
parsed_group = self._parse_group(group)
self._weather_groups.append(parsed_group)
self._maintenance = self._parse_maintenance(self._raw_taf)
|
Initializes the object with TAF/METAR report text.
Args:
string: TAF/METAR report string
Raises:
MalformedTAF: An error parsing the TAF/METAR report
|
juraj-google-style
|
def bucket(self, experiment, user_id, bucketing_id):
if not experiment:
return None
if experiment.groupPolicy in GROUP_POLICIES:
group = self.config.get_group(experiment.groupId)
if not group:
return None
user_experiment_id = self.find_bucket(bucketing_id, experiment.groupId, group.trafficAllocation)
if not user_experiment_id:
self.config.logger.info('User "%s" is in no experiment.' % user_id)
return None
if user_experiment_id != experiment.id:
self.config.logger.info('User "%s" is not in experiment "%s" of group %s.' % (
user_id,
experiment.key,
experiment.groupId
))
return None
self.config.logger.info('User "%s" is in experiment %s of group %s.' % (
user_id,
experiment.key,
experiment.groupId
))
variation_id = self.find_bucket(bucketing_id, experiment.id, experiment.trafficAllocation)
if variation_id:
variation = self.config.get_variation_from_id(experiment.key, variation_id)
self.config.logger.info('User "%s" is in variation "%s" of experiment %s.' % (
user_id,
variation.key,
experiment.key
))
return variation
self.config.logger.info('User "%s" is in no variation.' % user_id)
return None
|
For a given experiment and bucketing ID determines variation to be shown to user.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_id: ID for user.
bucketing_id: ID to be used for bucketing the user.
Returns:
Variation in which user with ID user_id will be put in. None if no variation.
|
juraj-google-style
|
def get_intermediate_dirs(fs, dir_path):
intermediates = []
with fs.lock():
for path in recursepath(abspath(dir_path), reverse=True):
try:
resource = fs.getinfo(path)
except ResourceNotFound:
intermediates.append(abspath(path))
else:
if resource.is_dir:
break
raise errors.DirectoryExpected(dir_path)
return intermediates[::(- 1)][:(- 1)]
|
Get a list of non-existing intermediate directories.
Arguments:
fs (FS): A filesystem instance.
dir_path (str): A path to a new directory on the filesystem.
Returns:
list: A list of non-existing paths.
Raises:
~fs.errors.DirectoryExpected: If a path component
references a file and not a directory.
|
codesearchnet
|
def int_to_id(cls, number):
if number < 0 or number >= 1 << 96:
raise ValueError('number value must be within [0, %s)' % (1 << 96))
ints = [(number & 79228162495817593519834398720) >> 64, (number & 18446744069414584320) >> 32, number & 4294967295]
number_bytes = struct.pack('>III', *ints)
return ObjectId(number_bytes)
|
Args:
number(int): The integer value to be used to convert to ObjectId.
Returns: The ObjectId built from the 12-byte big-endian encoding of the
integer value.
|
github-repos
|
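The three magic masks above select the top, middle, and low 32 bits of a 96-bit integer; a dependency-free sketch of the same packing (without constructing a bson `ObjectId`):

```python
import struct

def int_to_12_bytes(number):
    if number < 0 or number >= 1 << 96:
        raise ValueError('number value must be within [0, %s)' % (1 << 96))
    ints = [
        (number >> 64) & 0xFFFFFFFF,   # top 32 bits
        (number >> 32) & 0xFFFFFFFF,   # middle 32 bits
        number & 0xFFFFFFFF,           # low 32 bits
    ]
    return struct.pack('>III', *ints)  # 12 big-endian bytes

print(int_to_12_bytes(1).hex())  # '000000000000000000000001'
```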
def send(self, message_type, data, callback=None, one_way=False):
message = validator_pb2.Message(correlation_id=_generate_id(), content=data, message_type=message_type)
fut = future.Future(message.correlation_id, message.content, callback, timeout=self._connection_timeout)
if (not one_way):
self._futures.put(fut)
self._send_receive_thread.send_message(message)
return fut
|
Sends a message of message_type
Args:
message_type (validator_pb2.Message): enum value
data (bytes): serialized protobuf
callback (function): a callback function to call when a
response to this message is received
Returns:
future.Future
|
codesearchnet
|
def next_event(self, event_id, prev=False):
i = self.events.index(self._events_dict[event_id])
if (prev and (i > 0)):
return self.events[(i - 1)]
elif ((not prev) and ((i + 1) < len(self.events))):
return self.events[(i + 1)]
else:
return None
|
Get the event following another event in this conversation.
Args:
event_id (str): ID of the event.
prev (bool): If ``True``, return the previous event rather than the
next event. Defaults to ``False``.
Raises:
KeyError: If no such :class:`.ConversationEvent` is known.
Returns:
:class:`.ConversationEvent` or ``None`` if there is no following
event.
|
codesearchnet
|
def get_pipe_series_output(commands: Sequence[str],
stdinput: BinaryIO = None) -> bytes:
processes = []
for i in range(len(commands)):
if i == 0:
processes.append(
subprocess.Popen(
shlex.split(commands[i]),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE
)
)
else:
processes.append(
subprocess.Popen(
shlex.split(commands[i]),
stdin=processes[i - 1].stdout,
stdout=subprocess.PIPE
)
)
return processes[len(processes) - 1].communicate(stdinput)[0]
|
Get the output from a piped series of commands.
Args:
commands: sequence of command strings
stdinput: optional ``stdin`` data to feed into the start of the pipe
Returns:
``stdout`` from the end of the pipe
|
juraj-google-style
|
def _load_yaml_(file_name):
if (not os.path.exists(file_name)):
return dict()
with open(file_name, 'r', encoding='utf-8') as fp:
return YAML().load(stream=fp)
|
Load assets information from file
Args:
file_name: file name
Returns:
dict
|
codesearchnet
|
def forward(self, input_ids: torch.LongTensor, spkr_id: torch.Tensor, lang_id: torch.Tensor) -> Tuple[torch.Tensor]:
hidden_states = self.unit_embedding(input_ids).transpose(1, 2)
spkr = self.speaker_embedding(spkr_id).transpose(1, 2)
lang = self.language_embedding(lang_id).transpose(1, 2)
log_dur_pred = self.dur_predictor(hidden_states.transpose(1, 2))
dur_out = torch.clamp(torch.round(torch.expm1(log_dur_pred)).long(), min=1)
if hidden_states.size(0) == 1:
hidden_states = torch.repeat_interleave(hidden_states, dur_out.view(-1), dim=2)
else:
if hidden_states.shape[0] > 1 and self.training:
logger.warning('`self.training=True` and you use batching. You lose parallelism during the hifigan\n forward pass because the samples are interleaved.')
hidden_states = [torch.repeat_interleave(hidden_state, duration, dim=-1).transpose(0, 1) for hidden_state, duration in zip(hidden_states, dur_out)]
hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True).transpose(1, 2)
spkr = spkr.repeat(1, 1, hidden_states.shape[-1])
lang = lang.repeat(1, 1, hidden_states.shape[-1])
hidden_states = torch.cat([lang, hidden_states, spkr], dim=1)
hidden_states = self.hifi_gan(hidden_states)
unit_lengths = self._get_dur_output_lengths(input_ids, dur_out)
lengths = self._get_output_hifigan_lengths(unit_lengths)
return (hidden_states, lengths)
|
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4TTextToUnitForConditionalGeneration`]. [What are input
IDs?](../glossary#input-ids)
spkr_id (`int`, *optional*):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
tgt_lang (`str`, *optional*):
The language id to use as target language for translation.
|
github-repos
|
def console_print_ex(con: tcod.console.Console, x: int, y: int, flag: int, alignment: int, fmt: str) -> None:
lib.TCOD_console_printf_ex(_console(con), x, y, flag, alignment, _fmt(fmt))
|
Print a string on a console using a blend mode and alignment mode.
Args:
con (Console): Any Console instance.
x (int): Character x position from the left.
y (int): Character y position from the top.
.. deprecated:: 8.5
Use :any:`Console.print_` instead.
|
codesearchnet
|
def get_built_in(self, language, level, data):
pp = pprint.PrettyPrinter(indent=level)
lookup = {'python' : pp.pformat(data),
'json' : str(json.dumps(data, sort_keys=True, indent=level, separators=(',', ': ')))}
self.data_structure = lookup[language]
|
Gets the return string for a language that's supported by python.
Used in cases when python provides support for the conversion.
Args:
language: string, the language to return for.
level: integer, the indentation level.
data: python data structure being converted (list of tuples)
Returns:
None, updates self.data_structure
|
juraj-google-style
|
def _ReadMemberFooter(self, file_object):
file_offset = file_object.get_offset()
member_footer = self._ReadStructure(file_object, file_offset, self._MEMBER_FOOTER_SIZE, self._MEMBER_FOOTER, 'member footer')
self.uncompressed_data_size = member_footer.uncompressed_data_size
|
Reads a member footer.
Args:
file_object (FileIO): file-like object to read from.
Raises:
FileFormatError: if the member footer cannot be read.
|
codesearchnet
|
def __init__(self, stream_start):
self._decompressor = zlib_decompressor.DeflateDecompressor()
self.last_read = stream_start
self.uncompressed_offset = 0
self._compressed_data = b''
|
Initializes a gzip member decompressor wrapper.
Args:
stream_start (int): offset to the compressed stream within the containing
file object.
|
juraj-google-style
|
def feed(self, url_template, keyword, offset, max_num, page_step):
for i in range(offset, (offset + max_num), page_step):
url = url_template.format(keyword, i)
self.out_queue.put(url)
self.logger.debug('put url to url_queue: {}'.format(url))
|
Feed urls once
Args:
url_template: A string with parameters replaced with "{}".
keyword: A string indicating the searching keyword.
offset: An integer indicating the starting index.
max_num: An integer indicating the max number of images to be crawled.
page_step: An integer added to offset after each iteration.
|
codesearchnet
|
def from_string(cls, key, key_id=None):
key = _helpers.from_bytes(key)
marker_id, key_bytes = pem.readPemBlocksFromFile(
six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
if marker_id == 0:
private_key = rsa.key.PrivateKey.load_pkcs1(
key_bytes, format='DER')
elif marker_id == 1:
key_info, remaining = decoder.decode(
key_bytes, asn1Spec=_PKCS8_SPEC)
if remaining != b'':
raise ValueError('Unused bytes', remaining)
private_key_info = key_info.getComponentByName('privateKey')
private_key = rsa.key.PrivateKey.load_pkcs1(
private_key_info.asOctets(), format='DER')
else:
raise ValueError('No key could be detected.')
return cls(private_key, key_id=key_id)
|
Construct a Signer instance from a private key in PEM format.
Args:
key (str): Private key in PEM format.
key_id (str): An optional key id used to identify the private key.
Returns:
google.auth.crypt.Signer: The constructed signer.
Raises:
ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
|
juraj-google-style
|
def post_attention(self, token, x):
with tf.variable_scope(self.name + "/post_attention", reuse=tf.AUTO_REUSE):
depth = common_layers.shape_list(x)[-1]
actual_batch_size = common_layers.shape_list(x)[0]
memory_output = tf.gather(token["retrieved_mem"],
tf.range(actual_batch_size))
output = tf.add(tf.layers.dense(x, depth, use_bias=False),
tf.layers.dense(memory_output, depth))
with tf.control_dependencies([output]):
with tf.control_dependencies([
self.write(token["x"], token["access_logits"])]):
return tf.identity(output)
|
Called after self-attention. The memory can be updated here.
Args:
token: Data returned by pre_attention, which can be used to carry over
state related to the current memory operation.
x: a Tensor of data after self-attention and feed-forward
Returns:
a (possibly modified) version of the input x
|
juraj-google-style
|
def define_singleton(carrier, name, cls, cls_args = {}):
instance_name = "__{}".format(name)
setattr(carrier, instance_name, None)
def getter(self):
instance = getattr(carrier, instance_name)
if instance is None:
instance = cls(**cls_args)
setattr(carrier, instance_name, instance)
return instance
setattr(type(carrier), name, property(getter))
|
Creates a property with the given name; the cls instance is created only on the first access
Args:
carrier: an instance of the class where want to reach the cls instance
name (str): the variable name of the cls instance
cls (type): the singleton object type
cls_args (dict): optional dict for creating cls
|
juraj-google-style
|
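A usage sketch of this lazily-created property pattern; the `Carrier` and `Expensive` classes are hypothetical:

```python
class Expensive:
    instances = 0
    def __init__(self, label='default'):
        Expensive.instances += 1
        self.label = label

def define_lazy_property(carrier, name, cls, cls_args=None):
    instance_name = '__{}'.format(name)
    setattr(carrier, instance_name, None)
    def getter(self):
        instance = getattr(carrier, instance_name)
        if instance is None:
            instance = cls(**(cls_args or {}))
            setattr(carrier, instance_name, instance)
        return instance
    setattr(type(carrier), name, property(getter))

class Carrier:
    pass

c = Carrier()
define_lazy_property(c, 'service', Expensive, {'label': 'shared'})
assert Expensive.instances == 0      # nothing constructed yet
first, second = c.service, c.service
assert first is second               # constructed once, then reused
assert Expensive.instances == 1
```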
def power(self, n):
if n > 0:
return super().power(n)
return Choi(SuperOp(self).power(n))
|
The matrix power of the channel.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
Choi: the matrix power of the SuperOp converted to a Choi channel.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not an integer.
|
juraj-google-style
|
def set_cn_energies( self, cn_energies ):
for site in self.sites:
site.set_cn_occupation_energies( cn_energies[ site.label ] )
self.cn_energies = cn_energies
|
Set the coordination number dependent energies for this lattice.
Args:
cn_energies (Dict(Str:Dict(Int:Float))): Dictionary of dictionaries specifying the coordination number dependent energies for each site type. e.g.::
{ 'A' : { 0 : 0.0, 1 : 1.0, 2 : 2.0 }, 'B' : { 0 : 0.0, 1 : 2.0 } }
Returns:
None
|
juraj-google-style
|
def patch_index_to_coordinate(ul_idx: int, lr_idx: int, num_patches_per_side: int):
cell_size = 1.0 / num_patches_per_side
ul_x = ul_idx % num_patches_per_side
ul_y = ul_idx // num_patches_per_side
lr_x = lr_idx % num_patches_per_side
lr_y = lr_idx // num_patches_per_side
if ul_idx == lr_idx:
x1 = ul_x * cell_size
y1 = ul_y * cell_size
x2 = lr_x * cell_size + cell_size
y2 = lr_y * cell_size + cell_size
elif ul_x == lr_x or ul_y == lr_y:
x1 = ul_x * cell_size
y1 = ul_y * cell_size
x2 = lr_x * cell_size + cell_size
y2 = lr_y * cell_size + cell_size
else:
x1 = ul_x * cell_size + cell_size / 2
y1 = ul_y * cell_size + cell_size / 2
x2 = lr_x * cell_size + cell_size / 2
y2 = lr_y * cell_size + cell_size / 2
return (x1, y1, x2, y2)
|
Given a grid of length `num_patches_per_side` and the indices of the upper-left and lower-right corners of a
bounding box, returns the normalized coordinates of the bounding box, in the form (x1, y1, x2, y2).
Args:
ul_idx (`int`): the index of the grid cell that corresponds to the upper-left corner of the bounding box.
lr_idx (`int`): the index of the grid cell that corresponds to the lower-right corner of the bounding box.
num_patches_per_side (`int`): the number of patches along each side.
Returns:
`Tuple[float]`: the normalized coordinates of the bounding box, in the form (x1, y1, x2, y2).
|
github-repos
|
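A quick numeric walk-through on an assumed 4x4 grid, exercising the branch where the upper-left and lower-right indices differ in both row and column, so the box spans cell centers:

```python
# num_patches_per_side = 4, so cell_size = 0.25.
# Upper-left index 5  -> column 5 % 4 = 1, row 5 // 4 = 1
# Lower-right index 10 -> column 10 % 4 = 2, row 10 // 4 = 2
# Row and column both differ, so the box runs between cell centers:
#   x1 = 1 * 0.25 + 0.125 = 0.375      y1 = 0.375
#   x2 = 2 * 0.25 + 0.125 = 0.625      y2 = 0.625
assert patch_index_to_coordinate(5, 10, 4) == (0.375, 0.375, 0.625, 0.625)
```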
def mask_from_embedding(emb):
return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True))
|
Input embeddings -> padding mask.
We have hacked symbol_modality to return all-zero embeddings for padding.
Returns a mask with 0.0 in the padding positions and 1.0 elsewhere.
Args:
emb: a Tensor with shape [batch, width, height, depth].
Returns:
a 0.0/1.0 Tensor with shape [batch, width, height, 1].
|
codesearchnet
|
def bayesian_resnet(input_shape, num_classes=10, kernel_posterior_scale_mean=(- 9.0), kernel_posterior_scale_stddev=0.1, kernel_posterior_scale_constraint=0.2):
filters = [64, 128, 256, 512]
kernels = [3, 3, 3, 3]
strides = [1, 2, 2, 2]
def _untransformed_scale_constraint(t):
return tf.clip_by_value(t, (- 1000), tf.math.log(kernel_posterior_scale_constraint))
kernel_posterior_fn = tfp.layers.default_mean_field_normal_fn(untransformed_scale_initializer=tf.compat.v1.initializers.random_normal(mean=kernel_posterior_scale_mean, stddev=kernel_posterior_scale_stddev), untransformed_scale_constraint=_untransformed_scale_constraint)
image = tf.keras.layers.Input(shape=input_shape, dtype='float32')
x = tfp.layers.Convolution2DFlipout(64, 3, strides=1, padding='same', kernel_posterior_fn=kernel_posterior_fn)(image)
for i in range(len(kernels)):
x = _resnet_block(x, filters[i], kernels[i], strides[i], kernel_posterior_fn)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.AveragePooling2D(4, 1)(x)
x = tf.keras.layers.Flatten()(x)
x = tfp.layers.DenseFlipout(num_classes, kernel_posterior_fn=kernel_posterior_fn)(x)
model = tf.keras.Model(inputs=image, outputs=x, name='resnet18')
return model
|
Constructs a ResNet18 model.
Args:
input_shape: A `tuple` indicating the Tensor shape.
num_classes: `int` representing the number of class labels.
kernel_posterior_scale_mean: Python `int` number for the kernel
posterior's scale (log variance) mean. The smaller the mean the closer
is the initialization to a deterministic network.
kernel_posterior_scale_stddev: Python `float` number for the initial kernel
posterior's scale stddev.
```
q(W|x) ~ N(mu, var),
log_var ~ N(kernel_posterior_scale_mean, kernel_posterior_scale_stddev)
```
kernel_posterior_scale_constraint: Python `float` number for the log value
to constrain the log variance throughout training.
i.e. log_var <= log(kernel_posterior_scale_constraint).
Returns:
tf.keras.Model.
|
codesearchnet
|
def partial_derivative(self, X, y=0):
self.check_fit()
U, V = self.split_matrix(X)
if self.theta == 1:
return V
else:
t1 = np.power(-np.log(U), self.theta)
t2 = np.power(-np.log(V), self.theta)
p1 = self.cumulative_distribution(X)
p2 = np.power(t1 + t2, -1 + 1.0 / self.theta)
p3 = np.power(-np.log(V), self.theta - 1)
return np.divide(np.multiply(np.multiply(p1, p2), p3), V) - y
|
Compute partial derivative :math:`C(u|v)` of cumulative density.
Args:
X: `np.ndarray`
y: `float`
Returns:
|
juraj-google-style
|
def locate_point(nodes, x_val, y_val):
zero1 = _curve_helpers.full_reduce(nodes[[0], :]) - x_val
zero2 = _curve_helpers.full_reduce(nodes[[1], :]) - y_val
if zero1.shape[1] > zero2.shape[1]:
zero1, zero2 = zero2, zero1
if zero1.shape[1] == 1:
zero1, zero2 = zero2, zero1
power_basis1 = poly_to_power_basis(zero1[0, :])
all_roots = roots_in_unit_interval(power_basis1)
if all_roots.size == 0:
return None
power_basis2 = normalize_polynomial(poly_to_power_basis(zero2[0, :]))
near_zero = np.abs(polynomial.polyval(all_roots, power_basis2))
index = np.argmin(near_zero)
if near_zero[index] < _ZERO_THRESHOLD:
return all_roots[index]
return None
|
Find the parameter corresponding to a point on a curve.
.. note::
This assumes that the curve :math:`B(s, t)` defined by ``nodes``
lives in :math:`\mathbf{R}^2`.
Args:
nodes (numpy.ndarray): The nodes defining a B |eacute| zier curve.
x_val (float): The :math:`x`-coordinate of the point.
y_val (float): The :math:`y`-coordinate of the point.
Returns:
Optional[float]: The parameter on the curve (if it exists).
|
juraj-google-style
|
def _annotate_variable_ops(func, graph_def):
ph_shape_map = {}
for ph, var in zip(func.graph.internal_captures, func.variables):
ph_shape_map[ph.name] = var.shape
name_to_node = {node.name: node for node in graph_def.node}
for node in graph_def.node:
if node.op == 'ReadVariableOp' or node.op == 'ResourceGather':
node_ = node
while name_to_node[node_.input[0]].op == 'Identity':
node_ = name_to_node[node_.input[0]]
ph_name = node_.input[0] + ':0'
if ph_name in ph_shape_map:
shape = ph_shape_map[ph_name]
node.attr['_shape'].shape.CopyFrom(shape.as_proto())
else:
raise RuntimeError('Not found in the function captures: {}'.format(ph_name))
|
Annotates variable operations with custom `_shape` attribute.
This is required for the converters and shape inference. The graph
definition is modified in-place.
Args:
func: Function represented by the graph definition.
graph_def: Graph definition to be annotated in-place.
Raises:
RuntimeError: if some shapes cannot be annotated.
|
github-repos
|
def create(self, data, *args, **kwargs):
if (self.create.__func__.__module__ != self.__module__):
raise Exception('Child method not implemented')
self._MambuStruct__method = 'POST'
self._MambuStruct__data = data
self.connect(*args, **kwargs)
self._MambuStruct__method = 'GET'
self._MambuStruct__data = None
|
Creates an entity in Mambu
This method must be implemented in child classes
Args:
data (dictionary): dictionary with data to send, this dictionary
is specific for each Mambu entity
|
codesearchnet
|
def start_timer(self, timer_name):
self._timers[timer_name] = datetime.datetime.now()
|
Initializes a new timer.
Args:
timer_name: name of the timer to initialize, if not unique will reset
existing timer.
|
github-repos
|
def symbolic_heisenberg_eom(
self, X=None, noises=None, expand_simplify=True):
L, H = self.L, self.H
if X is None:
X = OperatorSymbol('X', hs=(L.space | H.space))
summands = [I * (H * X - X * H), ]
for Lk in L.matrix.ravel():
summands.append(adjoint(Lk) * X * Lk)
summands.append(-(adjoint(Lk) * Lk * X + X * adjoint(Lk) * Lk) / 2)
if noises is not None:
if not isinstance(noises, Matrix):
noises = Matrix(noises)
LambdaT = (noises.adjoint().transpose() * noises.transpose()).transpose()
assert noises.shape == L.shape
S = self.S
summands.append((adjoint(noises) * S.adjoint() * (X * L - L * X))
.expand()[0, 0])
summand = (((L.adjoint() * X - X * L.adjoint()) * S * noises)
.expand()[0, 0])
summands.append(summand)
if len(S.space & X.space):
comm = (S.adjoint() * X * S - X)
summands.append((comm * LambdaT).expand().trace())
ret = OperatorPlus.create(*summands)
if expand_simplify:
ret = ret.expand().simplify_scalar()
return ret
|
Compute the symbolic Heisenberg equations of motion of a system
operator X. If no X is given, an OperatorSymbol is created in its
place. If no noises are given, this corresponds to the
ensemble-averaged Heisenberg equation of motion.
Args:
X (Operator): A system operator
noises (Operator): A vector of noise inputs
Returns:
Operator: The RHS of the Heisenberg equations of motion of X.
|
juraj-google-style
|
def collective_dr_squared(self):
return sum(np.square(sum([atom.dr for atom in self.atoms])))
|
Squared sum of total displacements for these atoms.
Args:
None
Returns:
(Float): The square of the summed total displacements for these atoms.
|
codesearchnet
|
def is_pure_symbolic(x: Any) -> bool:
def _check_pure_symbolic(k, v, p):
del k, p
if isinstance(v, PureSymbolic) or (isinstance(v, Symbolic) and v.sym_puresymbolic):
return TraverseAction.STOP
else:
return TraverseAction.ENTER
return not traverse(x, _check_pure_symbolic)
|
Returns if the input value is pure symbolic.
Example::
class Bar(pg.PureSymbolic):
pass
@pg.symbolize
def foo(x, y):
pass
assert not pg.is_pure_symbolic(1)
assert not pg.is_pure_symbolic(foo(1, 2))
assert pg.is_pure_symbolic(Bar())
assert pg.is_pure_symbolic(foo(Bar(), 1))
assert pg.is_pure_symbolic(foo(pg.oneof([1, 2]), 1))
Args:
x: Value to query against.
Returns:
True if value itself is PureSymbolic or its child and nested
child fields contain PureSymbolic values.
|
github-repos
|
def add_permissions(self, grp_name, resource, permissions):
self.project_service.set_auth(self._token_project)
self.project_service.add_permissions(grp_name, resource, permissions)
|
Add additional permissions for the group associated with the resource.
Args:
grp_name (string): Name of group.
resource (intern.resource.boss.Resource): Identifies which data
model object to operate on.
permissions (list): List of permissions to add to the given resource
Raises:
requests.HTTPError on failure.
|
codesearchnet
|
def draw(self, time: float, frametime: float, target: moderngl.Framebuffer):
raise NotImplementedError('draw() is not implemented')
|
Draw function called by the system every frame when the effect is active.
This method raises ``NotImplementedError`` unless implemented.
Args:
time (float): The current time in seconds.
frametime (float): The time the previous frame used to render in seconds.
target (``moderngl.Framebuffer``): The target FBO for the effect.
|
codesearchnet
|
def create_switch(type, settings, pin):
switch = None
if type == "A":
group, device = settings.split(",")
switch = pi_switch.RCSwitchA(group, device)
elif type == "B":
addr, channel = settings.split(",")
addr = int(addr)
channel = int(channel)
switch = pi_switch.RCSwitchB(addr, channel)
elif type == "C":
family, group, device = settings.split(",")
group = int(group)
device = int(device)
switch = pi_switch.RCSwitchC(family, group, device)
elif type == "D":
group, device = settings.split(",")
device = int(device)
switch = pi_switch.RCSwitchD(group, device)
else:
print("Type %s is not supported!" % type)
sys.exit()
switch.enableTransmit(pin)
return switch
|
Create a switch.
Args:
type: (str): type of the switch [A,B,C,D]
settings (str): a comma-separated list
pin (int): wiringPi pin
Returns:
switch
|
juraj-google-style
|
def _add_doc_value(self, field_name: str, jsonpath: str) -> None:
path = self.origin_doc.etk.parse_json_path(jsonpath)
matches = path.find(self.origin_doc.value)
all_valid = True
invalid = []
for a_match in matches:
if a_match.value:
valid = self._add_value(field_name, a_match.value, provenance_path=str(a_match.full_path))
if not valid:
invalid.append(field_name + ":" + str(a_match.value))
all_valid = all_valid and valid
if not all_valid:
raise KgValueError("Some kg value type invalid according to schema: " + json.dumps(invalid))
|
Add a value to knowledge graph by giving a jsonpath
Args:
field_name: str
jsonpath: str
Returns:
|
juraj-google-style
|
def publish_state(self, state):
message = json.dumps({'state': {'reported': state}})
self.client.publish(self.topic, message)
self._state = state
|
Publish thing state to AWS IoT.
Args:
state (dict): object state. Must be JSON serializable (i.e., not
have circular references).
|
codesearchnet
|
def __init__(self, sess, grpc_debug_server_addresses, thread_name_filter=None, send_traceback_and_source_code=True):
def _gated_grpc_watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(debug_ops=['DebugIdentity(gated_grpc=true)'])
super().__init__(sess, grpc_debug_server_addresses, watch_fn=_gated_grpc_watch_fn, thread_name_filter=thread_name_filter)
self._send_traceback_and_source_code = send_traceback_and_source_code
self._sent_graph_version = -1
register_signal_handler()
|
Constructor of TensorBoardDebugWrapperSession.
Args:
sess: The `tf.compat.v1.Session` instance to be wrapped.
grpc_debug_server_addresses: gRPC address(es) of debug server(s), as a
`str` or a `list` of `str`s. E.g., "localhost:2333",
"grpc://localhost:2333", ["192.168.0.7:2333", "192.168.0.8:2333"].
thread_name_filter: Optional filter for thread names.
send_traceback_and_source_code: Whether traceback of graph elements and
the source code are to be sent to the debug server(s).
|
github-repos
|
def __init__(self, expr, weld_type, dim):
self.expr = expr
self.weld_type = weld_type
self.dim = dim
|
Summary
Args:
expr (TYPE): Description
weld_type (TYPE): Description
dim (TYPE): Description
|
juraj-google-style
|
def find_replace(obj, find, replace):
try:
if isinstance(obj, dict):
return {find_replace(key,find,replace): find_replace(value,find,replace) for key, value in obj.items()}
elif isinstance(obj, list):
return [find_replace(element,find,replace) for element in obj]
elif obj == find:
return unicode_convert(replace)
else:
try:
return unicode_convert(find_replace_string(obj, find, replace))
except:
return unicode_convert(obj)
except:
line, filename, synerror = trace()
raise ArcRestHelperError({
"function": "find_replace",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
pass
|
Searches an object and performs a find and replace.
Args:
obj (object): The object to iterate and find/replace.
find (str): The string to search for.
replace (str): The string to replace with.
Returns:
object: The object with replaced strings.
|
juraj-google-style
|
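A dependency-free sketch of the same recursive find-and-replace over nested containers; the original's unicode conversion and error wrapping are left out:

```python
def find_replace_simple(obj, find, replace):
    """Recursively replace `find` with `replace` in nested dicts, lists and strings."""
    if isinstance(obj, dict):
        return {find_replace_simple(k, find, replace): find_replace_simple(v, find, replace)
                for k, v in obj.items()}
    if isinstance(obj, list):
        return [find_replace_simple(item, find, replace) for item in obj]
    if obj == find:
        return replace
    if isinstance(obj, str):
        return obj.replace(find, replace)
    return obj

data = {'url': 'http://old.example.com/a', 'tags': ['old.example.com', 42]}
print(find_replace_simple(data, 'old.example.com', 'new.example.com'))
# {'url': 'http://new.example.com/a', 'tags': ['new.example.com', 42]}
```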
def mark_point(img, x, y):
overlay = img.copy()
output = img.copy()
alpha = 0.5
radius = max(5, min(img.shape[:2]) // 60)  # divisor assumed; the original expression was truncated
center = (int(x), int(y))
color = (0, 0, 255)
cv2.circle(overlay, center, radius, color, (- 1))
cv2.addWeighted(overlay, alpha, output, (1 - alpha), 0, output)
return output
|
Mark a point
Args:
- img(numpy): the source image
- x, y(int): position
|
codesearchnet
|
def load_graph_from_args(pipeline_name: str, framework: str, model: str, tokenizer: Optional[str]=None, **models_kwargs) -> Pipeline:
if tokenizer is None:
tokenizer = model
if framework == 'pt' and (not is_torch_available()):
raise Exception('Cannot convert because PyTorch is not installed. Please install torch first.')
if framework == 'tf' and (not is_tf_available()):
raise Exception('Cannot convert because TF is not installed. Please install tensorflow first.')
print(f'Loading pipeline (model: {model}, tokenizer: {tokenizer})')
return pipeline(pipeline_name, model=model, tokenizer=tokenizer, framework=framework, model_kwargs=models_kwargs)
|
Convert the set of arguments provided through the CLI to an actual pipeline reference (tokenizer + model).
Args:
pipeline_name: The kind of pipeline to use (ner, question-answering, etc.)
framework: The actual model to convert the pipeline from ("pt" or "tf")
model: The model name which will be loaded by the pipeline
tokenizer: The tokenizer name which will be loaded by the pipeline, default to the model's value
Returns: Pipeline object
|
github-repos
|
def __init__(self, e_pw, nsites, kappa=2.0, omega=0.5, mu=1.0,
freeparams=['kappa', 'omega', 'mu']):
_checkParam('e_pw', e_pw, self.PARAMLIMITS, self.PARAMTYPES)
self.e_pw = e_pw.copy()
self.phi = self._calculate_correctedF3X4()
assert scipy.allclose(self.phi.sum(axis = 1),\
scipy.ones(3, dtype='float'),atol=1e-4, rtol=5e-3),\
"The `phi` values do not sum to 1 for all `p`"
self.Phi_x = scipy.ones(N_CODON, dtype='float')
self._calculate_Phi_x()
self._nsites = nsites
assert self._nsites > 0, "There must be more than 1 site in the gene"
assert all(map(lambda x: x in self.ALLOWEDPARAMS, freeparams)),\
"Invalid entry in freeparams\nGot: {0}\nAllowed: {1}".format(
', '.join(freeparams), ', '.join(self.ALLOWEDPARAMS))
self._freeparams = list(freeparams)
self._mu = mu
self.kappa = kappa
self.omega = omega
for (name, value) in [('kappa', self.kappa), ('omega', self.omega),
('mu', self.mu)]:
_checkParam(name, value, self.PARAMLIMITS, self.PARAMTYPES)
self.Pxy = scipy.zeros((1, N_CODON, N_CODON), dtype='float')
self.Pxy_no_omega = scipy.zeros((1, N_CODON, N_CODON), dtype='float')
self.D = scipy.zeros((1, N_CODON), dtype='float')
self.A = scipy.zeros((1, N_CODON, N_CODON), dtype='float')
self.Ainv = scipy.zeros((1, N_CODON, N_CODON), dtype='float')
self.dPxy = {}
self.B = {}
for param in self.freeparams:
if param in self.ALLOWEDPARAMS:
self.dPxy[param] = scipy.zeros((1, N_CODON, N_CODON),
dtype='float')
self.B[param] = scipy.zeros((1, N_CODON, N_CODON),
dtype='float')
else:
raise ValueError("Unrecognized param {0}".format(param))
self._diag_indices = scipy.diag_indices(N_CODON)
self.updateParams({}, update_all=True)
|
Initialize an `YNGKP_M0` object.
Args:
`kappa`, `omega`, `mu`,
Model params described in main class doc string.
`freeparams` (list of strings)
Specifies free parameters.
`e_pw`, `nsites`
Meaning described in the main class doc string.
|
juraj-google-style
|
def _tf_restore_batch_dims(x, num_nonbatch_dims, prototype):
assert (x.shape.ndims == (1 + num_nonbatch_dims))
new_shape = (prototype.shape.as_list()[:(- num_nonbatch_dims)] + x.shape.as_list()[1:])
assert (None not in new_shape)
if (new_shape != x.shape.as_list()):
x = tf.reshape(x, new_shape)
return x
|
Reverse op of _tf_flatten_batch_dims.
Un-flatten the first dimension of x to match all but the last
num_nonbatch_dims dimensions of prototype.
Args:
x: a tf.Tensor with 1 + num_nonbatch_dims dimensions
num_nonbatch_dims: an integer
prototype: a tf.Tensor
Returns:
a tf.Tensor
|
codesearchnet
|
def browse_podcasts_genres(self):
response = self._call(mc_calls.PodcastBrowseHierarchy)
genres = response.body.get('groups', [])
return genres
|
Get the genres from the Podcasts browse tab dropdown.
Returns:
list: Genre groups that contain sub groups.
|
codesearchnet
|
def _maybe_download_corpora(tmp_dir):
mnli_filename = "MNLI.zip"
mnli_finalpath = os.path.join(tmp_dir, "MNLI")
if not tf.gfile.Exists(mnli_finalpath):
zip_filepath = generator_utils.maybe_download(
tmp_dir, mnli_filename, _MNLI_URL)
zip_ref = zipfile.ZipFile(zip_filepath, "r")
zip_ref.extractall(tmp_dir)
zip_ref.close()
return mnli_finalpath
|
Download corpora for multinli.
Args:
tmp_dir: a string
Returns:
a string
|
juraj-google-style
|
def variable_op_v2(shape, dtype, name='Variable', container='', shared_name=''):
return gen_state_ops.variable_v2(shape=shape, dtype=dtype, name=name, container=container, shared_name=shared_name)
|
Create a variable Operation.
See also variables.Variable.
Args:
shape: The shape of the tensor managed by this variable
dtype: The underlying type of the tensor values.
name: optional name to use for the variable op.
container: An optional string. Defaults to "".
If non-empty, this variable is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional string. Defaults to "".
If non-empty, this variable is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
Returns:
A variable tensor.
|
github-repos
|
def ApproximateDistanceBetweenPoints(pa, pb):
alat, alon = pa
blat, blon = pb
sa = transitfeed.Stop(lat=alat, lng=alon)
sb = transitfeed.Stop(lat=blat, lng=blon)
return transitfeed.ApproximateDistanceBetweenStops(sa, sb)
|
Finds the distance between two points on the Earth's surface.
This is an approximate distance based on assuming that the Earth is a sphere.
The points are specified by their latitude and longitude.
Args:
pa: the first (lat, lon) point tuple
pb: the second (lat, lon) point tuple
Returns:
The distance as a float in metres.
|
juraj-google-style
|
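The spherical approximation behind this helper can also be computed directly with the haversine formula; a standalone sketch assuming a 6371 km Earth radius:

```python
import math

def approx_distance_m(pa, pb, earth_radius_m=6371000.0):
    """Great-circle distance in metres between two (lat, lon) tuples via the haversine formula."""
    lat1, lon1 = map(math.radians, pa)
    lat2, lon2 = map(math.radians, pb)
    dlat, dlon = lat2 - lat1, lon2 - lon1
    a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
    return 2 * earth_radius_m * math.asin(math.sqrt(a))

# Roughly 111 km per degree of latitude.
print(round(approx_distance_m((0.0, 0.0), (1.0, 0.0))))  # -> 111195
```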
def custom_apply(self, path: utils.KeyPath, value_spec: pg_typing.ValueSpec, allow_partial: bool, child_transform: Optional[Callable[[utils.KeyPath, pg_typing.Field, Any], Any]]=None) -> Tuple[bool, Any]:
del path, value_spec, allow_partial, child_transform
return (False, self)
|
Custom apply on a value based on its original value spec.
This implements ``pg.pg_typing.CustomTyping``, allowing a pure symbolic
value to be assigned to any field. To customize this behavior, override
this method in subclasses.
Args:
path: KeyPath of current object under its object tree.
value_spec: Original value spec for this field.
allow_partial: Whether allow partial object to be created.
child_transform: Function to transform child node values into their final
values. Transform function is called on leaf nodes first, then on their
parents, recursively.
Returns:
A tuple (proceed_with_standard_apply, value_to_proceed).
If proceed_with_standard_apply is set to False, value_to_proceed
will be used as final value.
Raises:
Error when the value is not compatible with the value spec.
|
github-repos
|
def SafeReadBytes(self, length):
data = self.ReadBytes(length)
if (len(data) < length):
raise ValueError('Not enough data available')
else:
return data
|
Read exactly `length` number of bytes from the stream.
Raises:
ValueError: if not enough data is available
Returns:
bytes: `length` number of bytes
|
codesearchnet
|
def _example_from_allof(self, prop_spec):
example_dict = {}
for definition in prop_spec['allOf']:
update = self.get_example_from_prop_spec(definition, True)
example_dict.update(update)
return example_dict
|
Get the examples from an allOf section.
Args:
prop_spec: property specification you want an example of.
Returns:
An example dict
|
juraj-google-style
|
def postprocess_monograph(marc_xml, mods, uuid, counter, url):
dom = double_linked_dom(mods)
if (not isinstance(marc_xml, MARCXMLRecord)):
marc_xml = MARCXMLRecord(marc_xml)
add_missing_xml_attributes(dom, counter)
fix_invalid_type_parameter(dom)
if uuid:
add_uuid(dom, uuid)
add_marccountry_tag(dom)
add_genre(dom)
remove_hairs_from_tags(dom)
fix_issuance(dom)
fix_location_tag(dom)
fix_related_item_tag(dom)
fix_missing_electronic_locator_tag(dom, url)
fix_missing_lang_tags(marc_xml, dom)
return dom.prettify()
|
Fix bugs in `mods` produced by XSLT template.
Args:
marc_xml (str): Original Aleph record.
mods (str): XML string generated by XSLT template.
uuid (str): UUID of the package.
counter (int): Number of record, is added to XML headers.
url (str): URL of the publication (public or not).
Returns:
str: Updated XML.
|
codesearchnet
|
def g_step(self, gen_frames, fake_logits_stop):
hparam_to_gen_loss = {'least_squares': gan_losses.least_squares_generator_loss, 'cross_entropy': gan_losses.modified_generator_loss, 'wasserstein': gan_losses.wasserstein_generator_loss}
fake_logits = self.discriminator(gen_frames)
mean_fake_logits = tf.reduce_mean(fake_logits)
tf.summary.scalar('mean_fake_logits', mean_fake_logits)
generator_loss_func = hparam_to_gen_loss[self.hparams.gan_loss]
gan_g_loss_pos_d = generator_loss_func(discriminator_gen_outputs=fake_logits, add_summaries=True)
gan_g_loss_neg_d = (- generator_loss_func(discriminator_gen_outputs=fake_logits_stop, add_summaries=True))
return (gan_g_loss_pos_d, gan_g_loss_neg_d)
|
Performs the generator step in computing the GAN loss.
Args:
gen_frames: Generated frames
fake_logits_stop: Logits corresponding to the generated frames as per
the discriminator. Assumed to have a stop-gradient term.
Returns:
gan_g_loss_pos_d: Loss.
gan_g_loss_neg_d: -gan_g_loss_pos_d but with a stop gradient on generator.
|
codesearchnet
|
def verify(self, verify_locations: str) -> None:
with open(verify_locations):
pass
try:
self._ocsp_response.basic_verify(verify_locations)
except _nassl.OpenSSLError as e:
if 'certificate verify error' in str(e):
raise OcspResponseNotTrustedError(verify_locations)
raise
|
Verify that the OCSP response is trusted.
Args:
verify_locations: The file path to a trust store containing pem-formatted certificates, to be used for
validating the OCSP response.
Raises OcspResponseNotTrustedError if the validation failed, i.e. the OCSP response is not trusted.
|
juraj-google-style
|
def solve_sweep_structure(self, structures, sweep_param_list, filename='structure_n_effs.dat', plot=True, x_label='Structure number', fraction_mode_list=[]):
n_effs = []
mode_types = []
fractions_te = []
fractions_tm = []
for s in tqdm.tqdm(structures, ncols=70):
self.solve(s)
n_effs.append(np.real(self.n_effs))
mode_types.append(self._get_mode_types())
fractions_te.append(self.fraction_te)
fractions_tm.append(self.fraction_tm)
if filename:
self._write_n_effs_to_file(n_effs, (self._modes_directory + filename), sweep_param_list)
with open((self._modes_directory + 'mode_types.dat'), 'w') as fs:
header = ','.join((('Mode%i' % i) for (i, _) in enumerate(mode_types[0])))
fs.write((('# ' + header) + '\n'))  # header line (reconstructed; '#' prefix truncated in source)
for mt in mode_types:
txt = ','.join((('%s %.2f' % pair) for pair in mt))
fs.write((txt + '\n'))
with open((self._modes_directory + 'fraction_te.dat'), 'w') as fs:
header = 'fraction te'
fs.write((('# ' + header) + '\n'))  # header line (reconstructed)
for (param, fte) in zip(sweep_param_list, fractions_te):
txt = ('%.6f,' % param)
txt += ','.join((('%.2f' % f) for f in fte))
fs.write((txt + '\n'))
with open((self._modes_directory + 'fraction_tm.dat'), 'w') as fs:
header = 'fraction tm'
fs.write((('# ' + header) + '\n'))  # header line (reconstructed)
for (param, ftm) in zip(sweep_param_list, fractions_tm):
txt = ('%.6f,' % param)
txt += ','.join((('%.2f' % f) for f in ftm))
fs.write((txt + '\n'))
if plot:
if MPL:
title = ('$n_{eff}$ vs %s' % x_label)
y_label = '$n_{eff}$'
else:
title = ('n_{effs} vs %s' % x_label)
y_label = 'n_{eff}'
self._plot_n_effs((self._modes_directory + filename), (self._modes_directory + 'fraction_te.dat'), x_label, y_label, title)
title = ('TE Fraction vs %s' % x_label)
self._plot_fraction((self._modes_directory + 'fraction_te.dat'), x_label, 'TE Fraction [%]', title, fraction_mode_list)
title = ('TM Fraction vs %s' % x_label)
self._plot_fraction((self._modes_directory + 'fraction_tm.dat'), x_label, 'TM Fraction [%]', title, fraction_mode_list)
return n_effs
|
Find the modes of many structures.
Args:
structures (list): A list of `Structures` to find the modes
of.
sweep_param_list (list): A list of the swept parameter values
that were used. This is for plotting purposes only.
filename (str): The nominal filename to use when saving the
effective indices. Defaults to 'structure_n_effs.dat'.
plot (bool): `True` if plots should be generated,
otherwise `False`. Default is `True`.
x_label (str): x-axis text to display in the plot.
fraction_mode_list (list): A list of mode indices of the modes
that should be included in the TE/TM mode fraction plot.
If the list is empty, all modes will be included. The list
is empty by default.
Returns:
list: A list of the effective indices found for each structure.
|
codesearchnet
|
def __init__(self, plot_type, fields=None, win=None, env=None, opts={}, port=8097, server="localhost", name=None):
super(VisdomPlotLogger, self).__init__(fields, win, env, opts, port, server)
valid_plot_types = {
"scatter": self.viz.scatter,
"line": self.viz.line}
self.plot_type = plot_type
if plot_type not in valid_plot_types.keys():
raise ValueError("plot_type \'{}\' not found. Must be one of {}".format(
plot_type, valid_plot_types.keys()))
self.chart = valid_plot_types[plot_type]
|
Multiple lines can be added to the same plot with the "name" attribute (see example)
Args:
fields: Currently unused
plot_type: {scatter, line}
Examples:
>>> scatter_logger = VisdomPlotLogger('line')
>>> scatter_logger.log(stats['epoch'], loss_meter.value()[0], name="train")
>>> scatter_logger.log(stats['epoch'], loss_meter.value()[0], name="test")
|
juraj-google-style
|
def save_results(vcs, signature, result_path, patterns):
results_directory = _get_results_directory(vcs, signature)
if (not os.path.exists(results_directory)):
os.makedirs(results_directory)
with open(os.path.join(results_directory, 'patterns'), 'w') as f:
f.write('\n'.join(patterns))
if (not os.path.exists(os.path.join(results_directory, 'results'))):
os.mkdir(os.path.join(results_directory, 'results'))
includes = ['--include={}'.format(x) for x in patterns]
cmd = ((['rsync', '-r'] + includes) + ['--exclude=*', os.path.join(result_path, ''), os.path.join(results_directory, 'results', '')])
subprocess.check_call(cmd)
|
Save results matching `patterns` at `result_path`.
Args:
vcs (easyci.vcs.base.Vcs) - the VCS object for the actual project
(not the disposable copy)
signature (str) - the project state signature
result_path (str) - the path containing the result, usually
a disposable copy of the project
patterns (list of str) - `rsync`-compatible patterns matching test results
to save.
|
codesearchnet
|
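The heart of `save_results` is how it turns the patterns into an `rsync` command; a runnable sketch of just that step, with illustrative paths and patterns:
```python
import os

patterns = ['*.xml', 'htmlcov/*']          # illustrative rsync-compatible patterns
result_path = '/tmp/project_copy'          # hypothetical disposable copy of the project
results_directory = '/tmp/results/abc123'  # hypothetical per-signature results directory

includes = ['--include={}'.format(x) for x in patterns]
cmd = (['rsync', '-r'] + includes +
       ['--exclude=*',
        os.path.join(result_path, ''),
        os.path.join(results_directory, 'results', '')])
print(' '.join(cmd))
# rsync -r --include=*.xml --include=htmlcov/* --exclude=* /tmp/project_copy/ /tmp/results/abc123/results/
```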
def set_default_by_alias(self, alias):
if (alias not in self._aliases):
raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))
self._default_index = self._aliases[alias]
|
Set the default dataset by its alias.
After changing the default dataset, all calls without explicitly specifying the
dataset by index or alias will be redirected to this dataset.
Args:
alias (str): The alias of the dataset that should be made the default.
Raises:
DataInvalidAlias: If the alias does not represent a valid dataset.
|
codesearchnet
|
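A minimal sketch of the alias lookup in `set_default_by_alias`; the `DataStore` container below is a hypothetical stand-in, not the real class.
```python
class DataInvalidAlias(Exception):
    pass

# Hypothetical container mirroring the alias/index bookkeeping used above.
class DataStore(object):
    def __init__(self):
        self._aliases = {'train': 0, 'validation': 1}
        self._default_index = 0

    def set_default_by_alias(self, alias):
        if alias not in self._aliases:
            raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))
        self._default_index = self._aliases[alias]

store = DataStore()
store.set_default_by_alias('validation')
print(store._default_index)  # 1
# store.set_default_by_alias('test') would raise DataInvalidAlias.
```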
def get_reverse_dependency_tree(package_name, depth=None, paths=None, build_requires=False, private_build_requires=False):
pkgs_list = [[package_name]]
g = digraph()
g.add_node(package_name)
it = iter_package_families(paths)
package_names = set((x.name for x in it))
if (package_name not in package_names):
raise PackageFamilyNotFoundError(('No such package family %r' % package_name))
if (depth == 0):
return (pkgs_list, g)
bar = ProgressBar('Searching', len(package_names))
lookup = defaultdict(set)
for (i, package_name_) in enumerate(package_names):
it = iter_packages(name=package_name_, paths=paths)
packages = list(it)
if (not packages):
continue
pkg = max(packages, key=(lambda x: x.version))
requires = []
for variant in pkg.iter_variants():
pbr = (private_build_requires and (pkg.name == package_name))
requires += variant.get_requires(build_requires=build_requires, private_build_requires=pbr)
for req in requires:
if (not req.conflict):
lookup[req.name].add(package_name_)
bar.next()
bar.finish()
n = 0
consumed = set([package_name])
working_set = set([package_name])
node_color = '#F6F6F6'  # light grey node fill (value reconstructed; original truncated)
node_fontsize = 10
node_attrs = [('fillcolor', node_color), ('style', 'filled'), ('fontsize', node_fontsize)]
while (working_set and ((depth is None) or (n < depth))):
working_set_ = set()
for child in working_set:
parents = (lookup[child] - consumed)
working_set_.update(parents)
consumed.update(parents)
for parent in parents:
g.add_node(parent, attrs=node_attrs)
g.add_edge((parent, child))
if working_set_:
pkgs_list.append(sorted(list(working_set_)))
working_set = working_set_
n += 1
return (pkgs_list, g)
|
Find packages that depend on the given package.
This is a reverse dependency lookup. A tree is constructed, showing what
packages depend on the given package, with an optional depth limit. A
resolve does not occur. Only the latest version of each package is used,
and requirements from all variants of that package are used.
Args:
package_name (str): Name of the package depended on.
depth (int): Tree depth limit, unlimited if None.
paths (list of str): paths to search for packages, defaults to
`config.packages_path`.
build_requires (bool): If True, includes packages' build_requires.
private_build_requires (bool): If True, include `package_name`'s
private_build_requires.
Returns:
A 2-tuple:
- (list of list of str): Lists of package names, where each list is a
single depth in the tree. The first list is always [`package_name`].
- `pygraph.digraph` object, where nodes are package names, and
`package_name` is always the leaf node.
|
codesearchnet
|
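A hedged usage sketch for `get_reverse_dependency_tree`, assuming a rez environment with packages installed under `config.packages_path`; the package name and depth are illustrative.
```python
# Assumes the function above is importable and packages exist on disk.
pkgs_list, graph = get_reverse_dependency_tree('python', depth=2)

for level, names in enumerate(pkgs_list):
    print('depth {}: {}'.format(level, ', '.join(names)))
# depth 0 is always ['python']; deeper levels list the packages that require it.
```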
def get_models_in_diff():
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(f'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('utf-8').split()
relevant_modified_files = [x for x in modified_files if '/models/' in x and x.endswith('.py')]
model_names = set()
for file_path in relevant_modified_files:
model_name = file_path.split('/')[-2]
model_names.add(model_name)
return model_names
|
Finds all models that have been modified in the diff.
Returns:
A set containing the names of the models that have been modified (e.g. {'llama', 'whisper'}).
|
github-repos
|
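A short sketch of the git plumbing `get_models_in_diff` relies on, runnable in any checkout that has a `main` branch; the `/models/` path filter is the part specific to the repository layout.
```python
import subprocess

# Same two git calls as above: find the fork point, then list files changed since it.
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8').strip()
modified_files = subprocess.check_output(
    f'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('utf-8').split()

# Keep the parent directory name of changed .py files under a models/ folder,
# e.g. src/transformers/models/llama/modeling_llama.py -> 'llama'.
model_names = {path.split('/')[-2] for path in modified_files
               if '/models/' in path and path.endswith('.py')}
print(model_names)
```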
def get(self, key, value):
if (key == 'id'):
response = self._swimlane.request('get', 'app/{0}/record/{1}'.format(self._app.id, value))
return Record(self._app, response.json())
if (key == 'tracking_id'):
response = self._swimlane.request('get', 'app/{0}/record/tracking/{1}'.format(self._app.id, value))
return Record(self._app, response.json())
|
Get a single record by id
Supports resource cache
.. versionchanged:: 2.17.0
Added option to retrieve record by tracking_id
Keyword Args:
id (str): Full record ID
tracking_id (str): Record Tracking ID
Returns:
Record: Matching Record instance returned from API
Raises:
TypeError: No id argument provided
|
codesearchnet
|
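A brief usage sketch for the `get` method above; it is not runnable standalone, since `records` is assumed to be the record adapter of a connected swimlane App, and the IDs are placeholders.
```python
# `records` is assumed to be the adapter the method above belongs to,
# e.g. the records attribute of a connected swimlane App; IDs are placeholders.
record = records.get('id', 'aJvdPcXkGzoJCe5p1')
same_record = records.get('tracking_id', 'APP-1234')
print(record.id, same_record.id)
```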
def deregister_instances(name, instances, region=None, key=None, keyid=None, profile=None):
if (isinstance(instances, six.string_types) or isinstance(instances, six.text_type)):
instances = [instances]
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
registered_instances = conn.deregister_instances(name, instances)
except boto.exception.BotoServerError as error:
if (error.error_code == 'InvalidInstance'):
log.warning('One or more of instance(s) %s are not part of ELB %s. deregister_instances not performed.', instances, name)
return None
else:
log.warning(error)
return False
registered_instance_ids = [instance.id for instance in registered_instances]
deregister_failures = set(instances).intersection(set(registered_instance_ids))
if deregister_failures:
log.warning('Instance(s): %s not deregistered from ELB %s.', list(deregister_failures), name)
deregister_result = False
else:
deregister_result = True
return deregister_result
|
Deregister instances with an ELB. Instances is either a string
instance id or a list of string instance id's.
Returns:
- ``True``: instance(s) deregistered successfully
- ``False``: instance(s) failed to be deregistered
- ``None``: instance(s) not valid or not registered, no action taken
CLI example:
.. code-block:: bash
salt myminion boto_elb.deregister_instances myelb instance_id
salt myminion boto_elb.deregister_instances myelb "[instance_id, instance_id]"
|
codesearchnet
|
def max_variance_genes(data, nbins=5, frac=0.2):
indices = []
if sparse.issparse(data):
means, var = sparse_mean_var(data)
else:
means = data.mean(1)
var = data.var(1)
mean_indices = means.argsort()
n_elements = int(data.shape[0]/nbins)
frac_elements = int(n_elements*frac)
for i in range(nbins):
bin_i = mean_indices[i*n_elements : (i+1)*n_elements]
if i==nbins-1:
bin_i = mean_indices[i*n_elements :]
var_i = var[bin_i]
var_sorted = var_i.argsort()
top_var_indices = var_sorted[len(bin_i) - frac_elements:]
ind = bin_i[top_var_indices]
ind = [index for index in ind if var[index]>0]
indices.extend(ind)
return indices
|
This function identifies the genes that have the max variance
across a number of bins sorted by mean.
Args:
data (array): genes x cells
nbins (int): number of bins to sort genes by mean expression level. Default: 5.
frac (float): fraction of genes to return per bin - between 0 and 1. Default: 0.2.
Returns:
list of gene indices (list of ints)
|
juraj-google-style
|
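A runnable usage sketch for `max_variance_genes`, assuming the function above is in scope; the random Poisson matrix stands in for a genes x cells count matrix.
```python
import numpy as np

np.random.seed(0)
data = np.random.poisson(lam=2.0, size=(100, 50))  # synthetic 100 genes x 50 cells

indices = max_variance_genes(data, nbins=5, frac=0.2)
print(len(indices))          # about 20% of the 100 genes, minus zero-variance ones
print(sorted(indices)[:5])   # row (gene) indices into `data`
```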
def schema_keys(schema):
def _get_leaf(value):
if isinstance(value, Schema):
return _get_leaf(value._schema)
return value
keys = set()
dict_ = schema._schema
assert isinstance(dict_, dict)
for key in dict_.iterkeys():
key_ = _get_leaf(key)
if isinstance(key_, basestring):
keys.add(key_)
return keys
|
Get the string values of keys in a dict-based schema.
Non-string keys are ignored.
Returns:
Set of string keys of a schema which is in the form (eg):
schema = Schema({Required("foo"): int,
Optional("bah"): basestring})
|
codesearchnet
|
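A usage sketch following the docstring's own example; it assumes the keleshev-style `schema` package (where `Optional` keys are themselves `Schema` subclasses), and, like the snippet above, Python 2 semantics (`basestring`, `iterkeys`).
```python
from schema import Schema, Optional  # assumed package; the snippet appears to target it

schema = Schema({"foo": int,
                 Optional("bah"): str})

print(schema_keys(schema))  # set(['foo', 'bah'])
```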
def floodlight_rows(config, task: dict, report_id: int) -> Generator[list[str, str, str, str, str, str, int], None, None]:
filename, report = report_file(config, task['auth'], task['account'], report_id, None, 10)
rows = report_to_rows(report)
rows = report_clean(rows)
rows = rows_header_trim(rows)
rows = rows_to_type(rows, column=6)
return rows
|
Monitor a report for completion and return rows
Args:
report_id - the report created earlier for a specific floodlight id.
Returns:
A stream of rows, see FLOODLIGHT_* constants for definitions.
|
github-repos
|
def AddBudget(self, client_customer_id, micro_amount):
self.client.SetClientCustomerId(client_customer_id)
budget_service = self.client.GetService('BudgetService')
operations = [{
'operator': 'ADD',
'operand': {
'name': 'Budget #%s' % uuid.uuid4(),  # unique name; suffix reconstructed (original truncated at '#')
'amount': {
'microAmount': micro_amount
},
'deliveryMethod': 'STANDARD'
}
}]
return budget_service.mutate(operations)['value'][0]['budgetId']
|
Create a new Budget with the given microAmount.
Args:
client_customer_id: str Client Customer Id used to create Budget.
micro_amount: str The budget represented in micros.
Returns:
str BudgetId of the newly created Budget.
|
juraj-google-style
|
def rolldim(P, n=1):
dim = P.dim
shape = P.shape
dtype = P.dtype
A = dict((((key[n:] + key[:n]), P.A[key]) for key in P.keys))
return Poly(A, dim, shape, dtype)
|
Roll the axes.
Args:
P (Poly) : Input polynomial.
n (int) : The axis that after rolling becomes the 0th axis.
Returns:
(Poly) : Polynomial with new axis configuration.
Examples:
>>> x,y,z = variable(3)
>>> P = x*x*x + y*y + z
>>> print(P)
q0^3+q1^2+q2
>>> print(rolldim(P))
q0^2+q2^3+q1
|
codesearchnet
|
def retrieve(url):
try:
pem_data = urlopen(url).read()
except (ValueError, HTTPError):
warnings.warn('Certificate URL is invalid.')
return False
if (sys.version >= '3'):
try:
pem_data = pem_data.decode()
except UnicodeDecodeError:
warnings.warn('Certificate encoding is not utf-8.')
return False
return _parse_pem_data(pem_data)
|
Retrieve and parse PEM-encoded X.509 certificate chain.
See `validate.request` for additional info.
Args:
url: str. SignatureCertChainUrl header value sent by request.
Returns:
list or bool: If url is valid, returns the certificate chain as a list
of cryptography.hazmat.backends.openssl.x509._Certificate
certificates where certs[0] is the first certificate in the file; if
url is invalid, returns False.
|
codesearchnet
|
def push(self, is_building_function, enter_context_fn, device_stack):
self.stack.append(ContextSwitch(is_building_function, enter_context_fn, device_stack))
|
Push metadata about a context switch onto the stack.
A context switch can take any one of the two forms: installing a graph as
the default graph, or entering the eager context. For each context switch,
we record whether or not the entered context is building a function.
Args:
is_building_function: (bool.) Whether the context is building a function.
enter_context_fn: (function.) A callable that executes the context switch.
For example, `graph.as_default` or `eager_mode`.
device_stack: If applicable, the device function stack for this graph.
When breaking out of graphs in init_scope, the innermost nonempty device
stack is used. Eager contexts put `None` here and the value is never
used.
|
github-repos
|
def from_config(cls, config):
if 'dtype' in config and isinstance(config['dtype'], dict):
config = config.copy()
policy = dtype_policies.deserialize(config['dtype'])
if not isinstance(policy, dtype_policies.DTypePolicyMap) and policy.quantization_mode is None:
policy = policy.name
config['dtype'] = policy
try:
return cls(**config)
except Exception as e:
raise TypeError(f"Error when deserializing class '{cls.__name__}' using config={config}.\n\nException encountered: {e}")
|
Creates an operation from its config.
This method is the reverse of `get_config`, capable of instantiating the
same operation from the config dictionary.
Note: If you override this method, you might receive a serialized dtype
config, which is a `dict`. You can deserialize it as follows:
```python
if "dtype" in config and isinstance(config["dtype"], dict):
policy = dtype_policies.deserialize(config["dtype"])
```
Args:
config: A Python dictionary, typically the output of `get_config`.
Returns:
An operation instance.
|
github-repos
|
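A small runnable sketch of the round trip `from_config` enables, using a plain class rather than a real Keras operation (an assumption for brevity); real operations additionally deserialize dict-valued `dtype` policies as shown above.
```python
class Scale:
    def __init__(self, factor=1.0, dtype='float32'):
        self.factor = factor
        self.dtype = dtype

    def get_config(self):
        return {'factor': self.factor, 'dtype': self.dtype}

    @classmethod
    def from_config(cls, config):
        try:
            return cls(**config)
        except Exception as e:
            raise TypeError(f"Error when deserializing class '{cls.__name__}' using config={config}.\n\nException encountered: {e}")

op = Scale(factor=2.0)
clone = Scale.from_config(op.get_config())
print(clone.factor, clone.dtype)  # 2.0 float32
```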
def get_missing_simulations(self, param_list, runs=None):
params_to_simulate = []
if runs is not None:
next_runs = self.db.get_next_rngruns()
available_params = [r['params'] for r in self.db.get_results()]
for param_comb in param_list:
needed_runs = runs
for i, p in enumerate(available_params):
if param_comb == {k: p[k] for k in p.keys() if k != "RngRun"}:
needed_runs -= 1
new_param_combs = []
for needed_run in range(needed_runs):
new_param = deepcopy(param_comb)
new_param['RngRun'] = next(next_runs)
new_param_combs += [new_param]
params_to_simulate += new_param_combs
else:
for param_comb in param_list:
if not self.db.get_results(param_comb):
params_to_simulate += [param_comb]
return params_to_simulate
|
Return a list of the simulations among the required ones that are not
available in the database.
Args:
param_list (list): a list of dictionaries containing all the
parameters combinations.
runs (int): an integer representing how many repetitions are wanted
for each parameter combination, None if the dictionaries in
param_list already feature the desired RngRun value.
|
juraj-google-style
|
def _store_checkpoint(self, sess, saver, global_step):
if ((not self._logdir) or (not saver)):
return
tf.gfile.MakeDirs(self._logdir)
filename = os.path.join(self._logdir, 'model.ckpt')
saver.save(sess, filename, global_step)
|
Store a checkpoint if a log directory was provided to the constructor.
The directory will be created if needed.
Args:
sess: Session containing variables to store.
saver: Saver used for checkpointing.
global_step: Step number of the checkpoint name.
|
codesearchnet
|
def create(cls, env, filenames, trim=False):
import_graph = cls(env)
for filename in filenames:
import_graph.add_file_recursive(os.path.abspath(filename), trim)
import_graph.build()
return import_graph
|
Create and return a final graph.
Args:
env: An environment.Environment object
filenames: A list of filenames
trim: Whether to trim the dependencies of builtin and system files.
Returns:
An immutable ImportGraph with the recursive dependencies of all the
files in filenames
|
juraj-google-style
|
def console_set_background_flag(con: tcod.console.Console, flag: int) -> None:
lib.TCOD_console_set_background_flag(_console(con), flag)
|
Change the default blend mode for this console.
Args:
con (Console): Any Console instance.
flag (int): Blend mode to use by default.
.. deprecated:: 8.5
Set :any:`Console.default_bg_blend` instead.
|
codesearchnet
|
def multivariate_ess(samples, batch_size_generator=None):
samples_generator = _get_sample_generator(samples)
return np.array(multiprocess_mapping(_MultivariateESSMultiProcessing(batch_size_generator), samples_generator()))
|
r"""Estimate the multivariate Effective Sample Size for the samples of every problem.
This essentially applies :func:`estimate_multivariate_ess` to every problem.
Args:
samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and
n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally,
a generator function that yields sample arrays of shape (p, n).
batch_size_generator (MultiVariateESSBatchSizeGenerator): the batch size generator, tells us how many
batches and of which size we use in estimating the minimum ESS.
Returns:
ndarray: the multivariate ESS per problem
|
codesearchnet
|
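A hedged usage sketch for `multivariate_ess`, assuming the function above (and its sampling-statistics dependencies) is importable; the array shape follows the docstring's (d, p, n) convention.
```python
import numpy as np

np.random.seed(0)
# 3 problems, 4 parameters, 1000 samples each: the (d, p, n) layout from the docstring.
samples = np.random.randn(3, 4, 1000)

ess_per_problem = multivariate_ess(samples)  # assumes the function above is in scope
print(ess_per_problem.shape)                 # (3,)
```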
def get_test_configs():
test_configs = [('NHWC', False), ('NHWC', True)]
return test_configs
|
Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
|
github-repos
|