code (string, lengths 20 to 4.93k) | docstring (string, lengths 33 to 1.27k) | source (string, 3 classes) |
---|---|---|
def heightmap_lerp_hm(
hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray, coef: float
) -> None:
lib.TCOD_heightmap_lerp_hm(
_heightmap_cdata(hm1),
_heightmap_cdata(hm2),
_heightmap_cdata(hm3),
coef,
)
|
Perform linear interpolation between two heightmaps, storing the result
in ``hm3``.
This is the same as doing ``hm3[:] = hm1[:] + (hm2[:] - hm1[:]) * coef``
Args:
hm1 (numpy.ndarray): The first heightmap.
hm2 (numpy.ndarray): The second heightmap.
hm3 (numpy.ndarray): A destination heightmap to store the result.
coef (float): The linear interpolation coefficient.
|
juraj-google-style
|
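A minimal NumPy-only sketch of the same interpolation, useful for checking results without the libtcod bindings; the array shapes and the out-of-place return value are illustrative assumptions:
import numpy as np

def lerp_heightmaps(hm1: np.ndarray, hm2: np.ndarray, coef: float) -> np.ndarray:
    # Equivalent to hm3[:] = hm1 + (hm2 - hm1) * coef, but returns a new array.
    return hm1 + (hm2 - hm1) * coef

hm1 = np.zeros((4, 4), dtype=np.float32)
hm2 = np.ones((4, 4), dtype=np.float32)
print(lerp_heightmaps(hm1, hm2, 0.25))  # every cell is 0.25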
def replace_punctuation(self, text, excluded=None, replacement=''):
if (excluded is None):
excluded = set()
elif (not isinstance(excluded, set)):
excluded = set(excluded)
punct = ''.join(self.__punctuation.difference(excluded))
return self.replace_characters(text, characters=punct, replacement=replacement)
|
Replace punctuation symbols in text.
Removes punctuation from input text or replaces them with a
string if specified. Characters replaced will be those
in string.punctuation.
Args:
text: The text to be processed.
excluded: Set of characters to exclude.
replacement: New text that will replace punctuation.
Returns:
The text without punctuation.
|
codesearchnet
|
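A standalone sketch of the same idea using only the standard library; here `string.punctuation` stands in for the class's private `__punctuation` set, which is an assumption about its contents:
import string

def replace_punctuation(text, excluded=None, replacement=''):
    excluded = set(excluded or ())
    punct = set(string.punctuation) - excluded
    return ''.join(replacement if ch in punct else ch for ch in text)

print(replace_punctuation("Hi, there! (ok)", excluded={'!'}))  # "Hi there! ok"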
def __init__(self, type_, value):
self.type_ = type_
self.value = value
super(CastError, self).__init__(
'Unable to cast "{}" to {}.'.format(value, type_.__name__))
|
Instantiate the exception with a descriptive message.
Args:
type_: The type to which the cast was attempting to convert the
value.
value: The value that was attempted to be cast.
|
juraj-google-style
|
def add_dspam_headers(self, results):
for header in self.headers:
hname = (self.header_prefix + header)
if (header.lower() in results):
hvalue = results[header.lower()]
logger.debug('<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
elif (header == 'Processed'):
hvalue = datetime.datetime.now().strftime('%a %b %d %H:%M:%S %Y')
logger.debug('<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
else:
logger.warning('<{}> Not adding header {}, no data available in DSPAM results'.format(self.id, hname))
|
Format DSPAM headers with passed results, and add them to the message.
Args:
results -- A results dictionary from DspamClient.
|
codesearchnet
|
def __init__(self, trainer_id):
if not trainer_id:
raise ValueError('tf.data service cross-trainer cache requires a non-empty trainer ID.')
self.trainer_id = trainer_id
|
Constructs a CrossTrainerCache.
Args:
trainer_id: Each training job has a unique ID. Once a job has consumed
data, the data remains in the cache and is re-used by jobs with different
`trainer_id`s. Requests with the same `trainer_id` do not re-use data.
Raises:
ValueError if `trainer_id` is empty.
|
github-repos
|
def __init__(self, add_tag_methods=None):
super(PacketTags, self).__init__()
self.tag_methods = [PacketTags._tag_net_direction, PacketTags._tag_nxdomain]
if add_tag_methods:
self.tag_methods += add_tag_methods
self.output_stream = self.tag_stuff()
|
Initialize PacketTags Class
Args:
add_tag_methods: a list of additional tag methods (optional, defaults to None)
Note: all methods must take the data dictionary as an argument (e.g. tag_method(data))
|
juraj-google-style
|
def _shadow_model_variables(shadow_vars):
G = tf.get_default_graph()
curr_shadow_vars = set([v.name for v in shadow_vars])
model_vars = tf.model_variables()
shadow_model_vars = []
for v in model_vars:
assert v.name.startswith('tower'), 'Found some MODEL_VARIABLES created outside of the tower function!'
(stripped_op_name, stripped_var_name) = get_op_tensor_name(re.sub('^tower[0-9]+/', '', v.name))
if (stripped_op_name in curr_shadow_vars):
continue
try:
G.get_tensor_by_name(stripped_var_name)
logger.warn('Model Variable {} also appears in other collections.'.format(stripped_var_name))
continue
except KeyError:
pass
new_v = tf.get_variable(stripped_op_name, dtype=v.dtype.base_dtype, initializer=v.initial_value, trainable=False)
curr_shadow_vars.add(stripped_op_name)
shadow_vars.append(new_v)
shadow_model_vars.append((new_v, v))
return shadow_model_vars
|
Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``.
Returns:
list of (shadow_model_var, local_model_var) used for syncing.
|
codesearchnet
|
def to_view(self, view_name):
from . import _view
return _view.View(view_name, self._context).create(self._sql)
|
Create a View from this Query.
Args:
view_name: the name of the View either as a string or a 3-part tuple
(projectid, datasetid, name).
Returns:
A View for the Query.
|
codesearchnet
|
def trace_set_buffer_capacity(self, size):
cmd = enums.JLinkTraceCommand.SET_CAPACITY
data = ctypes.c_uint32(size)
res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))
if (res == 1):
raise errors.JLinkException('Failed to set trace buffer size.')
return None
|
Sets the capacity for the trace buffer.
Args:
self (JLink): the ``JLink`` instance.
size (int): the new capacity for the trace buffer.
Returns:
``None``
|
codesearchnet
|
def get_attribute_from_config(config, section, attribute):
section = config.get(section)
if section:
option = section.get(attribute)
if option:
return option
raise ConfigurationError("Config file badly formed!\nFailed to get attribute '{}' from section '{}'!".format(attribute, section))
|
Try to parse an attribute of the config file.
Args:
config (defaultdict): A defaultdict.
section (str): The section of the config file to get information from.
attribute (str): The attribute of the section to fetch.
Returns:
str: The string corresponding to the section and attribute.
Raises:
ConfigurationError
|
codesearchnet
|
def Verify(self, mempool):
logger.info("Verifying transaction: %s " % self.Hash.ToBytes())
return Helper.VerifyScripts(self)
|
Verify the transaction.
Args:
mempool:
Returns:
bool: True if verified. False otherwise.
|
juraj-google-style
|
def record_factory(app, fields=None):
record = Record(app, {
'$type': Record._type,
'isNew': True,
'applicationId': app.id,
'comments': {
'$type': 'System.Collections.Generic.Dictionary`2[[System.String, mscorlib],[System.Collections.Generic.List`1[[Core.Models.Record.Comments, Core]], mscorlib]], mscorlib'
},
'values': {
'$type': 'System.Collections.Generic.Dictionary`2[[System.String, mscorlib],[System.Object, mscorlib]], mscorlib'
}
})
fields = fields or {}
for name, value in six.iteritems(fields):
record[name] = value
copy_raw = copy.copy(record._raw)
values_dict = {}
for key, value in six.iteritems(copy_raw['values']):
if value is not None:
values_dict[key] = value
record._raw['values'] = values_dict
return record
|
Return a temporary Record instance to be used for field validation and value parsing
Args:
app (App): Target App to create a transient Record instance for
fields (dict): Optional dict of fields and values to set on new Record instance before returning
Returns:
Record: Unsaved Record instance to be used for validation, creation, etc.
|
juraj-google-style
|
def parse_binary_descriptor(bindata):
func_names = {0: 'copy_latest_a', 1: 'average_a', 2: 'copy_all_a', 3: 'sum_a', 4: 'copy_count_a', 5: 'trigger_streamer', 6: 'call_rpc', 7: 'subtract_afromb'}
if (len(bindata) != 20):
raise ArgumentError('Invalid binary node descriptor with incorrect size', size=len(bindata), expected=20, bindata=bindata)
(a_trig, b_trig, stream_id, a_id, b_id, proc, a_cond, b_cond, trig_combiner) = struct.unpack('<LLHHHBBBB2x', bindata)
node_stream = DataStream.FromEncoded(stream_id)
if (a_id == 65535):
raise ArgumentError('Invalid binary node descriptor with invalid first input', input_selector=a_id)
a_selector = DataStreamSelector.FromEncoded(a_id)
a_trigger = _process_binary_trigger(a_trig, a_cond)
b_selector = None
b_trigger = None
if (b_id != 65535):
b_selector = DataStreamSelector.FromEncoded(b_id)
b_trigger = _process_binary_trigger(b_trig, b_cond)
if (trig_combiner == SGNode.AndTriggerCombiner):
comb = '&&'
elif (trig_combiner == SGNode.OrTriggerCombiner):
comb = '||'
else:
raise ArgumentError('Invalid trigger combiner in binary node descriptor', combiner=trig_combiner)
if (proc not in func_names):
raise ArgumentError('Unknown processing function', function_id=proc, known_functions=func_names)
func_name = func_names[proc]
if (b_selector is None):
return '({} {}) => {} using {}'.format(a_selector, a_trigger, node_stream, func_name)
return '({} {} {} {} {}) => {} using {}'.format(a_selector, a_trigger, comb, b_selector, b_trigger, node_stream, func_name)
|
Convert a binary node descriptor into a string descriptor.
Binary node descriptors are 20-byte binary structures that encode all
information needed to create a graph node. They are used to communicate
that information to an embedded device in an efficient format. This
function exists to turn such a compressed node description back into
an understandable string.
Args:
bindata (bytes): The raw binary structure that contains the node
description.
Returns:
str: The corresponding string description of the same sensor_graph node
|
codesearchnet
|
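The 20-byte layout can be sanity-checked with `struct` alone; the field order mirrors the unpack call above and the trailing `2x` pads the record to 20 bytes. The packed values below are made up for illustration:
import struct

FMT = '<LLHHHBBBB2x'  # a_trig, b_trig, stream_id, a_id, b_id, proc, a_cond, b_cond, trig_combiner + 2 pad bytes
assert struct.calcsize(FMT) == 20

# Pack a descriptor with no second input (b_id == 0xFFFF) and processor 0 ('copy_latest_a').
raw = struct.pack(FMT, 0, 0, 0x1000, 0x0001, 0xFFFF, 0, 0, 0, 0)
print(struct.unpack(FMT, raw))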
def server_hardware_types(self):
if (not self.__server_hardware_types):
self.__server_hardware_types = ServerHardwareTypes(self.__connection)
return self.__server_hardware_types
|
Gets the ServerHardwareTypes API client.
Returns:
ServerHardwareTypes:
|
codesearchnet
|
def parse_int(value: Any) -> Numeric:
return int(value)
|
Attempts to parse a valid integer value from the provided value.
Args:
* value: of Any type
Returns:
* int value: if valid
Raises:
* ValueError: if parsing failed
|
github-repos
|
def _get_context_name(self, app=None):
elements = [self.__class__.__name__, 'context', text_type(id(self))]
if app:
elements.append(text_type(id(app)))
else:
try:
elements.append(text_type(id(self.app)))
except RuntimeError:
pass
return '_'.join(elements)
|
Generate the name of the context variable for this component & app.
Because we store the ``context`` in a Local so the component
can be used across multiple apps, we cannot store the context on the
instance itself. This function will generate a unique and predictable
key in which to store the context.
Returns:
str: The name of the context variable to set and get the context
from.
|
codesearchnet
|
def match_rules_context(tree, rules, parent_context={}):
for template, match_rules in rules.items():
context = parent_context.copy()
if match_template(tree, template, context):
for key, child_rules in match_rules.items():
child_context = match_rules_context(context[key], child_rules, context)
if child_context:
for k, v in child_context.items():
context[k] = v
else:
return None
return context
return None
|
Recursively matches a Tree structure with rules and returns context
Args:
tree (Tree): Parsed tree structure
rules (dict): See match_rules
parent_context (dict): Context of parent call
Returns:
dict: Context matched dictionary of matched rules or
None if no match
|
juraj-google-style
|
def _get_members(self, class_obj, member_type, include_in_public=None):
try:
app = self.state.document.settings.env.app
except AttributeError:
app = None
if (not include_in_public):
include_in_public = []
all_members = []
for member_name in dir(class_obj):
try:
documenter = get_documenter(app, safe_getattr(class_obj, member_name), class_obj)
except AttributeError:
continue
if (documenter.objtype == member_type):
all_members.append(member_name)
public_members = [x for x in all_members if ((x in include_in_public) or (not x.startswith('_')))]
return (public_members, all_members)
|
Return class members of the specified type.
Args:
class_obj: Class object.
member_type: Member type ('method' or 'attribute').
include_in_public: set/list/tuple with member names that should be
included in public members in addition to the public names (those
starting without underscore).
Returns:
tuple(public_members, all_members): Names of the class members of
the specified member type (public / all).
|
codesearchnet
|
def group_protos(cls, proto_list: List[types.ProtobufBaseType], **kwargs) -> Dict[str, List[types.ProtobufBaseType]]:
del proto_list, kwargs
return {}
|
Creates a dict of batchable protos.
For a list of protos, generates a dictionary `{key: grouped_protos}` such
that the `grouped_protos` can be batched together.
Args:
proto_list: A list of `Instrument` protos.
**kwargs: Any extra arguments. E.g., pricing configuration.
Returns:
A dictionary of grouped protos.
|
github-repos
|
def unzip(input_layer, split_dim=0, num_splits=2):
shape = input_layer.shape
_check_split_dims(num_splits, split_dim, shape)
splits = functions.unzip(input_layer, split_dim, shape[split_dim], num_splits)
return input_layer.with_sequence(splits)
|
Unzips this Tensor along the split_dim into num_splits Equal chunks.
Examples:
* `[1, 2, 3, 4] -> [1, 3], [2, 4]`
* `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [3, 3]], [[2, 2], [4, 4]]`
Args:
input_layer: The chainable object, supplied.
split_dim: The dimension to split along. Defaults to batch.
num_splits: The number of splits.
Returns:
A list of PrettyTensors.
Raises:
ValueError: If split_dim is out of range or isn't divided evenly by
num_splits.
|
juraj-google-style
|
def determine_action(self, issue):
resource_type = self.resource_types[issue.resource.resource_type_id]
issue_alert_schedule = (self.alert_schedule[resource_type] if (resource_type in self.alert_schedule) else self.alert_schedule['*'])
action_item = {'action': None, 'action_description': None, 'last_alert': issue.last_alert, 'issue': issue, 'resource': self.resource_classes[self.resource_types[issue.resource.resource_type_id]](issue.resource), 'owners': [], 'stop_after': issue_alert_schedule['stop'], 'remove_after': issue_alert_schedule['remove'], 'notes': issue.notes, 'missing_tags': issue.missing_tags}
time_elapsed = (time.time() - issue.created)
stop_schedule = pytimeparse.parse(issue_alert_schedule['stop'])
remove_schedule = pytimeparse.parse(issue_alert_schedule['remove'])
if self.collect_only:
action_item['action'] = AuditActions.IGNORE
elif (remove_schedule and (time_elapsed >= remove_schedule)):
action_item['action'] = AuditActions.REMOVE
action_item['action_description'] = 'Resource removed'
action_item['last_alert'] = remove_schedule
if issue.update({'last_alert': remove_schedule}):
db.session.add(issue.issue)
elif (stop_schedule and (time_elapsed >= stop_schedule)):
action_item['action'] = AuditActions.STOP
action_item['action_description'] = 'Resource stopped'
action_item['last_alert'] = stop_schedule
if issue.update({'last_alert': stop_schedule}):
db.session.add(issue.issue)
else:
alert_selection = self.determine_alert(issue_alert_schedule['alert'], issue.get_property('created').value, issue.get_property('last_alert').value)
if alert_selection:
action_item['action'] = AuditActions.ALERT
action_item['action_description'] = '{} alert'.format(alert_selection)
action_item['last_alert'] = alert_selection
if issue.update({'last_alert': alert_selection}):
db.session.add(issue.issue)
else:
action_item['action'] = AuditActions.IGNORE
db.session.commit()
return action_item
|
Determine the action we should take for the issue
Args:
issue: Issue to determine action for
Returns:
`dict`
|
codesearchnet
|
def __init__(self, *timeslots: List[Timeslot]):
self._table = defaultdict(list)
for slot in timeslots:
for interval in self._table[slot.channel]:
if slot.interval.has_overlap(interval):
raise PulseError("Cannot create TimeslotCollection from overlapped timeslots")
self._table[slot.channel].append(slot.interval)
self._timeslots = tuple(timeslots)
|
Create a new time-slot collection.
Args:
*timeslots: list of time slots
Raises:
PulseError: when overlapped time slots are specified
|
juraj-google-style
|
def wait_for_bq_job(self, job_reference, sleep_duration_sec=5, max_retries=0):
retry = 0
while True:
retry += 1
job = self.get_job(job_reference.projectId, job_reference.jobId, job_reference.location)
_LOGGER.info('Job %s status: %s', job.id, job.status.state)
if job.status.state == 'DONE' and job.status.errorResult:
raise RuntimeError('BigQuery job {} failed. Error Result: {}'.format(job_reference.jobId, job.status.errorResult))
elif job.status.state == 'DONE':
return True
else:
time.sleep(sleep_duration_sec)
if max_retries != 0 and retry >= max_retries:
raise RuntimeError('The maximum number of retries has been reached')
|
Poll job until it is DONE.
Args:
job_reference: bigquery.JobReference instance.
sleep_duration_sec: Specifies the delay in seconds between retries.
max_retries: The total number of times to retry. If equals to 0,
the function waits forever.
Raises:
`RuntimeError`: If the job is FAILED or the number of retries has been
reached.
|
github-repos
|
def _create_state_graph(self, name):
import_collections = [tf_v1.GraphKeys.GLOBAL_VARIABLES, tf_v1.GraphKeys.MODEL_VARIABLES, tf_v1.GraphKeys.TABLE_INITIALIZERS, tf_v1.GraphKeys.ASSET_FILEPATHS, tf_v1.GraphKeys.COND_CONTEXT, tf_v1.GraphKeys.WHILE_CONTEXT]
if self._trainable:
import_collections.extend([tf_v1.GraphKeys.TRAINABLE_VARIABLES, tf_v1.GraphKeys.REGULARIZATION_LOSSES])
absolute_scope_name = tf_v1.get_default_graph().unique_name(name, mark_as_used=False)
relative_scope_name = absolute_scope_name.split('/')[(- 1)]
assert (relative_scope_name == name)
meta_graph = meta_graph_pb2.MetaGraphDef()
meta_graph.CopyFrom(self._meta_graph)
meta_graph_lib.filter_collections(meta_graph, import_collections)
meta_graph_lib.prefix_shared_name_attributes(meta_graph, absolute_scope_name)
tf_v1.train.import_meta_graph(meta_graph, input_map={}, import_scope=relative_scope_name)
variables_tensor_map = {}
for var in tf_v1.global_variables():
if var.op.name.startswith((absolute_scope_name + '/')):
variables_tensor_map[var.name[(len(absolute_scope_name) + 1):]] = var
def _get_tensor(tensor_name):
return tf_v1.get_default_graph().get_tensor_by_name(meta_graph_lib.prepend_name_scope(tensor_name, import_scope=absolute_scope_name))
state_op_names = list_registered_stateful_ops_without_inputs()
state_map = get_state_map(meta_graph, state_op_names, set(), _get_tensor)
return (variables_tensor_map, state_map)
|
Creates the graph nodes that hold the state of the Module.
Args:
name: name scope to create the state graph in.
Returns:
A tuple consisting of:
variables_tensor_map: a map from tensor names in the original graph def
to the created Variables objects.
state_map: a map from tensors names in the original graph def to the
instantiated tensors to be used as a state_map.
|
codesearchnet
|
def _order_pases(self, passes):
passes = set(passes)
pass_deps = {}
for opt in passes:
(_, before, after) = self._known_passes[opt]
if (opt not in pass_deps):
pass_deps[opt] = set()
for after_pass in after:
pass_deps[opt].add(after_pass)
for other in before:
if (other not in passes):
continue
if (other not in pass_deps):
pass_deps[other] = set()
pass_deps[other].add(opt)
return toposort_flatten(pass_deps)
|
Topologically sort optimization passes.
This ensures that the resulting passes are run in order
respecting before/after constraints.
Args:
passes (iterable): An iterable of pass names that should
be included in the optimization passes run.
|
codesearchnet
|
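A small sketch of the same before/after ordering using the `toposort` package's `toposort_flatten`; the key assumption is the dependency direction, with each pass mapping to the set of passes that must run before it, and the pass names are invented:
from toposort import toposort_flatten

# pass -> set of passes that must run earlier
pass_deps = {
    'inline': set(),
    'constant_fold': {'inline'},
    'dead_code': {'constant_fold'},
}
print(toposort_flatten(pass_deps))  # ['inline', 'constant_fold', 'dead_code']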
def listdir(self, target_directory):
target_directory = self.resolve_path(target_directory, allow_fd=True)
directory = self.confirmdir(target_directory)
directory_contents = directory.contents
return list(directory_contents.keys())
|
Return a list of file names in target_directory.
Args:
target_directory: Path to the target directory within the
fake filesystem.
Returns:
A list of file names within the target directory in arbitrary
order.
Raises:
OSError: if the target is not a directory.
|
juraj-google-style
|
def AddSubkey(self, registry_key):
name = registry_key.name.upper()
if name in self._subkeys:
raise KeyError(
'Subkey: {0:s} already exists.'.format(registry_key.name))
self._subkeys[name] = registry_key
key_path = self._JoinKeyPath([self._key_path, registry_key.name])
registry_key._key_path = key_path
|
Adds a subkey.
Args:
registry_key (WinRegistryKey): Windows Registry subkey.
Raises:
KeyError: if the subkey already exists.
|
juraj-google-style
|
def init_op(self):
return self._init_op
|
Return the Init Op used by the supervisor.
Returns:
An Op or `None`.
|
github-repos
|
def match_criterion(self, tag):
return tag.name == self.reference_tag_name and \
tag.attrs.get('kind', '') == self.reference_tag_kind
|
Override. Determine if a tag has the desired name and kind attribute
value.
Args:
tag: A BeautifulSoup Tag.
Returns:
True if tag has the desired name and kind, otherwise False.
|
juraj-google-style
|
def _pad_modernbert_output(inputs: torch.Tensor, indices: torch.Tensor, batch: int, seqlen: int) -> torch.Tensor:
if inputs.dim() == 1:
output = torch.zeros(batch * seqlen, dtype=inputs.dtype, device=inputs.device)
output[indices] = inputs
padded_inputs = output.view(batch, seqlen)
else:
_, *rest = inputs.shape
output = torch.zeros(batch * seqlen, *rest, dtype=inputs.dtype, device=inputs.device)
output[indices] = inputs
padded_inputs = output.view(batch, seqlen, *rest)
return padded_inputs
|
Add padding to sequences.
Args:
inputs: (total_nnz, ...) or (total_nnz,), where total_nnz = number of tokens selected in attention_mask.
indices: (total_nnz)
batch: int, batch size
seqlen: int, max sequence length
Returns:
padded_inputs: (batch, seqlen, ...) or (batch, seqlen)
|
github-repos
|
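A tiny worked example of the scatter-back step, assuming two sequences of length 3 where only four positions were kept by the attention mask; the values are arbitrary:
import torch

hidden = torch.arange(4.0).unsqueeze(-1)  # (total_nnz=4, hidden=1)
indices = torch.tensor([0, 1, 3, 4])      # flat positions kept out of batch*seqlen=6
batch, seqlen = 2, 3

out = torch.zeros(batch * seqlen, 1)
out[indices] = hidden                     # scatter the unpadded tokens back
print(out.view(batch, seqlen, 1).squeeze(-1))
# tensor([[0., 1., 0.],
#         [2., 3., 0.]])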
def is_generic_union(type_: Type) -> bool:
if hasattr(typing, '_GenericAlias'):
return (isinstance(type_, typing._GenericAlias) and
type_.__origin__ is Union)
else:
if hasattr(typing, '_Union'):
return isinstance(type_, typing._Union)
else:
return isinstance(type_, typing.UnionMeta)
raise RuntimeError('Could not determine whether type is a Union. Is this'
' a YAtiML-supported Python version?')
|
Determines whether a type is a Union[...].
How to do this varies for different Python versions, due to the
typing library not having a stable API. This functions smooths
over the differences.
Args:
type_: The type to check.
Returns:
True iff it's a Union[...something...].
|
juraj-google-style
|
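A quick usage sketch with `is_generic_union` in scope, on a Python 3.7+ interpreter where `typing._GenericAlias` exists (the other branches cover older versions):
from typing import Optional, Union

print(is_generic_union(Union[int, str]))  # True
print(is_generic_union(Optional[int]))    # True, Optional[X] is Union[X, None]
print(is_generic_union(int))              # False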
def FromEncoded(cls, bindata):
if (len(bindata) != 8):
raise ArgumentError('Invalid binary slot descriptor with invalid length', length=len(bindata), expected=8, data=bindata)
(slot, match_op) = struct.unpack('<B6xB', bindata)
match_name = cls.KNOWN_MATCH_CODES.get(match_op)
if (match_name is None):
raise ArgumentError('Unknown match operation specified in binary slot descriptor', operation=match_op, known_match_ops=cls.KNOWN_MATCH_CODES)
if (match_name == 'match_controller'):
return SlotIdentifier(controller=True)
if (match_name == 'match_slot'):
return SlotIdentifier(slot=slot)
raise ArgumentError('Unsupported match operation in binary slot descriptor', match_op=match_name)
|
Create a slot identifier from an encoded binary descriptor.
These binary descriptors are used to communicate slot targeting
to an embedded device. They are exactly 8 bytes in length.
Args:
bindata (bytes): The 8-byte binary descriptor.
Returns:
SlotIdentifier
|
codesearchnet
|
def _push_frontier(self, early_frontier: Dict[(ops.Qid, int)], late_frontier: Dict[(ops.Qid, int)], update_qubits: Iterable[ops.Qid]=None) -> Tuple[(int, int)]:
if (update_qubits is None):
update_qubits = set(early_frontier).difference(late_frontier)
n_new_moments = (max(((early_frontier.get(q, 0) - late_frontier[q]) for q in late_frontier)) if late_frontier else 0)
if (n_new_moments > 0):
insert_index = min(late_frontier.values())
self._moments[insert_index:insert_index] = ([ops.Moment()] * n_new_moments)
for q in update_qubits:
if (early_frontier.get(q, 0) > insert_index):
early_frontier[q] += n_new_moments
return (insert_index, n_new_moments)
return (0, 0)
|
Inserts moments to separate two frontiers.
After inserting n_new moments, the following holds:
for q in late_frontier:
early_frontier[q] <= late_frontier[q] + n_new
for q in update_qubits:
early_frontier[q] identifies the same moment as before
(but whose index may have changed if this moment is after
those inserted).
Args:
early_frontier: The earlier frontier. For qubits not in the later
frontier, this is updated to account for the newly inserted
moments.
late_frontier: The later frontier. This is not modified.
update_qubits: The qubits for which to update early_frontier to
account for the newly inserted moments.
Returns:
(index at which new moments were inserted, how many new moments
were inserted) if new moments were indeed inserted. (0, 0)
otherwise.
|
codesearchnet
|
def run(self, resources):
hwman = resources['connection']
con = hwman.hwman.controller()
test_interface = con.test_interface()
try:
test_interface.synchronize_clock()
print('Time currently set at %s' % test_interface.current_time_str())
except:
raise ArgumentError('Error setting RTC time, check if controller actually has RTC or if iotile-support-lib-controller-3 is updated')
|
Sets the RTC timestamp to UTC.
Args:
resources (dict): A dictionary containing the required resources that
we needed access to in order to perform this step.
|
juraj-google-style
|
def _get_data_buffer_time_limit_ms(experiments):
for experiment in experiments:
if re.match('data_buffer_time_limit_ms=', experiment):
return int(re.match('data_buffer_time_limit_ms=(?P<data_buffer_time_limit_ms>.*)', experiment).group('data_buffer_time_limit_ms'))
return 0
|
Defines the time limit of the outbound data buffering.
Note: data_buffer_time_limit_ms is an experimental flag and might
not be available in future releases.
Returns:
an int indicating the time limit in milliseconds of the outbound
data buffering. Default is 0 (disabled)
|
github-repos
|
def sample_with_temperature(x, dim, temperature=1.0, dtype=tf.int32, name=None):
dim = convert_to_dimension(dim)
with tf.name_scope(name, default_name="sample_with_temperature"):
if temperature != 0.0:
tiny_val = 1e-9
g = -log(-log(
random_uniform(
x.mesh,
x.shape,
minval=tiny_val,
maxval=1.,
dtype=x.dtype)))
x += g * temperature
return argmax(x, dim, dtype, name)
|
Either argmax or random sampling.
Args:
x: a Tensor.
dim: a Dimension in x.shape.dims
temperature: a float 0.0=argmax 1.0=random
dtype: a tf.dtype (for the output)
name: an optional string
Returns:
a Tensor with type dtype.
|
juraj-google-style
|
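The `x += g * temperature` line is the Gumbel-max trick: adding scaled Gumbel noise before argmax samples indices in proportion to softmax(x / temperature). A NumPy sketch of that equivalence, independent of the mesh-TensorFlow wrappers; the logits are made up:
import numpy as np

rng = np.random.default_rng(0)
logits = np.array([1.0, 2.0, 3.0])
temperature = 1.0

def sample_once():
    # Gumbel noise: -log(-log(U)) with U ~ Uniform(tiny, 1)
    g = -np.log(-np.log(rng.uniform(1e-9, 1.0, size=logits.shape)))
    return np.argmax(logits + g * temperature)

counts = np.bincount([sample_once() for _ in range(10000)], minlength=3) / 10000
softmax = np.exp(logits / temperature) / np.exp(logits / temperature).sum()
print(counts, softmax)  # empirical frequencies track the softmax probabilities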
def _process_celeba_config_file(self, file_path):
with tf.io.gfile.GFile(file_path) as f:
data_raw = f.read()
lines = data_raw.split("\n")
keys = lines[1].strip().split()
values = {}
for line in lines[2:-1]:
row_values = line.strip().split()
values[row_values[0]] = [int(v) for v in row_values[1:]]
return keys, values
|
Unpack the celeba config file.
The file starts with the number of lines, and a header.
Afterwards, there is a configuration for each file: one per line.
Args:
file_path: Path to the file with the configuration.
Returns:
keys: names of the attributes
values: map from the file name to the list of attribute values for
this file.
|
juraj-google-style
|
def _align_monomer(self, monomer, mon_vector, move_direction):
axis = np.cross(mon_vector, move_direction)
origin = monomer[self.start].coords
angle = get_angle(mon_vector, move_direction)
op = SymmOp.from_origin_axis_angle(origin, axis, angle)
monomer.apply_operation(op)
|
Rotate the monomer so that it is aligned along the move direction.
Args:
monomer (Molecule)
mon_vector (numpy.array): molecule vector that starts from the
start atom index to the end atom index
move_direction (numpy.array): the direction of the polymer chain
extension
|
juraj-google-style
|
def Matches(self, file_entry):
if not self._file_scanner or not file_entry.IsFile():
return None
file_object = file_entry.GetFileObject()
if not file_object:
return False
try:
scan_state = pysigscan.scan_state()
self._file_scanner.scan_file_object(scan_state, file_object)
except IOError as exception:
location = getattr(file_entry.path_spec, 'location', '')
logging.error((
'[skipping] unable to scan file: {0:s} for signatures '
'with error: {1!s}').format(location, exception))
return False
finally:
file_object.close()
return scan_state.number_of_scan_results > 0
|
Compares the file entry against the filter.
Args:
file_entry (dfvfs.FileEntry): file entry to compare.
Returns:
bool: True if the file entry matches the filter, False if not or
None if the filter does not apply.
|
juraj-google-style
|
def _to_backend_layout(tensor_layout):
if tensor_layout.device_mesh is None:
raise ValueError('Cannot create sharding when device mesh is not set for TensorLayout.')
sharding_specs = [axis if axis else dtensor.UNSHARDED for axis in tensor_layout.axes]
dtensor_mesh = tensor_layout.device_mesh.backend_mesh
return dtensor.Layout(sharding_specs=sharding_specs, mesh=dtensor_mesh)
|
Convert the TensorLayout to Tensorflow backend specific Sharding.
Args:
tensor_layout: TensorLayout instance to convert.
Returns:
A `tf.dtensor.Layout` instance.
|
github-repos
|
def rtt_get_num_down_buffers(self):
cmd = enums.JLinkRTTCommand.GETNUMBUF
dir = ctypes.c_int(enums.JLinkRTTDirection.DOWN)
return self.rtt_control(cmd, dir)
|
After starting RTT, get the current number of down buffers.
Args:
self (JLink): the ``JLink`` instance
Returns:
The number of configured down buffers on the target.
Raises:
JLinkRTTException if the underlying JLINK_RTTERMINAL_Control call fails.
|
juraj-google-style
|
def _check_lambda_alias(self):
aliases = self.lambda_client.list_aliases(FunctionName=self.app_name)
matched_alias = False
for alias in aliases['Aliases']:
if (alias['Name'] == self.env):
LOG.info('Found alias %s for function %s', self.env, self.app_name)
matched_alias = True
break
else:
LOG.info('No alias %s found for function %s', self.env, self.app_name)
return matched_alias
|
Check if lambda alias exists.
Returns:
True if alias exists
False if alias does not exist
|
codesearchnet
|
def _build_projection_expression(clean_table_keys):
projection_expression = ''
for key in clean_table_keys[:-1]:
projection_expression += ('{},').format(key)
projection_expression += clean_table_keys[-1]
return projection_expression
|
Given cleaned up keys, this will return a projection expression for
the dynamodb lookup.
Args:
clean_table_keys (dict): keys without the data types attached
Returns:
str: A projection expression for the dynamodb lookup.
|
juraj-google-style
|
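Since the helper simply joins key names with commas, the loop above is equivalent to a one-line join; the docstring calls `clean_table_keys` a dict, but any ordered sequence of key names works, and the names below are made up:
clean_table_keys = ['environment', 'account_id', 'region']
projection_expression = ','.join(clean_table_keys)
print(projection_expression)  # environment,account_id,region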
def xsrf_secret_key():
secret = memcache.get(XSRF_MEMCACHE_ID, namespace=OAUTH2CLIENT_NAMESPACE)
if (not secret):
model = SiteXsrfSecretKey.get_or_insert(key_name='site')
if (not model.secret):
model.secret = _generate_new_xsrf_secret_key()
model.put()
secret = model.secret
memcache.add(XSRF_MEMCACHE_ID, secret, namespace=OAUTH2CLIENT_NAMESPACE)
return str(secret)
|
Return the secret key for use for XSRF protection.
If the Site entity does not have a secret key, this method will also create
one and persist it.
Returns:
The secret key.
|
codesearchnet
|
def derive_temporary_python2_environment(
destination_directory: str,
python3_environment: PreparedEnv,
verbose: bool,
env_name: str = '.test_virtualenv_py2',
python_path: str = "/usr/bin/python2.7") -> PreparedEnv:
shutil.rmtree(destination_directory)
input_directory = cast(str, python3_environment.destination_directory)
os.chdir(input_directory)
conversion_script_path = os.path.join(
input_directory,
'dev_tools',
'python2.7-generate.sh')
shell_tools.run_cmd('bash',
conversion_script_path,
destination_directory,
input_directory,
python3_environment.virtual_env_path,
out=sys.stderr)
os.chdir(destination_directory)
env_path = os.path.join(destination_directory, env_name)
req_path = os.path.join(destination_directory, 'requirements.txt')
dev_req_path = os.path.join(destination_directory,
'pip-list-test-tools.txt')
contrib_req_path = os.path.join(destination_directory,
'cirq',
'contrib',
'contrib-requirements.txt')
req_paths = [req_path, dev_req_path, contrib_req_path]
create_virtual_env(venv_path=env_path,
python_path=python_path,
requirements_paths=req_paths,
verbose=verbose)
return PreparedEnv(github_repo=python3_environment.repository,
actual_commit_id=python3_environment.actual_commit_id,
compare_commit_id=python3_environment.compare_commit_id,
destination_directory=destination_directory,
virtual_env_path=env_path)
|
Creates a python 2.7 environment starting from a prepared python 3 one.
Args:
destination_directory: Where to put the python 2 environment.
python3_environment: The prepared environment to start from.
verbose: When set, more progress output is produced.
env_name: The name to use for the virtualenv directory.
python_path: The python binary to use.
Returns:
A description of the environment that was prepared.
|
juraj-google-style
|
def _reset_offset(self, partition):
timestamp = self._subscriptions.assignment[partition].reset_strategy
if (timestamp is OffsetResetStrategy.EARLIEST):
strategy = 'earliest'
elif (timestamp is OffsetResetStrategy.LATEST):
strategy = 'latest'
else:
raise NoOffsetForPartitionError(partition)
log.debug('Resetting offset for partition %s to %s offset.', partition, strategy)
offsets = self._retrieve_offsets({partition: timestamp})
if (partition not in offsets):
raise NoOffsetForPartitionError(partition)
offset = offsets[partition][0]
if self._subscriptions.is_assigned(partition):
self._subscriptions.seek(partition, offset)
|
Reset offsets for the given partition using the offset reset strategy.
Arguments:
partition (TopicPartition): the partition that needs reset offset
Raises:
NoOffsetForPartitionError: if no offset reset strategy is defined
|
codesearchnet
|
def get_highest_values(self, count):
count = int(count)
assert (count <= len(self._values)), 'count must be smaller than or equal to values length. {} > {}.'.format(count, len(self._values))
assert (count > 0), 'count must be greater than 0. Got {}.'.format(count)
highest_values = sorted(self._values, reverse=True)[0:count]
highest_values_index = sorted(list(xrange(len(self._values))), key=(lambda k: self._values[k]), reverse=True)[0:count]
return (highest_values, highest_values_index)
|
Get a list of the x highest values of the Data Collection and their indices.
This is useful for situations where one needs to know the times of
the year when the largest values of a data collection occur. For example,
there is a European daylight code that requires an analysis for the hours
of the year with the greatest exterior illuminance level. This method
can be used to help build a schedule for such a study.
Args:
count: Integer representing the number of highest values to account for.
Returns:
highest_values: The n highest values in data list, ordered from
highest to lowest.
highest_values_index: Indices of the n highest values in data
list, ordered from highest to lowest.
|
codesearchnet
|
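A NumPy-based sketch of the same ranking, assuming the collection's `_values` is just a flat sequence of numbers; the values are illustrative:
import numpy as np

values = [3.0, 9.0, 1.0, 7.0]
count = 2
order = np.argsort(values)[::-1][:count]      # indices of the highest values, high to low
highest_values = [values[i] for i in order]
print(highest_values, order.tolist())          # [9.0, 7.0] [1, 3]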
def _validate_testbed_configs(testbed_configs):
seen_names = set()
for config in testbed_configs:
name = config[keys.Config.key_testbed_name.value]
_validate_testbed_name(name)
if name in seen_names:
raise MoblyConfigError('Duplicate testbed name %s found.' % name)
seen_names.add(name)
|
Validates the testbed configurations.
Args:
testbed_configs: A list of testbed configuration dicts.
Raises:
MoblyConfigError: Some parts of the configuration is invalid.
|
github-repos
|
def _WriteTimestamp(self, timestamp, filename):
try:
os.makedirs(self.timestamp_dir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(self.timestamp_dir):
pass
else:
raise
filedesc, temp_filename = tempfile.mkstemp(prefix='nsscache-update-', dir=self.timestamp_dir)
time_string = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(timestamp))
try:
os.write(filedesc, b'%s\n' % time_string.encode())
os.fsync(filedesc)
os.close(filedesc)
except OSError:
os.unlink(temp_filename)
self.log.warning('writing timestamp failed!')
return False
os.chmod(temp_filename, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
os.rename(temp_filename, filename)
self.log.debug('wrote timestamp %s to file %r', time_string, filename)
return True
|
Write a given timestamp out to a file, converting to the ISO-8601
format.
We convert internal timestamp format (epoch) to ISO-8601 format, i.e.
YYYY-MM-DDThh:mm:ssZ which is basically UTC time, then write it out to a
file.
Args:
timestamp: A String in nss_cache internal timestamp format, aka time_t.
filename: A String naming the file to write to.
Returns:
A boolean indicating success of write.
|
github-repos
|
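The epoch-to-ISO-8601 conversion at the heart of the method is just `time.strftime` over `gmtime`; a minimal sketch with an arbitrary epoch value:
import time

timestamp = 0  # time_t epoch seconds
print(time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(timestamp)))  # 1970-01-01T00:00:00Z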
def human_timestamp(__timestamp: datetime.datetime) -> str:
numstr = '. a two three four five six seven eight nine ten'.split()
matches = [(((60 * 60) * 24) * 365), (((60 * 60) * 24) * 28), (((60 * 60) * 24) * 7), ((60 * 60) * 24), (60 * 60), 60, 1]
match_names = ['year', 'month', 'week', 'day', 'hour', 'minute', 'second']
if (__timestamp.tzinfo is None):
__timestamp = __timestamp.replace(tzinfo=datetime.timezone.utc)
now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
delta = int((now - __timestamp).total_seconds())
for scale in matches:
i = (delta // scale)
if i:
name = match_names[matches.index(scale)]
break
else:
i = 0
if (i == 0):
result = 'right now'
elif ((i == 1) and (name in ('year', 'month', 'week'))):
result = 'last {}'.format(name)
elif ((i == 1) and (name == 'day')):
result = 'yesterday'
elif ((i == 1) and (name == 'hour')):
result = 'about an hour ago'
else:
result = 'about {} {}{} ago'.format((i if (i > 10) else numstr[i]), name, ('s' if (i > 1) else ''))
return result
|
Format a relative time.
Args:
__timestamp: Event to generate relative timestamp against
Returns:
Human readable date and time offset
|
codesearchnet
|
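Assuming `human_timestamp` is in scope and computes whole units with integer division (`i = delta // scale`), usage looks like the sketch below; the exact wording depends on how much time has elapsed at call time:
import datetime

now = datetime.datetime.now(datetime.timezone.utc)
print(human_timestamp(now))                                # 'right now'
print(human_timestamp(now - datetime.timedelta(hours=1)))  # 'about an hour ago'
print(human_timestamp(now - datetime.timedelta(days=3)))   # 'about three days ago'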
def ParseChat(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
participants = self._GetRowValue(query_hash, row, 'participants')
author = self._GetRowValue(query_hash, row, 'author')
dialog_partner = self._GetRowValue(query_hash, row, 'dialog_partner')
from_displayname = self._GetRowValue(query_hash, row, 'from_displayname')
accounts = []
participants = participants.split(' ')
for participant in participants:
if (participant != author):
accounts.append(participant)
to_account = ', '.join(accounts)
if (not to_account):
to_account = (dialog_partner or 'Unknown User')
from_account = '{0:s} <{1:s}>'.format(from_displayname, author)
event_data = SkypeChatEventData()
event_data.from_account = from_account
event_data.query = query
event_data.text = self._GetRowValue(query_hash, row, 'body_xml')
event_data.title = self._GetRowValue(query_hash, row, 'title')
event_data.to_account = to_account
timestamp = self._GetRowValue(query_hash, row, 'timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Chat from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a chat message.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
|
codesearchnet
|
def __driver_helper(self, line):
if (line.strip() == '?'):
self.stdout.write('\n')
self.stdout.write(self.doc_string())
else:
toks = shlex.split(line[:(- 1)])
try:
msg = self.__get_help_message(toks)
except Exception as e:
self.stderr.write('\n')
self.stderr.write(traceback.format_exc())
self.stderr.flush()
self.stdout.write('\n')
self.stdout.write(msg)
self.stdout.write('\n')
self.stdout.write(self.prompt)
self.stdout.write(line)
self.stdout.flush()
|
Driver level helper method.
1. Display help message for the given input. Internally calls
self.__get_help_message() to obtain the help message.
2. Re-display the prompt and the input line.
Arguments:
line: The input line.
Raises:
Errors from helper methods print stack trace without terminating
this shell. Other exceptions will terminate this shell.
|
codesearchnet
|
def are_checksums_equal(checksum_a_pyxb, checksum_b_pyxb):
if (checksum_a_pyxb.algorithm != checksum_b_pyxb.algorithm):
raise ValueError('Cannot compare checksums calculated with different algorithms. a="{}" b="{}"'.format(checksum_a_pyxb.algorithm, checksum_b_pyxb.algorithm))
return (checksum_a_pyxb.value().lower() == checksum_b_pyxb.value().lower())
|
Determine if checksums are equal.
Args:
checksum_a_pyxb, checksum_b_pyxb: PyXB Checksum objects to compare.
Returns:
bool
- **True**: The checksums contain the same hexadecimal values calculated with
the same algorithm. Identical checksums guarantee (for all practical
purposes) that the checksums were calculated from the same sequence of bytes.
- **False**: The checksums were calculated with the same algorithm but the
hexadecimal values are different.
Raises:
ValueError
The checksums were calculated with different algorithms, hence cannot be
compared.
|
codesearchnet
|
def UpdateIncludeState(filename, include_dict, io=codecs):
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
include_dict.setdefault(include, linenum)
return True
|
Fill up the include_dict with new includes found from the file.
Args:
filename: the name of the header to read.
include_dict: a dictionary in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
|
codesearchnet
|
def ExtractCredentialsFromPathSpec(self, path_spec):
credentials = manager.CredentialsManager.GetCredentials(path_spec)
for identifier in credentials.CREDENTIALS:
value = getattr(path_spec, identifier, None)
if (value is None):
continue
self.SetCredential(path_spec, identifier, value)
|
Extracts credentials from a path specification.
Args:
path_spec (PathSpec): path specification to extract credentials from.
|
codesearchnet
|
def clip_boxes(box, box_size: Tuple[int, int]):
assert torch.isfinite(box).all(), 'Box tensor contains infinite or NaN!'
height, width = box_size
x1 = box[:, 0].clamp(min=0, max=width)
y1 = box[:, 1].clamp(min=0, max=height)
x2 = box[:, 2].clamp(min=0, max=width)
y2 = box[:, 3].clamp(min=0, max=height)
box = torch.stack((x1, y1, x2, y2), dim=-1)
return box
|
Clip the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
Args:
box (Tensor): The box to be clipped.
box_size (height, width): The clipping box's size.
|
github-repos
|
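A short usage sketch with `clip_boxes` in scope: boxes are (N, 4) tensors in (x1, y1, x2, y2) order and the size tuple is (height, width), so out-of-range corners get clamped to the image bounds. The coordinates are made up:
import torch

boxes = torch.tensor([[-5.0, 10.0, 120.0, 90.0]])
print(clip_boxes(boxes, (80, 100)))  # tensor([[  0.,  10., 100.,  80.]])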
def get_header_from_ops_and_kernels(ops_and_kernels, include_all_ops_and_kernels):
ops_and_kernels = sorted(ops_and_kernels)
ops = set((op for op, _ in ops_and_kernels))
result_list = []
def append(s):
result_list.append(s)
_, script_name = os.path.split(sys.argv[0])
append('
append('
append('
if include_all_ops_and_kernels:
append('
append('
append('
else:
line = "\n namespace {\n constexpr const char* skip(const char* x) {\n return (*x) ? (*x == ' ' ? skip(x + 1) : x) : x;\n }\n\n constexpr bool isequal(const char* x, const char* y) {\n return (*skip(x) && *skip(y))\n ? (*skip(x) == *skip(y) && isequal(skip(x) + 1, skip(y) + 1))\n : (!*skip(x) && !*skip(y));\n }\n\n template<int N>\n struct find_in {\n static constexpr bool f(const char* x, const char* const y[N]) {\n return isequal(x, y[0]) || find_in<N - 1>::f(x, y + 1);\n }\n };\n\n template<>\n struct find_in<0> {\n static constexpr bool f(const char* x, const char* const y[]) {\n return false;\n }\n };\n }
line += 'constexpr const char* kNecessaryOpKernelClasses[] = {\n'
for _, kernel_class in ops_and_kernels:
if kernel_class is None:
continue
line += '"%s",\n' % kernel_class
line += '};'
append(line)
append('
append('')
append('constexpr inline bool ShouldRegisterOp(const char op[]) {')
append(' return false')
for op in sorted(ops):
append(' || isequal(op, "%s")' % op)
append(' ;')
append('}')
append('
append('')
append('
append('
return '\n'.join(result_list)
|
Returns a header for use with tensorflow SELECTIVE_REGISTRATION.
Args:
ops_and_kernels: a set of (op_name, kernel_class_name) pairs to include.
include_all_ops_and_kernels: if True, ops_and_kernels is ignored and all op
kernels are included.
Returns:
the string of the header that should be written as ops_to_register.h.
|
github-repos
|
def input_fn(filenames, tf_transform_output, batch_size=200):
transformed_feature_spec = tf_transform_output.transformed_feature_spec().copy()
transformed_features = tf.contrib.learn.io.read_batch_features(filenames, batch_size, transformed_feature_spec, reader=_gzip_reader_fn)
return (transformed_features, transformed_features.pop(taxi.transformed_name(taxi.LABEL_KEY)))
|
Generates features and labels for training or evaluation.
Args:
filenames: [str] list of CSV files to read data from.
tf_transform_output: A TFTransformOutput.
batch_size: int First dimension size of the Tensors returned by input_fn
Returns:
A (features, indices) tuple where features is a dictionary of
Tensors, and indices is a single Tensor of label indices.
|
github-repos
|
def load(self, sess, tags, import_scope=None, **saver_kwargs):
saved_model_proto = parse_saved_model(self._export_dir)
metrics.IncrementReadApi(_LOADER_LABEL)
with sess.graph.as_default():
saver, _ = self.load_graph(sess.graph, tags, import_scope, **saver_kwargs)
self.restore_variables(sess, saver, import_scope)
self.run_init_ops(sess, tags, import_scope)
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
if len(saved_model_proto.meta_graphs) == 1 and saved_model_proto.meta_graphs[0].HasField('object_graph_def'):
metrics.IncrementRead(write_version='2')
else:
metrics.IncrementRead(write_version='1')
return meta_graph_def
|
Load the MetaGraphDef graph and restore variable values into the session.
Args:
sess: tf.compat.v1.Session to restore variable values.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
**saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph.
Returns:
`MetagraphDef` proto of the graph that was loaded.
|
github-repos
|
def _build_migrated_variables(checkpoint_reader, name_value_fn):
names_to_shapes = checkpoint_reader.get_variable_to_shape_map()
new_name_to_variable = {}
name_to_new_name = {}
for name in names_to_shapes:
value = checkpoint_reader.get_tensor(name)
(new_name, new_value) = name_value_fn(name, value)
if (new_name is None):
continue
name_to_new_name[name] = new_name
new_name_to_variable[new_name] = tf.Variable(new_value)
return (new_name_to_variable, name_to_new_name)
|
Builds the TensorFlow variables of the migrated checkpoint.
Args:
checkpoint_reader: A `tf.train.NewCheckPointReader` of the checkpoint to
be read from.
name_value_fn: Function taking two arguments, `name` and `value`, which
returns the pair of new name and value for that a variable of that name.
Returns:
Tuple of a dictionary with new variable names as keys and `tf.Variable`s as
values, and a dictionary that maps the old variable names to the new
variable names.
|
codesearchnet
|
def start_of_chunk(prev_tag, tag, prev_type, type_):
chunk_start = False
if (tag == 'B'):
chunk_start = True
if (tag == 'S'):
chunk_start = True
if ((prev_tag == 'E') and (tag == 'E')):
chunk_start = True
if ((prev_tag == 'E') and (tag == 'I')):
chunk_start = True
if ((prev_tag == 'S') and (tag == 'E')):
chunk_start = True
if ((prev_tag == 'S') and (tag == 'I')):
chunk_start = True
if ((prev_tag == 'O') and (tag == 'E')):
chunk_start = True
if ((prev_tag == 'O') and (tag == 'I')):
chunk_start = True
if ((tag != 'O') and (tag != '.') and (prev_type != type_)):
chunk_start = True
return chunk_start
|
Checks if a chunk started between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_start: boolean.
|
codesearchnet
|
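A quick check of the IOBES rules above with `start_of_chunk` in scope and a hypothetical tag sequence; each call compares one (previous, current) pair:
# (prev_tag, tag, prev_type, type_) -> does a new chunk start at `tag`?
print(start_of_chunk('O', 'B', 'O', 'PER'))    # True: B always opens a chunk
print(start_of_chunk('B', 'I', 'PER', 'PER'))  # False: continuation of the same chunk
print(start_of_chunk('E', 'B', 'PER', 'LOC'))  # True: new chunk after a chunk end
print(start_of_chunk('B', 'I', 'PER', 'LOC'))  # True: the entity type changed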
def Tensors(self, run, tag):
accumulator = self.GetAccumulator(run)
return accumulator.Tensors(tag)
|
Retrieve the tensor events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.TensorEvent`s.
|
juraj-google-style
|
def build(self, backend=None):
n_total = len(self.data.index)
if len(self.completes):
completes = [set(x) for x in sum(self.completes, [])]
completes = set.intersection(*completes)
else:
completes = [x for x in range(len(self.data.index))]
self.clean_data = self.data.iloc[list(completes), :]
if len(completes) < n_total:
msg = "Automatically removing {}/{} rows from the dataset."
msg = msg.format(n_total - len(completes), n_total)
warnings.warn(msg)
for term_args in self.added_terms:
self._add(**term_args)
self._set_priors(**self._added_priors)
for name, term in self.terms.items():
type_ = 'intercept' if name == 'Intercept' else \
'random' if self.terms[name].random else 'fixed'
term.prior = self._prepare_prior(term.prior, type_)
if backend is None:
if self._backend_name is None:
raise ValueError("Error: no backend was passed or set in the "
"Model; did you forget to call fit()?")
backend = self._backend_name
if self.y is None:
raise ValueError("No outcome (y) variable is set! Please specify "
"an outcome variable using the formula interface "
"before build() or fit().")
terms = [t for t in self.fixed_terms.values() if t.name != 'Intercept']
if len(self.fixed_terms) > 1:
X = [pd.DataFrame(x.data, columns=x.levels) for x in terms]
X = pd.concat(X, axis=1)
self.dm_statistics = {
'r2_x': pd.Series({
x: sm.OLS(endog=X[x],
exog=sm.add_constant(X.drop(x, axis=1))
if 'Intercept' in self.term_names
else X.drop(x, axis=1)).fit().rsquared
for x in list(X.columns)}),
'sd_x': X.std(),
'mean_x': X.mean(axis=0)
}
mat = X.corr()
for x in list(mat.columns):
mat.loc[x, x] = self.dm_statistics['mean_x'][x]
self._diagnostics = {
'VIF': 1/(1 - self.dm_statistics['r2_x']),
'corr_mean_X': mat
}
if any(self.dm_statistics['r2_x'] > .999):
raise ValueError(
"There is perfect collinearity among the fixed effects!\n"
"Printing some design matrix statistics:\n" +
str(self.dm_statistics) + '\n' +
str(self._diagnostics))
num_cats = [x.data.size for x in self.fixed_terms.values()]
if any(np.array(num_cats) == 0):
raise ValueError(
"At least one categorical predictor contains only 1 category!")
if len(self.terms) > 0:
if self.taylor is not None:
taylor = self.taylor
else:
taylor = 5 if self.family.name == 'gaussian' else 1
scaler = PriorScaler(self, taylor=taylor)
scaler.scale()
if self.family.name == 'bernoulli' and np.max(self.y.data) < 1.01:
event = next(
i for i, x in enumerate(self.y.data.flatten()) if x > .99)
warnings.warn('Modeling the probability that {}==\'{}\''.format(
self.y.name, str(self.clean_data[self.y.name].iloc[event])))
self._set_backend(backend)
self.backend.build(self)
self.built = True
|
Set up the model for sampling/fitting.
Performs any steps that require access to all model terms (e.g., scaling priors
on each term), then calls the BackEnd's build() method.
Args:
backend (str): The name of the backend to use for model fitting.
Currently, 'pymc' and 'stan' are supported. If None, assume
that fit() has already been called (possibly without building),
and look in self._backend_name.
|
juraj-google-style
|
def GetIamPolicy(self, request, global_params=None):
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(config, request, global_params=global_params)
|
Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (BigqueryTablesGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
|
github-repos
|
def receiveds_format(receiveds):
log.debug("Receiveds for this email are parsed")
output = []
counter = Counter()
for i in receiveds[::-1]:
j = {k: v.strip() for k, v in i.items() if v}
j["hop"] = counter["hop"] + 1
if i.get("date"):
i["date"] = i["date"].split(";")[-1]
try:
j["date_utc"], _ = convert_mail_date(i["date"])
except TypeError:
j["date_utc"] = None
size = len(output)
now = j.get("date_utc")
if size and now:
before = output[counter["hop"] - 1].get("date_utc")
if before:
j["delay"] = (now - before).total_seconds()
else:
j["delay"] = 0
else:
j["delay"] = 0
output.append(j)
counter["hop"] += 1
else:
for i in output:
if i.get("date_utc"):
i["date_utc"] = i["date_utc"].isoformat()
else:
return output
|
Given a list of receiveds hops, adds metadata and reformats
field values.
Args:
receiveds (list): list of receiveds hops already formatted
Returns:
list of receiveds reformatted and with new fields
|
juraj-google-style
|
def users_getPresence(self, *, user: str, **kwargs) -> SlackResponse:
kwargs.update({"user": user})
return self.api_call("users.getPresence", http_verb="GET", params=kwargs)
|
Gets user presence information.
Args:
user (str): User to get presence info on. Defaults to the authed user.
e.g. 'W1234567890'
|
juraj-google-style
|
def chown(self, path, uid, gid, dir_fd=None, follow_symlinks=None):
if (follow_symlinks is None):
follow_symlinks = True
elif (sys.version_info < (3, 3)):
raise TypeError("chown() got an unexpected keyword argument 'follow_symlinks'")
path = self._path_with_dir_fd(path, self.chown, dir_fd)
try:
file_object = self.filesystem.resolve(path, follow_symlinks, allow_fd=True)
except IOError as io_error:
if (io_error.errno == errno.ENOENT):
self.filesystem.raise_os_error(errno.ENOENT, path)
raise
if (not ((is_int_type(uid) or (uid is None)) and (is_int_type(gid) or (gid is None)))):
raise TypeError('An integer is required')
if (uid != (- 1)):
file_object.st_uid = uid
if (gid != (- 1)):
file_object.st_gid = gid
|
Set ownership of a faked file.
Args:
path: (str) Path to the file or directory.
uid: (int) Numeric uid to set the file or directory to.
gid: (int) Numeric gid to set the file or directory to.
dir_fd: (int) If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
New in Python 3.3.
follow_symlinks: (bool) If `False` and path points to a symlink,
the link itself is changed instead of the linked object.
New in Python 3.3.
Raises:
OSError: if path does not exist.
`None` is also allowed for `uid` and `gid`. This permits `os.rename`
to use `os.chown` even when the source file `uid` and `gid` are
`None` (unset).
|
codesearchnet
|
def parse(self, message, schema):
func = {
'audit-log': self._parse_audit_log_msg,
'event': self._parse_event_msg,
}[schema]
return func(message)
|
Parse message according to schema.
`message` should already be validated against the given schema.
See :ref:`schemadef` for more information.
Args:
message (dict): message data to parse.
schema (str): valid message schema.
Returns:
(dict): parsed message
|
juraj-google-style
|
def get_adversary_phone_asset(self, main_type, sub_type, unique_id, asset_id, params=None):
return self.adversary_phone_asset(main_type, sub_type, unique_id, asset_id, params=params)
|
Args:
main_type:
sub_type:
unique_id:
asset_id:
params:
Return:
|
juraj-google-style
|
def should_use_network(self, request):
return (self.networking and all((fn(request) for fn in self.network_filters)))
|
Verifies if real networking mode should be used for the given
request, passing it to the registered network filters.
Arguments:
request (pook.Request): outgoing HTTP request to test.
Returns:
bool
|
codesearchnet
|
def _parse_publisher(details):
publisher = _get_td_or_none(
details,
"ctl00_ContentPlaceHolder1_tblRowNakladatel"
)
if not publisher:
return None
publisher = dhtmlparser.removeTags(publisher).strip()
if not publisher:
return None
return publisher
|
Parse publisher of the book.
Args:
details (obj): HTMLElement containing slice of the page with details.
Returns:
str/None: Publisher's name as string or None if not found.
|
juraj-google-style
|
def _extract_field_with_regex(self, field):
matched = re.search(field, self.text)
if (not matched):
err_msg = u'Failed to extract data with regex! => {}\n'.format(field)
err_msg += u'response body: {}\n'.format(self.text)
logger.log_error(err_msg)
raise exceptions.ExtractFailure(err_msg)
return matched.group(1)
|
extract field from response content with regex.
requests.Response body could be json or html text.
Args:
field (str): regex string that matched r".*\(.*\).*"
Returns:
str: matched content.
Raises:
exceptions.ExtractFailure: If no content matched with regex.
Examples:
>>> # self.text: "LB123abcRB789"
>>> field = "LB[\d]*(.*)RB[\d]*"
>>> _extract_field_with_regex(field)
abc
|
codesearchnet
|
def __tf_unflatten__(cls, metadata, components):
|
Create a user-defined object from (metadata, components).
Args:
metadata: a custom Python object that stands for the static config for
reconstructing a new object of the current class.
components: a `tuple` that contains the dynamic data fields of the current
class, for object reconstruction.
Returns:
The user-defined object, with the same class of the current object.
Implementation Note:
- This method should not invoke any TensorFlow ops.
- This method only needs to unflatten the current level. If the object has
an attribute that also need custom unflattening, nest functions will
utilize this method to do recursive unflattening.
|
github-repos
|
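The entry above documents only the unflatten half of the protocol. Below is a hypothetical minimal class sketching the paired `__tf_flatten__`/`__tf_unflatten__` methods, assuming the convention that flattening yields a `(metadata, components)` pair; the class and field names are invented for illustration.

```python
class MaskedValue:
    """Toy container: `value` is dynamic data, `mask_name` is static config."""

    def __init__(self, value, mask_name):
        self.value = value
        self.mask_name = mask_name

    def __tf_flatten__(self):
        # Static config goes into metadata, dynamic fields into components.
        return (self.mask_name,), (self.value,)

    @classmethod
    def __tf_unflatten__(cls, metadata, components):
        # Rebuild an equivalent object from (metadata, components).
        (mask_name,) = metadata
        (value,) = components
        return cls(value, mask_name)
```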
def set_signal_type(self, sig_type):
if isinstance(sig_type, str):
sig_type = [sig_type]
self.snr_input.signal_type = sig_type
return
|
Set the signal type of interest.
Sets the signal type for which the SNR is calculated.
This means inspiral, merger, and/or ringdown.
Args:
sig_type (str or list of str): Signal type desired by user.
Choices are `ins`, `mrg`, `rd`, `all` for circular waveforms created with PhenomD.
If eccentric waveforms are used, must be `all`.
|
codesearchnet
|
def _summarize_eager(tensor, summarize=None):
if summarize is None:
summarize = 3
elif summarize < 0:
summarize = array_ops.size(tensor)
if tensor._rank():
flat = tensor.numpy().reshape((-1,))
lst = [str(x) for x in flat[:summarize]]
if len(lst) < flat.size:
lst.append('...')
elif gen_math_ops.not_equal(summarize, 0):
lst = [str(tensor.numpy())]
else:
lst = []
return ', '.join(lst)
|
Returns a summarized string representation of eager `tensor`.
Args:
tensor: EagerTensor to summarize
summarize: Include only this many leading elements of `array`
|
github-repos
|
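A small self-contained sketch of the truncation behaviour described above, using plain NumPy in place of an EagerTensor.

```python
import numpy as np

def summarize_array(array, summarize=3):
    # Show the first `summarize` elements and append '...' if any were dropped.
    flat = np.asarray(array).reshape((-1,))
    parts = [str(x) for x in flat[:summarize]]
    if len(parts) < flat.size:
        parts.append('...')
    return ', '.join(parts)

print(summarize_array([1, 2, 3, 4, 5]))  # 1, 2, 3, ...
```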
def register_name(self, register_index):
result = self._dll.JLINKARM_GetRegisterName(register_index)
return ctypes.cast(result, ctypes.c_char_p).value.decode()
|
Retrieves and returns the name of an ARM CPU register.
Args:
self (JLink): the ``JLink`` instance
register_index (int): index of the register whose name to retrieve
Returns:
Name of the register.
|
codesearchnet
|
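A usage sketch for the pylink `register_name` entry, assuming an attached J-Link probe and target; the target name and register indices are illustrative.

```python
import pylink

jlink = pylink.JLink()
jlink.open()                  # probe selection omitted for brevity
jlink.connect('STM32F407VE')  # illustrative target device name

# Print the names of the first few CPU registers.
for index in range(4):
    print(index, jlink.register_name(index))
```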
def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, deterministic: bool=True, params: Optional[dict]=None, dropout_rng: PRNGKey=None):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
if encoder_attention_mask is None:
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
batch_size, sequence_length = decoder_input_ids.shape
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')
decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
rngs = {}
if dropout_rng is not None:
rngs['dropout'] = dropout_rng
inputs = {'params': params or self.params}
if past_key_values:
inputs['cache'] = past_key_values
mutable = ['cache']
else:
mutable = False
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
outputs = decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_embedding = module.model.variables['params']['shared']['embedding']
lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states)
else:
lm_logits = module.lm_head(hidden_states)
lm_logits += module.final_logits_bias.astype(self.dtype)
return (lm_logits, outputs)
outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, rngs=rngs, mutable=mutable, method=_decoder_forward)
if past_key_values is None:
lm_logits, decoder_outputs = outputs
else:
(lm_logits, decoder_outputs), past = outputs
if return_dict:
outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions)
else:
outputs = (lm_logits,) + decoder_outputs[1:]
if past_key_values is not None and return_dict:
outputs['past_key_values'] = unfreeze(past['cache'])
return outputs
elif past_key_values is not None and (not return_dict):
outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]
return outputs
|
Returns:
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration
>>> model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-large")
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
>>> encoder_outputs = model.encode(**inputs)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> logits = outputs.logits
```
|
github-repos
|
def spawn_watcher(self, label, target=None, eternal=False):
if (label not in self._sources):
raise YapconfSourceError(('Cannot watch %s no source named %s' % (label, label)))
current_config = self._sources[label].get_data()
handler = ConfigChangeHandler(current_config, self, target)
return self._sources[label].watch(handler, eternal)
|
Spawns a config watcher in a separate daemon thread.
If a particular config value changes, and the item has a
``watch_target`` defined, then that method will be called.
If a ``target`` is passed in, then it will call the ``target``
anytime the config changes.
Args:
label (str): Should match a label added through ``add_source``
target (func): Should be a function that takes two arguments,
the old configuration and the new configuration.
eternal (bool): Determines if watcher threads should be restarted
if they die.
Returns:
The thread that was spawned.
|
codesearchnet
|
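A hedged sketch of `spawn_watcher` in use, assuming yapconf's `YapconfSpec` API; the spec items, source label, and file path are illustrative.

```python
from yapconf import YapconfSpec

spec = YapconfSpec({'db_name': {'type': 'str'}})
spec.add_source('config-file', 'json', filename='/tmp/app_config.json')

def on_config_change(old_config, new_config):
    # Receives the previous and the freshly loaded configuration.
    print('config changed:', old_config, '->', new_config)

watcher_thread = spec.spawn_watcher('config-file',
                                    target=on_config_change,
                                    eternal=True)
```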
def _DepthwiseConv2dNumpy(x1, x2, strides, padding, data_format, dilations):
if data_format == 'NCHW':
x1 = np.transpose(x1, (0, 3, 1, 2))
strides = [strides[0], strides[3], strides[1], strides[2]]
if dilations:
dilations = [dilations[0], dilations[3], dilations[1], dilations[2]]
if dilations:
fh, fw, c, o = x2.shape
new_fh = (fh - 1) * dilations[0] + 1
new_fw = (fw - 1) * dilations[1] + 1
new_x2 = np.zeros((new_fh, new_fw, c, o))
for i in range(fh):
for j in range(fw):
new_x2[i * dilations[0], j * dilations[1], :] = x2[i, j, :, :]
x2 = new_x2
if padding == 'SAME':
def PaddingsForDim(input_dim, filter_dim, stride):
if input_dim % stride == 0:
total_padding = max(filter_dim - stride, 0)
else:
total_padding = max(filter_dim - input_dim % stride, 0)
# Split the total padding between before/after (TF 'SAME' convention).
pad_before = total_padding // 2
pad_after = total_padding - pad_before
return (pad_before, pad_after)
padding = [(0, 0), PaddingsForDim(x1.shape[1], x2.shape[0], strides[1]), PaddingsForDim(x1.shape[2], x2.shape[1], strides[2]), (0, 0)]
elif padding == 'VALID':
padding = [(0, 0)] * 4
x1 = np.pad(x1, padding, 'constant')
y = _DepthwiseConv2dNumpyBasic(x1, x2, strides)
if data_format == 'NCHW':
y = np.transpose(y, (0, 2, 3, 1))
return y
|
Compute depthwise_conv2d using Numpy.
This allows us to test TensorFlow's depthwise_conv2d by comparing to the
Numpy version.
Unlike `_DepthwiseConv2dNumpyBasic`, this supports more advanced features
like padding.
Args:
x1: The input Numpy array.
x2: The filter Numpy array.
strides: A Python list of 4 elements representing the strides.
padding: The padding. "SAME", "VALID", or a list of explicit paddings.
data_format: "NHWC" or "NCHW".
dilations: A list of 2 elements, representing the dilations.
Returns:
The depthwise conv2d as a Numpy array.
|
github-repos
|
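A standalone worked sketch of the 'SAME' padding arithmetic used in the helper above, with the even before/after split that TensorFlow applies.

```python
def same_paddings_for_dim(input_dim, filter_dim, stride):
    # Total padding needed so the output length is ceil(input_dim / stride),
    # split with any odd leftover placed after the input.
    if input_dim % stride == 0:
        total_padding = max(filter_dim - stride, 0)
    else:
        total_padding = max(filter_dim - input_dim % stride, 0)
    pad_before = total_padding // 2
    pad_after = total_padding - pad_before
    return pad_before, pad_after

print(same_paddings_for_dim(5, 3, 1))  # (1, 1)
print(same_paddings_for_dim(5, 4, 2))  # (1, 2)
```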
def __init__(self, process: Process):
self.process = process
self.stopped_due_to_worker_shutdown = False
|
Constructor.
Args:
process (Process): task process
|
juraj-google-style
|
def fetch(self, subscription_id, data={}, **kwargs):
return super(Subscription, self).fetch(subscription_id, data, **kwargs)
|
Fetch Subscription for given Id
Args:
subscription_id : Id for which subscription object is retrieved
Returns:
Subscription dict for given subscription Id
|
codesearchnet
|
def convert_datetime_array(array):
if not isinstance(array, np.ndarray):
return array
try:
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') ==
dt2001.astype('datetime64[ms]').astype('int64'))
except AttributeError as e:
if e.args == ("'module' object has no attribute 'datetime64'",):
if 'PyPy' in sys.version:
legacy_datetime64 = False
pass
else:
raise e
else:
raise e
if array.dtype.kind == 'M':
if legacy_datetime64:
if array.dtype == np.dtype('datetime64[ns]'):
array = array.astype('int64') / 10**6.0
else:
array = array.astype('datetime64[us]').astype('int64') / 1000.
elif array.dtype.kind == 'm':
array = array.astype('timedelta64[us]').astype('int64') / 1000.
return array
|
Convert NumPy datetime arrays to arrays of milliseconds since epoch.
Args:
array : (obj)
A NumPy array of datetime to convert
If the value passed in is not a NumPy array, it will be returned as-is.
Returns:
array
|
juraj-google-style
|
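A short example of the arithmetic the converter above performs on a modern NumPy build: datetime64 values become float milliseconds since the Unix epoch.

```python
import numpy as np

dates = np.array(['2001-01-01', '2001-01-02'], dtype='datetime64[ns]')
millis = dates.astype('datetime64[us]').astype('int64') / 1000.0
print(millis)  # [9.783072e+11 9.783936e+11]  (ms since 1970-01-01)
```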
def ms_to_frames(ms, fps):
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round((ms / 1000) * fps))
|
Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero.
|
juraj-google-style
|
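A couple of worked calls for the conversion above, assuming the `ms_to_frames` helper shown in this entry is in scope.

```python
print(ms_to_frames(1000, 25))      # 25
print(ms_to_frames(1001, 23.976))  # 24  (1.001 s * 23.976 fps ~= 24.0 frames)
print(ms_to_frames(0, 29.97))      # 0
```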
def context(self, name):
data = self._context(name)
context = data.get("context")
if context:
return context
assert self.load_path
context_path = os.path.join(self.load_path, "contexts", "%s.rxt" % name)
context = ResolvedContext.load(context_path)
data["context"] = context
data["loaded"] = True
return context
|
Get a context.
Args:
name (str): Name to store the context under.
Returns:
`ResolvedContext` object.
|
juraj-google-style
|
def match(self, message) -> bool:
if self.to and message.to != self.to:
return False
if self.sender and message.sender != self.sender:
return False
if self.body and message.body != self.body:
return False
if self.thread and message.thread != self.thread:
return False
for key, value in self.metadata.items():
if message.get_metadata(key) != value:
return False
logger.debug(f"message matched {self} == {message}")
return True
|
Returns whether a message matches this message or not.
The message can be a Message object or a Template object.
Args:
message (spade.message.Message): the message to match to
Returns:
bool: whether the message matches or not
|
juraj-google-style
|
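A hedged sketch of template matching with spade, assuming the `Message` and `Template` classes expose the attributes checked by the method above; JIDs and metadata values are illustrative.

```python
from spade.message import Message
from spade.template import Template

template = Template()
template.sender = "sensor@localhost"
template.set_metadata("performative", "inform")

msg = Message(to="agent@localhost", sender="sensor@localhost", body="21.5")
msg.set_metadata("performative", "inform")

print(template.match(msg))  # True: sender and metadata both agree
```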
def _GetCh(self):
fd = self._tty.fileno()
old = termios.tcgetattr(fd)
try:
tty.setraw(fd)
ch = self._tty.read(1)
if (ord(ch) == 27):
ch += self._tty.read(2)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
return ch
|
Read a single character from the user.
Returns:
A string, the character read.
|
codesearchnet
|
def _insert_operations(self, operations: Sequence[ops.Operation], insertion_indices: Sequence[int]) -> None:
if (len(operations) != len(insertion_indices)):
raise ValueError('operations and insertion_indices must have the same length.')
self._moments += [ops.Moment() for _ in range(((1 + max(insertion_indices)) - len(self)))]
moment_to_ops = defaultdict(list)
for (op_index, moment_index) in enumerate(insertion_indices):
moment_to_ops[moment_index].append(operations[op_index])
for (moment_index, new_ops) in moment_to_ops.items():
self._moments[moment_index] = ops.Moment((self._moments[moment_index].operations + tuple(new_ops)))
|
Inserts operations at the specified moments. Appends new moments if
necessary.
Args:
operations: The operations to insert.
insertion_indices: Where to insert them, i.e. operations[i] is
inserted into moments[insertion_indices[i]].
Raises:
ValueError: operations and insertion_indices have different lengths.
NB: It's on the caller to ensure that the operations won't conflict
with operations already in the moment or even each other.
|
codesearchnet
|
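A standalone sketch of the grouping step used above: operations are bucketed by their target moment index before being appended to those moments.

```python
from collections import defaultdict

operations = ['X(q0)', 'Y(q1)', 'Z(q0)']  # stand-ins for real gate operations
insertion_indices = [0, 0, 2]             # operations[i] -> moments[indices[i]]

moment_to_ops = defaultdict(list)
for op_index, moment_index in enumerate(insertion_indices):
    moment_to_ops[moment_index].append(operations[op_index])

print(dict(moment_to_ops))  # {0: ['X(q0)', 'Y(q1)'], 2: ['Z(q0)']}
```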
class TFCLIPEncoder(keras.layers.Layer):
def __init__(self, config: CLIPConfig, **kwargs):
super().__init__(**kwargs)
self.layers = [TFCLIPEncoderLayer(config, name=f'layers_._{i}') for i in range(config.num_hidden_layers)]
def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layers):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, training=training)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None))
return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, 'layers', None) is not None:
for layer in self.layers:
with tf.name_scope(layer.name):
layer.build(None)
|
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`TFCLIPEncoderLayer`].
Args:
config: CLIPConfig
|
github-repos
|
def serialize_to_xml(root, block):
root.tag = 'ubcpi'
if block.rationale_size is not None:
if block.rationale_size.get('min'):
root.set('rationale_size_min', unicode(block.rationale_size.get('min')))
if block.rationale_size.get('max'):
root.set('rationale_size_max', unicode(block.rationale_size['max']))
if block.algo:
if block.algo.get('name'):
root.set('algorithm', block.algo.get('name'))
if block.algo.get('num_responses'):
root.set('num_responses', unicode(block.algo.get('num_responses')))
display_name = etree.SubElement(root, 'display_name')
display_name.text = block.display_name
question = etree.SubElement(root, 'question')
question_text = etree.SubElement(question, 'text')
question_text.text = block.question_text['text']
serialize_image(block.question_text, question)
options = etree.SubElement(root, 'options')
serialize_options(options, block)
seeds = etree.SubElement(root, 'seeds')
serialize_seeds(seeds, block)
|
Serialize the Peer Instruction XBlock's content to XML.
Args:
block (PeerInstructionXBlock): The peer instruction block to serialize.
root (etree.Element): The XML root node to update.
Returns:
None. The serialized content is written into ``root``.
|
juraj-google-style
|
def __init__(self, project=None, deidentification_template_name=None, deidentification_config=None, inspection_template_name=None, inspection_config=None, timeout=None):
self.config = {}
self.project = project
self.timeout = timeout
if deidentification_template_name is not None and deidentification_config is not None:
raise ValueError('Both deidentification_template_name and deidentification_config were specified. Please specify only one of these.')
elif deidentification_template_name is None and deidentification_config is None:
raise ValueError('deidentification_template_name or deidentification_config must be specified.')
elif deidentification_template_name is not None:
self.config['deidentify_template_name'] = deidentification_template_name
else:
self.config['deidentify_config'] = deidentification_config
if inspection_config is None and inspection_template_name is None:
raise ValueError('inspection_template_name or inspection_config must be specified')
if inspection_template_name is not None:
self.config['inspect_template_name'] = inspection_template_name
if inspection_config is not None:
self.config['inspect_config'] = inspection_config
|
Initializes a :class:`MaskDetectedDetails` transform.
Args:
project: Optional. GCP project name in which inspection will be performed
deidentification_template_name (str): Either this or
`deidentification_config` required. Name of
deidentification template to be used on detected sensitive information
instances in text.
deidentification_config
(``Union[dict, google.cloud.dlp_v2.types.DeidentifyConfig]``):
Configuration for the de-identification of the content item.
If both template name and config are supplied,
config takes precedence.
inspection_template_name (str): This or `inspection_config` required.
Name of inspection template to be used
to detect sensitive data in text.
inspection_config
(``Union[dict, google.cloud.dlp_v2.types.InspectConfig]``):
Configuration for the inspector used to detect sensitive data in text.
If both template name and config are supplied,
config takes precedence.
timeout (float): Optional. The amount of time, in seconds, to wait for
the request to complete.
|
github-repos
|
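A hedged pipeline sketch for the transform configured above, assuming it is Apache Beam's `MaskDetectedDetails` importable from `apache_beam.ml.gcp.cloud_dlp`; the project and template names are illustrative.

```python
import apache_beam as beam
from apache_beam.ml.gcp.cloud_dlp import MaskDetectedDetails  # assumed import path

with beam.Pipeline() as pipeline:
    _ = (
        pipeline
        | beam.Create(['My email is jane.doe@example.com'])
        | MaskDetectedDetails(
            project='my-gcp-project',
            deidentification_template_name=(
                'projects/my-gcp-project/deidentifyTemplates/mask'),
            inspection_template_name=(
                'projects/my-gcp-project/inspectTemplates/pii'))
        | beam.Map(print))
```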
def orient_undirected_graph(self, data, umg, alg='HC'):
warnings.warn('The pairwise GNN model is computed on each edge of the UMG to initialize the model and start CGNN with a DAG')
gnn = GNN(nh=self.nh, lr=self.lr)
og = gnn.orient_graph(data, umg, nb_runs=self.nb_runs, nb_max_runs=self.nb_runs, nb_jobs=self.nb_jobs, train_epochs=self.train_epochs, test_epochs=self.test_epochs, verbose=self.verbose, gpu=self.gpu)
dag = dagify_min_edge(og)
return self.orient_directed_graph(data, dag, alg=alg)
|
Orient the undirected graph using GNN and apply CGNN to improve the graph.
Args:
data (pandas.DataFrame): Observational data on which causal
discovery has to be performed.
umg (nx.Graph): Graph that provides the skeleton, on which the GNN
then the CGNN algorithm will be applied.
alg (str): Exploration heuristic to use, among ["HC", "HCr",
"tabu", "EHC"]
Returns:
networkx.DiGraph: Solution given by CGNN.
.. note::
GNN (``cdt.causality.pairwise.GNN``) is first used to orient the
undirected graph and output a DAG before applying CGNN.
|
codesearchnet
|
def _get_reference(document_path, reference_map):
try:
return reference_map[document_path]
except KeyError:
msg = _BAD_DOC_TEMPLATE.format(document_path)
raise ValueError(msg)
|
Get a document reference from a dictionary.
This just wraps a simple dictionary look-up with a helpful error that is
specific to :meth:`~.firestore.client.Client.get_all`, the
**public** caller of this function.
Args:
document_path (str): A fully-qualified document path.
reference_map (Dict[str, .DocumentReference]): A mapping (produced
by :func:`_reference_info`) of fully-qualified document paths to
document references.
Returns:
.DocumentReference: The matching reference.
Raises:
ValueError: If ``document_path`` has not been encountered.
|
codesearchnet
|
def OverwriteAndClose(self, compressed_data, size):
self.Set(self.Schema.CONTENT(compressed_data))
self.Set(self.Schema.SIZE(size))
super(AFF4MemoryStreamBase, self).Close()
|
Directly overwrite the current contents.
Replaces the data currently in the stream with compressed_data,
and closes the object. Makes it possible to avoid recompressing
the data.
Args:
compressed_data: The data to write, must be zlib compressed.
size: The uncompressed size of the data.
|
juraj-google-style
|
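A short caller-side sketch for the method above, assuming `stream` is an already-open AFF4 memory stream object; the payload is illustrative.

```python
import zlib

raw = b'already-processed payload'
compressed = zlib.compress(raw)

# The method expects pre-compressed bytes plus the *uncompressed* size.
stream.OverwriteAndClose(compressed, len(raw))
```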
def _compile_constant_expression(self,
expr: Expression,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
etype = expr.etype
args = expr.args
dtype = utils.python_type_to_dtype(etype[1])
fluent = TensorFluent.constant(args, dtype=dtype)
return fluent
|
Compile a constant expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL constant expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
|
juraj-google-style
|
def UploadFilePath(self, filepath, offset=0, amount=None):
return self._UploadChunkStream(self._streamer.StreamFilePath(filepath, offset=offset, amount=amount))
|
Uploads chunks of a file on a given path to the transfer store flow.
Args:
filepath: A path to the file to upload.
offset: An integer offset at which the file upload should start on.
amount: An upper bound on number of bytes to stream. If it is `None` then
the whole file is uploaded.
Returns:
A `BlobImageDescriptor` object.
|
codesearchnet
|
def random_sparse(strategy, prob, obj_reaction, flux_threshold):
essential = set()
deleted = set()
for (entity, deleted_reactions) in strategy.iter_tests():
if (obj_reaction in deleted_reactions):
logger.info('Marking entity {} as essential because the objective reaction depends on this entity...'.format(entity))
essential.add(entity)
continue
if (len(deleted_reactions) == 0):
logger.info('No reactions were removed when entity {} was deleted'.format(entity))
deleted.add(entity)
strategy.delete(entity, deleted_reactions)
continue
logger.info('Deleted reactions: {}'.format(', '.join(deleted_reactions)))
constr = []
for r in deleted_reactions:
flux_var = prob.get_flux_var(r)
(c,) = prob.prob.add_linear_constraints((flux_var == 0))
constr.append(c)
logger.info('Trying FBA without reactions {}...'.format(', '.join(deleted_reactions)))
try:
prob.maximize(obj_reaction)
except fluxanalysis.FluxBalanceError:
logger.info('FBA is infeasible, marking {} as essential'.format(entity))
for c in constr:
c.delete()
essential.add(entity)
continue
logger.debug('Reaction {} has flux {}'.format(obj_reaction, prob.get_flux(obj_reaction)))
if (prob.get_flux(obj_reaction) < flux_threshold):
for c in constr:
c.delete()
essential.add(entity)
logger.info('Entity {} was essential'.format(entity))
else:
deleted.add(entity)
strategy.delete(entity, deleted_reactions)
logger.info('Entity {} was deleted'.format(entity))
return (essential, deleted)
|
Find a random minimal network of model reactions.
Given a reaction to optimize and a threshold, delete entities randomly
until the flux of the reaction to optimize falls under the threshold.
Keep deleting until no more entities can be deleted. It works
with two strategies: deleting reactions or deleting genes (reactions
related to certain genes).
Args:
strategy: :class:`.ReactionDeletionStrategy` or
:class:`.GeneDeletionStrategy`.
prob: :class:`psamm.fluxanalysis.FluxBalanceProblem`.
obj_reaction: objective reactions to optimize.
flux_threshold: threshold of max reaction flux.
|
codesearchnet
|
def register_controller(self, module, required=True, min_number=1):
verify_controller_module(module)
module_ref_name = module.__name__.split('.')[(- 1)]
if (module_ref_name in self._controller_objects):
raise signals.ControllerError(('Controller module %s has already been registered. It cannot be registered again.' % module_ref_name))
module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME
if (module_config_name not in self.controller_configs):
if required:
raise signals.ControllerError(('No corresponding config found for %s' % module_config_name))
logging.warning('No corresponding config found for optional controller %s', module_config_name)
return None
try:
original_config = self.controller_configs[module_config_name]
controller_config = copy.deepcopy(original_config)
objects = module.create(controller_config)
except:
logging.exception('Failed to initialize objects for controller %s, abort!', module_config_name)
raise
if (not isinstance(objects, list)):
raise signals.ControllerError(('Controller module %s did not return a list of objects, abort.' % module_ref_name))
actual_number = len(objects)
if (actual_number < min_number):
module.destroy(objects)
raise signals.ControllerError(('Expected to get at least %d controller objects, got %d.' % (min_number, actual_number)))
self._controller_objects[module_ref_name] = copy.copy(objects)
logging.debug('Found %d objects for controller %s', len(objects), module_config_name)
self._controller_modules[module_ref_name] = module
return objects
|
Loads a controller module and returns its loaded devices.
This is to be used in a mobly test class.
Args:
module: A module that follows the controller module interface.
required: A bool. If True, failing to register the specified
controller module raises exceptions. If False, the objects
that failed to instantiate will be skipped.
min_number: An integer that is the minimum number of controller
objects to be created. Default is one, since you should not
register a controller module without expecting at least one
object.
Returns:
A list of controller objects instantiated from controller_module, or
None if no config existed for this controller and it was not a
required controller.
Raises:
ControllerError:
* The controller module has already been registered.
* The actual number of objects instantiated is less than the `min_number`.
* `required` is True and no corresponding config can be found.
* Any other error occurred in the registration process.
|
codesearchnet
|
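A typical call site for `register_controller` inside a Mobly test class, using the stock `android_device` controller module; the attribute name `ads` is conventional rather than required.

```python
from mobly import base_test
from mobly import test_runner
from mobly.controllers import android_device


class HelloWorldTest(base_test.BaseTestClass):

    def setup_class(self):
        # Returns the list of controller objects built from the test config.
        self.ads = self.register_controller(android_device)

    def test_log(self):
        self.ads[0].log.info('Hello from the first device.')


if __name__ == '__main__':
    test_runner.main()
```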
async def getNodeByBuid(self, buid):
node = self.livenodes.get(buid)
if node is not None:
return node
props = {}
proplayr = {}
for layr in self.layers:
layerprops = await layr.getBuidProps(buid)
props.update(layerprops)
proplayr.update({k: layr for k in layerprops})
node = s_node.Node(self, buid, props.items(), proplayr=proplayr)
await asyncio.sleep(0)
if node.ndef is None:
return None
self.buidcache.append(node)
self.livenodes[buid] = node
return node
|
Retrieve a node tuple by binary id.
Args:
buid (bytes): The binary ID for the node.
Returns:
Optional[s_node.Node]: The node object or None.
|
juraj-google-style
|
def _copy_script_migrated(self, filename, id_=(- 1), file_type=SCRIPT_FILE_TYPE):
basefname = os.path.basename(filename)
resource = open(filename, 'rb')
headers = {'DESTINATION': '1', 'OBJECT_ID': str(id_), 'FILE_TYPE': file_type, 'FILE_NAME': basefname}
response = self.connection['jss'].session.post(url=('%s/%s' % (self.connection['jss'].base_url, 'dbfileupload')), data=resource, headers=headers)
return response
|
Upload a script to a migrated JSS's database.
On a "migrated" JSS, scripts are POSTed to the JSS. Pass an id
if you wish to associate the script with an existing Script
object, otherwise, it will create a new Script object.
Args:
filename: Path to script file.
id_: Int ID of Script object to associate this file with.
Default is -1, which creates a new Script.
|
codesearchnet
|
def __call__(self,
state: Sequence[tf.Tensor],
timestep: tf.Tensor) -> Sequence[tf.Tensor]:
return self._default
|
Returns the default action fluents regardless of the current `state` and `timestep`.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
timestep (tf.Tensor): The current timestep.
Returns:
Sequence[tf.Tensor]: A tuple of action fluents.
|
juraj-google-style
|