code | docstring | source
---|---|---|
def get_texture(self, label: str) -> Union[moderngl.Texture, moderngl.TextureArray,
moderngl.Texture3D, moderngl.TextureCube]:
return self._project.get_texture(label)
|
Get a texture by its label
Args:
label (str): The Label for the texture
Returns:
The :py:class:`moderngl.Texture` instance
|
juraj-google-style
|
def FilterRange(self, start_time=None, stop_time=None):
start_time = self._NormalizeTime(start_time)
stop_time = self._NormalizeTime(stop_time)
self.data = [
p for p in self.data
if (start_time is None or p[1] >= start_time) and
(stop_time is None or p[1] < stop_time)
]
|
Filter the series to lie between start_time and stop_time.
Removes all values of the series which are outside of some time range.
Args:
start_time: If set, timestamps before start_time will be dropped.
stop_time: If set, timestamps at or past stop_time will be dropped.
|
juraj-google-style
|
def init_app(self, app):
self._key = (app.config.get(CONF_KEY) or getenv(CONF_KEY))
if (not self._key):
return
self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)
sender = AsynchronousSender(self._endpoint_uri)
queue = AsynchronousQueue(sender)
self._channel = TelemetryChannel(None, queue)
self._init_request_logging(app)
self._init_trace_logging(app)
self._init_exception_logging(app)
|
Initializes the extension for the provided Flask application.
Args:
app (flask.Flask): the Flask application for which to initialize the extension.
|
codesearchnet
|
def eval(self, data, data_store, *, exclude=None):
exclude = ([] if (exclude is None) else exclude)
result = {}
for (key, value) in self.items():
if (key in exclude):
continue
if ((value is not None) and callable(value)):
result[key] = value(data, data_store)
else:
result[key] = value
return TaskParameters(result)
|
Return a new object in which callable parameters have been evaluated.
Native types are not touched and simply returned, while callable methods are
executed and their return value is returned.
Args:
data (MultiTaskData): The data object that has been passed from the
predecessor task.
data_store (DataStore): The persistent data store object that allows the task
to store data for access across the current workflow
run.
exclude (list): List of key names as strings that should be excluded from
the evaluation.
Returns:
TaskParameters: A new TaskParameters object with the callable parameters
replaced by their return value.
|
codesearchnet
|
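A standalone sketch of the evaluation rule above (illustrative only; the real method operates on a TaskParameters mapping and returns a new TaskParameters): callable values are invoked with (data, data_store), everything else passes through unchanged.
def eval_params(params, data, data_store, exclude=()):
    # Mirror of eval(): call callables with (data, data_store), keep plain values.
    return {k: (v(data, data_store) if v is not None and callable(v) else v)
            for k, v in params.items() if k not in exclude}

print(eval_params({'n': 3, 'name': lambda d, s: d['id'].upper()},
                  data={'id': 'abc'}, data_store=None))
# -> {'n': 3, 'name': 'ABC'}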
def shell_call(command, **kwargs):
command = list(command)
for i in range(len(command)):
m = CMD_VARIABLE_RE.match(command[i])
if m:
var_id = m.group(1)
if var_id in kwargs:
command[i] = kwargs[var_id]
return subprocess.call(command) == 0
|
Calls shell command with parameter substitution.
Args:
command: command to run as a list of tokens
**kwargs: dictionary with substitutions
Returns:
whether command was successful, i.e. returned 0 status code
Example of usage:
shell_call(['cp', '${A}', '${B}'], A='src_file', B='dst_file')
will call shell command:
cp src_file dst_file
|
juraj-google-style
|
def init(self, game_info, static_data):
self._game_info = game_info
self._static_data = static_data
if not game_info.HasField("start_raw"):
raise ValueError("Raw observations are required for the renderer.")
self._map_size = point.Point.build(game_info.start_raw.map_size)
if game_info.options.HasField("feature_layer"):
fl_opts = game_info.options.feature_layer
self._feature_screen_px = point.Point.build(fl_opts.resolution)
self._feature_minimap_px = point.Point.build(fl_opts.minimap_resolution)
self._feature_camera_width_world_units = fl_opts.width
self._render_rgb = False
else:
self._feature_screen_px = self._feature_minimap_px = None
if game_info.options.HasField("render"):
render_opts = game_info.options.render
self._rgb_screen_px = point.Point.build(render_opts.resolution)
self._rgb_minimap_px = point.Point.build(render_opts.minimap_resolution)
self._render_rgb = True
else:
self._rgb_screen_px = self._rgb_minimap_px = None
if not self._feature_screen_px and not self._rgb_screen_px:
raise ValueError("Nothing to render.")
try:
self.init_window()
self._initialized = True
except pygame.error as e:
self._initialized = False
logging.error("-" * 60)
logging.error("Failed to initialize pygame: %s", e)
logging.error("Continuing without pygame.")
logging.error("If you're using ssh and have an X server, try ssh -X.")
logging.error("-" * 60)
self._obs = sc_pb.ResponseObservation()
self._queued_action = None
self._queued_hotkey = ""
self._select_start = None
self._alerts = {}
self._past_actions = []
self._help = False
|
Take the game info and the static data needed to set up the game.
This must be called before render or get_actions for each game or restart.
Args:
game_info: A `sc_pb.ResponseGameInfo` object for this game.
static_data: A `StaticData` object for this game.
Raises:
ValueError: if there is nothing to render.
|
juraj-google-style
|
def GetDataStreamByPathSpec(self, path_spec):
file_entry = self.GetFileEntryByPathSpec(path_spec)
if not file_entry:
return None
data_stream_name = getattr(path_spec, 'data_stream', None)
return file_entry.GetDataStream(data_stream_name)
|
Retrieves a data stream for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
DataStream: a data stream or None if not available.
|
juraj-google-style
|
def query(self, coords, **kwargs):
return super(Lenz2017Query, self).query(coords, **kwargs)
|
Returns E(B-V), in mags, at the specified location(s) on the sky.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
Returns:
A float array of the reddening, in magnitudes of E(B-V), at the
selected coordinates.
|
juraj-google-style
|
def __message_to_schema(self, message_type):
name = self.__normalized_name(message_type)
schema = {
'id': name,
'type': 'object',
}
if message_type.__doc__:
schema['description'] = message_type.__doc__
properties = {}
for field in message_type.all_fields():
descriptor = {}
type_info = {}
if type(field) == messages.MessageField:
field_type = field.type().__class__
type_info['$ref'] = self.add_message(field_type)
if field_type.__doc__:
descriptor['description'] = field_type.__doc__
else:
schema_type = self.__FIELD_TO_SCHEMA_TYPE_MAP.get(
type(field), self.__DEFAULT_SCHEMA_TYPE)
if isinstance(schema_type, dict):
variant_map = schema_type
variant = getattr(field, 'variant', None)
if variant in variant_map:
schema_type = variant_map[variant]
else:
schema_type = variant_map[None]
type_info['type'] = schema_type[0]
if schema_type[1]:
type_info['format'] = schema_type[1]
if type(field) == messages.EnumField:
sorted_enums = sorted([enum_info for enum_info in field.type],
key=lambda enum_info: enum_info.number)
type_info['enum'] = [enum_info.name for enum_info in sorted_enums]
if field.required:
descriptor['required'] = True
if field.default:
if type(field) == messages.EnumField:
descriptor['default'] = str(field.default)
else:
descriptor['default'] = field.default
if field.repeated:
descriptor['items'] = type_info
descriptor['type'] = 'array'
else:
descriptor.update(type_info)
properties[field.name] = descriptor
schema['properties'] = properties
return schema
|
Parse a single message into JSON Schema.
Will recursively descend the message structure
and also parse other messages references via MessageFields.
Args:
message_type: protorpc.messages.Message class to parse.
Returns:
An object representation of the schema.
|
juraj-google-style
|
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
def __init__(self, suppress_tokens: list):
self.suppress_tokens = list(suppress_tokens)
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
scores = scores.at[..., self.suppress_tokens].set(-float('inf'))
return scores
|
[`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step. The processor will set their log probs
to be `-inf` so they are not sampled.
Args:
suppress_tokens (`list`):
Tokens to not sample.
|
github-repos
|
def children(self, as_resources=False):
children = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.ldp.contains, None))]
if as_resources:
logger.debug('retrieving children as resources')
children = [ self.repo.get_resource(child) for child in children ]
return children
|
Method to return the hierarchical children of this resource.
Args:
as_resources (bool): if True, open each child as the appropriate resource type instead of returning the URI only
Returns:
(list): list of resources
|
juraj-google-style
|
def emit(self, **kwargs):
self._ensure_emit_kwargs(kwargs)
for slot in self.slots:
slot(**kwargs)
|
Emit signal by calling all connected slots.
The arguments supplied have to match the signal definition.
Args:
kwargs: Keyword arguments to be passed to connected slots.
Raises:
:exc:`InvalidEmit`: If arguments don't match signal specification.
|
juraj-google-style
|
def _parse_line_entry(self, line, type):
name = None
key_values = {}
if (type == 'vars'):
key_values = self._parse_line_vars(line)
else:
tokens = shlex.split(line.strip())
name = tokens.pop(0)
try:
key_values = self._parse_vars(tokens)
except ValueError:
self.log.warning('Unsupported vars syntax. Skipping line: {0}'.format(line))
return (name, {})
return (name, key_values)
|
Parse a section entry line into its components. In case of a 'vars'
section, the first field will be None. Otherwise, the first field will
be the unexpanded host or group name the variables apply to.
For example:
[production:children]
frontend purpose="web" # The line we process
Returns:
('frontend', {'purpose': 'web'})
For example:
[production:vars]
purpose="web" # The line we process
Returns:
(None, {'purpose': 'web'})
Undocumented feature:
[prod:vars]
json_like_vars=[{'name': 'htpasswd_auth'}]
Returns:
(None, {'name': 'htpasswd_auth'})
|
codesearchnet
|
def AddClass(self, class_name, gtfs_class):
if class_name in self._class_mapping:
raise problems.DuplicateMapping(class_name)
self._class_mapping[class_name] = gtfs_class
|
Adds an entry to the list of known classes.
Args:
class_name: A string with name through which gtfs_class is to be made
accessible.
gtfs_class: The class to be added.
Raises:
DuplicateMapping if class_name is already present in the class mapping.
|
juraj-google-style
|
def GenerateLabels(self, hash_information):
response_code = hash_information['response_code']
if response_code == self._VIRUSTOTAL_NOT_PRESENT_RESPONSE_CODE:
return ['virustotal_not_present']
if response_code == self._VIRUSTOTAL_PRESENT_RESPONSE_CODE:
positives = hash_information['positives']
if positives > 0:
return ['virustotal_detections_{0:d}'.format(positives)]
return ['virustotal_no_detections']
if response_code == self._VIRUSTOTAL_ANALYSIS_PENDING_RESPONSE_CODE:
return ['virustotal_analysis_pending']
logger.error(
'VirusTotal returned unknown response code {0!s}'.format(
response_code))
return ['virustotal_unknown_response_code_{0:d}'.format(response_code)]
|
Generates a list of strings that will be used in the event tag.
Args:
hash_information (dict[str, object]): the JSON decoded contents of the
result of a VirusTotal lookup, as produced by the VirusTotalAnalyzer.
Returns:
list[str]: strings describing the results from VirusTotal.
|
juraj-google-style
|
def _parse_version(version):
parsed_version = parse_version(version)
return tuple(
int(dot_version)
for dot_version in parsed_version.base_version.split('.')
) + (parsed_version.is_prerelease,)
|
Parse a version string.
Args:
version (str): A string representing a version e.g. '1.9rc2'
Returns:
tuple: major, minor, patch parts cast as integer and whether or not
it was a pre-release version.
|
juraj-google-style
|
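A minimal, hedged sketch of the same idea, assuming parse_version is packaging.version.parse (the actual helper bound in the module above is not shown here):
from packaging.version import parse as parse_version

def parse_version_tuple(version):
    parsed = parse_version(version)
    return tuple(int(part) for part in parsed.base_version.split('.')) + (parsed.is_prerelease,)

print(parse_version_tuple('1.9rc2'))  # (1, 9, True)   -- pre-release, no patch part
print(parse_version_tuple('2.0.1'))   # (2, 0, 1, False)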
def get_all_keywords(self, term_so_far='', current_dict=None):
terms_present = {}
if (not term_so_far):
term_so_far = ''
if (current_dict is None):
current_dict = self.keyword_trie_dict
for key in current_dict:
if (key == '_keyword_'):
terms_present[term_so_far] = current_dict[key]
else:
sub_values = self.get_all_keywords((term_so_far + key), current_dict[key])
for key in sub_values:
terms_present[key] = sub_values[key]
return terms_present
|
Recursively builds a dictionary of keywords present in the dictionary
And the clean name mapped to those keywords.
Args:
term_so_far : string
term built so far by adding all previous characters
current_dict : dict
current recursive position in dictionary
Returns:
terms_present : dict
A map of key and value where each key is a term in the keyword_trie_dict.
And value mapped to it is the clean name mapped to it.
Examples:
>>> keyword_processor = KeywordProcessor()
>>> keyword_processor.add_keyword('j2ee', 'Java')
>>> keyword_processor.add_keyword('Python', 'Python')
>>> keyword_processor.get_all_keywords()
{'j2ee': 'Java', 'python': 'Python'}
>>> # NOTE: for case_insensitive all keys will be lowercased.
|
codesearchnet
|
def __init__(self, client, method, url, query=None):
self.client = client
self.method = method
self.url = url
self.query = query if query is not None else {}
|
Construct request for the Retsly API
Args:
client (dict): Retsly client
method (string): method
url (string): url
query (dict): query parameters
|
juraj-google-style
|
def extra(name: str, desc: str) -> Callable:
def attr_dec(f):
f.__setattr__("extra_fn", True)
f.__setattr__("name", name)
f.__setattr__("desc", desc)
return f
return attr_dec
|
Decorator for slave channel's "additional features" interface.
Args:
name (str): A human readable name for the function.
desc (str): A short description and usage of it. Use
``{function_name}`` in place of the function name
in the description.
Returns:
The decorated method.
|
juraj-google-style
|
def open(cls, filename):
asarfile = open(filename, 'rb')
asarfile.seek(4)
header_size = struct.unpack('I', asarfile.read(4))
if len(header_size) <= 0:
raise IndexError()
header_size = header_size[0] - 8
asarfile.seek(asarfile.tell() + 8)
header = asarfile.read(header_size).decode('utf-8')
files = json.loads(header)
return cls(filename, asarfile, files, asarfile.tell())
|
Opens a *.asar file and constructs a new :see AsarArchive instance.
Args:
filename (str):
Path to the *.asar file to open for reading.
Returns (AsarArchive):
An instance of the :see AsarArchive class or None if reading failed.
|
juraj-google-style
|
def render(self, tmpl_name, request_env):
return super(WebApplication, self).render(tmpl_name, request_env)
|
Render the specified template and return the output.
Args:
tmpl_name (str): file name of the template
request_env (dict): request environment
Returns:
str - the rendered template
|
juraj-google-style
|
def softplus(x):
return math_ops.softplus(x)
|
Softplus of a tensor.
Args:
x: A tensor or variable.
Returns:
A tensor.
|
github-repos
|
def tooltip(self, value: Any, *, parent: Any=None, root_path: Optional[KeyPath]=None, css_classes: Optional[Sequence[str]]=None, id: Optional[str]=None, content: Union[str, Html, None]=None, **kwargs) -> Html:
del parent, kwargs
if content is None:
content = Html.escape(utils.format(value, root_path=root_path, compact=False, verbose=False, python_format=True, max_bytes_len=64, max_str_len=256))
return Html.element('span', [content], id=id, css_classes=['tooltip', css_classes]).add_style('\n \n span.tooltip {\n visibility: hidden;\n white-space: pre-wrap;\n font-weight: normal;\n background-color:
|
Renders a tooltip for the value.
Args:
value: The value to render.
parent: The parent of the value.
root_path: The root path of the value.
css_classes: CSS classes to add to the HTML element.
id: The ID of the tooltip span element. If None, no ID will be added.
content: The content to render. If None, the value will be rendered.
**kwargs: Additional keyword arguments passed from the user that
will be ignored.
Returns:
The rendered HTML as the tooltip of the value.
|
github-repos
|
def get_history(self, filters=(), pagesize=15, offset=0):
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
|
juraj-google-style
|
def _get_required_params_for_impression(self, experiment, variation_id):
snapshot = {}
snapshot[self.EventParams.DECISIONS] = [{
self.EventParams.EXPERIMENT_ID: experiment.id,
self.EventParams.VARIATION_ID: variation_id,
self.EventParams.CAMPAIGN_ID: experiment.layerId
}]
snapshot[self.EventParams.EVENTS] = [{
self.EventParams.EVENT_ID: experiment.layerId,
self.EventParams.TIME: self._get_time(),
self.EventParams.KEY: 'campaign_activated',
self.EventParams.UUID: str(uuid.uuid4())
}]
return snapshot
|
Get parameters that are required for the impression event to register.
Args:
experiment: Experiment for which impression needs to be recorded.
variation_id: ID for variation which would be presented to user.
Returns:
Dict consisting of decisions and events info for impression event.
|
juraj-google-style
|
def ReconcileShadow(self, store_type):
for (k, v) in iteritems(self.entry):
if (v.pw_entry.store == store_type):
shadow_entry = self.shadow.get(k)
if (shadow_entry is not None):
v.pw_entry = shadow_entry
else:
v.pw_entry.store = 'UNKNOWN'
|
Verify that entries that claim to use shadow files have a shadow entry.
If the entries of the non-shadowed file indicate that a shadow file is used,
check that there is actually an entry for that file in shadow.
Args:
store_type: The type of password store that should be used (e.g.
/etc/shadow or /etc/gshadow)
|
codesearchnet
|
def epoch_to_human_time(epoch_time):
if isinstance(epoch_time, int):
try:
d = datetime.datetime.fromtimestamp(epoch_time / 1000)
return d.strftime("%m-%d-%Y %H:%M:%S ")
except ValueError:
return None
|
Converts an epoch timestamp to human readable time.
This essentially converts an output of get_current_epoch_time to an output
of get_current_human_time
Args:
epoch_time: An integer representing an epoch timestamp in milliseconds.
Returns:
A time string representing the input time.
None if input param is invalid.
|
juraj-google-style
|
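A hedged, self-contained usage sketch of the conversion above; the printed value depends on the local timezone:
import datetime

def epoch_ms_to_human(epoch_ms):
    # Same rule as epoch_to_human_time: milliseconds in, local time string out.
    if isinstance(epoch_ms, int):
        try:
            return datetime.datetime.fromtimestamp(epoch_ms / 1000).strftime("%m-%d-%Y %H:%M:%S ")
        except ValueError:
            return None

print(epoch_ms_to_human(1609459200000))  # e.g. "01-01-2021 00:00:00 " when the local zone is UTC
print(epoch_ms_to_human("not an int"))   # None (falls through the isinstance check)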
def Serialize(self, writer):
super(UnspentCoinState, self).Serialize(writer)
writer.WriteVarInt(len(self.Items))
for item in self.Items:
byt = item.to_bytes(1, 'little')
writer.WriteByte(byt)
|
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
|
juraj-google-style
|
def set_setting(name, value):
if (name.lower() not in _get_valid_names()):
raise KeyError('Invalid name: {0}'.format(name))
for setting in settings:
if (value.lower() == setting.lower()):
cmd = '/set /subcategory:"{0}" {1}'.format(name, settings[setting])
break
else:
raise KeyError('Invalid setting value: {0}'.format(value))
_auditpol_cmd(cmd)
return True
|
Set the configuration for the named audit setting
Args:
name (str):
The name of the setting to configure
value (str):
The configuration for the named value. Valid options are:
- No Auditing
- Success
- Failure
- Success and Failure
Returns:
bool: True if successful
Raises:
KeyError: On invalid ``name`` or ``value``
CommandExecutionError: If an error is encountered modifying the setting
Usage:
.. code-block:: python
import salt.utils.win_lgpo_auditpol
# Set the state of the "Credential Validation" setting to Success and
# Failure
salt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',
value='Success and Failure')
# Set the state of the "Credential Validation" setting to No Auditing
salt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',
value='No Auditing')
|
codesearchnet
|
def angle(self, deg=False):
if self.dtype.str[1] != 'c':
warnings.warn('angle() is intended for complex-valued timeseries',
RuntimeWarning, 1)
da = distob.vectorize(np.angle)(self, deg)
return _dts_from_da(da, self.tspan, self.labels)
|
Return the angle of a complex Timeseries
Args:
deg (bool, optional):
Return angle in degrees if True, radians if False (default).
Returns:
angle (Timeseries):
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
|
juraj-google-style
|
def _ed25519_key_from_file(fn, path):
try:
return fn(read_from_file(path, exception=ScriptWorkerEd25519Error))
except ScriptWorkerException as exc:
raise ScriptWorkerEd25519Error('Failed calling {} for {}: {}!'.format(fn, path, str(exc)))
|
Create an ed25519 key from the contents of ``path``.
``path`` is a filepath containing a base64-encoded ed25519 key seed.
Args:
fn (callable): the function to call with the contents from ``path``
path (str): the file path to the base64-encoded key seed.
Returns:
obj: the appropriate key type from ``path``
Raises:
ScriptWorkerEd25519Error
|
codesearchnet
|
def _make_assert_msg_data(sym, x, y, summarize, test_op):
data = []
data.append('Condition x %s y did not hold.' % sym)
if summarize > 0:
if x.shape == y.shape and x.shape.as_list():
mask = math_ops.logical_not(test_op)
indices = array_ops.where(mask)
indices_np = indices.numpy()
x_vals = array_ops.boolean_mask(x, mask)
y_vals = array_ops.boolean_mask(y, mask)
num_vals = min(summarize, indices_np.shape[0])
data.append('Indices of first %d different values:' % num_vals)
data.append(indices_np[:num_vals])
data.append('Corresponding x values:')
data.append(x_vals.numpy().reshape((-1,))[:num_vals])
data.append('Corresponding y values:')
data.append(y_vals.numpy().reshape((-1,))[:num_vals])
x_np = x.numpy().reshape((-1,))
y_np = y.numpy().reshape((-1,))
x_sum = min(x_np.size, summarize)
y_sum = min(y_np.size, summarize)
data.append('First %d elements of x:' % x_sum)
data.append(x_np[:x_sum])
data.append('First %d elements of y:' % y_sum)
data.append(y_np[:y_sum])
return data
|
Subroutine of _binary_assert that generates the components of the default error message when running in eager mode.
Args:
sym: Mathematical symbol for the test to apply to pairs of tensor elements,
i.e. "=="
x: First input to the assertion after applying `convert_to_tensor()`
y: Second input to the assertion
summarize: Value of the "summarize" parameter to the original assert_* call;
tells how many elements of each tensor to print.
test_op: TensorFlow op that returns a Boolean tensor with True in each
position where the assertion is satisfied.
Returns:
List of tensors and scalars that, when stringified and concatenated,
will produce the error message string.
|
github-repos
|
def bridge_exists(br):
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
|
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
|
codesearchnet
|
def drop_scored_calls(self,names):
def _remove(calls,names):
d = dict([(k,v) for k,v in calls.items() if k not in names])
return d
if isinstance(names, str):
names = [names]
output = self.copy()
output['scored_calls'] = output['scored_calls'].\
apply(lambda x: _remove(x,names))
return output
|
Take a name or list of scored call names and drop those from the scored calls
Args:
names (list): list of names to drop or a single string name to drop
Returns:
CellDataFrame: The CellDataFrame modified.
|
juraj-google-style
|
class WindowedTracker(BaseTracker):
def __init__(self, window_mode, **kwargs):
if window_mode == WindowMode.SLIDING:
self._window_size = kwargs.get('window_size', 100)
self._queue = deque(maxlen=self._window_size)
self._n = 0
self._window_mode = window_mode
def push(self, x):
self._queue.append(x)
def pop(self):
return self._queue.popleft()
|
Abstract base class for trackers that operate on a data window.
This class provides a foundation for trackers that maintain a window of data,
either as a landmark window or a sliding window. It provides basic push and
pop operations.
Args:
window_mode: A `WindowMode` enum specifying whether the window is `LANDMARK`
or `SLIDING`.
**kwargs: Keyword arguments.
For `SLIDING` window mode, `window_size` can be specified to set the
maximum size of the sliding window. Defaults to 100.
|
github-repos
|
def set_position(self, position):
self._player_interface.SetPosition(ObjectPath("/not/used"), Int64(position * 1000.0 * 1000))
self.positionEvent(self, position)
|
Set the video to playback position to `position` seconds from the start of the video
Args:
position (float): The position in seconds.
|
juraj-google-style
|
def move_to(self, folder):
if isinstance(folder, Folder):
self.move_to(folder.id)
else:
self._move_to(folder)
|
Moves the email to the folder specified by the folder parameter.
Args:
folder: A string containing the folder ID the message should be moved to, or a Folder instance
|
juraj-google-style
|
def run_plugins(context_obj, boto3_clients):
def print_if_verbose(message):
if context_obj.verbose:
print(message)
service_name = os.path.basename(sys.argv[0]).replace(".py", "")
try:
import plugins
except ImportError:
print_if_verbose("no plugins detected.")
return
else:
for plugin_importer, plugin_name, plugin_ispkg in pkgutil.iter_modules(plugins.__path__):
if plugin_ispkg:
plugin_package = importlib.import_module("plugins.{}".format(plugin_name))
for importer, modname, ispkg in pkgutil.iter_modules(plugin_package.__path__):
plugin_module = importlib.import_module("plugins.{}.{}".format(plugin_name, modname))
for name, obj in inspect.getmembers(plugin_module):
if inspect.isclass(obj) and obj.__name__ == "EFPlugin":
plugin_class = getattr(plugin_module, name)
plugin_instance = plugin_class(context=context_obj, clients=boto3_clients)
if plugin_instance.service == service_name:
print_if_verbose("plugin '{}' loaded".format(plugin_name))
if not context_obj.commit:
print_if_verbose("dryrun: skipping plugin execution.")
else:
try:
plugin_instance.run()
except AttributeError:
print("error executing plugin '{}'".format(modname))
|
Executes all loaded plugins designated for the service calling the function.
Args:
context_obj (obj:EFContext): The EFContext object created by the service.
boto3_clients (dict): Dictionary of boto3 clients created by ef_utils.create_aws_clients()
|
juraj-google-style
|
def apply(self, score: Optional[float]) -> Optional[int]:
if score is None:
return None
if math.isnan(score):
return self._missing_label
if score < self.threshold:
return self._normal_label
return self._outlier_label
|
Applies the fixed threshold to an anomaly score.
Classifies the given anomaly score as normal or outlier based on the
predefined cutoff.
Args:
score (Optional[float]): The input anomaly score.
Returns:
Optional[int]: The anomaly label:
- `normal_label` if the score is less than the threshold.
- `outlier_label` if the score is at or above the threshold.
- `missing_label` if the score is `NaN` (detector not ready).
- `None` if the score is `None` (detector ready, but unable to produce
score).
|
github-repos
|
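A standalone sketch of the thresholding rule above; the threshold and label values here are illustrative, not the detector's actual defaults:
import math

def apply_threshold(score, threshold=0.8, normal=0, outlier=1, missing=-2):
    # None -> None, NaN -> missing, below threshold -> normal, otherwise outlier.
    if score is None:
        return None
    if math.isnan(score):
        return missing
    return normal if score < threshold else outlier

assert apply_threshold(0.5) == 0 and apply_threshold(0.9) == 1
assert apply_threshold(float('nan')) == -2 and apply_threshold(None) is None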
def queue_scan_command(self, server_info: ServerConnectivityInfo, scan_command: PluginScanCommand) -> None:
self._check_and_create_process(server_info.hostname)
self._queued_tasks_nb += 1
if scan_command.is_aggressive:
self._hostname_queues_dict[server_info.hostname].put((server_info, scan_command))
else:
self._task_queue.put((server_info, scan_command))
|
Queue a scan command targeting a specific server.
Args:
server_info: The server's connectivity information. The test_connectivity_to_server() method must have been
called first to ensure that the server is online and accessible.
scan_command: The scan command to run against this server.
|
juraj-google-style
|
def _restore_and_convert(self, elem: tuple[tuple[Any, Any, beam.Row], Any]) -> NestedKeyedOutputT:
(orig_key, temp_key, row), prediction = elem
assert isinstance(prediction, AnomalyPrediction), 'Wrong model handler output type.' + f"Expected: 'AnomalyPrediction', but got '{type(prediction).__name__}'. " + 'Consider adding a post-processing function via `with_postprocess_fn` ' + f"to convert from '{type(prediction).__name__}' to 'AnomalyPrediction', " + 'or use `score_prediction_adapter` or `label_prediction_adapter` to ' + 'perform the conversion.'
result = AnomalyResult(example=row, predictions=[dataclasses.replace(prediction, model_id=self._offline_detector._model_id)])
return (orig_key, (temp_key, result))
|
Converts the model output to AnomalyResult.
Args:
elem: A tuple containing the combined key (original key, temp key, row)
and the output from RunInference.
Returns:
A tuple containing the keyed AnomalyResult.
|
github-repos
|
def console_set_char(
con: tcod.console.Console, x: int, y: int, c: Union[int, str]
) -> None:
lib.TCOD_console_set_char(_console(con), x, y, _int(c))
|
Change the character at x,y to c, keeping the current colors.
Args:
con (Console): Any Console instance.
x (int): Character x position from the left.
y (int): Character y position from the top.
c (Union[int, AnyStr]): Character to draw, can be an integer or string.
.. deprecated:: 8.4
Array access performs significantly faster than using this function.
See :any:`Console.ch`.
|
juraj-google-style
|
def to_tuple(param, low=None, bias=None):
if low is not None and bias is not None:
raise ValueError('Arguments low and bias are mutually exclusive')
if param is None:
return param
if isinstance(param, (int, float)):
if low is None:
param = - param, + param
else:
param = (low, param) if low < param else (param, low)
elif isinstance(param, (list, tuple)):
param = tuple(param)
else:
raise ValueError('Argument param must be either scalar (int,float) or tuple')
if bias is not None:
return tuple([bias + x for x in param])
return tuple(param)
|
Convert input argument to min-max tuple
Args:
param (scalar, tuple or list of 2+ elements): Input value.
If value is scalar, return value would be (offset - value, offset + value).
If value is tuple, return value would be value + offset (broadcasted).
low: Second element of tuple can be passed as optional argument
bias: An offset factor added to each element
|
juraj-google-style
|
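Illustrative calls, assuming the to_tuple function above is in scope; the expected values follow directly from its branches:
assert to_tuple(None) is None                  # passthrough
assert to_tuple(10) == (-10, 10)               # scalar, no low/bias: symmetric range
assert to_tuple(10, low=3) == (3, 10)          # scalar with explicit lower bound
assert to_tuple((1, 2), bias=5) == (6, 7)      # tuple shifted by the bias offset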
def format_terminal_row(headers, example_row):
def format_column(col):
if isinstance(col, str):
return '{{:{w}.{w}}}'
return '{{:<{w}}}'
widths = [max(len(h), len(str(d))) for (h, d) in zip(headers, example_row)]
original_last_width = widths[(- 1)]
if sys.stdout.isatty():
widths[(- 1)] = max(len(headers[(- 1)]), ((tty.width() - sum(((w + 2) for w in widths[0:(- 1)]))) - 3))
cols = [format_column(c).format(w=w) for (c, w) in zip(example_row, widths)]
format_string = ' '.join(cols)
if (original_last_width > widths[(- 1)]):
format_string += '...'
return format_string
|
Uses headers and a row of example data to generate a format string
for printing a single row of data.
Args:
headers (tuple of strings): The headers for each column of data
example_row (tuple): A representative tuple of strings or ints
Returns:
string: A format string with a size for each column
|
codesearchnet
|
def has_resource(self, feature_column, name):
del feature_column, name
raise NotImplementedError('StateManager.has_resource')
|
Returns true iff a resource with same name exists.
Resources can be things such as tables, variables, trackables, etc.
Args:
feature_column: A `FeatureColumn` object this variable corresponds to.
name: Name of the resource.
|
github-repos
|
def check(self, read_tuple_name):
parts = read_tuple_name.split('__')
if ((len(parts[0]) != self.prefix_width) or (len(parts[1]) != self.read_tuple_id_width)):
return False
segments = parts[2][1:(- 1)].split('),(')
for segment in segments:
int_widths = list(map(len, segment.split(',')))
if (self.genome_id_width != int_widths[0]):
return False
if (self.chr_id_width != int_widths[1]):
return False
if ((self.coor_width != int_widths[3]) or (self.coor_width != int_widths[4])):
return False
return True
|
Check if the given read tuple name satisfies this profile.
Args:
read_tuple_name (str): Read tuple name.
|
codesearchnet
|
def on_state_changed(self, state):
if state:
self.editor.sig_breakpoints_changed.connect(self.repaint)
self.editor.sig_debug_stop.connect(self.set_current_line_arrow)
self.editor.sig_debug_stop[()].connect(self.stop_clean)
self.editor.sig_debug_start.connect(self.start_clean)
else:
self.editor.sig_breakpoints_changed.disconnect(self.repaint)
self.editor.sig_debug_stop.disconnect(self.set_current_line_arrow)
self.editor.sig_debug_stop[()].disconnect(self.stop_clean)
self.editor.sig_debug_start.disconnect(self.start_clean)
|
Change visibility and connect/disconnect signal.
Args:
state (bool): Activate/deactivate.
|
juraj-google-style
|
def mp2q(p, q):
p, q = flatten(p), flatten(q)
entropy_dist = 1 / len(p)
return sum(entropy_dist * np.nan_to_num((p ** 2) / q * np.log(p / q)))
|
Compute the MP2Q measure.
Args:
p (np.ndarray): The unpartitioned repertoire
q (np.ndarray): The partitioned repertoire
|
juraj-google-style
|
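Read as a formula, the computation above is (with N = len(p), natural log, and nan_to_num guarding zero entries):
$$\mathrm{MP2Q}(p, q) \;=\; \frac{1}{N}\sum_{i=1}^{N}\frac{p_i^{2}}{q_i}\,\ln\frac{p_i}{q_i}, \qquad N = \mathrm{len}(p).$$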
def load_addon(username, package_name, _globals):
addon_module = get_or_create_module_r(username)
package_module = __import__(package_name)
add_tasks_r(addon_module, package_module, package_name)
_globals.update({username: addon_module})
del package_module
del addon_module
|
Load an fabsetup addon given by 'package_name' and hook it in the
base task namespace 'username'.
Args:
username(str)
package_name(str)
_globals(dict): the globals() namespace of the fabric script.
Return: None
|
codesearchnet
|
def Gradient(inputs, f, name=None):
tlist = [_.type for _ in f.definition.signature.input_arg]
return symbolic_gradient(input=inputs, Tout=tlist, f=f, name=name)
|
Computes the gradient function for function f via backpropagation.
Args:
inputs: A list of tensors of size N + M.
f: The function we want to compute the gradient for. The function 'f' must
be a numerical function which takes N inputs and produces M outputs. Its
gradient function 'g', which is a function taking N + M inputs and
produces N outputs. I.e. if we have (y1, y2, ..., yM) = f(x1, x2, ...,
xN), then, g is (dL/dx1, dL/dx2, ..., dL/dxN) = g(x1, x2, ..., xN, dL/dy1,
dL/dy2, ..., dL/dyM), where L is a scalar-value function of (x1, x2, ...,
xN) (e.g., the loss function). dL/dxi is the partial derivative of L with
respect to xi.
name: A name for the operation (optional).
Returns:
A list of tensors of size N.
|
github-repos
|
def _ParseVolumeIdentifiersString(self, volume_identifiers_string, prefix='v'):
prefix_length = 0
if prefix:
prefix_length = len(prefix)
if (not volume_identifiers_string):
return []
if (volume_identifiers_string == 'all'):
return ['all']
volume_identifiers = set()
for identifiers_range in volume_identifiers_string.split(','):
if ('..' in identifiers_range):
(first_identifier, last_identifier) = identifiers_range.split('..')
if first_identifier.startswith(prefix):
first_identifier = first_identifier[prefix_length:]
if last_identifier.startswith(prefix):
last_identifier = last_identifier[prefix_length:]
try:
first_identifier = int(first_identifier, 10)
last_identifier = int(last_identifier, 10)
except ValueError:
raise ValueError('Invalid volume identifiers range: {0:s}.'.format(identifiers_range))
for volume_identifier in range(first_identifier, (last_identifier + 1)):
if (volume_identifier not in volume_identifiers):
volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)
volume_identifiers.add(volume_identifier)
else:
identifier = identifiers_range
if identifier.startswith(prefix):
identifier = identifiers_range[prefix_length:]
try:
volume_identifier = int(identifier, 10)
except ValueError:
raise ValueError('Invalid volume identifier range: {0:s}.'.format(identifiers_range))
volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)
volume_identifiers.add(volume_identifier)
return sorted(volume_identifiers)
|
Parses a user specified volume identifiers string.
Args:
volume_identifiers_string (str): user specified volume identifiers. A
range of volumes can be defined as: "3..5". Multiple volumes can be
defined as: "1,3,5" (a list of comma separated values). Ranges and
lists can also be combined as: "1,3..5". The first volume is 1. All
volumes can be defined as: "all".
prefix (Optional[str]): volume identifier prefix.
Returns:
list[str]: volume identifiers with prefix or the string "all".
Raises:
ValueError: if the volume identifiers string is invalid.
|
codesearchnet
|
def to_dict(self, remove_nones=False):
content = {}
for key in self._translation:
if hasattr(self, key):
content[key] = getattr(self, key)
content['parent_id'] = self.parent_id
content['item_id'] = self.item_id
content['restricted'] = self.restricted
content['title'] = self.title
if (self.resources != []):
content['resources'] = [resource.to_dict(remove_nones=remove_nones) for resource in self.resources]
content['desc'] = self.desc
return content
|
Return the dict representation of the instance.
Args:
remove_nones (bool, optional): Optionally remove dictionary
elements when their value is `None`.
Returns:
dict: a dict representation of the `DidlObject`.
|
codesearchnet
|
def AddCampaign(self, client_customer_id, campaign_name, ad_channel_type,
budget):
self.client.SetClientCustomerId(client_customer_id)
campaign_service = self.client.GetService('CampaignService')
budget_id = self.AddBudget(client_customer_id, budget)
operations = [{
'operator': 'ADD',
'operand': {
'name': campaign_name,
'status': 'PAUSED',
'biddingStrategyConfiguration': {
'biddingStrategyType': 'MANUAL_CPC',
'biddingScheme': {
'xsi_type': 'ManualCpcBiddingScheme',
'enhancedCpcEnabled': 'false'
}
},
'budget': {
'budgetId': budget_id
},
'advertisingChannelType': ad_channel_type
}
}]
campaign_service.mutate(operations)
|
Add a Campaign to the client account.
Args:
client_customer_id: str Client Customer Id to use when creating Campaign.
campaign_name: str Name of the campaign to be added.
ad_channel_type: str Primary serving target the campaign's ads.
budget: str a budget amount (in micros) to use.
|
juraj-google-style
|
def docker_list(registry_pass):
registry = conf.get('docker.registry', None)
if (registry is None):
log.err('You must define docker.registry conf variable to list images')
sys.exit((- 1))
registry_user = conf.get('docker.registry_user', None)
if (registry_user is None):
registry_user = click.prompt('Username')
rc = client.RegistryClient(registry, registry_user, registry_pass)
images = {x: rc.list_tags(x) for x in rc.list_images()}
shell.cprint('<32>Images in <34>{} <32>registry:', registry)
for (image, tags) in images.items():
shell.cprint(' <92>{}', image)
for tag in tags:
shell.cprint(' <90>{}:<35>{}', image, tag)
|
List docker images stored in the remote registry.
Args:
registry_pass (str):
Remote docker registry password.
|
codesearchnet
|
def from_array(arr, name=None):
tensor = TensorProto()
tensor.dims.extend(arr.shape)
if name:
tensor.name = name
if (arr.dtype == np.object):
tensor.data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
flat_array = arr.flatten()
for e in flat_array:
if isinstance(e, text_type):
tensor.string_data.append(e.encode('utf-8'))
elif isinstance(e, np.ndarray):
for s in e:
if isinstance(s, text_type):
tensor.string_data.append(s.encode('utf-8'))
else:
raise NotImplementedError('Unrecognized object in the object array, expect a string, or array of bytes: ', str(type(e)))
return tensor
try:
dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
except KeyError:
raise RuntimeError('Numpy data type not understood yet: {}'.format(str(arr.dtype)))
tensor.data_type = dtype
tensor.raw_data = arr.tobytes()
return tensor
|
Converts a numpy array to a tensor def.
Inputs:
arr: a numpy array.
name: (optional) the name of the tensor.
Returns:
tensor_def: the converted tensor def.
|
codesearchnet
|
def _path(cls, ndivsm, structure=None, kpath_bounds=None, comment=None):
if (kpath_bounds is None):
from pymatgen.symmetry.bandstructure import HighSymmKpath
sp = HighSymmKpath(structure)
kpath_labels = []
for labels in sp.kpath['path']:
kpath_labels.extend(labels)
kpath_bounds = []
for label in kpath_labels:
red_coord = sp.kpath['kpoints'][label]
kpath_bounds.append(red_coord)
return cls(mode=KSamplingModes.path, num_kpts=ndivsm, kpts=kpath_bounds, comment=(comment if comment else 'K-Path scheme'))
|
Static constructor for path in k-space.
Args:
structure: :class:`Structure` object.
kpath_bounds: List with the reduced coordinates of the k-points defining the path.
ndivsm: Number of division for the smallest segment.
comment: Comment string.
Returns:
:class:`KSampling` object.
|
codesearchnet
|
def cholesky(x):
if any_symbolic_tensors((x,)):
return Cholesky().symbolic_call(x)
return _cholesky(x)
|
Computes the Cholesky decomposition of a positive semi-definite matrix.
Args:
x: Input tensor of shape `(..., M, M)`.
Returns:
A tensor of shape `(..., M, M)` representing the lower triangular
Cholesky factor of `x`.
|
github-repos
|
def pauli_single(cls, num_qubits, index, pauli_label):
tmp = Pauli.from_label(pauli_label)
z = np.zeros(num_qubits, dtype=np.bool)
x = np.zeros(num_qubits, dtype=np.bool)
z[index] = tmp.z[0]
x[index] = tmp.x[0]
return cls(z, x)
|
Generate single qubit pauli at index with pauli_label with length num_qubits.
Args:
num_qubits (int): the length of pauli
index (int): the qubit index to insert the single qubii
pauli_label (str): pauli
Returns:
Pauli: single qubit pauli
|
juraj-google-style
|
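A hedged usage sketch with Qiskit's legacy Pauli class (as used above); the label comment assumes Qiskit's convention of qubit 0 on the right:
single_z = Pauli.pauli_single(num_qubits=3, index=1, pauli_label='Z')
print(single_z.to_label())  # expected 'IZI': a Z on qubit 1, identity elsewhere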
def AddStopTime(self, stop, problems=None, schedule=None, **kwargs):
if problems is None:
problems = problems_module.default_problem_reporter
stoptime = self.GetGtfsFactory().StopTime(
problems=problems, stop=stop, **kwargs)
self.AddStopTimeObject(stoptime, schedule)
|
Add a stop to this trip. Stops must be added in the order visited.
Args:
stop: A Stop object
kwargs: remaining keyword args passed to StopTime.__init__
Returns:
None
|
juraj-google-style
|
def exists(self, **kwargs):
requests_params = self._handle_requests_params(kwargs)
self._check_load_parameters(**kwargs)
kwargs['uri_as_parts'] = True
session = self._meta_data['bigip']._meta_data['icr_session']
uri = self._meta_data['container']._meta_data['uri']
endpoint = kwargs.pop('id', '')
kwargs.pop('name', '')
base_uri = ((uri + endpoint) + '/')
kwargs.update(requests_params)
try:
session.get(base_uri, **kwargs)
except HTTPError as err:
if (err.response.status_code == 404):
return False
else:
raise
return True
|
r"""Check for the existence of the ASM object on the BIG-IP
Sends an HTTP GET to the URI of the ASM object and if it fails with
a :exc:~requests.HTTPError` exception it checks the exception for
status code of 404 and returns :obj:`False` in that case.
If the GET is successful it returns :obj:`True`.
For any other errors are raised as-is.
Args:
\*\*kwargs (dict): Arbitrary number of keyword arguments.
Keyword arguments required to get objects
If kwargs has a ``requests_param`` key the corresponding dict will
be passed to the underlying ``requests.session.get`` method where it will
be handled according to that API.
Returns:
bool: True is the object exists: False otherwise.
Raises:
requests.HTTPError: Any HTTP error that was not status code 404.
|
codesearchnet
|
def replace_with_quanto_layers(model, quantization_config=None, modules_to_not_convert=None, current_key_name=None, has_been_replaced=False):
from accelerate import init_empty_weights
if is_optimum_quanto_available():
from optimum.quanto import QLayerNorm, QLinear, qfloat8, qint2, qint4, qint8
w_mapping = {'float8': qfloat8, 'int8': qint8, 'int4': qint4, 'int2': qint2}
a_mapping = {None: None, 'float8': qfloat8, 'int8': qint8}
if modules_to_not_convert is None:
modules_to_not_convert = []
for name, module in model.named_children():
if current_key_name is None:
current_key_name = []
current_key_name.append(name)
if not any((key in '.'.join(current_key_name) for key in modules_to_not_convert)):
with init_empty_weights():
if isinstance(module, torch.nn.Linear):
model._modules[name] = QLinear(in_features=module.in_features, out_features=module.out_features, bias=module.bias is not None, dtype=module.weight.dtype, weights=w_mapping[quantization_config.weights], activations=a_mapping[quantization_config.activations])
model._modules[name].requires_grad_(False)
has_been_replaced = True
elif isinstance(module, torch.nn.LayerNorm):
if quantization_config.activations is not None:
model._modules[name] = QLayerNorm(module.normalized_shape, module.eps, module.elementwise_affine, module.bias is not None, activations=a_mapping[quantization_config.activations])
has_been_replaced = True
if len(list(module.children())) > 0:
_, has_been_replaced = replace_with_quanto_layers(module, quantization_config=quantization_config, modules_to_not_convert=modules_to_not_convert, current_key_name=current_key_name, has_been_replaced=has_been_replaced)
current_key_name.pop(-1)
return (model, has_been_replaced)
|
Public method that recursively replaces the Linear layers of the given model with Quanto quantized layers.
Returns the converted model and a boolean that indicates if the conversion has been successful or not.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
quantization_config (`AqlmConfig`, defaults to `None`):
The quantization config object that contains the quantization parameters.
modules_to_not_convert (`list`, *optional*, defaults to `None`):
A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be
converted.
current_key_name (`list`, *optional*, defaults to `None`):
A list that contains the current key name. This is used for recursion and should not be passed by the user.
has_been_replaced (`bool`, *optional*, defaults to `None`):
A boolean that indicates if the conversion has been successful or not. This is used for recursion and
should not be passed by the user.
|
github-repos
|
def populate_sites(self, number_of_atoms, selected_sites=None):
if (number_of_atoms > self.number_of_sites):
raise ValueError
if selected_sites:
atoms = [atom.Atom(initial_site=site) for site in random.sample([s for s in self.sites if (s.label in selected_sites)], number_of_atoms)]
else:
atoms = [atom.Atom(initial_site=site) for site in random.sample(self.sites, number_of_atoms)]
self.number_of_occupied_sites = number_of_atoms
return atoms
|
Populate the lattice sites with a specific number of atoms.
Args:
number_of_atoms (Int): The number of atoms to populate the lattice sites with.
selected_sites (:obj:List, optional): List of site labels if only some sites are to be occupied. Defaults to None.
Returns:
None
|
codesearchnet
|
def active_qubits(linear, quadratic):
active = {idx for (idx, bias) in uniform_iterator(linear)}
for (edge, _) in six.iteritems(quadratic):
active.update(edge)
return active
|
Calculate a set of all active qubits. Qubit is "active" if it has
bias or coupling attached.
Args:
linear (dict[variable, bias]/list[variable, bias]):
Linear terms of the model.
quadratic (dict[(variable, variable), bias]):
Quadratic terms of the model.
Returns:
set:
Active qubits' indices.
|
codesearchnet
|
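A standalone sketch of the same bookkeeping for plain-dict inputs (the original's uniform_iterator also accepts list-style linear terms):
def active_qubits_sketch(linear, quadratic):
    active = set(linear)          # every qubit with a linear bias
    for edge in quadratic:        # every qubit touched by a coupler
        active.update(edge)
    return active

assert active_qubits_sketch({0: 0.5, 1: -0.3},
                            {(1, 4): 1.0, (2, 5): -0.25}) == {0, 1, 2, 4, 5}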
def __get_bindings__(self, sparql, output_format):
return self.ext_conn.query(sparql,
rtn_format=output_format,
debug=False)
|
Internal method queries triplestore or remote
sparql endpont and returns the bindings
Args:
----
sparql: String of SPARQL query
output_format: String of type of outputform
|
juraj-google-style
|
def get_cur_rot(self) -> torch.Tensor:
if self._rot_mats is not None:
return self._rot_mats
elif self._quats is not None:
return self._quats
else:
raise ValueError('Both rotations are None')
|
Return the underlying rotation in its current form
Returns:
The stored rotation
|
github-repos
|
def wb010(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `wb010`'.format(value))
self._wb010 = value
|
Corresponds to IDD Field `wb010`
Wet-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `wb010`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def changed(dirname, filename='.md5', args=None, glob=None):
root = Path(dirname)
if (not root.exists()):
return True
cachefile = (root / filename)
current_digest = (cachefile.open().read() if cachefile.exists() else '')
_digest = digest(dirname, glob=glob)
if (args and args.verbose):
print('md5:', _digest)
has_changed = (current_digest != _digest)
if has_changed:
with open(os.path.join(dirname, filename), 'w') as fp:
fp.write(_digest)
return has_changed
|
Has `glob` changed in `dirname`
Args:
dirname: directory to measure
filename: filename to store checksum
|
codesearchnet
|
def remove_exit(self):
if self.items:
if (self.items[(- 1)] is self.exit_item):
del self.items[(- 1)]
return True
return False
|
Remove the exit item if necessary. Used to make sure we only remove the exit item, not something else.
Returns:
bool: True if item needed to be removed, False otherwise.
|
codesearchnet
|
def GetPlatformRestrictions(campaign_feed):
platform_restrictions = None
if (campaign_feed['matchingFunction']['operator'] == 'AND'):
for argument in campaign_feed['matchingFunction']['lhsOperand']:
if (argument['value']['operator'] == 'EQUALS'):
request_context_operand = argument['value']['lhsOperand'][0]
if (request_context_operand and (request_context_operand == 'DEVICE_PLATFORM')):
platform_restrictions = argument['value']['rhsOperand'][0].upper()
return platform_restrictions
|
Get the Platform Restrictions for a given Campaign Feed.
Args:
campaign_feed: the Campaign Feed we are retrieving Platform Restrictions for.
Returns:
The Platform Restrictions for the given feed.
|
codesearchnet
|
async def update(
self, *, node_id: str, version: int, spec: Mapping[str, Any]
) -> Mapping[str, Any]:
params = {"version": version}
if "Role" in spec:
assert spec["Role"] in {"worker", "manager"}
if "Availability" in spec:
assert spec["Availability"] in {"active", "pause", "drain"}
response = await self.docker._query_json(
"nodes/{node_id}/update".format(node_id=node_id),
method="POST",
params=params,
data=spec,
)
return response
|
Update the spec of a node.
Args:
node_id: The ID or name of the node
version: version number of the node being updated
spec: fields to be updated
|
juraj-google-style
|
def ones_comp_sum16(num1: int, num2: int) -> int:
carry = (1 << 16)
result = (num1 + num2)
return (result if (result < carry) else ((result + 1) - carry))
|
Calculates the 1's complement sum for 16-bit numbers.
Args:
num1: 16-bit number.
num2: 16-bit number.
Returns:
The calculated result.
|
codesearchnet
|
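A worked example, assuming ones_comp_sum16 above is in scope: when the 16-bit sum overflows, the carry bit wraps back into the result.
assert ones_comp_sum16(0x1234, 0x0001) == 0x1235  # no overflow: plain sum
assert ones_comp_sum16(0xFFFF, 0x0001) == 0x0001  # 0x10000 overflows; carry wraps: + 1 - 0x10000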
def UpdateNumberOfEventTags(self, number_of_consumed_event_tags, number_of_produced_event_tags):
consumed_event_tags_delta = 0
if (number_of_consumed_event_tags is not None):
if (number_of_consumed_event_tags < self.number_of_consumed_event_tags):
raise ValueError('Number of consumed event tags smaller than previous update.')
consumed_event_tags_delta = (number_of_consumed_event_tags - self.number_of_consumed_event_tags)
self.number_of_consumed_event_tags = number_of_consumed_event_tags
self.number_of_consumed_event_tags_delta = consumed_event_tags_delta
produced_event_tags_delta = 0
if (number_of_produced_event_tags is not None):
if (number_of_produced_event_tags < self.number_of_produced_event_tags):
raise ValueError('Number of produced event tags smaller than previous update.')
produced_event_tags_delta = (number_of_produced_event_tags - self.number_of_produced_event_tags)
self.number_of_produced_event_tags = number_of_produced_event_tags
self.number_of_produced_event_tags_delta = produced_event_tags_delta
return ((consumed_event_tags_delta > 0) or (produced_event_tags_delta > 0))
|
Updates the number of event tags.
Args:
number_of_consumed_event_tags (int): total number of event tags consumed
by the process.
number_of_produced_event_tags (int): total number of event tags produced
by the process.
Returns:
bool: True if either number of event tags has increased.
Raises:
ValueError: if the consumed or produced number of event tags is smaller
than the value of the previous update.
|
codesearchnet
|
def receive(self,message_type):
topic = None
message = None
if message_type == RAW:
message = self._sock.recv(flags=zmq.NOBLOCK)
elif message_type == PYOBJ:
message = self._sock.recv_pyobj(flags=zmq.NOBLOCK)
elif message_type == JSON:
message = self._sock.recv_json(flags=zmq.NOBLOCK)
elif message_type == MULTIPART:
data = self._sock.recv_multipart(flags=zmq.NOBLOCK)
message = data[1]
topic = data[0]
elif message_type == STRING:
message = self._sock.recv_string(flags=zmq.NOBLOCK)
elif message_type == UNICODE:
message = self._sock.recv_unicode(flags=zmq.NOBLOCK)
else:
raise Exception("Unknown message type %s"%(self._message_type,))
return (topic, message)
|
Receive the message of the specified type and return it
Args:
- message_type: the type of the message to receive
Returns:
- the topic of the message
- the message received from the socket
|
juraj-google-style
|
def show_status(self, **kwargs):
stream = kwargs.pop("stream", sys.stdout)
nids = as_set(kwargs.pop("nids", None))
wslice = kwargs.pop("wslice", None)
verbose = kwargs.pop("verbose", 0)
wlist = None
if wslice is not None:
wlist = list(range(wslice.start, wslice.step, wslice.stop))
has_colours = True
red = "red" if has_colours else None
for i, work in enumerate(self):
if nids and work.node_id not in nids: continue
print("", file=stream)
cprint_map("Work
if wlist is not None and i in wlist: continue
if verbose == 0 and work.finalized:
print(" Finalized works are not shown. Use verbose > 0 to force output.", file=stream)
continue
headers = ["Task", "Status", "Queue", "MPI|Omp|Gb",
"Warn|Com", "Class", "Sub|Rest|Corr", "Time",
"Node_ID"]
table = []
tot_num_errors = 0
for task in work:
if nids and task.node_id not in nids: continue
task_name = os.path.basename(task.name)
report = task.get_event_report()
stime = None
timedelta = task.datetimes.get_runtime()
if timedelta is not None:
stime = str(timedelta) + "R"
else:
timedelta = task.datetimes.get_time_inqueue()
if timedelta is not None:
stime = str(timedelta) + "Q"
events = "|".join(2*["NA"])
if report is not None:
events = '{:>4}|{:>3}'.format(*map(str, (
report.num_warnings, report.num_comments)))
para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("Gb"))))
task_info = list(map(str, [task.__class__.__name__,
(task.num_launches, task.num_restarts, task.num_corrections), stime, task.node_id]))
qinfo = "None"
if task.queue_id is not None:
qname = str(task.qname)
if not verbose:
qname = qname[:min(5, len(qname))]
qinfo = str(task.queue_id) + "@" + qname
if task.status.is_critical:
tot_num_errors += 1
task_name = colored(task_name, red)
if has_colours:
table.append([task_name, task.status.colored, qinfo,
para_info, events] + task_info)
else:
table.append([task_name, str(task.status), qinfo, events,
para_info] + task_info)
print(tabulate(table, headers=headers, tablefmt="grid"), file=stream)
if tot_num_errors:
cprint("Total number of errors: %d" % tot_num_errors, "red", file=stream)
print("", file=stream)
if self.all_ok:
cprint("\nall_ok reached\n", "green", file=stream)
|
Report the status of the works and the status of the different tasks on the specified stream.
Args:
stream: File-like object, Default: sys.stdout
nids: List of node identifiers. By defaults all nodes are shown
wslice: Slice object used to select works.
verbose: Verbosity level (default 0). > 0 to show only the works that are not finalized.
|
juraj-google-style
|
def transfers(self, payment_id, data={}, **kwargs):
url = '{}/{}/transfers'.format(self.base_url, payment_id)
return self.get_url(url, data, **kwargs)
|
Fetches all transfers for the given Payment Id
Args:
payment_id : Id of the payment whose transfers are to be fetched
data : Optional query parameters for the request
Returns:
Collection of transfers created from the payment
|
codesearchnet
|
def __init__(self, config: FastSpeech2ConformerConfig, module_config):
super().__init__()
input_channels = config.hidden_size
hidden_channels = module_config['linear_units']
kernel_size = config.positionwise_conv_kernel_size
self.conv1 = nn.Conv1d(input_channels, hidden_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2)
self.conv2 = nn.Conv1d(hidden_channels, input_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2)
self.dropout = nn.Dropout(module_config['dropout_rate'])
|
Initialize FastSpeech2ConformerMultiLayeredConv1d module.
Args:
input_channels (`int`): Number of input channels.
hidden_channels (`int`): Number of hidden channels.
kernel_size (`int`): Kernel size of conv1d.
dropout_rate (`float`): Dropout rate.
|
github-repos
|
def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False):
input_length = input.shape[0]
lower_index = round(input_length * (1 - lower_percentile * 0.01))
upper_index = round(input_length * upper_percentile * 0.01)
upper_bound = torch.kthvalue(input, k=upper_index).values
if lower_percentile == 0:
lower_bound = upper_bound * 0
else:
lower_bound = -torch.kthvalue(-input, k=lower_index).values
if not output_tensor:
lower_bound = lower_bound.item()
upper_bound = upper_bound.item()
return (lower_bound, upper_bound)
|
Calculate the percentile max and min values in a given tensor
Args:
input (`torch.Tensor`):
The target tensor to calculate percentile max and min.
lower_percentile (`float`):
If 0.1, means we return the value of the smallest 0.1% value in the tensor as percentile min.
upper_percentile (`float`):
If 99.9, means we return the value of the largest 0.1% value in the tensor as percentile max.
output_tensor (`bool`, *optional*, defaults to `False`):
If True, this function returns tensors, otherwise it returns values.
Returns:
`Tuple(torch.Tensor, torch.Tensor)`: Percentile min and max value of *input*
|
github-repos
|
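A minimal runnable sketch of calling the helper above on a flattened activation tensor; the percentile choices are illustrative.
import torch

torch.manual_seed(0)
activations = torch.randn(10000)  # flat tensor of observed values

# Clip range covering roughly the central 99.8% of the distribution.
lower, upper = get_percentile_min_max(activations, lower_percentile=0.1, upper_percentile=99.9)
print(f"clip range: [{lower:.4f}, {upper:.4f}]")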
def to_json(self, indent=None, separators=None, sort_keys=False):
def remove_callables(x):
'Omit callable elements from input with arbitrary nesting.'
if isinstance(x, dict):
return {k: remove_callables(v) for (k, v) in six.iteritems(x) if (not callable(v))}
elif isinstance(x, list):
return [remove_callables(i) for i in x if (not callable(i))]
return x
return json.dumps(remove_callables(self.values()), indent=indent, separators=separators, sort_keys=sort_keys)
|
Serializes the hyperparameters into JSON.
Args:
indent: If a non-negative integer, JSON array elements and object members
will be pretty-printed with that indent level. An indent level of 0, or
negative, will only insert newlines. `None` (the default) selects the
most compact representation.
separators: Optional `(item_separator, key_separator)` tuple. Default is
`(', ', ': ')`.
sort_keys: If `True`, the output dictionaries will be sorted by key.
Returns:
A JSON string.
|
codesearchnet
|
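A hedged usage sketch, assuming `hparams` is an instance of the hyperparameter container this method belongs to, whose values() dict may contain callables (e.g. schedules passed as functions); those entries are dropped from the JSON output.
# Pretty-printed, deterministic ordering (useful for diffs and logging).
print(hparams.to_json(indent=2, sort_keys=True))

# Most compact form on a single line.
print(hparams.to_json(separators=(',', ':')))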
def nb_fit(data, P_init=None, R_init=None, epsilon=1e-08, max_iters=100):
means = data.mean(1)
variances = data.var(1)
if (means > variances).any():
raise ValueError('For NB fit, means must be less than variances')
(genes, cells) = data.shape
P = (1.0 - (means / variances))
R = ((means * (1 - P)) / P)
for i in range(genes):
        result = minimize(nb_ll_row, [P[i], R[i]], args=(data[(i, :)],), bounds=[(0, 1), (epsilon, None)])
params = result.x
P[i] = params[0]
R[i] = params[1]
return (P, R)
|
Fits the NB distribution to data using method of moments.
Args:
data (array): genes x cells
P_init (array, optional): NB success prob param - genes x 1
R_init (array, optional): NB stopping param - genes x 1
Returns:
P, R - fit to data
|
codesearchnet
|
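A hedged, near-runnable sketch of fitting synthetic counts with the function above; it assumes the module-level helpers the function relies on (scipy.optimize.minimize and nb_ll_row) are importable alongside it.
import numpy as np

rng = np.random.default_rng(0)
# 5 genes x 200 cells of synthetic counts; negative-binomial data has variance > mean.
data = rng.negative_binomial(n=5, p=0.3, size=(5, 200)).astype(float)

P, R = nb_fit(data)
print("success probabilities:", np.round(P, 3))
print("stopping parameters:", np.round(R, 3))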
def _run(self, sess, enqueue_op, coord=None):
decremented = False
try:
enqueue_callable = sess.make_callable(enqueue_op)
while True:
if coord and coord.should_stop():
break
try:
enqueue_callable()
except self._queue_closed_exception_types:
with self._lock:
self._runs_per_session[sess] -= 1
decremented = True
if self._runs_per_session[sess] == 0:
try:
sess.run(self._close_op)
except Exception as e:
logging.vlog(1, 'Ignored exception: %s', str(e))
return
except Exception as e:
if coord:
coord.request_stop(e)
else:
logging.error('Exception in QueueRunner: %s', str(e))
with self._lock:
self._exceptions_raised.append(e)
raise
finally:
if not decremented:
with self._lock:
self._runs_per_session[sess] -= 1
|
Execute the enqueue op in a loop, close the queue in case of error.
Args:
sess: A Session.
enqueue_op: The Operation to run.
coord: Optional Coordinator object for reporting errors and checking
for stop conditions.
|
github-repos
|
def seek(self, offset, whence=os.SEEK_SET):
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if whence == os.SEEK_CUR:
offset += self._current_offset
elif whence == os.SEEK_END:
offset += self._range_size
elif whence != os.SEEK_SET:
raise IOError('Unsupported whence.')
if offset < 0:
raise IOError('Invalid offset value less than zero.')
self._current_offset = offset
|
Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed.
|
juraj-google-style
|
def load_ipython_extension(shell):
    # Keep a reference to the original implementation before monkey-patching it.
    _orig_request = _httplib2.Http.request
def _request(self, uri, method='GET', body=None, headers=None, redirections=_httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
if (headers is None):
headers = {}
headers['user-agent'] = 'GoogleCloudDataLab/1.0'
return _orig_request(self, uri, method=method, body=body, headers=headers, redirections=redirections, connection_type=connection_type)
_httplib2.Http.request = _request
    _orig_init = _requests.Session.__init__
def _init_session(self):
_orig_init(self)
self.headers['User-Agent'] = 'GoogleCloudDataLab/1.0'
_requests.Session.__init__ = _init_session
    # Capture the original magic runners so the wrappers below can delegate to them.
    _orig_run_line_magic = _shell.InteractiveShell.run_line_magic
    _orig_run_cell_magic = _shell.InteractiveShell.run_cell_magic
def _run_line_magic(self, magic_name, line):
fn = self.find_line_magic(magic_name)
if (fn is None):
cm = self.find_cell_magic(magic_name)
if cm:
return _run_cell_magic(self, magic_name, line, None)
return _orig_run_line_magic(self, magic_name, line)
def _run_cell_magic(self, magic_name, line, cell):
if ((cell is None) or (len(cell) == 0) or cell.isspace()):
fn = self.find_line_magic(magic_name)
if fn:
return _orig_run_line_magic(self, magic_name, line)
cell = None
return _orig_run_cell_magic(self, magic_name, line, cell)
_shell.InteractiveShell.run_cell_magic = _run_cell_magic
_shell.InteractiveShell.run_line_magic = _run_line_magic
def _get_project_id():
try:
return google.datalab.Context.default().project_id
except Exception:
return None
def _set_project_id(project_id):
context = google.datalab.Context.default()
context.set_project_id(project_id)
try:
from datalab.context import Context as _old_context
_old_context.default().set_project_id(project_id)
except ImportError:
pass
try:
if ('datalab_project_id' not in _IPython.get_ipython().user_ns):
_IPython.get_ipython().user_ns['datalab_project_id'] = _get_project_id
_IPython.get_ipython().user_ns['set_datalab_project_id'] = _set_project_id
except TypeError:
pass
|
Called when the extension is loaded.
Args:
shell - (NotebookWebApplication): handle to the Notebook interactive shell instance.
|
codesearchnet
|
def sari_score(predictions, labels, features, **unused_kwargs):
if "inputs" not in features:
raise ValueError("sari_score requires inputs feature")
inputs = tf.squeeze(features["inputs"], axis=[-1, -2])
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
outputs = tf.squeeze(outputs, axis=[-1, -2])
labels = tf.squeeze(labels, axis=[-1, -2])
labels = tf.expand_dims(labels, axis=1)
score, _, _, _ = get_sari(inputs, outputs, labels)
return score, tf.constant(1.0)
|
Computes the SARI scores from the given source, prediction and targets.
An approximate SARI scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4.
Also, this does not have beam search.
Args:
predictions: tensor, model predictions.
labels: tensor, gold output.
features: dict, containing inputs.
Returns:
sari: int, approx sari score
|
juraj-google-style
|
def get_headers_from_environ(environ):
headers = wsgiref.headers.Headers([])
for (header, value) in environ.iteritems():
if header.startswith('HTTP_'):
headers[header[5:].replace('_', '-')] = value
if ('CONTENT_TYPE' in environ):
headers['CONTENT-TYPE'] = environ['CONTENT_TYPE']
return headers
|
Get a wsgiref.headers.Headers object with headers from the environment.
Headers in environ are prefixed with 'HTTP_', are all uppercase, and have
had dashes replaced with underscores. This strips the HTTP_ prefix and
changes underscores back to dashes before adding them to the returned set
of headers.
Args:
environ: An environ dict for the request as defined in PEP-333.
Returns:
A wsgiref.headers.Headers object that's been filled in with any HTTP
headers found in environ.
|
codesearchnet
|
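A runnable sketch of the helper above (note the function targets Python 2, where dicts have iteritems; under Python 3 one would use environ.items()).
environ = {
    'HTTP_HOST': 'example.com',
    'HTTP_X_REQUEST_ID': 'abc123',
    'CONTENT_TYPE': 'application/json',
    'REQUEST_METHOD': 'GET',   # keys without the HTTP_ prefix (other than CONTENT_TYPE) are ignored
}
headers = get_headers_from_environ(environ)
print(headers['Host'], headers['X-Request-Id'], headers['Content-Type'])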
def join_global_room(client: GMatrixClient, name: str, servers: Sequence[str]=()) -> Room:
our_server_name = urlparse(client.api.base_url).netloc
assert our_server_name, "Invalid client's homeserver url"
servers = ([our_server_name] + [urlparse(s).netloc for s in servers if (urlparse(s).netloc not in {None, '', our_server_name})])
    our_server_global_room_alias_full = f'#{name}:{our_server_name}'
for server in servers:
        global_room_alias_full = f'#{name}:{server}'
try:
global_room = client.join_room(global_room_alias_full)
except MatrixRequestError as ex:
if (ex.code not in (403, 404, 500)):
raise
log.debug('Could not join global room', room_alias_full=global_room_alias_full, _exception=ex)
else:
if (our_server_global_room_alias_full not in global_room.aliases):
global_room.add_room_alias(our_server_global_room_alias_full)
global_room.aliases.append(our_server_global_room_alias_full)
break
else:
log.debug('Could not join any global room, trying to create one')
for _ in range(JOIN_RETRIES):
try:
global_room = client.create_room(name, is_public=True)
except MatrixRequestError as ex:
if (ex.code not in (400, 409)):
raise
try:
global_room = client.join_room(our_server_global_room_alias_full)
except MatrixRequestError as ex:
if (ex.code not in (404, 403)):
raise
else:
break
else:
break
else:
raise TransportError('Could neither join nor create a global room')
return global_room
|
Join or create a global public room with given name
First, try to join room on own server (client-configured one)
If can't, try to join on each one of servers, and if able, alias it in our server
If still can't, create a public room with name in our server
Params:
client: matrix-python-sdk client instance
name: name or alias of the room (without #-prefix or server name suffix)
servers: optional: sequence of known/available servers to try to find the room in
Returns:
matrix's Room instance linked to client
|
codesearchnet
|
def push(self, stream_id, timestamp, value):
stream = DataStream.FromEncoded(stream_id)
reading = IOTileReading(stream_id, timestamp, value)
try:
self.storage.push(stream, reading)
return Error.NO_ERROR
except StorageFullError:
return pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.RING_BUFFER_FULL)
|
Push a value to a stream.
Args:
stream_id (int): The stream we want to push to.
timestamp (int): The raw timestamp of the value we want to
store.
value (int): The 32-bit integer value we want to push.
Returns:
int: Packed 32-bit error code.
|
codesearchnet
|
def hget(self, key):
data = self.r.hget(self.hash, key)
if ((data is not None) and (not isinstance(data, str))):
            data = data.decode('utf-8')
return data
|
Read data from Redis for the provided key.
Args:
key (string): The key to read in Redis.
Returns:
(any): The response data from Redis.
|
codesearchnet
|
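A hedged sketch of what the wrapper above does, using the redis-py client directly; it assumes a Redis server reachable on localhost and that the wrapper was built around the hash 'my_hash'.
import redis

r = redis.Redis(host='localhost', port=6379)
r.hset('my_hash', 'greeting', 'hello')

raw = r.hget('my_hash', 'greeting')   # bytes: b'hello'
# The method above decodes non-str results to text before returning them.
text = str(raw, 'utf-8') if raw is not None and not isinstance(raw, str) else raw
print(text)                            # 'hello'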
def trunc(x):
if any_symbolic_tensors((x,)):
return Trunc().symbolic_call(x)
return backend.numpy.trunc(x)
|
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which is
closer to zero than `x` is. In short, the fractional part of the signed
number `x` is discarded.
Args:
x: Input tensor.
Returns:
The truncated value of each element in `x`.
Example:
>>> x = ops.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> ops.trunc(x)
array([-1.0, -1.0, -0.0, 0.0, 1.0, 1.0, 2.0])
|
github-repos
|
def is_hermitian(
matrix: np.ndarray,
*,
rtol: float = 1e-5,
atol: float = 1e-8) -> bool:
return (matrix.shape[0] == matrix.shape[1] and
np.allclose(matrix, np.conj(matrix.T), rtol=rtol, atol=atol))
|
Determines if a matrix is approximately Hermitian.
A matrix is Hermitian if it's square and equal to its adjoint.
Args:
matrix: The matrix to check.
rtol: The per-matrix-entry relative tolerance on equality.
atol: The per-matrix-entry absolute tolerance on equality.
Returns:
Whether the matrix is Hermitian within the given tolerance.
|
juraj-google-style
|
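A runnable sketch checking a few small matrices with the predicate above; the perturbation size is chosen to stay inside the default atol of 1e-8.
import numpy as np

pauli_y = np.array([[0, -1j], [1j, 0]])
almost = pauli_y + np.array([[0, 1e-9j], [0, 0]])   # breaks exact symmetry by 1e-9
not_hermitian = np.array([[0, 1], [2, 0]])

print(is_hermitian(pauli_y))        # True
print(is_hermitian(almost))         # True, within the default tolerances
print(is_hermitian(not_hermitian))  # False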
def UpdateUserCredentials(client_id, client_secret, refresh_token,
adwords_manager_cid, developer_token):
app_user = AppUser.query(AppUser.user == users.get_current_user()).fetch()[0]
app_user.client_id = client_id
app_user.client_secret = client_secret
app_user.refresh_token = refresh_token
app_user.adwords_manager_cid = adwords_manager_cid
app_user.developer_token = developer_token
app_user.put()
|
Update the credentials associated with application user.
Args:
client_id: str Client Id retrieved from the developer's console.
client_secret: str Client Secret retrieved from the developer's console.
refresh_token: str Refresh token generated with the above client id/secret.
adwords_manager_cid: str Customer Id for the AdWords manager account.
developer_token: str Developer Token for the AdWords account.
|
juraj-google-style
|
def _get_single_block_row_attention(block_id, to_start_block_id, to_end_block_id, num_rand_blocks, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1):
to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32)
perm_block = np.random.permutation(to_block_list)
illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1))
illegal_blocks.extend(list(range(global_block_left)))
illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id)))
if block_id == 1:
illegal_blocks.append(to_end_block_id - 2)
if block_id == to_end_block_id - 2:
illegal_blocks.append(1)
    selected_random_blocks = []
    for i in range(to_end_block_id - to_start_block_id):
        if perm_block[i] not in illegal_blocks:
            selected_random_blocks.append(perm_block[i])
        if len(selected_random_blocks) == num_rand_blocks:
            break
    return np.array(selected_random_blocks, dtype=np.int32)
|
For a single row block get random row attention.
Args:
block_id: int. block id of row.
to_start_block_id: int. random attention column start id.
to_end_block_id: int. random attention column end id.
num_rand_blocks: int. number of random blocks to be selected.
window_block_left: int. number of blocks of window to left of a block.
window_block_right: int. number of blocks of window to right of a block.
global_block_left: int. Number of blocks globally used to the left.
global_block_right: int. Number of blocks globally used to the right.
Returns:
row containing the random attention vector of size num_rand_blocks.
|
github-repos
|
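A runnable sketch of drawing random attention blocks for a single row with the helper above; the block geometry (16 blocks, first and last global) is an illustrative assumption.
import numpy as np

np.random.seed(0)
# Choose 3 random blocks for row block 5 out of blocks 1..14 of a 16-block sequence.
rand_blocks = _get_single_block_row_attention(
    block_id=5,
    to_start_block_id=1,
    to_end_block_id=15,
    num_rand_blocks=3,
)
print(rand_blocks)  # 3 block indices outside the sliding window and global blocks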
def compose_object(self, file_list, destination_file, content_type):
xml_setting_list = ['<ComposeRequest>']
for meta_data in file_list:
xml_setting_list.append('<Component>')
for (key, val) in meta_data.iteritems():
xml_setting_list.append(('<%s>%s</%s>' % (key, val, key)))
xml_setting_list.append('</Component>')
xml_setting_list.append('</ComposeRequest>')
xml = ''.join(xml_setting_list)
if (content_type is not None):
headers = {'Content-Type': content_type}
else:
headers = None
(status, resp_headers, content) = self.put_object((api_utils._quote_filename(destination_file) + '?compose'), payload=xml, headers=headers)
errors.check_status(status, [200], destination_file, resp_headers, body=content)
|
COMPOSE multiple objects together.
Using the given list of files, calls the put object with the compose flag.
This call merges all the files into the destination file.
Args:
file_list: list of dicts with the file name.
destination_file: Path to the destination file.
content_type: Content type for the destination file.
|
codesearchnet
|
def _build_attention(self, rank):
if self._attention_axes is None:
self._attention_axes = tuple(range(1, rank - 2))
else:
self._attention_axes = tuple(self._attention_axes)
self._dot_product_equation, self._combine_equation, attn_scores_rank = _build_attention_equation(rank, attn_axes=self._attention_axes)
norm_axes = tuple(range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))
self._softmax = Softmax(axis=norm_axes, dtype=self.dtype_policy)
self._dropout_layer = Dropout(rate=self._dropout, dtype=self.dtype_policy, seed=self.seed)
|
Builds multi-head dot-product attention computations.
This function builds attributes necessary for `_compute_attention` to
customize attention computation to replace the default dot-product
attention.
Args:
rank: the rank of query, key, value tensors.
|
github-repos
|
def enhex(d, separator=''):
v = binascii.hexlify(d).decode('ascii')
if separator:
return separator.join((v[i:(i + 2)] for i in range(0, len(v), 2)))
else:
return v
|
Convert bytes to their hexadecimal representation, optionally joined by a
given separator.
Args:
d(bytes): The data to convert to hexadecimal representation.
separator(str): The separator to insert between hexadecimal tuples.
Returns:
str: The hexadecimal representation of ``d``.
Examples:
>>> from pwny import *
>>> enhex(b'pwnypack')
'70776e797061636b'
>>> enhex(b'pwnypack', separator=' ')
'70 77 6e 79 70 61 63 6b'
|
codesearchnet
|
def upload_timeline(self, timeline_name, plaso_storage_path):
resource_url = '{0:s}/upload/'.format(self.api_base_url)
files = {'file': open(plaso_storage_path, 'rb')}
data = {'name': timeline_name}
response = self.session.post(resource_url, files=files, data=data)
try:
response_dict = response.json()
except ValueError:
raise RuntimeError(
'Could not decode JSON response from Timesketch'
' (Status {0:d}):\n{1:s}'.format(
response.status_code, response.content))
index_id = response_dict['objects'][0]['id']
return index_id
|
Create a timeline with the specified name from the given plaso file.
Args:
timeline_name (str): Name of timeline
plaso_storage_path (str): Local path of plaso file to be uploaded
Returns:
int: ID of uploaded timeline
Raises:
RuntimeError: When the JSON response from Timesketch cannot be decoded.
|
juraj-google-style
|
def path_to_text(self, path):
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = open(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ''
maxpages = 0
caching = True
pagenos = set()
pages_data = PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True)
for page in pages_data:
interpreter.process_page(page)
text = retstr.getvalue()
text = text.replace('\n', '')
fp.close()
device.close()
retstr.close()
return text
|
Transform local PDF file to string.
Args:
path: path to PDF file.
Returns:
string.
|
codesearchnet
|
def copy(self, datasets=None):
new_scn = self.__class__()
new_scn.attrs = self.attrs.copy()
new_scn.dep_tree = self.dep_tree.copy()
for ds_id in (datasets or self.keys()):
new_scn.datasets[ds_id] = self[ds_id]
if not datasets:
new_scn.wishlist = self.wishlist.copy()
else:
new_scn.wishlist = set([DatasetID.from_dict(ds.attrs)
for ds in new_scn])
return new_scn
|
Create a copy of the Scene including dependency information.
Args:
datasets (list, tuple): `DatasetID` objects for the datasets
to include in the new Scene object.
|
juraj-google-style
|
def is_erc20(self):
full_names = [f.full_name for f in self.functions]
return (('transfer(address,uint256)' in full_names) and ('transferFrom(address,address,uint256)' in full_names) and ('approve(address,uint256)' in full_names))
|
Check if the contract is an erc20 token
Note: it does not check for correct return values
Returns:
bool
|
codesearchnet
|
def __init__(self, job_id, context):
super(Job, self).__init__(job_id, context)
|
Initializes an instance of a Job.
Args:
job_id: the BigQuery job ID corresponding to this job.
context: a Context object providing project_id and credentials.
|
juraj-google-style
|
def rep(obj, *attrs, **kwargs):
s = obj.__class__.__name__
args = chain(((attr, getattr(obj, attr)) for attr in attrs), kwargs.items())
s += '(%s)' % ','.join('{}={!r}'.format(k, v) for k, v in args)
return s
|
Create a repr of a property based class quickly
Args:
obj -- instance of class
*attrs -- list of attrs to add to the representation
**kwargs -- Extra arguments to add that are not captured as attributes
Returns: A string representing the class
|
juraj-google-style
|
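A runnable sketch of the helper above on a tiny class; note that rep() itself relies on `from itertools import chain` being available in its module.
from itertools import chain  # needed by rep() above


class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y


p = Point(1, 2)
print(rep(p, 'x', 'y'))             # Point(x=1,y=2)
print(rep(p, 'x', label='origin'))  # Point(x=1,label='origin')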