Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes: codesearchnet, juraj-google-style, github-repos)
def image(request, data): try: width = int(request.GET.get('w', PYDENTICON_WIDTH)) except ValueError: raise SuspiciousOperation('Identicon width must be a positive integer.') try: height = int(request.GET.get('h', PYDENTICON_HEIGHT)) except ValueError: raise SuspiciousOperation('Identicon height must be a positive integer.') output_format = request.GET.get('f', PYDENTICON_FORMAT) try: padding = [int(p) for p in request.GET['p'].split(',')] except KeyError: padding = PYDENTICON_PADDING except ValueError: raise SuspiciousOperation('Identicon padding must consist out of 4 positive integers separated with commas.') if ('i' in request.GET): inverted = request.GET.get('i') if (inverted.lower() == 'true'): inverted = True elif (inverted.lower() == 'false'): inverted = False else: raise SuspiciousOperation('Inversion parameter must be a boolean (true/false).') else: inverted = PYDENTICON_INVERT if ((not isinstance(width, int)) or (width <= 0)): raise SuspiciousOperation('Identicon width must be a positive integer.') if ((not isinstance(height, int)) or (height <= 0)): raise SuspiciousOperation('Identicon height must be a positive integer.') if ((not all([(isinstance(p, int) and (p >= 0)) for p in padding])) or (len(padding) != 4)): raise SuspiciousOperation('Padding must be a 4-element tuple consisting out of positive integers.') if (output_format == 'png'): content_type = 'image/png' elif (output_format == 'ascii'): content_type = 'text/plain' else: raise SuspiciousOperation("Unsupported identicon format requested - '%s'" % output_format) generator = Generator(PYDENTICON_ROWS, PYDENTICON_COLUMNS, foreground=PYDENTICON_FOREGROUND, background=PYDENTICON_BACKGROUND, digest=PYDENTICON_DIGEST) content = generator.generate(data, width, height, padding=padding, output_format=output_format, inverted=inverted) response = HttpResponse(content, content_type=content_type) return response
Generates identicon image based on passed data. Arguments: data - Data which should be used for generating an identicon. This data will be used in order to create a digest which is used for generating the identicon. If the data passed is a hex digest already, the digest will be used as-is. Returns: Identicon image in raw format.
codesearchnet
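A minimal usage sketch of the underlying `pydenticon.Generator` call that the view above wraps. This assumes the third-party `pydenticon` package; the grid size, colors, and digest below are made-up stand-ins for the `PYDENTICON_*` settings, not values taken from the snippet.

```python
import hashlib

from pydenticon import Generator

# Stand-ins for PYDENTICON_ROWS/COLUMNS/FOREGROUND/BACKGROUND/DIGEST (assumed values).
generator = Generator(5, 5, digest=hashlib.sha1,
                      foreground=["#1abc9c"], background="#ffffff")

# Mirrors the view's generator.generate(...) call for a PNG identicon.
png_bytes = generator.generate("alice@example.com", 120, 120,
                               padding=(4, 4, 4, 4),
                               output_format="png", inverted=False)
with open("identicon.png", "wb") as handle:
    handle.write(png_bytes)
```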
def _load_schema_for_record(data, schema=None): if (schema is None): if ('$schema' not in data): raise SchemaKeyNotFound(data=data) schema = data['$schema'] if isinstance(schema, six.string_types): schema = load_schema(schema_name=schema) return schema
Load the schema from a given record. Args: data (dict): record data. schema (Union[dict, str]): schema to validate against. Returns: dict: the loaded schema. Raises: SchemaNotFound: if the given schema was not found. SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was found in ``data``. jsonschema.SchemaError: if the schema is invalid.
codesearchnet
def format_diff_xml(a_xml, b_xml): return '\n'.join(difflib.ndiff(reformat_to_pretty_xml(a_xml).splitlines(), reformat_to_pretty_xml(b_xml).splitlines()))
Create a diff between two XML documents. Args: a_xml: str b_xml: str Returns: str : `Differ`-style delta
codesearchnet
def remove_all_servers(self): cmd = self.command_builder('ntp', disable=True) return self.configure(cmd)
Remove all NTP server entries from the node config Returns: True if the operation succeeds, otherwise False.
codesearchnet
def get_all_status(self, only_min=False): if (len(self) == 0): if only_min: return self.S_INIT else: return [self.S_INIT] self.check_status() status_list = [task.status for task in self] if only_min: return min(status_list) else: return status_list
Returns a list with the status of the tasks in self. Args: only_min: If True, the minimum of the status is returned.
codesearchnet
def get_cbm_vbm(self, tol=0.001, abs_tol=False, spin=None): tdos = self.get_densities(spin) if not abs_tol: tol = tol * tdos.sum() / tdos.shape[0] i_fermi = 0 while self.energies[i_fermi] <= self.efermi: i_fermi += 1 i_gap_start = i_fermi while i_gap_start - 1 >= 0 and tdos[i_gap_start - 1] <= tol: i_gap_start -= 1 i_gap_end = i_gap_start while i_gap_end < tdos.shape[0] and tdos[i_gap_end] <= tol: i_gap_end += 1 i_gap_end -= 1 return self.energies[i_gap_end], self.energies[i_gap_start]
Expects a DOS object and finds the cbm and vbm. Args: tol: tolerance in occupations for determining the gap abs_tol: An absolute tolerance (True) and a relative one (False) spin: Possible values are None - finds the gap in the summed densities, Up - finds the gap in the up spin channel, Down - finds the gap in the down spin channel. Returns: (cbm, vbm): float in eV corresponding to the gap
juraj-google-style
def patch(self, id_or_uri, operation, path, value, timeout=(- 1)): return self._client.patch(id_or_uri, operation, path, value, timeout=timeout)
Uses the PATCH to update a resource for a given logical switch group. Only one operation can be performed in each PATCH call. Args: id_or_uri: Can be either the resource ID or the resource URI. operation: Patch operation path: Path value: Value timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Updated resource.
codesearchnet
def match(pattern, name): try: re_pat = _PATTERN_CACHE[(pattern, True)] except KeyError: res = "(?ms)" + _translate(pattern) + r'\Z' _PATTERN_CACHE[(pattern, True)] = re_pat = re.compile(res) return re_pat.match(name) is not None
Test whether a name matches a wildcard pattern. Arguments: pattern (str): A wildcard pattern, e.g. ``"*.py"``. name (str): A filename. Returns: bool: `True` if the filename matches the pattern.
juraj-google-style
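The `_translate` helper is not shown above, so as a rough illustration of the intended wildcard semantics, the standard library's `fnmatch` gives the same kind of behaviour:

```python
import fnmatch

# Same idea as match() above, illustrated with the stdlib translator.
for name in ("app.py", "app.pyc", "notes.txt"):
    print(name, fnmatch.fnmatch(name, "*.py"))
# app.py True, app.pyc False, notes.txt False
```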
def trainGP(self,fast=False,scales0=None,fixed0=None,lambd=None): assert self.n_terms>0, 'CVarianceDecomposition:: No variance component terms' if not self.init: self.initGP(fast=fast) if lambd!=None: self.gp.setLambda(lambd) if scales0!=None: self.setScales(scales0) self.vd.initGPparams() if fixed0!=None: params = self.gp.getParams() params['dataTerm'] = fixed0 self.gp.setParams(params) conv =self.vd.trainGP() self.cache['Sigma'] = None self.cache['Hessian'] = None return conv
Train the gp Args: fast: if true and the gp has not been initialized, initializes a kronSum gp scales0: initial variance components params fixed0: initial fixed effect params
juraj-google-style
def grad(f, has_aux=False): def check_loss_shape(np_loss): if not isinstance(np_loss, tf_np.ndarray): raise ValueError('The result of the function to take gradient must be an ndarray.') if not np_loss.shape.is_compatible_with([]): raise ValueError('The result of the function to take gradient must be a scalar.') def _f(params, *args): with backprop.GradientTape() as g: g.watch(nest.flatten(params)) outputs = f(params, *args) if has_aux: np_loss, aux = outputs else: np_loss = outputs check_loss_shape(np_loss) tf_grads = g.gradient(np_loss, params) if has_aux: res = (tf_grads, aux) else: res = tf_grads return _tf_to_np(res) return _f
Returns a function that computes gradient of f. Gradients can only be computed through numpy and tensorflow operations and not through python float operations and values. Args: f: a function of type (params, *args) -> scalar. 'params' can be a nested structure (made of lists and tuples) of ndarrays and the gradient is evaluated against it. `scalar` is a scalar ndarray. has_aux: bool, indicates whether fun returns a pair where the first element is considered the output of the mathematical function to be differentiated and the second element is auxiliary data. Returns: A gradient function of type (params, *args) -> gradients, where the result 'gradients' has the same structure and shapes as 'params'.
github-repos
def execute(self, data_dict, callback, group=None, trace=None): group = group or self.group context = _ScopedContext(data_dict, self.undefined_str, group=group) _Execute(self._program.Statements(), context, callback, trace)
Low level method to expand the template piece by piece. Args: data_dict: The JSON data dictionary. callback: A callback which should be called with each expanded token. group: Dictionary of name -> Template instance (for styles) Example: You can pass 'f.write' as the callback to write directly to a file handle.
juraj-google-style
def allocate(self, size, max_time_to_block_ms): with self._lock: if self._free: return self._free.popleft() elif (self._poolable_size == 0): return io.BytesIO() else: buf = None more_memory = threading.Condition(self._lock) self._waiters.append(more_memory) while (buf is None): start_wait = time.time() more_memory.wait((max_time_to_block_ms / 1000.0)) end_wait = time.time() if self.wait_time: self.wait_time.record((end_wait - start_wait)) if self._free: buf = self._free.popleft() else: self._waiters.remove(more_memory) raise Errors.KafkaTimeoutError('Failed to allocate memory within the configured max blocking time') removed = self._waiters.popleft() assert (removed is more_memory), 'Wrong condition' if (self._free and self._waiters): self._waiters[0].notify() return buf
Allocate a buffer of the given size. This method blocks if there is not enough memory and the buffer pool is configured with blocking mode. Arguments: size (int): The buffer size to allocate in bytes [ignored] max_time_to_block_ms (int): The maximum time in milliseconds to block for buffer memory to be available Returns: io.BytesIO
codesearchnet
def next_state_scope(self, next_state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: return dict(zip(self.rddl.domain.next_state_fluent_ordering, next_state_fluents))
Returns a partial scope with current next state-fluents. Args: next_state_fluents (Sequence[tf.Tensor]): The next state fluents. Returns: A mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
juraj-google-style
def listen(self, log, noprint=True): try: result = self.decode_event(log.topics, log.data) except ValueError: return if not noprint: print(result) return result
Return a dictionary representation of the Log instance. Note: This function won't work with anonymous events. Args: log (processblock.Log): The Log instance that needs to be parsed. noprint (bool): Flag to turn off printing of the decoded log instance.
juraj-google-style
def get_pdbs_for_gene(bigg_model, bigg_gene, cache_dir=tempfile.gettempdir(), force_rerun=False): my_structures = [] gene = ssbio.utils.request_json(link='http: uniprots = [] if ('database_links' in gene): if ('UniProt' in gene['database_links']): uniprots = [x['id'] for x in gene['database_links']['UniProt']] elif ('NCBI GI' in gene['database_links']): uniprots = [] gis = [x['id'] for x in gene['database_links']['NCBI GI']] gi_uniprots = bs_unip.mapping(fr='P_GI', to='ACC', query=gis).values() uniprots.extend(gi_uniprots) uniprots = ssbio.utils.flatlist_dropdup(uniprots) uniprots = [x for x in uniprots if ssbio.databases.uniprot.is_valid_uniprot_id(x)] if uniprots: for u in uniprots: get_best_structure = ssbio.databases.pdb.best_structures(uniprot_id=u, outdir=cache_dir) if get_best_structure: for best_structure in get_best_structure: my_structures.append((best_structure['pdb_id'], best_structure['chain_id'])) return my_structures
Attempt to get a rank-ordered list of available PDB structures for a BiGG Model and its gene. Args: bigg_model: BiGG Model ID bigg_gene: BiGG Gene ID Returns: list: rank-ordered list of tuples of (pdb_id, chain_id)
codesearchnet
def get_members(self, name): grpid = re.search(r'(\d+)', name).group() command = 'show port-channel %s all-ports' % grpid config = self.node.enable(command, 'text') return re.findall(r'\b(?!Peer)Ethernet[\d/]*\b', config[0]['result']['output'])
Returns the member interfaces for the specified Port-Channel Args: name(str): The Port-channel interface name to return the member interfaces for Returns: A list of physical interface names that belong to the specified interface
juraj-google-style
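As a quick illustration of the member-extraction regex above, run against a canned fragment of `show port-channel ... all-ports` output (the sample text is made up):

```python
import re

sample = "Port Channel Port-Channel10:\n  Active Ports: Ethernet1/1 Ethernet1/2 PeerEthernet2/1"
print(re.findall(r'\b(?!Peer)Ethernet[\d/]*\b', sample))
# ['Ethernet1/1', 'Ethernet1/2'] -- the Peer* entry is filtered out
```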
def _MatchValue(expected, actual): if isinstance(expected, dict): if not isinstance(actual, dict): return False for k, v in expected.items(): if k not in actual: logging.log(1, 'Not exist: field=' + k) return False if not MessageValue._MatchValue(v, actual[k]): logging.log(1, 'Different: field=%s, expected=%s, actual=%s', k, v, actual[k]) return False return True if isinstance(expected, list): if not isinstance(actual, list): return False for e in expected: found = False for a in actual: if MessageValue._MatchValue(e, a): found = True break if not found: return False return True if isinstance(expected, stl.base.QualifierValue.Resolved): return expected.ValidateAndSet(actual) if isinstance(expected, stl.base.FuncSet): expected.SetValue(actual) return True if isinstance(expected, stl.base.LocalVar): return expected.value == actual if isinstance(expected, stl.base.Func): return expected.Run() == actual if isinstance(expected, MessageValue): return expected._MatchFromString(actual) return expected == actual
Whether or not |expected| is same value of |actual|. Args: expected: Expected value. actual: Actual value. Returns: True if: 1) Type of |expected| and of |actual| must be same. 2) If type of |expected| is dictionary or sub-message, all fields specified in |expected| must have same value in |actual|. 3) If type of |expected| is array, all entries specified in |expected| must exist in |actual| in any order. 4) If type of |expected| is either integer or string, |expected| must be same to |actual|.
github-repos
def MatchBuildContext(self, target_os, target_arch, target_package, context=None): for spec in self.Get('ClientBuilder.target_platforms', context=context): (spec_os, arch, package_name) = spec.split('_') if ((spec_os == target_os) and (arch == target_arch) and (package_name == target_package)): return True return False
Return true if target_platforms matches the supplied parameters. Used by buildanddeploy to determine what clients need to be built. Args: target_os: which os we are building for in this run (linux, windows, darwin) target_arch: which arch we are building for in this run (i386, amd64) target_package: which package type we are building (exe, dmg, deb, rpm) context: config_lib context Returns: bool: True if target_platforms spec matches parameters.
codesearchnet
def validate(bo, error_level: str='WARNING') -> Tuple[(bool, List[Tuple[(str, str)]])]: if bo.ast: bo = validate_functions(bo.ast, bo) if (error_level == 'WARNING'): bo = validate_arg_values(bo.ast, bo) else: bo.validation_messages.append(('ERROR', 'Invalid BEL Statement - cannot parse')) for msg in bo.validation_messages: if (msg[0] == 'ERROR'): bo.parse_valid = False break return bo
Semantically validate BEL AST Add errors and warnings to bel_obj.validation_messages Error Levels are similar to log levels - selecting WARNING includes both WARNING and ERROR, selecting ERROR just includes ERROR Args: bo: main BEL language object error_level: return ERRORs only or also WARNINGs Returns: Tuple[bool, List[Tuple[str, str]]]: (is_valid, messages)
codesearchnet
def metamodel_from_file(file_name, **kwargs): with codecs.open(file_name, 'r', 'utf-8') as f: lang_desc = f.read() metamodel = metamodel_from_str(lang_desc=lang_desc, file_name=file_name, **kwargs) return metamodel
Creates new metamodel from the given file. Args: file_name(str): The name of the file with textX language description. other params: See metamodel_from_str.
codesearchnet
def __init__(self, real_env, world_model_dir, hparams, random_starts, setable_initial_frames=False): self._setable_initial_frames = setable_initial_frames if self._setable_initial_frames: real_obs_shape = real_env.observation_space.shape shape = (1, hparams.frame_stack_size) + real_obs_shape self._initial_frames = np.zeros(shape=shape, dtype=np.uint8) def initial_frame_chooser(batch_size): assert batch_size == 1 return self._initial_frames else: initial_frame_chooser = rl_utils.make_initial_frame_chooser( real_env, hparams.frame_stack_size, simulation_random_starts=random_starts, simulation_flip_first_random_for_beginning=False ) env_fn = make_simulated_env_fn_from_hparams( real_env, hparams, batch_size=1, initial_frame_chooser=initial_frame_chooser, model_dir=world_model_dir, ) env = env_fn(in_graph=False) self.env = FlatBatchEnv(env) self.observation_space = self.env.observation_space self.action_space = self.env.action_space
Init. Args: real_env: gym environment. world_model_dir: path to world model checkpoint directory. hparams: hparams for rlmb pipeline. random_starts: whether to restart the world model from random frames, or only from initial ones (from beginning of episodes). Valid only when `setable_initial_frames` is set to False. setable_initial_frames: if True, initial_frames for world model should be set by `add_to_initial_stack`.
juraj-google-style
def mean_absolute_error(y_true, y_pred): y_pred = ops.convert_to_tensor(y_pred) y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred) return ops.mean(ops.abs(y_true - y_pred), axis=-1)
Computes the mean absolute error between labels and predictions. ```python loss = mean(abs(y_true - y_pred), axis=-1) ``` Args: y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`. Returns: Mean absolute error values with shape = `[batch_size, d0, .. dN-1]`. Example: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = keras.losses.mean_absolute_error(y_true, y_pred)
github-repos
def alerts(self): if (not self.__alerts): self.__alerts = Alerts(self.__connection) return self.__alerts
Gets the Alerts API client. Returns: Alerts:
codesearchnet
def get_job(self, jobid): import shlex from pyccc.job import Job job = Job(engine=self) job.jobid = job.rundata.containerid = jobid try: jobdata = self.client.inspect_container(job.jobid) except docker.errors.NotFound: raise exceptions.JobNotFound(('The daemon could not find containter "%s"' % job.jobid)) cmd = jobdata['Config']['Cmd'] entrypoint = jobdata['Config']['Entrypoint'] if ((len(cmd) == 3) and (cmd[0:2] == ['sh', '-c'])): cmd = cmd[2] elif (entrypoint is not None): cmd = (entrypoint + cmd) if isinstance(cmd, list): cmd = ' '.join((shlex.quote(x) for x in cmd)) job.command = cmd job.env = jobdata['Config']['Env'] job.workingdir = jobdata['Config']['WorkingDir'] job.rundata.container = jobdata return job
Return a Job object for the requested job id. The returned object will be suitable for retrieving output, but depending on the engine, may not populate all fields used at launch time (such as `job.inputs`, `job.commands`, etc.) Args: jobid (str): container id Returns: pyccc.job.Job: job object for this container Raises: pyccc.exceptions.JobNotFound: if no job could be located for this jobid
codesearchnet
def sync_ik_robot(self, joint_positions, simulate=False, sync_last=True): num_joints = len(joint_positions) if (not sync_last): num_joints -= 1 for i in range(num_joints): if simulate: p.setJointMotorControl2(self.ik_robot, self.actual[i], p.POSITION_CONTROL, targetVelocity=0, targetPosition=joint_positions[i], force=500, positionGain=0.5, velocityGain=1.0) else: p.resetJointState(self.ik_robot, self.actual[i], joint_positions[i])
Force the internal robot model to match the provided joint angles. Args: joint_positions (list): a list or flat numpy array of joint positions. simulate (bool): If True, actually use physics simulation, else write to physics state directly. sync_last (bool): If False, don't sync the last joint angle. This is useful for directly controlling the roll at the end effector.
codesearchnet
def _joint_mean(self): with tf.name_scope('mean_joint'): with tf.control_dependencies(self.runtime_assertions): initial_latent_mean = _broadcast_to_shape(self.initial_state_prior.mean()[(..., tf.newaxis)], tf.concat([self.batch_shape_tensor(), [self.latent_size, 1]], axis=0)) initial_observation_mean = _propagate_mean(initial_latent_mean, self.get_observation_matrix_for_timestep(self.initial_step), self.get_observation_noise_for_timestep(self.initial_step)) mean_step = build_kalman_mean_step(self.get_transition_matrix_for_timestep, self.get_transition_noise_for_timestep, self.get_observation_matrix_for_timestep, self.get_observation_noise_for_timestep) (latent_means, observation_means) = tf.scan(mean_step, elems=tf.range((self.initial_step + 1), self.final_step), initializer=(initial_latent_mean, initial_observation_mean)) latent_means = tf.concat([initial_latent_mean[(tf.newaxis, ...)], latent_means], axis=0) observation_means = tf.concat([initial_observation_mean[(tf.newaxis, ...)], observation_means], axis=0) latent_means = tf.squeeze(latent_means, (- 1)) latent_means = distribution_util.move_dimension(latent_means, 0, (- 2)) observation_means = tf.squeeze(observation_means, (- 1)) observation_means = distribution_util.move_dimension(observation_means, 0, (- 2)) return (latent_means, observation_means)
Compute prior means for all variables via dynamic programming. Returns: latent_means: Prior means of latent states `z_t`, as a `Tensor` of shape `batch_shape + [num_timesteps, latent_size]` observation_means: Prior covariance matrices of observations `x_t`, as a `Tensor` of shape `batch_shape + [num_timesteps, observation_size]`
codesearchnet
def _add_case(self, case_obj): if self.case(case_obj['_id']): raise IntegrityError("Case %s already exists in database" % case_obj['_id']) return self.case_collection.insert_one(case_obj)
Add a case to the database If the case already exists exception is raised Args: case_obj(Case)
juraj-google-style
def dump(self, output, close_after_write=True): self.open(output) try: self.make_worksheet(self.table_name) self.write_table() finally: if close_after_write: self.close()
Write a worksheet to the current workbook. Args: output (str): Path to the workbook file to write. close_after_write (bool, optional): Close the workbook after write. Defaults to |True|.
codesearchnet
def query_with_attributes(type_to_query, client): session = client.create_session() query = session.query(Attribute.name, Attribute.value, Entity.id).join(Entity).filter(Entity.type == type_to_query) df = client.df_query(query) session.close() df = df.dropna(how='any') df = df.set_index(['id', 'name']).unstack().reset_index() df.columns = ['id'] + list(df.columns.get_level_values(1)[1:]) return df
Query all entities of a specific type, with their attributes Args: type_to_query (str): type of entity to query client: DB client to perform query with Returns: pandas.DataFrame: table of entities, with attributes as columns
juraj-google-style
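The reshaping at the end (drop incomplete rows, pivot attribute names into columns, flatten the column index) can be illustrated with a small stand-alone pandas frame (toy data, not from the snippet):

```python
import pandas as pd

df = pd.DataFrame({
    "id":    [1, 1, 2, 2],
    "name":  ["color", "size", "color", "size"],
    "value": ["red", "L", "blue", "M"],
})
wide = df.set_index(["id", "name"]).unstack().reset_index()
wide.columns = ["id"] + list(wide.columns.get_level_values(1)[1:])
print(wide)  # columns: id, color, size -- one row per entity
```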
def set_column_count(self, count): current_row_count = self.row_count() current_column_count = self.column_count() if count > current_column_count: cl = TableEditableItem if self._editable else TableItem for r_key in self.children.keys(): row = self.children[r_key] for i in range(current_column_count, count): row.append(cl(), str(i)) if self._editable: row.children[str(i)].onchange.connect( self.on_item_changed, int(r_key), int(i)) self._update_first_row() elif count < current_column_count: for row in self.children.values(): for i in range(count, current_column_count): row.remove_child(row.children[str(i)]) self._column_count = count
Sets the table column count. Args: count (int): the new number of columns
juraj-google-style
def getRowByIndex(self, index): assert isinstance(index, int) return Row(self._impl.getRowByIndex(index))
Get row by numeric index. Args: index: Zero-based index of the row to get. Returns: The corresponding row.
juraj-google-style
def add_to_tensor(self, x, name='add_to_tensor'): with self._name_scope(name): x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x') self._check_input_dtype(x) return self._add_to_tensor(x)
Add matrix represented by this operator to `x`. Equivalent to `A + x`. Args: x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`. name: A name to give this `Op`. Returns: A `Tensor` with broadcast shape and same `dtype` as `self`.
github-repos
def compare_names(first, second): first = name_to_vector(first) second = name_to_vector(second) zipped = zip(first, second) if not zipped: return 0 similarity_factor = 0 for fitem, _ in zipped: if fitem in second: similarity_factor += 1 return (float(similarity_factor) / len(zipped)) * 100
Compare two names in a complicated, but more error-prone, way. The algorithm uses vector comparison. Example: >>> compare_names("Franta Putšálek", "ing. Franta Putšálek") 100.0 >>> compare_names("F. Putšálek", "ing. Franta Putšálek") 50.0 Args: first (str): First name as string. second (str): Second name as string. Returns: float: Percentage of the similarity.
juraj-google-style
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool=False): residual = hidden_states hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) return (hidden_states, attn_weights)
Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Input to the layer. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Attention mask. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings, to be added to `hidden_states`. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes of the backbone feature maps. spatial_shapes_list (`List[Tuple[int, int]]`, *optional*): Spatial shapes of the backbone feature maps (but as list for export compatibility). level_start_index (`torch.LongTensor`, *optional*): Level start index. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def initialize(self): self.log.info('Initializing the snippet package %s.', self.package) start_time = time.perf_counter() self.log.debug('Preparing to start the snippet server of %s.', self.package) self.before_starting_server() try: self.log.debug('Starting the snippet server of %s.', self.package) self.start_server() self.log.debug('Making a connection to the snippet server of %s.', self.package) self._make_connection() except Exception: self.log.error('Error occurred trying to start and connect to the snippet server of %s.', self.package) try: self.stop() except Exception: self.log.exception('Failed to stop the snippet package %s after failure to start and connect.', self.package) raise self.log.debug('Snippet package %s initialized after %.1fs.', self.package, time.perf_counter() - start_time)
Initializes the snippet client to interact with the remote device. This function contains following stages: 1. before starting server: preparing to start the snippet server. 2. start server: starting the snippet server on the remote device. 3. make connection: making a connection to the snippet server. An error occurring at any stage will abort the initialization. Only errors at the `start_server` and `make_connection` stages will trigger `stop` to clean up. Raises: errors.ProtocolError: something went wrong when exchanging data with the server. errors.ServerStartPreCheckError: when prechecks for starting the server failed. errors.ServerStartError: when failed to start the snippet server.
github-repos
def scalar(name, data, step=None, description=None): summary_metadata = metadata.create_summary_metadata(display_name=None, description=description) summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None) or tf.summary.summary_scope) with summary_scope(name, 'scalar_summary', values=[data, step]) as (tag, _): tf.debugging.assert_scalar(data) return tf.summary.write(tag=tag, tensor=tf.cast(data, tf.float32), step=step, metadata=summary_metadata)
Write a scalar summary. Arguments: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A real numeric scalar value, convertible to a `float32` Tensor. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None.
codesearchnet
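A hypothetical usage sketch, assuming the `scalar()` defined above is used with a TF 2.x default summary writer; the log directory and values are made up:

```python
import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/demo_logs")  # made-up path
with writer.as_default():
    for step in range(3):
        # scalar() is the function defined above; step must be supplied here.
        scalar("train/loss", 0.5 / (step + 1), step=step,
               description="Toy loss curve.")
```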
def _collect_metrics(repo, path, recursive, typ, xpath, branch): outs = [out for stage in repo.stages() for out in stage.outs] if path: try: outs = repo.find_outs_by_path(path, outs=outs, recursive=recursive) except OutputNotFoundError: logger.debug("stage file not for found for '{}' in branch '{}'".format(path, branch)) return [] res = [] for o in outs: if (not o.metric): continue if ((not typ) and isinstance(o.metric, dict)): t = o.metric.get(o.PARAM_METRIC_TYPE, typ) x = o.metric.get(o.PARAM_METRIC_XPATH, xpath) else: t = typ x = xpath res.append((o, t, x)) return res
Gather all the metric outputs. Args: path (str): Path to a metric file or a directory. recursive (bool): If path is a directory, do a recursive search for metrics on the given path. typ (str): The type of metric to search for, could be one of the following (raw|json|tsv|htsv|csv|hcsv). xpath (str): Path to search for. branch (str): Branch to look up for metrics. Returns: list(tuple): (output, typ, xpath) - output: - typ: - xpath:
codesearchnet
def matrix(self): matrix = (c_float * 6)() rc = self._libinput.libinput_device_config_calibration_get_matrix(self._handle, matrix) return (rc, tuple(matrix))
The current calibration matrix for this device. Returns: (bool, (float, float, float, float, float, float)): :obj:`False` if no calibration is set and the returned matrix is the identity matrix, :obj:`True` otherwise. :obj:`tuple` representing the first two rows of a 3x3 matrix as described in :meth:`set_matrix`.
codesearchnet
def _order_code(dis_code: pycnite.types.DisassembledCode) -> OrderedCode: ops = opcodes.build_opcodes(dis_code) add_pop_block_targets(ops) blocks = compute_order(ops, dis_code.python_version) return OrderedCode(dis_code.code, ops, blocks)
Split a CodeType object into ordered blocks. This takes a CodeType object (i.e., a piece of compiled Python code) and splits it into ordered basic blocks. Args: dis_code: A pycnite.types.DisassembledCode object. Returns: An OrderedCode instance.
github-repos
def __validate_args(self, func_name, args, kwargs): from pyvalid.validators import Validator for i, (arg_name, accepted_values) in enumerate(self.accepted_args): if i < len(args): value = args[i] else: if arg_name in kwargs: value = kwargs[arg_name] elif i in self.optional_args: continue else: raise InvalidArgumentNumberError(func_name) is_valid = False for accepted_val in accepted_values: is_validator = ( isinstance(accepted_val, Validator) or ( isinstance(accepted_val, MethodType) and hasattr(accepted_val, '__func__') and isinstance(accepted_val.__func__, Validator) ) ) if is_validator: is_valid = accepted_val(value) elif isinstance(accepted_val, type): is_valid = isinstance(value, accepted_val) else: is_valid = value == accepted_val if is_valid: break if not is_valid: ord_num = self.__ordinal(i + 1) raise ArgumentValidationError( ord_num, func_name, value, accepted_values )
Compare value of each required argument with list of accepted values. Args: func_name (str): Function name. args (list): Collection of the position arguments. kwargs (dict): Collection of the keyword arguments. Raises: InvalidArgumentNumberError: When position or count of the arguments is incorrect. ArgumentValidationError: When encountered unexpected argument value.
juraj-google-style
def load(self, context): try: import tensorflow except ImportError: return from tensorboard.plugins.beholder.beholder_plugin import BeholderPlugin return BeholderPlugin(context)
Returns the plugin, if possible. Args: context: The TBContext flags. Returns: A BeholderPlugin instance or None if it couldn't be loaded.
juraj-google-style
def _get_condition_json(self, index): condition = self.condition_data[index] condition_log = { 'name': condition[0], 'value': condition[1], 'type': condition[2], 'match': condition[3] } return json.dumps(condition_log)
Method to generate json for logging audience condition. Args: index: Index of the condition. Returns: String: Audience condition JSON.
juraj-google-style
def get_size(self, value=None): if value is None: if not self: return 0 elif issubclass(type(self[0]), GenericType): return len(self) * self[0].get_size() return sum(item.get_size() for item in self) return type(self)(value).get_size()
Return the size in bytes. Args: value: In structs, the user can assign other value instead of this class' instance. Here, in such cases, ``self`` is a class attribute of the struct. Returns: int: The size in bytes.
juraj-google-style
def Map(self, function): new_table = self.__class__() new_table._table = [self.header] for row in self: filtered_row = function(row) if filtered_row: new_table.Append(filtered_row) return new_table
Applies the function to every row in the table. Args: function: A function applied to each row. Returns: A new TextTable() Raises: TableError: When the transform does not produce a valid row entry. The transform must be compatible with Append().
codesearchnet
def get_embeddings_index(embedding_type='glove.42B.300d', embedding_dims=None, embedding_path=None, cache=True): if (embedding_path is not None): embedding_type = embedding_path embeddings_index = _EMBEDDINGS_CACHE.get(embedding_type) if (embeddings_index is not None): return embeddings_index if (embedding_path is None): embedding_type_obj = get_embedding_type(embedding_type) extract = embedding_type_obj.get('extract', True) file_path = get_file(embedding_type_obj['file'], origin=embedding_type_obj['url'], extract=extract, cache_subdir='embeddings', file_hash=embedding_type_obj.get('file_hash')) if ('file_in_zip' in embedding_type_obj): zip_folder = file_path.split('.zip')[0] with ZipFile(file_path, 'r') as zf: zf.extractall(zip_folder) file_path = os.path.join(zip_folder, embedding_type_obj['file_in_zip']) elif extract: if file_path.endswith('.zip'): file_path = file_path.split('.zip')[0] else: file_path = embedding_path embeddings_index = _build_embeddings_index(file_path, embedding_dims) if cache: _EMBEDDINGS_CACHE[embedding_type] = embeddings_index return embeddings_index
Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed. Args: embedding_type: The embedding type to load. embedding_path: Path to a local embedding to use instead of the embedding type. Ignores `embedding_type` if specified. Returns: The embeddings indexed by word.
codesearchnet
def _read_mode_utopt(self, size, kind): temp = self._read_fileng(size) data = dict(kind=kind, length=size, granularity=('minutes' if int(temp[0]) else 'seconds'), timeout=bytes(chr(int(temp[0:], base=2)), encoding='utf-8')) return data
Read User Timeout option. Positional arguments: * size - int, length of option * kind - int, 28 (User Timeout Option) Returns: * dict -- extracted User Timeout (TIMEOUT) option Structure of TCP TIMEOUT [RFC 5482]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Kind = 28 | Length = 4 |G| User Timeout | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 tcp.timeout.kind Kind (28) 1 8 tcp.timeout.length Length (4) 2 16 tcp.timeout.granularity Granularity 2 17 tcp.timeout.timeout User Timeout
codesearchnet
def make_datastore_query(self, cursor=None): filters = {} filters['__key__ >= '] = _key_for_namespace(self.namespace_start, self.app) filters['__key__ <= '] = _key_for_namespace(self.namespace_end, self.app) return datastore.Query('__namespace__', filters=filters, keys_only=True, cursor=cursor, _app=self.app)
Returns a datastore.Query that generates all namespaces in the range. Args: cursor: start cursor for the query. Returns: A datastore.Query instance that generates db.Keys for each namespace in the NamespaceRange.
codesearchnet
def set_metadata(self, key: str, value: Any, cloneable: bool=False) -> 'DNA': self.metadata.rebind({key: value}, raise_on_no_change=False, skip_notification=True) if cloneable: self._cloneable_metadata_keys.add(key) return self
Set metadata associated with a key. Metadata associated with the DNA will be persisted and carried over across processes, which is different the `userdata`. (See `set_userdata` for more details.) Args: key: Key for the metadata. value: Value for the metadata. cloneable: If True, the key/value will be propagated during clone. Returns: Self.
github-repos
def db_ws004c(self, value=None): if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `db_ws004c`'.format(value)) self._db_ws004c = value
Corresponds to IDD Field `db_ws004c` Mean coincident dry-bulb temperature to wind speed corresponding to 0.40% cumulative frequency for coldest month Args: value (float): value for IDD Field `db_ws004c` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def AddIndex(self, path_segment_index): if path_segment_index in self._weight_per_index: raise ValueError('Path segment index already set.') self._weight_per_index[path_segment_index] = 0
Adds a path segment index and sets its weight to 0. Args: path_segment_index: an integer containing the path segment index. Raises: ValueError: if the path segment weights already contains the path segment index.
juraj-google-style
def _ValidateDataTypeDefinition(cls, data_type_definition): if (not cls._IsIdentifier(data_type_definition.name)): raise ValueError('Data type definition name: {0!s} not a valid identifier'.format(data_type_definition.name)) if keyword.iskeyword(data_type_definition.name): raise ValueError('Data type definition name: {0!s} matches keyword'.format(data_type_definition.name)) members = getattr(data_type_definition, 'members', None) if (not members): raise ValueError('Data type definition name: {0!s} missing members'.format(data_type_definition.name)) defined_attribute_names = set() for member_definition in members: attribute_name = member_definition.name if (not cls._IsIdentifier(attribute_name)): raise ValueError('Attribute name: {0!s} not a valid identifier'.format(attribute_name)) if attribute_name.startswith('_'): raise ValueError('Attribute name: {0!s} starts with underscore'.format(attribute_name)) if keyword.iskeyword(attribute_name): raise ValueError('Attribute name: {0!s} matches keyword'.format(attribute_name)) if (attribute_name in defined_attribute_names): raise ValueError('Attribute name: {0!s} already defined'.format(attribute_name)) defined_attribute_names.add(attribute_name)
Validates the data type definition. Args: data_type_definition (DataTypeDefinition): data type definition. Raises: ValueError: if the data type definition is not considered valid.
codesearchnet
def get_container_list(self) -> list: containers = [] containers_list = self._client.containers.list() for c_list in containers_list: containers.append(c_list.short_id) return containers
Get list of containers. Returns: list, all the ids of containers
codesearchnet
def update(self, forecasts, observations): for t, threshold in enumerate(self.thresholds[:-1]): self.frequencies.loc[t, "Positive_Freq"] += np.count_nonzero((threshold <= forecasts) & (forecasts < self.thresholds[t+1]) & (observations >= self.obs_threshold)) self.frequencies.loc[t, "Total_Freq"] += np.count_nonzero((threshold <= forecasts) & (forecasts < self.thresholds[t+1]))
Update the statistics with a set of forecasts and observations. Args: forecasts (numpy.ndarray): Array of forecast probability values observations (numpy.ndarray): Array of observation values
juraj-google-style
def delete(adapter, case_obj, update=False, existing_case=False): if update: adapter.add_case(existing_case) else: adapter.delete_case(case_obj) for file_type in ['vcf_path','vcf_sv_path']: if not case_obj.get(file_type): continue variant_file = case_obj[file_type] vcf_obj = get_vcf(variant_file) delete_variants( adapter=adapter, vcf_obj=vcf_obj, case_obj=case_obj, )
Delete a case and all of its variants from the database. Args: adapter: Connection to database case_obj(models.Case) update(bool): If we are in the middle of an update existing_case(models.Case): If something failed during an update we need to revert to the original case
juraj-google-style
def get(self, name): config = self.get_block(('interface %s' % name)) if ((name[0:2] in ['Et', 'Po']) and (not SWITCHPORT_RE.search(config, re.M))): return None resource = dict(name=name) resource.update(self._parse_address(config)) resource.update(self._parse_mtu(config)) return resource
Returns the specific IP interface properties The Ipinterface resource returns the following: * name (str): The name of the interface * address (str): The IP address of the interface in the form of A.B.C.D/E * mtu (int): The configured value for IP MTU. Args: name (string): The interface identifier to retrieve the configuration for Return: A Python dictionary object of key/value pairs that represents the current configuration of the node. If the specified interface does not exist then None is returned.
codesearchnet
def model_inference_fn(features, training, params): mg_batchn = functools.partial( tf.layers.batch_normalization, axis=-1, momentum=.95, epsilon=1e-5, center=True, scale=True, fused=True, training=training) mg_conv2d = functools.partial( tf.layers.conv2d, filters=params['conv_width'], kernel_size=3, padding="same", data_format="channels_last", use_bias=False) mg_global_avgpool2d = functools.partial( tf.layers.average_pooling2d, pool_size=go.N, strides=1, padding="valid", data_format="channels_last") def mg_activation(inputs): if FLAGS.use_swish: return tf.nn.swish(inputs) return tf.nn.relu(inputs) def residual_inner(inputs): conv_layer1 = mg_batchn(mg_conv2d(inputs)) initial_output = mg_activation(conv_layer1) conv_layer2 = mg_batchn(mg_conv2d(initial_output)) return conv_layer2 def mg_res_layer(inputs): residual = residual_inner(inputs) output = mg_activation(inputs + residual) return output def mg_squeeze_excitation_layer(inputs): channels = params['conv_width'] ratio = FLAGS.SE_ratio assert channels % ratio == 0 residual = residual_inner(inputs) pool = mg_global_avgpool2d(residual) fc1 = tf.layers.dense(pool, units=channels // ratio) squeeze = mg_activation(fc1) if FLAGS.use_SE_bias: fc2 = tf.layers.dense(squeeze, units=2*channels) gamma, bias = tf.split(fc2, 2, axis=3) else: gamma = tf.layers.dense(squeeze, units=channels) bias = 0 sig = tf.nn.sigmoid(gamma) scale = tf.reshape(sig, [-1, 1, 1, channels]) excitation = tf.multiply(scale, residual) + bias return mg_activation(inputs + excitation) initial_block = mg_activation(mg_batchn(mg_conv2d(features))) shared_output = initial_block for _ in range(params['trunk_layers']): if FLAGS.use_SE or FLAGS.use_SE_bias: shared_output = mg_squeeze_excitation_layer(shared_output) else: shared_output = mg_res_layer(shared_output) policy_conv = mg_conv2d( shared_output, filters=params['policy_conv_width'], kernel_size=1) policy_conv = mg_activation(mg_batchn(policy_conv, center=False, scale=False)) logits = tf.layers.dense( tf.reshape( policy_conv, [-1, params['policy_conv_width'] * go.N * go.N]), go.N * go.N + 1) policy_output = tf.nn.softmax(logits, name='policy_output') value_conv = mg_conv2d( shared_output, filters=params['value_conv_width'], kernel_size=1) value_conv = mg_activation(mg_batchn(value_conv, center=False, scale=False)) value_fc_hidden = mg_activation(tf.layers.dense( tf.reshape(value_conv, [-1, params['value_conv_width'] * go.N * go.N]), params['fc_width'])) value_output = tf.nn.tanh( tf.reshape(tf.layers.dense(value_fc_hidden, 1), [-1]), name='value_output') return policy_output, value_output, logits
Builds just the inference part of the model graph. Args: features: input features tensor. training: True if the model is training. params: A dictionary Returns: (policy_output, value_output, logits) tuple of tensors.
juraj-google-style
async def check_record(self, record, timeout=60): start_time = time.time() name, rr_data, r_type, ttl = self._extract_record_data(record) r_type_code = async_dns.types.get_code(r_type) resolvable_record = False retries = 0 sleep_time = 5 while not resolvable_record and \ timeout > retries * sleep_time: retries += 1 resolver_res = await self._resolver.query(name, r_type_code) possible_ans = resolver_res.an resolvable_record = \ await self._check_resolver_ans(possible_ans, name, rr_data, ttl, r_type_code) if not resolvable_record: await asyncio.sleep(sleep_time) if not resolvable_record: logging.info( f'Sending metric record-checker-failed: {record}.') else: final_time = float(time.time() - start_time) success_msg = (f'This record: {record} took {final_time} to ' 'register.') logging.info(success_msg)
Measures the time for a DNS record to become available. Query a provided DNS server multiple times until the reply matches the information in the record or until timeout is reached. Args: record (dict): DNS record as a dict with record properties. timeout (int): Time threshold to query the DNS server.
juraj-google-style
def _file_size(self, field): size = 0 try: handle = open(self._files[field], 'r') size = os.fstat(handle.fileno()).st_size handle.close() except: size = 0 self._file_lengths[field] = size return self._file_lengths[field]
Returns the file size for given file field. Args: field (str): File field Returns: int. File size
codesearchnet
def list(self, path, timeout=None): transport = DentFilesyncTransport(self.stream) transport.write_data('LIST', path, timeout) return (DeviceFileStat(dent_msg.name, dent_msg.mode, dent_msg.size, dent_msg.time) for dent_msg in transport.read_until_done('DENT', timeout))
List directory contents on the device. Args: path: List the contents of this directory. timeout: Timeout to use for this operation. Returns: Generator yielding DeviceFileStat tuples representing the contents of the requested path.
codesearchnet
def fillup_layer(layer, first_clbit): for nones in [i for i, x in enumerate(layer) if x is None]: layer[nones] = EmptyWire('═') if nones >= first_clbit else EmptyWire('─') return layer
Given a layer, replace the Nones in it with EmptyWire elements. Args: layer (list): The layer that contains Nones. first_clbit (int): The first wire that is classic. Returns: list: The new layer, with no Nones.
juraj-google-style
def mark_job_as_failed(self, job_id, exception, traceback): session = self.sessionmaker() job, orm_job = self._update_job_state( job_id, State.FAILED, session=session) job = copy(job) job.exception = exception job.traceback = traceback orm_job.obj = job session.add(orm_job) session.commit() session.close()
Mark the job as failed, and record the traceback and exception. Args: job_id: The job_id of the job that failed. exception: The exception object thrown by the job. traceback: The traceback, if any. Note (aron): Not implemented yet. We need to find a way for the concurrent.futures workers to throw back the error to us. Returns: None
juraj-google-style
def sample(reader, writer, n, start=None, stop=None, tsCol=None, writeSampleOnly=True): rows = list(reader) if tsCol is not None: ts = rows[0][tsCol] inc = rows[1][tsCol] - ts if start is None: start = 0 if stop is None: stop = len(rows) - 1 initialN = stop - start + 1 numDeletes = initialN - n for i in xrange(numDeletes): delIndex = random.randint(start, stop - i) del rows[delIndex] if writeSampleOnly: rows = rows[start:start + n] if tsCol is not None: ts = rows[0][tsCol] for row in rows: if tsCol is not None: row[tsCol] = ts ts += inc writer.appendRecord(row)
Samples n rows. Args: reader: A FileRecordStream object with input data. writer: A FileRecordStream object to write output data to. n: The number of elements to sample. start: The first row in the range to sample from. stop: The last row in the range to sample from. tsCol: If specified, the timestamp column to update. writeSampleOnly: If False, the rows before start are written before the sample and the rows after stop are written after the sample.
juraj-google-style
def posix_to_dt_str(posix): dt = datetime.datetime.utcfromtimestamp(posix) dt_str = dt.strftime(_DT_FORMAT) return dt_str + '.000Z'
Reverse of str_to_datetime. This is used by GCS stub to generate GET bucket XML response. Args: posix: A float of secs from unix epoch. Returns: A datetime str.
juraj-google-style
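`_DT_FORMAT` is not shown above; assuming the usual ISO-style `'%Y-%m-%dT%H:%M:%S'`, the conversion looks roughly like this:

```python
import datetime

def posix_to_dt_str(posix):
    # Assumed format string -- _DT_FORMAT is not defined in the snippet.
    dt = datetime.datetime.utcfromtimestamp(posix)
    return dt.strftime('%Y-%m-%dT%H:%M:%S') + '.000Z'

print(posix_to_dt_str(0))           # 1970-01-01T00:00:00.000Z
print(posix_to_dt_str(1234567890))  # 2009-02-13T23:31:30.000Z
```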
def select(self, attr, default=None): return List([_select(item, attr, default) for item in self])
Select a given attribute (or chain of attributes) from the objects within the list. Args: attr (str): attributes to be selected (with initial `.` omitted) default (any): value to return if given element in list doesn't contain desired attribute Returns: nhl.List: list of selected attribute values
juraj-google-style
def profile_python(self, options): opts = _build_options(options) tfprof_node = tfprof_output_pb2.MultiGraphNodeProto() try: tfprof_node.ParseFromString(print_mdl.Profile('code'.encode('utf-8'), opts.SerializeToString())) except message.DecodeError as e: sys.stderr.write('Cannot parse returned proto: %s.\n' % e) return tfprof_node
Profile the statistics of the Python codes. By default, it shows the call stack from root. To avoid redundant output, you may use options to filter as below options['show_name_regexes'] = ['.*my_code.py.*'] Args: options: A dict of options. See core/profiler/g3doc/options.md. Returns: a MultiGraphNodeProto that records the results.
github-repos
def __init__(self, stack_name, region, cf_client): try: self._stack_name = stack_name self._region = region self._cf_client = cf_client except Exception: raise SystemError
StackTool is a simple tool to print some specific data about a CloudFormation stack. Args: stack_name - name of the stack of interest region - AWS region where the stack was created Returns: not a damn thing Raises: SystemError - if everything isn't just right
juraj-google-style
def sas_logical_jbods(self): if (not self.__sas_logical_jbods): self.__sas_logical_jbods = SasLogicalJbods(self.__connection) return self.__sas_logical_jbods
Gets the SAS Logical JBODs API client. Returns: SasLogicalJbod:
codesearchnet
def get_num_bytes(self, batch: Sequence[torch.Tensor]) -> int: return sum((el.element_size() for tensor in batch for el in tensor))
Returns: The number of bytes of data for a batch of Tensors.
github-repos
def __init__(self, ca_cert=None, worker_cls=None, private_key=None): self.ca_cert = ca_cert if private_key is None: private_key = config.CONFIG.Get("Client.private_key", default=None) self.server_certificate = None self.http_manager = self.http_manager_class() self.communicator = ClientCommunicator(private_key=private_key) self.timer = Timer() self.last_enrollment_time = 0 self.last_foreman_check = 0 if worker_cls: self.client_worker = worker_cls(client=self) else: self.client_worker = GRRClientWorker(client=self) self.client_worker.start()
Constructor. Args: ca_cert: String representation of a CA certificate to use for checking server certificate. worker_cls: The client worker class to use. Defaults to GRRClientWorker. private_key: The private key for this client. Defaults to config Client.private_key.
juraj-google-style
def save(model, filepath, overwrite, include_optimizer, signatures=None, options=None, save_traces=True): if not overwrite and os.path.exists(filepath): proceed = ask_to_proceed_with_overwrite(filepath) if not proceed: return if save_traces: if save_impl.should_skip_serialization(model): saving_utils.raise_model_input_error(model) if not include_optimizer: orig_optimizer = model.optimizer model.optimizer = None model._delete_tracking('optimizer') with K.deprecated_internal_learning_phase_scope(0): with utils.keras_option_scope(save_traces): saved_nodes, node_paths = save_lib.save_and_return_nodes(model, filepath, signatures, options) metadata = generate_keras_metadata(saved_nodes, node_paths) with gfile.GFile(os.path.join(filepath, constants.SAVED_METADATA_PATH), 'wb') as w: w.write(metadata.SerializeToString(deterministic=True)) if not include_optimizer: model.optimizer = orig_optimizer
Saves a model as a SavedModel to the filepath. Args: model: Keras model instance to be saved. filepath: String path to save the model. overwrite: whether to overwrite the existing filepath. include_optimizer: If True, save the model's optimizer state. signatures: Signatures to save with the SavedModel. Applicable to the 'tf' format only. Please see the `signatures` argument in `tf.saved_model.save` for details. options: (only applies to SavedModel format) `tf.saved_model.SaveOptions` object that specifies options for saving to SavedModel. save_traces: (only applies to SavedModel format) When enabled, the SavedModel will store the function traces for each layer. This can be disabled, so that only the configs of each layer are stored. Defaults to `True`. Disabling this will decrease serialization time and reduce file size, but it requires that all custom layers/models implement a `get_config()` method. Raises: ValueError: if the model's inputs have not been defined.
github-repos
def reserveIdentifier(self, pid, vendorSpecific=None): response = self.reserveIdentifierResponse(pid, vendorSpecific) return self._read_dataone_type_response(response, 'Identifier', vendorSpecific)
See Also: reserveIdentifierResponse() Args: pid: vendorSpecific: Returns:
juraj-google-style
def _BroadcastMul(vec, mat): vec = array_ops.expand_dims(vec, -1) return vec * mat
Multiply after broadcasting vec to match dimensions of mat. Args: vec: A 1-D tensor of dimension [D0] mat: A 2-D tensor of dimension [D0, D1] Returns: A tensor of dimension [D0, D1], the result of vec * mat
github-repos
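The shape logic is easiest to see with a tiny NumPy stand-in (illustration only; the real helper uses `array_ops.expand_dims` on tensors):

```python
import numpy as np

vec = np.array([1.0, 2.0])                 # shape [D0] = [2]
mat = np.array([[10.0, 20.0],
                [30.0, 40.0]])             # shape [D0, D1] = [2, 2]

# expand_dims(vec, -1) -> shape [2, 1], which broadcasts across mat's columns.
print(vec[:, np.newaxis] * mat)
# [[10. 20.]
#  [60. 80.]]
```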
def _process_using_meta_feature_generator(self, X, meta_feature_generator): all_learner_meta_features = [] for (idx, base_learner) in enumerate(self.base_learners): single_learner_meta_features = getattr(base_learner, self.meta_feature_generators[idx])(X) if (len(single_learner_meta_features.shape) == 1): single_learner_meta_features = single_learner_meta_features.reshape((- 1), 1) all_learner_meta_features.append(single_learner_meta_features) all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1) out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features) return out
Process using secondary learner meta-feature generator Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba, this internal method gives the ability to use any string. Just make sure secondary learner has the method. Args: X (array-like): Features array meta_feature_generator (str, unicode): Method for use by secondary learner
codesearchnet
def serialized_tensors_to_saveable_cache(serialized_tensors): saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary() for obj, tensor_dict in serialized_tensors.items(): if not tensor_dict: continue if isinstance(obj, SaveableCompatibilityConverter): trackable_obj = obj.obj saveables_cache[trackable_obj] = {} for saveable in obj.saveables: local_name = trackable_utils.extract_local_name(saveable.name) saveables_cache[trackable_obj][local_name] = [saveable] continue specs = [] local_names = [] prefix = saveable_compat.get_saveable_name(obj) or '' for checkpoint_key, maybe_tensor in tensor_dict.items(): if not isinstance(maybe_tensor, dict): maybe_tensor = {'': maybe_tensor} for slice_spec, tensor in maybe_tensor.items(): if isinstance(tensor, saveable_object.SaveSpec): specs.append(tensor) else: specs.append(saveable_object.SaveSpec(tensor, slice_spec, checkpoint_key)) local_names.append(trackable_utils.extract_local_name(checkpoint_key, prefix)) object_name = trackable_utils.extract_object_name(next(iter(tensor_dict.keys()))) saveables_cache[obj] = {trackable_utils.SERIALIZE_TO_TENSORS_NAME: [TrackableSaveable(obj, specs, object_name, local_names=local_names, prefix=prefix)]} return saveables_cache
Converts a tensor dict to a SaveableObject cache. Args: serialized_tensors: Map from Trackable to a tensor dict. The tensor dict maps checkpoint key (-> slice_spec) -> Tensor Returns: A dict mapping Trackable objects to a map from local savable name to SaveableObject.
github-repos
def __init__(self, *args, exit_code=1, **kwargs):
    self.exit_code = exit_code
    super(ScriptWorkerTaskException, self).__init__(*args, **kwargs)
Initialize ScriptWorkerTaskException. Args: *args: These are passed on via super(). exit_code (int, optional): The exit_code we should exit with when this exception is raised. Defaults to 1 (failure). **kwargs: These are passed on via super().
juraj-google-style
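
Assuming the class above derives from a standard Exception subclass, usage might look like this; the message text and exit code are made up for illustration:

try:
    raise ScriptWorkerTaskException("artifact upload failed", exit_code=3)
except ScriptWorkerTaskException as exc:
    print(exc, exc.exit_code)   # artifact upload failed 3
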
def address_to_ip(address):
    address_parts = address.split(":")
    ip_address = socket.gethostbyname(address_parts[0])
    if ip_address == "127.0.0.1":
        ip_address = get_node_ip_address()
    return ":".join([ip_address] + address_parts[1:])
Convert a hostname to a numerical IP address in an address. This should be a no-op if the address already contains an actual numerical IP address. Args: address: This can be either a string containing a hostname (or an IP address) and a port, or it can be just an IP address. Returns: The same address but with the hostname replaced by a numerical IP address.
juraj-google-style
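
A self-contained variant using only the standard library; it skips the project-specific get_node_ip_address() fallback, so the 127.0.0.1 special case is not handled, and the function name resolve_address is illustrative:

import socket

def resolve_address(address):
    # Resolve the host part and keep any port suffix unchanged.
    host, _, port = address.partition(":")
    ip_address = socket.gethostbyname(host)
    return ip_address + (":" + port if port else "")

print(resolve_address("localhost:6379"))   # e.g. 127.0.0.1:6379
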
def _DisableNetworkManager(self, interfaces, logger):
    for interface in interfaces:
        interface_config = os.path.join(self.network_path, 'ifcfg-%s' % interface)
        if os.path.exists(interface_config):
            self._ModifyInterface(interface_config, 'DEVICE', interface, replace=False)
            self._ModifyInterface(interface_config, 'NM_CONTROLLED', 'no', replace=True)
        else:
            with open(interface_config, 'w') as interface_file:
                interface_content = ['
                interface_file.write('\n'.join(interface_content))
            logger.info('Created config file for interface %s.', interface)
Disable network manager management on a list of network interfaces. Args: interfaces: list of string, the output device names to enable. logger: logger object, used to write to SysLog and serial port.
codesearchnet
def get_grouping_from_attentions(attentions, hw_shape):
    attn_maps = []
    with torch.no_grad():
        prev_attn_masks = None
        for attn_masks in attentions:
            attn_masks = attn_masks.permute(0, 2, 1).contiguous()
            if prev_attn_masks is None:
                prev_attn_masks = attn_masks
            else:
                prev_attn_masks = prev_attn_masks @ attn_masks
            cur_attn_map = resize_attention_map(prev_attn_masks.permute(0, 2, 1).contiguous(), *hw_shape)
            attn_maps.append(cur_attn_map)
    final_grouping = attn_maps[-1]
    return final_grouping
Args: attentions (`tuple(torch.FloatTensor)`: tuple of attention maps returned by `GroupViTVisionTransformer` hw_shape (`tuple(int)`): height and width of the output attention map Returns: `torch.Tensor`: the attention map of shape [batch_size, groups, height, width]
github-repos
def setUdpJoinerPort(self, portNumber):
    print '%s call setUdpJoinerPort' % self.port
    cmd = 'joinerport %d' % portNumber
    print cmd
    return self.__sendCommand(cmd)[0] == 'Done'
set Joiner UDP Port Args: portNumber: Joiner UDP Port number Returns: True: successfully set Joiner UDP Port False: failed to set Joiner UDP Port
juraj-google-style
def read_string(self, key, embedded=True):
    data = None
    if key is not None:
        key_type = self.variable_type(key)
        data = self.db.read(key.strip())
        if data is not None:
            try:
                data = json.loads(data)
                if embedded:
                    data = self.read_embedded(data, key_type)
                if data is not None:
                    data = u'{}'.format(data)
            except ValueError as e:
                err = u'Failed loading JSON data ({}). Error: ({})'.format(data, e)
                self.tcex.log.error(err)
    else:
        self.tcex.log.warning(u'The key field was None.')
    return data
Read method of CRUD operation for string data. Args: key (string): The variable to read from the DB. embedded (boolean): Resolve embedded variables. Returns: (string): Results retrieved from DB.
juraj-google-style
def cancelMktData(self, contract: Contract):
    ticker = self.ticker(contract)
    reqId = self.wrapper.endTicker(ticker, 'mktData')
    if reqId:
        self.client.cancelMktData(reqId)
    else:
        self._logger.error(
            'cancelMktData: '
            f'No reqId found for contract {contract}')
Unsubscribe from realtime streaming tick data. Args: contract: The exact contract object that was used to subscribe with.
juraj-google-style
def is_rotation(self, tol=0.001, include_improper=True):
    det = np.linalg.det(self)
    if include_improper:
        det = np.abs(det)
    return (np.abs(self.inv - self.trans) < tol).all() and np.abs(det - 1.0) < tol
Test whether the tensor is a valid rotation matrix, by checking that the inverse is equal to the transpose and that the determinant is equal to one, within the specified tolerance. Args: tol (float): tolerance for both tests, i.e. whether the determinant is one and the inverse is equal to the transpose include_improper (bool): whether to include improper rotations in the determination of validity
codesearchnet
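
The same check can be reproduced with plain NumPy arrays, which makes the role of include_improper easier to see; the function name looks_like_rotation is illustrative, not part of the library above:

import numpy as np

def looks_like_rotation(m, tol=1e-3, include_improper=True):
    det = np.linalg.det(m)
    if include_improper:
        det = np.abs(det)   # accept det == -1 (improper rotations) as well
    return np.allclose(np.linalg.inv(m), m.T, atol=tol) and abs(det - 1.0) < tol

theta = np.pi / 3
proper = np.array([[np.cos(theta), -np.sin(theta)],
                   [np.sin(theta),  np.cos(theta)]])
improper = np.diag([1.0, -1.0])                              # reflection, det = -1
print(looks_like_rotation(proper))                           # True
print(looks_like_rotation(improper, include_improper=False)) # False
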
def update(self, puts, deletes):
    with self._lmdb.begin(write=True, buffers=True) as txn:
        cursor = txn.cursor(self._main_db)
        for key in deletes:
            if not cursor.set_key(key.encode()):
                continue
            value = self._deserializer(bytes(cursor.value()))
            cursor.delete()
            for index_db, index_key_fn in self._indexes.values():
                index_keys = index_key_fn(value)
                index_cursor = txn.cursor(index_db)
                for idx_key in index_keys:
                    if index_cursor.set_key(idx_key):
                        index_cursor.delete()
        for key, value in puts:
            packed = self._serializer(value)
            cursor.put(key.encode(), packed, overwrite=True)
            for index_db, index_key_fn in self._indexes.values():
                index_keys = index_key_fn(value)
                index_cursor = txn.cursor(index_db)
                for idx_key in index_keys:
                    index_cursor.put(idx_key, key.encode())
    self.sync()
Applies the given puts and deletes atomically. Args: puts (:iterable:`tuple`): an iterable of key/value pairs to insert deletes (:iterable:str:) an iterable of keys to delete
codesearchnet
def Group(params, name=None, type=None):
    atts = {}
    if name:
        atts['name'] = name
    if type:
        atts['type'] = type
    g = objectify.Element('Group', attrib=atts)
    for p in params:
        g.append(p)
    return g
Groups together Params for adding under the 'What' section. Args: params(list of :func:`Param`): Parameter elements to go in this group. name(str): Group name. NB ``None`` is valid, since the group may be best identified by its type. type(str): Type of group, e.g. 'complex' (for real and imaginary).
codesearchnet
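
A hedged sketch of how the helper above might be exercised, assuming it is in scope and lxml is installed; the Param factory below is a simplified stand-in for the one the real library provides:

from lxml import etree, objectify

def Param(name, value):
    # Simplified stand-in for the library's Param helper.
    return objectify.Element('Param', attrib={'name': name, 'value': str(value)})

group = Group([Param('real', 1.0), Param('imag', 0.5)], name='flux', type='complex')
print(etree.tostring(group, pretty_print=True).decode())
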
def _remove_curly_braces(text):
    current_pos = 0
    depth = 0
    ret = ''
    for match in re.finditer('[{}]', text):
        if depth == 0:
            ret += text[current_pos:match.start()]
        depth += 1 if text[match.start()] == '{' else -1
        current_pos = match.end()
    if depth != 0:
        pass
    else:
        ret += text[current_pos:]
    return ret
Remove everything in curly braces. Curly braces may be nested, so we keep track of depth. Args: text: a string Returns: a string
codesearchnet
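
A quick check of the behaviour, assuming the function above plus an `import re` at module level:

print(_remove_curly_braces("keep {drop {nested}} this"))    # -> "keep  this"
print(_remove_curly_braces("unmatched {brace is dropped"))  # -> "unmatched "
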
def delete(self):
    headers = self.headers
    endpoint = ('https:
    r = requests.delete(endpoint, headers=headers)
    check_response(r)
Deletes this Folder. Raises: AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.
codesearchnet
def __add__(self, other):
    try:
        other = as_dimension(other)
    except (TypeError, ValueError):
        return NotImplemented
    if self._value is None or other.value is None:
        return Dimension(None)
    else:
        return Dimension(self._value + other.value)
Returns the sum of `self` and `other`. Dimensions are summed as follows: ```python tf.compat.v1.Dimension(m) + tf.compat.v1.Dimension(n) == tf.compat.v1.Dimension(m + n) tf.compat.v1.Dimension(m) + tf.compat.v1.Dimension(None) # equiv. to tf.compat.v1.Dimension(None) tf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(n) # equiv. to tf.compat.v1.Dimension(None) tf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(None) # equiv. to tf.compat.v1.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the sum of `self` and `other`.
github-repos
def get_container_instance_logs(access_token, subscription_id, resource_group,
                                container_group_name, container_name=None):
    if container_name is None:
        container_name = container_group_name
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourcegroups/', resource_group,
                        '/providers/Microsoft.ContainerInstance/ContainerGroups/',
                        container_group_name,
                        '/containers/', container_name,
                        '/logs?api-version=', CONTAINER_API])
    return do_get(endpoint, access_token)
Get the container logs for containers in a container group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. container_name (str): Optional name of a container in the group. Returns: HTTP response. Container logs.
juraj-google-style
def apply_grad(self, grad, local_step=0, name=None):
    grad = ops.convert_to_tensor(grad, self._dtype)
    grad.get_shape().assert_is_compatible_with(self._shape)
    local_step = math_ops.cast(ops.convert_to_tensor(local_step), _dtypes.int64)
    return gen_data_flow_ops.resource_accumulator_apply_gradient(
        self._accumulator_ref, local_step=local_step, gradient=grad, name=name)
Attempts to apply a gradient to the accumulator. The attempt is silently dropped if the gradient is stale, i.e., local_step is less than the accumulator's global time step. Args: grad: The gradient tensor to be applied. local_step: Time step at which the gradient was computed. name: Optional name for the operation. Returns: The operation that (conditionally) applies a gradient to the accumulator. Raises: ValueError: If grad is of the wrong shape
github-repos
def _add_impact_severity(self, variant_obj, gemini_variant):
    gemini_impact = gemini_variant['impact_severity']
    if gemini_impact == 'MED':
        gemini_impact = 'MEDIUM'
    variant_obj.impact_severity = gemini_impact
Add the impact severity for the most severe consequence Args: variant_obj (puzzle.models.Variant) gemini_variant (GeminiQueryRow)
juraj-google-style
def extract_response(self, extractors):
    if not extractors:
        return {}
    logger.log_debug('start to extract from response object.')
    extracted_variables_mapping = OrderedDict()
    extract_binds_order_dict = utils.ensure_mapping_format(extractors)
    for key, field in extract_binds_order_dict.items():
        extracted_variables_mapping[key] = self.extract_field(field)
    return extracted_variables_mapping
extract value from requests.Response and store in OrderedDict. Args: extractors (list): [ {"resp_status_code": "status_code"}, {"resp_headers_content_type": "headers.content-type"}, {"resp_content": "content"}, {"resp_content_person_first_name": "content.person.name.first_name"} ] Returns: OrderDict: variable binds ordered dict
codesearchnet
def from_str(text):
    segment_list = chat_message_parser.parse(text)
    return [ChatMessageSegment(segment.text, **segment.params)
            for segment in segment_list]
Construct :class:`ChatMessageSegment` list parsed from a string. Args: text (str): Text to parse. May contain line breaks, URLs and formatting markup (simplified Markdown and HTML) to be converted into equivalent segments. Returns: List of :class:`ChatMessageSegment` objects.
juraj-google-style
def similar(self, **kwargs):
    path = self._get_id_path('similar')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
Get the similar TV series for a specific TV series id. Args: page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. append_to_response: (optional) Comma separated, any TV method. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
                        category, module_name):
    module_name = module_name.replace(':
    return (get_image_path(image_lists, label_name, index, bottleneck_dir,
                           category) + '_' + module_name + '.txt')
Returns a path to a bottleneck file for a label at the given index. Args: image_lists: OrderedDict of training images for each label. label_name: Label string we want to get an image for. index: Integer offset of the image we want. This will be moduloed by the available number of images for the label, so it can be arbitrarily large. bottleneck_dir: Folder string holding cached files of bottleneck values. category: Name string of set to pull images from - training, testing, or validation. module_name: The name of the image module being used. Returns: File system path string to an image that meets the requested parameters.
codesearchnet
def touch(path, content="", encoding="utf-8", overwrite=False):
    path = os.path.abspath(path)
    if not overwrite and os.path.exists(path):
        logger.warning('touch: "%s" already exists', path)
        return False
    try:
        logger.info("touch: %s", path)
        with io.open(path, "wb") as f:
            if not isinstance(content, six.binary_type):
                content = content.encode(encoding)
            f.write(content)
        return True
    except Exception as e:
        logger.error("touch: %s failed. Error: %s", path, e)
        return False
Create a file at the given path if it does not already exists. Args: path (str): Path to the file. content (str): Optional content that will be written in the file. encoding (str): Encoding in which to write the content. Default: ``utf-8`` overwrite (bool): Overwrite the file if exists. Returns: bool: True if the operation is successful, False otherwise.
juraj-google-style
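
A usage sketch, assuming the function above together with its module-level imports (io, os, six) and a configured logger:

import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "notes.txt")
print(touch(path, content="hello"))                  # True: file created
print(touch(path, content="again"))                  # False: exists, overwrite=False
print(touch(path, content="again", overwrite=True))  # True: overwritten
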
def get_individual_positions(individuals):
    ind_pos = {}
    if individuals:
        for i, ind in enumerate(individuals):
            ind_pos[ind] = i
    return ind_pos
Return a dictionary with individual positions Args: individuals(list): A list with vcf individuals in correct order Returns: ind_pos(dict): Map from ind_id -> index position
juraj-google-style
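
A quick example of the mapping it returns, assuming the function above; the sample names are made up:

print(get_individual_positions(["father", "mother", "proband"]))
# {'father': 0, 'mother': 1, 'proband': 2}
print(get_individual_positions(None))
# {}
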
def alias_tool(self, context_name, tool_name, tool_alias):
    data = self._context(context_name)
    aliases = data['tool_aliases']
    if tool_name in aliases:
        raise SuiteError('Tool %r in context %r is already aliased to %r'
                         % (tool_name, context_name, aliases[tool_name]))
    self._validate_tool(context_name, tool_name)
    aliases[tool_name] = tool_alias
    self._flush_tools()
Register an alias for a specific tool. Note that a tool alias takes precedence over a context prefix/suffix. Args: context_name (str): Context containing the tool. tool_name (str): Name of tool to alias. tool_alias (str): Alias to give the tool.
codesearchnet
def prepend(self, line, font_attr_segs=None):
    other = RichTextLines(line)
    if font_attr_segs:
        other.font_attr_segs[0] = font_attr_segs
    self._extend_before(other)
Prepend (i.e., add to the front) a single line of text. Args: line: (str) The text to be added to the front. font_attr_segs: (list of tuples) Font attribute segments of the appended line.
github-repos
def add_answer_for_student(student_item, vote, rationale):
    answers = get_answers_for_student(student_item)
    answers.add_answer(vote, rationale)
    sub_api.create_submission(student_item, {
        ANSWER_LIST_KEY: answers.get_answers_as_list()
    })
Add an answer for a student to the backend Args: student_item (dict): The location of the problem this submission is associated with, as defined by a course, student, and item. vote (int): the option that student voted for rationale (str): the reason why the student vote for the option
codesearchnet
def sysctl(command):
    out = subprocess.check_output(command)
    result = out.split(b" ")[1]
    try:
        return int(result)
    except ValueError:
        return result
Run a sysctl command and parse the output. Args: command: A sysctl command with an argument, for example, ["sysctl", "hw.memsize"]. Returns: The parsed output.
juraj-google-style
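
Example on macOS, where `sysctl hw.memsize` prints `hw.memsize: <bytes>`; this assumes the function above and an `import subprocess` at module level (keys and output format differ on Linux):

print(sysctl(["sysctl", "hw.memsize"]))   # e.g. 17179869184 (total RAM in bytes)
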