Columns: code (string, 20 to 4.93k chars), docstring (string, 33 to 1.27k chars), source (string, 3 classes)
def update_target_state(self, value: str, force: bool=True) -> datetime: value = value.lower() if (not force): current_state = self.current_state if (current_state == 'unknown'): raise RuntimeError("Unable to set target state when current state is 'unknown'") allowed_target_states = self._allowed_target_states[current_state] LOG.debug('Updating target state of %s to %s', self._id, value) if (value not in allowed_target_states): raise ValueError("Invalid target state: '{}'. {} can be commanded to states: {}".format(value, current_state, allowed_target_states)) return self._update_state('target', value)
Set the target state. Args: value (str): New value for target state force (bool): If True, ignore allowed transitions Returns: datetime, update timestamp Raises: RuntimeError, if it is not currently possible to set the target state. ValueError, if the specified target state is not allowed.
codesearchnet
def add_object_to_scope(self, obj): if isinstance(obj, Computer): self.add_object_to_path(obj, 'scope/computers') elif isinstance(obj, ComputerGroup): self.add_object_to_path(obj, 'scope/computer_groups') elif isinstance(obj, Building): self.add_object_to_path(obj, 'scope/buildings') elif isinstance(obj, Department): self.add_object_to_path(obj, 'scope/departments') else: raise TypeError
Add an object to the appropriate scope block. Args: obj: JSSObject to add to scope. Accepted subclasses are: Computer ComputerGroup Building Department Raises: TypeError if invalid obj type is provided.
codesearchnet
def _format_src_url(self, path, caller_system): path = '%s/%s' % (self._endpoint, self.relpath(path)) if caller_system is not self: try: path = '%s?%s' % (path, self._storage_parameters['sas_token']) except KeyError: pass return path
Ensure the path is absolute and uses the correct URL format for the cross-account Azure storage copy function. Args: path (str): Path or URL. caller_system (pycosio.storage.azure._AzureBaseSystem subclass): System calling this method (can be another Azure system). Returns: str: URL.
juraj-google-style
def get_temporary_scripts_path(self): result = None if (len(self.config.temporary_scripts_path) > 0): if os.path.isdir(self.config.temporary_scripts_path): result = self.config.temporary_scripts_path return result
Get path for temporary scripts. Returns: str: path for temporary scripts or None if not set
codesearchnet
def _execute(self, command, data=None, unpack=True): if (not data): data = {} if (self.session_id is not None): data.setdefault('session_id', self.session_id) data = self._wrap_el(data) res = self.remote_invoker.execute(command, data) ret = WebDriverResult.from_object(res) ret.raise_for_status() ret.value = self._unwrap_el(ret.value) if (not unpack): return ret return ret.value
Private method to execute a command. Args: command(Command): The defined command. data(dict): The URI variables and body. unpack(bool): Whether to unpack the value from the result. Returns: The unwrapped value field in the JSON response.
codesearchnet
def add_functions(spec_dict: Mapping[(str, Any)]) -> Mapping[(str, Any)]: spec_dict['functions']['list'] = [] spec_dict['functions']['list_long'] = [] spec_dict['functions']['list_short'] = [] spec_dict['functions']['primary'] = {} spec_dict['functions']['primary']['list_long'] = [] spec_dict['functions']['primary']['list_short'] = [] spec_dict['functions']['modifier'] = {} spec_dict['functions']['modifier']['list_long'] = [] spec_dict['functions']['modifier']['list_short'] = [] spec_dict['functions']['to_short'] = {} spec_dict['functions']['to_long'] = {} for func_name in spec_dict['functions']['info']: abbreviated_name = spec_dict['functions']['info'][func_name]['abbreviation'] spec_dict['functions']['list'].extend((func_name, abbreviated_name)) spec_dict['functions']['list_long'].append(func_name) spec_dict['functions']['list_short'].append(abbreviated_name) if (spec_dict['functions']['info'][func_name]['type'] == 'primary'): spec_dict['functions']['primary']['list_long'].append(func_name) spec_dict['functions']['primary']['list_short'].append(abbreviated_name) else: spec_dict['functions']['modifier']['list_long'].append(func_name) spec_dict['functions']['modifier']['list_short'].append(abbreviated_name) spec_dict['functions']['to_short'][abbreviated_name] = abbreviated_name spec_dict['functions']['to_short'][func_name] = abbreviated_name spec_dict['functions']['to_long'][abbreviated_name] = func_name spec_dict['functions']['to_long'][func_name] = func_name return spec_dict
Add function keys to spec_dict Args: spec_dict (Mapping[str, Any]): bel specification dictionary Returns: Mapping[str, Any]: bel specification dictionary with added function keys
codesearchnet
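A minimal usage sketch of add_functions; the spec_dict below is a hypothetical two-function fragment, not the real BEL specification:

# Hypothetical minimal spec_dict with one primary and one modifier function.
spec_dict = {
    'functions': {
        'info': {
            'proteinAbundance': {'abbreviation': 'p', 'type': 'primary'},
            'activity': {'abbreviation': 'act', 'type': 'modifier'},
        }
    }
}

spec_dict = add_functions(spec_dict)

# The helper fills in the lookup lists and the long/short name maps.
assert spec_dict['functions']['to_long']['p'] == 'proteinAbundance'
assert 'act' in spec_dict['functions']['modifier']['list_short']
assert 'proteinAbundance' in spec_dict['functions']['list']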
def combine_first_two_dimensions(x): ret = tf.reshape(x, tf.concat([[-1], common_layers.shape_list(x)[2:]], 0)) old_shape = x.get_shape().dims a, b = old_shape[:2] new_shape = [a * b if a and b else None] + old_shape[2:] ret.set_shape(new_shape) return ret
Reshape x so that the first two dimensions become one. Args: x: a Tensor with shape [a, b, ...] Returns: a Tensor with shape [ab, ...]
juraj-google-style
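A quick shape check of the merge (a sketch, assuming TensorFlow and the tensor2tensor common_layers module used by the function are importable):

import tensorflow as tf

x = tf.zeros([2, 3, 4])              # shape [a, b, ...] with a=2, b=3
y = combine_first_two_dimensions(x)
print(y.shape)                       # (6, 4): the first two dimensions are merged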
def _head(self, client_kwargs): with _handle_oss_error(): bucket = self._get_bucket(client_kwargs) if 'key' in client_kwargs: return bucket.head_object( key=client_kwargs['key']).headers return bucket.get_bucket_info().headers
Returns object HTTP header. Args: client_kwargs (dict): Client arguments. Returns: dict: HTTP header.
juraj-google-style
def Process(self, parser_mediator, cache=None, database=None, **unused_kwargs): if (cache is None): raise ValueError('Missing cache value.') if (database is None): raise ValueError('Missing database value.') super(SQLitePlugin, self).Process(parser_mediator) for (query, callback_method) in self.QUERIES: if parser_mediator.abort: break callback = getattr(self, callback_method, None) if (callback is None): logger.warning('[{0:s}] missing callback method: {1:s} for query: {2:s}'.format(self.NAME, callback_method, query)) continue self._ParseQuery(parser_mediator, database, query, callback, cache)
Determine if this is the right plugin for this database. This function takes a SQLiteDatabase object and compares the list of required tables against the available tables in the database. If all the tables defined in REQUIRED_TABLES are present in the database then this plugin is considered to be the correct plugin and the function will return back a generator that yields event objects. Args: parser_mediator (ParserMediator): parser mediator. cache (Optional[SQLiteCache]): cache. database (Optional[SQLiteDatabase]): database. Raises: ValueError: If the database or cache value are missing.
codesearchnet
def solve(self, print_solution=False): self._cp_solver = cp_model.CpSolver() status = self._cp_solver.Solve(self._model) if status != cp_model.OPTIMAL: if status == cp_model.FEASIBLE: logging.warning("A potentially suboptimal solution was found.") else: logging.error("Solver returned status %d.", status) raise SolverError("The solver could not solve the problem and returned " "status {}.".format(status)) if print_solution: print_cp_model_solution.print_solution(self._model, self._cp_solver) layout = [] for mtf_dimension_name in ( self._layout_validator.splittable_mtf_dimension_names): for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size): value = self._cp_solver.Value(self._global_vars[(mtf_dimension_name, mesh_dimension_name)]) if value: layout.append(mtf_dimension_name + ":" + mesh_dimension_name) layout.sort() return ";".join(layout)
Solves the current integer program and returns the computed layout. Args: print_solution: An optional boolean indicating whether to print the full solution in human-readable format. Returns: The computed layout (as a string). Raises: SolverError: the internal solver could not find a solution, or the solution found is infeasible.
juraj-google-style
def __init__(self, cl_environments=None, compile_flags=None, double_precision=None): super().__init__() self._cl_environments = cl_environments self._compile_flags = compile_flags self._double_precision = double_precision
Updates the runtime settings. Args: cl_environments (list of CLEnvironment): the new CL environments we wish to use for future computations compile_flags (list): the list of compile flags to use during analysis. double_precision (boolean): if we compute in double precision or not
juraj-google-style
def __init__(self, client_path, data, chunk_index, total_chunks, offset, total_size): self.client_path = client_path self.data = data self.offset = offset self.total_size = total_size self.chunk_index = chunk_index self.total_chunks = total_chunks
Initializes StreamedFileChunk object. Args: client_path: db.ClientPath identifying the file. data: bytes with chunk's contents. chunk_index: Index of this chunk (relative to the sequence of chunks corresponding to the file). total_chunks: Total number of chunks corresponding to a given file. offset: Offset of this chunk in bytes from the beginning of the file. total_size: Total size of the file in bytes.
juraj-google-style
def tcp_ping(task: Task, ports: List[int], timeout: int=2, host: Optional[str]=None) -> Result: if isinstance(ports, int): ports = [ports] if isinstance(ports, list): if (not all((isinstance(port, int) for port in ports))): raise ValueError("Invalid value for 'ports'") else: raise ValueError("Invalid value for 'ports'") host = (host or task.host.hostname) result = {} for port in ports: s = socket.socket() s.settimeout(timeout) try: status = s.connect_ex((host, port)) if (status == 0): connection = True else: connection = False except (socket.gaierror, socket.timeout, socket.error): connection = False finally: s.close() result[port] = connection return Result(host=task.host, result=result)
Tests connection to a tcp port and tries to establish a three way handshake. To be used for network discovery or testing. Arguments: ports (list of int): tcp ports to ping timeout (int, optional): defaults to 2 host (string, optional): defaults to ``hostname`` Returns: Result object with the following attributes set: * result (``dict``): Contains port numbers as keys with True/False as values
codesearchnet
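The reachability test at the heart of tcp_ping is a plain connect_ex call per port; a standalone sketch outside the nornir Task wrapper, with a placeholder host and ports:

import socket

def port_is_open(host: str, port: int, timeout: int = 2) -> bool:
    # connect_ex returns 0 when the TCP three-way handshake succeeds.
    s = socket.socket()
    s.settimeout(timeout)
    try:
        return s.connect_ex((host, port)) == 0
    except (socket.gaierror, socket.timeout, socket.error):
        return False
    finally:
        s.close()

result = {port: port_is_open('192.0.2.10', port) for port in [22, 443]}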
def publishCombinedWebMap(self, maps_info, webmaps): if self.securityhandler is None: print ("Security handler required") return admin = None map_results = None map_info = None operationalLayers = None tableLayers = None item = None response = None opLays = None operationalLayers = None tblLays = None tblLayer = None itemInfo = None try: admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler) map_results = [] for map_info in maps_info: operationalLayers = [] tableLayers = [] for webmap in webmaps: item = admin.content.getItem(itemId=webmap) response = item.itemData() if 'operationalLayers' in response: opLays = [] for opLayer in response['operationalLayers']: opLays.append(opLayer) opLays.extend(operationalLayers) operationalLayers = opLays if 'tables' in response: tblLays = [] for tblLayer in response['tables']: tblLays.append(tblLayer) tblLays.extend(tableLayers) tableLayers = tblLays if 'ReplaceTag' in map_info: itemInfo = {"ReplaceTag":map_info['ReplaceTag'] } else: itemInfo = {"ReplaceTag":"{WebMap}" } itemInfo['MapInfo'] = self._publishMap(config=map_info, replaceInfo=None, operationalLayers=operationalLayers, tableLayers=tableLayers) map_results.append(itemInfo) if not itemInfo is None: if not 'error' in itemInfo['MapInfo']['Results']: print ("%s webmap created" % itemInfo['MapInfo']['Name']) else: print (str(itemInfo['MapInfo']['Results'])) else: print ("Map not created") return map_results except Exception as e: line, filename, synerror = trace() raise common.ArcRestHelperError({ "function": "publishedCombinedWebMap", "line": line, "filename": filename, "synerror": synerror, }) finally: admin = None map_info = None tableLayers = None item = None response = None opLays = None operationalLayers = None tblLays = None tblLayer = None itemInfo = None del admin del map_info del tableLayers del item del response del opLays del operationalLayers del tblLays del tblLayer del itemInfo gc.collect()
Publishes a combination of web maps. Args: maps_info (list): A list of JSON configurations for the combined web maps to publish. webmaps (list): A list of web map item IDs whose operational and table layers are merged into each published map. Returns: list: A list of results from :py:meth:`arcrest.manageorg._content.UserItem.updateItem`.
juraj-google-style
def get_message(routing_key, properties, body): if properties.headers is None: _log.error( "Message (body=%r) arrived without headers. " "A publisher is misbehaving!", body, ) properties.headers = {} try: MessageClass = get_class(properties.headers["fedora_messaging_schema"]) except KeyError: _log.error( "Message (headers=%r, body=%r) arrived without a schema header." " A publisher is misbehaving!", properties.headers, body, ) MessageClass = Message try: severity = properties.headers["fedora_messaging_severity"] except KeyError: _log.error( "Message (headers=%r, body=%r) arrived without a severity." " A publisher is misbehaving! Defaulting to INFO.", properties.headers, body, ) severity = INFO if properties.content_encoding is None: _log.error("Message arrived without a content encoding") properties.content_encoding = "utf-8" try: body = body.decode(properties.content_encoding) except UnicodeDecodeError as e: _log.error( "Unable to decode message body %r with %s content encoding", body, properties.content_encoding, ) raise ValidationError(e) try: body = json.loads(body) except ValueError as e: _log.error("Failed to load message body %r, %r", body, e) raise ValidationError(e) message = MessageClass( body=body, topic=routing_key, properties=properties, severity=severity ) try: message.validate() _log.debug("Successfully validated message %r", message) except jsonschema.exceptions.ValidationError as e: _log.error("Message validation of %r failed: %r", message, e) raise ValidationError(e) return message
Construct a Message instance given the routing key, the properties and the body received from the AMQP broker. Args: routing_key (str): The AMQP routing key (will become the message topic) properties (pika.BasicProperties): the AMQP properties body (bytes): The encoded message body Returns: Message: The constructed and validated message instance. Raises: ValidationError: If message validation failed or the message body could not be decoded or loaded.
juraj-google-style
def get_random_distorted_bottlenecks(sess, image_lists, how_many, category, image_dir, input_jpeg_tensor, distorted_image, resized_input_tensor, bottleneck_tensor): class_count = len(image_lists.keys()) bottlenecks = [] ground_truths = [] for unused_i in range(how_many): label_index = random.randrange(class_count) label_name = list(image_lists.keys())[label_index] image_index = random.randrange((MAX_NUM_IMAGES_PER_CLASS + 1)) image_path = get_image_path(image_lists, label_name, image_index, image_dir, category) if (not tf.gfile.Exists(image_path)): tf.logging.fatal('File does not exist %s', image_path) jpeg_data = tf.gfile.GFile(image_path, 'rb').read() distorted_image_data = sess.run(distorted_image, {input_jpeg_tensor: jpeg_data}) bottleneck_values = sess.run(bottleneck_tensor, {resized_input_tensor: distorted_image_data}) bottleneck_values = np.squeeze(bottleneck_values) bottlenecks.append(bottleneck_values) ground_truths.append(label_index) return (bottlenecks, ground_truths)
Retrieves bottleneck values for training images, after distortions. If we're training with distortions like crops, scales, or flips, we have to recalculate the full model for every image, and so we can't use cached bottleneck values. Instead we find random images for the requested category, run them through the distortion graph, and then the full graph to get the bottleneck results for each. Args: sess: Current TensorFlow Session. image_lists: OrderedDict of training images for each label. how_many: The integer number of bottleneck values to return. category: Name string of which set of images to fetch - training, testing, or validation. image_dir: Root folder string of the subfolders containing the training images. input_jpeg_tensor: The input layer we feed the image data to. distorted_image: The output node of the distortion graph. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: The bottleneck output layer of the CNN graph. Returns: List of bottleneck arrays and their corresponding ground truths.
codesearchnet
def _compress_hextets(cls, hextets): best_doublecolon_start = -1 best_doublecolon_len = 0 doublecolon_start = -1 doublecolon_len = 0 for index, hextet in enumerate(hextets): if hextet == '0': doublecolon_len += 1 if doublecolon_start == -1: doublecolon_start = index if doublecolon_len > best_doublecolon_len: best_doublecolon_len = doublecolon_len best_doublecolon_start = doublecolon_start else: doublecolon_len = 0 doublecolon_start = -1 if best_doublecolon_len > 1: best_doublecolon_end = (best_doublecolon_start + best_doublecolon_len) if best_doublecolon_end == len(hextets): hextets += [''] hextets[best_doublecolon_start:best_doublecolon_end] = [''] if best_doublecolon_start == 0: hextets = [''] + hextets return hextets
Compresses a list of hextets. Compresses a list of strings, replacing the longest continuous sequence of "0" in the list with "" and adding empty strings at the beginning or at the end of the string such that subsequently calling ":".join(hextets) will produce the compressed version of the IPv6 address. Args: hextets: A list of strings, the hextets to compress. Returns: A list of strings.
juraj-google-style
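Tracing the algorithm on the hextets of 2001:0:0:0:0:0:0:1, treating the method as a plain function for illustration (cls is unused in the body, so None stands in for it):

hextets = ['2001', '0', '0', '0', '0', '0', '0', '1']
# The longest run of '0' spans indices 1..6, so that slice collapses to one empty string.
compressed = _compress_hextets(None, hextets)   # ['2001', '', '1']
print(':'.join(compressed))                     # 2001::1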
def oem(self): buf = (ctypes.c_char * self.MAX_BUF_SIZE)() res = self._dll.JLINKARM_GetOEMString(ctypes.byref(buf)) if (res != 0): raise errors.JLinkException('Failed to grab OEM string.') oem = ctypes.string_at(buf).decode() if (len(oem) == 0): return None return oem
Retrieves and returns the OEM string of the connected J-Link. Args: self (JLink): the ``JLink`` instance Returns: The string of the OEM. If this is an original SEGGER product, then ``None`` is returned instead. Raises: JLinkException: on hardware error.
codesearchnet
def _kl_bernoulli_bernoulli(a, b, name=None): with tf.name_scope(name or "kl_bernoulli_bernoulli"): delta_probs0 = tf.nn.softplus(-b.logits) - tf.nn.softplus(-a.logits) delta_probs1 = tf.nn.softplus(b.logits) - tf.nn.softplus(a.logits) return (tf.sigmoid(a.logits) * delta_probs0 + tf.sigmoid(-a.logits) * delta_probs1)
Calculate the batched KL divergence KL(a || b) with a and b Bernoulli. Args: a: instance of a Bernoulli distribution object. b: instance of a Bernoulli distribution object. name: (optional) Name to use for created operations. default is "kl_bernoulli_bernoulli". Returns: Batchwise KL(a || b)
juraj-google-style
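Written in probabilities, with p = sigmoid(a.logits) and q = sigmoid(b.logits), the expression above is the closed-form Bernoulli KL divergence; the two softplus differences are exactly log(p/q) and log((1-p)/(1-q)), since log sigmoid(z) = -softplus(-z):

\[
\mathrm{KL}\big(\mathrm{Bern}(p)\,\|\,\mathrm{Bern}(q)\big)
  = p \log\frac{p}{q} + (1-p)\log\frac{1-p}{1-q}
\]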
async def remember_ticket(self, request, ticket): session = (await get_session(request)) session[self.cookie_name] = ticket
Called to store the ticket data for a request. Ticket data is stored in the aiohttp_session object Args: request: aiohttp Request object. ticket: String like object representing the ticket to be stored.
codesearchnet
def get_contacts(self, issue): if (not issue.resource): return [] account_contacts = issue.resource.account.contacts try: resource_owners = issue.resource.get_owner_emails() if (type(resource_owners) is list): for resource_owner in resource_owners: account_contacts.append({'type': 'email', 'value': resource_owner}) except AttributeError: pass return account_contacts
Returns a list of contacts for an issue Args: issue (:obj:`RequiredTagsIssue`): Issue record Returns: `list` of `dict`
codesearchnet
def get_files_re(self, file_re, full_path=False, ignorecase=False): try: if ignorecase: compiled_re = re.compile(file_re, re.I) else: compiled_re = re.compile(file_re) except sre_constants.error: logger.error('Failed to compile regex: {}.'.format(file_re)) return [] found = [] if self.handle: for member in self.handle.getmembers(): if (isinstance(member, TarInfo) and member.isdir()): pass elif ((full_path and compiled_re.search(member.name)) or ((not full_path) and compiled_re.search(os.path.basename(member.name)))): found.append(member.name) return found
Finds all files that match file_re and returns their list. Doesn't return directories, only files. Args: file_re: raw string to match files against (gets compiled into re) full_path: whether to match against full path inside the archive or just the filenames ignorecase: whether to ignore case when using the given re Returns: List of full paths of files inside the archive that match the given file_re.
codesearchnet
def GetScriptHashesForVerifying(self): if (self.PrevHash.Data == bytearray(32)): if (type(self.Script.VerificationScript) is bytes): return [bytearray(self.Script.VerificationScript)] elif (type(self.Script.VerificationScript) is bytearray): return [self.Script.VerificationScript] else: raise Exception('Invalid Verification script') prev_header = GetBlockchain().GetHeader(self.PrevHash.ToBytes()) if (prev_header is None): raise Exception('Invalid operation') return [prev_header.NextConsensus]
Get the script hash used for verification. Raises: Exception: if the verification script is invalid, or no header could be retrieved from the Blockchain. Returns: list: with a single UInt160 representing the next consensus node.
codesearchnet
def list_from_file(filename, prefix='', offset=0, max_num=0): cnt = 0 item_list = [] with open(filename, 'r') as f: for _ in range(offset): f.readline() for line in f: if max_num > 0 and cnt >= max_num: break item_list.append(prefix + line.rstrip('\n')) cnt += 1 return item_list
Load a text file and parse the content as a list of strings. Args: filename (str): Filename. prefix (str): The prefix to be inserted at the beginning of each item. offset (int): The offset of lines. max_num (int): The maximum number of lines to be read, zeros and negatives mean no limitation. Returns: list[str]: A list of strings.
juraj-google-style
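A minimal usage sketch with a made-up file:

from pathlib import Path

Path('classes.txt').write_text('cat\ndog\nbird\n')

# Skip the first line, prefix the rest, and stop after one item.
items = list_from_file('classes.txt', prefix='animal/', offset=1, max_num=1)
print(items)   # ['animal/dog']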
def tf_next_step(self, x, iteration, conjugate, residual, squared_residual): next_step = super(ConjugateGradient, self).tf_next_step(x, iteration, conjugate, residual, squared_residual) return tf.logical_and(x=next_step, y=(squared_residual >= util.epsilon))
Termination condition: max number of iterations, or residual sufficiently small. Args: x: Current solution estimate $x_t$. iteration: Current iteration counter $t$. conjugate: Current conjugate $c_t$. residual: Current residual $r_t$. squared_residual: Current squared residual $r_t^2$. Returns: True if another iteration should be performed.
juraj-google-style
def partial_derivative_sigma(mu, sigma, low, high, data): pd_sigma = np.sum(((- (1 / sigma)) + (((data - mu) ** 2) / (sigma ** 3)))) pd_sigma -= (len(data) * ((((low - mu) * norm.pdf(low, mu, sigma)) - ((high - mu) * norm.pdf(high, mu, sigma))) / (sigma * (norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma))))) return (- pd_sigma)
The partial derivative with respect to the standard deviation. Args: mu (float): the mean of the truncated normal sigma (float): the std of the truncated normal low (float): the lower truncation bound high (float): the upper truncation bound data (ndarray): the one-dimensional list of data points for which we want to calculate the likelihood Returns: float: the partial derivative evaluated at the given point
codesearchnet
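Reading the code back into math: for data truncated to [low, high], the accumulated quantity is the partial derivative of the truncated-normal log-likelihood with respect to sigma, and the function returns its negative (presumably so it can be fed to a minimizer):

\[
\frac{\partial \ell}{\partial \sigma}
 = \sum_i\Big(-\frac{1}{\sigma} + \frac{(x_i-\mu)^2}{\sigma^3}\Big)
 - n\,\frac{(\mathrm{low}-\mu)\,\phi(\mathrm{low};\mu,\sigma) - (\mathrm{high}-\mu)\,\phi(\mathrm{high};\mu,\sigma)}
          {\sigma\,\big(\Phi(\mathrm{high};\mu,\sigma) - \Phi(\mathrm{low};\mu,\sigma)\big)}
\]

where \(\phi\) and \(\Phi\) are the normal pdf and cdf and n = len(data).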
def assemble(self, ops): return pwnypack.asm.asm(self.compile(ops), target=self.target)
Assemble a list of operations into executable code. Arguments: ops(list): A list of shellcode operations. Returns: bytes: The executable code that implements the shellcode.
juraj-google-style
def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None): egg_project_name = pkg_resources.to_filename(project_name) req = '{}@{}#egg={}'.format(repo_url, rev, egg_project_name) if subdir: req += '&subdirectory={}'.format(subdir) return req
Return the URL for a VCS requirement. Args: repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+"). rev: the revision (e.g. commit hash, branch, or tag) to pin. project_name: the (unescaped) project name. subdir: optional subdirectory of the repository that contains the project.
juraj-google-style
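A usage sketch with placeholder values (and assuming the '#egg=' fragment reconstructed above); pkg_resources.to_filename turns 'demo-pkg' into 'demo_pkg':

url = make_vcs_requirement_url(
    'git+https://github.com/example/demo-pkg.git',
    'abc1234',
    'demo-pkg',
    subdir='src',
)
print(url)
# git+https://github.com/example/demo-pkg.git@abc1234#egg=demo_pkg&subdirectory=src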
def get_max_atten(self): return self.attenuation_device.max_atten
Gets the max attenuation supported by the Attenuator. Returns: A float that is the max attenuation value.
github-repos
def UpdateClass(self, class_name, gtfs_class): if class_name not in self._class_mapping: raise problems.NonexistentMapping(class_name) self._class_mapping[class_name] = gtfs_class
Updates an entry in the list of known classes. Args: class_name: A string with the class name that is to be updated. gtfs_class: The new class Raises: NonexistentMapping if there is no class with the specified class_name.
juraj-google-style
def feedforward(inputs, num_units, scope="multihead_attention"): with tf.variable_scope(scope): params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1, "activation": tf.nn.relu, "use_bias": True} outputs = tf.layers.conv1d(**params) params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1, "activation": None, "use_bias": True} outputs = tf.layers.conv1d(**params) outputs += inputs outputs = normalize(outputs) return outputs
Point-wise feed forward net. Args: inputs: A 3d tensor with shape of [N, T, C]. num_units: A list of two integers. scope: Optional scope for `variable_scope`. Returns: A 3d tensor with the same shape and dtype as inputs
juraj-google-style
def _scale_boxes(boxes, target_sizes): if isinstance(target_sizes, (list, tuple)): image_height = torch.tensor([i[0] for i in target_sizes]) image_width = torch.tensor([i[1] for i in target_sizes]) elif isinstance(target_sizes, torch.Tensor): image_height, image_width = target_sizes.unbind(1) else: raise ValueError('`target_sizes` must be a list, tuple or torch.Tensor') scale_factor = torch.stack([image_width, image_height, image_width, image_height], dim=1) scale_factor = scale_factor.unsqueeze(1).to(boxes.device) boxes = boxes * scale_factor return boxes
Scale batch of bounding boxes to the target sizes. Args: boxes (`torch.Tensor` of shape `(batch_size, num_boxes, 4)`): Bounding boxes to scale. Each box is expected to be in (x1, y1, x2, y2) format. target_sizes (`List[Tuple[int, int]]` or `torch.Tensor` of shape `(batch_size, 2)`): Target sizes to scale the boxes to. Each target size is expected to be in (height, width) format. Returns: `torch.Tensor` of shape `(batch_size, num_boxes, 4)`: Scaled bounding boxes.
github-repos
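A small sketch of the scaling (all values are made up; each target size is (height, width) and boxes are (x1, y1, x2, y2)):

import torch

boxes = torch.tensor([[[0.1, 0.2, 0.5, 0.8]]])   # shape (batch=1, num_boxes=1, 4)
target_sizes = torch.tensor([[100, 200]])        # one image: height 100, width 200

scaled = _scale_boxes(boxes, target_sizes)
print(scaled)   # tensor([[[ 20.,  20., 100.,  80.]]])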
def Lookup(self, keywords, start_time=FIRST_TIMESTAMP, end_time=LAST_TIMESTAMP, last_seen_map=None): posting_lists = self.ReadPostingLists(keywords, start_time=start_time, end_time=end_time, last_seen_map=last_seen_map) results = list(itervalues(posting_lists)) relevant_set = results[0] for hits in results: relevant_set &= hits if (not relevant_set): return relevant_set return relevant_set
Finds objects associated with keywords. Find the names related to all keywords. Args: keywords: A collection of keywords that we are interested in. start_time: Only considers keywords added at or after this point in time. end_time: Only considers keywords at or before this point in time. last_seen_map: If present, is treated as a dict and populated to map pairs (keyword, name) to the timestamp of the latest connection found. Returns: A set of potentially relevant names.
codesearchnet
def get_attribute(self, node, obj, name, valself=None): obj = abstract_utils.unwrap_final(obj) special_attribute = obj.get_special_attribute(node, name, valself) if special_attribute is not None: return (node, special_attribute) if isinstance(obj, abstract.Function): if name == '__get__': return (node, None) else: return self._get_instance_attribute(node, obj, name, valself) elif isinstance(obj, abstract.ParameterizedClass): return self.get_attribute(node, obj.base_cls, name, valself) elif isinstance(obj, abstract.Class): return self._get_class_attribute(node, obj, name, valself) elif isinstance(obj, overlay.Overlay): return self._get_module_attribute(node, obj.get_module(name), name, valself) elif isinstance(obj, abstract.Module): return self._get_module_attribute(node, obj, name, valself) elif isinstance(obj, abstract.SimpleValue): return self._get_instance_attribute(node, obj, name, valself) elif isinstance(obj, abstract.Union): if name == '__getitem__': return (node, self.ctx.new_unsolvable(node)) nodes = [] ret = self.ctx.program.NewVariable() for o in obj.options: node2, attr = self.get_attribute(node, o, name, valself) if attr is not None: ret.PasteVariable(attr, node2) nodes.append(node2) if ret.bindings: return (self.ctx.join_cfg_nodes(nodes), ret) else: return (node, None) elif isinstance(obj, special_builtins.SuperInstance): return self._get_attribute_from_super_instance(node, obj, name, valself) elif isinstance(obj, special_builtins.Super): return self.get_attribute(node, self.ctx.convert.super_type, name, valself) elif isinstance(obj, (abstract.StaticMethod, abstract.ClassMethod)): return self.get_attribute(node, obj.method, name, valself) elif isinstance(obj, abstract.BoundFunction): return self.get_attribute(node, obj.underlying, name, valself) elif isinstance(obj, abstract.TypeParameterInstance): param_var = obj.instance.get_instance_type_parameter(obj.name) if not param_var.bindings: param_var = obj.param.instantiate(self.ctx.root_node) results = [] nodes = [] for b in param_var.bindings: if b.data == obj: continue node2, ret = self.get_attribute(node, b.data, name, valself) if ret is None: if b.IsVisible(node): return (node, None) else: results.append(ret) nodes.append(node2) if nodes: node = self.ctx.join_cfg_nodes(nodes) return (node, self.ctx.join_variables(node, results)) else: return (node, self.ctx.new_unsolvable(node)) elif isinstance(obj, abstract.Empty): return (node, None) elif isinstance(obj, abstract.ParamSpec): if name == 'args': return (node, abstract.ParamSpecArgs(obj, self.ctx).to_variable(node)) elif name == 'kwargs': return (node, abstract.ParamSpecKwargs(obj, self.ctx).to_variable(node)) else: return (node, None) else: return (node, None)
Get the named attribute from the given object. Args: node: The current CFG node. obj: The object. name: The name of the attribute to retrieve. valself: A cfg.Binding to a self reference to include in the attribute's origins. If obj is an abstract.Class, valself can be a binding to: * an instance of obj - obj will be treated strictly as a class. * obj itself - obj will be treated as an instance of its metaclass. * None - if name == "__getitem__", obj is a type annotation; else, obj is strictly a class, but the attribute is left unbound. Else, valself is optional and should be a binding to obj when given. Returns: A tuple (CFGNode, cfg.Variable). If this attribute doesn't exist, the Variable will be None.
github-repos
def construct_gene_object(ensembl, transcript_id): (chrom, start, end, strand, genomic_sequence) = ensembl.get_genomic_seq_for_transcript(transcript_id, expand=10) cds_sequence = ensembl.get_cds_seq_for_transcript(transcript_id) cds_ranges = ensembl.get_cds_ranges_for_transcript(transcript_id) exon_ranges = ensembl.get_exon_ranges_for_transcript(transcript_id) transcript = Transcript(transcript_id, chrom, start, end, strand) transcript.set_exons(exon_ranges, cds_ranges) transcript.set_cds(cds_ranges) transcript.add_cds_sequence(cds_sequence) transcript.add_genomic_sequence(genomic_sequence, offset=10) return transcript
creates an Transcript object for a gene from ensembl databases Args: ensembl: EnsemblRequest object to request data from ensembl transcript_id: string for an Ensembl transcript ID Returns: a Transcript object, containing transcript coordinates and gene and transcript sequence. Raises: ValueError if CDS from genomic sequence given gene coordinates and CDS retrieved from Ensembl do not match.
juraj-google-style
def __init__(self, file_system, tsk_attribute): super(TSKDataStream, self).__init__() self._file_system = file_system self._tsk_attribute = tsk_attribute
Initializes a data stream. Args: file_system (TSKFileSystem): file system. tsk_attribute (pytsk3.Attribute): TSK attribute.
juraj-google-style
def _call_method_from_namespace(obj, method_name, namespace): method = getattr(obj, method_name) method_parser = method.parser arg_names = _get_args_name_from_parser(method_parser) if (method_name == '__init__'): return _call(obj, arg_names, namespace) return _call(method, arg_names, namespace)
Call the method, retrieved from obj, with the correct arguments via the namespace Args: obj: any kind of object method_name: method to be called namespace: an argparse.Namespace object containing parsed command line arguments
codesearchnet
def append_to_history(self, filename, command, go_to_eof): if not is_text_string(filename): filename = to_text_string(filename.toUtf8(), 'utf-8') command = to_text_string(command) index = self.filenames.index(filename) self.editors[index].append(command) if go_to_eof: self.editors[index].set_cursor_position('eof') self.tabwidget.setCurrentIndex(index)
Append an entry to history filename. Args: filename (str): file to be updated in a new tab. command (str): line to be added. go_to_eof (bool): scroll to the end of file.
juraj-google-style
def _CompressionSizeDelta(self, records, options_a, options_b): fn_a = self._WriteRecordsToFile(records, 'tfrecord_a', options=options_a) test_a = list(tf_record.tf_record_iterator(fn_a, options=options_a)) self.assertEqual(records, test_a, options_a) fn_b = self._WriteRecordsToFile(records, 'tfrecord_b', options=options_b) test_b = list(tf_record.tf_record_iterator(fn_b, options=options_b)) self.assertEqual(records, test_b, options_b) return os.path.getsize(fn_a) - os.path.getsize(fn_b)
Validate compression with options_a and options_b and return size delta. Compress records with options_a and options_b. Uncompress both compressed files and assert that the contents match the original records. Finally calculate how much smaller the file compressed with options_a was than the file compressed with options_b. Args: records: The records to compress options_a: First set of options to compress with, the baseline for size. options_b: Second set of options to compress with. Returns: The difference in file size when using options_a vs options_b. A positive value means options_a was a better compression than options_b. A negative value means options_b had better compression than options_a.
github-repos
def init_datapackage(resource_paths): dp = datapackage.Package({'name': 'change-me', 'schema': 'tabular-data-package'}) for path in resource_paths: dp.infer(path) return dp
Create tabular data package with resources. It will also infer the tabular resources' schemas. Args: resource_paths (List[str]): Paths to the data package resources. Returns: datapackage.Package: The data package.
codesearchnet
def process_test_logs(name, test_name, test_args, benchmark_type, start_time, run_time, log_files): results = test_log_pb2.TestResults() results.name = name results.target = test_name results.start_time = start_time results.run_time = run_time results.benchmark_type = test_log_pb2.TestResults.BenchmarkType.Value(benchmark_type.upper()) git_sha = get_git_commit_sha() if git_sha: results.commit_id.hash = git_sha results.entries.CopyFrom(process_benchmarks(log_files)) results.run_configuration.argument.extend(test_args) results.machine_configuration.CopyFrom(system_info_lib.gather_machine_configuration()) return results
Gather test information and put it in a TestResults proto. Args: name: Benchmark target identifier. test_name: A unique bazel target, e.g. "//path/to:test" test_args: A string containing all arguments to run the target with. benchmark_type: A string representing the BenchmarkType enum; the benchmark type for this target. start_time: Test starting time (epoch) run_time: Wall time that the test ran for log_files: Paths to the log files Returns: A TestResults proto
github-repos
def serialize_data(data, compression=False, encryption=False, public_key=None): message = json.dumps(data) if compression: message = zlib.compress(message) message = binascii.b2a_base64(message) if (encryption and public_key): message = encryption.encrypt(message, public_key) encoded_message = str.encode(message) return encoded_message
Serializes normal Python datatypes into plaintext using json. You may also choose to enable compression and encryption when serializing data to send over the network. Enabling one or both of these options will incur additional overhead. Args: data (dict): The data to convert into plain text using json. compression (boolean): True or False value on whether or not to compress the serialized data. encryption (rsa.encryption): An encryption instance used to encrypt the message if encryption is desired. public_key (str): The public key to use to encrypt if encryption is enabled. Returns: The string message serialized using json.
codesearchnet
def __init__(self, raw_string, bow=True): self.raw = raw_string self.as_list = list(self.raw) self.as_np = np.array(self.as_list) self.string_start = np.arange(len(self.raw)) vocab = {} self.inverse_vocab = [] self.positions = [] self.bow = bow non_vocab = set() for i, char in enumerate(self.as_np): if char in non_vocab: continue if bow: if char not in vocab: vocab[char] = len(vocab) self.inverse_vocab.append(char) self.positions.append([]) idx_char = vocab[char] self.positions[idx_char].append(i) else: self.inverse_vocab.append(char) self.positions.append(i) if not bow: self.positions = np.array(self.positions)
Initializer. Args: raw_string: string with raw text in it bow: if True, a char is the same everywhere in the text - i.e. we will index multiple occurrences of the same character. If False, order matters, so that the same word will have different ids according to position.
juraj-google-style
def compile_intermediate_cpfs(self, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[Noise]=None) -> List[CPFPair]: interm_fluents = [] with self.graph.as_default(): with tf.name_scope('intermediate_cpfs'): for cpf in self.rddl.domain.intermediate_cpfs: cpf_noise = (noise.get(cpf.name, None) if (noise is not None) else None) name_scope = utils.identifier(cpf.name) with tf.name_scope(name_scope): t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise) interm_fluents.append((cpf.name, t)) scope[cpf.name] = t return interm_fluents
Compiles the intermediate fluent CPFs given the current `state` and `action` scope. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation. batch_size (Optional[int]): The batch size. Returns: A list of intermediate fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.
codesearchnet
def rename_document(self, did, name): payload = { 'name': name } return self._api.request('post', '/api/documents/' + did, body=payload)
Renames the specified document. Args: - did (str): Document ID - name (str): New document name Returns: - requests.Response: Onshape response data
juraj-google-style
def get_page_artid_for_publication_info(publication_info, separator): if 'artid' in publication_info: return publication_info['artid'] elif 'page_start' in publication_info and 'page_end' in publication_info: page_start = publication_info['page_start'] page_end = publication_info['page_end'] return text_type('{}{}{}').format( page_start, text_type(separator), page_end ) return ''
Return the page range or the article id of a publication_info entry. Args: publication_info(dict): a publication_info field entry of a record separator(basestring): optional page range symbol, defaults to a single dash Returns: string: the page range or the article id of the record. Examples: >>> publication_info = {'artid': '054021'} >>> get_page_artid(publication_info) '054021'
juraj-google-style
def generate_examples(options): _prepare_dir(options) out = options.zip_to_output if options.multi_gen_state: test_name = options.multi_gen_state.test_name else: test_name = re.sub('(_(|with-flex|forward-compat|edgetpu|mlir-quant))?(_xnnpack)?\\.zip$', '', out, count=1) test_function_name = 'make_%s_tests' % test_name test_function = get_test_function(test_function_name) if test_function is None: raise RuntimeError("Can't find a test function to create %r. Tried %r" % (out, test_function_name)) if options.make_forward_compat_test: future_date = datetime.date.today() + datetime.timedelta(days=30) with tf.compat.forward_compatibility_horizon(future_date.year, future_date.month, future_date.day): test_function(options) else: test_function(options)
Generate examples for a test set. Args: options: Options containing information to generate examples. Raises: RuntimeError: if the test function cannot be found.
github-repos
def __contains__(self, item): try: _libexec('merkle_db_contains', self.pointer, item.encode()) return True except KeyError: return False
Does the tree contain an address. Args: item (str): An address. Returns: (bool): True if it does contain, False otherwise.
juraj-google-style
def from_row_starts(cls, row_starts, nvals, validate=True, dtype=None, dtype_hint=None): if not isinstance(validate, bool): raise TypeError('validate must have type bool') with ops.name_scope(None, 'RowPartitionFromRowStarts', [row_starts]): row_starts = cls._convert_row_partition(row_starts, 'row_starts', dtype_hint=dtype_hint, dtype=dtype) row_starts.shape.assert_has_rank(1) nvals = math_ops.cast(nvals, row_starts.dtype) if validate: msg = 'Arguments to from_row_starts do not form a valid RaggedTensor' checks = [check_ops.assert_rank(row_starts, 1, message=msg), _assert_zero(row_starts[:1], message=msg), _assert_monotonic_increasing(row_starts, message=msg), check_ops.assert_less_equal(row_starts[-1:], nvals, message=msg)] row_starts = control_flow_ops.with_dependencies(checks, row_starts) row_splits = array_ops.concat([row_starts, [nvals]], axis=0) return cls(row_splits=row_splits, nvals=nvals, internal=_row_partition_factory_key)
Creates a `RowPartition` with rows partitioned by `row_starts`. Equivalent to: `from_row_splits(concat([row_starts, nvals], axis=0))`. Args: row_starts: A 1-D integer tensor with shape `[nrows]`. Must be nonnegative and sorted in ascending order. If `nrows>0`, then `row_starts[0]` must be zero. nvals: A scalar tensor indicating the number of values. validate: If true, then use assertions to check that the arguments form a valid `RowPartition`. dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of `row_starts`, dtype_hint, or tf.int64. dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to `dtype_hint` is not possible, this argument has no effect. Returns: A `RowPartition`.
github-repos
def impersonate(self, name=None, lifetime=None, mechs=None, usage='initiate'): if (rcred_s4u is None): raise NotImplementedError('Your GSSAPI implementation does not have support for S4U') res = rcred_s4u.acquire_cred_impersonate_name(self, name, lifetime, mechs, usage) return type(self)(base=res.creds)
Impersonate a name using the current credentials This method acquires credentials by impersonating another name using the current credentials. :requires-ext:`s4u` Args: name (Name): the name to impersonate lifetime (int): the desired lifetime of the new credentials, or None for indefinite mechs (list): the desired :class:`MechType` OIDs for the new credentials usage (str): the desired usage for the new credentials -- either 'both', 'initiate', or 'accept'. Note that some mechanisms may only support 'initiate'. Returns: Credentials: the new credentials impersonating the given name
codesearchnet
def update_state(world): world_size = len(world) def wrap(index): return index % world_size for x in range(world_size): for y in range(world_size): if not world[x][y].allow_change.get(): continue live_neighbor_count = sum([ world[wrap(x)][wrap(y + 1)].value, world[wrap(x + 1)][wrap(y + 1)].value, world[wrap(x + 1)][wrap(y)].value, world[wrap(x + 1)][wrap(y - 1)].value, world[wrap(x)][wrap(y-1)].value, world[wrap(x - 1)][wrap(y - 1)].value, world[wrap(x - 1)][wrap(y)].value, world[wrap(x - 1)][wrap(y + 1)].value ]) if world[x][y].value: if not (live_neighbor_count == 2 or live_neighbor_count == 3): world[x][y].value = False else: if live_neighbor_count == 3: world[x][y].value = True
Increment the world state, determining which cells live, die, or appear. Args: world (list[list]): A square matrix of cells Returns: None
juraj-google-style
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'): if block_type == 'block35': branch_0 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(branch_1, 32, 3) branch_2 = conv2d_bn(x, 32, 1) branch_2 = conv2d_bn(branch_2, 48, 3) branch_2 = conv2d_bn(branch_2, 64, 3) branches = [branch_0, branch_1, branch_2] elif block_type == 'block17': branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 128, 1) branch_1 = conv2d_bn(branch_1, 160, [1, 7]) branch_1 = conv2d_bn(branch_1, 192, [7, 1]) branches = [branch_0, branch_1] elif block_type == 'block8': branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(branch_1, 224, [1, 3]) branch_1 = conv2d_bn(branch_1, 256, [3, 1]) branches = [branch_0, branch_1] else: raise ValueError('Unknown Inception-ResNet block type. Expects "block35", "block17" or "block8", but got: ' + str(block_type)) block_name = block_type + '_' + str(block_idx) channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3 mixed = layers.Concatenate(axis=channel_axis, name=block_name + '_mixed')(branches) up = conv2d_bn(mixed, x.shape[channel_axis], 1, activation=None, use_bias=True, name=block_name + '_conv') x = CustomScaleLayer(scale)([x, up]) if activation is not None: x = layers.Activation(activation, name=block_name + '_ac')(x) return x
Adds an Inception-ResNet block. Args: x: input tensor. scale: scaling factor to scale the residuals (i.e., the output of passing `x` through an inception module) before adding them to the shortcut branch. Let `r` be the output from the residual branch, the output of this block will be `x + scale * r`. block_type: `'block35'`, `'block17'` or `'block8'`, determines the network structure in the residual branch. block_idx: an `int` used for generating layer names. The Inception-ResNet blocks are repeated many times in this network. We use `block_idx` to identify each of the repetitions. For example, the first Inception-ResNet-A block will have `block_type='block35', block_idx=0`, and the layer names will have a common prefix `'block35_0'`. activation: activation function to use at the end of the block. Returns: Output tensor for the block.
github-repos
def diffusion_mds(means, weights, d, diffusion_rounds=10): for i in range(diffusion_rounds): weights = weights*weights weights = weights/weights.sum(0) X = dim_reduce(means, weights, d) if X.shape[0]==2: return X.dot(weights) else: return X.T.dot(weights)
Dimensionality reduction using MDS, while running diffusion on W. Args: means (array): genes x clusters weights (array): clusters x cells d (int): desired dimensionality Returns: W_reduced (array): array of shape (d, cells)
juraj-google-style
def ParseInteger(text, is_signed=False, is_long=False): try: if is_long: result = long(text, 0) else: result = int(text, 0) except ValueError: raise ValueError(("Couldn't parse integer: %s" % text)) checker = _INTEGER_CHECKERS[((2 * int(is_long)) + int(is_signed))] checker.CheckValue(result) return result
Parses an integer. Args: text: The text to parse. is_signed: True if a signed integer must be parsed. is_long: True if a long integer must be parsed. Returns: The integer value. Raises: ValueError: Thrown if the text is not a valid integer.
codesearchnet
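ParseInteger delegates to Python's int(text, 0), so base prefixes are honored; the is_signed/is_long flags only select which range checker validates the result. A small usage sketch (assumes the surrounding protobuf text_format module, which defines _INTEGER_CHECKERS, is in scope):

print(ParseInteger('0x1f'))                 # 31, base inferred from the 0x prefix
print(ParseInteger('-7', is_signed=True))   # -7
try:
    ParseInteger('-7')                      # unsigned checker should reject a negative value
except ValueError as exc:
    print(exc)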
def get_dict(self, name, default=None): if (name not in self): if (default is not None): return default raise EnvironmentError.not_found(self._prefix, name) return dict(**self.get(name))
Retrieves an environment variable value as a dictionary. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: dict: The environment variable's value as a ``dict``. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided.
codesearchnet
def __init__(self, flow, **kwargs): self.flow = flow self.max_njobs_inqueue = kwargs.get("max_njobs_inqueue", 200)
Initialize the object. Args: flow: :class:`Flow` object max_njobs_inqueue: The launcher will stop submitting jobs when the number of jobs in the queue reaches this maximum.
juraj-google-style
def translate_array(self, string, language, level=3, retdata=False): language = language.lower() assert (self.is_built_in(language) or (language in self.outer_templates)), (('Sorry, ' + language) + ' is not a supported language.') data = phpserialize.loads(bytes(string, 'utf-8'), array_hook=list, decode_strings=True) if self.is_built_in(language): self.get_built_in(language, level, data) print(self) return (self.data_structure if retdata else None) def loop_print(iterable, level=3): '\n Loops over a python representation of a php array \n (list of tuples) and constructs a representation in another language.\n Translates a php array into another structure.\n\n Args:\n iterable: list or tuple to unpack.\n\n level: integer, number of spaces to use for indentation\n ' retval = '' indentation = (' ' * level) if ((not self.is_iterable(iterable)) or isinstance(iterable, str)): non_iterable = str(iterable) return str(non_iterable) for item in iterable: if (isinstance(item, tuple) and (len(item) == 2)): key = item[0] val = loop_print(item[1], level=(level + 3)) val = (self.translate_val(language, val) if ((language in self.lang_specific_values) and (val in self.lang_specific_values[language])) else val) key = (str(key) if isinstance(key, int) else (("'" + str(key)) + "'")) needs_unpacking = ((hasattr(item[0], '__iter__') == False) and (hasattr(item[1], '__iter__') == True)) if needs_unpacking: retval += self.get_inner_template(language, 'iterable', indentation, key, val) else: val = (str(val) if (val.isdigit() or (val in self.lang_specific_values[language].values())) else (("'" + str(val)) + "'")) retval += self.get_inner_template(language, 'singular', indentation, key, val) return retval self.data_structure = (self.outer_templates[language] % loop_print(data)) print(self) return (self.data_structure if retdata else None)
Unserializes a serialized php array and prints it to the console as a data structure in the specified language. Used to translate or convert a php array into a data structure in another language. Currently supports, PHP, Python, Javascript, and JSON. Args: string: a string of serialized php language: a string representing the desired output format for the array. level: integer, indentation level in spaces. Defaults to 3. retdata: boolean, the method will return the string in addition to printing it if set to True. Defaults to false. Returns: None but prints a string to the console if retdata is False, otherwise returns a string.
codesearchnet
def quad_2d(width, height, xpos=0.0, ypos=0.0) -> VAO: pos = numpy.array([(xpos - (width / 2.0)), (ypos + (height / 2.0)), 0.0, (xpos - (width / 2.0)), (ypos - (height / 2.0)), 0.0, (xpos + (width / 2.0)), (ypos - (height / 2.0)), 0.0, (xpos - (width / 2.0)), (ypos + (height / 2.0)), 0.0, (xpos + (width / 2.0)), (ypos - (height / 2.0)), 0.0, (xpos + (width / 2.0)), (ypos + (height / 2.0)), 0.0], dtype=numpy.float32) normals = numpy.array([0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0], dtype=numpy.float32) uvs = numpy.array([0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0], dtype=numpy.float32) vao = VAO('geometry:quad', mode=moderngl.TRIANGLES) vao.buffer(pos, '3f', ['in_position']) vao.buffer(normals, '3f', ['in_normal']) vao.buffer(uvs, '2f', ['in_uv']) return vao
Creates a 2D quad VAO using 2 triangles with normals and texture coordinates. Args: width (float): Width of the quad height (float): Height of the quad Keyword Args: xpos (float): Center position x ypos (float): Center position y Returns: A :py:class:`demosys.opengl.vao.VAO` instance.
codesearchnet
def _rot90_4D(images, k, name_scope): def _rot90(): return array_ops.transpose(array_ops.reverse_v2(images, [2]), [0, 2, 1, 3]) def _rot180(): return array_ops.reverse_v2(images, [1, 2]) def _rot270(): return array_ops.reverse_v2(array_ops.transpose(images, [0, 2, 1, 3]), [2]) cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180), (math_ops.equal(k, 3), _rot270)] result = control_flow_case.case(cases, default=lambda: images, exclusive=True, name=name_scope) shape = result.get_shape() result.set_shape([shape[0], None, None, shape[3]]) return result
Rotate batch of images counter-clockwise by 90 degrees `k` times. Args: images: 4-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the images are rotated by 90 degrees. name_scope: A valid TensorFlow name scope. Returns: A 4-D `Tensor` of the same type and shape as `images`.
github-repos
def ParseInteger(text, is_signed=False, is_long=False): try: if is_long: result = long(text, 0) else: result = int(text, 0) except ValueError: raise ValueError('Couldn\'t parse integer: %s' % text) checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)] checker.CheckValue(result) return result
Parses an integer. Args: text: The text to parse. is_signed: True if a signed integer must be parsed. is_long: True if a long integer must be parsed. Returns: The integer value. Raises: ValueError: Thrown if the text is not a valid integer.
juraj-google-style
def reset(target, containers=None, config=None): if target is not None: target = compat.as_bytes(target) if containers is not None: containers = [compat.as_bytes(c) for c in containers] else: containers = [] tf_session.TF_Reset(target, containers, config)
Resets resource containers on `target`, and close all connected sessions. A resource container is distributed across all workers in the same cluster as `target`. When a resource container on `target` is reset, resources associated with that container will be cleared. In particular, all Variables in the container will become undefined: they lose their values and shapes. NOTE: (i) reset() is currently only implemented for distributed sessions. (ii) Any sessions on the master named by `target` will be closed. If no resource containers are provided, all containers are reset. Args: target: The execution engine to connect to. containers: A list of resource container name strings, or `None` if all of all the containers are to be reset. config: (Optional.) Protocol buffer with configuration options. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while resetting containers.
github-repos
def get_contract_data(self, contract_name): contract_data_path = (self.output_dir + '/{0}.json'.format(contract_name)) with open(contract_data_path, 'r') as contract_data_file: contract_data = json.load(contract_data_file) abi = contract_data['abi'] bytecode = contract_data['evm']['bytecode']['object'] return (abi, bytecode)
Returns the contract data for a given contract Args: contract_name (str): Name of the contract to return. Returns: str, str: ABI and bytecode of the contract
codesearchnet
def trainable_variables(scope=None): return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope)
Returns all variables created with `trainable=True`. When passed `trainable=True`, the `Variable()` constructor automatically adds new variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the contents of that collection. @compatibility(TF2) Not compatible with eager execution and `tf.function`. In particular, Graph collections are deprecated in TF2. Instead please create a `tf.Module` container for all your model state, including variables. You can then list all the trainable variables in your `tf.Module` through the `trainable_variables` attribute. @end_compatibility Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose `name` attribute matches `scope` using `re.match`. Items without a `name` attribute are never returned if a scope is supplied. The choice of `re.match` means that a `scope` without special tokens filters by prefix. Returns: A list of Variable objects.
github-repos
def __init__(self, dependency_name, is_upstream=False, optional=False): self.dependency_name = dependency_name self.is_upstream = is_upstream self.optional = optional
Constructor for `Extension`. Args: dependency_name: str, see `ExtDependency.dependency_name` is_upstream: bool, see `ExtDependency.is_upstream` optional: bool, whether this dependency is optional
juraj-google-style
def to_json(self, variables=None): variables_to_resolve = [] if variables: for (key, value) in variables.items(): variables_to_resolve.append(Variable(key, value)) for k in self.get_parameter_definitions(): if ((not variables) or (k not in variables)): variables_to_resolve.append(Variable(k, 'unused_value')) self.resolve_variables(variables_to_resolve) return self.render_template()[1]
Render the blueprint and return the template in json form. Args: variables (dict): Optional dictionary providing/overriding variable values. Returns: str: the rendered CFN JSON template
codesearchnet
def _seconds_have_elapsed(token, num_seconds): now = timeit.default_timer() then = _log_timer_per_token.get(token, None) if ((then is None) or ((now - then) >= num_seconds)): _log_timer_per_token[token] = now return True else: return False
Tests if 'num_seconds' have passed since 'token' was requested. Not strictly thread-safe - may log with the wrong frequency if called concurrently from multiple threads. Accuracy depends on resolution of 'timeit.default_timer()'. Always returns True on the first call for a given 'token'. Args: token: The token for which to look up the count. num_seconds: The number of seconds to test for. Returns: Whether it has been >= 'num_seconds' since 'token' was last requested.
codesearchnet
def log_every_n(level, msg, n, *args): count = _get_next_log_count_per_token(get_absl_logger().findCaller()) log_if(level, msg, not (count % n), *args)
Logs 'msg % args' at level 'level' once per 'n' times. Logs the 1st call, (N+1)st call, (2N+1)st call, etc. Not threadsafe. Args: level: int, the absl logging level at which to log. msg: str, the message to be logged. n: int, the number of times this should be called before it is logged. *args: The args to be substituted into the msg.
juraj-google-style
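For context, a short usage sketch; in a real program this helper is reached through absl's logging module rather than called directly, so the import and loop below are assumptions about how the snippet is packaged.

from absl import logging

# Only the 1st, 101st, 201st, ... calls of this line emit a record;
# the counter is keyed on the call site.
for step in range(1000):
    logging.log_every_n(logging.INFO, 'processed step %d', 100, step)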
def parse_outputtrans(path_dir): run_type = None warning = None efermi = None gap = None doping_levels = [] with open(os.path.join(path_dir, "boltztrap.outputtrans"), 'r') \ as f: for line in f: if "WARNING" in line: warning = line elif "Calc type:" in line: run_type = line.split()[-1] elif line.startswith("VBM"): efermi = Energy(line.split()[1], "Ry").to("eV") elif line.startswith("Egap:"): gap = Energy(float(line.split()[1]), "Ry").to("eV") elif line.startswith("Doping level number"): doping_levels.append(float(line.split()[6])) return run_type, warning, efermi, gap, doping_levels
Parses .outputtrans file Args: path_dir: dir containing boltztrap.outputtrans Returns: tuple - (run_type, warning, efermi, gap, doping_levels)
juraj-google-style
def ws010(self, value=None): if (value is not None): try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float for field `ws010`'.format(value)) self._ws010 = value
Corresponds to IDD Field `ws010` Wind speed corresponding to 1.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `ws010` Unit: m/s if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def patch_addContext(self, patch, text): if (len(text) == 0): return pattern = text[patch.start2:(patch.start2 + patch.length1)] padding = 0 while ((text.find(pattern) != text.rfind(pattern)) and ((self.Match_MaxBits == 0) or (len(pattern) < ((self.Match_MaxBits - self.Patch_Margin) - self.Patch_Margin)))): padding += self.Patch_Margin pattern = text[max(0, (patch.start2 - padding)):((patch.start2 + patch.length1) + padding)] padding += self.Patch_Margin prefix = text[max(0, (patch.start2 - padding)):patch.start2] if prefix: patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)] suffix = text[(patch.start2 + patch.length1):((patch.start2 + patch.length1) + padding)] if suffix: patch.diffs.append((self.DIFF_EQUAL, suffix)) patch.start1 -= len(prefix) patch.start2 -= len(prefix) patch.length1 += (len(prefix) + len(suffix)) patch.length2 += (len(prefix) + len(suffix))
Increase the context until it is unique, but don't let the pattern expand beyond Match_MaxBits. Args: patch: The patch to grow. text: Source text.
codesearchnet
def variant(self, case_id, variant_id): variant_id = int(variant_id) gemini_query = "SELECT * from variants WHERE variant_id = {0}".format( variant_id ) individuals = [] case_obj = self.case(case_id) for individual in case_obj.individuals: individuals.append(individual) self.db = case_obj.variant_source self.variant_type = case_obj.variant_type gq = GeminiQuery(self.db) gq.run(gemini_query) for gemini_variant in gq: variant = self._format_variant( case_id=case_id, gemini_variant=gemini_variant, individual_objs=individuals, index=gemini_variant['variant_id'], add_all_info = True ) return variant return None
Return a specific variant. We solve this by building a gemini query and send it to _variants Args: case_id (str): Path to a gemini database variant_id (int): A gemini variant id Returns: variant_obj (dict): A puzzle variant
juraj-google-style
def get_fixture(self, fixture_id, head2head=None): filters = [] if ((head2head is not None) and (int(head2head) > 0)): self.logger.debug(f'Getting fixture {fixture_id}. head2head is {head2head}.') filters.append(self.__createFilter('head2head', head2head)) else: self.logger.debug(f'Getting fixture {fixture_id}.') return self._request('fixtures', fixture_id, filters=filters)
Loads a single fixture. Args: * fixture_id (str): the id of the fixture * head2head (int, optional): load the previous n fixtures of the two teams Returns: * :obj: json: the fixture-json
codesearchnet
def calc_limits(data, dist=None, padding=0.25): dmin = (sys.float_info.max if (dist is None) else dist.get('min', sys.float_info.max)) dmax = (sys.float_info.min if (dist is None) else dist.get('max', sys.float_info.min)) _min = min(min(data), dmin) _max = max(max(data), dmax) padding = (padding * (_max - _min)) return ((_min - padding), (_max + padding))
Calculate a suitable range for a histogram. Args: data: Iterable of numeric values to plot. dist: Optional dict with 'min'/'max' keys used to widen the candidate range. padding: Fraction of the span to add on each side. Returns: tuple of (min, max)
codesearchnet
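A small worked example, assuming calc_limits above is in scope; the numbers are made up to show how the padding widens the range.

data = [1.0, 2.5, 3.0, 7.5]
dist = {'min': 0.5, 'max': 9.0}   # hypothetical distribution bounds

lo, hi = calc_limits(data, dist=dist, padding=0.25)
# span = 9.0 - 0.5 = 8.5, padding = 0.25 * 8.5 = 2.125
# -> (0.5 - 2.125, 9.0 + 2.125) == (-1.625, 11.125)
print(lo, hi)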
def GetUsernameForPath(self, path): path = path.lower() user_accounts = self._user_accounts.get(self.CURRENT_SESSION, {}) for user_account in iter(user_accounts.values()): if (not user_account.user_directory): continue user_directory = user_account.user_directory.lower() if path.startswith(user_directory): return user_account.username return None
Retrieves a username for a specific path. This determines whether a specific path is within a user's directory and returns the username of the user if so. Args: path (str): path. Returns: str: username or None if the path does not appear to be within a user's directory.
codesearchnet
def _PrintAnalysisStatusUpdateLinear(self, processing_status): for worker_status in processing_status.workers_status: status_line = ( '{0:s} (PID: {1:d}) - events consumed: {2:d} - running: ' '{3!s}\n').format( worker_status.identifier, worker_status.pid, worker_status.number_of_consumed_events, worker_status.status not in definitions.ERROR_STATUS_INDICATORS) self._output_writer.Write(status_line)
Prints an analysis status update in linear mode. Args: processing_status (ProcessingStatus): processing status.
juraj-google-style
def get_catalog_courses(self, catalog_id): return self._load_data( self.CATALOGS_COURSES_ENDPOINT.format(catalog_id), default=[] )
Return the courses included in a single course catalog by ID. Args: catalog_id (int): The catalog ID we want to retrieve. Returns: list: Courses of the catalog in question
juraj-google-style
def load(self, email, master_token, android_id): self._email = email self._android_id = android_id self._master_token = master_token self.refresh() return True
Authenticate to Google with the provided master token. Args: email (str): The account to use. master_token (str): The master token. android_id (str): An identifier for this client. Raises: LoginException: If there was a problem logging in.
juraj-google-style
def create_graph_from_data(self, data, **kwargs): self.arguments['{VERBOSE}'] = str(self.verbose).upper() results = self._run_ccdr(data, verbose=self.verbose) return nx.relabel_nodes(nx.DiGraph(results), {idx: i for (idx, i) in enumerate(data.columns)})
Apply causal discovery on observational data using CCDr. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the CCDR algorithm.
codesearchnet
def set_nodes_vlan(site, nodes, interface, vlan_id):
    def _to_network_address(host):
        """Translate a host to a network address
        e.g:
        paranoia-20.rennes.grid5000.fr -> paranoia-20-eth2.rennes.grid5000.fr
        """
        splitted = host.split('.')
        splitted[0] = splitted[0] + '-' + interface
        return '.'.join(splitted)

    gk = get_api_client()
    network_addresses = [_to_network_address(n) for n in nodes]
    gk.sites[site].vlans[str(vlan_id)].submit({'nodes': network_addresses})
Set the interface of the nodes in a specific vlan. It is assumed that the same interface name is available on the node. Args: site(str): site to consider nodes(list): nodes to consider interface(str): the network interface to put in the vlan vlan_id(str): the id of the vlan
codesearchnet
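A hypothetical call mirroring the helper's own example hostname; the site name, node list and VLAN id below are placeholders.

set_nodes_vlan(
    'rennes',
    ['paranoia-20.rennes.grid5000.fr', 'paranoia-21.rennes.grid5000.fr'],
    'eth2',
    '4',
)
# Each node is rewritten to its per-interface address
# (paranoia-20-eth2.rennes.grid5000.fr, ...) before the vlan submit call.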
def get_is_group_member(self, grp_name, user): self.project_service.set_auth(self._token_project) return self.project_service.get_is_group_member(grp_name, user)
Check if the given user is a member of the named group. Note that a group maintainer is not considered a member unless the user is also explicitly added as a member. Args: grp_name (string): Name of group. user (string): User of interest. Returns: (bool): False if user not a member.
juraj-google-style
def __init__(self, latitude, longitude, time, status, mode=None): super(LoranPosition, self).__init__(latitude, longitude) self.time = time self.status = status self.mode = mode
Initialise a new ``LoranPosition`` object. Args: latitude (float): Fix's latitude longitude (float): Fix's longitude time (datetime.time): Time the fix was taken status (bool): Whether the data is active mode (str): Type of reading
juraj-google-style
def learn(self, state_key, limit=1000): self.t = 1 while self.t <= limit: next_action_list = self.extract_possible_actions(state_key) if len(next_action_list): action_key = self.select_action( state_key=state_key, next_action_list=next_action_list ) reward_value = self.observe_reward_value(state_key, action_key) if len(next_action_list): next_state_key = self.update_state( state_key=state_key, action_key=action_key ) next_next_action_list = self.extract_possible_actions(next_state_key) next_action_key = self.predict_next_action(next_state_key, next_next_action_list) next_max_q = self.extract_q_df(next_state_key, next_action_key) self.update_q( state_key=state_key, action_key=action_key, reward_value=reward_value, next_max_q=next_max_q ) state_key = next_state_key self.normalize_q_value() self.normalize_r_value() self.visualize_learning_result(state_key) if self.check_the_end_flag(state_key) is True: break self.t += 1
Learning and searching the optimal solution. Args: state_key: Initial state. limit: The maximum number of iterative updates based on value iteration algorithms.
juraj-google-style
def fn(x: int, y: str): return x
Test function Args: x: The input y: Also the input
github-repos
def _calculate_aggregation_loss(logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels, aggregation_loss_weight): per_example_aggregation_loss = _calculate_aggregation_loss_known(logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels) if use_answer_as_supervision: per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask) return aggregation_loss_weight * per_example_aggregation_loss
Calculates the aggregation loss per example. Args: logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. aggregation_loss_weight (`float`, *optional*, defaults to 1.0): Importance weight for the aggregation loss. Returns: aggregation_loss (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss per example.
github-repos
def eigenvalues(df): corr = np.corrcoef(df, rowvar=0) eigvals = np.linalg.eigvals(corr) return pd.Series(eigvals, df.columns, name='Eigenvalue')
Returns a pandas Series with eigenvalues of the correlation matrix. Args: df: pandas DataFrame with columns to run diagnostics on
codesearchnet
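An illustrative run, assuming the eigenvalues helper above is importable; the columns are invented, with 'a' and 'b' nearly collinear so the first eigenvalue dominates.

import pandas as pd

df = pd.DataFrame({
    'a': [1.0, 2.0, 3.0, 4.0],
    'b': [2.0, 4.1, 5.9, 8.2],   # nearly collinear with 'a'
    'c': [4.0, 1.0, 3.0, 2.0],
})

# Eigenvalues close to zero flag near-redundant (multicollinear) columns.
print(eigenvalues(df))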
class Wrapper(Layer): def __init__(self, layer, **kwargs): try: assert isinstance(layer, Layer) except Exception: raise ValueError(f"Layer {layer} supplied to Wrapper isn't a supported layer type. Please ensure wrapped layer is a valid Keras layer.") super().__init__(**kwargs) self.layer = layer def build(self, input_shape=None): if not self.layer.built: self.layer.build(input_shape) self.layer.built = True def get_config(self): config = {'layer': serialization_lib.serialize_keras_object(self.layer)} base_config = super().get_config() return {**base_config, **config} @classmethod def from_config(cls, config, custom_objects=None): layer = serialization_lib.deserialize_keras_object(config.pop('layer'), custom_objects=custom_objects) return cls(layer, **config)
Abstract wrapper base class. Wrappers take another layer and augment it in various ways. Do not use this class as a layer, it is only an abstract base class. Two usable wrappers are the `TimeDistributed` and `Bidirectional` layers. Args: layer: The layer to be wrapped.
github-repos
def eval(self, expr): if (self.depth >= self.max_depth): raise LimitationError('too much nesting') if (self.steps >= self.max_steps): raise LimitationError('too many steps') self.depth += 1 self.steps += 1 res = expr.eval(self) self.depth -= 1 return res
Evaluate an expression. This does **not** add its argument (or its result) as an element of me! That is the responsibility of the code that created the object. This means that you need to :meth:`Environment.rec_new` any expression you get from user input before evaluating it. This, and any wrappers around it, are the **only** entry points to expression evaluation you should call from ordinary code (i.e., code that isn't part of an extension). Args: expr (LispVal): The expression to evaluate. Returns: LispVal: The result of evaluating the expression. Raises: ~parthial.errs.LimitationError: If evaluating the expression would require more nesting, more time, or the allocation of more values than is permissible.
codesearchnet
def enroll_users_in_course(cls, enterprise_customer, course_id, course_mode, emails): (existing_users, unregistered_emails) = cls.get_users_by_email(emails) successes = [] pending = [] failures = [] for user in existing_users: succeeded = cls.enroll_user(enterprise_customer, user, course_mode, course_id) if succeeded: successes.append(user) else: failures.append(user) for email in unregistered_emails: pending_user = enterprise_customer.enroll_user_pending_registration(email, course_mode, course_id) pending.append(pending_user) return (successes, pending, failures)
Enroll existing users in a course, and create a pending enrollment for nonexistent users. Args: enterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment course_id (str): The unique identifier of the course in which we're enrolling course_mode (str): The mode with which we're enrolling in the course emails: An iterable of email addresses which need to be enrolled Returns: successes: A list of users who were successfully enrolled in the course pending: A list of PendingEnterpriseCustomerUsers who were successfully linked and had pending enrollments created for them in the database failures: A list of users who could not be enrolled in the course
codesearchnet
def get_all_publications(return_namedtuples=True):
    sources = [ben_cz.get_publications, grada_cz.get_publications,
               cpress_cz.get_publications, zonerpress_cz.get_publications]

    publications = []
    for source in sources:
        publications.extend(filters.filter_publications(source()))

    if return_namedtuples:
        # Use a list comprehension so a list (not a lazy map object) is
        # returned, as documented.
        publications = [x.to_namedtuple() for x in publications]

    return publications
Get a list of publications from all available sources. Args: return_namedtuples (bool, default True): Convert :class:`.Publication` structures to namedtuples (used in AMQP communication). Returns: list: List of :class:`.Publication` structures converted to namedtuples.
codesearchnet
def FoldValue(self, value): if value is False and self._data_type_definition.false_value is not None: return self._data_type_definition.false_value if value is True and self._data_type_definition.true_value is not None: return self._data_type_definition.true_value raise ValueError('No matching True and False values')
Folds the data type into a value. Args: value (object): value. Returns: object: folded value. Raises: ValueError: if the data type definition cannot be folded into the value.
juraj-google-style
def __init__(self, email, password): self.email = email self.password = password self.token = None self.last_api_call = None self.state = [] self.authenticate() self.update_state_from_api()
Create the Trackr API interface object. Args: email (str): Trackr account email address. password (str): Trackr account password.
juraj-google-style
def update(self, *args, **kwargs): for k, v in args: self[k] = v for k, v in kwargs.items(): self[k] = v
Update ConfigMap from mapping/iterable. If the key exists the entry is updated else it is added. Args: *args: variable length argument list. A valid argument is a two item tuple/list. The first item is the key and the second is the value. **kwargs: Arbitrary keyword arguments representing the config.
juraj-google-style
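A minimal sketch of the calling convention; `ConfigMap` stands in for whatever mapping class defines the method above, so the constructor and keys are assumptions.

cfg = ConfigMap()
cfg.update(('host', 'localhost'), ('port', 8080))   # two-item tuples as positional args
cfg.update(timeout=30, retries=3)                   # keyword arguments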
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1, 1] return [0] * len(token_ids_0) + [1, 1]
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
github-repos
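For intuition, a hedged sketch of the mask this layout produces; `tokenizer` is an instance of the (unnamed) tokenizer class above and the token ids are placeholders.

ids_a, ids_b = [5, 6, 7], [8, 9]

tokenizer.get_special_tokens_mask(ids_a)          # -> [0, 0, 0, 1, 1]
tokenizer.get_special_tokens_mask(ids_a, ids_b)   # -> [0, 0, 0, 1, 0, 0, 1, 1]
# Sequence tokens map to 0; the separator and the two trailing special tokens map to 1.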
def AddTrip(self, schedule=None, headsign=None, service_period=None, trip_id=None): if (schedule is None): assert (self._schedule is not None) schedule = self._schedule if (trip_id is None): trip_id = util.FindUniqueId(schedule.trips) if (service_period is None): service_period = schedule.GetDefaultServicePeriod() trip_class = self.GetGtfsFactory().Trip trip_obj = trip_class(route=self, headsign=headsign, service_period=service_period, trip_id=trip_id) schedule.AddTripObject(trip_obj) return trip_obj
Add a trip to this route. Args: schedule: a Schedule object which will hold the new trip or None to use the schedule of this route. headsign: headsign of the trip as a string service_period: a ServicePeriod object or None to use schedule.GetDefaultServicePeriod() trip_id: optional trip_id for the new trip Returns: a new Trip object
codesearchnet
def get_associated_resource(self, task): if not task: raise HPOneViewUnknownType(MSG_INVALID_TASK) if task['category'] != 'tasks' and task['category'] != 'backups': raise HPOneViewUnknownType(MSG_UNKNOWN_OBJECT_TYPE) if task['type'] == 'TaskResourceV2': resource_uri = task['associatedResource']['resourceUri'] if resource_uri and resource_uri.startswith("/rest/appliance/support-dumps/"): return task, resource_uri elif task['type'] == 'BACKUP': task = self._connection.get(task['taskUri']) resource_uri = task['uri'] else: raise HPOneViewInvalidResource(MSG_TASK_TYPE_UNRECONIZED % task['type']) entity = {} if resource_uri: entity = self._connection.get(resource_uri) return task, entity
Retrieve a resource associated with a task. Args: task: task dict Returns: tuple: task (updated), the entity found (dict)
juraj-google-style
def compute_order(bytecode: list[opcodes.Opcode], python_version) -> list[Block]: processed_blocks = set() blocks = _split_bytecode(bytecode, processed_blocks, python_version) if python_version >= (3, 12): blocks = _remove_jump_back_block(blocks) blocks = _remove_jmp_to_get_anext_and_merge(blocks, processed_blocks) first_op_to_block = {block.code[0]: block for block in blocks} for i, block in enumerate(blocks): next_block = blocks[i + 1] if i < len(blocks) - 1 else None if block in processed_blocks: continue first_op, last_op = (block.code[0], block.code[-1]) if next_block and (not last_op.no_next()): block.connect_outgoing(next_block) if first_op.target: block.connect_outgoing(first_op_to_block[first_op.target]) if last_op.target: block.connect_outgoing(first_op_to_block[last_op.target]) if last_op.block_target: block.connect_outgoing(first_op_to_block[last_op.block_target]) return cfg_utils.order_nodes(blocks)
Split bytecode into blocks and order the blocks. This builds an "ancestor first" ordering of the basic blocks of the bytecode. Args: bytecode: A list of instances of opcodes.Opcode. (E.g. returned from opcodes.dis()) python_version: The target Python version, as a (major, minor) tuple. Returns: A list of Block instances.
github-repos
def calculate_subscription_lifecycle(subscription_id): subscription = Subscription.objects.select_related('messageset', 'schedule').get(id=subscription_id) behind = subscription.messages_behind() if (behind == 0): return current_messageset = subscription.messageset current_sequence_number = subscription.next_sequence_number end_subscription = Subscription.fast_forward_lifecycle(subscription, save=False)[(- 1)] BehindSubscription.objects.create(subscription=subscription, messages_behind=behind, current_messageset=current_messageset, current_sequence_number=current_sequence_number, expected_messageset=end_subscription.messageset, expected_sequence_number=end_subscription.next_sequence_number)
Calculates the expected lifecycle position of the subscription with the given subscription_id, and creates a BehindSubscription entry for it. Args: subscription_id (str): ID of subscription to calculate lifecycle for
codesearchnet
def insert(self, loc, column, value): if is_list_like(value): if isinstance(value, pandas.Series): value = value.reindex(self.index) value = list(value) def insert(df, internal_indices=[]): internal_idx = int(internal_indices[0]) old_index = df.index df.index = pandas.RangeIndex(len(df.index)) df.insert(internal_idx, internal_idx, value, allow_duplicates=True) df.columns = pandas.RangeIndex(len(df.columns)) df.index = old_index return df new_data = self.data.apply_func_to_select_indices_along_full_axis(0, insert, loc, keep_remaining=True) new_columns = self.columns.insert(loc, column) return self.__constructor__(new_data, self.index, new_columns)
Insert new column data. Args: loc: Insertion index. column: Column labels to insert. value: Dtype object values to insert. Returns: A new PandasQueryCompiler with new data inserted.
codesearchnet
def save_screenshot(driver, name): if hasattr(driver, 'save_screenshot'): screenshot_dir = os.environ.get('SCREENSHOT_DIR') if not screenshot_dir: LOGGER.warning('The SCREENSHOT_DIR environment variable was not set; not saving a screenshot') return elif not os.path.exists(screenshot_dir): os.makedirs(screenshot_dir) image_name = os.path.join(screenshot_dir, name + '.png') driver.save_screenshot(image_name) else: msg = ( u"Browser does not support screenshots. " u"Could not save screenshot '{name}'" ).format(name=name) LOGGER.warning(msg)
Save a screenshot of the browser. The location of the screenshot can be configured by the environment variable `SCREENSHOT_DIR`. If the variable is not set, a warning is logged and no screenshot is saved. Args: driver (selenium.webdriver): The Selenium-controlled browser. name (str): A name for the screenshot, which will be used in the output file name. Returns: None
juraj-google-style
def release_client(self, client): if isinstance(client, Client): if (not self._is_expired_client(client)): LOG.debug('Client is not expired. Adding back to pool') self.__pool.append(client) elif client.is_connected(): LOG.debug('Client is expired and connected. Disconnecting') client.disconnect() if (self.__sem is not None): self.__sem.release()
Releases a client object to the pool. Args: client: Client object.
codesearchnet