Columns: code (string, 20 to 4.93k chars), docstring (string, 33 to 1.27k chars), source (3 classes)
def __init__(self, object_local_name: str, from_shard_layouts: Sequence[sparse_core_layout_pb2.SparseCoreTableLayout], to_shard_layouts: Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]): logging.info('Creating EmbeddingReshardCallback for %s', object_local_name) self._object_local_name = object_local_name self._from_shard_layouts = from_shard_layouts self._to_shard_layouts = to_shard_layouts
Initializes the reshard callback. Args: object_local_name: The local name of the object being restored. from_shard_layouts: Layouts as stored in the checkpoint being restored from. to_shard_layouts: Target layouts as specified in the embedding being restored.
github-repos
def igmpize(self): gaddr = (self.gaddr if (hasattr(self, 'gaddr') and self.gaddr) else '0.0.0.0') underlayer = self.underlayer if (self.type not in [17, 48]): self.mrcode = 0 if isinstance(underlayer, IP): if (self.type == 17): if (gaddr == '0.0.0.0'): underlayer.dst = '224.0.0.1' elif isValidMCAddr(gaddr): underlayer.dst = gaddr else: warning('Invalid IGMP Group Address detected !') return False elif ((self.type == 23) and isValidMCAddr(gaddr)): underlayer.dst = '224.0.0.2' elif (((self.type == 18) or (self.type == 22)) and isValidMCAddr(gaddr)): underlayer.dst = gaddr else: warning('Invalid IGMP Type detected !') return False if (not any((isinstance(x, IPOption_Router_Alert) for x in underlayer.options))): underlayer.options.append(IPOption_Router_Alert()) underlayer.ttl = 1 _root = self.firstlayer() if _root.haslayer(Ether): _root[Ether].dst = getmacbyip(underlayer.dst) from scapy.contrib.igmpv3 import IGMPv3 if isinstance(self, IGMPv3): self.encode_maxrespcode() return True
Called to explicitly fixup the packet according to the IGMP RFC. The rules are: General: 1. the Max Response time is meaningful only in Membership Queries and should be zero IP: 1. Send General Group Query to 224.0.0.1 (all systems) 2. Send Leave Group to 224.0.0.2 (all routers) 3a. Otherwise send the packet to the group address 3b. Send reports/joins to the group address 4. ttl = 1 (RFC 2236, section 2) 5. send the packet with the router alert IP option (RFC 2236, section 2) Ether: 1. Recalculate destination Returns: True The tuple ether/ip/self passed all checks and represents a proper IGMP packet. False One or more validation checks failed and no fields were adjusted. The function will examine the IGMP message to assure proper format. Corrections will be attempted if possible. The IP header is then properly adjusted to ensure correct formatting and assignment. The Ethernet header is then adjusted to the proper IGMP packet format.
codesearchnet
def slideshow(self, **kwargs): for i, cycle in enumerate(self.cycles): cycle.plot(title="Relaxation step %s" % (i + 1), tight_layout=kwargs.pop("tight_layout", True), show=kwargs.pop("show", True))
Uses matplotlib to plot the evolution of the structural relaxation. Args: ax_list: List of axes. If None a new figure is produced. Returns: `matplotlib` figure
juraj-google-style
def apply(self, func, num_splits=None, other_axis_partition=None, **kwargs): if num_splits is None: num_splits = len(self.list_of_blocks) if other_axis_partition is not None: return [ PyarrowOnRayFramePartition(obj) for obj in deploy_ray_func_between_two_axis_partitions._remote( args=(self.axis, func, num_splits, len(self.list_of_blocks), kwargs) + tuple(self.list_of_blocks + other_axis_partition.list_of_blocks), num_return_vals=num_splits, ) ] args = [self.axis, func, num_splits, kwargs] args.extend(self.list_of_blocks) return [ PyarrowOnRayFramePartition(obj) for obj in deploy_ray_axis_func._remote(args, num_return_vals=num_splits) ]
Applies func to the object in the plasma store. See notes in Parent class about this method. Args: func: The function to apply. num_splits: The number of times to split the result object. other_axis_partition: Another `PyarrowOnRayFrameAxisPartition` object to apply to func with this one. Returns: A list of `RayRemotePartition` objects.
juraj-google-style
def __init__(self, wildcard, sep="|"): self.pats = ["*"] if wildcard: self.pats = wildcard.split(sep)
Initializes a WildCard. Args: wildcard (str): String of tokens separated by sep. Each token represents a pattern. sep (str): Separator for shell patterns.
juraj-google-style
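A small usage sketch for the WildCard initializer above; the separator is the default and the pattern strings are made up for illustration.

# '|' is the default separator; each token is a shell-style pattern.
wc = WildCard('*.txt|data_*')
print(wc.pats)             # -> ['*.txt', 'data_*']
print(WildCard('').pats)   # an empty wildcard falls back to the catch-all ['*']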
def alpha_blend(self, other): fa = ((self.__a + other.__a) - (self.__a * other.__a)) if (fa == 0): sa = 0 else: sa = min(1.0, (self.__a / other.__a)) da = (1.0 - sa) (sr, sg, sb) = [(v * sa) for v in self.__rgb] (dr, dg, db) = [(v * da) for v in other.__rgb] return Color(((sr + dr), (sg + dg), (sb + db)), 'rgb', fa, self.__wref)
Alpha-blend this color on the other one. Args: :other: The grapefruit.Color to alpha-blend with this one. Returns: A grapefruit.Color instance which is the result of alpha-blending this color on the other one. >>> c1 = Color.from_rgb(1, 0.5, 0, 0.2) >>> c2 = Color.from_rgb(1, 1, 1, 0.8) >>> c3 = c1.alpha_blend(c2) >>> c3 Color(1.0, 0.875, 0.75, 0.84)
codesearchnet
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple[int, int]]]=None): class_queries_logits = outputs.logits masks_queries_logits = outputs.pred_masks masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') semantic_segmentation = [] for idx in range(batch_size): resized_logits = nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation
Converts the output of [`DetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`DetrForSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If unset, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
github-repos
def daylight_saving_end_day(self, value=None): if value is not None: try: value = str(value) except ValueError: raise ValueError( 'value {} need to be of type str ' 'for field `daylight_saving_end_day`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `daylight_saving_end_day`') self._daylight_saving_end_day = value
Corresponds to IDD Field `daylight_saving_end_day` Args: value (str): value for IDD Field `daylight_saving_end_day` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def __init__(self, channel): self.DetectIntent = channel.unary_unary( '/google.cloud.dialogflow.v2.Sessions/DetectIntent', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__pb2.DetectIntentRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__pb2.DetectIntentResponse.FromString, ) self.StreamingDetectIntent = channel.stream_stream( '/google.cloud.dialogflow.v2.Sessions/StreamingDetectIntent', request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__pb2.StreamingDetectIntentRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__pb2.StreamingDetectIntentResponse.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def _create_session(self, username, password): session = requests.Session() session.verify = False try: response = session.get(self.host_url) except requests.exceptions.ConnectionError: return False soup = BeautifulSoup(response.text, 'html.parser') csrf_token = soup.find('input', dict(name='csrf_token'))['value'] login_data = dict(username=username, password=password) session.headers.update({ 'x-csrftoken': csrf_token, 'referer': self.host_url }) _ = session.post('{0:s}/login/'.format(self.host_url), data=login_data) return session
Create HTTP session. Args: username (str): Timesketch username password (str): Timesketch password Returns: requests.Session: Session object.
juraj-google-style
def _ConvertMapFieldValue(self, value, message, field): if (not isinstance(value, dict)): raise ParseError('Map field {0} must be in a dict which is {1}.'.format(field.name, value)) key_field = field.message_type.fields_by_name['key'] value_field = field.message_type.fields_by_name['value'] for key in value: key_value = _ConvertScalarFieldValue(key, key_field, True) if (value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE): self.ConvertMessage(value[key], getattr(message, field.name)[key_value]) else: getattr(message, field.name)[key_value] = _ConvertScalarFieldValue(value[key], value_field)
Convert map field value for a message map field. Args: value: A JSON object to convert the map field value. message: A protocol message to record the converted data. field: The descriptor of the map field to be converted. Raises: ParseError: In case of convert problems.
codesearchnet
def BatchConvert(self, metadata_value_pairs, token=None): msg_dict = {} for (metadata, msg) in metadata_value_pairs: msg_dict.setdefault(msg.source, []).append((metadata, msg)) metadata_objects = [] metadata_to_fetch = [] for client_urn in msg_dict: try: metadata_objects.append(self.cached_metadata[client_urn]) except KeyError: metadata_to_fetch.append(client_urn) if metadata_to_fetch: if data_store.RelationalDBEnabled(): client_ids = set((urn.Basename() for urn in metadata_to_fetch)) infos = data_store.REL_DB.MultiReadClientFullInfo(client_ids) fetched_metadata = [GetMetadata(client_id, info) for (client_id, info) in infos.items()] else: client_fds = aff4.FACTORY.MultiOpen(metadata_to_fetch, mode='r', token=token) fetched_metadata = [GetMetadataLegacy(client_fd, token=token) for client_fd in client_fds] for metadata in fetched_metadata: self.cached_metadata[metadata.client_urn] = metadata metadata_objects.extend(fetched_metadata) data_by_type = {} for metadata in metadata_objects: try: for (original_metadata, message) in msg_dict[metadata.client_urn]: new_metadata = ExportedMetadata(metadata) new_metadata.source_urn = original_metadata.source_urn new_metadata.annotations = original_metadata.annotations new_metadata.original_timestamp = message.payload.age cls_name = message.payload.__class__.__name__ if (cls_name not in data_by_type): converters_classes = ExportConverter.GetConvertersByValue(message.payload) data_by_type[cls_name] = {'converters': [cls(self.options) for cls in converters_classes], 'batch_data': [(new_metadata, message.payload)]} else: data_by_type[cls_name]['batch_data'].append((new_metadata, message.payload)) except KeyError: pass converted_batch = [] for dataset in itervalues(data_by_type): for converter in dataset['converters']: converted_batch.extend(converter.BatchConvert(dataset['batch_data'], token=token)) return converted_batch
Converts a batch of GrrMessages into a set of RDFValues at once. Args: metadata_value_pairs: a list or a generator of tuples (metadata, value), where metadata is ExportedMetadata to be used for conversion and value is a GrrMessage to be converted. token: Security token. Returns: Resulting RDFValues. Empty list is a valid result and means that conversion wasn't possible.
codesearchnet
def __init__(self, channel): self.DeletePosixAccount = channel.unary_unary( "/google.cloud.oslogin.v1.OsLoginService/DeletePosixAccount", request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.DeletePosixAccountRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.DeleteSshPublicKey = channel.unary_unary( "/google.cloud.oslogin.v1.OsLoginService/DeleteSshPublicKey", request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.DeleteSshPublicKeyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.GetLoginProfile = channel.unary_unary( "/google.cloud.oslogin.v1.OsLoginService/GetLoginProfile", request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.GetLoginProfileRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.LoginProfile.FromString, ) self.GetSshPublicKey = channel.unary_unary( "/google.cloud.oslogin.v1.OsLoginService/GetSshPublicKey", request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.GetSshPublicKeyRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2.SshPublicKey.FromString, ) self.ImportSshPublicKey = channel.unary_unary( "/google.cloud.oslogin.v1.OsLoginService/ImportSshPublicKey", request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.ImportSshPublicKeyRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.ImportSshPublicKeyResponse.FromString, ) self.UpdateSshPublicKey = channel.unary_unary( "/google.cloud.oslogin.v1.OsLoginService/UpdateSshPublicKey", request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.UpdateSshPublicKeyRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2.SshPublicKey.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def get_config_path(appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME): return os.path.join(appdirs.user_config_dir, file_name)
Return the path where the config file is stored. Args: app_name (text_type, optional): Name of the application, defaults to ``'projecthamster``. Allows you to use your own application specific namespace if you wish. file_name (text_type, optional): Name of the config file. Defaults to ``config.conf``. Returns: str: Fully qualified path (dir & filename) where we expect the config file.
codesearchnet
def _training(self): with tf.device(('/gpu:0' if self._use_gpu else '/cpu:0')): with tf.name_scope('training'): assert_full = tf.assert_equal(self._num_finished_episodes, self._config.update_every) with tf.control_dependencies([assert_full]): data = self._finished_episodes.data() ((observ, action, old_policy_params, reward), length) = data old_policy_params = tools.nested.map((lambda param: self._mask(param, length, 1)), old_policy_params) with tf.control_dependencies([tf.assert_greater(length, 0)]): length = tf.identity(length) observ = self._observ_filter.transform(observ) reward = self._reward_filter.transform(reward) update_summary = self._perform_update_steps(observ, action, old_policy_params, reward, length) with tf.control_dependencies([update_summary]): penalty_summary = self._adjust_penalty(observ, old_policy_params, length) with tf.control_dependencies([penalty_summary]): clear_memory = tf.group(self._finished_episodes.clear(), self._num_finished_episodes.assign(0)) with tf.control_dependencies([clear_memory]): weight_summary = utility.variable_summaries(tf.trainable_variables(), self._config.weight_summaries) return tf.summary.merge([update_summary, penalty_summary, weight_summary])
Perform multiple training iterations of both policy and value baseline. Trains on the episodes collected in memory and resets the memory afterwards. Always returns a summary string. Returns: Summary tensor.
codesearchnet
def get(self, type: Type[T], query: Mapping[(str, Any)]) -> T: LOGGER.info('Getting SourceHandlers for "{type}"'.format(type=type.__name__)) try: handlers = self._get_types[type] except KeyError: try: LOGGER.info('Building new SourceHandlers for "{type}"'.format(type=type.__name__)) handlers = self._get_handlers(type) except NoConversionError: handlers = None self._get_types[type] = handlers if (handlers is None): raise NoConversionError('No source can provide "{type}"'.format(type=type.__name__)) LOGGER.info('Creating new PipelineContext') context = self._new_context() LOGGER.info('Querying SourceHandlers for "{type}"'.format(type=type.__name__)) for handler in handlers: try: return handler.get(query, context) except NotFoundError: pass raise NotFoundError('No source returned a query result!')
Gets a query from the data pipeline. 1) Extracts the query from the sequence of data sources. 2) Inserts the result into the data sinks (if appropriate). 3) Transforms the result into the requested type if it wasn't already. 4) Inserts the transformed result into any data sinks. Args: query: The query being requested. context: The context for the extraction (mutable). Returns: The requested object.
codesearchnet
def keep_file(self, task, response, min_size=None, max_size=None): try: img = Image.open(BytesIO(response.content)) except (IOError, OSError): return False task['img_size'] = img.size if min_size and not self._size_gt(img.size, min_size): return False if max_size and not self._size_lt(img.size, max_size): return False return True
Decide whether to keep the image Compare image size with ``min_size`` and ``max_size`` to decide. Args: response (Response): response of requests. min_size (tuple or None): minimum size of required images. max_size (tuple or None): maximum size of required images. Returns: bool: whether to keep the image.
juraj-google-style
def min_edit_distance(source: Sequence[T], target: Sequence[T], ins_cost: Callable[(..., int)]=(lambda _x: 1), del_cost: Callable[(..., int)]=(lambda _x: 1), sub_cost: Callable[(..., int)]=(lambda x, y: (0 if (x == y) else 1))) -> int: n = len(target) m = len(source) distance = np.zeros(((m + 1), (n + 1)), dtype=np.int16) for i in range(1, (m + 1)): distance[(i, 0)] = (distance[((i - 1), 0)] + ins_cost(source[(i - 1)])) for j in range(1, (n + 1)): distance[(0, j)] = (distance[(0, (j - 1))] + ins_cost(target[(j - 1)])) for j in range(1, (n + 1)): for i in range(1, (m + 1)): distance[(i, j)] = min((distance[((i - 1), j)] + ins_cost(source[(i - 1)])), (distance[((i - 1), (j - 1))] + sub_cost(source[(i - 1)], target[(j - 1)])), (distance[(i, (j - 1))] + del_cost(target[(j - 1)]))) return int(distance[(len(source), len(target))])
Calculates the minimum edit distance between two sequences. Uses the Levenshtein weighting as a default, but offers keyword arguments to supply functions to measure the costs for editing with different elements. Args: ins_cost: A function describing the cost of inserting a given char del_cost: A function describing the cost of deleting a given char sub_cost: A function describing the cost of substituting one char for another Returns: The edit distance between the two input sequences.
codesearchnet
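A quick check of the edit-distance routine above, using its default Levenshtein costs; the test strings and the custom cost function are made up for illustration.

# "kitten" -> "sitting" requires 3 single-character edits under unit costs.
assert min_edit_distance("kitten", "sitting") == 3
# Custom costs are plain callables, e.g. doubling the substitution penalty.
assert min_edit_distance("abc", "adc", sub_cost=lambda x, y: 0 if x == y else 2) == 2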
def update_one_time_key_counts(self, counts): self.one_time_keys_manager.server_counts = counts if self.one_time_keys_manager.should_upload(): logger.info('Uploading new one-time keys.') self.upload_one_time_keys()
Update data on one-time keys count and upload new ones if necessary. Args: counts (dict): Counts of keys currently on the HS for each key type.
juraj-google-style
def batch_shape_tensor(self): batch_shape = tf.constant([], dtype=tf.int32) for param in self.parameters: batch_shape = tf.broadcast_dynamic_shape(batch_shape, param.prior.batch_shape_tensor()) return batch_shape
Runtime batch shape of models represented by this component. Returns: batch_shape: `int` `Tensor` giving the broadcast batch shape of all model parameters. This should match the batch shape of derived state space models, i.e., `self.make_state_space_model(...).batch_shape_tensor()`.
codesearchnet
def decode(data): decoded = None try: decoded = json.loads(data) except Exception as e: raise MetaParsingException("Can't parse your JSON data: %s" % e) decoded = validator.check_structure(decoded) return decoded
Handles decoding of the JSON `data`. Args: data (str): Data which will be decoded. Returns: dict: Dictionary with decoded data.
juraj-google-style
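A usage sketch for the JSON decode helper above; `validator.check_structure` comes from the entry's own module and is assumed here to pass a well-formed dict through unchanged.

record = decode('{"title": "Example", "year": 2024}')
print(record["title"])   # -> 'Example'
# decode('not json') would raise MetaParsingException with the parser's message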
def check_publish_block(self, block_header): if any(((publisher_key != block_header.signer_public_key) for publisher_key in self._valid_block_publishers)): return False if (self._min_wait_time == 0): return True if (self._min_wait_time < 0): return False assert (self._min_wait_time > 0) if (self._max_wait_time <= 0): return ((self._start_time + self._min_wait_time) <= time.time()) assert (self._max_wait_time > 0) if (self._max_wait_time <= self._min_wait_time): return False assert (0 < self._min_wait_time < self._max_wait_time) return ((self._start_time + self._wait_time) <= time.time())
Check if a candidate block is ready to be claimed. Args: block_header (BlockHeader): the block header to be checked to determine whether it should be claimed. Returns: Boolean: True if the candidate block_header should be claimed.
codesearchnet
def PreparePairedSequenceBatch(source, target_in, pad=0): target = target_in[:, :-1] target_y = target_in[:, 1:] source_mask = np.reshape(source != pad, (source.shape[0], 1, 1, source.shape[-1])) target_mask = MakeTargetMask(target, pad) memory_mask = ( np.reshape(np.arange(target.shape[-1]) < source.shape[-1], [-1, 1])) ntokens = np.sum(target_y != pad) return (source, target, target_y, source_mask, target_mask, memory_mask, ntokens)
Build masks for this batch. Args: source: (batch, source_len) array of integer-coded symbols for inputs target_in: (batch, batch_len) array of integer-coded symbols for targets pad: int: the padding symbol used to pad the above Returns: Prepared batch of tuple of arrays: source, input-target, shifted-target, source mask, target mask, source-target "memory" mask, minibatch token count
juraj-google-style
def _build_node_error_message(op): node_error_message = [f'Detected at node {op.name!r} defined at (most recent call last):'] field_dict = _compute_field_dict(op) for frame in field_dict['definition_traceback']: if '<embedded' not in frame: node_error_message.extend([f' {line}' for line in frame.split('\n') if line.strip()]) node_error_message.append(f'Node: {op.name!r}') return '\n'.join(node_error_message)
Returns the formatted error message for the given op. Args: op: The node. Returns: The formatted error message for the given op with traceback.
github-repos
def read_configs(__pkg: str, __name: str='config', *, local: bool=True) -> ConfigParser: configs = get_configs(__pkg, __name) if local: localrc = path.abspath('.{}rc'.format(__pkg)) if path.exists(localrc): configs.append(localrc) cfg = ConfigParser(converters={'datetime': parse_datetime, 'humandelta': parse_timedelta, 'timedelta': parse_delta}) cfg.read(configs, 'utf-8') cfg.configs = configs if (('NO_COLOUR' in environ) or ('NO_COLOR' in environ)): cfg.colour = False elif (__pkg in cfg): if ('colour' in cfg[__pkg]): cfg.colour = cfg[__pkg].getboolean('colour') if ('color' in cfg[__pkg]): cfg.colour = cfg[__pkg].getboolean('color') else: cfg.colour = True return cfg
Process configuration file stack. We export the time parsing functionality of ``jnrbase`` as custom converters for :class:`configparser.ConfigParser`: =================== =========================================== Method Function =================== =========================================== ``.getdatetime()`` :func:`~jnrbase.iso_8601.parse_datetime` ``.gethumantime()`` :func:`~jnrbase.human_time.parse_timedelta` ``.gettimedelta()`` :func:`~jnrbase.iso_8601.parse_delta` =================== =========================================== Args: __pkg: Package name to use as base for config files __name: File name to search for within config directories local: Whether to include config files from current directory Returns: Parsed configuration files
codesearchnet
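A sketch of how the stacked-config reader above might be used; the package name, section and option are hypothetical.

cfg = read_configs('mytool')                        # user config plus ./.mytoolrc if present
print(cfg.configs)                                  # the config paths that were considered
if cfg.has_option('mytool', 'timeout'):
    delay = cfg['mytool'].gettimedelta('timeout')   # custom converter registered above
print(cfg.colour)                                   # False when NO_COLOUR/NO_COLOR is set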
def get_fixture(self, fixture_id, head2head=None): filters = [] if head2head is not None and int(head2head) > 0: self.logger.debug(f'Getting fixture {fixture_id}. head2head is {head2head}.') filters.append(self.__createFilter('head2head', head2head)) else: self.logger.debug(f'Getting fixture {fixture_id}.') return self._request('fixtures', fixture_id, filters=filters)
Loads a single fixture. Args: * fixture_id (str): the id of the fixture * head2head (int, optional): load the previous n fixtures of the two teams Returns: * :obj: json: the fixture-json
juraj-google-style
def id_pools_vwwn_ranges(self): if (not self.__id_pools_vwwn_ranges): self.__id_pools_vwwn_ranges = IdPoolsRanges('vwwn', self.__connection) return self.__id_pools_vwwn_ranges
Gets the IdPoolsRanges API Client for VWWN Ranges. Returns: IdPoolsRanges:
codesearchnet
def cluster_spec(self): if self._tpu != 'local': network_endpoints = self._cloud_tpu_client.network_endpoints() worker_list = ['%s:%s' % (endpoint['ipAddress'], endpoint['port']) for endpoint in network_endpoints] cluster_spec = {self.task_type: worker_list} if self._coordinator_address: cluster_spec[self._coordinator_name] = [self._coordinator_address] return server_lib.ClusterSpec(cluster_spec) else: return server_lib.ClusterSpec({})
Returns a ClusterSpec object based on the latest TPU information. We retrieve the information from the GCE APIs every time this method is called. Returns: A ClusterSpec containing host information returned from Cloud TPUs, or None. Raises: RuntimeError: If the provided TPU is not healthy.
github-repos
def _preprocess_tensor_input(x, data_format, mode): ndim = len(x.shape) if mode == 'tf': x /= 127.5 x -= 1.0 return x elif mode == 'torch': x /= 255.0 mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] else: if data_format == 'channels_first': if len(x.shape) == 3: x = ops.stack([x[i, ...] for i in (2, 1, 0)], axis=0) else: x = ops.stack([x[:, i, :] for i in (2, 1, 0)], axis=1) else: x = ops.stack([x[..., i] for i in (2, 1, 0)], axis=-1) mean = [103.939, 116.779, 123.68] std = None mean_tensor = ops.convert_to_tensor(-np.array(mean), dtype=x.dtype) if data_format == 'channels_first': mean_tensor = ops.reshape(mean_tensor, (1, 3) + (1,) * (ndim - 2)) else: mean_tensor = ops.reshape(mean_tensor, (1,) * (ndim - 1) + (3,)) x += mean_tensor if std is not None: std_tensor = ops.convert_to_tensor(np.array(std), dtype=x.dtype) if data_format == 'channels_first': std_tensor = ops.reshape(std_tensor, (-1, 1, 1)) x /= std_tensor return x
Preprocesses a tensor encoding a batch of images. Args: x: Input tensor, 3D or 4D. data_format: Data format of the image tensor. mode: One of "caffe", "tf" or "torch". - caffe: will convert the images from RGB to BGR, then will zero-center each color channel with respect to the ImageNet dataset, without scaling. - tf: will scale pixels between -1 and 1, sample-wise. - torch: will scale pixels between 0 and 1 and then will normalize each channel with respect to the ImageNet dataset. Returns: Preprocessed tensor.
github-repos
def is_experimental_feature_activated(feature_name): return feature_name in os.environ.get('TF_TRT_EXPERIMENTAL_FEATURES', default='').split(',')
Determines if a TF-TRT experimental feature is enabled. This helper function checks if an experimental feature was enabled using the environment variable `TF_TRT_EXPERIMENTAL_FEATURES=feature_1,feature_2`. Args: feature_name: Name of the feature being tested for activation. Returns: bool: True if `feature_name` appears in the comma-separated `TF_TRT_EXPERIMENTAL_FEATURES` environment variable.
github-repos
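A minimal sketch of the environment-variable check above; the feature names are hypothetical.

import os

os.environ['TF_TRT_EXPERIMENTAL_FEATURES'] = 'feature_1,feature_2'
assert is_experimental_feature_activated('feature_1')
assert not is_experimental_feature_activated('feature_3')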
def one_or_more(e, delimiter=None): if (delimiter is None): delimiter = (lambda s, grm, pos: (s, Ignore, (pos, pos))) msg = 'Expected one or more of: {}'.format(repr(e)) def match_one_or_more(s, grm=None, pos=0): start = pos (s, obj, span) = e(s, grm, pos) pos = span[1] data = ([] if (obj is Ignore) else [obj]) try: while True: (s, obj, span) = delimiter(s, grm, pos) pos = span[1] if (obj is not Ignore): data.append(obj) (s, obj, span) = e(s, grm, pos) pos = span[1] if (obj is not Ignore): data.append(obj) except PegreError: pass return PegreResult(s, data, (start, pos)) return match_one_or_more
Create a PEG function to match one or more expressions. Args: e: the expression to match delimiter: an optional expression to match between the primary *e* matches.
codesearchnet
def int64_user_counter(namespace, name, metric, ptransform=None) -> metrics_pb2.MonitoringInfo: labels = create_labels(ptransform=ptransform, namespace=namespace, name=name) if isinstance(metric, int): metric = coders.VarIntCoder().encode(metric) return create_monitoring_info(USER_COUNTER_URN, SUM_INT64_TYPE, metric, labels)
Return the counter monitoring info for the specified namespace, name and labels. Args: namespace: User-specified namespace of the counter metric. name: Name of the counter metric. metric: The payload field to use in the monitoring info or an int value. ptransform: The ptransform id used as a label.
github-repos
def onWith(self, evnt, func): self.on(evnt, func) try: (yield self) finally: self.off(evnt, func)
A context manager which can be used to add a callback and remove it when using a ``with`` statement. Args: evnt (str): An event name func (function): A callback function to receive event tufo
codesearchnet
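A usage sketch for the onWith() context manager above; `bus` and its fire() method are hypothetical stand-ins for whatever event-emitting object mixes this in.

def on_message(mesg):
    print('got', mesg)

with bus.onWith('message', on_message):   # handler registered on entry
    bus.fire('message', data=1)           # on_message receives the event here
# leaving the block calls off(), so the handler is no longer attached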
def update(self, instance, validated_data): is_primary = validated_data.pop("is_primary", False) instance = super(EmailSerializer, self).update( instance, validated_data ) if is_primary: instance.set_primary() return instance
Update the instance the serializer is bound to. Args: instance: The instance the serializer is bound to. validated_data: The data to update the serializer with. Returns: The updated instance.
juraj-google-style
def join(self, other): assert self._load == other._load, 'loads must be the same' self._lists.extend(other._lists) self._cumlen.extend([c + self._len for c in other._cumlen]) self._len += other._len
r""" Args: other (?): CommandLine: python -m sortedcontainers.sortedlist join2 Example: >>> from utool.experimental.dynamic_connectivity import * # NOQA >>> self = EulerTourList([1, 2, 3, 2, 4, 2, 1], load=3) >>> other = EulerTourList([0, 5, 9, 5, 0], load=3) >>> result = self.join(other) >>> print(result)
juraj-google-style
def _prepare_sample_data(self, submission_type): images = np.random.randint(0, 256, size=[BATCH_SIZE, 299, 299, 3], dtype=np.uint8) for i in range(BATCH_SIZE): Image.fromarray(images[i, :, :, :]).save( os.path.join(self._sample_input_dir, IMAGE_NAME_PATTERN.format(i))) if submission_type == 'targeted_attack': target_classes = np.random.randint(1, 1001, size=[BATCH_SIZE]) target_class_filename = os.path.join(self._sample_input_dir, 'target_class.csv') with open(target_class_filename, 'w') as f: for i in range(BATCH_SIZE): f.write((IMAGE_NAME_PATTERN + ',{1}\n').format(i, target_classes[i]))
Prepares sample data for the submission. Args: submission_type: type of the submission.
juraj-google-style
def find_layer_idx(model, layer_name): layer_idx = None for idx, layer in enumerate(model.layers): if layer.name == layer_name: layer_idx = idx break if layer_idx is None: raise ValueError("No layer with name '{}' within the model".format(layer_name)) return layer_idx
Looks up the layer index corresponding to `layer_name` from `model`. Args: model: The `keras.models.Model` instance. layer_name: The name of the layer to lookup. Returns: The layer index if found. Raises an exception otherwise.
juraj-google-style
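A quick check of the layer-lookup helper above, assuming a small Keras Sequential model; the layer names are made up.

from tensorflow import keras

model = keras.Sequential([
    keras.Input(shape=(8,)),
    keras.layers.Dense(4, name='dense_1'),
    keras.layers.Dense(1, name='output'),
])
print(find_layer_idx(model, 'output'))   # -> 1
# find_layer_idx(model, 'missing') would raise ValueError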
def ensure_dir(path): os.makedirs(os.path.abspath(os.path.dirname(path)), exist_ok=True)
Create all parent directories of path if they don't exist. Args: path: Path-like object. Parent directories of this path are created. Returns: None.
juraj-google-style
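A quick demonstration of the directory helper above using a temporary directory.

import os
import tempfile

base = tempfile.mkdtemp()
# Only the parent directories are created, never the file itself.
ensure_dir(os.path.join(base, 'a', 'b', 'notes.txt'))
assert os.path.isdir(os.path.join(base, 'a', 'b'))
assert not os.path.exists(os.path.join(base, 'a', 'b', 'notes.txt'))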
def _get_num_inputs_outputs(op_type): def _is_list_arg(arg): return arg.number_attr or arg.type_list_attr def _count_args(arg_defs): for arg in arg_defs: if _is_list_arg(arg): return -1 return len(arg_defs) op_def = op_def_registry.get(op_type) if not op_def: return (-1, -1) return (_count_args(op_def.input_arg), _count_args(op_def.output_arg))
Returns (num_inputs, num_outputs). Args: op_type: String. The type of the Operation. Used to lookup the op in the registry. Returns: (num_inputs, num_outputs). For either value, -1 is returned if it can't be statically inferred from the OpDef alone or if the OpDef lookup fails.
github-repos
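A sketch of what the arity lookup above returns for a couple of op types; 'MatMul' is a fixed-arity op in the registry, and unknown ops fall back to (-1, -1).

print(_get_num_inputs_outputs('MatMul'))      # -> (2, 1)
print(_get_num_inputs_outputs('NotARealOp'))  # -> (-1, -1), lookup failed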
def build_sanitiser_node_dict(cfg, sinks_in_file): sanitisers = list() for sink in sinks_in_file: sanitisers.extend(sink.sanitisers) sanitisers_in_file = list() for sanitiser in sanitisers: for cfg_node in cfg.nodes: if (sanitiser in cfg_node.label): sanitisers_in_file.append(Sanitiser(sanitiser, cfg_node)) sanitiser_node_dict = dict() for sanitiser in sanitisers: sanitiser_node_dict[sanitiser] = list(find_sanitiser_nodes(sanitiser, sanitisers_in_file)) return sanitiser_node_dict
Build a dict of string -> TriggerNode pairs, where the string is the sanitiser and the TriggerNode is a TriggerNode of the sanitiser. Args: cfg(CFG): cfg to traverse. sinks_in_file(list[TriggerNode]): list of TriggerNodes containing the sinks in the file. Returns: A string -> TriggerNode dict.
codesearchnet
def scheduled_sample_prob(ground_truth_x, generated_x, batch_size, scheduled_sample_var): probability_threshold = scheduled_sample_var probability_of_generated = tf.random_uniform([batch_size]) return tf.where((probability_of_generated > probability_threshold), generated_x, ground_truth_x)
Probability based scheduled sampling. Args: ground_truth_x: tensor of ground-truth data points. generated_x: tensor of generated data points. batch_size: batch size scheduled_sample_var: probability of choosing from ground_truth. Returns: New batch with randomly selected data points.
codesearchnet
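A usage sketch for the scheduled-sampling helper above, written against TF1-style graph code as in the entry; the shapes and probability are made up.

ground_truth = tf.zeros([4, 8])
generated = tf.ones([4, 8])
# With scheduled_sample_var=0.7, each row keeps the ground truth with
# probability ~0.7 and takes the generated value otherwise.
mixed = scheduled_sample_prob(ground_truth, generated,
                              batch_size=4,
                              scheduled_sample_var=tf.constant(0.7))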
def fetcher(date=datetime.today(), url_pattern=URL_PATTERN): api_url = (url_pattern % date.strftime('%Y-%m-%d')) headers = {'Referer': 'http: raw_result = requests.get(api_url, headers=headers).json() return raw_result
Fetch json data from n.pl Args: date (date) - default today url_pattern (string) - default URL_PATTERN Returns: dict - data from api
codesearchnet
def connect(filename: str, mode: str='r+', *, validate: bool=True, spec_version: str='2.0.1') -> LoomConnection: return LoomConnection(filename, mode, validate=validate, spec_version=spec_version)
Establish a connection to a .loom file. Args: filename: Path to the Loom file to open mode: Read/write mode, 'r+' (read/write) or 'r' (read-only), defaults to 'r+' validate: Validate the file structure against the Loom file format specification spec_version: The loom file spec version to validate against (e.g. "2.0.1" or "old") Returns: A LoomConnection instance. Remarks: This function should typically be used as a context manager (i.e. inside a ``with``-block): .. highlight:: python .. code-block:: python import loompy with loompy.connect("mydata.loom") as ds: print(ds.ca.keys()) This ensures that the file will be closed automatically when the context block ends Note: if validation is requested, an exception is raised if validation fails.
codesearchnet
def __init__(self, name=None, description=None, hint=None, allow_failure=False, passes=None, arguments=None): if name: self.name = name if description: self.description = description if hint: self.hint = hint self.allow_failure = allow_failure self.passes = passes self.arguments = arguments or {} self.result = None
Initialization method. Args: allow_failure (bool): still pass if failed or not. arguments (dict): arguments passed to the check method when run.
juraj-google-style
def update_state(self, y_true, y_pred, sample_weight=None): return metrics_utils.update_confusion_matrix_variables({self._confusion_matrix_cond: self.accumulator}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight)
Accumulates the metric statistics. Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Defaults to 1. Can be a `Tensor` whose rank is either 0, or the same rank as `y_true`, and must be broadcastable to `y_true`. Returns: Update op.
github-repos
def chunk_layer(layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool=False, _out: Any=None, _add_into_out: bool=False) -> Any: if not len(inputs) > 0: raise ValueError('Must provide at least one input') initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)] orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)]) def _prep_inputs(t: torch.Tensor) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims]) == no_batch_dims: t = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) t = t.reshape(-1, *t.shape[no_batch_dims:]) else: t = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) return t prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs) prepped_outputs = None if _out is not None: prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out) flat_batch_dim = 1 for d in orig_batch_dims: flat_batch_dim *= d no_chunks = flat_batch_dim def _select_chunk(t: torch.Tensor) -> torch.Tensor: return t[i:i + chunk_size] if t.shape[0] != 1 else t i = 0 out = prepped_outputs for _ in range(no_chunks): if not low_mem: select_chunk = _select_chunk else: select_chunk = partial(_chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims)) chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs) output_chunk = layer(**chunks) if out is None: out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk) if isinstance(output_chunk, dict): def assign(d1: dict, d2: dict) -> None: for k, v in d1.items(): if isinstance(v, dict): assign(v, d2[k]) elif _add_into_out: v[i:i + chunk_size] += d2[k] else: v[i:i + chunk_size] = d2[k] assign(out, output_chunk) elif isinstance(output_chunk, tuple): for x1, x2 in zip(out, output_chunk): if _add_into_out: x1[i:i + chunk_size] += x2 else: x1[i:i + chunk_size] = x2 elif isinstance(output_chunk, torch.Tensor): if _add_into_out: out[i:i + chunk_size] += output_chunk else: out[i:i + chunk_size] = output_chunk else: raise TypeError('Not supported') i += chunk_size out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out) return out
Implements the "chunking" procedure described in section 1.11.8. Layer outputs and inputs are assumed to be simple "pytrees," consisting only of (arbitrarily nested) lists, tuples, and dicts with torch.Tensor leaves. Args: layer: The layer to be applied chunk-wise inputs: A (non-nested) dictionary of keyworded inputs. All leaves must be tensors and must share the same batch dimensions. chunk_size: The number of sub-batches per chunk. If multiple batch dimensions are specified, a "sub-batch" is defined as a single indexing of all batch dimensions simultaneously (s.t. the number of sub-batches is the product of the batch dimensions). no_batch_dims: How many of the initial dimensions of each input tensor can be considered batch dimensions. low_mem: Avoids flattening potentially large input tensors. Unnecessary in most cases, and is ever so slightly slower than the default setting. Returns: The reassembled output of the layer on the inputs.
github-repos
def __init__(self, correction_limit=88., **kwargs): self.correction_limit = correction_limit super(SunZenithCorrector, self).__init__(**kwargs)
Collect custom configuration values. Args: correction_limit (float): Maximum solar zenith angle to apply the correction in degrees. Pixels beyond this limit have a constant correction applied. Default 88. max_sza (float): Maximum solar zenith angle in degrees that is considered valid and correctable. Default 95.0.
juraj-google-style
def read(self, uri): read_response = self.connect(uri) fedora_graph = rdflib.Graph().parse(data=read_response.read(), format='turtle') return fedora_graph
Method takes a uri and creates an RDF graph from the Fedora Repository Args: uri(str): URI of Fedora URI Returns: rdflib.Graph
codesearchnet
def get_output_details(self): return [self._get_tensor_details(i, subgraph_index=0) for i in self._interpreter.OutputIndices()]
Gets model output tensor details. Returns: A list in which each item is a dictionary with details about an output tensor. The dictionary contains the same fields as described for `get_input_details()`.
github-repos
def get_containers(self, container_class): with self._store_lock: return self.store.get(container_class.CONTAINER_TYPE, [])
Thread-safe method to retrieve data from the state's store. Args: container_class: AttributeContainer class used to filter data. Returns: A list of AttributeContainer objects of matching CONTAINER_TYPE.
codesearchnet
def load(path): importpath = path.replace("/", ".").replace("\\", ".") if importpath[-3:] == ".py": importpath = importpath[:-3] try: importlib.import_module(importpath) except (ModuleNotFoundError, TypeError): exec(open(path).read())
Helper function that tries to load a filepath (or python module notation) as a python module and on failure `exec` it. Args: path (str): Path or module to load The function tries to import `example.module` when either `example.module`, `example/module` or `example/module.py` is given.
juraj-google-style
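A sketch of the loader above with a hypothetical file layout; all three spellings end up loading the same code.

load('example.module')      # imported via importlib when it is importable
load('example/module')      # slashes are rewritten to dots first
load('example/module.py')   # falls back to exec'ing the file if import fails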
def get_workflow(workflow_id: str, workflow_version: str) -> dict: name = 'workflow_definitions:{}:{}'.format(workflow_id, workflow_version) workflow = DB.get_hash_dict(name) workflow['stages'] = ast.literal_eval(workflow['stages']) return workflow
Get a workflow definition from the Configuration Database. Args: workflow_id (str): Workflow identifier workflow_version (str): Workflow version Returns: dict, Workflow definition dictionary
codesearchnet
def normalize_partial_name(decl): if decl.cache.normalized_partial_name is None: decl.cache.normalized_partial_name = normalize(decl.partial_name) return decl.cache.normalized_partial_name
Cached variant of normalize Args: decl (declaration.declaration_t): the declaration Returns: str: normalized name
juraj-google-style
def restore(self, directory=None, file=None): if file is None: file = tf.train.latest_checkpoint( checkpoint_dir=(self.saver_directory if directory is None else directory), ) elif directory is None: file = os.path.join(self.saver_directory, file) elif not os.path.isfile(file): file = os.path.join(directory, file) self.saver.restore(sess=self.session, save_path=file) self.session.run(fetches=self.list_buffer_index_reset_op)
Restore TensorFlow model. If no checkpoint file is given, the latest checkpoint is restored. If no checkpoint directory is given, the model's default saver directory is used (unless file specifies the entire path). Args: directory: Optional checkpoint directory. file: Optional checkpoint file, or path if directory not given.
juraj-google-style
class LabelAggregation(AggregationFn, _AggModelIdMixin, _SourcePredictionMixin): def __init__(self, agg_func: Callable[[Iterable[int]], int], agg_model_id: Optional[str]=None, include_source_predictions: bool=False, normal_label: int=DEFAULT_NORMAL_LABEL, outlier_label: int=DEFAULT_OUTLIER_LABEL, missing_label: int=DEFAULT_MISSING_LABEL): self._agg = agg_func self._normal_label = normal_label self._outlier_label = outlier_label self._missing_label = missing_label _AggModelIdMixin.__init__(self, agg_model_id) _SourcePredictionMixin.__init__(self, include_source_predictions) def apply(self, predictions: Iterable[AnomalyPrediction]) -> AnomalyPrediction: result_dict: dict[str, Any] = {} _AggModelIdMixin.add_model_id(self, result_dict) _SourcePredictionMixin.add_source_predictions(self, result_dict, predictions) labels = [prediction.label for prediction in predictions if prediction.label is not None and prediction.label != self._missing_label] if len(labels) > 0: result_dict['label'] = self._agg(labels) elif all(map(lambda x: x.label is None, predictions)): result_dict['label'] = None else: result_dict['label'] = self._missing_label return AnomalyPrediction(**result_dict)
Aggregates anomaly predictions based on their labels. This is an abstract base class for `AggregationFn`s that combine multiple `AnomalyPrediction` objects into a single `AnomalyPrediction` based on the labels of the input predictions. Args: agg_func (Callable[[Iterable[int]], int]): A function that aggregates a collection of anomaly labels (integers) into a single label. agg_model_id (Optional[str]): The model id used in aggregated predictions. Defaults to None. include_source_predictions (bool): If True, include the input predictions in the `source_predictions` of the output. Defaults to False.
github-repos
def restore_ops(self, reader=None): if self._has_registered_saver(): raise ValueError('Unable to run individual checkpoint restore for objects with registered savers.') restore_ops, tensor_saveables, python_positions, _ = self.gather_ops_or_named_saveables() restore_ops.extend(self._checkpoint.restore_saveables(tensor_saveables, python_positions, reader=reader)) return restore_ops
Create or fetch restore ops for this object's attributes. Requires that the `Trackable` Python object has been bound to an object ID in the checkpoint. Args: reader: A `CheckpointReader`. If None, a new instance will be created. Returns: A list of operations when graph building, or an empty list when executing eagerly.
github-repos
def get_object_errors(self): if (self._object_errors is None): self._object_errors = [{str(o): o.get_errors()} for o in self.objects() if o.has_error()] return self._object_errors
Gets a list of business error message strings for each of the requested objects that had a business error. If there was no error, returns an empty list. Returns: List of strings
codesearchnet
def on_epoch_begin(self, epoch, logs=None): logs = self._process_logs(logs) for callback in self.callbacks: callback.on_epoch_begin(epoch, logs)
Calls the `on_epoch_begin` methods of its callbacks. This function should only be called during TRAIN mode. Args: epoch: Integer, index of epoch. logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.
github-repos
def build_album_art_full_uri(self, url): if not url.startswith(('http:', 'https:')): url = 'http: return url
Ensure an Album Art URI is an absolute URI. Args: url (str): the album art URI. Returns: str: An absolute URI.
juraj-google-style
def delete(self, roomId): check_type(roomId, basestring, may_be_none=False) self._session.delete(((API_ENDPOINT + '/') + roomId))
Delete a room. Args: roomId(basestring): The ID of the room to be deleted. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error.
codesearchnet
def list_of_vars(arg_plot): lovs = [[[var for var in svars.split(',') if var] for svars in pvars.split('.') if svars] for pvars in arg_plot.split('-') if pvars] lovs = [[slov for slov in lov if slov] for lov in lovs if lov] return [lov for lov in lovs if lov]
Construct list of variables per plot. Args: arg_plot (str): string with variable names separated with ``_`` (figures), ``.`` (subplots) and ``,`` (same subplot). Returns: three nested lists of str - variables on the same subplot; - subplots on the same figure; - figures.
juraj-google-style
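A worked example of the separator logic described above; the variable names are made up.

# ',' groups variables on one subplot, '.' separates subplots, '-' separates figures.
print(list_of_vars('T,v.eta-rho'))
# -> [[['T', 'v'], ['eta']], [['rho']]]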
def confusion_matrix( gold, pred, null_pred=False, null_gold=False, normalize=False, pretty_print=True ): conf = ConfusionMatrix(null_pred=null_pred, null_gold=null_gold) gold = arraylike_to_numpy(gold) pred = arraylike_to_numpy(pred) conf.add(gold, pred) mat = conf.compile() if normalize: mat = mat / len(gold) if pretty_print: conf.display(normalize=normalize) return mat
A shortcut method for building a confusion matrix all at once. Args: gold: an array-like of gold labels (ints) pred: an array-like of predictions (ints) null_pred: If True, include the row corresponding to null predictions null_gold: If True, include the col corresponding to null gold labels normalize: if True, divide counts by the total number of items pretty_print: if True, pretty-print the matrix before returning
juraj-google-style
def less_equal(x1, x2): if any_symbolic_tensors((x1, x2)): return LessEqual().symbolic_call(x1, x2) return backend.numpy.less_equal(x1, x2)
Return the truth value of `x1 <= x2` element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: Output tensor, element-wise comparison of `x1` and `x2`.
github-repos
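A minimal check of the comparison op above, assuming Keras 3 with any installed backend.

from keras import ops

x = ops.convert_to_tensor([1, 2, 3])
print(ops.less_equal(x, 2))   # element-wise: True, True, False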
def rescale(self, image: 'torch.Tensor', scale: float, **kwargs) -> 'torch.Tensor': return image * scale
Rescale an image by a scale factor. image = image * scale. Args: image (`torch.Tensor`): Image to rescale. scale (`float`): The scaling factor to rescale pixel values by. Returns: `torch.Tensor`: The rescaled image.
github-repos
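A usage sketch for the rescale method above; `processor` stands in for whichever image-processor instance defines it.

import torch

img = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8).float()
scaled = processor.rescale(img, scale=1 / 255.0)   # pixel values now in [0, 1]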
def _delete(self, url, data, scope): self._create_session(scope) response = self.session.delete(url, data=data) return (response.status_code, response.text)
Make a DELETE request using the session object to a Degreed endpoint. Args: url (str): The url to send a DELETE request to. data (str): The json encoded payload to DELETE. scope (str): Must be one of the scopes Degreed expects: - `CONTENT_PROVIDER_SCOPE` - `COMPLETION_PROVIDER_SCOPE`
codesearchnet
def copy_to_device(target_device, source_device='/cpu:0'): def _apply_fn(dataset): return _CopyToDeviceDataset(dataset, target_device=target_device, source_device=source_device) return _apply_fn
A transformation that copies dataset elements to the given `target_device`. Args: target_device: The name of a device to which elements will be copied. source_device: The original device on which `input_dataset` will be placed. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`.
github-repos
def convert_to_layout_rules(x): if isinstance(x, LayoutRules): return x if isinstance(x, str): x = _parse_string_to_list_of_pairs(x) return LayoutRules(x)
Converts input to a LayoutRules. Args: x: LayoutRules, str, or set-like of string pairs. Returns: LayoutRules.
juraj-google-style
def get_experiment_in_group(self, group, bucketing_id): experiment_id = self.bucketer.find_bucket(bucketing_id, group.id, group.trafficAllocation) if experiment_id: experiment = self.config.get_experiment_from_id(experiment_id) if experiment: self.logger.info(('User with bucketing ID "%s" is in experiment %s of group %s.' % (bucketing_id, experiment.key, group.id))) return experiment self.logger.info(('User with bucketing ID "%s" is not in any experiments of group %s.' % (bucketing_id, group.id))) return None
Determine which experiment in the group the user is bucketed into. Args: group: The group to bucket the user into. bucketing_id: ID to be used for bucketing the user. Returns: Experiment if the user is bucketed into an experiment in the specified group. None otherwise.
codesearchnet
def oauth2_callback(request): if ('error' in request.GET): reason = request.GET.get('error_description', request.GET.get('error', '')) reason = html.escape(reason) return http.HttpResponseBadRequest('Authorization failed {0}'.format(reason)) try: encoded_state = request.GET['state'] code = request.GET['code'] except KeyError: return http.HttpResponseBadRequest('Request missing state or authorization code') try: server_csrf = request.session[_CSRF_KEY] except KeyError: return http.HttpResponseBadRequest('No existing session for this flow.') try: state = json.loads(encoded_state) client_csrf = state['csrf_token'] return_url = state['return_url'] except (ValueError, KeyError): return http.HttpResponseBadRequest('Invalid state parameter.') if (client_csrf != server_csrf): return http.HttpResponseBadRequest('Invalid CSRF token.') flow = _get_flow_for_token(client_csrf, request) if (not flow): return http.HttpResponseBadRequest('Missing Oauth2 flow.') try: credentials = flow.step2_exchange(code) except client.FlowExchangeError as exchange_error: return http.HttpResponseBadRequest('An error has occurred: {0}'.format(exchange_error)) get_storage(request).put(credentials) signals.oauth2_authorized.send(sender=signals.oauth2_authorized, request=request, credentials=credentials) return shortcuts.redirect(return_url)
View that handles the user's return from OAuth2 provider. This view verifies the CSRF state and OAuth authorization code, and on success stores the credentials obtained in the storage provider, and redirects to the return_url specified in the authorize view and stored in the session. Args: request: Django request. Returns: A redirect response back to the return_url.
codesearchnet
def run(self, host='localhost', port=8000, shutdown_timeout=60.0, **kwargs): print((('Running service on http: self.config.port = port self.config.host = host try: if self.event_broker: self.event_broker.start() self.loop.run_until_complete(self.announce()) http_handler = self.app.make_handler() self._http_server = self.loop.create_server(http_handler, host, port) self._server_handler = self.loop.run_until_complete(self._http_server) self.loop.run_forever() except KeyboardInterrupt: pass finally: try: self.cleanup() except UnboundLocalError: pass self.loop.close()
This function starts the service's network interfaces. Args: host (str): The host/interface for the http server. port (int): The port for the http server.
codesearchnet
def _FormatExpression(self, frame, expression): (rc, value) = _EvaluateExpression(frame, expression) if (not rc): message = _FormatMessage(value['description']['format'], value['description'].get('parameters')) return (('<' + message) + '>') return self._FormatValue(value)
Evaluates a single watched expression and formats it into a string form. If expression evaluation fails, returns error message string. Args: frame: Python stack frame in which the expression is evaluated. expression: string expression to evaluate. Returns: Formatted expression value that can be used in the log message.
codesearchnet
def get_metric_group_infos(self): mg_defs = self.get_metric_group_definitions() mg_infos = [] for mg_def in mg_defs: metric_infos = [] for (metric_name, metric_type) in mg_def.types: metric_infos.append({'metric-name': metric_name, 'metric-type': metric_type}) mg_info = {'group-name': mg_def.name, 'metric-infos': metric_infos} mg_infos.append(mg_info) return mg_infos
Get the faked metric group definitions for this context object that are to be returned from its create operation, in the format needed for the "Create Metrics Context" operation response. Returns: "metric-group-infos" JSON object as described for the "Create Metrics Context" operation response.
codesearchnet
def hwvtep_attach_vlan_vid(self, **kwargs): name = kwargs.pop('name') mac = kwargs.pop('mac') vlan = kwargs.pop('vlan') name_args = dict(name=name, vid=vlan, mac=mac) method_name = 'overlay_gateway_attach_vlan_mac' method_class = self._brocade_tunnels gw_attr = getattr(method_class, method_name) config = gw_attr(**name_args) output = self._callback(config) return output
Identifies exported VLANs in VXLAN gateway configurations. Args: name (str): overlay_gateway name vlan(str): vlan_id range callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
juraj-google-style
def _allocate_subnets(self, conf): allocated_subnets = [] try: for net_spec in conf.get('nets', {}).itervalues(): if net_spec['type'] != 'nat': continue gateway = net_spec.get('gw') if gateway: allocated_subnet = self._subnet_store.acquire( self.paths.uuid(), gateway ) else: allocated_subnet = self._subnet_store.acquire( self.paths.uuid() ) net_spec['gw'] = str(allocated_subnet.iter_hosts().next()) allocated_subnets.append(allocated_subnet) except: self._subnet_store.release(allocated_subnets) raise return allocated_subnets, conf
Allocate all the subnets needed by the given configuration spec Args: conf (dict): Configuration spec where to get the nets definitions from Returns: tuple(list, dict): allocated subnets and modified conf
juraj-google-style
def show_error(self, message): assert isinstance(message, string_types) self.post('error', data=message)
Send an error message to the active client. The new error will be displayed on any active GUI clients. Args: message (str): Plain-text message to display. Returns: None >>> s = _syncthing() >>> s.system.show_error('my error msg') >>> s.system.errors()[0] ... # doctest: +ELLIPSIS ErrorEvent(when=datetime.datetime(...), message='"my error msg"') >>> s.system.clear_errors() >>> s.system.errors() []
codesearchnet
def user(self, email): LOG.info("Fetching user %s", email) user_obj = self.user_collection.find_one({'_id': email}) return user_obj
Fetch a user from the database. Args: email(str) Returns: user_obj(dict)
juraj-google-style
def __init__(self, fn):
    if not callable(fn):
        raise TypeError('Expected a callable object instead of: %r' % fn)
    self._fn = fn
Initializes a PartitionFn object wrapping a callable.

    Args:
      fn: A callable object, which should accept the following arguments:
            element - element to assign to a partition.
            num_partitions - number of output partitions.
          and may accept additional arguments and side inputs.

    Raises:
      TypeError: if fn is not a callable type.
github-repos
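A minimal usage sketch for the callable that this PartitionFn wraps, assuming the standard apache_beam package is available; the pipeline, element values, and partition count below are purely illustrative.

import apache_beam as beam

def by_parity(element, num_partitions):
    # Route even numbers to partition 0 and odd numbers to partition 1.
    return element % num_partitions

with beam.Pipeline() as p:
    evens, odds = (
        p
        | beam.Create([1, 2, 3, 4, 5])
        | beam.Partition(by_parity, 2))  # Beam wraps by_parity in a PartitionFn
    _ = odds | beam.Map(print)  # prints 1, 3, 5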
def find_previous(a, value, index=False, return_distance=False):
    b = a - value
    i = np.where(b > 0)[0][0]
    d = (value - a[i - 1]) / (a[i] - a[i - 1])
    if index:
        if return_distance:
            return i - 1, d
        else:
            return i - 1
    elif return_distance:
        return a[i - 1], d
    else:
        return a[i - 1]
Find the nearest array value, or index of the array value, before some
    given value. Optionally also return the fractional distance of the given
    value from that previous value.

    Args:
      a (ndarray)
      value (float)
      index (bool): whether to return the index instead of the array value.
        Default: False.
      return_distance (bool): whether to return the fractional distance from
        the nearest value to the specified value. Default: False.

    Returns:
      float. The array value (or index, as int) before the specified value.
      If ``return_distance==True`` then a tuple is returned, where the second
      value is the distance.
codesearchnet
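A quick sketch of how find_previous might be called, assuming it is importable alongside numpy; the array values are made up.

import numpy as np

a = np.array([10.0, 20.0, 30.0, 40.0])

# 25.0 falls between 20.0 and 30.0, halfway along that interval.
print(find_previous(a, 25.0))                                    # 20.0
print(find_previous(a, 25.0, index=True, return_distance=True))  # (1, 0.5)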
def bfloat16_activations_var_getter(getter, *args, **kwargs):
    requested_dtype = kwargs['dtype']
    if requested_dtype == tf.bfloat16:
        kwargs['dtype'] = tf.float32
    var = getter(*args, **kwargs)
    if var.dtype.base_dtype != requested_dtype:
        var = tf.cast(var, requested_dtype)
    return var
A custom getter function for float32 parameters and bfloat16 activations.

    Args:
      getter: custom getter
      *args: arguments
      **kwargs: keyword arguments

    Returns:
      variables with the correct dtype.

    Raises:
      KeyError: if "dtype" is not provided as a kwarg.
codesearchnet
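A hedged sketch of wiring this getter into a TF1-style variable scope; the scope name and variable shape are arbitrary, and the tf.compat.v1 graph-mode API is assumed.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # get_variable/variable_scope require graph mode

with tf.variable_scope('layer',
                       custom_getter=bfloat16_activations_var_getter):
    # Stored as a float32 variable, but returned cast to bfloat16.
    w = tf.get_variable('w', shape=[4, 4], dtype=tf.bfloat16)
print(w.dtype)  # <dtype: 'bfloat16'>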
def create_view(self, state_root_hash=None):
    if state_root_hash is None:
        state_root_hash = INIT_ROOT_KEY
    merkle_db = MerkleDatabase(self._database, merkle_root=state_root_hash)
    return StateView(merkle_db)
Creates a StateView for the given state root hash.

    Args:
      state_root_hash (str): The state root hash of the state view to
        return. If None, returns the state view for the initial root key.

    Returns:
      StateView: state view locked to the given root hash.
juraj-google-style
def traverse_data(obj, use_numpy=True, buffers=None):
    if use_numpy and all(isinstance(el, np.ndarray) for el in obj):
        return [transform_array(el, buffers=buffers) for el in obj]
    obj_copy = []
    for item in obj:
        if type(item) is float:
            if math.isnan(item):
                item = 'NaN'
            elif math.isinf(item):
                if item > 0:
                    item = 'Infinity'
                else:
                    item = '-Infinity'
            obj_copy.append(item)
        elif isinstance(item, (list, tuple)):
            obj_copy.append(traverse_data(item))
        else:
            obj_copy.append(item)
    return obj_copy
Recursively traverse an object until a flat list is found.

    If NumPy is available, the flat list is converted to a numpy array and
    passed to transform_array() to handle ``nan``, ``inf``, and ``-inf``.

    Otherwise, iterate through all items, converting non-JSON-compliant
    float values (``nan``, ``inf``, ``-inf``) to their string
    representations.

    Args:
      obj (list) : a list of values or lists
      use_numpy (bool, optional): toggle NumPy as a dependency for testing.
        This argument is only useful for testing (default: True)
codesearchnet
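A small illustration of the non-NumPy path, assuming traverse_data is importable from its module; the nested values are invented.

nested = [1.5, [float('nan'), float('inf')], [-float('inf'), 2.0]]

# Non-finite floats are swapped for JSON-safe strings; nested lists recurse.
print(traverse_data(nested, use_numpy=False))
# [1.5, ['NaN', 'Infinity'], ['-Infinity', 2.0]]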
def recipe_floodlight_monitor(config, auth_read, dcm_account, sheet): floodlight_monitor(config, {'auth': auth_read, 'account': dcm_account, 'template': {'template': {'sheet': 'https:
Monitor floodlight impressions specified in sheet and send email alerts.

    Args:
      auth_read (authentication) - Credentials used for reading data.
      dcm_account (string) - Specify an account_id as a number.
      sheet (string) - Full Name or URL to Google Sheet, Floodlight Monitor
        tab will be added.
github-repos
def from_gpx(gpx_track_point):
    return Point(
        lat=gpx_track_point.latitude,
        lon=gpx_track_point.longitude,
        time=gpx_track_point.time
    )
Creates a point from GPX representation.

    Arguments:
      gpx_track_point (:obj:`gpxpy.GPXTrackPoint`)

    Returns:
      :obj:`Point`
juraj-google-style
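A hedged sketch using gpxpy, assuming from_gpx is exposed as a static or class method on the Point class above; the coordinates and timestamp are illustrative.

import datetime
import gpxpy.gpx

gpx_pt = gpxpy.gpx.GPXTrackPoint(latitude=38.7169, longitude=-9.1399,
                                 time=datetime.datetime(2021, 1, 1, 12, 0))

pt = Point.from_gpx(gpx_pt)
print(pt.lat, pt.lon, pt.time)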
def _transform_cur_commands(cur_commands, alias_table=None):
    transformed = []
    alias_table = alias_table if alias_table else get_alias_table()
    for cmd in cur_commands:
        if cmd in alias_table.sections() and alias_table.has_option(cmd, 'command'):
            transformed += alias_table.get(cmd, 'command').split()
        else:
            transformed.append(cmd)
    cur_commands[:] = transformed
Transform any aliases in cur_commands into their respective commands.

    Args:
      alias_table: The alias table.
      cur_commands: current commands typed in the console.
codesearchnet
def print_info(self, buf=None, format_=FileFormat.yaml, skip_attributes=None,
               include_release=False):
    data = self.validated_data().copy()
    data.pop('config', None)
    if self.config:
        if isinstance(self, Package):
            config_dict = self.data.get('config')
        else:
            config_dict = self.parent.data.get('config')
        data['config'] = config_dict
    if not include_release:
        skip_attributes = list(skip_attributes or []) + list(package_release_keys)
    buf = buf or sys.stdout
    dump_package_data(data, buf=buf, format_=format_,
                      skip_attributes=skip_attributes)
Print the contents of the package.

    Args:
      buf (file-like object): Stream to write to.
      format_ (`FileFormat`): Format to write in.
      skip_attributes (list of str): List of attributes to not print.
      include_release (bool): If True, include release-related attributes,
        such as 'timestamp' and 'changelog'.
codesearchnet
def _subsample_labels(self, label):
    pos_idx, neg_idx = subsample_labels(
        label, self.batch_size_per_image, self.positive_fraction, 0
    )
    label.fill_(-1)
    label.scatter_(0, pos_idx, 1)
    label.scatter_(0, neg_idx, 0)
    return label
Randomly sample a subset of positive and negative examples, and overwrite
    the label vector to the ignore value (-1) for all elements that are not
    included in the sample.

    Args:
      label (Tensor): a vector of -1, 0, 1. Will be modified in-place and
        returned.
github-repos
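A self-contained PyTorch sketch of the fill/scatter pattern above; the index tensors stand in for whatever a sampler such as subsample_labels would return.

import torch

label = torch.tensor([0, 1, 1, 0, 1, 0, 0, 1])
pos_idx = torch.tensor([1, 4])   # pretend sampled positives
neg_idx = torch.tensor([0, 6])   # pretend sampled negatives

label.fill_(-1)                  # everything defaults to "ignore"
label.scatter_(0, pos_idx, 1)    # sampled positives -> 1
label.scatter_(0, neg_idx, 0)    # sampled negatives -> 0
print(label)                     # tensor([ 0,  1, -1, -1,  1, -1,  0, -1])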
def sign(x):
    return math_ops.sign(x)
Element-wise sign.

    Args:
      x: Tensor or variable.

    Returns:
      A tensor.
github-repos
def reconstruct_text(tokens: List[Token]) -> str:
    return ''.join([x.text_with_ws for x in tokens])
Given a list of tokens, reconstruct the original text with as much
    fidelity as possible.

    Args:
      tokens: the Token objects to join; each must expose ``text_with_ws``.

    Returns:
      a string.
codesearchnet
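A hedged round-trip check using spaCy's blank English tokenizer; any token type exposing text_with_ws should behave the same way.

import spacy

nlp = spacy.blank('en')  # tokenizer only, no model download needed
doc = nlp("Hello,   world!  Oddly   spaced text.")

# text_with_ws keeps each token's trailing whitespace, so joining
# round-trips the original string exactly.
assert reconstruct_text(list(doc)) == doc.text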
def initialize_plugs(self, plug_types=None):
    types = plug_types if plug_types is not None else self._plug_types
    for plug_type in types:
        plug_logger = self.logger.getChild(plug_type.__name__)
        if plug_type in self._plugs_by_type:
            continue
        try:
            if not issubclass(plug_type, BasePlug):
                raise InvalidPlugError(
                    'Plug type "%s" is not an instance of BasePlug' % plug_type)
            if plug_type.logger != _LOG:
                raise InvalidPlugError(
                    'Do not override "logger" in your plugs.', plug_type)
            plug_type.logger = plug_logger
            try:
                plug_instance = plug_type()
            finally:
                plug_type.logger = _LOG
            if plug_instance.logger != _LOG:
                raise InvalidPlugError(
                    'Do not set "self.logger" in __init__ in your plugs',
                    plug_type)
            else:
                plug_instance.logger = plug_logger
        except Exception:
            plug_logger.exception(
                'Exception instantiating plug type %s', plug_type)
            self.tear_down_plugs()
            raise
        self.update_plug(plug_type, plug_instance)
Instantiate required plugs.

    Instantiates plug types and saves the instances in self._plugs_by_type
    for use in provide_plugs().

    Args:
      plug_types: Plug types may be specified here rather than passed into
        the constructor (this is used primarily for unit testing phases).
codesearchnet
def Instance(reactor=None):
    if NodeLeader._LEAD is None:
        NodeLeader._LEAD = NodeLeader(reactor)
    return NodeLeader._LEAD
Get the local node instance.

    Args:
      reactor: (optional) custom reactor to use in NodeLeader.

    Returns:
      NodeLeader: instance.
juraj-google-style
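A minimal stand-alone sketch of the lazy singleton pattern this accessor follows, using a toy class instead of the real NodeLeader.

class Leader:
    _LEAD = None

    def __init__(self, reactor=None):
        self.reactor = reactor

    @staticmethod
    def Instance(reactor=None):
        # The first call builds the instance; later calls return the same
        # object and ignore any different reactor argument.
        if Leader._LEAD is None:
            Leader._LEAD = Leader(reactor)
        return Leader._LEAD

assert Leader.Instance() is Leader.Instance()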
def write(self, noautocmd=False):
    cmd = 'noautocmd write' if noautocmd else 'write'
    self._vim.command(cmd)
Writes the file of the current buffer.

    Args:
      noautocmd (bool): If true, write will skip autocommands.

    Todo:
      We should consider whether ``SourceFileInfo`` can replace most usage of
      noautocmd. See #298.
codesearchnet
def __init__(self, types=None, capabilities=None, max_groups1=None,
             max_groups2=None, max_groups3=None, max_groups4=None,
             actions1=None, actions2=None, actions3=None, actions4=None):
    super().__init__()
    self.types = types
    self.capabilities = capabilities
    self.max_groups1 = max_groups1
    self.max_groups2 = max_groups2
    self.max_groups3 = max_groups3
    self.max_groups4 = max_groups4
    self.actions1 = actions1
    self.actions2 = actions2
    self.actions3 = actions3
    self.actions4 = actions4
Create a GroupFeatures with the optional parameters below.

    Args:
      types: Bitmap of OFPGT_* values supported.
      capabilities: Bitmap of OFPGFC_* capability supported.
      max_groups1 to max_groups4: Maximum number of groups for each group
        type.
      actions1 to actions4: Bitmaps of OFPAT_* actions supported for each
        group type.
juraj-google-style
def visit_membership(self, relation: _evaluation.MembershipRelationNode) -> Any:
    lhs_result = self.visit(relation.left)
    rhs_result = self.visit(relation.right)
    in_lhs = lhs_result if isinstance(relation, _evaluation.InNode) else rhs_result
    in_rhs = rhs_result if isinstance(relation, _evaluation.InNode) else lhs_result
    sql_expr = f'({in_lhs.as_operand()})\nIN ({in_rhs.as_operand()})'
    return _sql_data_types.Select(
        select_part=_sql_data_types.RawExpression(
            sql_expr,
            _sql_data_type=_sql_data_types.Boolean,
            _sql_alias='mem_'),
        from_part=None)
Translates a FHIRPath membership relation to Standard SQL.

    For the `IN` relation, the LHS operand is assumed to be a collection of
    a single value. For 'CONTAINS', the RHS operand is assumed to be a
    collection of a single value.

    Args:
      relation: The FHIRPath AST `MembershipRelation` node.

    Returns:
      A compiled Standard SQL expression.
github-repos
def load_checkpoint(ckpt_dir_or_file):
    filename = _get_checkpoint_filename(ckpt_dir_or_file)
    if filename is None:
        raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
                         "given directory %s" % ckpt_dir_or_file)
    return py_checkpoint_reader.NewCheckpointReader(filename)
Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.

    If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
    reader for the latest checkpoint is returned.

    Example usage:

    ```python
    import tensorflow as tf
    a = tf.Variable(1.0)
    b = tf.Variable(2.0)
    ckpt = tf.train.Checkpoint(var_list={'a': a, 'b': b})
    ckpt_path = ckpt.save('tmp-ckpt')
    reader = tf.train.load_checkpoint(ckpt_path)
    print(reader.get_tensor('var_list/a/.ATTRIBUTES/VARIABLE_VALUE'))  # 1.0
    ```

    Args:
      ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
        file.

    Returns:
      `CheckpointReader` object.

    Raises:
      ValueError: If `ckpt_dir_or_file` resolves to a directory with no
        checkpoints.
github-repos
def SetHeaders(self, soap_headers, http_headers):
    self.suds_client.set_options(soapheaders=soap_headers,
                                 headers=http_headers)
Set the headers for the underlying client.

    Args:
      soap_headers: A SOAP element for the SOAP headers.
      http_headers: A dictionary for the http headers.
juraj-google-style
def setup_modules(self, args):

    def _setup_module_thread(module_description):
        """Calls the module's setup() function and sets an Event object for it.

        Args:
          module_description (dict): Corresponding recipe module description.
        """
        new_args = utils.import_args_from_dict(
            module_description['args'], vars(args), self.config)
        module = self._module_pool[module_description['name']]
        try:
            module.setup(**new_args)
        except Exception as error:
            self.add_error(
                'An unknown error occurred: {0!s}\nFull traceback:\n{1:s}'.format(
                    error, traceback.format_exc()),
                critical=True)
        self.events[module_description['name']] = threading.Event()
        self.cleanup()

    threads = []
    for module_description in self.recipe['modules']:
        t = threading.Thread(
            target=_setup_module_thread, args=(module_description,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    self.check_errors(is_global=True)
Performs setup tasks for each module in the module pool.

    Threads declared modules' setup() functions. Takes CLI arguments into
    account when replacing recipe parameters for each module.

    Args:
      args: Command line arguments that will be used to replace the
        parameters declared in the recipe.
codesearchnet
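A stripped-down sketch of the thread-per-module fan-out/join pattern used above, with a sleep standing in for each module's setup() and invented module names.

import threading
import time

def _setup_one(name, events):
    time.sleep(0.1)                        # stands in for module.setup(**new_args)
    events[name] = threading.Event()

events, threads = {}, []
for name in ['collector', 'analyzer', 'exporter']:
    t = threading.Thread(target=_setup_one, args=(name, events))
    threads.append(t)
    t.start()
for t in threads:
    t.join()                               # wait for every setup to finish
print(sorted(events))                      # ['analyzer', 'collector', 'exporter']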
def haversine(px, py, r=r_mm):
    lat1, lon1 = px
    lat2, lon2 = py
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    lat1 = math.radians(lat1)
    lat2 = math.radians(lat2)
    a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
    c = 2 * math.asin(math.sqrt(a))
    return c * r
Calculate the haversine distance between two points defined by (lat, lon)
    tuples.

    Args:
      px ((float, float)): lat/long position 1
      py ((float, float)): lat/long position 2
      r (float): Radius of sphere

    Returns:
      (int): Distance in mm.
juraj-google-style
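A quick sanity check with illustrative coordinates; the millimetre Earth radius below is an assumption standing in for the module's r_mm default.

EARTH_R_MM = 6371.0088 * 1000 * 1000    # mean Earth radius in millimetres

d_mm = haversine((51.5074, -0.1278), (48.8566, 2.3522), r=EARTH_R_MM)
print(round(d_mm / 1e6), 'km')          # roughly 344 km (London to Paris)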
def __init__(self, exit_node: tensor_lib.Tensor, pfor_ops: List[ops.Operation],
             fallback_to_while_loop: bool, pfor_config: 'PForConfig'):
    self._fallback_to_while_loop = fallback_to_while_loop
    self._pfor_config = pfor_config
    self._pfor_ops = set(pfor_ops)
    self._pfor_op_ids = set(x._id for x in pfor_ops)
    assert isinstance(exit_node, tensor_lib.Tensor)
    self._while_context = exit_node.op._get_control_flow_context()
    assert isinstance(self._while_context, control_flow_ops.WhileContext)
    self._context_name = self._while_context.name
    self._condition = self._while_context.pivot.op.inputs[0]
    self._is_inside_loop = self.op_is_inside_loop(self._condition.op)
    if self._is_inside_loop:
        for e in self._while_context.loop_exits:
            assert self.op_is_inside_loop(e.op)
    self._exit_switches = []
    self._body_outputs = []
    self._next_iter_control_inputs = []
    self._enter_merges = []
    self._outputs = []
    self._enters = []
    self._direct_enters = []
    for e in self._while_context.loop_exits:
        self._outputs.append(e.op.outputs[0])
        switch = e.op.inputs[0].op
        assert switch.type == 'Switch', switch
        self._exit_switches.append(switch)
        merge = switch.inputs[0].op
        assert merge.type == 'Merge', merge
        self._enter_merges.append(merge)
        enter = merge.inputs[0].op
        assert enter.type == 'Enter', enter
        self._enters.append(enter.outputs[0])
        next_iter = merge.inputs[1].op
        assert next_iter.type == 'NextIteration', next_iter
        self._body_outputs.append(next_iter.inputs[0])
        self._next_iter_control_inputs.append(next_iter.control_inputs)
    self._is_stateful = False
    for op in ops.get_default_graph().get_operations():
        control_flow_context = op._get_control_flow_context()
        if control_flow_context is None:
            continue
        if control_flow_context.name == self._context_name:
            self._is_stateful |= _is_stateful_pfor_op(op)
            if op.type == 'Enter':
                output = op.outputs[0]
                if output not in self._enters:
                    if output.dtype in (dtypes.resource, dtypes.variant):
                        if output not in self._direct_enters:
                            self._direct_enters.append(output)
                    else:
                        self._enters.append(output)
Initializer.

    Args:
      exit_node: A tensor output from the while_loop.
      pfor_ops: list of ops inside the current pfor loop.
      fallback_to_while_loop: If True, fallback to while loop when conversion
        of an op is not supported.
      pfor_config: PForConfig object used while constructing loop body.
github-repos
def __init__(self, key_value_pairs):
    self._dict = OrderedDict()
    for key, value in key_value_pairs:
        if key not in self._dict:
            self._dict[key] = []
        self._dict[key].append(value)
    for key, value in iteritems(self._dict):
        grouping = Grouping(key, value)
        self._dict[key] = grouping
    super(Lookup, self).__init__(self._dict)
Construct a Lookup with a sequence of (key, value) tuples.

    Args:
      key_value_pairs: An iterable over 2-tuples each containing a key,
        value pair.
juraj-google-style
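A hedged usage sketch for the Lookup constructor above; the exact iteration and indexing behaviour depends on the base mapping class, but the grouping of repeated keys is the point being illustrated.

pairs = [('fruit', 'apple'), ('veg', 'carrot'), ('fruit', 'pear')]
lookup = Lookup(pairs)

# Values sharing a key are collected into a single Grouping,
# preserving first-seen key order.
for key in lookup:
    print(key, list(lookup[key]))
# fruit ['apple', 'pear']
# veg ['carrot']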
def clone(self, *args, **overrides):
    clone = super(Layout, self).clone(*args, **overrides)
    clone._max_cols = self._max_cols
    return clone
Clones the Layout, overriding data and parameters.

    Args:
      data: New data replacing the existing data
      shared_data (bool, optional): Whether to use existing data
      new_type (optional): Type to cast object to
      *args: Additional arguments to pass to constructor
      **overrides: New keyword arguments to pass to constructor

    Returns:
      Cloned Layout object
codesearchnet