Columns: code (string, lengths 20–4.93k) · docstring (string, lengths 33–1.27k) · source (string, 3 classes)
def _split_input_from_namespace(cls, app, namespace, entity_kind, shard_count): raw_entity_kind = cls._get_raw_entity_kind(entity_kind) if shard_count == 1: return [key_range.KeyRange(namespace=namespace, _app=app)] ds_query = datastore.Query(kind=raw_entity_kind, namespace=namespace, _app=app, keys_only=True) ds_query.Order("__scatter__") random_keys = ds_query.Get(shard_count * cls._OVERSAMPLING_FACTOR) if not random_keys: return ([key_range.KeyRange(namespace=namespace, _app=app)] + [None] * (shard_count - 1)) random_keys.sort() if len(random_keys) >= shard_count: random_keys = cls._choose_split_points(random_keys, shard_count) key_ranges = [] key_ranges.append(key_range.KeyRange( key_start=None, key_end=random_keys[0], direction=key_range.KeyRange.ASC, include_start=False, include_end=False, namespace=namespace, _app=app)) for i in range(0, len(random_keys) - 1): key_ranges.append(key_range.KeyRange( key_start=random_keys[i], key_end=random_keys[i+1], direction=key_range.KeyRange.ASC, include_start=True, include_end=False, namespace=namespace, _app=app)) key_ranges.append(key_range.KeyRange( key_start=random_keys[-1], key_end=None, direction=key_range.KeyRange.ASC, include_start=True, include_end=False, namespace=namespace, _app=app)) if len(key_ranges) < shard_count: key_ranges += [None] * (shard_count - len(key_ranges)) return key_ranges
Helper for _split_input_from_params. If there are not enough Entities to make all of the given shards, the returned list of KeyRanges will include Nones. The returned list will contain KeyRanges ordered lexicographically with any Nones appearing at the end. Args: app: the app. namespace: the namespace. entity_kind: entity kind as string. shard_count: the number of shards. Returns: KeyRange objects.
juraj-google-style
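The sharding routine above delegates split-point selection to a `_choose_split_points` helper that the record does not include. A minimal sketch of how such a helper might pick evenly spaced keys out of the oversampled, sorted `__scatter__` sample (the exact implementation is an assumption):

def _choose_split_points(sorted_keys, shard_count):
    # Hypothetical helper: choose (shard_count - 1) split keys spaced
    # evenly through the oversampled sample of scatter keys, yielding
    # shard_count key ranges once the boundaries are applied.
    assert len(sorted_keys) >= shard_count
    index_stride = len(sorted_keys) / float(shard_count)
    return [sorted_keys[int(round(index_stride * i))] for i in range(1, shard_count)]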
def files_comments_edit(self, *, comment: str, file: str, id: str, **kwargs) -> SlackResponse: kwargs.update({'comment': comment, 'file': file, 'id': id}) return self.api_call('files.comments.edit', json=kwargs)
Edit an existing file comment. Args: comment (str): The body of the comment. e.g. 'Everyone should take a moment to read this file.' file (str): The file id. e.g. 'F1234467890' id (str): The file comment id. e.g. 'Fc1234567890'
codesearchnet
def _read_git_tags( default_version=DEFAULT_VERSION, git_command=('git', 'tag'), ): try: current_tags = check_output(git_command).splitlines() except Exception: raise if not current_tags[0]: warnings.warn( 'Unable to resolve current version', exceptions.ProsperDefaultVersionWarning) return default_version latest_version = semantic_version.Version(default_version) for tag in current_tags: tag_str = decode(tag, 'utf-8').replace('v', '') try: tag_ver = semantic_version.Version(tag_str) except Exception: continue if tag_ver > latest_version: latest_version = tag_ver return str(latest_version)
Tries to find the current git tag. Notes: git_command exposed for testing the null case Args: default_version (str): version to fall back to if no tag can be resolved git_command (:obj:`list`): subprocess command Returns: str: latest version found, or default Warns: exceptions.ProsperDefaultVersionWarning: git version not found
juraj-google-style
def _req(self, req): logger.debug('DUT> %s', req) self._log and self.pause() times = 3 res = None while times: times = times - 1 try: self._sendline(req) self._expect(req) line = None res = [] while True: line = self._readline() logger.debug('Got line %s', line) if line == 'Done': break if line: res.append(line) break except: logger.exception('Failed to send command') self.close() self._init() self._log and self.resume() return res
Send command and wait for response. The command will be retried up to 3 times in case of data loss on the serial port. Args: req (str): Command to send; do not include a trailing newline. Returns: [str]: The output lines
juraj-google-style
def get_oauth_data(self, code, client_id, client_secret, state): request = self._get_request() response = request.post(self.OAUTH_TOKEN_URL, {'state': state, 'code': code, 'grant_type': 'authorization_code', 'client_id': client_id, 'client_secret': client_secret}) return HSAccessTokenAuth.from_response(response)
Get OAuth data from HelloSign Args: code (str): Code returned by HelloSign to our callback URL client_id (str): Client id of the associated app client_secret (str): Secret token of the associated app state (str): The state parameter associated with the OAuth request Returns: A HSAccessTokenAuth object
codesearchnet
def click_nowait(self, pattern, action='click', desc=None, **match_kwargs): point = self.match(pattern, **match_kwargs) if ((not point) or (not point.matched)): return None func = getattr(self, action) func(*point.pos) return point
Return immediately if no image found Args: - pattern (str or Pattern): filename or an opencv image object. - action (str): click or long_click Returns: Click point or None
codesearchnet
def Run(self, request, global_params=None): config = self.GetMethodConfig('Run') return self._RunMethod(config, request, global_params=global_params)
Runs a `BuildTrigger` at a particular source revision. Args: request: (CloudbuildProjectsTriggersRunRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Operation) The response message.
github-repos
def _GetMergeTaskStorageFilePath(self, task): filename = '{0:s}.plaso'.format(task.identifier) return os.path.join(self._merge_task_storage_path, filename)
Retrieves the path of a task storage file in the merge directory. Args: task (Task): task. Returns: str: path of the task storage file in the merge directory.
codesearchnet
def filter_dict(d, exclude): ret = {} for key, value in d.items(): if key not in exclude: ret.update({key: value}) return ret
Return a new dict with specified keys excluded from the original dict Args: d (dict): original dict exclude (list): The keys that are excluded
juraj-google-style
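A quick usage sketch for the `filter_dict` record above:

d = {'host': 'localhost', 'port': 8080, 'password': 'hunter2'}
public = filter_dict(d, exclude=['password'])
assert public == {'host': 'localhost', 'port': 8080}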
def crt(self, mp, mq): u = (((mq - mp) * self.p_inverse) % self.q) return (mp + (u * self.p))
The Chinese Remainder Theorem as needed for decryption. Returns the solution modulo n=pq. Args: mp(int): the solution modulo p. mq(int): the solution modulo q.
codesearchnet
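A worked example of the recombination in the `crt` record above, using toy primes in place of the surrounding class (p, q and p_inverse = p^-1 mod q are normally instance attributes):

p, q = 5, 7
p_inverse = pow(p, -1, q)          # 3, since 5 * 3 % 7 == 1 (Python 3.8+)
m = 23                             # value to recover, 0 <= m < n = p * q = 35
mp, mq = m % p, m % q              # 3 and 2
u = ((mq - mp) * p_inverse) % q    # 4
assert mp + u * p == m             # CRT recombines the residues into m mod n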
def isholiday(self, date): date = parsefun(date) if self.holidays: i = bisect.bisect_left(self.holidays, date) if i == 0 and date < self.holidays[0]: warn('Holiday list exhausted at start, ' 'isholiday(%s) output may be incorrect.' % date) elif i == len(self.holidays): warn('Holiday list exhausted at end, ' 'isholiday(%s) output may be incorrect.' % date) elif self.holidays[i] == date: return True return False
Check if a given date is a holiday. Args: date (date, datetime or str): Date to be checked. Returns: bool: True if the date is a holiday, False otherwise.
juraj-google-style
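The bisect-based membership test in `isholiday` generalizes to any sorted list; the core check in isolation:

import bisect

def in_sorted(sorted_items, value):
    # bisect_left finds the insertion point; a hit means the item there matches.
    i = bisect.bisect_left(sorted_items, value)
    return i < len(sorted_items) and sorted_items[i] == value

assert in_sorted([1, 3, 5], 3)
assert not in_sorted([1, 3, 5], 4)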
def count_function(function: _evaluation.CountFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select: del function, params_result if operand_result is None: raise ValueError('count() cannot be called without an operand.') if operand_result.from_part is None: return _sql_data_types.Select(select_part=_sql_data_types.CountCall((_sql_data_types.RawExpression(operand_result.sql_alias, _sql_data_type=operand_result.sql_data_type),)), from_part=str(operand_result.to_subquery()), where_part=operand_result.where_part, sql_dialect=_sql_data_types.SqlDialect.SPARK) else: return dataclasses.replace(operand_result, select_part=_sql_data_types.CountCall((operand_result.select_part,)))
Returns an integer representing the number of elements in a collection. By default, `_CountFunction` will return 0. Args: function: The FHIRPath AST `CountFunction` node operand_result: The expression which is being evaluated params_result: The parameter passed in to function Returns: A compiled Spark SQL expression. Raises: ValueError: When the function is called without an operand
github-repos
def _get_music_services_data(cls): if (cls._music_services_data is not None): return cls._music_services_data result = {} root = XML.fromstring(cls._get_music_services_data_xml().encode('utf-8')) services = root.findall('Service') for service in services: result_value = service.attrib.copy() name = service.get('Name') result_value['Name'] = name auth_element = service.find('Policy') auth = auth_element.attrib result_value.update(auth) presentation_element = service.find('.//PresentationMap') if (presentation_element is not None): result_value['PresentationMapUri'] = presentation_element.get('Uri') result_value['ServiceID'] = service.get('Id') service_type = str(((int(service.get('Id')) * 256) + 7)) result_value['ServiceType'] = service_type result[service_type] = result_value cls._music_services_data = result return result  # NOTE: the find() path was truncated during extraction; './/PresentationMap' is a reconstruction
Parse raw account data XML into a useful Python data structure. Returns: dict: Each key is a service_type, and each value is a `dict` containing relevant data.
codesearchnet
def AddWeight(self, path_segment_index, weight): if (path_segment_index not in self._weight_per_index): raise ValueError('Path segment index not set.') self._weight_per_index[path_segment_index] += weight if (weight not in self._indexes_per_weight): self._indexes_per_weight[weight] = [] self._indexes_per_weight[weight].append(path_segment_index)
Adds a weight for a specific path segment index. Args: path_segment_index: an integer containing the path segment index. weight: an integer containing the weight. Raises: ValueError: if the path segment weights do not contain the path segment index.
codesearchnet
def Get(self, request, global_params=None): config = self.GetMethodConfig('Get') return self._RunMethod(config, request, global_params=global_params)
Returns information about a previously requested build. The `Build` that is returned includes its status (such as `SUCCESS`, `FAILURE`, or `WORKING`), and timing information. Args: request: (CloudbuildProjectsBuildsGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Build) The response message.
github-repos
async def inspect(self, service_id: str) -> Mapping[str, Any]: response = await self.docker._query_json( "services/{service_id}".format(service_id=service_id), method="GET" ) return response
Inspect a service Args: service_id: ID or name of the service Returns: a dict with info about a service
juraj-google-style
def test(x, y, regex_expr=False): return matches(x, y, regex_expr=regex_expr) if isregex(x) else equal(x, y)
Compares two values based on regular expression matching or strict equality comparison. Arguments: x (regex|str): string or regular expression to test. y (str): value to match. regex_expr (bool): enables regex string based expression matching. Raises: AssertionError: in case of matching error. Returns: bool
juraj-google-style
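A usage sketch for the `test` record above (`isregex`, `matches` and `equal` come from the surrounding module):

import re
test(re.compile(r'^foo'), 'foobar')   # dispatches to regex matching
test('foobar', 'foobar')              # dispatches to strict equality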
def authenticate(self, username, password): if self.config.get('LDAP_BIND_DIRECT_CREDENTIALS'): result = self.authenticate_direct_credentials(username, password) elif ((not self.config.get('LDAP_ALWAYS_SEARCH_BIND')) and (self.config.get('LDAP_USER_RDN_ATTR') == self.config.get('LDAP_USER_LOGIN_ATTR'))): result = self.authenticate_direct_bind(username, password) else: result = self.authenticate_search_bind(username, password) return result
An abstracted authentication method. Decides whether to perform a direct bind or a search bind based upon the login attribute configured in the config. Args: username (str): Username of the user to bind password (str): User's password to bind with. Returns: AuthenticationResponse
codesearchnet
def evaluate_tensor_slice(tensor, tensor_slicing): _ = tensor if not validate_slicing_string(tensor_slicing): raise ValueError('Invalid tensor-slicing string.') return tensor[_parse_slices(tensor_slicing)]
Call eval on the slicing of a tensor, with validation. Args: tensor: (numpy ndarray) The tensor value. tensor_slicing: (str or None) Slicing of the tensor, e.g., "[:, 1]". If None, no slicing will be performed on the tensor. Returns: (numpy ndarray) The sliced tensor. Raises: ValueError: If tensor_slicing is not a valid numpy ndarray slicing str.
github-repos
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): super(CheckResponsePayload, self).read(input_stream, kmip_version=kmip_version) local_stream = utils.BytearrayStream(input_stream.read(self.length)) if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream): self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER) self._unique_identifier.read(local_stream, kmip_version=kmip_version) if self.is_tag_next(enums.Tags.USAGE_LIMITS_COUNT, local_stream): self._usage_limits_count = primitives.LongInteger(tag=enums.Tags.USAGE_LIMITS_COUNT) self._usage_limits_count.read(local_stream, kmip_version=kmip_version) if self.is_tag_next(enums.Tags.CRYPTOGRAPHIC_USAGE_MASK, local_stream): self._cryptographic_usage_mask = primitives.Integer(tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK) self._cryptographic_usage_mask.read(local_stream, kmip_version=kmip_version) if self.is_tag_next(enums.Tags.LEASE_TIME, local_stream): self._lease_time = primitives.Interval(tag=enums.Tags.LEASE_TIME) self._lease_time.read(local_stream, kmip_version=kmip_version) self.is_oversized(local_stream)
Read the data encoding the Check response payload and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is missing from the encoded payload.
codesearchnet
def Analyze(self, hashes): hash_analyses = [] for digest in hashes: json_response = self._QueryHash(digest) hash_analysis = interface.HashAnalysis(digest, json_response) hash_analyses.append(hash_analysis) return hash_analyses
Looks up hashes in Viper using the Viper HTTP API. Args: hashes (list[str]): hashes to look up. Returns: list[HashAnalysis]: hash analysis. Raises: RuntimeError: If no host has been set for Viper.
juraj-google-style
def get_explanation_dict(self, entry): centry = self.process_entry(entry) if (centry is None): uncorrected_energy = entry.uncorrected_energy corrected_energy = None else: uncorrected_energy = centry.uncorrected_energy corrected_energy = centry.energy d = {'compatibility': self.__class__.__name__, 'uncorrected_energy': uncorrected_energy, 'corrected_energy': corrected_energy} corrections = [] corr_dict = self.get_corrections_dict(entry) for c in self.corrections: cd = {'name': str(c), 'description': c.__doc__.split('Args')[0].strip(), 'value': corr_dict.get(str(c), 0)} corrections.append(cd) d['corrections'] = corrections return d
Provides an explanation dict of the corrections that are being applied for a given compatibility scheme. Inspired by the "explain" methods in many database methodologies. Args: entry: A ComputedEntry. Returns: (dict) of the form {"Compatibility": "string", "Uncorrected_energy": float, "Corrected_energy": float, "Corrections": [{"Name of Correction": {"Value": float, "Explanation": "string"}}]}
codesearchnet
def _generate_matrix(self, hash_bytes): half_columns = ((self.columns // 2) + (self.columns % 2)) cells = (self.rows * half_columns) matrix = [([False] * self.columns) for _ in range(self.rows)] for cell in range(cells): if self._get_bit(cell, hash_bytes[1:]): column = (cell // self.rows) row = (cell % self.rows) matrix[row][column] = True matrix[row][((self.columns - column) - 1)] = True return matrix  # NOTE: the '//' floor-division operators were stripped during extraction and have been reconstructed
Generates matrix that describes which blocks should be coloured. Arguments: hash_bytes - List of hash byte values for which the identicon is being generated. Each element of the list should be an integer from 0 to 255. Returns: List of rows, where each element in a row is boolean. True means the foreground colour should be used, False means a background colour should be used.
codesearchnet
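The matrix builder above only computes bits for the left half of each row and mirrors them; the symmetry trick in isolation, independent of the hashing:

columns = 5
row = [False] * columns
for column in (0, 1):                  # colour only the left half...
    row[column] = True
    row[columns - column - 1] = True   # ...and mirror onto the right half
assert row == [True, True, False, True, True]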
def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, top_k: int=100): out_logits, out_bbox = (outputs.logits, outputs.pred_boxes) if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') prob = out_logits.sigmoid() prob = prob.view(out_logits.shape[0], -1) k_value = min(top_k, prob.size(1)) topk_values, topk_indexes = torch.topk(prob, k_value, dim=1) scores = topk_values topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor') labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({'scores': score, 'labels': label, 'boxes': box}) return results
Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. top_k (`int`, *optional*, defaults to 100): Keep only top k bounding boxes before filtering by thresholding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model.
github-repos
def clear_values(self, red=0.0, green=0.0, blue=0.0, alpha=0.0, depth=1.0): self.clear_color = (red, green, blue, alpha) self.clear_depth = depth
Sets the clear values for the window buffer. Args: red (float): red component green (float): green component blue (float): blue component alpha (float): alpha component depth (float): depth value
codesearchnet
def __init__(self, outputs): self._outputs = self._wrap_and_check_outputs(outputs, self._SINGLE_OUTPUT_DEFAULT_NAME, error_label='Prediction')
Constructor for PredictOutput. Args: outputs: A `Tensor` or a dict of string to `Tensor` representing the predictions. Raises: ValueError: if the outputs is not dict, or any of its keys are not strings, or any of its values are not `Tensor`s.
github-repos
def unlock_swarm(self, key): if isinstance(key, dict): if ('UnlockKey' not in key): raise errors.InvalidArgument('Invalid unlock key format') else: key = {'UnlockKey': key} url = self._url('/swarm/unlock') res = self._post_json(url, data=key) self._raise_for_status(res) return True
Unlock a locked swarm. Args: key (string): The unlock key as provided by :py:meth:`get_unlock_key` Raises: :py:class:`docker.errors.InvalidArgument` If the key argument is in an incompatible format :py:class:`docker.errors.APIError` If the server returns an error. Returns: `True` if the request was successful. Example: >>> key = client.get_unlock_key() >>> client.unlock_swarm(key)
codesearchnet
def convert_variables_to_constants_from_session_graph(session, graph_def, output_node_names, variable_names_allowlist=None, variable_names_denylist=None): graph_def, _ = _replace_variables_by_constants(converter_data=_SessionConverterData(session=session, graph_def=graph_def, output_node_names=output_node_names, variable_names_allowlist=variable_names_allowlist, variable_names_denylist=variable_names_denylist)) return graph_def
Replaces all the variables in a graph with constants of the same values. This function works similarly to convert_variables_to_constants_v2, but it retrieves the constant values from a Session instead of from a ConcreteFunction. This is useful when converting graphs generated from TensorFlow V1, where ConcreteFunctions are not available. This also differs from graph_util.convert_variables_to_constants in that it supports resource variables when V2 control flow constructions are present. Args: session: Active TensorFlow session containing the variables. graph_def: A GraphDef to convert. output_node_names: List of name strings for the result nodes of the graph. variable_names_allowlist: The set of variable names to convert (by default, all variables are converted). variable_names_denylist: The set of variable names to omit converting to constants. Returns: An optimized GraphDef.
github-repos
def get_sessions(self, app_path=None): if app_path is not None: return self._tornado.get_sessions(app_path) all_sessions = [] for path in self._tornado.app_paths: all_sessions += self._tornado.get_sessions(path) return all_sessions
Gets all currently active sessions for applications. Args: app_path (str, optional) : The configured application path for the application to return sessions for. If None, return active sessions for all applications. (default: None) Returns: list[ServerSession]
juraj-google-style
def on_test_end(self, logs=None): logs = self._process_logs(logs) for callback in self.callbacks: callback.on_test_end(logs)
Calls the `on_test_end` methods of its callbacks. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.
github-repos
def refund(request, invoice_id): current_invoice = InvoiceController.for_id_or_404(invoice_id) try: current_invoice.refund() messages.success(request, "This invoice has been refunded.") except ValidationError as ve: messages.error(request, ve) return redirect("invoice", invoice_id)
Marks an invoice as refunded and requests a credit note for the full amount paid against the invoice. This view requires a login, and the logged in user must be staff. Arguments: invoice_id (castable to int): The ID of the invoice to refund. Returns: redirect: Redirects to ``invoice``.
juraj-google-style
def symm_reduce(self, coords_set, threshold=1e-06): surf_sg = SpacegroupAnalyzer(self.slab, 0.1) symm_ops = surf_sg.get_symmetry_operations() unique_coords = [] coords_set = [self.slab.lattice.get_fractional_coords(coords) for coords in coords_set] for coords in coords_set: incoord = False for op in symm_ops: if in_coord_list_pbc(unique_coords, op.operate(coords), atol=threshold): incoord = True break if (not incoord): unique_coords += [coords] return [self.slab.lattice.get_cartesian_coords(coords) for coords in unique_coords]
Reduces the set of adsorbate sites by finding and removing symmetrically equivalent duplicates Args: coords_set: coordinate set in Cartesian coordinates threshold: tolerance for distance equivalence, used as input to in_coord_list_pbc for duplicate checking
codesearchnet
def element_or_none(self, using, value): try: return self._execute(Command.FIND_CHILD_ELEMENT, { 'using': using, 'value': value }) except: return None
Find a child element within the current element. Support: Android iOS Web(WebView) Args: using(str): The element location strategy. value(str): The value of the location strategy. Returns: The Element if it exists, None otherwise. Raises: WebDriverException.
juraj-google-style
def _SetFieldType(self, field_proto, field_desc, package, scope): if field_proto.type_name: desc = self._GetTypeFromScope(package, field_proto.type_name, scope) else: desc = None if (not field_proto.HasField('type')): if isinstance(desc, descriptor.Descriptor): field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE else: field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type) if ((field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE) or (field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP)): field_desc.message_type = desc if (field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM): field_desc.enum_type = desc if (field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED): field_desc.has_default_value = False field_desc.default_value = [] elif field_proto.HasField('default_value'): field_desc.has_default_value = True if ((field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE) or (field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT)): field_desc.default_value = float(field_proto.default_value) elif (field_proto.type == descriptor.FieldDescriptor.TYPE_STRING): field_desc.default_value = field_proto.default_value elif (field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL): field_desc.default_value = (field_proto.default_value.lower() == 'true') elif (field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM): field_desc.default_value = field_desc.enum_type.values_by_name[field_proto.default_value].number elif (field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES): field_desc.default_value = text_encoding.CUnescape(field_proto.default_value) else: field_desc.default_value = int(field_proto.default_value) else: field_desc.has_default_value = False if ((field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE) or (field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT)): field_desc.default_value = 0.0 elif (field_proto.type == descriptor.FieldDescriptor.TYPE_STRING): field_desc.default_value = u'' elif (field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL): field_desc.default_value = False elif (field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM): field_desc.default_value = field_desc.enum_type.values[0].number elif (field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES): field_desc.default_value = b'' else: field_desc.default_value = 0 field_desc.type = field_proto.type
Sets the field's type, cpp_type, message_type and enum_type. Args: field_proto: Data about the field in proto format. field_desc: The descriptor to modify. package: The package the field's container is in. scope: Enclosing scope of available types.
codesearchnet
def equals(self, rhs): for comparator in self._comparators: if comparator.equals(rhs): return True return False
Checks whether any Comparator is equal to rhs. Args: rhs: can be anything Returns: bool
codesearchnet
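The `equals` record above iterates over comparator objects that each expose their own `equals`; a minimal hypothetical comparator to illustrate the pattern:

class Contains:
    # Hypothetical comparator: matches when rhs contains the needle.
    def __init__(self, needle):
        self.needle = needle

    def equals(self, rhs):
        return self.needle in rhs

# With self._comparators = [Contains('foo'), Contains('bar')],
# equals('a foo b') returns True as soon as one comparator matches.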
def __init__(self, default: typing.Any, values: typing.List[typing.Any], frozen: bool=False): if not isinstance(values, list) or not values: raise ValueError(f'Values for Enum should be a non-empty list. Found {values!r}.') if MISSING_VALUE != default and default not in values: raise ValueError(f'Enum default value {default!r} is not in candidate list {values!r}.') value_type = None for v in values: if v is None: continue if value_type is None: value_type = type(v) else: next_type = type(v) if issubclass(value_type, next_type): value_type = next_type elif not issubclass(next_type, value_type): value_type = None break is_noneable = any([v is None for v in values]) if value_type is not None and issubclass(value_type, str): value_type = str self._values = values super().__init__(value_type, default, is_noneable=is_noneable, frozen=frozen)
Constructor. Args: default: default value for this spec. values: all acceptable values. frozen: If True, values other than the default value are not acceptable.
github-repos
def signature(self, name, file_name, file_type, file_content, owner=None, **kwargs): return Signature(self.tcex, name, file_name, file_type, file_content, owner=owner, **kwargs)
Create the Signature TI object. Args: name: The name of the signature. file_name: The file name of the signature. file_type: The type of the signature file. file_content: The content of the signature file. owner: The owner of the signature (optional). **kwargs: Additional keyword arguments passed to Signature. Return: A Signature object.
codesearchnet
def _maybe_read_file(filename): try: with open(filename) as infile: return infile.read() except IOError as e: if e.errno == errno.ENOENT: return None
Read the given file, if it exists. Args: filename: A path to a file. Returns: A string containing the file contents, or `None` if the file does not exist.
juraj-google-style
def load_caffe(model_desc, model_file): with change_env('GLOG_minloglevel', '2'): import caffe caffe.set_mode_cpu() net = caffe.Net(model_desc, model_file, caffe.TEST) param_dict = CaffeLayerProcessor(net).process() logger.info("Model loaded from caffe. Params: " + ", ".join(sorted(param_dict.keys()))) return param_dict
Load a caffe model. You must be able to ``import caffe`` to use this function. Args: model_desc (str): path to caffe model description file (.prototxt). model_file (str): path to caffe model parameter file (.caffemodel). Returns: dict: the parameters.
juraj-google-style
def variable_dtype(self): return self._variable_dtype
The variable dtype of this policy. This is the dtype layers will create their variables in, unless a layer explicitly chooses a different dtype. If this is different than `Policy.compute_dtype`, Layers will cast variables to the compute dtype to avoid type errors. Variable regularizers are run in the variable dtype, not the compute dtype. Returns: The variable dtype of this policy, as a string.
github-repos
def delete(self, key): data = None if (key is not None): data = self.db.delete(key.strip()) else: self.tcex.log.warning(u'The key field was None.') return data
Delete method of CRUD operation for all data types. Args: key (string): The key to delete from the DB. Returns: (string): Result of the DB delete operation.
codesearchnet
def unpack(self, buff, offset=0): super().unpack(buff, offset) self.version = self._version_ihl.value >> 4 self.ihl = self._version_ihl.value & 15 self.dscp = self._dscp_ecn.value >> 2 self.ecn = self._dscp_ecn.value & 3 self.length = self.length.value self.identification = self.identification.value self.flags = self._flags_offset.value >> 13 self.offset = self._flags_offset.value & 8191 self.ttl = self.ttl.value self.protocol = self.protocol.value self.checksum = self.checksum.value self.source = self.source.value self.destination = self.destination.value if self.ihl > 5: options_size = (self.ihl - 5) * 4 self.data = self.options.value[options_size:] self.options = self.options.value[:options_size] else: self.data = self.options.value self.options = b''
Unpack a binary struct into this object's attributes. Return the values instead of the lib's basic types. Args: buff (bytes): Binary buffer. offset (int): Where to begin unpacking. Raises: :exc:`~.exceptions.UnpackException`: If unpack fails.
juraj-google-style
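The version/IHL and flags/offset fields unpacked above are packed bit fields; the same shifts and masks applied to raw values:

version_ihl = 0x45            # typical first byte of an IPv4 header
version = version_ihl >> 4    # 4
ihl = version_ihl & 15        # 5 (header length in 32-bit words)

flags_offset = 0x4000         # "Don't Fragment" bit set, fragment offset 0
flags = flags_offset >> 13    # 2
offset = flags_offset & 8191  # 0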
def PrintMessage(self, message): fields = message.ListFields() if self.use_index_order: fields.sort(key=lambda x: x[0].index) for field, value in fields: if _IsMapEntry(field): for key in sorted(value): entry_submsg = field.message_type._concrete_class( key=key, value=value[key]) self.PrintField(field, entry_submsg) elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: for element in value: self.PrintField(field, element) else: self.PrintField(field, value)
Convert protobuf message to text format. Args: message: The protocol buffers message.
juraj-google-style
def __init__(self, strategy, cluster_spec, task_type, task_id, session_config=None, rpc_layer='grpc', worker_barrier=None): self._strategy = strategy self._cluster_spec = cluster_spec self._task_type = task_type self._task_id = task_id self._session_config = session_config self._worker_barrier = worker_barrier self._rpc_layer = rpc_layer self._master_target = self._get_master_target() self._num_workers = _get_num_workers(cluster_spec) self._is_chief_node = self._is_chief()
Initialize the worker context object. Args: strategy: a `DistributionStrategy` object. cluster_spec: a ClusterSpec object. It can be empty or None in the local training case. task_type: a string indicating the role of the corresponding task, such as "worker" or "ps". It can be None if it is local training or in-graph replicated training. task_id: an integer indicating id of the corresponding task. It can be None if it is local training or in-graph replicated training. session_config: an optional `tf.compat.v1.ConfigProto` object. rpc_layer: optional string specifying the RPC protocol for communication with worker masters. If None or empty, hosts in the `cluster_spec` will be used directly. worker_barrier: optional, the barrier object for worker synchronization.
github-repos
async def _multipart(self, files_dict): boundary = bytes(_BOUNDARY, self.encoding) hder_format = 'Content-Disposition: form-data; name="{}"' hder_format_io = '; filename="{}"' multip_pkg = b'' num_of_parts = len(files_dict) for index, kv in enumerate(files_dict.items(), start=1): multip_pkg += (b'--' + boundary + b'\r\n') k, v = kv try: pkg_body = await self._file_manager(v) multip_pkg += bytes(hder_format.format(k) + hder_format_io.format(basename(v)), self.encoding) mime_type = mimetypes.guess_type(basename(v)) if not mime_type[1]: mime_type = 'application/octet-stream' else: mime_type = '/'.join(mime_type) multip_pkg += bytes('; Content-Type: ' + mime_type, self.encoding) multip_pkg += b'\r\n'*2 + pkg_body except (TypeError, FileNotFoundError): pkg_body = bytes(v, self.encoding) + b'\r\n' multip_pkg += bytes(hder_format.format(k) + '\r\n'*2, self.encoding) multip_pkg += pkg_body if index == num_of_parts: multip_pkg += b'--' + boundary + b'--\r\n' return multip_pkg
Forms multipart requests from a dict with name, path k/vs. Name does not have to be the actual file name. Args: files_dict (dict): A dict of `filename:filepath`s, to be sent as multipart files. Returns: multip_pkg (str): The string representation of the content body, multipart formatted.
juraj-google-style
def download(self, location, local_dir='.'): self.logger.debug('Getting S3 info') bucket = self.info['bucket'] prefix = self.info['prefix'] self.logger.debug('Connecting to S3') s3conn = self.client location = location.strip('/') self.logger.debug('Downloading contents') objects = s3conn.list_objects(Bucket=bucket, Prefix=(prefix+'/'+location)) if 'Contents' not in objects: raise ValueError('Download target {}/{}/{} was not found or inaccessible.'.format(bucket, prefix, location)) for s3key in objects['Contents']: key = s3key['Key'] if not key or key.endswith('/'): continue filepath = key.replace(prefix+'/'+location, '', 1).lstrip('/') filename = key.split('/')[-1] file_dir = filepath.split('/')[:-1] file_dir = '/'.join(file_dir) full_dir = os.path.join(local_dir, file_dir) if not os.path.isdir(full_dir): os.makedirs(full_dir) s3conn.download_file(bucket, key, os.path.join(full_dir, filename)) self.logger.debug('Done!')
Download content from bucket/prefix/location. Location can be a directory or a file (e.g., my_dir or my_dir/my_image.tif) If location is a directory, all files in the directory are downloaded. If it is a file, then that file is downloaded. Args: location (str): S3 location within prefix. local_dir (str): Local directory where file(s) will be stored. Defaults to the current directory.
juraj-google-style
def _comparison(self, op, value): if (not self._indexed): raise datastore_errors.BadFilterError(('Cannot query for unindexed property %s' % self._name)) from .query import FilterNode if (value is not None): value = self._do_validate(value) value = self._call_to_base_type(value) value = self._datastore_type(value) return FilterNode(self._name, op, value)
Internal helper for comparison operators. Args: op: The operator ('=', '<' etc.). value: The value to compare against. Returns: A FilterNode instance representing the requested comparison.
codesearchnet
def handle_config_change(self, new_config): if self.user_handler: self.user_handler(self.current_config, new_config) self._call_spec_handlers(new_config) self.current_config = copy.deepcopy(new_config)
Handle the new configuration. Args: new_config (dict): The new configuration
codesearchnet
def transform_python_types(self, obj): if is_datetime_type(obj): return convert_datetime_type(obj) if is_timedelta_type(obj): return convert_timedelta_type(obj) elif isinstance(obj, slice): return dict(start=obj.start, stop=obj.stop, step=obj.step) elif np.issubdtype(type(obj), np.floating): return float(obj) elif np.issubdtype(type(obj), np.integer): return int(obj) elif np.issubdtype(type(obj), np.bool_): return bool(obj) elif isinstance(obj, decimal.Decimal): return float(obj) elif rd and isinstance(obj, rd.relativedelta): return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours, minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds) else: return super(BokehJSONEncoder, self).default(obj)
Handle special scalars such as (Python, NumPy, or Pandas) datetimes, or Decimal values. Args: obj (obj) : The object to encode. Anything not specifically handled in this method is passed on to the default system JSON encoder.
juraj-google-style
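Because the encoder above extends the standard JSON encoder (the trailing super() call suggests its default() delegates to transform_python_types), it can plug straight into json.dumps; a usage sketch assuming NumPy is installed:

import json
import numpy as np

# np.float32 and np.int32 are not native float/int subclasses, so the
# encoder's type conversions kick in.
json.dumps({'x': np.float32(1.5), 'n': np.int32(7)}, cls=BokehJSONEncoder)
# '{"x": 1.5, "n": 7}'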
def set_iprouting(self, value=None, default=False, disable=False): if (value is False): disable = True cmd = self.command_builder('ip routing', value=value, default=default, disable=disable) return self.configure(cmd)
Configures the state of global ip routing EosVersion: 4.13.7M Args: value(bool): True if ip routing should be enabled or False if ip routing should be disabled default (bool): Controls the use of the default keyword disable (bool): Controls the use of the no keyword Returns: bool: True if the commands completed successfully otherwise False
codesearchnet
def as_list(self, label=1, **kwargs): label_to_use = (label if (self.mode == 'classification') else self.dummy_label) ans = self.domain_mapper.map_exp_ids(self.local_exp[label_to_use], **kwargs) ans = [(x[0], float(x[1])) for x in ans] return ans
Returns the explanation as a list. Args: label: desired label. If you ask for a label for which an explanation wasn't computed, will throw an exception. Will be ignored for regression explanations. kwargs: keyword arguments, passed to domain_mapper Returns: list of tuples (representation, weight), where representation is given by domain_mapper. Weight is a float.
codesearchnet
def run_foreach_or_conditional(self, context): logger.debug("starting") if self.foreach_items: self.foreach_loop(context) else: self.run_conditional_decorators(context) logger.debug("done")
Run the foreach sequence or the conditional evaluation. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate.
juraj-google-style
def generate_sample_set(self, tags=None): if isinstance(tags, str): tags = [tags] md5_list = self.data_store.tag_match(tags) return self.store_sample_set(md5_list)
Generate a sample_set that matches the tags, or all samples if tags are not specified. Args: tags: Match samples against this tag list (or all if not specified) Returns: The sample_set of those samples matching the tags
juraj-google-style
def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: default_to_square = True if 'shortest_edge' in size: size = size['shortest_edge'] default_to_square = False elif 'height' in size and 'width' in size: size = (size['height'], size['width']) else: raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred.
github-repos
async def start(self, name="websocket_client"): self._con = await websockets.connect(self.url) self._connection_task = self._loop.add_task(self._manage_connection(), name=name)
Connect to the websocket server. This method will spawn a background task in the designated event loop that will run until stop() is called. You can control the name of the background task for debugging purposes using the name parameter. The name is not used in anyway except for debug logging statements. Args: name (str): Optional name for the background task.
juraj-google-style
def get_list(self, key, is_optional=False, is_secret=False, is_local=False, default=None, options=None): def parse_list(v): parts = v.split(',') results = [] for part in parts: part = part.strip() if part: results.append(part) return results return self._get_typed_value(key=key, target_type=list, type_convert=parse_list, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options)
Get the value corresponding to the key and convert comma separated values to a list. Args: key: the dict key. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `list`: the value corresponding to the key, split on commas.
codesearchnet
def split_input(cls, mapper_spec): params = _get_params(mapper_spec) shard_count = mapper_spec.shard_count start_time = params[cls.START_TIME_PARAM] end_time = params[cls.END_TIME_PARAM] seconds_per_shard = (end_time - start_time) / shard_count shards = [] for _ in xrange(shard_count - 1): params[cls.END_TIME_PARAM] = (params[cls.START_TIME_PARAM] + seconds_per_shard) shards.append(LogInputReader(**params)) params[cls.START_TIME_PARAM] = params[cls.END_TIME_PARAM] params[cls.END_TIME_PARAM] = end_time return shards + [LogInputReader(**params)]
Returns a list of input readers for the given input specification. Args: mapper_spec: The MapperSpec for this InputReader. Returns: A list of InputReaders.
juraj-google-style
def reset(self, history=None): if not history: history = dict() self.episode_rewards = history.get("episode_rewards", list()) self.episode_timesteps = history.get("episode_timesteps", list()) self.episode_times = history.get("episode_times", list())
Resets the Runner's internal stats counters. If history is empty, use default values in history.get(). Args: history (dict): A dictionary containing an already run experiment's results. Keys should be: episode_rewards (list of rewards), episode_timesteps (lengths of episodes), episode_times (run-times)
juraj-google-style
def lstsq(A, b): A = asarray(A, float) if A.ndim == 1: A = A[:, newaxis] if A.shape[1] == 1: return dot(A.T, b) / squeeze(dot(A.T, A)) rcond = finfo(double).eps * max(*A.shape) return npy_lstsq(A, b, rcond=rcond)[0]
r"""Return the least-squares solution to a linear matrix equation. Args: A (array_like): Coefficient matrix. b (array_like): Ordinate values. Returns: :class:`numpy.ndarray`: Least-squares solution.
juraj-google-style
def prepare_capstone(syntax=AsmSyntax.att, target=None): if not HAVE_CAPSTONE: raise NotImplementedError('pwnypack requires capstone to disassemble to AT&T and Intel syntax') if target is None: target = pwnypack.target.target if target.arch == pwnypack.target.Target.Arch.x86: if target.bits is pwnypack.target.Target.Bits.bits_32: md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32) else: md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64) elif target.arch == pwnypack.target.Target.Arch.arm: mode = 0 if target.bits is pwnypack.target.Target.Bits.bits_32: arch = capstone.CS_ARCH_ARM if target.mode & pwnypack.target.Target.Mode.arm_thumb: mode = capstone.CS_MODE_THUMB else: mode = capstone.CS_MODE_ARM if target.mode & pwnypack.target.Target.Mode.arm_m_class: mode |= capstone.CS_MODE_MCLASS if target.mode & pwnypack.target.Target.Mode.arm_v8: mode |= capstone.CS_MODE_V8 else: arch = capstone.CS_ARCH_ARM64 if target.endian is pwnypack.target.Target.Endian.little: mode |= capstone.CS_MODE_LITTLE_ENDIAN else: mode |= capstone.CS_MODE_BIG_ENDIAN md = capstone.Cs(arch, mode) else: raise NotImplementedError('Only x86 and ARM are currently supported.') md.skipdata = True if syntax is AsmSyntax.att: md.syntax = capstone.CS_OPT_SYNTAX_ATT elif syntax is AsmSyntax.intel: md.skipdata_setup(('db', None, None)) else: raise NotImplementedError('capstone engine only implements AT&T and Intel syntax.') return md
Prepare a capstone disassembler instance for a given target and syntax. Args: syntax(AsmSyntax): The assembler syntax (Intel or AT&T). target(~pwnypack.target.Target): The target to create a disassembler instance for. The global target is used if this argument is ``None``. Returns: An instance of the capstone disassembler. Raises: NotImplementedError: If the specified target isn't supported.
juraj-google-style
def market_if_touched(self, accountID, **kwargs): return self.create(accountID, order=MarketIfTouchedOrderRequest(**kwargs))
Shortcut to create a MarketIfTouched Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a MarketIfTouchedOrderRequest Returns: v20.response.Response containing the results from submitting the request
codesearchnet
def Items(self, key): with self._mutex: if key not in self._buckets: raise KeyError('Key %s was not found in Reservoir' % key) bucket = self._buckets[key] return bucket.Items()
Return items associated with given key. Args: key: The key for which we are finding associated items. Raises: KeyError: If the key is not found in the reservoir. Returns: [list, of, items] associated with that key.
juraj-google-style
def prepend(self, key, value, expire=0, noreply=None): if (noreply is None): noreply = self.default_noreply return self._store_cmd(b'prepend', {key: value}, expire, noreply)[key]
The memcached "prepend" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True.
codesearchnet
def _resolve_subkeys(key, separator='.'): subkey = None if (separator in key): index = key.index(separator) subkey = key[(index + 1):] key = key[:index] return (key, subkey)
Given a key which may actually be a nested key, return the top level key and any nested subkeys as separate values. Args: key (str): A string that may or may not contain the separator. separator (str): The namespace separator. Defaults to `.`. Returns: Tuple[str, str]: The key and subkey(s).
codesearchnet
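Usage sketch for `_resolve_subkeys` above — only the first separator splits, the rest stays in the subkey:

assert _resolve_subkeys('retry.initial') == ('retry', 'initial')
assert _resolve_subkeys('retry.initial.max') == ('retry', 'initial.max')
assert _resolve_subkeys('timeout') == ('timeout', None)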
def _result_type_impl(*arrays_and_dtypes): promo_safety_mode = ops.get_dtype_conversion_mode() valid_arrays_and_dtypes = [] for inp in arrays_and_dtypes: if inp is not None: if _is_acceptable_input_type(inp): valid_arrays_and_dtypes.append(inp) else: raise NotImplementedError(f'Auto dtype conversion semantics does not support {type(inp)} type.') dtypes_and_is_weak = [_get_dtype_and_weakness(x) for x in nest.flatten(valid_arrays_and_dtypes)] if not dtypes_and_is_weak: dtypes_and_is_weak = [(dtypes.float32, True)] res = dtypes_and_is_weak[0] for arg in dtypes_and_is_weak[1:]: res = (res[0].base_dtype, res[1]) arg = (arg[0].base_dtype, arg[1]) try: res_next, allowed_mode = _BINARY_DTYPE_RES_FULL[res][arg] except KeyError as exc: raise NotImplementedError(f'Implicit Conversion between {res[0]} and {arg[0]} is not allowed. Please convert the input manually if you need to.') from exc if allowed_mode.value > promo_safety_mode.value: raise TypeError(f'In promotion mode {promo_safety_mode}, implicit dtype promotion between ({res[0]}, weak={res[1]}) and ({arg[0]}, weak={arg[1]}) is disallowed. You need to explicitly specify the dtype in your op, or relax your dtype promotion rules (such as from SAFE mode to ALL mode).') res = res_next return res
Internal implementation of jnp_style_result_type. Args: *arrays_and_dtypes: A list of Tensors, Variables, NumPy arrays or python numbers. Returns: The result promotion type from all the inputs. Raises: TypeError: when the promotion between the input dtypes is disabled in the current mode NotImplementedError: (1) When arrays_and_dtypes contains an unsupported input type (e.g. RaggedTensor). (2) When there isn't a possible promotion for the input dtypes.
github-repos
def roles(self): if (not self.__roles): self.__roles = Roles(self.__connection) return self.__roles
Gets the Roles API client. Returns: Roles:
codesearchnet
def result_to_dict(raw_result): result = {} for channel_index, channel in enumerate(raw_result): channel_id, channel_name = channel[0], channel[1] channel_result = { 'id': channel_id, 'name': channel_name, 'movies': [] } for movie in channel[2]: channel_result['movies'].append({ 'title': movie[1], 'start_time': datetime.fromtimestamp(movie[2]), 'end_time': datetime.fromtimestamp(movie[2] + movie[3]), 'inf': True if movie[3] else False, }) result[channel_id] = channel_result return result
Parse raw result from fetcher into readable dictionary Args: raw_result (dict) - raw data from `fetcher` Returns: dict - readable dictionary
juraj-google-style
def VerifyStructure(self, parser_mediator, lines): return (re.match(self._VERIFICATION_REGEX, lines) or re.match(self._CHROMEOS_VERIFICATION_REGEX, lines)) is not None
Verifies that this is a syslog-formatted file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. lines (str): one or more lines from the text file. Returns: bool: True if this is the correct parser, False otherwise.
juraj-google-style
def intersect_one_round(candidates, intersections): next_candidates = [] for (first, second) in candidates: both_linearized = False if (first.__class__ is Linearization): if (second.__class__ is Linearization): both_linearized = True bbox_int = bbox_intersect(first.curve.nodes, second.curve.nodes) else: bbox_int = bbox_line_intersect(second.nodes, first.start_node, first.end_node) elif (second.__class__ is Linearization): bbox_int = bbox_line_intersect(first.nodes, second.start_node, second.end_node) else: bbox_int = bbox_intersect(first.nodes, second.nodes) if (bbox_int == BoxIntersectionType.DISJOINT): continue elif ((bbox_int == BoxIntersectionType.TANGENT) and (not both_linearized)): tangent_bbox_intersection(first, second, intersections) continue if both_linearized: from_linearized(first, second, intersections) continue lin1 = six.moves.map(Linearization.from_shape, first.subdivide()) lin2 = six.moves.map(Linearization.from_shape, second.subdivide()) next_candidates.extend(itertools.product(lin1, lin2)) return next_candidates
Perform one step of the intersection process. .. note:: This is a helper for :func:`_all_intersections` and that function has a Fortran equivalent. Checks if the bounding boxes of each pair in ``candidates`` intersect. If the bounding boxes do not intersect, the pair is discarded. Otherwise, the pair is "accepted". Then we attempt to linearize each curve in an "accepted" pair and track the overall linearization error for every curve encountered. Args: candidates (Union[list, itertools.chain]): An iterable of pairs of curves (or linearized curves). intersections (list): A list of already encountered intersections. If any intersections can be readily determined during this round of subdivision, then they will be added to this list. Returns: list: Returns a list of the next round of ``candidates``.
codesearchnet
def load_maps(maps_dir): maps_dir = os.path.abspath(maps_dir) maps = {} for (root, dirnames, filenames) in os.walk(maps_dir): for filename in filenames: if filename.endswith('.xml'): xml_file = os.path.join(root, filename) map = MapSource.from_xml(xml_file, maps_dir) if (map.id in maps): raise MapSourceException('duplicate map id: {} in file {}'.format(map.id, xml_file)) else: maps[map.id] = map return maps
Load all xml map sources from a given directory. Args: maps_dir: path to directory to search for maps Returns: dict of MapSource:
codesearchnet
def split(node, stack): node, defined, reaching = _fix(node) node = store_state(node, reaching, defined, stack) anno.clearanno(node) return node
Carry over the state from the primal to the adjoint. Args: node: A module with the primal and adjoint function definitions as returned by `reverse_ad`. stack: The stack node to use for storing and restoring state. Returns: func: A `Module` node with two function definitions containing the primal and adjoint respectively.
juraj-google-style
def _decode_filename(base_filename, problem_name, decode_hp): if decode_hp.shards > 1: base_filename = _add_shard_to_filename(base_filename, decode_hp) if ("beam{beam}.alpha{alpha}.decodes".format( beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha)) in base_filename): return base_filename else: return ( "{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes".format( base=base_filename, model=FLAGS.model, hp=FLAGS.hparams_set, problem=problem_name, beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha)))
Generates decode filename. Args: base_filename: A string, base of the decode filename. problem_name: A string, name of the problem. decode_hp: HParams for decoding. Returns: A string, produced decode filename.
juraj-google-style
def send_result_email(self, sender=None): status = 'successful' if self.was_aborted: status = 'aborted' app_id = os.environ['APPLICATION_ID'] shard_index = app_id.find('~') if shard_index != -1: app_id = app_id[shard_index+1:] param_dict = { 'status': status, 'app_id': app_id, 'class_path': self._class_path, 'pipeline_id': self.root_pipeline_id, 'base_path': '%s.appspot.com%s' % (app_id, self.base_path), } subject = ('Pipeline %(status)s: App "%(app_id)s", %(class_path)s' '#%(pipeline_id)s') % param_dict body = ('View the pipeline results here: http://%(base_path)s/status?root=%(pipeline_id)s' % param_dict) html = ('<html><body>%s</body></html>' % body) if sender is None: sender = '%s@%s.appspotmail.com' % (app_id, app_id) try: self._send_mail(sender, subject, body, html=html) except (mail.InvalidSenderError, mail.InvalidEmailError): logging.warning('Could not send result email for ' 'root pipeline ID "%s" from sender "%s"', self.root_pipeline_id, sender)  # NOTE: the subject/body/html string literals were stripped during extraction; the templates above are an approximate reconstruction
Sends an email to admins indicating this Pipeline has completed. For developer convenience. Automatically called from finalized for root Pipelines that do not override the default action. Args: sender: (optional) Override the sender's email address.
juraj-google-style
def validate(data): text = data.get('text') if not isinstance(text, _string_types) or len(text) == 0: raise ValueError('text field is required and should not be empty') if 'markdown' in data and not type(data['markdown']) is bool: raise ValueError('markdown field should be bool') if 'attachments' in data: if not isinstance(data['attachments'], (list, tuple)): raise ValueError('attachments field should be list or tuple') for attachment in data['attachments']: if 'text' not in attachment and 'title' not in attachment: raise ValueError('text or title is required in attachment') return True
Validates incoming data Args: data(dict): the incoming data Returns: True if the data is valid Raises: ValueError: the data is not valid
juraj-google-style
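A usage sketch for the `validate` record above:

validate({'text': 'hello', 'markdown': True})        # True
# validate({'text': ''})                             # raises ValueError: empty text
# validate({'text': 'hi', 'attachments': [{}]})      # raises ValueError: no text/title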
async def getPropNorm(self, prop, valu): pobj = self.model.prop(prop) if pobj is None: raise s_exc.NoSuchProp(mesg=f'The property {prop} does not exist.', prop=prop) norm, info = pobj.type.norm(valu) return norm, info
Get the normalized property value based on the Cortex data model. Args: prop (str): The property to normalize. valu: The value to normalize. Returns: (tuple): A two item tuple, containing the normed value and the info dictionary. Raises: s_exc.NoSuchProp: If the prop does not exist. s_exc.BadTypeValu: If the value fails to normalize.
juraj-google-style
def mobility(sdat, tstart=None, tend=None): tseries = sdat.tseries_between(tstart, tend) steps = sdat.steps[tseries.index[0]:tseries.index[(- 1)]] time = [] mob = [] for step in steps.filter(rprof=True): time.append(step.timeinfo['t']) mob.append((step.rprof.iloc[(- 1)].loc['vrms'] / step.timeinfo['vrms'])) return (np.array(mob), np.array(time))
Plates mobility. Compute the ratio vsurf / vrms. Args: sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance. tstart (float): time at which the computation should start. Use the beginning of the time series data if set to None. tend (float): time at which the computation should end. Use the end of the time series data if set to None. Returns: tuple of :class:`numpy.array`: mobility and time arrays.
codesearchnet
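A usage sketch assuming stagpy is installed; the run path is a placeholder.

from stagpy.stagyydata import StagyyData

sdat = StagyyData('path/to/run')  # placeholder path to a StagYY run
mob, time = mobility(sdat, tstart=0.0)
# mob[i] is vsurf / vrms at time[i]; values near 1 suggest a mobile lid.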
def _create_centerline(self):
    border = array(self.__densify_border())
    vor = Voronoi(border)
    vertex = vor.vertices
    lst_lines = []
    for ridge in vor.ridge_vertices:
        if -1 not in ridge:
            line = LineString([
                (vertex[ridge[0]][0] + self._minx,
                 vertex[ridge[0]][1] + self._miny),
                (vertex[ridge[1]][0] + self._minx,
                 vertex[ridge[1]][1] + self._miny)])
            if line.within(self._input_geom) and len(line.coords[0]) > 1:
                lst_lines.append(line)
    nr_lines = len(lst_lines)
    if nr_lines < 2:
        raise RuntimeError(
            'Number of produced ridges is too small: {}, this might be '
            'caused by too large interpolation distance.'.format(nr_lines))
    return unary_union(lst_lines)
Calculate the centerline of a polygon. Densifies the border of a polygon which is then represented by a Numpy array of points necessary for creating the Voronoi diagram. Once the diagram is created, the ridges located within the polygon are joined and returned. Returns: a union of lines that are located within the polygon.
codesearchnet
def process_data(data, number_to_keep):
    result = dict()
    if number_to_keep != 0:
        data_temp = dict(Counter(data).most_common(number_to_keep))
        data_temp['rest'] = sum(data.values()) - sum(data_temp.values())
        data = data_temp
    labels = data
    values = np.array([data[key] for key in labels], dtype=float)
    pvalues = values / sum(values)
    for position, label in enumerate(labels):
        result[label] = round(pvalues[position], 5)
    return result
Prepare received data for representation. Args: data (dict): values to represent (ex. {'001' : 130}) number_to_keep (int): number of elements to show individually. Returns: dict: processed data to show.
codesearchnet
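A worked example keeping the two most common outcomes; the remaining 15 counts are folded into 'rest' before normalization.

counts = {'001': 130, '010': 70, '100': 10, '111': 5}
print(process_data(counts, 2))
# {'001': 0.60465, '010': 0.32558, 'rest': 0.06977}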
def ManuallyScheduleClients(self, token=None): client_ids = set() for flow_request in self.args.flows: for client_id in flow_request.client_ids: client_ids.add(client_id) self.StartClients(self.session_id, client_ids, token=token)
Schedule all flows without using the Foreman. Since we know all the client ids to run on we might as well just schedule all the flows and wait for the results. Args: token: A datastore access token.
codesearchnet
def select_symbols(self, symbols, ret_list=False): symbols = list_strings(symbols) exclude = symbols[0].startswith("-") if exclude: if not all(s.startswith("-") for s in symbols): raise ValueError("When excluding symbols, all strings must start with `-`") symbols = [s[1:] for s in symbols] symbols = set(symbols) pseudos = [] for p in self: if exclude: if p.symbol in symbols: continue else: if p.symbol not in symbols: continue pseudos.append(p) if ret_list: return pseudos else: return self.__class__(pseudos)
Return a :class:`PseudoTable` with the pseudopotentials with the given list of chemical symbols. Args: symbols: str or list of symbols Prepend the symbol string with "-", to exclude pseudos. ret_list: if True a list of pseudos is returned instead of a :class:`PseudoTable`
juraj-google-style
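A usage sketch, assuming `table` is an existing PseudoTable; the symbols are illustrative.

si_o = table.select_symbols(['Si', 'O'])      # keep only Si and O pseudos
no_si = table.select_symbols(['-Si'])         # everything except Si
as_list = table.select_symbols('Si', ret_list=True)  # plain list of pseudos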
def recursion_error(self, repeated_parser: str): if self.finished: return super().recursion_error(repeated_parser) else: line_index, character_index, line, pointer = self.current_line() return 'Infinite recursion detected in {}; empty string was matched and will be matched forever\n' \ 'Line {}, character {}\n\n{}{}'.format(repeated_parser, line_index, character_index, line, pointer)
Generate an error to indicate that infinite recursion was encountered. A parser can supply a representation of itself to this method and the reader will supply the context, including the location where the parser stalled. Args: repeated_parser: A representation of the repeated parser Returns: A full error message
juraj-google-style
def absolute_proportions(proportions, count): relative_sum = sum(proportions.values()) absolute_proportions = {idx: int(count / relative_sum * prop_value) for idx, prop_value in proportions.items()} absolute_sum = sum(absolute_proportions.values()) rest_value = count - absolute_sum subset_keys = sorted(list(proportions.keys())) for i in range(rest_value): key = subset_keys[i % len(subset_keys)] absolute_proportions[key] += 1 return absolute_proportions
Split a given integer into n parts according to len(proportions) so they
sum up to count and match the given proportions.

Args:
    proportions (dict): Dict of proportions, with an identifier as key.
    count (int): The total amount to split up.

Returns:
    dict: Dictionary with absolute proportions and same identifiers as key.

Example::

    >>> absolute_proportions({'train': 0.5, 'test': 0.5}, 100)
    {'train': 50, 'test': 50}
juraj-google-style
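Another worked example showing how a remainder is handled: the leftover unit is handed out over the sorted keys, so 'a' receives it first.

print(absolute_proportions({'a': 0.5, 'b': 0.25, 'c': 0.25}, 101))
# {'a': 51, 'b': 25, 'c': 25}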
def _ExtractContentSettingsExceptions(self, exceptions_dict, parser_mediator):
    for permission in exceptions_dict:
      if permission not in self._EXCEPTIONS_KEYS:
        continue

      exception_dict = exceptions_dict.get(permission, {})
      for urls, url_dict in exception_dict.items():
        last_used = url_dict.get('last_used', None)
        if not last_used:
          continue

        primary_url, secondary_url = urls.split(',')

        event_data = ChromeContentSettingsExceptionsEventData()
        event_data.permission = permission
        event_data.primary_url = primary_url
        event_data.secondary_url = secondary_url

        timestamp = int(last_used * 1000000)
        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
            timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts site specific events. Args: exceptions_dict (dict): Permission exceptions data from Preferences file. parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs.
codesearchnet
def filter_all_reachable_leaves_many(self, identifier_filters, language, forbidden_identifiers=None):
    for i, identifier_filter in enumerate(identifier_filters):
        if len(identifier_filter) == 1 and not isinstance(identifier_filter[0], list):
            identifier_filters[i] = [identifier_filter]
    item_identifiers = [
        identifier[1:] if identifier.startswith('-') else identifier
        for identifier_filter in identifier_filters
        for identifier in set(flatten(identifier_filter))
    ]
    if forbidden_identifiers is None:
        forbidden_identifiers = []
    for identifier in forbidden_identifiers:
        item_identifiers.append(identifier)
    translated = self.translate_identifiers(item_identifiers, language)
    forbidden_item_ids = {translated[identifier] for identifier in forbidden_identifiers}
    leaves = self.get_leaves({translated[i] for i in item_identifiers}, language=language, forbidden_item_ids=forbidden_item_ids)
    result = []
    for identifier_filter in identifier_filters:
        if len(identifier_filter) == 0:
            result.append(self.get_all_available_leaves(language=language, forbidden_item_ids=forbidden_item_ids))
            continue
        filter_result = None
        filter_neg_result = set()
        for inner_filter in identifier_filter:
            inner_result = None
            inner_neg_result = None
            if len(inner_filter) == 0:
                raise Exception('Empty nested filters are not allowed.')
            for identifier in inner_filter:
                if inner_neg_result is not None:
                    raise Exception('Nested filters can not contain multiple statements.')
                if identifier.startswith('-'):
                    inner_neg_result = set(leaves[translated[identifier[1:]]])
                else:
                    if inner_result is None:
                        inner_result = set()
                    inner_result |= set(leaves[translated[identifier]])
            if inner_result is not None:
                if filter_result is None:
                    filter_result = inner_result
                else:
                    filter_result &= inner_result
            if inner_neg_result is not None:
                filter_neg_result |= inner_neg_result
        result.append(sorted(list(filter_result - filter_neg_result)))
    return result
Provides the same functionality as
.. py:method:: ItemManager.filter_all_reachable_leaves(), but for multiple
filters at the same time.

Args:
    identifier_filters: list of identifier filters
    language (str): language used for further filtering (some objects
        for different languages share the same item)
    forbidden_identifiers (list): identifiers of items to exclude from
        the result (default None)

Returns:
    list: list of lists of item ids, one per filter
juraj-google-style
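A shape-only sketch with hypothetical identifiers: inner lists are ANDed together, identifiers within an inner list are ORed, a '-' prefix subtracts leaves, and an empty filter returns all available leaves.

filters = [
    [['math_addition', 'math_subtraction']],  # leaves under either topic
    [['math'], ['-math_geometry']],           # math minus geometry
    [],                                       # all available leaves
]
per_filter_leaves = item_manager.filter_all_reachable_leaves_many(
    filters, 'en', forbidden_identifiers=['deprecated_topic'])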
def change_numbering(self, rename_dict, inplace=False): output = self if inplace else self.copy() new_index = [rename_dict.get(key, key) for key in self.index] output.index = new_index if not inplace: return output
Return the reindexed version of Cartesian.

Args:
    rename_dict (dict): A dictionary mapping integers on integers.
    inplace (bool): If True, rename this instance in place and return
        None; otherwise return a renamed copy.

Returns:
    Cartesian: A renamed copy according to the dictionary passed.
juraj-google-style
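A usage sketch assuming `molecule` is an existing Cartesian; the indices are illustrative.

renamed = molecule.change_numbering({0: 1, 1: 0})       # swap atoms 0 and 1
molecule.change_numbering({0: 1, 1: 0}, inplace=True)   # or rename in place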
def get_session(self, app_path, session_id): if app_path not in self._applications: raise ValueError("Application %s does not exist on this server" % app_path) return self._applications[app_path].get_session(session_id)
Get an active a session by name application path and session ID. Args: app_path (str) : The configured application path for the application to return a session for. session_id (str) : The session ID of the session to retrieve. Returns: ServerSession
juraj-google-style
def check(self, cell): pass
Check correctness against a single Jupyter cell.

Args:
    cell: JSON representation of a single cell.

Returns:
    None if the test succeeds; raises an exception if the test fails.
github-repos
class RandomUniform(RandomInitializer): def __init__(self, minval=-0.05, maxval=0.05, seed=None): self.minval = minval self.maxval = maxval super().__init__(seed=seed) def __call__(self, shape, dtype=None): return random.uniform(shape=shape, minval=self.minval, maxval=self.maxval, seed=self.seed, dtype=dtype) def get_config(self): base_config = super().get_config() config = {'minval': self.minval, 'maxval': self.maxval} return {**base_config, **config}
Random uniform initializer. Draws samples from a uniform distribution for given parameters. Examples: >>> # Standalone usage: >>> initializer = RandomUniform(minval=0.0, maxval=1.0) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = RandomUniform(minval=0.0, maxval=1.0) >>> layer = Dense(3, kernel_initializer=initializer) Args: minval: A python scalar or a scalar keras tensor. Lower bound of the range of random values to generate (inclusive). maxval: A python scalar or a scalar keras tensor. Upper bound of the range of random values to generate (exclusive). seed: A Python integer or instance of `keras.backend.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or `None` (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance of `keras.backend.SeedGenerator`.
github-repos
def __init__(self, name, row_identifier): super(SQLTableIdentifier, self).__init__() self.name = name self.row_identifier = row_identifier
Initializes a SQL table attribute container identifier. Args: name (str): name of the table. row_identifier (int): unique identifier of the row in the table.
juraj-google-style
def run_filter_query(self, resource_name, filter_clause): url = self.base_url + "/" + resource_name params = {"filter":json.dumps(filter_clause)} r = requests.get(url, headers=self.headers, params=params) logger.debug("requests.get result r.status_code: {}".format(r.status_code)) ClueApiClient._check_request_response(r) return r.json()
Run a query (GET) against the CLUE API, using the API and user key
fields of self and the filter_clause provided.

Args:
    resource_name: str - name of the resource / collection to query -
        e.g. genes, perts, cells etc.
    filter_clause: dictionary - contains the filter to pass to the API;
        uses the loopback specification

Returns:
    list of dictionaries containing the results of the query
juraj-google-style
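A hypothetical call; the collection and field names depend on the CLUE resource being queried.

genes = client.run_filter_query(
    'genes', {'where': {'gene_symbol': 'STAT3'}, 'limit': 10})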
def optimizer(name):
    warn_msg = ('Please update `registry.optimizer` callsite '
                '(likely due to a `HParams.optimizer` value)')
    if name == 'SGD':
        name = 'sgd'
        tf.logging.warning("'SGD' optimizer now keyed by 'sgd'. %s" % warn_msg)
    elif name == 'RMSProp':
        name = 'rms_prop'
        tf.logging.warning(
            "'RMSProp' optimizer now keyed by 'rms_prop'. %s" % warn_msg)
    else:
        snake_name = misc_utils.camelcase_to_snakecase(name)
        if name != snake_name:
            tf.logging.warning(
                'optimizer names now keyed by snake_case names. %s' % warn_msg)
            name = snake_name
    return Registries.optimizers[name]
Get pre-registered optimizer keyed by name. `name` should be snake case, though SGD -> sgd, RMSProp -> rms_prop and UpperCamelCase -> snake_case conversions included for legacy support. Args: name: name of optimizer used in registration. This should be a snake case identifier, though others supported for legacy reasons. Returns: optimizer
codesearchnet
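A short sketch of the legacy-name handling; 'Adafactor' is just an illustrative registered name.

opt = optimizer('sgd')        # canonical snake_case key
opt = optimizer('SGD')        # legacy alias, logs a warning
opt = optimizer('Adafactor')  # camelcase converted to 'adafactor', warns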
def install(pkg, target='LocalSystem', store=False, allow_untrusted=False):
    if '*.' not in pkg:
        pkg = _quote(pkg)
    target = _quote(target)

    cmd = 'installer -pkg {0} -target {1}'.format(pkg, target)

    if store:
        cmd += ' -store'
    if allow_untrusted:
        cmd += ' -allowUntrusted'

    python_shell = False
    if '*.' in cmd:
        python_shell = True

    return __salt__['cmd.run_all'](cmd, python_shell=python_shell)
Install a pkg file Args: pkg (str): The package to install target (str): The target in which to install the package to store (bool): Should the package be installed as if it was from the store? allow_untrusted (bool): Allow the installation of untrusted packages? Returns: dict: A dictionary containing the results of the installation CLI Example: .. code-block:: bash salt '*' macpackage.install test.pkg
codesearchnet
def transition_state(self, new_state): if self.state == _InstrumentationBlockStates.UNKNOWN: self.state = new_state return self else: next_block = _InstrumentationBlock(state=new_state, prefix=self.prefix, previous_instrumentation_block=self) if self.status_code in _InstrumentationStatusCodeCategories.TIMING: next_block.begin_time = self.begin_time return next_block
Transitions or sets the current instrumentation block to the new parser state. Args: new_state: _InstrumentationBlockStates, the state that the parser should transition to. Returns: A new instrumentation block set to the new state, representing the start of parsing a new instrumentation test method. Alternatively, if the current instrumentation block represents the start of parsing a new instrumentation block (state UNKNOWN), then this returns the current instrumentation block set to the now known parsing state.
github-repos
def fn_args(fn): if isinstance(fn, functools.partial): args = fn_args(fn.func) args = [a for a in args[len(fn.args):] if a not in (fn.keywords or [])] else: if _is_callable_object(fn): fn = fn.__call__ args = tf_inspect.getfullargspec(fn).args if _is_bound_method(fn) and args: args.pop(0) return tuple(args)
Get argument names for function-like object. Args: fn: Function, or function-like object (e.g., result of `functools.partial`). Returns: `tuple` of string argument names. Raises: ValueError: if partial function has positionally bound arguments
github-repos
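A worked example covering the `functools.partial` branches.

import functools

def f(a, b, c):
    return a + b + c

fn_args(f)                          # ('a', 'b', 'c')
fn_args(functools.partial(f, 1))    # ('b', 'c') - positional arg dropped
fn_args(functools.partial(f, c=3))  # ('a', 'b') - keyword arg dropped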
def get_item(self, name, bootstrap=False): for item in self._get_items(bootstrap): if item.name == name: return item return None
Get a particular item in the specification. Args: name (str): The name of the item to retrieve. bootstrap (bool): Only search bootstrap items Returns (YapconfItem): A YapconfItem if it is found, None otherwise.
juraj-google-style
def measurements(self, value): if value == self._defaults['measurements'] and 'measurements' in self._values: del self._values['measurements'] else: self._values['measurements'] = value
The measurements property.

Args:
    value (hash): the property value.
juraj-google-style
def __init__(self, columns: list[str], vocab_size: Optional[int]=None, smooth: bool=True, name: Optional[str]=None): super().__init__(columns) self.vocab_size = vocab_size self.smooth = smooth self.name = name self.tfidf_weight = None
This function applies a tf-idf transformation on the given columns of incoming data. TFIDF outputs two artifacts for each column: the vocabulary index and the tfidf weight. The vocabulary index is a mapping from the original vocabulary to the new vocabulary. The tfidf weight is a mapping from the original vocabulary to the tfidf score. Input passed to the TFIDF is not modified and used to calculate the required artifacts. Args: columns: List of column names to apply the transformation. vocab_size: (Optional) An integer that specifies the size of the vocabulary. Defaults to None. If vocab_size is None, then the size of the vocabulary is determined by `tft.get_num_buckets_for_transformed_feature`. smooth: (Optional) A boolean that specifies whether to apply smoothing to the tf-idf score. Defaults to True. name: (Optional) A string that specifies the name of the operation.
github-repos
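A pipeline sketch assuming Apache Beam's MLTransform API; the artifact location and input data are placeholders, and TFIDF is chained after a vocabulary transform so the column holds integer ids.

import apache_beam as beam
from apache_beam.ml.transforms.base import MLTransform
from apache_beam.ml.transforms.tft import ComputeAndApplyVocabulary

with beam.Pipeline() as p:
    _ = (
        p
        | beam.Create([{'text': ['the', 'quick', 'brown', 'fox']}])
        | MLTransform(write_artifact_location='/tmp/artifacts')
            .with_transform(ComputeAndApplyVocabulary(columns=['text']))
            .with_transform(TFIDF(columns=['text'])))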
def _get_implicit_credentials(cls):
    environ_checkers = [
        cls._implicit_credentials_from_files,
        cls._implicit_credentials_from_gae,
        cls._implicit_credentials_from_gce,
    ]

    for checker in environ_checkers:
        credentials = checker()
        if credentials is not None:
            return credentials

    raise ApplicationDefaultCredentialsError(ADC_HELP_MSG)
Gets credentials implicitly from the environment. Checks environment in order of precedence: - Environment variable GOOGLE_APPLICATION_CREDENTIALS pointing to a file with stored credentials information. - Stored "well known" file associated with `gcloud` command line tool. - Google App Engine (production and testing) - Google Compute Engine production environment. Raises: ApplicationDefaultCredentialsError: raised when the credentials fail to be retrieved.
codesearchnet
def build_pipeline_labels(job_metadata, task_metadata, task_id_pattern=None): labels = { Label(name, job_metadata[name]) for name in ['job-name', 'job-id', 'user-id', 'dsub-version'] } task_id = task_metadata.get('task-id') if task_id is not None: if task_id_pattern: task_id = task_id_pattern % task_id labels.add(Label('task-id', str(task_id))) task_attempt = task_metadata.get('task-attempt') if task_attempt is not None: labels.add(Label('task-attempt', str(task_attempt))) return labels
Build a set() of standard job and task labels. Args: job_metadata: Job metadata, such as job-id, job-name, and user-id. task_metadata: Task metadata, such as the task-id. task_id_pattern: A pattern for the task-id value, such as "task-%d"; the original google label values could not be strictly numeric, so "task-" was prepended. Returns: A set of standard dsub Label() objects to attach to a pipeline.
juraj-google-style
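A worked example with placeholder metadata values.

job_metadata = {'job-name': 'align', 'job-id': 'align--user--171107-123456',
                'user-id': 'user', 'dsub-version': '0.1.3'}
task_metadata = {'task-id': 7, 'task-attempt': 2}
labels = build_pipeline_labels(job_metadata, task_metadata,
                               task_id_pattern='task-%d')
# -> Labels for job-name, job-id, user-id, dsub-version,
#    plus task-id='task-7' and task-attempt='2'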
def _is_statically_shaped(element_spec): for spec in nest.flatten(element_spec): if isinstance(spec, (sparse_tensor.SparseTensorSpec, ragged_tensor.RaggedTensorSpec)): if spec.shape.rank > 0 and spec.shape.as_list()[0] is None: return False else: for component in spec._flat_tensor_specs: if not component.shape.is_fully_defined(): return False return True
Test if an iterator output is statically shaped. For sparse and ragged tensors this only tests the batch dimension. Args: element_spec: a nest structure of `tf.TypeSpec`. The element spec of the dataset of the iterator. Returns: True if the shape is static, false otherwise.
github-repos
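Illustrative calls; this is a private helper, so these show the intended behavior rather than public usage.

import tensorflow as tf

_is_statically_shaped(tf.TensorSpec([32, 10], tf.float32))            # True
_is_statically_shaped(tf.TensorSpec([None, 10], tf.float32))          # False
# Sparse and ragged specs only need a known batch dimension:
_is_statically_shaped(tf.SparseTensorSpec([32, None], tf.float32))    # True
_is_statically_shaped(tf.RaggedTensorSpec([None, None], tf.float32))  # False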