code (string, lengths 20 to 4.93k)
docstring (string, lengths 33 to 1.27k)
source (string, 3 classes)
def update_compounds(self, variants): LOG.debug("Updating compound objects") for var_id in variants: variant_obj = variants[var_id] if not variant_obj.get('compounds'): continue updated_compounds = self.update_variant_compounds(variant_obj, variants) variant_obj['compounds'] = updated_compounds LOG.debug("Compounds updated") return variants
Update the compounds for a set of variants. Args: variants(dict): A dictionary with _ids as keys and variant objs as values
juraj-google-style
def run(self, sensor_graph, model): for node, inputs, outputs in sensor_graph.iterate_bfs(): can_remove = False if len(outputs) != 0: continue if sensor_graph.is_output(node.stream): continue if node.stream.stream_id < StreamAllocator.StartingID: continue if node.func_name == u'call_rpc': continue if node.stream.buffered: continue if node.func_name == u'trigger_streamer': continue for input_node in inputs: input_node.outputs.remove(node) if node in sensor_graph.roots: sensor_graph.roots.remove(node) sensor_graph.nodes.remove(node) return True return False
Run this optimization pass on the sensor graph If necessary, information on the device model being targeted can be found in the associated model argument. Args: sensor_graph (SensorGraph): The sensor graph to optimize model (DeviceModel): The device model we're using
juraj-google-style
def get_current_epoch_time(): return int(round(time.time() * 1000))
Current epoch time in milliseconds. Returns: An integer representing the current epoch time in milliseconds.
github-repos
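A minimal standalone sketch of the same millisecond-epoch pattern (hypothetical helper name, standard library only):

import time

def current_epoch_ms():
    # Round to the nearest millisecond before truncating to int.
    return int(round(time.time() * 1000))

print(current_epoch_ms())  # e.g. 1712345678901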
def _on_cancelok(self, cancel_frame): _log.info('Consumer canceled; returning all unprocessed messages to the queue') self._channel.basic_nack(delivery_tag=0, multiple=True, requeue=True)
Called when the server acknowledges a cancel request. Args: cancel_frame (pika.spec.Basic.CancelOk): The cancelok frame from the server.
codesearchnet
def _PrintAPFSVolumeIdentifiersOverview( self, volume_system, volume_identifiers): header = 'The following Apple File System (APFS) volumes were found:\n' self._output_writer.Write(header) column_names = ['Identifier', 'Name'] table_view = views.CLITabularTableView(column_names=column_names) for volume_identifier in volume_identifiers: volume = volume_system.GetVolumeByIdentifier(volume_identifier) if not volume: raise errors.SourceScannerError( 'Volume missing for identifier: {0:s}.'.format( volume_identifier)) volume_attribute = volume.GetAttribute('name') table_view.AddRow([volume.identifier, volume_attribute.value]) self._output_writer.Write('\n') table_view.Write(self._output_writer) self._output_writer.Write('\n')
Prints an overview of APFS volume identifiers. Args: volume_system (dfvfs.APFSVolumeSystem): volume system. volume_identifiers (list[str]): allowed volume identifiers. Raises: SourceScannerError: if a volume cannot be resolved from the volume identifier.
juraj-google-style
def convert_slice(params, w_name, scope_name, inputs, layers, weights, names): print('Converting slice ...') if (len(params['axes']) > 1): raise AssertionError('Cannot convert slice by multiple dimensions') if (params['axes'][0] not in [0, 1, 2, 3]): raise AssertionError('Slice by dimension more than 3 or less than 0 is not supported') def target_layer(x, axis=int(params['axes'][0]), start=int(params['starts'][0]), end=int(params['ends'][0])): if (axis == 0): return x[start:end] elif (axis == 1): return x[:, start:end] elif (axis == 2): return x[:, :, start:end] elif (axis == 3): return x[:, :, :, start:end] lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert slice operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
codesearchnet
def forward(self, pixel_values: torch.FloatTensor, spatial_shapes: torch.LongTensor) -> torch.Tensor: target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) positional_embeddings = self.position_embedding.weight.reshape(self.position_embedding_size, self.position_embedding_size, -1) resized_positional_embeddings = self.resize_positional_embeddings(positional_embeddings, spatial_shapes, max_length=pixel_values.shape[1]) embeddings = patch_embeds + resized_positional_embeddings return embeddings
Args: pixel_values (`torch.FloatTensor`): Pixel values of shape (batch_size, max_num_patches, num_channels * patch_size * patch_size) spatial_shapes (`List[Tuple[int, int]]`): Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to
github-repos
def real(x): if any_symbolic_tensors((x,)): return Real().symbolic_call(x) return backend.numpy.real(x)
Return the real part of the complex argument. Args: x: Input tensor. Returns: The real component of the complex argument.
github-repos
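A hedged sketch of the eager branch only, using NumPy directly; the symbolic branch depends on the surrounding ops framework and is not reproduced here:

import numpy as np

x = np.array([1 + 2j, 3 - 4j])
print(np.real(x))  # [1. 3.] -- the call the eager path delegates to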
def Scripts(unicode_dir=_UNICODE_DIR): scripts = {} def DoLine(codes, fields): 'Process single Scripts.txt line, updating scripts.' (_, name) = fields scripts.setdefault(name, []).extend(codes) ReadUnicodeTable((unicode_dir + '/Scripts.txt'), 2, DoLine) return scripts
Returns dict mapping script names to code lists. Args: unicode_dir: Unicode data directory Returns: dict mapping script names to code lists
codesearchnet
def with_scopes_if_required(credentials, scopes): if (isinstance(credentials, Scoped) and credentials.requires_scopes): return credentials.with_scopes(scopes) else: return credentials
Creates a copy of the credentials with scopes if scoping is required. This helper function is useful when you do not know (or care to know) the specific type of credentials you are using (such as when you use :func:`google.auth.default`). This function will call :meth:`Scoped.with_scopes` if the credentials are scoped credentials and if the credentials require scoping. Otherwise, it will return the credentials as-is. Args: credentials (google.auth.credentials.Credentials): The credentials to scope if necessary. scopes (Sequence[str]): The list of scopes to use. Returns: google.auth.credentials.Credentials: Either a new set of scoped credentials, or the passed in credentials instance if no scoping was required.
codesearchnet
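A usage sketch, assuming the google-auth package is installed and application default credentials are configured in the environment:

import google.auth
from google.auth import credentials as ga_credentials

creds, project = google.auth.default()
# Scopes are applied only if this credential type actually requires them.
scoped = ga_credentials.with_scopes_if_required(
    creds, ['https://www.googleapis.com/auth/cloud-platform'])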
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): for subkey in registry_key.GetSubkeys(): drive_letter = subkey.name if not drive_letter: continue values_dict = { 'DriveLetter': drive_letter, 'Type': 'Mapped Drive'} remote_path_value = subkey.GetValueByName('RemotePath') if remote_path_value: remote_path = remote_path_value.GetDataAsObject() if remote_path.startswith('\\\\'): server_name, _, share_name = remote_path[2:].partition('\\') values_dict['RemoteServer'] = server_name values_dict['ShareName'] = '\\{0:s}'.format( share_name.replace('#', '\\')) event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = subkey.offset event_data.regvalue = values_dict event_data.source_append = self._SOURCE_APPEND event_data.urls = self.URLS event = time_events.DateTimeValuesEvent( subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
juraj-google-style
def delay(self, secs): secs = int(secs) for i in reversed(range(secs)): sys.stdout.write('\r') sys.stdout.write("sleep %ds, left %2ds" % (secs, i+1)) sys.stdout.flush() time.sleep(1) sys.stdout.write("\n") return self
Delay some seconds Args: secs: float seconds Returns: self
juraj-google-style
def __init__(self, terms: Mapping[raw_types.Gate, value.Scalar]) -> None: super().__init__(terms, validator=self._is_compatible)
Initializes linear combination from a collection of terms. Args: terms: Mapping of gates to coefficients in the linear combination being initialized.
juraj-google-style
def set_datetime_format(self, format): if not format in ["UNIX", "RFC3339"]: return self.datetime_format = format self.set_header("Accept-Datetime-Format", self.datetime_format)
Set the Accept-Datetime-Format header to an acceptable value Args: format: UNIX or RFC3339
juraj-google-style
def read_gbq(table, dataset=None, project_id=None, use_bqstorage_api=False, **kwargs): if table is None: raise ValueError('Please specify a BigQuery table to read from.') elif len(kwargs) > 0: raise ValueError(f'Encountered unsupported parameter(s) in read_gbq: {kwargs.keys()!r}') return _ReadGbq(table, dataset, project_id, use_bqstorage_api)
This function reads data from a BigQuery table and produces a :class:`~apache_beam.dataframe.frames.DeferredDataFrame`. Args: table (str): Please specify a table. This can be done in the format 'PROJECT:dataset.table' if one would not wish to utilize the parameters below. dataset (str): Please specify the dataset (can omit if table was specified as 'PROJECT:dataset.table'). project_id (str): Please specify the project ID (can omit if table was specified as 'PROJECT:dataset.table'). use_bqstorage_api (bool): If you would like to utilize the BigQuery Storage API in ReadFromBigQuery, please set this flag to true. Otherwise, please set flag to false or leave it unspecified.
github-repos
def instantiate_references_json(references_json): references = {} for obj in references_json: obj_id = obj['id'] obj_type = obj.get('subtype', obj['type']) cls = get_class(obj_type) instance = cls.__new__(cls, id=obj_id) if (instance is None): raise RuntimeError(('Error loading model from JSON (type: %s, id: %s)' % (obj_type, obj_id))) references[instance.id] = instance return references
Given a JSON representation of all the models in a graph, return a dict of new model objects. Args: references_json (``JSON``) JSON specifying new Bokeh models to create Returns: dict[str, Model]
codesearchnet
def set_flowcontrol_receive(self, name, value=None, default=False, disable=False): return self.set_flowcontrol(name, 'receive', value, default, disable)
Configures the interface flowcontrol receive value Args: name (string): The interface identifier. It must be a full interface name (ie Ethernet, not Et) value (boolean): True if the interface should enable receiving flow control packets, otherwise False default (boolean): Specifies to default the interface flow control receive value disable (boolean): Specifies to disable the interface flow control receive value Returns: True if the operation succeeds otherwise False is returned
codesearchnet
def GetAddress(self): script = ((b'21' + self.PublicKey.encode_point(True)) + b'ac') script_hash = Crypto.ToScriptHash(script) address = Crypto.ToAddress(script_hash) return address
Returns the public NEO address for this KeyPair. Returns: str: The public NEO address
codesearchnet
def adjust(self, amount, update=True, flow=True, fee=0.0): self._capital += amount self._last_fee += fee if flow: self._net_flows += amount if update: self.root.stale = True
Adjust capital - used to inject capital to a Strategy. This injection of capital will have no effect on the children. Args: * amount (float): Amount to adjust by. * update (bool): Force update? * flow (bool): Is this adjustment a flow? A flow will not have an impact on the performance (price index). Example of flows are simply capital injections (say a monthly contribution to a portfolio). This should not be reflected in the returns. A non-flow (flow=False) does impact performance. A good example of this is a commission, or a dividend.
codesearchnet
def reminders_info(self, *, reminder: str, **kwargs) -> SlackResponse: self._validate_xoxp_token() kwargs.update({"reminder": reminder}) return self.api_call("reminders.info", http_verb="GET", params=kwargs)
Gets information about a reminder. Args: reminder (str): The ID of the reminder. e.g. 'Rm12345678'
juraj-google-style
def get_area_url(location, distance): locations = [location.destination(i, distance) for i in range(0, 360, 90)] latitudes = list(map(attrgetter('latitude'), locations)) longitudes = list(map(attrgetter('longitude'), locations)) bounds = (min(longitudes), min(latitudes), max(longitudes), max(latitudes)) return ('http:
Generate URL for downloading OSM data within a region. This function defines a boundary box where the edges touch a circle of ``distance`` kilometres in radius. It is important to note that the box is neither a square, nor bounded within the circle. The bounding box is strictly a trapezoid whose north and south edges are different lengths; which one is longer depends on whether the box is calculated for a location in the Northern or Southern hemisphere. You will get a shorter north edge in the Northern hemisphere, and vice versa. This is simply because we are applying a flat transformation to a spherical object; however, for all general cases the difference will be negligible. Args: location (Point): Centre of the region distance (int): Boundary distance in kilometres Returns: str: URL that can be used to fetch the OSM data within ``distance`` of ``location``
codesearchnet
def pred_to_prob(Y_h, k): Y_h = Y_h.clone() if (Y_h.dim() > 1): Y_h = Y_h.squeeze() assert (Y_h.dim() == 1) assert (Y_h >= 1).all() assert (Y_h <= k).all() n = Y_h.shape[0] Y_s = torch.zeros((n, k), dtype=Y_h.dtype, device=Y_h.device) for (i, j) in enumerate(Y_h): Y_s[(i, (j - 1))] = 1.0 return Y_s
Converts a 1D tensor of predicted labels into a 2D tensor of probabilistic labels Args: Y_h: an [n], or [n,1] tensor of predicted (int) labels in {1,...,k} k: the largest possible label in Y_h Returns: Y_s: a torch.FloatTensor of shape [n, k] where Y_s[i, j-1] is the probabilistic label for item i and label j
codesearchnet
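A small worked example, assuming pred_to_prob is imported from its module and PyTorch is installed:

import torch

Y_h = torch.tensor([1, 3, 2])  # predicted labels in {1, ..., k}
Y_s = pred_to_prob(Y_h, k=3)
# Rows are one-hot on the (label - 1) column:
# [[1, 0, 0],
#  [0, 0, 1],
#  [0, 1, 0]]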
def _infer_fused_data_format(self, input_batch): input_shape = input_batch.get_shape().as_list() input_shape_len = len(input_shape) if (input_shape_len != 4): raise NotImplementedError('fused batch norm supports only input with 4 dimensions, it received input of dimensionality {:d}'.format(input_shape_len)) axis = (range(input_shape_len)[:(- 1)] if (self._axis is None) else self._axis) axis = tuple(axis) if (axis == (0, 1, 2)): return 'NHWC' elif (axis == (0, 2, 3)): return 'NCHW' else: raise ValueError('Invalid axis option {}. This does not correspond to either the NHWC format (0, 1, 2) or the NCHW (0, 2, 3).'.format(axis))
Infers the data format for the fused batch norm. It uses the axis option to infer this information. Specifically, the axis value (0, 1, 2) corresponds to data format NHWC and the axis value (0, 2, 3) to data format NCHW. Args: input_batch: A Tensor of arbitrary dimension. Returns: A string description of the data format NHWC or NCHW. Raises: NotImplementedError: for input of dimensionality different from 4. ValueError: for axis configuration different from (0, 1, 2) and (0, 2, 3).
codesearchnet
def Convert(self, input_file, output_file): for version, schema, raw_binary, _ in self._schemas: try: data_candidate = self._Read(input_file, schema, raw_binary) except RuntimeError: continue if 'version' not in data_candidate: data_candidate['version'] = 1 elif data_candidate['version'] == 0: data_candidate['version'] = 1 if data_candidate['version'] == version: self._PerformUpgrade(data_candidate) self._Write(data_candidate, output_file) return raise RuntimeError('No schema that the converter understands worked with the data file you provided.')
Perform schema conversion from input_file to output_file. Args: input_file: Filename of TensorFlow Lite data to convert from. Must be `.json` or `.bin` extension files for JSON or Binary forms of the TensorFlow FlatBuffer schema. output_file: Filename to write to. Extension also must be `.json` or `.bin`. Raises: RuntimeError: Generated when none of the upgrader supported schemas matche the `input_file` data.
github-repos
def generate_data(self, data_dir, tmp_dir, task_id=-1): tf.logging.info("generate_data task_id=%s" % task_id) encoder = self.get_or_create_vocab(data_dir, tmp_dir) assert task_id >= 0 and task_id < self.num_generate_tasks if task_id < self.num_train_shards: out_file = self.training_filepaths( data_dir, self.num_train_shards, shuffled=False)[task_id] else: out_file = self.dev_filepaths( data_dir, self.num_dev_shards, shuffled=False)[task_id - self.num_train_shards] generator_utils.generate_files( self.example_generator(encoder, tmp_dir, task_id), [out_file]) generator_utils.shuffle_dataset([out_file])
Generates training/dev data. Args: data_dir: a string tmp_dir: a string task_id: an optional integer Returns: shard or shards for which data was generated.
juraj-google-style
def get_matching_text_in_strs(a, b, match_min_size=30, ignore='', end_characters=''): compare = difflib.SequenceMatcher((lambda x: (x in ignore))) compare.set_seqs(a=a, b=b) matching_text = list() for match in compare.get_matching_blocks(): start = match.a text = a[start:(start + match.size)] if end_characters: prev_text = text while ((len(text) != 0) and (text[0] in end_characters)): text = text[1:] while ((len(text) != 0) and (text[(- 1)] not in end_characters)): text = text[:(- 1)] if (len(text) == 0): text = prev_text if (len(text) >= match_min_size): matching_text.append(text) return matching_text
Returns a list of matching blocks of text in a and b Args: a (str): First string to match b (str): Second string to match match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to ''. Returns: List[str]: List of matching blocks of text
codesearchnet
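A usage sketch, assuming get_matching_text_in_strs is imported from its module (it only relies on difflib internally):

a = 'The quick brown fox jumps over the lazy dog near the river.'
b = 'Today the quick brown fox jumps over the lazy dog, then slept.'
# Lowering match_min_size lets the shared fragment through; the result is
# expected to contain the common block, roughly:
# ['he quick brown fox jumps over the lazy dog']
print(get_matching_text_in_strs(a, b, match_min_size=20))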
def _field(self, field, value): field = str(field) value = str(value) if (any([char in value for char in QUOTE_LIST]) and '"' not in value and not any([char in value for char in UNQUOTE_LIST])): value = '"' + value + '"' if field and value: self.__query["q"] += field + ":" + value self.__query["advanced"] = True return self
Add a ``field:value`` term to the query. Matches will have the ``value`` in the ``field``. Note: This method triggers advanced mode. Arguments: field (str): The field to check for the value, in Elasticsearch dot syntax. value (str): The value to match. Returns: SearchHelper: Self
juraj-google-style
def update(self, friendly_name=None, description=None, expiry=None, schema=None): self._load_info() if (friendly_name is not None): self._info['friendlyName'] = friendly_name if (description is not None): self._info['description'] = description if (expiry is not None): if isinstance(expiry, datetime.datetime): expiry = (calendar.timegm(expiry.utctimetuple()) * 1000) self._info['expirationTime'] = expiry if (schema is not None): if isinstance(schema, _schema.Schema): schema = schema._bq_schema self._info['schema'] = {'fields': schema} try: self._api.table_update(self._name_parts, self._info) except datalab.utils.RequestException: self._info = None except Exception as e: raise e
Selectively updates Table information. Any parameters that are omitted or None are not updated. Args: friendly_name: if not None, the new friendly name. description: if not None, the new description. expiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch. schema: if not None, the new schema: either a list of dictionaries or a Schema.
codesearchnet
def run(self, dag): if self.layout is None: if self.property_set["layout"]: self.layout = self.property_set["layout"] else: self.layout = Layout.generate_trivial_layout(*dag.qregs.values()) self.property_set['is_direction_mapped'] = True edges = self.coupling_map.get_edges() for gate in dag.twoQ_gates(): physical_q0 = self.layout[gate.qargs[0]] physical_q1 = self.layout[gate.qargs[1]] if isinstance(gate.op, (CXBase, CnotGate)) and ( physical_q0, physical_q1) not in edges: self.property_set['is_direction_mapped'] = False return
If `dag` is mapped and the direction is correct the property `is_direction_mapped` is set to True (or to False otherwise). Args: dag (DAGCircuit): DAG to check.
juraj-google-style
def export_to_xml(video_id, resource_fs, static_dir, course_id=None): video_image_name = '' video = _get_video(video_id) try: course_video = CourseVideo.objects.select_related('video_image').get(course_id=course_id, video=video) video_image_name = course_video.video_image.image.name except ObjectDoesNotExist: pass video_el = Element('video_asset', attrib={'client_video_id': video.client_video_id, 'duration': six.text_type(video.duration), 'image': video_image_name}) for encoded_video in video.encoded_videos.all(): SubElement(video_el, 'encoded_video', {name: six.text_type(getattr(encoded_video, name)) for name in ['profile', 'url', 'file_size', 'bitrate']}) return create_transcripts_xml(video_id, video_el, resource_fs, static_dir)
Exports data for a video into an xml object. NOTE: For external video ids, only transcripts information will be added into xml. If external=False, then edx_video_id is going to be on first index of the list. Arguments: video_id (str): Video id of the video to export transcripts. course_id (str): The ID of the course with which this video is associated. static_dir (str): The Directory to store transcript file. resource_fs (SubFS): Export file system. Returns: An lxml video_asset element containing export data Raises: ValVideoNotFoundError: if the video does not exist
codesearchnet
def get_cytoband_coordinates(chrom, pos): coordinate = '' if (chrom in CYTOBANDS): for interval in CYTOBANDS[chrom][pos]: coordinate = interval.data return coordinate
Get the cytoband coordinate for a position Args: chrom(str) pos(int) Returns: coordinate(str)
codesearchnet
def basis_state(str_state, num): n = int(str_state, 2) if num >= len(str_state): state = np.zeros(1 << num, dtype=complex) state[n] = 1 return state else: raise QiskitError('size of bitstring is greater than num.')
Return a basis state ndarray. Args: str_state (string): a string representing the state. num (int): the number of qubits Returns: ndarray: state(2**num), a quantum state in the given basis state. Raises: QiskitError: if the dimension is wrong
juraj-google-style
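A worked example, assuming basis_state is imported from its module (NumPy is its only other dependency):

state = basis_state('01', 2)
# The bitstring maps to index int('01', 2) == 1 of a 2**2 == 4 element vector:
# array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])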
def shift_relative_position_tensor(self, pos_tensor): zero_pad = torch.zeros((*pos_tensor.size()[:3], 1), device=pos_tensor.device, dtype=pos_tensor.dtype) pos_tensor_padded = torch.cat([zero_pad, pos_tensor], dim=-1) pos_tensor_padded = pos_tensor_padded.view(*pos_tensor.size()[:2], pos_tensor.size(3) + 1, pos_tensor.size(2)) pos_tensor = pos_tensor_padded[:, :, 1:].view_as(pos_tensor)[:, :, :, :pos_tensor.size(-1) // 2 + 1] return pos_tensor
Args: pos_tensor (torch.Tensor of shape (batch_size, head, time1, 2*time1-1)): Input tensor.
github-repos
def _handle_response(self, response, valid_status_codes, resource): if (response.status_code not in valid_status_codes): raise InvalidStatusCodeError(status_code=response.status_code, expected_status_codes=valid_status_codes) if response.content: data = response.json() if isinstance(data, list): return [resource(**x) for x in data] else: key = getattr(resource.Meta, 'pagination_key', None) if isinstance(data.get(key), list): return [resource(**x) for x in data.get(key)] else: return [resource(**data)] return []
Handles Response objects. Args: response: An HTTP response object valid_status_codes: A tuple or list of valid status codes resource: The resource class to build from this response Returns: resources: A list of Resource instances
codesearchnet
def partitioned_dim_sizes(self): return self._partitioned_dim_sizes
The partitioned dimension sizes for this shape. Returns: A `list` of 0-D or 1-D integer `Tensor`.
github-repos
def date_range(start, end, boo): earliest = datetime.strptime(start.replace('-', ' '), '%Y %m %d') latest = datetime.strptime(end.replace('-', ' '), '%Y %m %d') num_days = ((latest - earliest).days + 1) all_days = [(latest - timedelta(days=x)) for x in range(num_days)] all_days.reverse() output = [] if boo: for d in all_days: output.append(int(str(d).replace('-', '')[:8])) else: for d in all_days: output.append(str(d)[:10]) return output
Return list of dates within a specified range, inclusive. Args: start: earliest date to include, String ("2015-11-25") end: latest date to include, String ("2015-12-01") boo: if true, output list contains Numbers (20151230); if false, list contains Strings ("2015-12-30") Returns: list of either Numbers or Strings
codesearchnet
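A worked example of the two output modes, assuming date_range is imported from its module:

date_range('2015-11-28', '2015-12-01', True)
# [20151128, 20151129, 20151130, 20151201]
date_range('2015-11-28', '2015-12-01', False)
# ['2015-11-28', '2015-11-29', '2015-11-30', '2015-12-01']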
def _validate_min_version(min_version): if min_version is not None: try: parsed_min_version = version.StrictVersion(min_version) except ValueError: return ExtensionVersionResult( error_reason=ExtensionValidationError.UNPARSEABLE_REQUESTED_VERSION, requested_extension_version=min_version) if parsed_min_version > HANDLER_VERSION: return ExtensionVersionResult( error_reason=ExtensionValidationError.OUTDATED_VERSION, requested_extension_version=str(parsed_min_version)) return ExtensionVersionResult( error_reason=None, requested_extension_version=min_version)
Validates the extension version matches the requested version. Args: min_version: Minimum version passed as a query param when establishing the connection. Returns: An ExtensionVersionResult indicating validation status. If there is a problem, the error_reason field will be non-empty.
juraj-google-style
def sort(self, by=None, reverse=False): if by is None: by = self.kdims elif not isinstance(by, list): by = [by] sorted_columns = self.interface.sort(self, by, reverse) return self.clone(sorted_columns)
Sorts the data by the values along the supplied dimensions. Args: by: Dimension(s) to sort by reverse (bool, optional): Reverse sort order Returns: Sorted Dataset
juraj-google-style
def object_key(self, root_path: KeyPath, *, value: Any, parent: Any, css_classes: Optional[Sequence[str]]=None, key_color: Union[Tuple[Optional[str], Optional[str]], Callable[[KeyPath, Any, Any], Tuple[Optional[str], Optional[str]]]]=None, enable_key_tooltip: bool=True, key_tooltip_fn: Optional[Callable[..., Html]]=None, **kwargs) -> Html: del kwargs key_tooltip_fn = key_tooltip_fn or self.tooltip key_color = self.get_color(key_color, root_path, value, parent) return (Html.element('span', [str(root_path.key)], css_classes=['object-key', type(root_path.key).__name__, css_classes], styles=dict(color=key_color[0], background_color=key_color[1])) + (lambda: key_tooltip_fn(value=root_path, root_path=root_path, parent=parent) if enable_key_tooltip else None)).add_style("\n \n .object-key {\n margin: 0.15em 0.3em 0.15em 0;\n display: block;\n }\n .object-key:hover + .tooltip {\n visibility: visible;\n background-color: darkblue;\n }\n .object-key.str {\n color: gray;\n border: 1px solid lightgray;\n background-color: ButtonFace;\n border-radius: 0.2em;\n padding: 0.3em;\n }\n .object-key.int::before{\n content: '[';\n }\n .object-key.int::after{\n content: ']';\n }\n .object-key.int{\n border: 0;\n color: lightgray;\n background-color: transparent;\n border-radius: 0;\n padding: 0;\n }\n ")
Renders a label-style key for the value. Args: root_path: The root path of the value. value: The value to render. parent: The parent of the value. css_classes: The CSS classes to add to the HTML element. key_color: The color of the key. If None, the key will be rendered without a color. If a tuple, the first element is the text color and the second element is the background color. If a function, the function takes (root_path, value, parent) and returns a tuple of (text_color, background_color). enable_key_tooltip: Whether to enable the tooltip. key_tooltip_fn: The function to render the key tooltip. **kwargs: Additional arguments passed by the user that will be ignored. Returns: The rendered HTML as the key of the value.
github-repos
def calculate_sun(self, month, day, hour, is_solar_time=False): datetime = DateTime(month, day, *self._calculate_hour_and_minute(hour), leap_year=self.is_leap_year) return self.calculate_sun_from_date_time(datetime, is_solar_time)
Get Sun data for an hour of the year. Args: month: An integer between 1-12 day: An integer between 1-31 hour: A positive number between 0..23 is_solar_time: A boolean to indicate if the input hour is solar time. (Default: False) Returns: A sun object for this particular time
juraj-google-style
def get_all_results_for_query_batch(self, batch_id, job_id=None, chunk_size=2048): result_ids = self.get_query_batch_result_ids(batch_id, job_id=job_id) if not result_ids: raise RuntimeError('Batch is not complete') for result_id in result_ids: yield self.get_query_batch_results( batch_id, result_id, job_id=job_id, chunk_size=chunk_size )
Gets result ids and generates each result set from the batch and returns it as an generator fetching the next result set when needed Args: batch_id: id of batch job_id: id of job, if not provided, it will be looked up
juraj-google-style
def get_image_features(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False): pass
Encodes images into continuous embeddings that can be forwarded to the language model. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images.
github-repos
def _string_to_byte_list(self, data): bytes_length = 16 m = self.digest() m.update(str.encode(data)) hex_digest = m.hexdigest() return list((int(hex_digest[(num * 2):((num * 2) + 2)], bytes_length) for num in range(bytes_length)))
Creates a hex digest of the input string given to create the image, if it's not already hexadecimal Returns: Length 16 list of rgb value range integers (each representing a byte of the hex digest)
codesearchnet
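A standalone sketch of the same digest-to-byte-list idea using hashlib; the class's self.digest() is assumed here to be an MD5-style hash with a 32-character hex digest:

import hashlib

def string_to_byte_list(data):
    hex_digest = hashlib.md5(data.encode()).hexdigest()
    # Split the 32-char hex digest into 16 two-char chunks and parse each as base-16.
    return [int(hex_digest[i * 2:i * 2 + 2], 16) for i in range(16)]

print(string_to_byte_list('hello'))  # 16 integers in the range 0-255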
def check_whitelist(host, whitelist): if ':' not in host: host = host + ':80' if host in whitelist: return True return any(match_host(host, pattern) for pattern in whitelist)
Check a given request host against a whitelist. Args: host (str) : A host string to compare against a whitelist. If the host does not specify a port, then ``":80"`` is implicitly assumed. whitelist (seq[str]) : A list of host patterns to match against Returns: ``True``, if ``host`` matches any pattern in ``whitelist``, otherwise ``False``
juraj-google-style
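A quick usage sketch, assuming check_whitelist and its match_host helper are importable from the same module:

print(check_whitelist('example.com', ['example.com:80']))        # True -- ':80' is implied
print(check_whitelist('example.com:5006', ['example.com:5006'])) # True -- exact match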
def _create_c_op(graph, node_def, inputs, control_inputs, op_def=None, extract_traceback=True) -> pywrap_tf_session.TF_Operation: if op_def is None: op_def = graph.op_def_for_type(node_def.op) inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.attr) with graph._c_graph.get() as c_graph: op_desc = pywrap_tf_session.TF_NewOperation(c_graph, compat.as_str(node_def.op), compat.as_str(node_def.name)) if node_def.device: pywrap_tf_session.TF_SetDevice(op_desc, compat.as_str(node_def.device)) for op_input in inputs: if isinstance(op_input, (list, tuple)): pywrap_tf_session.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input]) else: pywrap_tf_session.TF_AddInput(op_desc, op_input._as_tf_output()) for control_input in control_inputs: pywrap_tf_session.TF_AddControlInput(op_desc, control_input._c_op) for name, attr_value in node_def.attr.items(): serialized = attr_value.SerializeToString() pywrap_tf_session.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized) try: c_op = pywrap_tf_session.TF_FinishOperation(op_desc) except errors.InvalidArgumentError as e: raise ValueError(e.message) if extract_traceback: pywrap_tf_session.TF_SetOpStackTrace(c_op, tf_stack.extract_stack(stacklevel=3)) return c_op
Creates a TF_Operation. Args: graph: a `Graph`. node_def: `node_def_pb2.NodeDef` for the operation to create. inputs: A flattened list of `Tensor`s. This function handles grouping tensors into lists as per attributes in the `node_def`. control_inputs: A list of `Operation`s to set as control dependencies. op_def: Optional. `op_def_pb2.OpDef` for the operation to create. If not specified, is looked up from the `graph` using `node_def.op`. extract_traceback: if True, extract the current Python traceback to the TF_Operation. Returns: A wrapped TF_Operation*.
github-repos
def plugin_wait_time(seconds: float, item_session: ItemSession, error: Optional[Exception]=None) -> float: return seconds
Return the wait time between requests. Args: seconds: The original time in seconds. item_session: error: Returns: The time in seconds.
codesearchnet
def to_array(tensor): if tensor.HasField('segment'): raise ValueError('Currently not supporting loading segments.') if (tensor.data_type == TensorProto.UNDEFINED): raise ValueError('The data type is not defined.') tensor_dtype = tensor.data_type np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[tensor_dtype] storage_type = mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[tensor_dtype] storage_np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[storage_type] storage_field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[storage_type] dims = tensor.dims if (tensor.data_type == TensorProto.STRING): utf8_strings = getattr(tensor, storage_field) ss = list((s.decode('utf-8') for s in utf8_strings)) return np.asarray(ss).astype(np_dtype).reshape(dims) if tensor.HasField('raw_data'): return np.frombuffer(tensor.raw_data, dtype=np_dtype).reshape(dims) else: data = (getattr(tensor, storage_field),) if ((tensor_dtype == TensorProto.COMPLEX64) or (tensor_dtype == TensorProto.COMPLEX128)): data = combine_pairs_to_complex(data) return np.asarray(data, dtype=storage_np_dtype).astype(np_dtype).reshape(dims)
Converts a tensor def object to a numpy array. Inputs: tensor: a TensorProto object. Returns: arr: the converted array.
codesearchnet
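This row resembles onnx's numpy_helper.to_array; a hedged round-trip sketch assuming the onnx package is installed:

from onnx import helper, TensorProto, numpy_helper

t = helper.make_tensor('t', TensorProto.FLOAT, dims=[2, 2],
                       vals=[1.0, 2.0, 3.0, 4.0])
arr = numpy_helper.to_array(t)  # the conversion implemented above
print(arr)  # [[1. 2.] [3. 4.]]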
def _inspect_history_cache(self, cache, replica_id, step_num, tensor_trace_order): if not tensor_trace_order.traced_tensors: logging.warn('TT history mode has no tensors in the cache to check.') return control_flow_ops.no_op stats = ['\n\n', 'core:', replica_id, ',', 'step:', step_num] diffs = [] for tensor_name, cache_idx in sorted(tensor_trace_order.tensorname_to_cache_idx.items(), key=lambda item: item[1]): tensor_to_write = cache[cache_idx, 0] snapshot_variable = self._create_or_get_tensor_history_values_cache(tensor_to_write.name, tensor_to_write.op.graph, tensor_to_write.shape.as_list(), tensor_to_write.dtype) with ops.control_dependencies([snapshot_variable]): old_value = state_ops.assign_add(snapshot_variable, 0.0) with ops.control_dependencies([old_value]): new_value = math_ops.cast(tensor_to_write, dtypes.float32) delta = math_ops.abs(math_ops.subtract(old_value, new_value)) updated = state_ops.assign(snapshot_variable, new_value) diffs.append(delta) with ops.control_dependencies([updated]): new_value_from_var = state_ops.assign_add(snapshot_variable, 0.0) stats.extend(['\n', 'core:', replica_id, ',', 'step:', step_num, ',', tensor_name, '-->', old_value, new_value_from_var, delta]) diff_stack = array_ops_stack.stack(diffs) step_max = math_ops.reduce_max(diff_stack) return cond.cond(math_ops.greater(step_max, tensor_tracer_flags.DELTA_THRESHOLD.value), lambda: logging_ops.print_v2(*stats, summarize=-1), lambda: control_flow_ops.no_op())
Generates a conditional print operation to log differences in tensor values. Args: cache: Tensor storing the trace results for the step. replica_id: Tensor storing the replica id of the running core. step_num: Step number. tensor_trace_order: TensorTraceOrder object holding tensorname to id map. Returns: The Op to flush the cache to file.
github-repos
def apply(self, func, **kwargs): import dask delayed_call = self.delayed_call self.delayed_call = self.dask_obj return self.__class__(dask.delayed(func)(delayed_call, **kwargs))
Apply some callable function to the data in this partition. Note: It is up to the implementation how kwargs are handled. They are an important part of many implementations. As of right now, they are not serialized. Args: func: The lambda to apply (may already be correctly formatted) Returns: A new `BaseFramePartition` containing the object that has had `func` applied to it.
juraj-google-style
def on_epoch_end(self, epoch, logs=None):
Called at the end of an epoch. Subclasses should override for any actions to run. This function should only be called during TRAIN mode. Args: epoch: Integer, index of epoch. logs: Dict, metric results for this training epoch, and for the validation epoch if validation is performed. Validation result keys are prefixed with `val_`. For training epoch, the values of the `Model`'s metrics are returned. Example: `{'loss': 0.2, 'accuracy': 0.7}`.
github-repos
def get_cohesive_energy(self, material_id, per_atom=False): entry = self.get_entry_by_material_id(material_id) ebulk = entry.energy / \ entry.composition.get_integer_formula_and_factor()[1] comp_dict = entry.composition.reduced_composition.as_dict() isolated_atom_e_sum, n = 0, 0 for el in comp_dict.keys(): e = self._make_request("/element/%s/tasks/isolated_atom" % (el), mp_decode=False)[0] isolated_atom_e_sum += e['output']["final_energy"] * comp_dict[el] n += comp_dict[el] ecoh_per_formula = isolated_atom_e_sum - ebulk return ecoh_per_formula/n if per_atom else ecoh_per_formula
Gets the cohesive for a material (eV per formula unit). Cohesive energy is defined as the difference between the bulk energy and the sum of total DFT energy of isolated atoms for atom elements in the bulk. Args: material_id (str): Materials Project material_id, e.g. 'mp-123'. per_atom (bool): Whether or not to return cohesive energy per atom Returns: Cohesive energy (eV).
juraj-google-style
def remove_roles(self, databaseName, roleNames, collectionName=None): for roleName in roleNames: self.remove_role(databaseName, roleName, collectionName)
Remove multiple roles Args: databaseName (str): Database Name roleNames (list of RoleSpecs): roles Keyword Args: collectionName (str): Collection
juraj-google-style
def find(name, arg=None): for p in get_processes(): if p.name.lower().find(name.lower()) != -1: if arg is not None: for a in p.cmdline or []: if a.lower().find(arg.lower()) != -1: return p else: return p return None
Find process by name or by argument in command line. Args: name (str): Process name to search for. arg (str): Command line argument for a process to search for. Returns: tea.process.base.IProcess: Process object if found.
juraj-google-style
def set_weather_from_metar(metar: typing.Union[(Metar.Metar, str)], in_file: typing.Union[(str, Path)], out_file: typing.Union[(str, Path)]=None) -> typing.Tuple[(typing.Union[(str, None)], typing.Union[(str, None)])]: (error, metar) = custom_metar.CustomMetar.get_metar(metar) if error: return (error, None) if metar: LOGGER.debug('METAR: %s', metar.code) in_file = elib.path.ensure_file(in_file) if (out_file is None): out_file = in_file else: out_file = elib.path.ensure_file(out_file, must_exist=False) LOGGER.debug('applying metar: %s -> %s', in_file, out_file) try: LOGGER.debug('building MissionWeather') _mission_weather = mission_weather.MissionWeather(metar) with Miz(str(in_file)) as miz: _mission_weather.apply_to_miz(miz) miz.zip(str(out_file)) return (None, f'successfully applied METAR to {in_file}') except ValueError: error = f return (error, None)
Applies the weather from a METAR object to a MIZ file Args: metar: metar object in_file: path to MIZ file out_file: path to output MIZ file (will default to in_file) Returns: tuple of error, success
codesearchnet
def CreateCustomizerFeedItems(client, adgroup_ids, ad_customizer_feed): feed_item_service = client.GetService('FeedItemService', 'v201809') now = datetime.now() mars_date = datetime(now.year, now.month, 1, 0, 0) venus_date = datetime(now.year, now.month, 15, 0, 0) time_format = '%Y%m%d %H%M%S' feed_item_operations = [ CreateFeedItemAddOperation( 'Mars', '$1234.56', mars_date.strftime(time_format), ad_customizer_feed), CreateFeedItemAddOperation( 'Venus', '$1450.00', venus_date.strftime(time_format), ad_customizer_feed) ] response = feed_item_service.mutate(feed_item_operations) if 'value' in response: for feed_item in response['value']: print 'Added FeedItem with ID %d.' % feed_item['feedItemId'] else: raise errors.GoogleAdsError('No FeedItems were added.') for feed_item, adgroup_id in zip(response['value'], adgroup_ids): RestrictFeedItemToAdGroup(client, feed_item, adgroup_id)
Creates FeedItems for the specified AdGroups. These FeedItems contain values to use in ad customizations for the AdGroups. Args: client: an AdWordsClient instance. adgroup_ids: a list containing two AdGroup Ids. ad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems with. Raises: GoogleAdsError: if no FeedItems were added.
juraj-google-style
def _num_elements(self): return math_ops.reduce_prod(self.inner_shape)
Number of elements in a shape. Returns: The number of elements in the shape.
github-repos
def unpack_message(buffer): hdr_size = Header().get_size() hdr_buff, msg_buff = buffer[:hdr_size], buffer[hdr_size:] header = Header() header.unpack(hdr_buff) message = new_message_from_header(header) message.unpack(msg_buff) return message
Unpack the whole buffer, including header pack. Args: buffer (bytes): Bytes representation of a openflow message. Returns: object: Instance of openflow message.
juraj-google-style
def malware(self, malware, password, file_name): if (not self.can_update()): self._tcex.handle_error(910, [self.type]) self._data['malware'] = malware self._data['password'] = password self._data['fileName'] = file_name request = {'malware': malware, 'password': password, 'fileName': file_name} return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)
Uploads to malware vault. Args: malware: password: file_name:
codesearchnet
def omim_terms(case_obj): LOG.info("Collecting OMIM disorders for case {}".format(case_obj.get('display_name'))) disorders = [] case_disorders = case_obj.get('diagnosis_phenotypes') if case_disorders: for disorder in case_disorders: disorder_obj = { "id" : ':'.join([ 'MIM', str(disorder)]) } disorders.append(disorder_obj) return disorders
Extract all OMIM phenotypes available for the case Args: case_obj(dict): a scout case object Returns: disorders(list): a list of OMIM disorder objects
juraj-google-style
def shared_s3_app_bucket(self, include_region=False): if include_region: shared_s3_app_bucket = self.format['shared_s3_app_region_bucket'].format(**self.data) else: shared_s3_app_bucket = self.format['shared_s3_app_bucket'].format(**self.data) return shared_s3_app_bucket
Generate shared s3 application bucket name. Args: include_region (bool): Include region in the name generation.
juraj-google-style
def add_cell_argument(self, name, help, required=False): for action in self._actions: if (action.dest == name): raise ValueError(('Arg "%s" was added by add_argument already.' % name)) self._cell_args[name] = {'required': required, 'help': help}
Add a cell only argument. Args: name: name of the argument. No need to start with "-" or "--". help: the help string of the argument. required: Whether it is required in cell content.
codesearchnet
def run(self, args): jlink = self.create_jlink(args) if args.product: print(('Product: %s' % jlink.product_name)) manufacturer = ('SEGGER' if (jlink.oem is None) else jlink.oem) print(('Manufacturer: %s' % manufacturer)) print(('Hardware Version: %s' % jlink.hardware_version)) print(('Firmware: %s' % jlink.firmware_version)) print(('DLL Version: %s' % jlink.version)) print(('Features: %s' % ', '.join(jlink.features))) elif args.jtag: status = jlink.hardware_status print(('TCK Pin Status: %d' % status.tck)) print(('TDI Pin Status: %d' % status.tdi)) print(('TDO Pin Status: %d' % status.tdo)) print(('TMS Pin Status: %d' % status.tms)) print(('TRES Pin Status: %d' % status.tres)) print(('TRST Pin Status: %d' % status.trst))
Runs the information command. Args: self (InfoCommand): the ``InfoCommand`` instance args (Namespace): the arguments passed on the command-line Returns: ``None``
codesearchnet
def get_open_clinvar_submission(self, user_id, institute_id): LOG.info("Retrieving an open clinvar submission for user '%s' and institute %s", user_id, institute_id) query = dict(user_id=user_id, institute_id=institute_id, status='open') submission = self.clinvar_submission_collection.find_one(query) if submission is None: submission_id = self.create_submission(user_id, institute_id) submission = self.clinvar_submission_collection.find_one({'_id':submission_id}) return submission
Retrieve the database id of an open clinvar submission for a user and institute, if none is available then create a new submission and return it Args: user_id(str): a user ID institute_id(str): an institute ID Returns: submission(obj) : an open clinvar submission object
juraj-google-style
def parse_args(): parser = argparse.ArgumentParser() parser.register('type', 'bool', lambda v: v.lower() == 'true') parser.add_argument('--max_steps', type=int, default=10, help='Number of steps to run trainer.') parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size used during training.') parser.add_argument('--learning_rate', type=float, default=0.025, help='Initial learning rate.') parser.add_argument('--data_dir', type=str, default='/tmp/mnist_data', help='Directory for storing data') parser.add_argument('--ui_type', type=str, default='readline', help='Command-line user interface type (only readline is supported)') parser.add_argument('--fake_data', type='bool', nargs='?', const=True, default=False, help='Use fake MNIST data for unit testing') parser.add_argument('--debug', type='bool', nargs='?', const=True, default=False, help='Use debugger to track down bad values during training. Mutually exclusive with the --tensorboard_debug_address flag.') parser.add_argument('--tensorboard_debug_address', type=str, default=None, help='Connect to the TensorBoard Debugger Plugin backend specified by the gRPC address (e.g., localhost:1234). Mutually exclusive with the --debug flag.') parser.add_argument('--use_random_config_path', type='bool', nargs='?', const=True, default=False, help='If set, set config file path to a random file in the temporary\n directory.') return parser.parse_known_args()
Parses commandline arguments. Returns: A tuple (parsed, unparsed) of the parsed object and a group of unparsed arguments that did not match the parser.
github-repos
def save(value: Any, path: str, *args, **kwargs) -> Any: save_handler = flags.get_save_handler() or default_save_handler return save_handler(value, path, *args, **kwargs)
Save a symbolic value using the global save handler. Example:: @pg.members([ ('x', pg.typing.Any()) ]) class A(pg.Object): pass a1 = A(1) file = 'my_file.json' a1.save(file) a2 = pg.load(file) assert pg.eq(a1, a2) Args: value: value to save. path: A path string for saving `value`. *args: Positional arguments that will be passed through to the global save handler. **kwargs: Keyword arguments that will be passed through to the global save handler. Returns: Return value from the global save handler. Raises: RuntimeError: if global save handler is not set.
github-repos
def was_init(): mask = lib.SDL_WasInit(0) return enumtools.get_items(InitFlags, mask, {InitFlags.everything})
This function returns the subsystems which have previously been initialized. Returns: Set[InitFlag]: Flags indicating which subsystems have been initialized.
codesearchnet
def __init__(self, saved_model_dir, saved_model_tags=None, saved_model_exported_names=None, trackable_obj=None): super(TFLiteSavedModelConverterV2, self).__init__() self.saved_model_dir = saved_model_dir self._saved_model_tags = saved_model_tags self._saved_model_exported_names = saved_model_exported_names self._trackable_obj = trackable_obj self._parse_saved_model_args(always_enable_saved_model_import=True)
Constructor for TFLiteConverter. Args: saved_model_dir: Directory of the SavedModel. saved_model_tags: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. All tags in the tag set must be present. (default {tf.saved_model.SERVING}). saved_model_exported_names: Names to be exported when the saved model import path is on. trackable_obj: tf.AutoTrackable object associated with `funcs`. A reference to this object needs to be maintained so that Variables do not get garbage collected since functions have a weak reference to Variables. This is only required when the tf.AutoTrackable object is not maintained by the user (e.g. `from_saved_model`).
github-repos
def regularizer(name, regularization_fn, name_filter='weights'): regex = re.compile(name_filter) def fn(var_name, variable, phase): if ((phase is pt.Phase.train) and regex.search(var_name)): with tf.name_scope(None, name, [variable]): loss = regularization_fn(variable) if (loss is not None): tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, loss) return variable return fn
Wraps a regularizer in a parameter-function. Args: name: The name scope for this regularizer. regularization_fn: A function with signature: fn(variable) -> loss `Tensor` or `None`. name_filter: A regex that will be used to filter variables by name. Returns: A parameter modification function that adds the loss to the REGULARIZATION_LOSSES graph key.
codesearchnet
def read_tree_newick(newick): if not isinstance(newick, str): try: newick = str(newick) except: raise TypeError("newick must be a str") if newick.lower().endswith('.gz'): f = gopen(expanduser(newick)); ts = f.read().decode().strip(); f.close() elif isfile(expanduser(newick)): f = open(expanduser(newick)); ts = f.read().strip(); f.close() else: ts = newick.strip() lines = ts.splitlines() if len(lines) != 1: return [read_tree_newick(l) for l in lines] try: t = Tree(); t.is_rooted = ts.startswith('[&R]') if ts[0] == '[': ts = ']'.join(ts.split(']')[1:]).strip(); ts = ts.replace(', ',',') n = t.root; i = 0 while i < len(ts): if ts[i] == ';': if i != len(ts)-1 or n != t.root: raise RuntimeError(INVALID_NEWICK) elif ts[i] == '(': c = Node(); n.add_child(c); n = c elif ts[i] == ')': n = n.parent elif ts[i] == ',': n = n.parent; c = Node(); n.add_child(c); n = c elif ts[i] == ':': i += 1; ls = '' while ts[i] != ',' and ts[i] != ')' and ts[i] != ';': ls += ts[i]; i += 1 n.edge_length = float(ls); i -= 1 else: label = '' while ts[i] != ':' and ts[i] != ',' and ts[i] != ';' and ts[i] != ')': label += ts[i]; i += 1 i -= 1; n.label = label i += 1 except Exception as e: raise RuntimeError("Failed to parse string as Newick: %s"%ts) return t
Read a tree from a Newick string or file Args: ``newick`` (``str``): Either a Newick string or the path to a Newick file (plain-text or gzipped) Returns: ``Tree``: The tree represented by ``newick``. If the Newick file has multiple trees (one per line), a ``list`` of ``Tree`` objects will be returned
juraj-google-style
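A usage sketch, assuming this is treeswift's reader (the signature and behaviour match) and the package is installed:

from treeswift import read_tree_newick

tree = read_tree_newick('((A:1,B:2):1,C:3);')
for node in tree.traverse_postorder():
    if node.is_leaf():
        print(node.label, node.edge_length)  # A 1.0, B 2.0, C 3.0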
def _set_notification(self, conn, char, enabled, timeout=1.0): if 'client_configuration' not in char: return False, {'reason': 'Cannot enable notification without a client configuration attribute for characteristic'} props = char['properties'] if not props.notify: return False, {'reason': 'Cannot enable notification on a characteristic that does not support it'} value = char['client_configuration']['value'] current_state = bool(value & (1 << 0)) if current_state == enabled: return if enabled: value |= 1 << 0 else: value &= ~(1 << 0) char['client_configuration']['value'] = value valarray = struct.pack("<H", value) return self._write_handle(conn, char['client_configuration']['handle'], True, valarray, timeout)
Enable/disable notifications on a GATT characteristic Args: conn (int): The connection handle for the device we should interact with char (dict): The characteristic we should modify enabled (bool): Should we enable or disable notifications timeout (float): How long to wait before failing
juraj-google-style
def create_sns_topic(self, region): sns = self.session.client('sns', region_name=region) self.log.info('Creating SNS topic for {}/{}'.format(self.account, region)) res = sns.create_topic(Name=self.topic_name) arn = res['TopicArn'] tmpl = get_template('cloudtrail_sns_policy.json') policy = tmpl.render(region=region, account_id=self.account.account_number, topic_name=self.topic_name) sns.set_topic_attributes(TopicArn=arn, AttributeName='Policy', AttributeValue=policy) auditlog(event='cloudtrail.create_sns_topic', actor=self.ns, data={'account': self.account.account_name, 'region': region}) return arn
Creates an SNS topic if needed. Returns the ARN if the created SNS topic Args: region (str): Region name Returns: `str`
codesearchnet
def headless(self, value): if value is True: self._arguments.append('-headless') elif '-headless' in self._arguments: self._arguments.remove('-headless')
Sets the headless argument Args: value: boolean value indicating to set the headless option
juraj-google-style
def indexSearch(self, indexes): if not self._dataFrame.empty: filter0 = self._dataFrame.index == -9999 for index in indexes: filter1 = self._dataFrame.index == index filter0 = np.logical_or(filter0, filter1) return filter0 else: return []
Filters the data by a list of indexes. Args: indexes (list of int): List of index numbers to return. Returns: list: A list containing all indexes with filtered data. Matches will be `True`, the remaining items will be `False`. If the dataFrame is empty, an empty list will be returned.
juraj-google-style
def locked_put(self, credentials): entity = self._model.get_or_insert(self._key_name) setattr(entity, self._property_name, credentials) entity.put() if self._cache: self._cache.set(self._key_name, credentials.to_json())
Write a Credentials to the datastore. Args: credentials: Credentials, the credentials to store.
juraj-google-style
def update(dst, src): for k, v in src.items(): if isinstance(v, Mapping): r = update(dst.get(k, {}), v) dst[k] = r else: dst[k] = src[k] return dst
Recursively update values in dst from src. Unlike the builtin dict.update() function, this method will descend into nested dicts, updating all nested values. Arguments: dst (dict): Destination dict. src (dict): Source dict. Returns: dict: dst updated with entries from src.
juraj-google-style
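A worked example of the recursive merge, assuming update is imported from its module:

dst = {'server': {'host': 'localhost', 'port': 80}, 'debug': False}
src = {'server': {'port': 8080}, 'debug': True}
update(dst, src)
# {'server': {'host': 'localhost', 'port': 8080}, 'debug': True}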
def __init__(self, use_memory_view_min_size=4096): self.use_memory_view_min_size = use_memory_view_min_size self._deque = collections.deque() self.clear()
Constructor. Args: use_memory_view_min_size (int): minimum size before using memoryview objects (advanced option, the default is probably good for you).
juraj-google-style
def DeserializeFromDB(buffer): m = StreamManager.GetStream(buffer) reader = BinaryReader(m) uns = UnspentCoinState() uns.Deserialize(reader) StreamManager.ReleaseStream(m) return uns
Deserialize full object. Args: buffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from. Returns: UnspentCoinState:
juraj-google-style
def write(self, data): start_time = time.time() self._get_write_buffer().write(data) ctx = context.get() operation.counters.Increment(COUNTER_IO_WRITE_BYTES, len(data))(ctx) operation.counters.Increment(COUNTER_IO_WRITE_MSEC, int(((time.time() - start_time) * 1000)))(ctx)
Write data to the GoogleCloudStorage file. Args: data: string containing the data to be written.
codesearchnet
def create_queue(self, register=False): queue = asyncio.Queue(loop=self._loop) if register: self._work_queues.add(queue) return queue
Create a new work queue and optionally register it. This will make sure the queue is attached to the correct event loop. You can optionally choose to automatically register it so that wait_idle() will block until the queue is empty. Args: register (bool): Whether to call register_workqueue() automatically. Returns: asyncio.Queue: The newly created queue.
juraj-google-style
def _CreateShapesFolder(self, schedule, doc): if not schedule.GetShapeList(): return None shapes_folder = self._CreateFolder(doc, 'Shapes') shapes = list(schedule.GetShapeList()) shapes.sort(key=lambda x: x.shape_id) for shape in shapes: placemark = self._CreatePlacemark(shapes_folder, shape.shape_id) self._CreateLineStringForShape(placemark, shape) if self.shape_points: self._CreateShapePointFolder(shapes_folder, shape) return shapes_folder
Create a KML Folder containing all the shapes in a schedule. The folder contains a placemark for each shape. If there are no shapes in the schedule then the folder is not created and None is returned. Args: schedule: The transitfeed.Schedule instance. doc: The KML Document ElementTree.Element instance. Returns: The Folder ElementTree.Element instance or None.
juraj-google-style
def validate(self, config): if not isinstance(config, dict): raise errors.SchemeValidationError( 'Scheme can only validate a dictionary config, but was given ' '{} (type: {})'.format(config, type(config)) ) for arg in self.args: if arg.name in config: arg.validate(config[arg.name]) else: if arg.required: raise errors.SchemeValidationError( 'Option "{}" is required, but not found.'.format(arg.name) )
Validate the given config against the `Scheme`. Args: config (dict): The configuration to validate. Raises: errors.SchemeValidationError: The configuration fails validation against the `Scheme`.
juraj-google-style
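A hypothetical sketch of how validation is driven; the `Option` class below is a stand-in exposing only the `name`, `required`, and `validate()` members the method relies on, and `Scheme` is assumed to take its options as positional arguments:

class Option:
    def __init__(self, name, required=False):
        self.name = name
        self.required = required

    def validate(self, value):
        pass  # per-option type/choice checks would live here

scheme = Scheme(Option('host', required=True), Option('port'))
scheme.validate({'host': 'example.com'})   # passes
scheme.validate({'port': 8080})            # raises SchemeValidationError: "host" is required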
def __init__(self, api_key=None):
    try:
        self.api_key = api_key or os.environ['AIRTABLE_API_KEY']
    except KeyError:
        raise KeyError('Api Key not found. Pass api_key as a kwarg '
                       'or set an env var AIRTABLE_API_KEY with your key')
Authentication used by Airtable Class Args: api_key (``str``): Airtable API Key. Optional. If not set, it will look for environment variable ``AIRTABLE_API_KEY``
juraj-google-style
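Usage amounts to either passing the key directly or exporting it beforehand; the class name below is assumed, since only the constructor is shown:

import os

auth = AirtableAuth(api_key='keyXXXXXXXXXXXXXX')      # explicit key

os.environ['AIRTABLE_API_KEY'] = 'keyXXXXXXXXXXXXXX'  # or rely on the env var
auth = AirtableAuth()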
def stage_tc_batch(self, owner, staging_data):
    batch = self.tcex.batch(owner)
    for group in staging_data.get('group') or []:
        variable = group.pop('variable', None)
        path = group.pop('path', None)
        data = self.path_data(group, path)
        if group.get('xid') is None:
            group['xid'] = self.stage_tc_batch_xid(group.get('type'), group.get('name'), owner)
        group['ownerName'] = owner
        batch.add_group(group)
        if variable is not None and data is not None:
            self.stage_redis(variable, self.stage_tc_group_entity(data))
    for indicator in staging_data.get('indicator') or []:
        variable = indicator.pop('variable', None)
        path = indicator.pop('path', None)
        if indicator.get('xid') is None:
            indicator['xid'] = self.stage_tc_batch_xid(indicator.get('type'), indicator.get('summary'), owner)
        indicator['ownerName'] = owner
        batch.add_indicator(indicator)
        data = self.path_data(dict(indicator), path)
        if variable is not None and data is not None:
            self.stage_redis(variable, self.stage_tc_indicator_entity(data))
    batch_results = batch.submit()
    self.log.debug('[stage] Batch Results: {}'.format(batch_results))
    for error in batch_results.get('errors') or []:
        self.log.error('[stage] {}'.format(error))
Stage data in ThreatConnect Platform using batch API. Args: owner (str): The ThreatConnect owner to submit batch job. staging_data (dict): A dict of ThreatConnect batch data.
codesearchnet
def _close_open_file(self, file_des):
    self.open_files[file_des] = None
    heapq.heappush(self._free_fd_heap, file_des)
Remove file object with given descriptor from the list of open files. Sets the entry in open_files to None. Args: file_des: Descriptor of file object to be removed from open files list.
codesearchnet
def parsed_top_level_errors(parsed, errors, component_type: str = "") -> Errors: fn_cnt = 0 rel_cnt = 0 nested_cnt = 0 for key in parsed: if parsed[key]["type"] == "Function": fn_cnt += 1 if parsed[key]["type"] == "Relation": rel_cnt += 1 if parsed[key]["type"] == "Nested": nested_cnt += 1 if not component_type: if nested_cnt > 1: errors.append( ( "Error", "Too many nested objects - can only have one per BEL Assertion", ) ) if nested_cnt: if rel_cnt > 2: errors.append( ( "Error", "Too many relations - can only have two in a nested BEL Assertion", ) ) elif fn_cnt > 4: errors.append(("Error", "Too many BEL subject and object candidates")) else: if rel_cnt > 1: errors.append( ( "Error", "Too many relations - can only have one in a BEL Assertion", ) ) elif fn_cnt > 2: errors.append(("Error", "Too many BEL subject and object candidates")) elif component_type == "subject": if rel_cnt > 0: errors.append( ("Error", "Too many relations - cannot have any in a BEL Subject") ) elif fn_cnt > 1: errors.append( ("Error", "Too many BEL subject candidates - can only have one") ) elif component_type == "object": if nested_cnt: if rel_cnt > 1: errors.append( ( "Error", "Too many relations - can only have one in a nested BEL object", ) ) elif fn_cnt > 2: errors.append( ( "Error", "Too many BEL subject and object candidates in a nested BEL object", ) ) else: if rel_cnt > 0: errors.append( ("Error", "Too many relations - cannot have any in a BEL Subject") ) elif fn_cnt > 1: errors.append( ("Error", "Too many BEL subject candidates - can only have one") ) return errors
Check full parse for errors Args: parsed: errors: component_type: Empty string or 'subject' or 'object' to indicate that we are parsing the subject or object field input
juraj-google-style
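A sketch of how the counting rules play out; the shape of `parsed` (span keys mapping to dicts with a `type` field) is inferred from the function body rather than documented:

parsed = {
    'span-0': {'type': 'Function'},
    'span-1': {'type': 'Relation'},
    'span-2': {'type': 'Function'},
}

errors = parsed_top_level_errors(parsed, [], component_type='subject')
# A subject may not contain a relation, so errors now holds:
# [('Error', 'Too many relations - cannot have any in a BEL Subject')]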
def _generate_visualization(template_file: str, loader: jinja2.BaseLoader, **kwargs) -> str:
    env = jinja2.Environment(loader=loader)
    template = env.get_template(template_file)
    return template.render(
        cytoscape_url=_CYTOSCAPE_URL,
        dagre_url=_DAGRE_URL,
        cytoscape_dagre_url=_CYTOSCAPE_DAGRE_URL,
        **kwargs)
Generate the visualization webpage. Args: template_file: str. A jinja2 template filename. loader: jinja2.BaseLoader. The loader needs to be able to load files in this file's directory. **kwargs: Additional args passed on to the template. Returns: str. The rendered visualization page.
github-repos
def AddEventAttribute(self, attribute_name, attribute_value):
    if attribute_name in self._extra_event_attributes:
        raise KeyError('Event attribute {0:s} already set'.format(attribute_name))

    self._extra_event_attributes[attribute_name] = attribute_value
Adds an attribute that will be set on all events produced. Setting attributes using this method will cause events produced via this mediator to have an attribute with the provided name set with the provided value. Args: attribute_name (str): name of the attribute to add. attribute_value (str): value of the attribute to add. Raises: KeyError: if the event attribute is already set.
codesearchnet
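Setting the same attribute twice raises, so callers typically set each attribute once; `mediator` below is a placeholder for an already-constructed mediator instance:

mediator.AddEventAttribute('hostname', 'workstation-01')

try:
    mediator.AddEventAttribute('hostname', 'other-host')
except KeyError as exception:
    print(exception)   # KeyError: Event attribute hostname already set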
def closest_point_to(self, point, thr=20.0):
    i = 0
    point_arr = point.gen2arr()

    def closest_in_line(pointA, pointB):
        temp = closest_point(pointA.gen2arr(), pointB.gen2arr(), point_arr)
        return Point(temp[1], temp[0], None)

    for p_a, p_b in pairwise(self.points):
        candidate = closest_in_line(p_a, p_b)
        if candidate.distance(point) <= thr:
            if p_a.distance(point) <= thr:
                return i, p_a
            elif p_b.distance(point) <= thr:
                return i + 1, p_b
            else:
                return i, candidate
        i = i + 1
    return -1, None
Finds the closest point in the segment to a given point Args: point (:obj:`Point`) thr (float, optional): Distance threshold, in meters, to be considered the same point. Defaults to 20.0 Returns: (int, Point): Index of the point. -1 if doesn't exist. A point is given if it's along the segment
codesearchnet
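A usage sketch; the `Segment` wrapper and the `Point(lat, lon, time)` constructor signature are assumptions read off the method body, not confirmed API:

track = Segment([Point(41.150, -8.610, None),
                 Point(41.160, -8.600, None),
                 Point(41.170, -8.590, None)])

index, match = track.closest_point_to(Point(41.155, -8.605, None), thr=50.0)
if index == -1:
    print('no part of the segment lies within 50 m of the query point')
else:
    print('closest match found on the edge starting at point', index)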
def __init__(self, *args, **kwargs):
    super(MemoryStream, self).__init__(*args, **kwargs)
Create an instance. Args: *args: **kwargs:
juraj-google-style
def wait_for_contract(self, contract_address_hex, timeout=None):
    contract_address = decode_hex(contract_address_hex)
    start_time = time.time()

    result = self._raiden.chain.client.web3.eth.getCode(
        to_checksum_address(contract_address),
    )
    current_time = time.time()

    while not result:
        # Give up once the timeout window has elapsed.
        if timeout and current_time > start_time + timeout:
            return False

        result = self._raiden.chain.client.web3.eth.getCode(
            to_checksum_address(contract_address),
        )
        gevent.sleep(0.5)
        current_time = time.time()

    return len(result) > 0
Wait until a contract is mined Args: contract_address_hex (string): hex encoded address of the contract timeout (int): time to wait for the contract to get mined Returns: True if the contract got mined, false otherwise
codesearchnet
def build_institute(internal_id, display_name, sanger_recipients=None, coverage_cutoff=None, frequency_cutoff=None): LOG.info("Building institute %s with display name %s", internal_id,display_name) institute_obj = Institute( internal_id=internal_id, display_name=display_name, sanger_recipients=sanger_recipients, coverage_cutoff = coverage_cutoff, frequency_cutoff = frequency_cutoff ) for key in list(institute_obj): if institute_obj[key] is None: institute_obj.pop(key) return institute_obj
Build an institute object Args: internal_id(str) display_name(str) sanger_recipients(list(str)): List with email addresses coverage_cutoff: Coverage threshold for the institute frequency_cutoff: Frequency threshold for the institute Returns: institute_obj(scout.models.Institute)
juraj-google-style
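A quick sketch showing that unset optional fields are stripped from the returned object; the values are made up for illustration:

institute_obj = build_institute(
    internal_id='cust000',
    display_name='Clinical Genomics',
    sanger_recipients=['clark.kent@mail.com'],
)
# coverage_cutoff and frequency_cutoff were left as None, so those keys are
# popped and only the populated fields remain on institute_obj.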
def to_string( self, fmt: str = "medium", canonicalize: bool = False, decanonicalize: bool = False, orthologize: str = None, ) -> str: arg_string = ", ".join([a.to_string(fmt=fmt) for a in self.args]) if fmt in ["short", "medium"]: function_name = self.name_short else: function_name = self.name return "{}({})".format(function_name, arg_string)
Convert AST object to string Args: fmt (str): short, medium, long formatted BEL statements short = short function and short relation format medium = short function and long relation format long = long function and long relation format Returns: str: string version of BEL AST
juraj-google-style
def stop(self, accountID, **kwargs):
    return self.create(accountID, order=StopOrderRequest(**kwargs))
Shortcut to create a Stop Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a StopOrderRequest Returns: v20.response.Response containing the results from submitting the request
codesearchnet
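A sketch of the shortcut in use; the account ID is hypothetical and the keyword names are assumed to match the fields StopOrderRequest accepts:

response = api.order.stop(
    '101-004-1234567-001',      # hypothetical account ID
    instrument='EUR_USD',
    units=100,
    price='1.2500',
)
print(response.status)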
def _ExtractPath(response, pathspec_attribute=None):
    path_specification = response

    if pathspec_attribute is not None:
        if response.HasField(pathspec_attribute):
            path_specification = response.Get(pathspec_attribute)

    if path_specification.HasField('pathspec'):
        path_specification = path_specification.pathspec

    if path_specification.HasField('path'):
        path_specification = path_specification.path

    if isinstance(path_specification, Text):
        return path_specification

    return None
Returns the path from a client action response as a string. Args: response: A client action response. pathspec_attribute: Specifies the field which stores the pathspec. Returns: The path as a string or None if no path is found.
codesearchnet
def register_trainable(name, trainable): from ray.tune.trainable import Trainable from ray.tune.function_runner import wrap_function if isinstance(trainable, type): logger.debug("Detected class for trainable.") elif isinstance(trainable, FunctionType): logger.debug("Detected function for trainable.") trainable = wrap_function(trainable) elif callable(trainable): logger.warning( "Detected unknown callable for trainable. Converting to class.") trainable = wrap_function(trainable) if not issubclass(trainable, Trainable): raise TypeError("Second argument must be convertable to Trainable", trainable) _global_registry.register(TRAINABLE_CLASS, name, trainable)
Register a trainable function or class. Args: name (str): Name to register. trainable (obj): Function or tune.Trainable class. Functions must take (config, status_reporter) as arguments and will be automatically converted into a class during registration.
juraj-google-style
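A minimal function-based trainable matching the (config, reporter) signature described above; the exact launch call varies between Ray versions, so treat the last line as a sketch:

def my_trainable(config, reporter):
    for step in range(10):
        reporter(timesteps_total=step, mean_accuracy=config['lr'] * step)

register_trainable('my_trainable', my_trainable)   # wrapped into a Trainable class

tune.run('my_trainable', config={'lr': 0.01})      # assumes `from ray import tune`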
def start(self, extra_args="", tag=""): if self.started: return utils.create_dir(self.log_path) if tag: tag = tag + ',' out_file_name = "IPerfServer,{},{}{}.log".format( self.port, tag, len(self.log_files)) full_out_path = os.path.join(self.log_path, out_file_name) cmd = '%s %s > %s' % (self.iperf_str, extra_args, full_out_path) self.iperf_process = utils.start_standing_subprocess(cmd, shell=True) self.log_files.append(full_out_path) self.started = True
Starts iperf server on specified port. Args: extra_args: A string representing extra arguments to start iperf server with. tag: Appended to log file name to identify logs from different iperf runs.
juraj-google-style
def _super_stack(inputs, attention_bias, hparams, mp, padding="LEFT"): layers = hparams.layers.strip(",").split(",") moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(",")] if hparams.diet_experts: hsize, = moe_hidden_sizes def _diet_expert(x): return diet.diet_expert(x, hsize, diet.diet_adam_optimizer_params()) expert_fn = _diet_expert else: expert_fn = expert_utils.ffn_expert_fn( hparams.hidden_size, moe_hidden_sizes, hparams.hidden_size) attention_bias_3d = mp(tf.squeeze, attention_bias, 1) mix_size = int(hparams.mix_fraction * hparams.hidden_size) accumulator = inputs x = inputs extra_losses = [] for layer_num, layer_type in enumerate(layers): with tf.variable_scope("%s_%d" % (layer_type, layer_num)): tf.logging.info("%s_%d" % (layer_type, layer_num)) if layer_type == "a": accumulator = mp(tf.add, x, accumulator) x = accumulator elif layer_type == "n": x = mp(common_layers.apply_norm, x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon) elif layer_type == "d": x = mp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout) elif layer_type == "m": def _split(t): return tuple(tf.split( t, [mix_size, hparams.hidden_size - mix_size], 2)) to_mix, to_keep = mp(_split, x) mixed = expert_utils.all_reduce_ring(to_mix, mp) mixed = mp(tf.multiply, mixed, mp.n ** -0.5) x = mp(lambda a, b: tf.concat([a, b], 2), mixed, to_keep) elif layer_type == "att": q = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False, name="q_transform") x = mp( common_attention.scaled_dot_product_attention_simple, q, x, x, attention_bias_3d) x = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False, name="o_transform") elif layer_type == "multihead-att": x = mp( common_attention.multihead_attention, x, None, attention_bias, hparams.multihead_attention_key_channels or hparams.hidden_size, hparams.multihead_attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.multihead_attention_num_heads, hparams.attention_dropout) elif layer_type == "ffn": x = mp( common_layers.dense_relu_dense, x, hparams.filter_size, hparams.hidden_size) elif layer_type == "conv": x = mp( common_layers.conv1d, x, hparams.hidden_size, hparams.kernel_height, activation=tf.nn.relu, padding=padding, ) elif layer_type == "moe": x, loss = mp( expert_utils.local_moe, x, train=hparams.mode == tf.estimator.ModeKeys.TRAIN, expert_fn=expert_fn, num_experts=hparams.moe_num_experts, k=hparams.moe_k, loss_coef=hparams.moe_loss_coef) extra_losses.extend(loss) else: assert False, "unknown sublayer %s" % layer_type if extra_losses: extra_loss = tf.add_n(extra_losses) else: extra_loss = None return x, extra_loss
A stack of super_lm layers. Args: inputs: a list of Tensors attention_bias: list of bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model mp: a Parallelism object padding: a string Returns: y: a list of Tensors extra_loss: an optional scalar
juraj-google-style
def vel_in_A_to_vel_in_B(vel_A, ang_vel_A, pose_A_in_B): pos_A_in_B = pose_A_in_B[:3, 3] rot_A_in_B = pose_A_in_B[:3, :3] skew_symm = _skew_symmetric_translation(pos_A_in_B) vel_B = rot_A_in_B.dot(vel_A) + skew_symm.dot(rot_A_in_B.dot(ang_vel_A)) ang_vel_B = rot_A_in_B.dot(ang_vel_A) return vel_B, ang_vel_B
Converts linear and angular velocity of a point in frame A to the equivalent in frame B. Args: vel_A: 3-dim iterable for linear velocity in A ang_vel_A: 3-dim iterable for angular velocity in A pose_A_in_B: numpy array of shape (4,4) corresponding to the pose of A in frame B Returns: vel_B, ang_vel_B: two numpy arrays of shape (3,) for the velocities in B
juraj-google-style
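A small numeric check, assuming numpy and the helper above are importable; frame A is only translated 1 m along x relative to B, so a pure spin about z picks up a tangential linear component in B while the angular velocity is unchanged:

import numpy as np

pose_A_in_B = np.eye(4)
pose_A_in_B[:3, 3] = [1.0, 0.0, 0.0]         # A sits 1 m along x in B

vel_A = np.zeros(3)
ang_vel_A = np.array([0.0, 0.0, 1.0])        # 1 rad/s about z

vel_B, ang_vel_B = vel_in_A_to_vel_in_B(vel_A, ang_vel_A, pose_A_in_B)
# ang_vel_B == [0, 0, 1]; vel_B gains a unit tangential (±y) component whose
# sign follows the convention used by _skew_symmetric_translation.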
def verify_gmt_integrity(gmt): set_ids = [d[SET_IDENTIFIER_FIELD] for d in gmt] assert len(set(set_ids)) == len(set_ids), ( "Set identifiers should be unique. set_ids: {}".format(set_ids))
Make sure that set ids are unique. Args: gmt (GMT object): list of dicts Returns: None
juraj-google-style
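A sketch of what the check catches; SET_IDENTIFIER_FIELD is assumed here to be 'head', which is only a stand-in for the module-level constant:

SET_IDENTIFIER_FIELD = 'head'   # assumption for the sketch

gmt = [
    {SET_IDENTIFIER_FIELD: 'set_a', 'entry': ['g1', 'g2']},
    {SET_IDENTIFIER_FIELD: 'set_b', 'entry': ['g3']},
]
verify_gmt_integrity(gmt)                                    # passes silently

gmt.append({SET_IDENTIFIER_FIELD: 'set_a', 'entry': ['g4']})
verify_gmt_integrity(gmt)                                    # AssertionError: duplicate set id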
def GetConfig(self, request, global_params=None):
    config = self.GetMethodConfig('GetConfig')
    return self._RunMethod(config, request, global_params=global_params)
Get encoded debug configuration for component. Not cacheable. Args: request: (DataflowProjectsJobsDebugGetConfigRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GetDebugConfigResponse) The response message.
github-repos