code: string (lengths 20 to 4.93k)
docstring: string (lengths 33 to 1.27k)
source: string (3 classes)
def _check_arg_equality(node_a, node_b, attr_name): return getattr(node_a, attr_name) == getattr(node_b, attr_name)
Check equality of nodes based on the comparison of their attributes named attr_name. Args: node_a (astroid.node): first node to compare. node_b (astroid.node): second node to compare. attr_name (str): name of the nodes' attribute to use for comparison. Returns: bool: True if node_a.attr_name == node_b.attr_name, False otherwise.
juraj-google-style
def start_publishing(mysql_settings, **kwargs): _logger.info('Start publishing from %s with:\n%s' % (mysql_settings, kwargs)) kwargs.setdefault('server_id', random.randint(1000000000, 4294967295)) kwargs.setdefault('freeze_schema', True) stream = pymysqlreplication.BinLogStreamReader( mysql_settings, only_events=[row_event.DeleteRowsEvent, row_event.UpdateRowsEvent, row_event.WriteRowsEvent], **kwargs ) for event in stream: if not isinstance(event, row_event.RowsEvent): continue _logger.debug('Send binlog signal "%s@%s.%s"' % ( event.__class__.__name__, event.schema, event.table )) signals.binlog_signal.send(event, stream=stream) signals.binlog_position_signal.send((stream.log_file, stream.log_pos))
Start publishing MySQL row-based binlog events to blinker signals Args: mysql_settings (dict): information to connect to mysql via pymysql **kwargs: The additional kwargs will be passed to :py:class:`pymysqlreplication.BinLogStreamReader`.
juraj-google-style
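A minimal usage sketch for the record above; the connection values and the extra BinLogStreamReader kwargs are placeholders, not taken from the original project.
mysql_settings = {
    'host': '127.0.0.1',
    'port': 3306,
    'user': 'replicator',
    'passwd': 'secret',
}
# Blocks, emitting binlog_signal / binlog_position_signal for each row event.
start_publishing(mysql_settings, blocking=True, resume_stream=True)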
def __call__(self, dumper: 'Dumper', data: Any) -> yaml.MappingNode: logger.info('Representing {} of class {}'.format( data, self.class_.__name__)) represented = dumper.represent_str(data.name) snode = Node(represented) if hasattr(self.class_, 'yatiml_sweeten'): self.class_.yatiml_sweeten(snode) represented = snode.yaml_node logger.debug('End representing {}'.format(data)) return represented
Represents the class as a ScalarNode. Args: dumper: The dumper to use. data: The user-defined object to dump. Returns: A yaml.Node representing the object.
juraj-google-style
def fit2d(samples, e_x, e_y, remove_zeros=False, p_est=None, **kw): if (p_est is None): height = (len(e_y) - 1) width = (len(e_x) - 1) (p_est, _) = np.histogramdd(samples, (e_x, e_y)) else: p_est = p_est.T (width, height) = p_est.shape shape = p_est.shape p_est = (p_est / sum(p_est.flat)).reshape(shape) mx = p_est.sum(1) my = p_est.sum(0) p_est = p_est.T.flatten() (basis, knots) = spline_base2d(width, height, marginal_x=mx, marginal_y=my, **kw) model = linear_model.BayesianRidge() if remove_zeros: non_zero = (~ (p_est == 0)) model.fit(basis[:, non_zero].T, p_est[non_zero]) else: non_zero = (p_est >= 0) p_est[~non_zero] = np.finfo(float).eps model.fit(basis.T, p_est) return (model.predict(basis.T).reshape((height, width)), p_est.reshape((height, width)), knots)
Fits a 2D distribution with splines. Input: samples: Matrix or list of arrays If matrix, it must be of size Nx2, where N is the number of observations. If list, it must contain two arrays of length N. e_x: Array Edges that define the events in the probability distribution along the x direction. For example, e_x[0] < samples[0] <= e_x[1] picks out all samples that are associated with the first event. e_y: Array See e_x, but for the y direction. remove_zeros: Bool If True, events that are not observed will not be part of the fitting process. If False, those events will be modelled as finfo('float').eps **kw: Arguments that are passed on to spline_base1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Tuple of arrays Sequence of knots that were used for the spline basis (x,y)
codesearchnet
def pad_to_best_fit(images: 'torch.Tensor', target_size: Tuple[int, int], background_color: Union[int, Tuple[int, int, int]]=0) -> 'torch.Tensor': num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0] if isinstance(background_color, int): background_color = [background_color] + [0] * (num_channels - 1) elif len(background_color) != num_channels: raise ValueError(f'background_color must have no more than {num_channels} elements to match the number of channels') height, width = images.shape[-2:] target_height, target_width = target_size paste_x_right = target_width - width paste_y_right = target_height - height padded_images = F.pad(images, padding=[0, 0, paste_x_right, paste_y_right], fill=background_color) return padded_images
Pads images to fit the target size. Args: images (`torch.Tensor`): The images to pad. target_size (`Tuple[int, int]`): The target (height, width) to pad to. background_color (`int` or `Tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single-channel images or a tuple of integers for multi-channel images. If passed as an integer for a multi-channel image, subsequent channels default to `0`. Returns: `torch.Tensor`: The padded images.
github-repos
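A hedged usage sketch for pad_to_best_fit above, assuming `F` is the torchvision functional module imported by the surrounding file; the shapes and fill value are illustrative only.
import torch

images = torch.rand(2, 3, 100, 80)  # batch of 2 RGB images, 100x80
padded = pad_to_best_fit(images, target_size=(224, 224), background_color=127)
print(padded.shape)  # torch.Size([2, 3, 224, 224])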
def ismount(self, path): path = make_string_path(path) if not path: return False normed_path = self.filesystem.absnormpath(path) sep = self.filesystem._path_separator(path) if self.filesystem.is_windows_fs: if self.filesystem.alternative_path_separator is not None: path_seps = ( sep, self.filesystem._alternative_path_separator(path) ) else: path_seps = (sep, ) drive, rest = self.filesystem.splitdrive(normed_path) if drive and drive[:1] in path_seps: return (not rest) or (rest in path_seps) if rest in path_seps: return True for mount_point in self.filesystem.mount_points: if normed_path.rstrip(sep) == mount_point.rstrip(sep): return True return False
Return true if the given path is a mount point. Args: path: Path to filesystem object to be checked Returns: `True` if path is a mount point added to the fake file system. Under Windows also returns True for drive and UNC roots (independent of their existence).
juraj-google-style
def Open(self, file_object, ascii_codepage='cp1252'): registry_file = dfwinreg_regf.REGFWinRegistryFile( ascii_codepage=ascii_codepage) registry_file.Open(file_object) return registry_file
Opens a Windows Registry file-like object. Args: file_object (dfvfs.FileIO): Windows Registry file-like object. ascii_codepage (Optional[str]): ASCII string codepage. Returns: WinRegistryFile: Windows Registry file or None.
juraj-google-style
def test_encode_with_backbone_element_constraint_succeeds(self, fhir_path_expression: str, expected_sql_expression: str, expected_fhir_path_sql_expression: str, expected_fields_referenced: List[str]): constraint = self.build_constraint(fhir_path_expression=fhir_path_expression) self.assert_constraint_is_equal_to_expression(base_id='Patient', element_definition_id='Patient.contact.name', constraint=constraint, expected_sql_expression=expected_sql_expression, expected_fhir_path_sql_expression=expected_fhir_path_sql_expression, expected_fields_referenced=expected_fields_referenced)
Tests encoding of a "transitive constraint" defined on a BackboneElement. A "transitive constraint" is a constraint defined relative to a resource elsewhere in the FHIR resource graph than what we're querying against. Args: fhir_path_expression: The FHIRPath expression to encode. expected_sql_expression: The expected generated Standard SQL from v1. expected_fhir_path_sql_expression: The expected generated Standard SQL without any contextual subqueries. expected_fields_referenced: The expected fields_referenced_by_expression attribute on the resulting constraint.
github-repos
def flownet2_fusion(self, x): with argscope([tf.layers.conv2d], activation=(lambda x: tf.nn.leaky_relu(x, 0.1)), padding='valid', strides=2, kernel_size=3, data_format='channels_first'), argscope([tf.layers.conv2d_transpose], padding='same', activation=tf.identity, data_format='channels_first', strides=2, kernel_size=4): conv0 = tf.layers.conv2d(pad(x, 1), 64, name='conv0', strides=1) x = tf.layers.conv2d(pad(conv0, 1), 64, name='conv1') conv1 = tf.layers.conv2d(pad(x, 1), 128, name='conv1_1', strides=1) x = tf.layers.conv2d(pad(conv1, 1), 128, name='conv2') conv2 = tf.layers.conv2d(pad(x, 1), 128, name='conv2_1', strides=1) flow2 = tf.layers.conv2d(pad(conv2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity) flow2_up = tf.layers.conv2d_transpose(flow2, 2, name='upsampled_flow2_to_1') x = tf.layers.conv2d_transpose(conv2, 32, name='deconv1', activation=(lambda x: tf.nn.leaky_relu(x, 0.1))) concat1 = tf.concat([conv1, x, flow2_up], axis=1, name='concat1') interconv1 = tf.layers.conv2d(pad(concat1, 1), 32, strides=1, name='inter_conv1', activation=tf.identity) flow1 = tf.layers.conv2d(pad(interconv1, 1), 2, name='predict_flow1', strides=1, activation=tf.identity) flow1_up = tf.layers.conv2d_transpose(flow1, 2, name='upsampled_flow1_to_0') x = tf.layers.conv2d_transpose(concat1, 16, name='deconv0', activation=(lambda x: tf.nn.leaky_relu(x, 0.1))) concat0 = tf.concat([conv0, x, flow1_up], axis=1, name='concat0') interconv0 = tf.layers.conv2d(pad(concat0, 1), 16, strides=1, name='inter_conv0', activation=tf.identity) flow0 = tf.layers.conv2d(pad(interconv0, 1), 2, name='predict_flow0', strides=1, activation=tf.identity) return tf.identity(flow0, name='flow2')
Architecture in Table 4 of FlowNet 2.0. Args: x: NCHW tensor, where C=11 is the concatenation of 7 items of [3, 2, 2, 1, 1, 1, 1] channels.
codesearchnet
def str_of_constant(self, printer: 'Callable[[_base.BaseValue], str]') -> str: del printer return repr(self.pyval)
Get a string representation of this constant. Args: printer: A BaseValue -> str function that will be used to print abstract values. Returns: A string of self.pyval.
github-repos
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): vision_data = {} if image_sizes is not None: num_image_tokens = [self.image_seq_length] * len(image_sizes) num_image_patches = [1] * len(image_sizes) vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches}) return MultiModalData(**vision_data)
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (List[List[str]], *optional*): The input sizes formatted as (height, width) per each image. Returns: Dict[str, List[int]]: A dictionary mapping each modality ("image", "video", "audio") to a list containing the number of placeholder tokens required. If the model doesn't accept a certain modality or no input sizes are provided, the dict value is set to an empty list.
github-repos
def CompleteBreakpoint(self, breakpoint_id): with self._lock: self._completed.add(breakpoint_id) if breakpoint_id in self._active: self._active.pop(breakpoint_id).Clear()
Marks the specified breakpoint as completed. Appends the ID to the set of completed breakpoints and clears it. Args: breakpoint_id: breakpoint ID to complete.
juraj-google-style
def as_dict(self, voigt=False): input_array = self.voigt if voigt else self d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "input_array": input_array.tolist()} if voigt: d.update({"voigt": voigt}) return d
Serializes the tensor object Args: voigt (bool): flag for whether to store entries in voigt-notation. Defaults to false, as information may be lost in conversion. Returns (Dict): serialized format tensor object
juraj-google-style
def map_feeds(self, ad_feed, ad_creative_assignment, ad_placement_assignment, ad_event_tag_assignment, placement_feed, event_tag_profile_feed): for ad in ad_feed: ad['creative_assignment'] = [association for association in ad_creative_assignment if self._assignment_matches(ad, association)] ad['placement_assignment'] = [association for association in ad_placement_assignment if self._assignment_matches(ad, association)] if ad.get(FieldMap.PLACEMENT_ID, None) or ad.get(FieldMap.PLACEMENT_NAME, None): ad['placement_assignment'].append(ad) ad['event_tag_assignment'] = [association for association in ad_event_tag_assignment if self._assignment_matches(ad, association)] if ad.get(FieldMap.EVENT_TAG_ID, None) or ad.get(FieldMap.EVENT_TAG_NAME, None): ad['event_tag_assignment'].append(ad) ad['placement_event_tag_profile'] = [] for placement_assignment in ad['placement_assignment']: placement = self._placement_dao.get(placement_assignment, required=True) if placement: ad_placement = None for item in placement_feed: if int(placement['id']) == item.get(FieldMap.PLACEMENT_ID, None): ad_placement = item if ad_placement: event_tag_profile_name = ad_placement.get(FieldMap.EVENT_TAG_PROFILE_NAME, '') if event_tag_profile_name: ad['placement_event_tag_profile'] += [event_tag_profile for event_tag_profile in event_tag_profile_feed if event_tag_profile.get(FieldMap.EVENT_TAG_PROFILE_NAME, None) == event_tag_profile_name]
Maps subfeeds to the corresponding ad. The Ad is an object with several dependent entities: they can be separate entities such as the creative assignment, or complex sub-objects within the ad entity such as the placement assignment. This function maps those feeds by ID and injects the child feeds into the feed item representing the ad. The ad level is also where placement event tag profiles are assigned, so this function is responsible for determining whether the placement event tag profile should be used, or the direct event tag assignment in the ad. Args: ad_feed: Ad feed. ad_creative_assignment: Ad creative assignment feed. ad_placement_assignment: Ad placement assignment feed. ad_event_tag_assignment: Ad event tag assignment feed. placement_feed: Placement feed. event_tag_profile_feed: Event tag profile feed.
github-repos
def __init__(self, urns, service, managed_replacement=None): self._urns = urns self._service = service self._schema_transforms = None self._managed_replacement = managed_replacement
Initializes the ExternalProvider. Args: urns: a set of URNs that uniquely identify the transforms supported. service: the gradle target that identifies the expansion service jar. managed_replacement (Optional): a map that defines the transforms for which the SDK may substitute an available managed transform.
github-repos
def _infer_all_output_dims(self, inputs): batch_size = tf.expand_dims(tf.shape(inputs)[0], 0) out_channels = (self.output_channels,) if self._n == 1: out_shape = (1,) + self.output_shape else: out_shape = self.output_shape if self._data_format.startswith("NC"): out_shape_tuple = out_channels + out_shape elif self._data_format.startswith("N") and self._data_format.endswith("C"): out_shape_tuple = out_shape + out_channels output_shape = tf.concat([batch_size, out_shape_tuple], 0) return output_shape
Calculate the output shape for `inputs` after a deconvolution. Args: inputs: A Tensor laid out according to `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: output_shape: A tensor of shape (`batch_size`, `conv_output_shape`).
juraj-google-style
def _mel_to_hertz(mel_values, name=None): with ops.name_scope(name, 'mel_to_hertz', [mel_values]): mel_values = ops.convert_to_tensor(mel_values) return _MEL_BREAK_FREQUENCY_HERTZ * (math_ops.exp(mel_values / _MEL_HIGH_FREQUENCY_Q) - 1.0)
Converts frequencies in `mel_values` from the mel scale to linear scale. Args: mel_values: A `Tensor` of frequencies in the mel scale. name: An optional name for the operation. Returns: A `Tensor` of the same shape and type as `mel_values` containing linear scale frequencies in Hertz.
github-repos
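The conversion above is the standard HTK-style mel formula; a NumPy mirror is sketched below for quick sanity checks, with the break frequency (700 Hz) and Q factor (1127) assumed to match the module's constants.
import numpy as np

_MEL_BREAK_FREQUENCY_HERTZ = 700.0   # assumed module constant
_MEL_HIGH_FREQUENCY_Q = 1127.0       # assumed module constant

def mel_to_hertz_np(mel_values):
    # hertz = 700 * (exp(mel / 1127) - 1)
    mel_values = np.asarray(mel_values, dtype=np.float64)
    return _MEL_BREAK_FREQUENCY_HERTZ * (np.exp(mel_values / _MEL_HIGH_FREQUENCY_Q) - 1.0)

print(mel_to_hertz_np([0.0, 1127.0]))  # approximately [0.0, 1202.8]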
def bridge_delete(br, if_exists=True): param_if_exists = _param_if_exists(if_exists) cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists) result = __salt__['cmd.run_all'](cmd) retcode = result['retcode'] return _retcode_to_bool(retcode)
Deletes bridge and all of its ports. Args: br: A string - bridge name if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False. Returns: True on success, else False. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' openvswitch.bridge_delete br0
juraj-google-style
def calculate_energy(self, energies): return sum([amt * energies[c] for amt, c in zip(self._coeffs, self._all_comp)])
Calculates the energy of the reaction. Args: energies ({Composition: float}): Energy for each composition. E.g., {comp1: energy1, comp2: energy2}. Returns: reaction energy as a float.
juraj-google-style
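A standalone illustration of the weighted sum used in calculate_energy above, with made-up coefficients and energies (negative coefficients for reactants, positive for products).
coeffs = (-2.0, -1.0, 1.0)                      # 2A + B -> A2B
comps = ('A', 'B', 'A2B')
energies = {'A': -1.0, 'B': -2.0, 'A2B': -5.5}

reaction_energy = sum(amt * energies[c] for amt, c in zip(coeffs, comps))
print(reaction_energy)  # -5.5 - (-2.0 - 2.0) = -1.5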
def import_tsv(self, tsv_file): r = fapi.upload_entities_tsv(self.namespace, self.name, tsv_file, self.api_url) fapi._check_response_code(r, 201)
Upload entity data to workspace from tsv loadfile. Args: tsv_file (file): Tab-delimited file of entity data
juraj-google-style
def save_tiles(tiles, prefix='', directory=os.getcwd(), format='png'): for tile in tiles: tile.save(filename=tile.generate_filename(prefix=prefix, directory=directory, format=format), format=format) return tuple(tiles)
Write image files to disk. Create specified folder(s) if they don't exist. Return a tuple of :class:`Tile` instances. Args: tiles (list): List, tuple or set of :class:`Tile` objects to save. prefix (str): Filename prefix of saved tiles. Kwargs: directory (str): Directory to save tiles. Created if non-existent. Returns: Tuple of :class:`Tile` instances.
juraj-google-style
def pad(self, image: np.ndarray, size: Dict[str, int], aspect_ratio: Tuple[int, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: _validate_size(size) image_height, image_width = get_image_size(image, channel_dim=input_data_format) num_tiles_height, num_tiles_width = aspect_ratio padded_height = num_tiles_height * size['height'] padded_width = num_tiles_width * size['width'] pad_size = ((0, padded_height - image_height), (0, padded_width - image_width)) image = pad(image, pad_size, mode=PaddingMode.CONSTANT, constant_values=0, data_format=data_format, input_data_format=input_data_format) return image
Pad an image to the `size` x `aspect_ratio`. For example, if size is {height: 224, width: 224} and aspect ratio is (1, 2), the image will be padded to 224x448. Args: image (`np.ndarray`): Image to pad. size (`Dict[str, int]`): Size of the output image. aspect_ratio (`Tuple[int, int]`): The aspect ratio of the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. Returns: `np.ndarray`: The padded image.
github-repos
def send_message(host, data, timeout=None, properties=None): channel = _get_channel(host, timeout) if not properties: properties = pika.BasicProperties( content_type="application/json", delivery_mode=2, headers={"UUID": str(uuid.uuid4())} ) parameters = settings.get_amqp_settings()[host] channel.basic_publish( exchange=parameters["exchange"], routing_key=parameters["in_key"], properties=properties, body=data )
Send message to given `host`. Args: host (str): Specified host: aleph/ftp/whatever available host. data (str): JSON data. timeout (int, default None): How long to wait for the connection.
juraj-google-style
def VisitUnionType(self, union): if not any((isinstance(t, pytd.GenericType) for t in union.type_list)): return union union = pytd_utils.JoinTypes(union.type_list) if not isinstance(union, pytd.UnionType): union = pytd.UnionType((union,)) merge_tuples = self._should_merge(pytd.TupleType, union) merge_callables = self._should_merge(pytd.CallableType, union) if merge_tuples or merge_callables: type_list = [] for t in union.type_list: if merge_tuples and isinstance(t, pytd.TupleType): t = pytd.GenericType(base_type=t.base_type, parameters=(pytd_utils.JoinTypes(t.parameters),)) elif merge_callables and isinstance(t, pytd.CallableType): t = pytd.GenericType(base_type=t.base_type, parameters=(pytd.AnythingType(), t.ret)) type_list.append(t) union = union.Replace(type_list=tuple(type_list)) collect = {} has_redundant_base_types = False for t in union.type_list: if isinstance(t, pytd.GenericType): key = self._key(t) if key in collect: has_redundant_base_types = True collect[key] = tuple((pytd_utils.JoinTypes([p1, p2]) for p1, p2 in zip(collect[key], t.parameters))) else: collect[key] = t.parameters if not has_redundant_base_types: return union result = pytd.NothingType() done = set() for t in union.type_list: if isinstance(t, pytd.GenericType): key = self._key(t) if key in done: continue parameters = collect[key] add = t.Replace(parameters=tuple((p.Visit(CombineContainers()) for p in parameters))) done.add(key) else: add = t result = pytd_utils.JoinTypes([result, add]) return result
Push unions down into containers. This collects similar container types in unions and merges them into single instances with the union type pushed down to the element_type level. Arguments: union: A pytd.Union instance. Might appear in a parameter, a return type, a constant type, etc. Returns: A simplified pytd.Union.
github-repos
def country_code_for_valid_region(region_code): metadata = PhoneMetadata.metadata_for_region(region_code.upper(), None) if metadata is None: raise Exception("Invalid region code %s" % region_code) return metadata.country_code
Returns the country calling code for a specific region. For example, this would be 1 for the United States, and 64 for New Zealand. Assumes the region is already valid. Arguments: region_code -- The region that we want to get the country calling code for. Returns the country calling code for the region denoted by region_code.
juraj-google-style
def unpack(self, buff, offset=0): super().unpack(buff, offset) if self.tpid.value: self._validate() self.tpid = self.tpid.value self.pcp = self._tci.value >> 13 self.cfi = (self._tci.value >> 12) & 1 self.vid = self._tci.value & 4095 else: self.tpid = EtherType.VLAN self.pcp = None self.cfi = None self.vid = None
Unpack a binary struct into this object's attributes. Return the values instead of the lib's basic types. After unpacking, the absence of a `tpid` value causes the assignment of None to the field values to indicate that there is no VLAN information. Args: buff (bytes): Binary buffer. offset (int): Where to begin unpacking. Raises: :exc:`~.exceptions.UnpackException`: If unpack fails.
juraj-google-style
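A worked example of the TCI bit layout that unpack() decodes above: 3 bits of PCP, 1 CFI bit, 12 bits of VID. The value is illustrative.
tci = (5 << 13) | (0 << 12) | 100   # PCP=5, CFI=0, VID=100

pcp = tci >> 13          # upper 3 bits  -> 5
cfi = (tci >> 12) & 1    # next bit      -> 0
vid = tci & 4095         # lower 12 bits -> 100
print(pcp, cfi, vid)     # 5 0 100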
def timeRange(start: datetime.time, end: datetime.time, step: float) -> Iterator[datetime.datetime]: assert (step > 0) start = _fillDate(start) end = _fillDate(end) delta = datetime.timedelta(seconds=step) t = start while (t < datetime.datetime.now()): t += delta while (t <= end): waitUntil(t) (yield t) t += delta
Iterator that waits periodically until certain time points are reached while yielding those time points. Args: start: Start time, can be specified as datetime.datetime, or as datetime.time in which case today is used as the date end: End time, can be specified as datetime.datetime, or as datetime.time in which case today is used as the date step (float): The number of seconds of each period
codesearchnet
def _dominant_task_for_jobs(tasks): per_job = _group_tasks_by_jobid(tasks) ret = [] for job_id in per_job.keys(): tasks_in_salience_order = sorted(per_job[job_id], key=_importance_of_task) ret.append(tasks_in_salience_order[0]) return ret
A list with, for each job, its dominant task. The dominant task is the one that exemplifies its job's status. It is either: - the first (FAILURE or CANCELED) task, or if none - the first RUNNING task, or if none - the first SUCCESS task. Args: tasks: a list of tasks to consider Returns: A list with, for each job, its dominant task.
codesearchnet
def sys_check_for_event( mask: int, k: Optional[Key], m: Optional[Mouse] ) -> int: return int( lib.TCOD_sys_check_for_event( mask, k.key_p if k else ffi.NULL, m.mouse_p if m else ffi.NULL ) )
Check for and return an event. Args: mask (int): :any:`Event types` to wait for. k (Optional[Key]): A tcod.Key instance which might be updated with an event. Can be None. m (Optional[Mouse]): A tcod.Mouse instance which might be updated with an event. Can be None. .. deprecated:: 9.3 Use the :any:`tcod.event.get` function to check for events.
juraj-google-style
def GetFormatterObject(cls, data_type): data_type = data_type.lower() if data_type not in cls._formatter_objects: formatter_object = None if data_type in cls._formatter_classes: formatter_class = cls._formatter_classes[data_type] formatter_object = formatter_class() if not formatter_object: logger.warning( 'Using default formatter for data type: {0:s}'.format(data_type)) formatter_object = default.DefaultFormatter() cls._formatter_objects[data_type] = formatter_object return cls._formatter_objects[data_type]
Retrieves the formatter object for a specific data type. Args: data_type (str): data type. Returns: EventFormatter: corresponding formatter or the default formatter if not available.
juraj-google-style
def _load_saved_model(self, saved_model_dir, saved_model_tags): graph = _ops.Graph() saved_model = _loader_impl.SavedModelLoader(saved_model_dir) saved_model.load_graph(graph, tags=saved_model_tags) meta_graph = saved_model.get_meta_graph_def_from_tags(saved_model_tags) graph_def = meta_graph.graph_def signature_def = meta_graph.signature_def[_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] input_tensors = [graph.get_tensor_by_name(signature_def.inputs[key].name) for key in signature_def.inputs] output_tensors = [graph.get_tensor_by_name(signature_def.outputs[key].name) for key in signature_def.outputs] return (graph_def, input_tensors, output_tensors)
Load graph_def from saved model with the default serving signature key. Args: saved_model_dir: Directory of the SavedModel. saved_model_tags: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. Returns: graph_def: The loaded GraphDef. input_tensors: List of input tensors. output_tensors: List of output tensors.
github-repos
def equal(x, y): return math_ops.equal(x, y)
Element-wise equality between two tensors. Args: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor.
github-repos
def alloc(self): if (not self._free): self._expand() id = self._free.pop() self._used.add(id) return id
Allocate an ID value and return it. Raises: ValueError: Out of capacity in ID pool.
codesearchnet
def to_csv(self, filename: str, latexify_names: bool = False): elements = set() for entry in self.entries: elements.update(entry.composition.elements) elements = sorted(list(elements), key=lambda a: a.X) writer = csv.writer(open(filename, "w"), delimiter=unicode2str(","), quotechar=unicode2str("\""), quoting=csv.QUOTE_MINIMAL) writer.writerow(["Name"] + elements + ["Energy"]) for entry in self.entries: row = [entry.name if not latexify_names else re.sub(r"([0-9]+)", r"_{\1}", entry.name)] row.extend([entry.composition[el] for el in elements]) row.append(entry.energy) writer.writerow(row)
Exports PDEntries to a csv. Args: filename: Filename to write to. latexify_names: Format entry names to be LaTeX compatible, e.g., Li_{2}O
juraj-google-style
def get_path(self, temp_ver): if (temp_ver not in self): raise RuntimeError('Template: {} not present'.format(temp_ver.name)) return self._prefixed(temp_ver.name)
Get the path of the given version in this store Args: temp_ver TemplateVersion: version to look for Returns: str: The path to the template version inside the store Raises: RuntimeError: if the template is not in the store
codesearchnet
def convertDateStrToDateTimeStr(date, time='00:00:00'): if date is not None: date = '%sT%sZ' % (date, time) return date
Convert Date string (YYYY-MM-DD) to a datetime string by adding the desired time (YYYY-MM-DDTHH:mm:SSZ) Args: date: the date as a string to be converted time: the time as a string to be added to the date Returns: A string representation of a datetime in the following format YYYY-MM-DDTHH:mm:SSZ
github-repos
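Example inputs and outputs for convertDateStrToDateTimeStr above; the dates are arbitrary.
print(convertDateStrToDateTimeStr('2021-03-14'))              # '2021-03-14T00:00:00Z'
print(convertDateStrToDateTimeStr('2021-03-14', '09:30:00'))  # '2021-03-14T09:30:00Z'
print(convertDateStrToDateTimeStr(None))                      # None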
def GetValidHostsForCert(cert): if ('subjectAltName' in cert): return [x[1] for x in cert['subjectAltName'] if (x[0].lower() == 'dns')] else: return [x[0][1] for x in cert['subject'] if (x[0][0].lower() == 'commonname')]
Returns a list of valid host globs for an SSL certificate. Args: cert: A dictionary representing an SSL certificate. Returns: list: A list of valid host globs.
codesearchnet
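The `cert` argument above mirrors the dictionary returned by ssl.SSLSocket.getpeercert(); the values below are made up to show both branches.
cert_with_san = {
    'subject': ((('commonName', 'example.com'),),),
    'subjectAltName': (('DNS', 'example.com'), ('DNS', '*.example.com'), ('IP Address', '1.2.3.4')),
}
print(GetValidHostsForCert(cert_with_san))     # ['example.com', '*.example.com']

cert_without_san = {'subject': ((('commonName', 'legacy.example.org'),),)}
print(GetValidHostsForCert(cert_without_san))  # ['legacy.example.org']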
def open_streaming_interface(self): super(ReferenceDevice, self).open_streaming_interface() self.rpc(8, rpcs.SG_GRAPH_INPUT, 8, streams.COMM_TILE_OPEN) return []
Called when someone opens a streaming interface to the device. This method will automatically notify sensor_graph that there is a streaming interface opened. Returns: list: A list of IOTileReport objects that should be sent out the streaming interface.
codesearchnet
def ParseLocalEntryRow(self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs): query_hash = hash(query) inode_number = self._GetRowValue(query_hash, row, 'inode_number') local_path = self.GetLocalPath(inode_number, cache, database) event_data = GoogleDriveSnapshotLocalEntryEventData() event_data.path = local_path event_data.query = query event_data.size = self._GetRowValue(query_hash, row, 'size') timestamp = self._GetRowValue(query_hash, row, 'modified') date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a local entry row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. cache (Optional[SQLiteCache]): cache. database (Optional[SQLiteDatabase]): database.
codesearchnet
def optionally_with_args(phase, **kwargs): if isinstance(phase, PhaseGroup): return phase.with_args(**kwargs) if isinstance(phase, collections.Iterable): return [optionally_with_args(p, **kwargs) for p in phase] if (not isinstance(phase, phase_descriptor.PhaseDescriptor)): phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(phase) return phase.with_known_args(**kwargs)
Apply only the args that the phase knows. If the phase has a **kwargs-style argument, it counts as knowing all args. Args: phase: phase_descriptor.PhaseDescriptor or PhaseGroup or callable, or iterable of those, the phase or phase group (or iterable) to apply with_args to. **kwargs: arguments to apply to the phase. Returns: phase_descriptor.PhaseDescriptor or PhaseGroup or iterable with the updated args.
codesearchnet
def diff_configurations(model_config, bench_config, model_bundle, bench_bundle): diff_dict = LIVVDict() model_data = model_bundle.parse_config(model_config) bench_data = bench_bundle.parse_config(bench_config) if model_data == {} and bench_data == {}: return elements.error("Configuration Comparison", "Could not open file: " + model_config.split(os.path.sep)[-1]) model_sections = set(six.iterkeys(model_data)) bench_sections = set(six.iterkeys(bench_data)) all_sections = set(model_sections.union(bench_sections)) for s in all_sections: model_vars = set(six.iterkeys(model_data[s])) if s in model_sections else set() bench_vars = set(six.iterkeys(bench_data[s])) if s in bench_sections else set() all_vars = set(model_vars.union(bench_vars)) for v in all_vars: model_val = model_data[s][v] if s in model_sections and v in model_vars else 'NA' bench_val = bench_data[s][v] if s in bench_sections and v in bench_vars else 'NA' same = True if model_val == bench_val and model_val != 'NA' else False diff_dict[s][v] = (same, model_val, bench_val) return elements.file_diff("Configuration Comparison", diff_dict)
Compares the model and benchmark configuration files section by section and variable by variable. Args: model_config: path to the model configuration file bench_config: path to the benchmark configuration file model_bundle: a LIVVkit model bundle object bench_bundle: a LIVVkit model bundle object Returns: A dictionary created by the elements object corresponding to the results of the configuration comparison
juraj-google-style
def prettify(unicode_text): import xml.dom.minidom reparsed = xml.dom.minidom.parseString(unicode_text.encode('utf-8')) return reparsed.toprettyxml(indent=" ", newl="\n")
Return a pretty-printed version of a unicode XML string. Useful for debugging. Args: unicode_text (str): A text representation of XML (unicode, *not* utf-8). Returns: str: A pretty-printed version of the input.
juraj-google-style
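A quick example of prettify above on an arbitrary XML snippet.
raw = '<root><item id="1"><name>alpha</name></item></root>'
print(prettify(raw))
# <?xml version="1.0" ?>
# <root>
#   <item id="1">
#     <name>alpha</name>
#   </item>
# </root>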
def json_to_url(json, symbol): start = json[0]['date'] end = json[-1]['date'] diff = end - start periods = [300, 900, 1800, 7200, 14400, 86400] diffs = {} for p in periods: diffs[p] = abs(1 - (p / (diff / len(json)))) period = min(diffs, key=diffs.get) url = ('https://poloniex.com/public?command=returnChartData' '&currencyPair={0}&start={1}' '&end={2}&period={3}').format(symbol, start, end, period) return url
Converts a JSON to a URL by the Poloniex API Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. symbol: String of currency pair, like a ticker symbol. Returns: String URL to Poloniex API representing the given JSON.
juraj-google-style
def concat(cartesians, ignore_index=False, keys=None): frames = [molecule._frame for molecule in cartesians] if (type(ignore_index) is bool): new = pd.concat(frames, ignore_index=ignore_index, keys=keys, verify_integrity=True) else: new = pd.concat(frames, ignore_index=True, keys=keys, verify_integrity=True) if (type(ignore_index) is int): new.index = range(ignore_index, (ignore_index + len(new))) else: new.index = ignore_index return cartesians[0].__class__(new)
Join list of cartesians into one molecule. Wrapper around the :func:`pandas.concat` function. Default values are the same as in the pandas function except for ``verify_integrity`` which is set to true in case of this library. Args: ignore_index (sequence, bool, int): If it is a boolean, it behaves like in the description of :meth:`pandas.DataFrame.append`. If it is a sequence, it becomes the new index. If it is an integer, ``range(ignore_index, ignore_index + len(new))`` becomes the new index. keys (sequence): If multiple levels passed, should contain tuples. Construct hierarchical index using the passed keys as the outermost level Returns: Cartesian:
codesearchnet
def get_class_in_module(class_name: str, module_path: Union[str, os.PathLike], *, force_reload: bool=False) -> type: name = os.path.normpath(module_path) if name.endswith('.py'): name = name[:-3] name = name.replace(os.path.sep, '.') module_file: Path = Path(HF_MODULES_CACHE) / module_path with _HF_REMOTE_CODE_LOCK: if force_reload: sys.modules.pop(name, None) importlib.invalidate_caches() cached_module: Optional[ModuleType] = sys.modules.get(name) module_spec = importlib.util.spec_from_file_location(name, location=module_file) module_files: list[Path] = [module_file] + sorted(map(Path, get_relative_import_files(module_file))) module_hash: str = hashlib.sha256(b''.join((bytes(f) + f.read_bytes() for f in module_files))).hexdigest() module: ModuleType if cached_module is None: module = importlib.util.module_from_spec(module_spec) sys.modules[name] = module else: module = cached_module if getattr(module, '__transformers_module_hash__', '') != module_hash: module_spec.loader.exec_module(module) module.__transformers_module_hash__ = module_hash return getattr(module, class_name)
Import a module on the cache directory for modules and extract a class from it. Args: class_name (`str`): The name of the class to import. module_path (`str` or `os.PathLike`): The path to the module to import. force_reload (`bool`, *optional*, defaults to `False`): Whether to reload the dynamic module from file if it already exists in `sys.modules`. Otherwise, the module is only reloaded if the file has changed. Returns: `typing.Type`: The class looked for.
github-repos
def CopyFromDateTimeString(self, time_string): super(APFSTime, self)._CopyFromDateTimeString(time_string) if ((self._timestamp is None) or (self._timestamp < self._INT64_MIN) or (self._timestamp > self._INT64_MAX)): raise ValueError('Date time value not supported.')
Copies a APFS timestamp from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC. Raises: ValueError: if the date and time value is not supported.
codesearchnet
def _ReadLine(self, file_object): if (len(self._buffer) < self._buffer_size): content = file_object.read(self._buffer_size) content = content.decode(self._encoding) self._buffer = ''.join([self._buffer, content]) (line, new_line, self._buffer) = self._buffer.partition('\n') if ((not line) and (not new_line)): line = self._buffer self._buffer = '' self._current_offset += len(line) if line.endswith('\r'): line = line[:(- len('\r'))] if new_line: line = ''.join([line, '\n']) self._current_offset += len('\n') return line
Reads a line from the file object. Args: file_object (dfvfs.FileIO): file-like object. Returns: str: line read from the file-like object.
codesearchnet
def astype(array, y): if isinstance(y, autograd.core.Node): return array.astype(numpy.array(y.value).dtype) return array.astype(numpy.array(y).dtype)
A functional form of the `astype` method. Args: array: The array or number to cast. y: An array or number, as the input, whose type should be that of array. Returns: An array or number with the same dtype as `y`.
codesearchnet
def _convert_path(path, name): table = os.path.splitext(path)[0] table = table.replace(os.path.sep, '__') if name is not None: table = '___'.join([table, name]) table = re.sub('[^0-9a-zA-Z_]+', '_', table) table = table.lower() return table
Convert resource's path and name to storage's table name. Args: path (str): resource path name (str): resource name Returns: str: table name
juraj-google-style
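Example inputs and outputs for _convert_path above, assuming a POSIX path separator; the resource paths are invented.
print(_convert_path('data/My Cities.csv', None))       # 'data__my_cities'
print(_convert_path('data/cities.csv', 'extra-meta'))  # 'data__cities___extra_meta'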
def add_lambda_permissions(function='', statement_id='', action='lambda:InvokeFunction', principal='', source_arn='', env='', region='us-east-1'): session = boto3.Session(profile_name=env, region_name=region) lambda_client = session.client('lambda') response_action = None prefixed_sid = FOREMAST_PREFIX + statement_id add_permissions_kwargs = { 'FunctionName': function, 'StatementId': prefixed_sid, 'Action': action, 'Principal': principal, } if source_arn: add_permissions_kwargs['SourceArn'] = source_arn try: lambda_client.add_permission(**add_permissions_kwargs) response_action = 'Add permission with Sid: {}'.format(prefixed_sid) except boto3.exceptions.botocore.exceptions.ClientError as error: LOG.debug('Add permission error: %s', error) response_action = "Did not add permissions" LOG.debug('Related StatementId (SID): %s', prefixed_sid) LOG.info(response_action)
Add permission to Lambda for the event trigger. Args: function (str): Lambda function name statement_id (str): IAM policy statement (principal) id action (str): Lambda action to allow principal (str): AWS principal to add permissions source_arn (str): ARN of the source of the event. Only needed for S3 env (str): Environment/account of function region (str): AWS region of function
juraj-google-style
def _static_cache_update(k_cache: torch.Tensor, v_cache: torch.Tensor, key_states: torch.Tensor, value_states: torch.Tensor, cache_position: Optional[torch.LongTensor]) -> Tuple[torch.Tensor, torch.Tensor]: if cache_position is None: k_cache.copy_(key_states) v_cache.copy_(value_states) else: try: k_cache.index_copy_(2, cache_position, key_states) v_cache.index_copy_(2, cache_position, value_states) except NotImplementedError: k_cache[:, :, cache_position] = key_states v_cache[:, :, cache_position] = value_states return (k_cache, v_cache)
Updates the static cache tensors in place. Args: k_cache (`torch.Tensor`): The key cache tensor to update. v_cache (`torch.Tensor`): The value cache tensor to update. key_states (`torch.Tensor`): The new key states to add. value_states (`torch.Tensor`): The new value states to add. cache_position (`Optional[torch.LongTensor]`): The position indices where the new states should be inserted. If None, the entire cache is overwritten (prefill). Returns: Tuple[`torch.Tensor`, `torch.Tensor`]: The updated key and value cache tensors (modified in-place).
github-repos
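A small decode-step sketch for _static_cache_update above; the cache shapes follow the usual (batch, heads, max_seq_len, head_dim) convention and are arbitrary here.
import torch

k_cache = torch.zeros(1, 2, 8, 4)
v_cache = torch.zeros(1, 2, 8, 4)

key_states = torch.randn(1, 2, 1, 4)   # one new token
value_states = torch.randn(1, 2, 1, 4)
cache_position = torch.tensor([5])     # write at sequence position 5

k_cache, v_cache = _static_cache_update(k_cache, v_cache, key_states, value_states, cache_position)
print(torch.equal(k_cache[:, :, 5:6], key_states))  # True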
def join(input_files, output_file): final_features = [] for file in input_files: with open(file) as f: feat_collection = geojson.load(f) final_features += feat_collection['features'] feat_collection['features'] = final_features with open(output_file, 'w') as f: geojson.dump(feat_collection, f)
Join geojsons into one. The spatial reference system of the output file is the same as the one of the last file in the list. Args: input_files (list): List of file name strings. output_file (str): Output file name.
codesearchnet
def findLocalOptima(self,fast=False,verbose=True,n_times=10,lambd=None): if not self.init: self.initGP(fast) opt_list = [] fixed0 = SP.zeros_like(self.gp.getParams()['dataTerm']) for i in range(n_times): scales1 = self._getScalesRand() fixed1 = 1e-1*SP.randn(fixed0.shape[0],fixed0.shape[1]) conv = self.trainGP(fast=fast,scales0=scales1,fixed0=fixed1,lambd=lambd) if conv: temp=1 for j in range(len(opt_list)): if SP.allclose(abs(self.getScales()),abs(opt_list[j]['scales'])): temp=0 opt_list[j]['counter']+=1 break if temp==1: opt = {} opt['counter'] = 1 opt['LML'] = self.getLML() opt['scales'] = self.getScales() opt_list.append(opt) LML = SP.array([opt_list[i]['LML'] for i in range(len(opt_list))]) index = LML.argsort()[::-1] out = [] if verbose: print("\nLocal mimima\n") print("n_times\t\tLML") print("------------------------------------") for i in range(len(opt_list)): out.append(opt_list[index[i]]) if verbose: print(("%d\t\t%f" % (opt_list[index[i]]['counter'], opt_list[index[i]]['LML']))) print("") return out
Train the model repeatedly, up to the number of times specified by the user, with random restarts, and return a list of all relative minima that have been found. Args: fast: Boolean. If set to True, initialize kronSumGP. verbose: Boolean. If set to True, verbose output is produced. (default True) n_times: number of re-starts of the optimization. (default 10)
juraj-google-style
def get_extension_by_name(cert_obj, extension_name): try: return cert_obj.extensions.get_extension_for_oid( getattr(cryptography.x509.oid.ExtensionOID, extension_name) ) except cryptography.x509.ExtensionNotFound: pass
Get a standard certificate extension by attribute name. Args: cert_obj: cryptography.Certificate Certificate containing a standard extension. extension_name : str Extension name. E.g., 'SUBJECT_DIRECTORY_ATTRIBUTES'. Returns: Cryptography.Extension
juraj-google-style
async def _async_supervisor(func, animation_, step, *args, **kwargs): with ThreadPoolExecutor(max_workers=2) as pool: with _terminating_event() as event: pool.submit(animate_cli, animation_, step, event) result = await func(*args, **kwargs) return result
Supervisor for running an animation with an asynchronous function. Args: func: A function to be run alongside an animation. animation_: An infinite generator that produces strings for the animation. step: Seconds between each animation frame. *args: Arguments for func. **kwargs: Keyword arguments for func. Returns: The result of func(*args, **kwargs) Raises: Any exception that is thrown when executing func.
juraj-google-style
def loads(serialized_messages): try: messages_dicts = json.loads(serialized_messages) except ValueError: _log.error('Loading serialized messages failed.') raise messages = [] for message_dict in messages_dicts: try: headers = message_dict['headers'] except KeyError: _log.error('Message saved without headers.') raise try: MessageClass = get_class(headers['fedora_messaging_schema']) except KeyError: _log.error('Message (headers=%r) saved without a schema header.', headers) raise try: body = message_dict['body'] except KeyError: _log.error('Message saved without body.') raise try: id = message_dict['id'] except KeyError: _log.error('Message saved without id.') raise try: queue = message_dict['queue'] except KeyError: _log.warning('Message saved without queue.') queue = None try: topic = message_dict['topic'] except KeyError: _log.error('Message saved without topic.') raise try: severity = headers['fedora_messaging_severity'] except KeyError: _log.error('Message saved without a severity.') raise message = MessageClass(body=body, topic=topic, headers=headers, severity=severity) try: message.validate() _log.debug('Successfully validated message %r', message) except jsonschema.exceptions.ValidationError as e: _log.error('Message validation of %r failed: %r', message, e) raise ValidationError(e) message.queue = queue message.id = id messages.append(message) return messages
Deserialize messages from a JSON formatted str Args: serialized_messages (JSON str): Returns: list: Deserialized message objects. Raises: ValidationError: If deserialized message validation failed. KeyError: If serialized_messages aren't properly serialized. ValueError: If serialized_messages is not valid JSON
codesearchnet
def single_conv_dist(name, x, output_channels=None): with tf.variable_scope(name, reuse=tf.AUTO_REUSE): x_shape = common_layers.shape_list(x) if (output_channels is None): output_channels = x_shape[-1] mean_log_scale = conv('conv2d', x, output_channels=(2 * output_channels), conv_init='zeros', apply_actnorm=False) mean = mean_log_scale[:, :, :, 0::2] log_scale = mean_log_scale[:, :, :, 1::2] return tf.distributions.Normal(mean, tf.exp(log_scale))
A 3x3 convolution mapping x to a standard normal distribution at init. Args: name: variable scope. x: 4-D Tensor. output_channels: number of channels of the mean and std.
codesearchnet
def in_train_phase(x, alt, training=None): from tensorflow.python.keras.engine import base_layer_utils if training is None: training = base_layer_utils.call_context().training if training is None: training = learning_phase() if not tensor_util.is_tf_type(training): if training == 1 or training is True: if callable(x): return x() else: return x elif training == 0 or training is False: if callable(alt): return alt() else: return alt x = switch(training, x, alt) return x
Selects `x` in train phase, and `alt` otherwise. Note that `alt` should have the *same shape* as `x`. Args: x: What to return in train phase (tensor or callable that returns a tensor). alt: What to return otherwise (tensor or callable that returns a tensor). training: Optional scalar tensor (or Python boolean, or Python integer) specifying the learning phase. Returns: Either `x` or `alt` based on the `training` flag. the `training` flag defaults to `K.learning_phase()`.
github-repos
def random_shift(image, wsr=0.1, hsr=0.1): (height, width, _) = common_layers.shape_list(image) (width_range, height_range) = ((wsr * width), (hsr * height)) height_translations = tf.random_uniform((1,), (- height_range), height_range) width_translations = tf.random_uniform((1,), (- width_range), width_range) translations = tf.concat((height_translations, width_translations), axis=0) return tf.contrib.image.translate(image, translations=translations)
Apply random horizontal and vertical shift to images. This is the default data-augmentation strategy used on CIFAR in Glow. Args: image: a 3-D Tensor wsr: Width shift range, as a float fraction of the width. hsr: Height shift range, as a float fraction of the height. Returns: images: images translated by the provided wsr and hsr.
codesearchnet
def _FormatUsername(self, event): username = self._output_mediator.GetUsername(event) return self._SanitizeField(username)
Formats the username. Args: event (EventObject): event. Returns: str: formatted username field.
juraj-google-style
def predict_features(self, df_features, df_target, idx=0, **kwargs): X = df_features.values y = df_target.values[:, 0] rr = ReliefF() rr.fit(X, y) return rr.feature_importances_
For one variable, predict its neighbouring nodes. Args: df_features (pandas.DataFrame): df_target (pandas.Series): idx (int): (optional) for printing purposes kwargs (dict): additional options for algorithms Returns: list: scores of each feature relatively to the target
juraj-google-style
def dump_connection_info(engine: Engine, fileobj: TextIO=sys.stdout) -> None: meta = MetaData(bind=engine) writeline_nl(fileobj, sql_comment('Database info: {}'.format(meta)))
Dumps some connection info, as an SQL comment. Obscures passwords. Args: engine: the SQLAlchemy :class:`Engine` to dump metadata information from fileobj: the file-like object (default ``sys.stdout``) to write information to
codesearchnet
def write_xml_root(xml_root, output_loc=None, filename=None): if xml_root is None: raise Dump2PolarionException("No data to write.") filename_fin = _get_filename(output_loc=output_loc, filename=filename) et = etree.ElementTree(xml_root) et.write(filename_fin, xml_declaration=True, pretty_print=True, encoding="utf-8") logger.info("Data written to '%s'", filename_fin)
Outputs the XML content (from XML element) into a file. If `output_loc` is supplied and it's a file (not directory), the output will be saved there and the `filename` is ignored. Args: xml_root: root element of the XML document output_loc: file or directory for saving the file filename: file name that will be used if `output_loc` is a directory If it is needed and is not supplied, it will be generated
juraj-google-style
def detect_deprecated_references_in_contract(self, contract): results = [] for state_variable in contract.variables: if (state_variable.contract != contract): continue if state_variable.expression: deprecated_results = self.detect_deprecation_in_expression(state_variable.expression) if deprecated_results: results.append((state_variable, deprecated_results)) for function in (contract.functions + contract.modifiers): if (function.contract != contract): continue for node in function.nodes: deprecated_results = self.detect_deprecated_references_in_node(node) for ir in node.irs: if isinstance(ir, LowLevelCall): for dep_llc in self.DEPRECATED_LOW_LEVEL_CALLS: if (ir.function_name == dep_llc[0]): deprecated_results.append(dep_llc) if deprecated_results: results.append((node, deprecated_results)) return results
Detects the usage of any deprecated built-in symbols. Returns: list of tuple: (state_variable | node, (detecting_signature, original_text, recommended_text))
codesearchnet
def get_dict_table_schema(schema): if isinstance(schema, (dict, value_provider.ValueProvider)) or callable(schema) or schema is None: return schema elif isinstance(schema, str): table_schema = get_table_schema_from_string(schema) return table_schema_to_dict(table_schema) elif isinstance(schema, bigquery.TableSchema): return table_schema_to_dict(schema) else: raise TypeError('Unexpected schema argument: %s.' % schema)
Transform the table schema into a dictionary instance. Args: schema (str, dict, ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema): The schema to be used if the BigQuery table to write has to be created. This can either be a dict or string or in the TableSchema format. Returns: Dict[str, Any]: The schema to be used if the BigQuery table to write has to be created but in the dictionary format.
github-repos
def resolve_path(path, config_file): if os.path.isabs(path): return path return os.path.relpath(path, os.path.dirname(config_file))
Resolve path relative to config file location. Args: path: Path to be resolved. config_file: Path to config file, which `path` is specified relative to. Returns: Path relative to the `config_file` location. If `path` is an absolute path then it will be returned without change.
juraj-google-style
def set_source_interface(self, name): cmd = self.command_builder('ntp source', value=name) return self.configure(cmd)
Assign the NTP source on the node Args: name (string): The interface port that specifies the NTP source. Returns: True if the operation succeeds, otherwise False.
juraj-google-style
def run(self, *args, **kwargs): accounts = list(AWSAccount.get_all(include_disabled=False).values()) s3_acl = get_template('cloudtrail_s3_bucket_policy.json') s3_bucket_name = self.dbconfig.get('bucket_name', self.ns) s3_bucket_region = self.dbconfig.get('bucket_region', self.ns, 'us-west-2') s3_bucket_account = AWSAccount.get(self.dbconfig.get('bucket_account', self.ns)) CloudTrail.create_s3_bucket(s3_bucket_name, s3_bucket_region, s3_bucket_account, s3_acl) self.validate_sqs_policy(accounts) for account in accounts: ct = CloudTrail(account, s3_bucket_name, s3_bucket_region, self.log) ct.run()
Entry point for the scheduler Args: *args: Optional arguments **kwargs: Optional keyword arguments Returns: None
juraj-google-style
def convert_softmax(params, w_name, scope_name, inputs, layers, weights, names): print('Converting softmax ...') if (names == 'short'): tf_name = ('SMAX' + random_string(4)) elif (names == 'keep'): tf_name = w_name else: tf_name = (w_name + str(random.random())) def target_layer(x, dim=params['dim']): import keras return keras.activations.softmax(x, axis=dim) lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert softmax layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
codesearchnet
def __make_request_headers(self, teststep_dict, entry_json): teststep_headers = {} for header in entry_json['request'].get('headers', []): if (header['name'].lower() in IGNORE_REQUEST_HEADERS): continue teststep_headers[header['name']] = header['value'] if teststep_headers: teststep_dict['request']['headers'] = teststep_headers
parse HAR entry request headers, and make teststep headers. header in IGNORE_REQUEST_HEADERS will be ignored. Args: entry_json (dict): { "request": { "headers": [ {"name": "Host", "value": "httprunner.top"}, {"name": "Content-Type", "value": "application/json"}, {"name": "User-Agent", "value": "iOS/10.3"} ], }, "response": {} } Returns: { "request": { headers: {"Content-Type": "application/json"} }
codesearchnet
def add_chain(self, name, order): if (name not in self.chains): setattr(self.chains, name, MarkovChain(order=order)) else: raise ValueError('Chain with this name already exists')
Add chain to current shelve file Args: name: chain name order: markov chain order
codesearchnet
def as_matrix(self, depth=0): if depth in self._matrix_cache: return self._matrix_cache[depth] self._matrix_cache[depth] = matrix = Matrix(self, depth=depth) return matrix
Create a matrix with self as node, cache it, return it. Args: depth (int): depth of the matrix. Returns: Matrix: an instance of Matrix.
juraj-google-style
def inspect(self, **kwargs): scf_cycle = abiinspect.PhononScfCycle.from_file(self.output_file.path) if (scf_cycle is not None): if ('title' not in kwargs): kwargs['title'] = str(self) return scf_cycle.plot(**kwargs)
Plot the Phonon SCF cycle results with matplotlib. Returns: `matplotlib` figure, None if some error occurred.
codesearchnet
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] _cls = [self.cls_token_id] _sep = [self.sep_token_id] return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An ErnieM sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of input_id with the appropriate special tokens.
github-repos
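A toy walk-through of the special-token layout above; `tokenizer` stands for an ErnieM tokenizer instance, and the ids 0 and 2 for [CLS]/[SEP] are assumptions for illustration only.
ids_a = [11, 12]
ids_b = [21, 22]

single = tokenizer.build_inputs_with_special_tokens(ids_a)
# [CLS] A [SEP]               -> [0, 11, 12, 2]  (with cls_token_id=0, sep_token_id=2)

pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
# [CLS] A [SEP] [SEP] B [SEP] -> [0, 11, 12, 2, 2, 21, 22, 2]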
def register_agent(self, short_name): self._loop.run_coroutine(self._client.register_agent(short_name))
Register to act as the RPC agent for this service. After this call succeeds, all requests to send RPCs to this service will be routed through this agent. Args: short_name (str): A unique short name for this service that functions as an id
juraj-google-style
def compute_matmul_output_shape(shape1, shape2): if len(shape1) == 1: shape1 = (1, shape1[0]) if len(shape2) == 1: shape2 = (shape2[0], 1) if shape1[-1] is not None and shape2[-2] is not None and (shape1[-1] != shape2[-2]): raise ValueError(f'Inner dimensions (`x1.shape[-1]` and `x2.shape[-2]`) must be equal, but received `x1.shape={shape1}` and `x2.shape={shape2}`.') leading_shape = broadcast_shapes(shape1[:-2], shape2[:-2]) last_2_dims_shape = [shape1[-2], shape2[-1]] output_shape = leading_shape + last_2_dims_shape if len(shape1) == 1: del output_shape[-2] if len(shape2) == 1: del output_shape[-1] return tuple(output_shape)
Compute the output shape of a `matmul` operation. Args: shape1: Shape of the left operand. shape2: Shape of the right operand. Returns: Tuple of ints: The output shape for the `matmul` operation.
github-repos
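The shape rule in the docstring can be sanity-checked against NumPy's actual matmul behaviour, independently of the function itself:

import numpy as np

# Batched matmul: leading dims broadcast, trailing dims follow (m, k) @ (k, n) -> (m, n).
a = np.zeros((5, 1, 3, 4))
b = np.zeros((7, 4, 2))
print((a @ b).shape)  # (5, 7, 3, 2): broadcast leading (5, 1) with (7,), then [3, 2]

# 1-D operands drop the corresponding output dimension.
print((np.zeros(4) @ np.zeros((4, 2))).shape)  # (2,)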
def _new_from_rft(self, base_template, rft_file):
    self._add_entry(base_template)
    self._add_entry(templates.NEW_FROM_RFT.format(rft_file_path=rft_file,
                                                  rft_file_name=op.basename(rft_file)))
Append a new file from .rft entry to the journal. This instructs Revit to create a new model based on the provided .rft template. Args: base_template (str): new file journal template from rmj.templates rft_file (str): full path to .rft template to be used
juraj-google-style
def run_tasks(header, tasks):
    tasks = list(tasks)
    with timed_display(header) as print_message:
        with tqdm(tasks, position=1, desc='Progress', disable=None,
                  bar_format='{desc}{percentage:3.0f}% |{bar}|',
                  total=sum(t[2] if len(t) > 2 else 1 for t in tasks),
                  dynamic_ncols=True) as pbar:
            for task in tasks:
                print_message(task[0])
                with display_status():
                    try:
                        task[1]()
                    finally:
                        pbar.update(task[2] if len(task) > 2 else 1)
Run a group of tasks with a header, footer and success/failure messages. Args: header: A message to print in the header bar before the tasks are run. tasks: A list of tuples containing a task title, a task, and a weight. If the tuple only contains two values, the weight is assumed to be one.
juraj-google-style
def get_backbone_element_fields(structdef: StructureDefinition, path: str) -> List[str]:
    results = []
    struct_id = cast(Any, structdef).id.value
    qualified_path = struct_id + '.' + path if path else struct_id
    for elem in cast(Any, structdef).snapshot.element:
        if elem.id.value.startswith(qualified_path):
            relative_path = elem.id.value[len(qualified_path) + 1:]
            if relative_path and '.' not in relative_path:
                if relative_path.endswith('[x]'):
                    relative_path = relative_path[:-3]
                results.append(relative_path)
    return results
Returns the fields found under the path to the given FHIR backbone element. Args: structdef: a FHIR StructureDefinition proto. path: a path to a backbone element within the structure definition. Returns: A list of nested field names.
github-repos
def lookup_package(self, definition_name):
    while True:
        descriptor = self.lookup_descriptor(definition_name)
        if isinstance(descriptor, FileDescriptor):
            return descriptor.package
        else:
            index = definition_name.rfind('.')
            if index < 0:
                return None
            definition_name = definition_name[:index]
Determines the package name for any definition. Determine the package that any definition name belongs to. May check parent for package name and will resolve missing descriptors if provided descriptor loader. Args: definition_name: Definition name to find package for.
juraj-google-style
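A toy version of just the name-walking loop above, with a plain dict standing in for lookup_descriptor and FileDescriptor (both stand-ins are assumptions here, not the real descriptor machinery):

def _walk_up(definition_name, known_files):
    # known_files maps file-scope definition names to package names,
    # playing the role of lookup_descriptor returning a FileDescriptor.
    while True:
        if definition_name in known_files:
            return known_files[definition_name]
        index = definition_name.rfind('.')
        if index < 0:
            return None
        definition_name = definition_name[:index]

files = {'my.package.Message': 'my.package'}
print(_walk_up('my.package.Message.NestedEnum', files))  # my.package
print(_walk_up('unknown.Thing', files))                  # None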
def write_file(path, data, mode='w'):
    with open(path, mode) as f:
        f.write(data)
Write data to a file. Args: path (str): path to the file. data (str): data to be written to the file. mode (str): mode which the file will be open.
juraj-google-style
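A quick usage sketch; the function is repeated so the snippet is self-contained, and it writes only inside a temporary directory.

import os
import tempfile

def write_file(path, data, mode='w'):
    with open(path, mode) as f:
        f.write(data)

with tempfile.TemporaryDirectory() as tmp:
    target = os.path.join(tmp, 'notes.txt')
    write_file(target, 'hello\n')           # default text mode overwrites
    write_file(target, 'appended\n', 'a')   # append mode adds to the end
    with open(target) as f:
        print(f.read())  # hello / appended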
def write_sample(binary, payload, path, filename):
    if not os.path.exists(path):
        os.makedirs(path)
    sample = os.path.join(path, filename)
    if binary:
        with open(sample, 'wb') as f:
            f.write(base64.b64decode(payload))
    else:
        with open(sample, 'w') as f:
            f.write(payload)
Write a sample to the file system, creating the target directory if needed. Args: binary (bool): True if it's a binary file payload: payload of the sample, base64-encoded if it's binary path (string): directory in which to write the file filename (string): name of the file
codesearchnet
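A sketch of the binary path: the payload must already be base64-encoded, as the docstring says. The function is repeated here so the snippet runs on its own, and everything is written to a temporary directory.

import base64
import os
import tempfile

def write_sample(binary, payload, path, filename):
    # Same logic as above, repeated so the sketch is standalone.
    if not os.path.exists(path):
        os.makedirs(path)
    sample = os.path.join(path, filename)
    if binary:
        with open(sample, 'wb') as f:
            f.write(base64.b64decode(payload))
    else:
        with open(sample, 'w') as f:
            f.write(payload)

with tempfile.TemporaryDirectory() as tmp:
    payload = base64.b64encode(b'\x00\x01binary bytes').decode()
    write_sample(True, payload, os.path.join(tmp, 'samples'), 'sample.bin')
    with open(os.path.join(tmp, 'samples', 'sample.bin'), 'rb') as f:
        print(f.read())  # b'\x00\x01binary bytes'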
def build_evaluation(variant_specific, variant_id, user_id, user_name,
                     institute_id, case_id, classification, criteria):
    criteria = criteria or []
    evaluation_obj = dict(
        variant_specific=variant_specific,
        variant_id=variant_id,
        institute_id=institute_id,
        case_id=case_id,
        classification=classification,
        user_id=user_id,
        user_name=user_name,
        created_at=datetime.datetime.now(),
    )
    criteria_objs = []
    for info in criteria:
        criteria_obj = {}
        criteria_obj['term'] = info['term']
        if 'comment' in info:
            criteria_obj['comment'] = info['comment']
        if 'links' in info:
            criteria_obj['links'] = info['links']
        criteria_objs.append(criteria_obj)
    evaluation_obj['criteria'] = criteria_objs
    return evaluation_obj
Build an evaluation object ready to be inserted into the database Args: variant_specific(str): md5 string for the specific variant variant_id(str): md5 string for the common variant user_id(str) user_name(str) institute_id(str) case_id(str) classification(str): The ACMG classification criteria(list(dict)): A list of dictionaries with ACMG criteria Returns: evaluation_obj(dict): Correctly formatted evaluation object
codesearchnet
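Assuming the function above is in scope (or imported from its module), a call might look like the following; all of the ids and the criterion shown are made-up values.

evaluation = build_evaluation(
    variant_specific='md5_of_specific_variant',   # made-up ids
    variant_id='md5_of_common_variant',
    user_id='user@example.com',
    user_name='Example User',
    institute_id='cust000',
    case_id='case_1',
    classification='likely_pathogenic',
    criteria=[{'term': 'PS1', 'comment': 'Same amino acid change as a known pathogenic variant'}],
)
print(evaluation['classification'])                     # likely_pathogenic
print([c['term'] for c in evaluation['criteria']])      # ['PS1']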
def notify_batches_finished(self, statuses):
    with self._wait_condition:
        self._statuses = statuses
        self._wait_condition.notify()
Called by the BatchTracker that the _BatchWaiter is observing. Should not be called by handlers. Args: statuses (dict of int): A dict with keys of batch ids, and values of status enums
codesearchnet
def sanger_ordered(self, institute_id=None, user_id=None):
    query = {'$match': {'$and': [{'verb': 'sanger'}]}}
    if institute_id:
        query['$match']['$and'].append({'institute': institute_id})
    if user_id:
        query['$match']['$and'].append({'user_id': user_id})
    results = self.event_collection.aggregate([
        query,
        {'$group': {'_id': '$case', 'vars': {'$addToSet': '$variant_id'}}},
    ])
    sanger_ordered = [item for item in results]
    return sanger_ordered
Get all variants with validations ever ordered. Args: institute_id(str) : The id of an institute user_id(str) : The id of a user Returns: sanger_ordered(list) : a list of dictionaries, one per case, each with the case id under "_id" and the set of ordered variant ids under "vars"
codesearchnet
def compile(self, model):
    log = SensorLog(InMemoryStorageEngine(model), model)
    self.sensor_graph = SensorGraph(log, model)
    allocator = StreamAllocator(self.sensor_graph, model)
    self._scope_stack = []
    root = RootScope(self.sensor_graph, allocator)
    self._scope_stack.append(root)
    for statement in self.statements:
        statement.execute(self.sensor_graph, self._scope_stack)
    self.sensor_graph.initialize_remaining_constants()
    self.sensor_graph.sort_nodes()
Compile this file into a SensorGraph. You must have previously called parse_file to parse a sensor graph file into statements that are then executed by this command to build a sensor graph. The results are stored in self.sensor_graph and can be inspected before running optimization passes. Args: model (DeviceModel): The device model that we should compile this sensor graph for.
juraj-google-style
def get_auth(credentials_prompt, refresh_token_cache, manual_login=False):
    with requests.Session() as session:
        session.headers = {'user-agent': USER_AGENT}
        try:
            logger.info('Authenticating with refresh token')
            refresh_token = refresh_token_cache.get()
            if refresh_token is None:
                raise GoogleAuthError('Refresh token not found')
            access_token = _auth_with_refresh_token(session, refresh_token)
        except GoogleAuthError as e:
            # Fall back to credential-based login when the refresh token is
            # missing or rejected, then cache the new refresh token.
            logger.info('Failed to authenticate using refresh token: %s', e)
            logger.info('Authenticating with credentials')
            if manual_login:
                authorization_code = credentials_prompt.get_authorization_code()
            else:
                authorization_code = _get_authorization_code(session, credentials_prompt)
            access_token, refresh_token = _auth_with_code(session, authorization_code)
            refresh_token_cache.set(refresh_token)
        logger.info('Authentication successful')
        return _get_session_cookies(session, access_token)
Authenticate with Google. Args: refresh_token_cache (RefreshTokenCache): Cache to use so subsequent logins may not require credentials. credentials_prompt (CredentialsPrompt): Prompt to use if credentials are required to log in. manual_login (bool): If true, prompt user to log in through a browser and enter authorization code manually. Defaults to false. Returns: dict: Google session cookies. Raises: GoogleAuthError: If authentication with Google fails.
codesearchnet
def write_edges(
    edges: Mapping[str, Any],
    filename: str,
    jsonlines: bool = False,
    gzipflag: bool = False,
    yaml: bool = False,
):
    pass
Write edges to a file. Args: edges (Mapping[str, Any]): edges in the edges JSON Schema format filename (str): filename to write jsonlines (bool): output in JSONLines format? gzipflag (bool): create gzipped file? yaml (bool): create yaml file?
juraj-google-style
def process_results(qry_results):
    i_info = {}
    for i, j in enumerate(qry_results['Reservations']):
        i_info[i] = {'id': j['Instances'][0]['InstanceId']}
        i_info[i]['state'] = j['Instances'][0]['State']['Name']
        i_info[i]['ami'] = j['Instances'][0]['ImageId']
        i_info[i]['ssh_key'] = j['Instances'][0]['KeyName']
        i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName']
        try:
            i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags'])
        except KeyError:
            i_info[i]['tag'] = {'Name': ''}
    debg.dprint('numInstances: ', len(i_info))
    debg.dprintx('Details except AMI-name')
    debg.dprintx(i_info, True)
    return i_info
Generate dictionary of results from query. Decodes the large dict returned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details.
codesearchnet
def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens):
    new_lm_head_bias = {}
    for attr, weight in old_lm_head_bias.items():
        # Bias may be rank 1 (vocab,) or rank 2 (first_dim, vocab).
        first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)
        size_diff = new_num_tokens - old_num_tokens
        final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens]
        if tf.math.greater(size_diff, 0):
            # Growing: pad the old bias and mask the region that was copied.
            padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]
            current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1)
            num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
            mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy]
            bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True)
            bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False)
        else:
            # Shrinking: slice the old bias down to the new size.
            slice_from = [0] if first_dim is None else [0, 0]
            current_bias = tf.slice(weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape))
            bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True)
        new_bias = self.add_weight(shape=final_shape, initializer='zeros', trainable=True, name=weight.name.split(':')[0])
        init_bias = tf.where(bias_mask, current_bias, new_bias.value())
        new_bias.assign(init_bias)
        new_lm_head_bias[attr] = new_bias
    return new_lm_head_bias
Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_lm_head_bias (`tf.Variable`): Old lm head bias to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns None Return: `tf.Variable`: Pointer to the resized bias.
github-repos
def load(self, context):
    try:
        import tensorflow
        from tensorflow.python.eager import profiler_client
    except ImportError:
        return
    from tensorboard.plugins.profile.profile_plugin import ProfilePlugin
    return ProfilePlugin(context)
Returns the plugin, if possible. Args: context: The TBContext flags. Returns: A ProfilePlugin instance or None if it couldn't be loaded.
juraj-google-style
def __recognize_dict(self, node: yaml.Node, expected_type: Type) -> RecResult:
    logger.debug('Recognizing as a dict')
    if not issubclass(generic_type_args(expected_type)[0], str):
        raise RuntimeError('YAtiML only supports dicts with strings as keys')
    if not isinstance(node, yaml.MappingNode):
        message = '{}{}Expected a dict/mapping here'.format(node.start_mark, os.linesep)
        return [], message
    value_type = generic_type_args(expected_type)[1]
    for _, value in node.value:
        recognized_value_types, message = self.recognize(value, value_type)
        if len(recognized_value_types) == 0:
            return [], message
        if len(recognized_value_types) > 1:
            return [Dict[str, t] for t in recognized_value_types], message
    return [expected_type], ''
Recognize a node that we expect to be a dict of some kind. Args: node: The node to recognize. expected_type: Dict[str, ...something...] Returns: expected_type if it was recognized, [] otherwise.
juraj-google-style
def move_added_token(self, token: str, target_idx: int):
    assert token in self.added_tokens_encoder, 'Token which should be moved has to be an added token'
    assert token not in self.idx2sym, 'Token which should be moved is already in vocab'
    self.idx2sym.insert(target_idx, token)
    self.sym2idx[token] = target_idx
    for idx in range(target_idx + 1, len(self.idx2sym)):
        current_sym = self.idx2sym[idx]
        self.sym2idx[current_sym] = idx
    old_index = self._added_tokens_encoder.pop(token)
    self._added_tokens_decoder.pop(old_index)
Moves an added token to a specific position in the vocab. This method should be used when resizing an embedding layer other than the last one in the `AdaptiveEmbedding` in order to move the token in the tokenizer from the default position (at the very end) to the desired one. Args: token: The token to move to a specific position in the vocab. target_idx: The position where the token should be moved to.
github-repos
def genome_name_from_fasta_path(fasta_path):
    filename = os.path.basename(fasta_path)
    return re.sub(r'(\.fa$)|(\.fas$)|(\.fasta$)|(\.fna$)|(\.\w{1,}$)', '', filename)
Extract genome name from fasta filename Get the filename without directory and remove the file extension. Example: With fasta file path ``/path/to/genome_1.fasta``:: fasta_path = '/path/to/genome_1.fasta' genome_name = genome_name_from_fasta_path(fasta_path) print(genome_name) # => "genome_1" Args: fasta_path (str): fasta file path Returns: str: genome name
codesearchnet
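The suffix-stripping behaviour, demonstrated directly (the function is repeated so the snippet runs standalone):

import os
import re

def genome_name_from_fasta_path(fasta_path):
    filename = os.path.basename(fasta_path)
    return re.sub(r'(\.fa$)|(\.fas$)|(\.fasta$)|(\.fna$)|(\.\w{1,}$)', '', filename)

print(genome_name_from_fasta_path('/path/to/genome_1.fasta'))  # genome_1
print(genome_name_from_fasta_path('genome_2.fna'))             # genome_2
print(genome_name_from_fasta_path('assembly.v2.fa'))           # assembly.v2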
def _preprocess_padding(padding):
    if padding == 'same':
        padding = 'SAME'
    elif padding == 'valid':
        padding = 'VALID'
    else:
        raise ValueError('Invalid padding: ' + str(padding))
    return padding
Convert keras' padding to TensorFlow's padding. Args: padding: string, one of 'same', 'valid' Returns: a string, one of 'SAME', 'VALID'. Raises: ValueError: if `padding` is invalid
github-repos
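A behaviour sketch, with the function repeated so it runs on its own:

def _preprocess_padding(padding):
    if padding == 'same':
        padding = 'SAME'
    elif padding == 'valid':
        padding = 'VALID'
    else:
        raise ValueError('Invalid padding: ' + str(padding))
    return padding

print(_preprocess_padding('same'))   # SAME
print(_preprocess_padding('valid'))  # VALID
try:
    _preprocess_padding('causal')    # unsupported value raises
except ValueError as exc:
    print(exc)  # Invalid padding: causal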
def add(self, handler):
    self._handlers.append(handler)
    static_paths = set(h.static_path() for h in self.handlers)
    static_paths.discard(None)
    if len(static_paths) > 1:
        raise RuntimeError('More than one static path requested for app: %r' % list(static_paths))
    elif len(static_paths) == 1:
        self._static_path = static_paths.pop()
    else:
        self._static_path = None
Add a handler to the pipeline used to initialize new documents. Args: handler (Handler) : a handler for this Application to use to process Documents
codesearchnet
def avg_pool(x, pool_size, strides, padding):
    x = tf_np.asarray(x)
    return tf_np.asarray(
        nn_ops.pool(input=x, window_shape=pool_size, pooling_type='AVG',
                    strides=strides, padding=padding))
Performs an N-D average pooling. Args: x: ndarray of rank N+2, of shape `[batch_size] + input_spatial_shape + [num_channels]`. Pooling happens over the spatial dimensions only. pool_size: sequence of N ints. strides: sequence of N ints. padding: a string, the padding algorithm. Must be "SAME" or "VALID". Returns: An (N+2)-D array, of shape [batch_size] + output_spatial_shape + [num_channels], where `output_spatial_shape` depends on the value of padding: If padding = "SAME": output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i]) If padding = "VALID": output_spatial_shape[i] = ceil((input_spatial_shape[i] - (pool_size[i] - 1)) / strides[i]).
github-repos
def values(self):
    return {n: getattr(self, n) for n in self._hparam_types.keys()}
Return the hyperparameter values as a Python dictionary. Returns: A dictionary with hyperparameter names as keys. The values are the hyperparameter values.
codesearchnet
def bot(self, id):
    json = self.skype.conn("GET", "{0}/agents".format(SkypeConnection.API_BOT),
                           params={"agentId": id},
                           auth=SkypeConnection.Auth.SkypeToken).json().get("agentDescriptions", [])
    return self.merge(SkypeBotUser.fromRaw(self.skype, json[0])) if json else None
Retrieve a single bot. Args: id (str): UUID or username of the bot Returns: SkypeBotUser: resulting bot user object
juraj-google-style
def purview(repertoire):
    if repertoire is None:
        return None
    return tuple(i for i, dim in enumerate(repertoire.shape) if dim == 2)
The purview of the repertoire. Args: repertoire (np.ndarray): A repertoire Returns: tuple[int]: The purview that the repertoire was computed over.
juraj-google-style
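A small NumPy illustration; the example repertoire is made up, with singleton dimensions marking nodes outside the purview.

import numpy as np

def purview(repertoire):
    # Same logic as above, repeated so the sketch is standalone.
    if repertoire is None:
        return None
    return tuple(i for i, dim in enumerate(repertoire.shape) if dim == 2)

# A repertoire over nodes 0 and 2 of a 3-node system: node 1 is collapsed
# to a singleton dimension, so it is not part of the purview.
r = np.ones((2, 1, 2)) / 4
print(purview(r))     # (0, 2)
print(purview(None))  # None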