Columns:
code: string (lengths 20 to 4.93k)
docstring: string (lengths 33 to 1.27k)
source: string (3 classes)
def add_tree(self, tree, parent=None):
    if tree.path in self.path_db:
        self.remove_tree_by_path(tree.path)

    for index in tree.indexes:
        if not getattr(tree, index):
            continue
        self._add_to(
            getattr(self, index + "_db"),
            getattr(tree, index),
            tree,
        )

    if parent:
        self._add_to(self.parent_db, tree.path, parent)

    for sub_tree in tree.sub_trees:
        assert sub_tree.path.startswith(tree.path)

    for sub_tree in tree.sub_trees:
        self.add_tree(sub_tree, parent=tree)
Add `tree` into database. Args: tree (obj): :class:`.Tree` instance. parent (ref, default None): Reference to parent tree. This is used for all sub-trees in recursive call.
juraj-google-style
def _offset(value):
    o = int(value)
    if o == 0:
        return 0
    a = abs(o)
    # a * 36 + (a % 100) * 24 == (a // 100) * 3600 + (a % 100) * 60
    s = (a * 36) + ((a % 100) * 24)
    # The return statement was cut off in the source dump; restoring the sign
    # of the offset is the natural completion of the arithmetic above.
    return (o // a) * s
Parse timezone to offset in seconds. Args: value: A timezone in the '+0000' format. An integer would also work. Returns: The timezone offset from GMT in seconds as an integer.
codesearchnet
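A quick self-contained check of the '+HHMM' arithmetic above (timezone values are hypothetical; the sign-restoring return is the reconstruction noted in the code):

# Sanity check: a*36 + (a % 100)*24 == hours*3600 + minutes*60.
for tz, expected in (('+0130', 5400), ('-0500', -18000), ('+0000', 0)):
    o = int(tz)
    a = abs(o)
    seconds = 0 if o == 0 else (o // a) * (a * 36 + (a % 100) * 24)
    assert seconds == expected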
def _BuildStations(self, stoplist):
    stations = []
    dists = self._EuclidianDistances(stoplist)
    stations = self._CalculateYLines(dists)
    return stations
Dispatches the best algorithm for calculating station line position. Args: # Class Stop is defined in transitfeed.py stoplist: [Stop, Stop, ...] # Class Trip is defined in transitfeed.py triplist: [Trip, Trip, ...] Returns: # One integer y-coordinate for each station normalized between # 0 and X, where X is the height of the graph in pixels [0, 33, 140, ... , X]
codesearchnet
def _FloatingPointEncoder(wire_type, format): value_size = struct.calcsize(format) if (value_size == 4): def EncodeNonFiniteOrRaise(write, value): if (value == _POS_INF): write(b'\x00\x00\x80\x7f') elif (value == _NEG_INF): write(b'\x00\x00\x80\xff') elif (value != value): write(b'\x00\x00\xc0\x7f') else: raise elif (value_size == 8): def EncodeNonFiniteOrRaise(write, value): if (value == _POS_INF): write(b'\x00\x00\x00\x00\x00\x00\xf0\x7f') elif (value == _NEG_INF): write(b'\x00\x00\x00\x00\x00\x00\xf0\xff') elif (value != value): write(b'\x00\x00\x00\x00\x00\x00\xf8\x7f') else: raise else: raise ValueError(("Can't encode floating-point values that are %d bytes long (only 4 or 8)" % value_size)) def SpecificEncoder(field_number, is_repeated, is_packed): local_struct_pack = struct.pack if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) local_EncodeVarint(write, (len(value) * value_size)) for element in value: try: write(local_struct_pack(format, element)) except SystemError: EncodeNonFiniteOrRaise(write, element) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_type) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) try: write(local_struct_pack(format, element)) except SystemError: EncodeNonFiniteOrRaise(write, element) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_type) def EncodeField(write, value): write(tag_bytes) try: write(local_struct_pack(format, value)) except SystemError: EncodeNonFiniteOrRaise(write, value) return EncodeField return SpecificEncoder
Return a constructor for an encoder for float fields. This is like StructPackEncoder, but catches errors that may be due to passing non-finite floating-point values to struct.pack, and makes a second attempt to encode those values. Args: wire_type: The field's wire type, for encoding tags. format: The format string to pass to struct.pack().
codesearchnet
def triggered(self, manual=False):
    if self.walker is None:
        raise InternalError('You can only check if a streamer is triggered if you create it with a SensorLog')

    if (not self.automatic) and (not manual):
        return False

    return self.has_data()
Check if this streamer should generate a report. Streamers can be triggered automatically whenever they have data or they can be triggered manually. This method returns True if the streamer is currently triggered. A streamer is triggered if it: - (has data AND is automatic) OR - (has data AND is manually triggered) Args: manual (bool): Indicate that the streamer has been manually triggered. Returns: bool: Whether the streamer can generate a report right now.
codesearchnet
def mme_matches(case_obj, institute_obj, mme_base_url, mme_token): data = { 'institute' : institute_obj, 'case' : case_obj, 'server_errors' : [] } matches = {} if not case_obj.get('mme_submission'): return None for patient in case_obj['mme_submission']['patients']: patient_id = patient['id'] matches[patient_id] = None url = ''.join([ mme_base_url, '/matches/', patient_id]) server_resp = matchmaker_request(url=url, token=mme_token, method='GET') if 'status_code' in server_resp: pat_matches = [] if server_resp.get('matches'): pat_matches = parse_matches(patient_id, server_resp['matches']) matches[patient_id] = pat_matches else: LOG.warning('Server returned error message: {}'.format(server_resp['message'])) data['server_errors'].append(server_resp['message']) data['matches'] = matches return data
Show Matchmaker submission data for a sample and eventual matches. Args: case_obj(dict): a scout case object institute_obj(dict): an institute object mme_base_url(str) base url of the MME server mme_token(str) auth token of the MME server Returns: data(dict): data to display in the html template
juraj-google-style
def from_bytes_list(cls, function_descriptor_list): assert isinstance(function_descriptor_list, list) if (len(function_descriptor_list) == 0): return FunctionDescriptor.for_driver_task() elif ((len(function_descriptor_list) == 3) or (len(function_descriptor_list) == 4)): module_name = ensure_str(function_descriptor_list[0]) class_name = ensure_str(function_descriptor_list[1]) function_name = ensure_str(function_descriptor_list[2]) if (len(function_descriptor_list) == 4): return cls(module_name, function_name, class_name, function_descriptor_list[3]) else: return cls(module_name, function_name, class_name) else: raise Exception('Invalid input for FunctionDescriptor.from_bytes_list')
Create a FunctionDescriptor instance from list of bytes. This function is used to create the function descriptor from backend data. Args: cls: Current class which is required argument for classmethod. function_descriptor_list: list of bytes to represent the function descriptor. Returns: The FunctionDescriptor instance created from the bytes list.
codesearchnet
def _create_extractors(col_params):
    result = []
    for col_param in col_params:
        result.append(_create_extractor(col_param))
    return result
Creates extractors to extract properties corresponding to 'col_params'. Args: col_params: List of ListSessionGroupsRequest.ColParam protobufs. Returns: A list of extractor functions. The ith element in the returned list extracts the column corresponding to the ith element of _request.col_params
codesearchnet
def console(discord_token, discord_client_id): state, response = datatools.get_compare_version() logger.info("Starting Modis in console") logger.info(response) import threading import asyncio logger.debug("Loading packages") from modis.discord_modis import main as discord_modis_console from modis.reddit_modis import main as reddit_modis_console from modis.facebook_modis import main as facebook_modis_console logger.debug("Initiating threads") loop = asyncio.get_event_loop() discord_thread = threading.Thread( target=discord_modis_console.start, args=[discord_token, discord_client_id, loop]) reddit_thread = threading.Thread( target=reddit_modis_console.start, args=[]) facebook_thread = threading.Thread( target=facebook_modis_console.start, args=[]) logger.debug("Starting threads") discord_thread.start() reddit_thread.start() facebook_thread.start() logger.debug("Root startup completed")
Start Modis in console format. Args: discord_token (str): The bot token for your Discord application discord_client_id: The bot's client ID
juraj-google-style
def any_soco():
    cls = config.SOCO_CLASS
    try:
        device = next(
            d for d in cls._instances[cls._class_group].values() if d.is_visible
        )
    except (KeyError, StopIteration):
        devices = discover()
        return None if devices is None else devices.pop()
    return device
Return any visible soco device, for when it doesn't matter which. Try to obtain an existing instance, or use `discover` if necessary. Note that this assumes that the existing instance has not left the network. Returns: SoCo: A `SoCo` instance (or a subclass, if `config.SOCO_CLASS` is set), or `None` if no instances are found.
codesearchnet
def detect(self, text): t = text.encode("utf-8") reliable, index, top_3_choices = cld2.detect(t, bestEffort=False) if not reliable: self.reliable = False reliable, index, top_3_choices = cld2.detect(t, bestEffort=True) if not self.quiet: if not reliable: raise UnknownLanguage("Try passing a longer snippet of text") else: logger.warning("Detector is not able to detect the language reliably.") self.languages = [Language(x) for x in top_3_choices] self.language = self.languages[0] return self.language
Decide which language is used to write the text. The method tries first to detect the language with high reliability. If that is not possible, the method switches to best effort strategy. Args: text (string): A snippet of text, the longer it is the more reliable we can detect the language used to write the text.
juraj-google-style
def reach_max_num(self):
    if self.signal.get('reach_max_num'):
        return True
    if (self.max_num > 0) and (self.fetched_num >= self.max_num):
        return True
    else:
        return False
Check if downloaded images reached max num. Returns: bool: if downloaded images reached max num.
codesearchnet
def file_exists(file_path, credentials=None):
    if file_path.startswith('gs://'):
        return _file_exists_in_gcs(file_path, credentials)
    else:
        return os.path.isfile(file_path)
Check whether the file exists, on local disk or GCS. Args: file_path: The target file path; should have the 'gs://' prefix if in gcs. credentials: Optional credential to be used to load the file from gcs. Returns: True if the file's there.
juraj-google-style
def get_file(self, filename, scope='all'):
    filename = os.path.abspath(os.path.join(self.root, filename))
    layouts = self._get_layouts_in_scope(scope)
    for ly in layouts:
        if filename in ly.files:
            return ly.files[filename]
    return None
Returns the BIDSFile object with the specified path. Args: filename (str): The path of the file to retrieve. Must be either an absolute path, or relative to the root of this BIDSLayout. scope (str, list): Scope of the search space. If passed, only BIDSLayouts that match the specified scope will be searched. See BIDSLayout docstring for valid values. Returns: A BIDSFile, or None if no match was found.
juraj-google-style
def _StartMonitoringProcess(self, process):
    if process is None:
        raise ValueError('Missing process.')

    pid = process.pid
    if pid in self._process_information_per_pid:
        raise KeyError(
            'Already monitoring process (PID: {0:d}).'.format(pid))

    if pid in self._rpc_clients_per_pid:
        raise KeyError(
            'RPC client (PID: {0:d}) already exists'.format(pid))

    rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient()

    rpc_port = process.rpc_port.value
    time_waited_for_process = 0.0
    while not rpc_port:
        time.sleep(0.1)
        rpc_port = process.rpc_port.value
        time_waited_for_process += 0.1

        if time_waited_for_process >= self._RPC_SERVER_TIMEOUT:
            raise IOError(
                'RPC client unable to determine server (PID: {0:d}) port.'.format(
                    pid))

    hostname = 'localhost'

    if not rpc_client.Open(hostname, rpc_port):
        # The server URL in this message was truncated in the source dump.
        raise IOError((
            'RPC client unable to connect to server (PID: {0:d}) '
            'http://...').format(pid))

    self._rpc_clients_per_pid[pid] = rpc_client
    self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)
Starts monitoring a process. Args: process (MultiProcessBaseProcess): process. Raises: IOError: if the RPC client cannot connect to the server. KeyError: if the process is not registered with the engine or if the process is already being monitored. OSError: if the RPC client cannot connect to the server. ValueError: if the process is missing.
juraj-google-style
def get_fastq_dxfile_objects(self, barcode=None):
    fq_ext_glob = "*{}".format(self.FQEXT)
    name = fq_ext_glob
    if barcode:
        name = "*_{barcode}_*{FQEXT}".format(barcode=barcode, FQEXT=self.FQEXT)
    fastqs = dxpy.find_data_objects(
        project=self.dx_project_id, folder=self.DX_FASTQ_FOLDER,
        name=name, name_mode="glob")
    if not fastqs:
        fastqs = dxpy.find_data_objects(
            project=self.dx_project_id, name=name, name_mode="glob")
    if not fastqs:
        # The original referenced an undefined `proj_name`; per the docstring,
        # the project name attribute appears to be what was intended.
        msg = "No FASTQ files found for run {run} ".format(run=self.dx_project_name)
        if barcode:
            msg += "and barcode {barcode}.".format(barcode=barcode)
        raise FastqNotFound(msg)
    fastqs = [dxpy.DXFile(project=x["project"], dxid=x["id"]) for x in fastqs]
    return fastqs
Retrieves all the FASTQ files in project self.dx_project_name as DXFile objects. Args: barcode: `str`. If set, then only FASTQ file properties for FASTQ files having the specified barcode are returned. Returns: `list` of DXFile objects representing FASTQ files. Raises: `dnanexus_utils.FastqNotFound`: No FASTQ files were found.
juraj-google-style
def sanity_check_tensor_sync(tensor: torch.Tensor, mesh: DeviceMesh, rtol: float=0.0001, atol: float=0.0001, not_sync: bool=False) -> None: if not dist.is_initialized() or mesh.size() == 1: return pg = mesh.get_group() if hasattr(tensor, 'to_local'): local_tensor = tensor.to_local() else: local_tensor = tensor world_size = dist.get_world_size(pg) gathered_tensors = [torch.empty_like(local_tensor) for _ in range(world_size)] dist.all_gather(gathered_tensors, local_tensor, group=pg) for i in range(1, world_size): try: torch.testing.assert_close(gathered_tensors[0], gathered_tensors[i], rtol=rtol, atol=atol) except AssertionError as e: if not_sync: continue raise e
Verify that a tensor is synchronized (or not synchronized) across all processes in the mesh's process group. Handles both regular tensors and DTensors. Args: tensor (torch.Tensor): The tensor to check for synchronization (can be DTensor) mesh (DeviceMesh): The device mesh containing the process group rtol (float): Relative tolerance for comparison atol (float): Absolute tolerance for comparison not_sync (bool): If True, asserts that tensors are NOT synchronized. If False, asserts they are synchronized.
github-repos
def plot(self, data):
    import IPython

    if ((sys.version_info.major > 2 and isinstance(data, str)) or
            (sys.version_info.major <= 2 and isinstance(data, basestring))):
        data = bq.Query(data)

    if isinstance(data, bq.Query):
        df = data.execute().result().to_dataframe()
        data = self._get_lantern_format(df)
    elif isinstance(data, pd.core.frame.DataFrame):
        data = self._get_lantern_format(data)
    else:
        raise Exception('data needs to be a sql query, or a pandas DataFrame.')

    # The remainder of this template string was truncated in the source dump.
    HTML_TEMPLATE = ('<link rel="import" '
                     'href="/nbextensions/gcpdatalab/extern/lantern-browser.html" >\n'
                     ' <lantern-browser id="{html_id}"></lantern-browser>\n'
                     ' <script>\n var browser = document.querySelector(\'')

    metrics_str = str(map(str, data[0]['metricValues'].keys()))
    data_str = str([{str(k): json.dumps(v) for (k, v) in elem.iteritems()}
                    for elem in data])
    html_id = 'l' + datalab.utils.commands.Html.next_id()
    html = HTML_TEMPLATE.format(html_id=html_id, metrics=metrics_str, data=data_str)
    IPython.display.display(IPython.display.HTML(html))
Plots a feature slice view on given data. Args: data: Can be one of: A SQL query string. A sql query module defined by "%%sql --module module_name". A pandas DataFrame. Regardless of data type, it must include the following columns: "feature": identifies a slice of features. For example: "petal_length:4.0-4.2". "count": number of instances in that slice of features. All other columns are viewed as metrics for its feature slice. At least one is required.
codesearchnet
def repay_funding(self, amount, currency):
    params = {
        'amount': amount,
        'currency': currency
    }
    return self._send_message('post', '/funding/repay',
                              data=json.dumps(params))
Repay funding. Repays the older funding records first. Args: amount (int): Amount of currency to repay currency (str): The currency, example USD Returns: Not specified by cbpro.
juraj-google-style
def get_by_addr(self, address): addr = address if isinstance(address, str) and len(address) == 34: addr = Helper.AddrStrToScriptHash(address) if not isinstance(addr, UInt160): raise Exception("Incorrect address format") addrlist_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_ADDR).snapshot() results = [] for val in addrlist_snapshot.iterator(prefix=bytes(addr.Data), include_key=False): if len(val) > 4: try: event = SmartContractEvent.FromByteArray(val) results.append(event) except Exception as e: logger.error("could not parse event: %s %s" % (e, val)) return results
Lookup a set of notifications by address Args: address (UInt160 or str): hash of address for notifications Returns: list: a list of notifications
juraj-google-style
def sparse_grid(func, order, dim=None, skew=None): if not isinstance(order, int): orders = numpy.array(order).flatten() dim = orders.size m_order = int(numpy.min(orders)) skew = [order-m_order for order in orders] return sparse_grid(func, m_order, dim, skew) abscissas, weights = [], [] bindex = chaospy.bertran.bindex(order-dim+1, order, dim) if skew is None: skew = numpy.zeros(dim, dtype=int) else: skew = numpy.array(skew, dtype=int) assert len(skew) == dim for idx in range( chaospy.bertran.terms(order, dim) - chaospy.bertran.terms(order-dim, dim)): idb = bindex[idx] abscissa, weight = func(skew+idb) weight *= (-1)**(order-sum(idb))*comb(dim-1, order-sum(idb)) abscissas.append(abscissa) weights.append(weight) abscissas = numpy.concatenate(abscissas, 1) weights = numpy.concatenate(weights, 0) abscissas = numpy.around(abscissas, 15) order = numpy.lexsort(tuple(abscissas)) abscissas = abscissas.T[order].T weights = weights[order] diff = numpy.diff(abscissas.T, axis=0) unique = numpy.ones(len(abscissas.T), bool) unique[1:] = (diff != 0).any(axis=1) length = len(weights) idx = 1 while idx < length: while idx < length and unique[idx]: idx += 1 idy = idx+1 while idy < length and not unique[idy]: idy += 1 if idy-idx > 1: weights[idx-1] = numpy.sum(weights[idx-1:idy]) idx = idy+1 abscissas = abscissas[:, unique] weights = weights[unique] return abscissas, weights
Smolyak sparse grid constructor. Args: func (:py:data:typing.Callable): Function that takes a single argument ``order`` of type ``numpy.ndarray`` and with ``order.shape = (dim,)`` order (int, numpy.ndarray): The order of the grid. If ``numpy.ndarray``, it overrides both ``dim`` and ``skew``. dim (int): Number of dimension. skew (list): Order skewness.
juraj-google-style
def drift(data, n=3, **kwargs):
    yi = data[-n]
    yf = data[-1]
    slope = (yf - yi) / (n - 1)
    forecast = yf + slope
    return forecast
The drift forecast for the next point is a linear extrapolation from the previous ``n`` points in the series. Args: data (np.array): Observed data, presumed to be ordered in time. n (int): period over which to calculate linear model for extrapolation Returns: float: a single-valued forecast for the next value in the series.
codesearchnet
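A minimal numpy illustration of the drift forecast above (data values hypothetical):

import numpy as np

data = np.array([10.0, 12.0, 15.0])
n = 3
slope = (data[-1] - data[-n]) / (n - 1)   # (15 - 10) / 2 = 2.5
forecast = data[-1] + slope               # 17.5
assert forecast == 17.5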
def inspect(self, **kwargs): what = kwargs.pop('what', 'hist') if (what == 'hist'): with self.open_hist() as hist: return (hist.plot(**kwargs) if hist else None) elif (what == 'scf'): relaxation = abiinspect.Relaxation.from_file(self.output_file.path) if ('title' not in kwargs): kwargs['title'] = str(self) return (relaxation.plot(**kwargs) if (relaxation is not None) else None) else: raise ValueError(('Wrong value for what %s' % what))
Plot the evolution of the structural relaxation with matplotlib. Args: what: Either "hist" or "scf". The first option (default) extracts data from the HIST file and plot the evolution of the structural parameters, forces, pressures and energies. The second option, extracts data from the main output file and plot the evolution of the SCF cycles (etotal, residuals, etc). Returns: `matplotlib` figure, None if some error occurred.
codesearchnet
def equal(x, y):
    if PY_3:
        return test_case().assertEqual(x, y) or True
    assert x == y
Shortcut function for ``unittest.TestCase.assertEqual()``. Arguments: x (mixed) y (mixed) Raises: AssertionError: in case of assertion error. Returns: bool
juraj-google-style
def _SetValues(self, values): def _ToStr(value): 'Convert individul list entries to string.' if isinstance(value, (list, tuple)): result = [] for val in value: result.append(str(val)) return result else: return str(value) if isinstance(values, Row): if (self._keys != values.header): raise TypeError('Attempt to append row with mismatched header.') self._values = copy.deepcopy(values.values) elif isinstance(values, dict): for key in self._keys: if (key not in values): raise TypeError('Dictionary key mismatch with row.') for key in self._keys: self[key] = _ToStr(values[key]) elif (isinstance(values, list) or isinstance(values, tuple)): if (len(values) != len(self._values)): raise TypeError('Supplied list length != row length') for (index, value) in enumerate(values): self._values[index] = _ToStr(value) else: raise TypeError('Supplied argument must be Row, dict or list, not %s', type(values))
Set values from supplied dictionary or list. Args: values: A Row, dict indexed by column name, or list. Raises: TypeError: Argument is not a list or dict, or list is not equal row length or dictionary keys don't match.
codesearchnet
def set_ipv4_routing(self, vrf_name, default=False, disable=False):
    cmd = 'ip routing vrf %s' % vrf_name
    if default:
        cmd = 'default %s' % cmd
    elif disable:
        cmd = 'no %s' % cmd
    cmd = make_iterable(cmd)
    return self.configure(cmd)
Configures ipv4 routing for the vrf Args: vrf_name (str): The VRF name to configure default (bool): Configures ipv4 routing for the vrf value to default if this value is true disable (bool): Negates the ipv4 routing for the vrf if set to true Returns: True if the operation was successful otherwise False
codesearchnet
def get_raw(tree):
    if isinstance(tree, Tree):
        words = []
        for child in tree:
            words.append(get_raw(child))
        return ' '.join(words)
    else:
        return tree
Get the exact words in lowercase in the tree object. Args: tree (Tree): Parsed tree structure Returns: Resulting string of tree ``(Ex: "The red car")``
juraj-google-style
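A standalone sketch of the same traversal, assuming the Tree class referenced above is nltk's:

from nltk import Tree

def raw_words(tree):
    # Mirrors get_raw: join leaves left to right, recursing into subtrees.
    if isinstance(tree, Tree):
        return ' '.join(raw_words(child) for child in tree)
    return tree

t = Tree('NP', ['the', Tree('ADJ', ['red']), 'car'])
assert raw_words(t) == 'the red car'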
def _GetMountpointBlacklist(xdev):
    if xdev == rdf_file_finder.FileFinderArgs.XDev.NEVER:
        return _GetMountpoints(only_physical=False)

    if xdev == rdf_file_finder.FileFinderArgs.XDev.LOCAL:
        physical = _GetMountpoints(only_physical=True)
        return _GetMountpoints(only_physical=False) - physical

    if xdev == rdf_file_finder.FileFinderArgs.XDev.ALWAYS:
        return set()

    raise ValueError("Incorrect `xdev` value: %s" % xdev)
Builds a list of mountpoints to ignore during recursive searches. Args: xdev: A `XDev` value that determines policy for crossing device boundaries. Returns: A set of mountpoints to ignore. Raises: ValueError: If `xdev` value is invalid.
juraj-google-style
def compile_state_action_constraints(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> List[TensorFluent]: scope = self.transition_scope(state, action) constraints = [] with self.graph.as_default(): with tf.name_scope('state_action_constraints'): for p in self.rddl.domain.constraints: fluent = self._compile_expression(p, scope) constraints.append(fluent) return constraints
Compiles the state-action constraints given current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A list of :obj:`rddl2tf.fluent.TensorFluent`.
juraj-google-style
def cache_connect(database=None): if (database is None): database = cache_file() if os.path.isfile(database): conn = sqlite3.connect(database) else: conn = sqlite3.connect(database) conn.executescript(schema) with conn as cur: cur.execute('PRAGMA foreign_keys = ON;') conn.row_factory = sqlite3.Row return conn
Returns a connection object to a sqlite database. Args: database (str, optional): The path to the database the user wishes to connect to. If not specified, a default is chosen using :func:`.cache_file`. If the special database name ':memory:' is given, then a temporary database is created in memory. Returns: :class:`sqlite3.Connection`
codesearchnet
def videos(self, **kwargs):
    path = self._get_id_path('videos')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
Get the videos (trailers, teasers, clips, etc...) for a specific movie id. Args: append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def as_objective(obj):
    if isinstance(obj, Objective):
        return obj
    elif callable(obj):
        return obj
    elif isinstance(obj, str):
        layer, n = obj.split(':')
        layer, n = layer.strip(), int(n)
        return channel(layer, n)
Convert obj into Objective class. Strings of the form "layer:n" become the Objective channel(layer, n). Objectives are returned unchanged. Args: obj: string or Objective. Returns: Objective
codesearchnet
def get_max_size(pool, num_option, item_length): max_items = (POOL_SIZE / item_length) existing = ((POOL_OPTION_MIN_SIZE * num_option) + sum([max(0, (len(pool.get(i, {})) - 5)) for i in xrange(num_option)])) return int((max_items - existing))
Calculate the max number of items that an option can store in the pool at a given time. This is to limit the pool size to POOL_SIZE. Args: option_index (int): the index of the option to calculate the size for pool (dict): answer pool num_option (int): total number of options available for the question item_length (int): the length of the item Returns: int: the max number of items that `option_index` can have
codesearchnet
def similar_movies(self, **kwargs):
    path = self._get_id_path('similar_movies')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
Get the similar movies for a specific movie id. Args: page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def _CreateFeedItems(client, feed_details, label_name):
    feed_item_service = client.GetService('FeedItemService', version='v201809')

    # The three example page URLs were truncated in the source dump.
    urls = ('http://...',
            'http://...',
            'http://...')

    operations = [{
        'operand': {
            'feedId': feed_details.feed_id,
            'attributeValues': [
                {
                    'feedAttributeId': feed_details.url_attribute_id,
                    'stringValues': [url]
                },
                {
                    'feedAttributeId': feed_details.label_attribute_id,
                    'stringValues': [label_name]
                }
            ]
        },
        'operator': 'ADD'
    } for url in urls]

    feed_item_service.mutate(operations)
Creates the page URLs in the DSA page feed. Args: client: an AdWordsClient instance. feed_details: a _DSAFeedDetails instance. label_name: a str containing the page feed URL label.
juraj-google-style
def init_properties(env='dev', app='unnecessary', **_): aws_env = boto3.session.Session(profile_name=env) s3client = aws_env.resource('s3') generated = get_details(app=app, env=env) archaius = generated.archaius() archaius_file = '{path}/application.properties'.format(path=archaius['path']) try: s3client.Object(archaius['bucket'], archaius_file).get() LOG.info('Found: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file}) return True except boto3.exceptions.botocore.client.ClientError: s3client.Object(archaius['bucket'], archaius_file).put() LOG.info('Created: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file}) return False
Make sure _application.properties_ file exists in S3. For Applications with Archaius support, there needs to be a file where the cloud environment variable points to. Args: env (str): Deployment environment/account, i.e. dev, stage, prod. app (str): GitLab Project name. Returns: True when application.properties was found. False when application.properties needed to be created.
codesearchnet
def make_sgf( move_history, result_string, ruleset="Chinese", komi=7.5, white_name=PROGRAM_IDENTIFIER, black_name=PROGRAM_IDENTIFIER, comments=[] ): boardsize = go.N game_moves = ''.join(translate_sgf_move(*z) for z in itertools.zip_longest(move_history, comments)) result = result_string return SGF_TEMPLATE.format(**locals())
Turn a game into SGF. Doesn't handle handicap games or positions with incomplete history. Args: move_history: iterable of PlayerMoves result_string: "B+R", "W+0.5", etc. comments: iterable of string/None. Will be zipped with move_history.
juraj-google-style
def _encode_reference_type_constraints(self, builder: expressions.Builder, elem: message.Message) -> List[validation_pb2.SqlRequirement]: field_name = _last_path_token(builder) constraint_key = f'{field_name}-resource-type-exclusivity' if constraint_key in self._options.skip_keys: return [] element_definition = cast(Any, elem) type_codes = _utils.element_type_codes(element_definition) if type_codes != ['Reference']: return [] allowed_reference_types = [target_profile.value for target_profile in element_definition.type[0].target_profile] if len(allowed_reference_types) <= 1: return [] num_references_exist: expressions.Builder = _num_fields_exist((builder.getReferenceKey(reference_type) for reference_type in sorted(allowed_reference_types))) constraint: expressions.Builder = num_references_exist <= 1 if _fhir_path_data_types.is_collection(builder.return_type): constraint: expressions.Builder = builder.all(constraint) constraint_sql = self._encode_fhir_path_builder_constraint(constraint, builder.get_parent_builder()) if constraint_sql is None: return [] reference_type_path = self._abs_path_invocation(builder) column_name = f'{_path_to_sql_column_name(reference_type_path)}_{_key_to_sql_column_name(constraint_key)}' parent_path = self._abs_path_invocation(builder.get_parent_builder()) description = f'Reference type {reference_type_path} links to multiple resources or to resources of a type restricted by the profile.' return [validation_pb2.SqlRequirement(column_name=column_name, sql_expression=constraint_sql.sql, severity=validation_pb2.ValidationSeverity.SEVERITY_ERROR, type=validation_pb2.ValidationType.VALIDATION_TYPE_REFERENCE_TYPE, element_path=parent_path, description=description, fhir_path_key=constraint_key, fhir_path_expression=constraint_sql.builder.fhir_path, fields_referenced_by_expression=[field_name])]
Generates constraints for reference types. Ensures that a reference type only has a value for one of the resourceId columns across each of the possible resources the reference can link. Args: builder: The builder to the reference type for which to encode constraints. elem: Element definition of the builder. Returns: A constraint enforcing the above requirements for the given reference type.
github-repos
def _value_set_from_url(self, url: str) -> Optional[value_set_pb2.ValueSet]: url, version = url_utils.parse_url_version(url) value_set = self._package_manager.get_resource(url) if value_set is None: logging.info('Unable to find value set for url: %s in given resolver packages.', url) return None elif not isinstance(value_set, value_set_pb2.ValueSet): raise ValueError('URL: %s does not refer to a value set, found: %s' % (url, value_set.DESCRIPTOR.name)) elif version is not None and version != value_set.version.value: logging.warning('Found incompatible version for value set with url: %s. Requested: %s, found: %s', url, version, value_set.version.value) return None else: return value_set
Retrieves the value set for the given URL. The value set is assumed to be a member of one of the packages contained in self._package_manager. This function will not attempt to look up resources over the network in other locations. Args: url: The url of the value set to retrieve. Returns: The value set for the given URL or None if it can not be found in the package manager. Raises: ValueError: If the URL belongs to a resource that is not a value set.
github-repos
def translate_sites(self, indices, vector, frac_coords=True, to_unit_cell=True): if not isinstance(indices, collections.abc.Iterable): indices = [indices] for i in indices: site = self._sites[i] if frac_coords: fcoords = site.frac_coords + vector else: fcoords = self._lattice.get_fractional_coords( site.coords + vector) if to_unit_cell: fcoords = np.mod(fcoords, 1) self._sites[i].frac_coords = fcoords
Translate specific sites by some vector, keeping the sites within the unit cell. Args: indices: Integer or List of site indices on which to perform the translation. vector: Translation vector for sites. frac_coords (bool): Whether the vector corresponds to fractional or cartesian coordinates. to_unit_cell (bool): Whether new sites are transformed to unit cell
juraj-google-style
def stft_magnitude(signal, fft_length, hop_length=None, window_length=None):
    frames = frame(signal, window_length, hop_length)
    window = periodic_hann(window_length)
    windowed_frames = frames * window
    return np.abs(np.fft.rfft(windowed_frames, int(fft_length)))
Calculate the short-time Fourier transform magnitude. Args: signal: 1D np.array of the input time-domain signal. fft_length: Size of the FFT to apply. hop_length: Advance (in samples) between each frame passed to FFT. window_length: Length of each block of samples to pass to FFT. Returns: 2D np.array where each row contains the magnitudes of the fft_length/2+1 unique values of the FFT for the corresponding frame of input samples.
codesearchnet
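A numpy-only sketch of the same computation, approximating the module's frame and periodic_hann helpers inline (parameter values are hypothetical):

import numpy as np

signal = np.sin(2 * np.pi * 440 * np.arange(8000) / 8000.0)
window_length, hop_length, fft_length = 400, 160, 512

# Slice the signal into overlapping frames.
n_frames = 1 + (len(signal) - window_length) // hop_length
frames = np.stack([signal[i * hop_length: i * hop_length + window_length]
                   for i in range(n_frames)])

# Periodic Hann window, then magnitude of the real FFT per frame.
window = 0.5 - 0.5 * np.cos(2 * np.pi * np.arange(window_length) / window_length)
magnitudes = np.abs(np.fft.rfft(frames * window, fft_length))
print(magnitudes.shape)   # (n_frames, fft_length // 2 + 1)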
def __init__(self, plist_filename): self.filename = plist_filename with open(self.filename, 'r') as plist_file: self.soup = BeautifulSoup(plist_file, 'lxml-xml') self.properties = self.soup.findChild(name='dict') if self.properties is None: raise RuntimeError('Invalid property list file provided')
Initialize a property list representation from an existing file. Args: plist_filename: A string containing the full path to a Doxygen-generated property list file. Raises: OSError / FileNotFoundError: Input file cannot be read RuntimeError: The property list file is not of the expected format
juraj-google-style
def check_cell_type(cell, cell_type):
    if (cell_type == None) or (cell_type == type(None)):
        return (cell == None) or (isinstance(cell, basestring) and not cell)
    else:
        return isinstance(cell, cell_type)
Checks the cell type to see if it represents the cell_type passed in. Args: cell_type: The type id for a cell match or None for empty match.
codesearchnet
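A Python 3 adaptation for illustration (the original targets Python 2's basestring):

def check_cell_type_py3(cell, cell_type):
    # None or NoneType matches empty cells: None or an empty string.
    if cell_type is None or cell_type is type(None):
        return cell is None or (isinstance(cell, str) and not cell)
    return isinstance(cell, cell_type)

assert check_cell_type_py3(None, None)
assert check_cell_type_py3('', None)
assert check_cell_type_py3(3.5, float)
assert not check_cell_type_py3('3.5', float)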
def profile_settings_args_layout_json(self, required): profile_args = {} self.db_create_table(self.input_table, self.install_json_params().keys()) self.db_insert_record(self.input_table, self.install_json_params().keys()) self.gen_permutations() try: for pn in self._input_permutations[self.args.permutation_id]: p = self.install_json_params().get(pn.get('name')) if p.get('required', False) != required: continue if p.get('type').lower() == 'boolean': profile_args[p.get('name')] = pn.get('value') elif p.get('type').lower() == 'choice': profile_args[p.get('name')] = pn.get('value') elif p.get('name') in ['api_access_id', 'api_secret_key']: pass else: types = '|'.join(p.get('playbookDataType', [])) if types: profile_args[p.get('name')] = p.get('default', '<{}>'.format(types)) else: profile_args[p.get('name')] = p.get('default', '') except IndexError: self.handle_error('Invalid permutation index provided.') return profile_args
Return args based on layout.json and conditional rendering. Args: required (bool): If True only required args will be returned. Returns: dict: Dictionary of required or optional App args.
juraj-google-style
def _bash_comp_command(self, cmd, add_help=True): out = (['-h', '--help'] if add_help else []) cmd_dict = (self._opt_cmds[cmd] if cmd else self._opt_bare) for (opt, sct) in cmd_dict: out.extend(_names(self._conf[sct], opt)) return out
Build a list of all options for a given command. Args: cmd (str): command name, set to None or '' for bare command. add_help (bool): add an help option. Returns: list of str: list of CLI options strings.
codesearchnet
def body(self, body):
    if isinstance(body, bytes):
        body = body.decode('utf-8')
    self._body = body
Defines response body data. Arguments: body (str|bytes): response body to use. Returns: self: ``pook.Response`` current instance.
juraj-google-style
def _time_delta_from_info(info):
    delta_seconds = int(time.time()) - info.start_time
    return str(datetime.timedelta(seconds=delta_seconds))
Format the elapsed time for the given TensorBoardInfo. Args: info: A TensorBoardInfo value. Returns: A human-readable string describing the time since the server described by `info` started: e.g., "2 days, 0:48:58".
juraj-google-style
def get_unrecognized_field_info(self, key, value_default=None, variant_default=None): value, variant = self.__unrecognized_fields.get(key, (value_default, variant_default)) return value, variant
Get the value and variant of an unknown field in this message. Args: key: The name or number of the field to retrieve. value_default: Value to be returned if the key isn't found. variant_default: Value to be returned as variant if the key isn't found. Returns: (value, variant), where value and variant are whatever was passed to set_unrecognized_field.
juraj-google-style
def measure_topology(fbasename=None, log=None, ml_version=ml_version): ml_script1_file = 'TEMP3D_measure_topology.mlx' ml_script1 = mlx.FilterScript(file_in=fbasename, ml_version=ml_version) compute.measure_topology(ml_script1) ml_script1.save_to_file(ml_script1_file) ml_script1.run_script(log=log, script_file=ml_script1_file) topology = ml_script1.topology return topology
Measures mesh topology Args: fbasename (str): input filename. log (str): filename to log output Returns: dict: dictionary with the following keys: vert_num (int): number of vertices edge_num (int): number of edges face_num (int): number of faces unref_vert_num (int): number or unreferenced vertices boundry_edge_num (int): number of boundary edges part_num (int): number of parts (components) in the mesh. manifold (bool): True if mesh is two-manifold, otherwise false. non_manifold_edge (int): number of non_manifold edges. non_manifold_vert (int): number of non-manifold verices genus (int or str): genus of the mesh, either a number or 'undefined' if the mesh is non-manifold. holes (int or str): number of holes in the mesh, either a number or 'undefined' if the mesh is non-manifold.
codesearchnet
def get_table(bq_legacy_client: BigQueryLegacyClient, table_metadata: TableMetadata) -> Table | None: table: Table | None try: table = bq_legacy_client.get_table(table_metadata.full_table_id) except NotFound: table = None return table
Get a table if it exists in BigQuery given the ID. Args: * bq_legacy_client: BigQuery Legacy API client * table_metadata: TableMetadata object Returns: * Table object if it exists, else None
github-repos
def __init__(self, encoding='utf-8'):
    super(StdoutOutputWriter, self).__init__(sys.stdout, encoding=encoding)
Initializes a stdout output writer. Args: encoding (Optional[str]): output encoding.
juraj-google-style
def add_tasks_r(addon_module, package_module, package_name): module_dict = package_module.__dict__ for attr_name, attr_val in module_dict.items(): if isinstance(attr_val, fabric.tasks.WrappedCallableTask): addon_module.__dict__[attr_name] = attr_val elif attr_name != package_name \ and isinstance(attr_val, types.ModuleType) \ and attr_val.__name__.startswith('fabsetup_') \ and attr_name.split('.')[-1] != package_name: submodule_name = flo('{addon_module.__name__}.{attr_name}') submodule = get_or_create_module_r(submodule_name) package_module = attr_val add_tasks_r(submodule, package_module, package_name) addon_module.__dict__[attr_name] = submodule
Recursively iterate through 'package_module' and add every fabric task to the 'addon_module' keeping the task hierarchy. Args: addon_module(types.ModuleType) package_module(types.ModuleType) package_name(str): Required, to avoid redundant addition of tasks Return: None
juraj-google-style
def _MergeSameAgency(self, a_agency_id, b_agency_id): a_agency_id = (a_agency_id or self.feed_merger.a_schedule.GetDefaultAgency().agency_id) b_agency_id = (b_agency_id or self.feed_merger.b_schedule.GetDefaultAgency().agency_id) a_agency = self.feed_merger.a_schedule.GetAgency( a_agency_id)._migrated_entity b_agency = self.feed_merger.b_schedule.GetAgency( b_agency_id)._migrated_entity if a_agency != b_agency: raise MergeError('agency must be the same') return a_agency.agency_id
Merge agency ids to the corresponding agency id in the merged schedule. Args: a_agency_id: an agency id from the old schedule b_agency_id: an agency id from the new schedule Returns: The agency id of the corresponding merged agency. Raises: MergeError: If a_agency_id and b_agency_id do not correspond to the same merged agency. KeyError: Either aaid or baid is not a valid agency id.
juraj-google-style
def __closely_associated_score(self, normalized_sentences, top_n_words): scores_list = [] sentence_idx = -1 for sentence in normalized_sentences: self.tokenize(sentence) sentence = self.token sentence_idx += 1 word_idx = [] for w in top_n_words: try: word_idx.append(sentence.index(w)) except ValueError: pass word_idx.sort() if len(word_idx) == 0: continue clusters = [] cluster = [word_idx[0]] i = 1 while i < len(word_idx): if word_idx[i] - word_idx[i - 1] < self.cluster_threshold: cluster.append(word_idx[i]) else: clusters.append(cluster[:]) cluster = [word_idx[i]] i += 1 clusters.append(cluster) max_cluster_score = 0 for c in clusters: significant_words_in_cluster = len(c) total_words_in_cluster = c[-1] - c[0] + 1 score = 1.0 * significant_words_in_cluster \ * significant_words_in_cluster / total_words_in_cluster if score > max_cluster_score: max_cluster_score = score scores_list.append((sentence_idx, score)) return scores_list
Scoring the sentence with closely associations. Args: normalized_sentences: The list of sentences. top_n_words: Important sentences. Returns: The list of scores.
juraj-google-style
def _maybe_repeat(self, x):
    if isinstance(x, list):
        assert len(x) == self.n
        return x
    else:
        return [x] * self.n
Utility function for processing arguments that are singletons or lists. Args: x: either a list of self.n elements, or not a list. Returns: a list of self.n elements.
codesearchnet
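The same singleton-or-list normalization as a standalone function (n assumed to be 3):

def maybe_repeat(x, n=3):
    if isinstance(x, list):
        assert len(x) == n
        return x
    return [x] * n

assert maybe_repeat(5) == [5, 5, 5]
assert maybe_repeat([1, 2, 3]) == [1, 2, 3]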
def _compute_fans(shape):
    if len(shape) < 1:
        fan_in = fan_out = 1
    elif len(shape) == 1:
        fan_in = fan_out = shape[0]
    elif len(shape) == 2:
        fan_in = shape[0]
        fan_out = shape[1]
    else:
        receptive_field_size = 1.0
        for dim in shape[:-2]:
            receptive_field_size *= dim
        fan_in = shape[-2] * receptive_field_size
        fan_out = shape[-1] * receptive_field_size
    if isinstance(fan_in, tf.Dimension):
        fan_in = fan_in.value
    if isinstance(fan_out, tf.Dimension):
        fan_out = fan_out.value
    return fan_in, fan_out
Computes the number of input and output units for a weight shape. Args: shape: Integer shape tuple or TF tensor shape. Returns: A tuple of scalars (fan_in, fan_out).
codesearchnet
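A worked example of the fan arithmetic for a 3x3 convolution kernel with 16 input and 32 output channels (shape chosen for illustration):

shape = (3, 3, 16, 32)
receptive_field_size = 3 * 3                 # product of all but the last two dims
fan_in = shape[-2] * receptive_field_size    # 16 * 9 = 144
fan_out = shape[-1] * receptive_field_size   # 32 * 9 = 288
assert (fan_in, fan_out) == (144, 288)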
def check_compatibility(self): usr_keys = list(self.usr_config.keys()) for k in self.usr_config.keys(): if k not in usr_keys: err_msg = '[Error] Required config not found in user config.' err_msg += '(required = %s, ' % str(k) err_msg += 'user configs = %s)' % str(usr_keys) logging.error(err_msg) self.error_msg.append(err_msg) self.failures.append([k, err_msg]) return False overall_status = True for config_name, spec in self.usr_config.items(): temp_status = True in_required = config_name in list(self.required.keys()) in_optional = config_name in list(self.optional.keys()) in_unsupported = config_name in list(self.unsupported.keys()) in_dependency = config_name in list(self.dependency.keys()) if not (in_required or in_optional or in_unsupported or in_dependency): warn_msg = '[Error] User config not defined in config file.' warn_msg += '(user config = %s)' % str(config_name) logging.warning(warn_msg) self.warning_msg.append(warn_msg) self.failures.append([config_name, warn_msg]) temp_status = False else: if in_unsupported: if self.in_range(spec, self.unsupported[config_name]): err_msg = '[Error] User config is unsupported. It is ' err_msg += "defined under 'Unsupported' section in the config file." err_msg += ' (config = %s, spec = %s)' % (config_name, str(spec)) logging.error(err_msg) self.error_msg.append(err_msg) self.failures.append([config_name, err_msg]) temp_status = False if in_required: if not self.in_range(spec, self.required[config_name]): err_msg = '[Error] User config cannot be supported. It is not in ' err_msg += "the supported range as defined in the 'Required' " err_msg += 'section. (config = %s, ' % config_name err_msg += 'spec = %s)' % str(spec) logging.error(err_msg) self.error_msg.append(err_msg) self.failures.append([config_name, err_msg]) temp_status = False if in_optional: if not self.in_range(spec, self.optional[config_name]): err_msg = '[Error] User config cannot be supported. It is not in ' err_msg += "the supported range as defined in the 'Optional' " err_msg += 'section. (config = %s, ' % config_name err_msg += 'spec = %s)' % str(spec) logging.error(err_msg) self.error_msg.append(err_msg) self.failures.append([config_name, err_msg]) temp_status = False if in_dependency: dep_list = self.dependency[config_name] if dep_list: for rule in dep_list: cfg = rule[0] cfg_req = rule[1] dep = rule[2] dep_req = rule[3] try: cfg_name = self.usr_config[cfg] dep_name = self.usr_config[dep] cfg_status = self.in_range(cfg_name, cfg_req) dep_status = self.in_range(dep_name, dep_req) if cfg_status: if not dep_status: err_msg = '[Error] User config has a dependency that cannot' err_msg += ' be supported. ' err_msg += "'%s' has a dependency on " % str(config_name) err_msg += "'%s'." % str(dep) logging.error(err_msg) self.error_msg.append(err_msg) self.failures.append([config_name, err_msg]) temp_status = False except KeyError: err_msg = '[Error] Dependency is missing from `Required`. ' err_msg += '(config = %s, dep = %s)' % (cfg, dep) logging.error(err_msg) self.error_msg.append(err_msg) self.failures.append([config_name, err_msg]) temp_status = False if temp_status: self.successes.append([config_name, spec]) else: overall_status = False return overall_status
Checks version and dependency compatibility for a given configuration. `check_compatibility` immediately returns with `False` (or failure status) if any child process or checks fail. For error and warning messages, either print `self.(error_msg|warning_msg)` or call `_print` function. Returns: Boolean that is a status of the compatibility check result.
github-repos
def destroy(ads):
    for ad in ads:
        try:
            ad.services.stop_all()
        except Exception:
            ad.log.exception('Failed to clean up properly.')
Cleans up AndroidDevice objects. Args: ads: A list of AndroidDevice objects.
github-repos
def from_str(self, in_str): parts = in_str.split(';') for part in parts: (var_name, value) = part.split(':') if (var_name == 'Obs_Threshold'): self.obs_threshold = float(value) elif (var_name == 'Thresholds'): self.thresholds = np.array(value.split(), dtype=float) self.contingency_tables = pd.DataFrame(columns=self.contingency_tables.columns, data=np.zeros((self.thresholds.size, self.contingency_tables.columns.size))) elif (var_name in self.contingency_tables.columns): self.contingency_tables[var_name] = np.array(value.split(), dtype=int)
Read the DistributedROC string and parse the contingency table values from it. Args: in_str (str): The string output from the __str__ method
codesearchnet
def devectorize(vectorized_mat, method='col'):
    vectorized_mat = np.array(vectorized_mat)
    dimension = int(np.sqrt(vectorized_mat.size))
    if len(vectorized_mat) != dimension * dimension:
        raise Exception('Input is not a vectorized square matrix')

    if method == 'col':
        return vectorized_mat.reshape(dimension, dimension, order='F')
    elif method == 'row':
        return vectorized_mat.reshape(dimension, dimension, order='C')
    elif method in ['pauli', 'pauli_weights']:
        num_qubits = int(np.log2(dimension))
        if dimension != 2 ** num_qubits:
            raise Exception('Input state must be n-qubit state')
        if method == 'pauli_weights':
            pgroup = pauli_group(num_qubits, case='weight')
        else:
            pgroup = pauli_group(num_qubits, case='tensor')
        pbasis = np.array([p.to_matrix() for p in pgroup]) / (2 ** num_qubits)
        return np.tensordot(vectorized_mat, pbasis, axes=1)
    return None
Devectorize a vectorized square matrix. Args: vectorized_mat (ndarray): a vectorized density matrix. method (str): the method of devectorization. Allowed values are - 'col' (default): flattens to column-major vector. - 'row': flattens to row-major vector. - 'pauli': flattens in the n-qubit Pauli basis. - 'pauli-weights': flattens in the n-qubit Pauli basis ordered by weight. Returns: ndarray: the resulting matrix. Raises: Exception: if input state is not a n-qubit state
codesearchnet
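A round-trip check of the column-major case with a small hypothetical matrix, using the same reshape the 'col' branch performs:

import numpy as np

mat = np.array([[1, 2], [3, 4]])
vec = mat.flatten(order='F')                               # column-major: [1, 3, 2, 4]
assert np.array_equal(vec.reshape(2, 2, order='F'), mat)   # what method='col' does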
def clone(self, callable=None, **overrides):
    old = {k: v for k, v in self.get_param_values()
           if k not in ['callable', 'name']}
    params = dict(old, **overrides)
    callable = self.callable if callable is None else callable
    return self.__class__(callable, **params)
Clones the Callable optionally with new settings Args: callable: New callable function to wrap **overrides: Parameter overrides to apply Returns: Cloned Callable object
juraj-google-style
def write_config(params, config_path=None): if config_path is None: config_path = tempfile.mktemp(prefix="mongo-") cfg = params.copy() if 'setParameter' in cfg: set_parameters = cfg.pop('setParameter') try: for key, value in set_parameters.items(): cfg['setParameter = ' + key] = value except AttributeError: reraise(RequestError, 'Not a valid value for setParameter: %r ' 'Expected "setParameter": {<param name> : value, ...}' % set_parameters) for key, value in cfg.items(): if isinstance(value, bool): cfg[key] = json.dumps(value) with open(config_path, 'w') as fd: data = '\n'.join('%s=%s' % (key, item) for key, item in cfg.items()) fd.write(data) return config_path
write mongo*'s config file Args: params - options wich file contains config_path - path to the config_file, will create if None Return config_path where config_path - path to mongo*'s options file
juraj-google-style
def _assert_sparse_indices_are_ragged_right(indices):
    index_prefix = indices[:, :-1]
    index_suffix = indices[:, -1]
    index_prefix_changed = math_ops.reduce_any(
        math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)
    index_ok = array_ops.where(
        index_prefix_changed,
        math_ops.equal(index_suffix[1:], 0),
        math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))
    sparse_indices_are_ragged_right = math_ops.logical_and(
        math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)),
        math_ops.reduce_all(index_ok))
    message = ['SparseTensor is not right-ragged', 'SparseTensor.indices =', indices]
    return [control_flow_assert.Assert(sparse_indices_are_ragged_right, message)]
Checks that the given SparseTensor.indices tensor is ragged-right. Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right because the entry `[3, 1]` skips a cell. Args: indices: The SparseTensor indices to check. Returns: A list of control dependency op tensors.
github-repos
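To make the ragged-right condition concrete, compare two index lists in plain Python (mirroring the docstring's example): within each row prefix, the last index must start at 0 and increment by 1.

ok     = [[0, 0], [0, 1], [2, 0], [2, 1], [3, 0]]   # suffixes restart at 0 per prefix
not_ok = [[0, 0], [0, 1], [2, 0], [3, 1]]           # [3, 1] skips cell [3, 0]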
def kill_plasma_store(self, check_alive=True):
    self._kill_process_type(
        ray_constants.PROCESS_TYPE_PLASMA_STORE, check_alive=check_alive)
Kill the plasma store. Args: check_alive (bool): Raise an exception if the process was already dead.
juraj-google-style
def agg_dims(arr, stat):
    axis = None
    if arr.ndim > 2:
        axis = 1
        arr = arr.reshape(arr.shape[0], -1)
    module = np.ma if hasattr(arr, 'mask') else np
    return getattr(module, stat)(arr, axis)
Returns a 1D array with higher dimensions aggregated using stat fn. Arguments: arr -- ndarray stat -- numpy or numpy.ma function as str to call
juraj-google-style
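A numpy illustration of what the higher-dimensional branch does, aggregating a (2, 3, 4) array down to one value per leading element:

import numpy as np

arr = np.arange(24).reshape(2, 3, 4)
flat = arr.reshape(arr.shape[0], -1)   # ndim > 2: collapse trailing dims
result = np.mean(flat, axis=1)         # equivalent to agg_dims(arr, 'mean')
assert np.allclose(result, [5.5, 17.5])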
def double(self, count: float = 0) -> float:
    return 2 * count
Returns the input multiplied by 2. Args: count: Input number that you want to double. Returns: A number that is the double of count.
github-repos
def inspect_service(self, service, insert_defaults=None): url = self._url('/services/{0}', service) params = {} if (insert_defaults is not None): if utils.version_lt(self._version, '1.29'): raise errors.InvalidVersion('insert_defaults is not supported in API version < 1.29') params['insertDefaults'] = insert_defaults return self._result(self._get(url, params=params), True)
Return information about a service. Args: service (str): Service name or ID. insert_defaults (boolean): If true, default values will be merged into the service inspect output. Returns: (dict): A dictionary of the server-side representation of the service, including all relevant properties. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def ParseFileEntryMetadata(self, parser_mediator, file_entry):
    if self._filestat_parser:
        self._ParseFileEntryWithParser(
            parser_mediator, self._filestat_parser, file_entry)
Parses the file entry metadata e.g. file system data. Args: parser_mediator (ParserMediator): parser mediator. file_entry (dfvfs.FileEntry): file entry.
juraj-google-style
def IsSimpleGroup(component): assert isinstance(component, dict) for unused_key, value in component.items(): if not IsValue(value) and (not isinstance(value, (list, dict))): return False return True
If a group is simple enough, then we treat it as a value in PrintResult. Only if a group contains all value types do we consider it simple enough to print as a value. Args: component: The group to check for value-group status. Returns: A boolean indicating if the group should be treated as a value for printing purposes.
github-repos
def _CreateReadAccessHelper(self): h = CheckAccessHelper('read') h.Allow('aff4:/') h.Allow('aff4:/users') h.Allow('aff4:/users/*', self._IsHomeDir) h.Allow('aff4:/foreman', self._UserHasAdminLabel) h.Allow('aff4:/blobs') h.Allow('aff4:/blobs/*') h.Allow('aff4:/FP') h.Allow('aff4:/FP/*') h.Allow('aff4:/files') h.Allow('aff4:/files/*') h.Allow('aff4:/index') h.Allow('aff4:/index/*') h.Allow('aff4:/client_index') h.Allow('aff4:/client_index/*') h.Allow('aff4:/ACL') h.Allow('aff4:/ACL/*') h.Allow('aff4:/stats') h.Allow('aff4:/stats/*') h.Allow('aff4:/config') h.Allow('aff4:/config/*') h.Allow('aff4:/flows') h.Allow('aff4:/flows/*') h.Allow('aff4:/hunts') h.Allow('aff4:/hunts/*') h.Allow('aff4:/cron') h.Allow('aff4:/cron/*') h.Allow('aff4:/audit') h.Allow('aff4:/audit/*') h.Allow('aff4:/audit/logs') h.Allow('aff4:/audit/logs/*') h.Allow(self.CLIENT_URN_PATTERN) h.Allow((self.CLIENT_URN_PATTERN + '/*'), self._HasAccessToClient) h.Allow('aff4:/artifact_store') h.Allow('aff4:/artifact_store/*') return h
Creates a CheckAccessHelper for controlling read access. This function and _CreateQueryAccessHelper essentially define GRR's ACL policy. Please refer to these 2 functions to either review or modify GRR's ACLs. Read access gives you the ability to open and read aff4 objects for which you already have the URN. Returns: CheckAccessHelper for controlling read access.
codesearchnet
def _manage_location(attr):
    return property(
        lambda self: getattr(self, '_%s' % attr),
        lambda self, value: self._set_location(attr, value))
Build managed property interface. Args: attr (str): Property's name Returns: property: Managed property interface
codesearchnet
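A sketch of how such a property factory is typically wired into a class (class and attribute names are hypothetical):

class Located:
    def __init__(self):
        self._x = 0

    def _set_location(self, attr, value):
        # Stand-in for whatever validation the real setter performs.
        setattr(self, '_%s' % attr, value)

    # Equivalent to x = _manage_location('x')
    x = property(lambda self: self._x,
                 lambda self, value: self._set_location('x', value))

obj = Located()
obj.x = 5
assert obj.x == 5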
def getslice_slot(self, node: cfg.CFGNode, start_var: cfg.Variable, end_var: cfg.Variable) -> tuple[cfg.CFGNode, cfg.Variable]: node, ret = self.call_pytd(node, '__getslice__', start_var, end_var) results = [] unresolved = False if self.is_concrete: for start_val, end_val in cfg_utils.variable_product([start_var, end_var]): try: start = self._get_index(start_val.data) end = self._get_index(end_val.data) except abstract_utils.ConversionError: unresolved = True else: results.append(List(self.pyval[start:end], self.ctx).to_variable(node)) if unresolved or not self.is_concrete: results.append(ret) return (node, self.ctx.join_variables(node, results))
Implements __getslice__ for List. Arguments: node: The current CFG node. start_var: A Variable containing the i in lst[i:j]. end_var: A Variable containing the j in lst[i:j]. Returns: Tuple of (node, return_variable). node may be the same as the argument. return_variable is a Variable with bindings of the possible return values.
github-repos
def get_inheritance(obj_name, obj_type='file'):
    obj_dacl = dacl(obj_name=obj_name, obj_type=obj_type)
    inherited = win32security.INHERITED_ACE
    for i in range(0, obj_dacl.dacl.GetAceCount()):
        ace = obj_dacl.dacl.GetAce(i)
        if (ace[0][1] & inherited) == inherited:
            return True
    return False
Get an object's inheritance. Args: obj_name (str): The name of the object obj_type (Optional[str]): The type of object. Only three object types allow inheritance. Valid objects are: - file (default): This is a file or directory - registry - registry32 (for WOW64) The following should return False as there is no inheritance: - service - printer - share Returns: bool: True if enabled, otherwise False Usage: .. code-block:: python salt.utils.win_dacl.get_inheritance('HKLM\\SOFTWARE\\salt', 'registry')
codesearchnet
def to_concat_skip_model(self, start_id, end_id): self.operation_history.append(("to_concat_skip_model", start_id, end_id)) filters_end = self.layer_list[end_id].output.shape[-1] filters_start = self.layer_list[start_id].output.shape[-1] start_node_id = self.layer_id_to_output_node_ids[start_id][0] pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0] end_node_id = self.layer_id_to_output_node_ids[end_id][0] skip_output_id = self._insert_pooling_layer_chain(start_node_id, end_node_id) concat_input_node_id = self._add_node(deepcopy(self.node_list[end_node_id])) self._redirect_edge(pre_end_node_id, end_node_id, concat_input_node_id) concat_layer = StubConcatenate() concat_layer.input = [ self.node_list[concat_input_node_id], self.node_list[skip_output_id], ] concat_output_node_id = self._add_node(Node(concat_layer.output_shape)) self._add_edge(concat_layer, concat_input_node_id, concat_output_node_id) self._add_edge(concat_layer, skip_output_id, concat_output_node_id) concat_layer.output = self.node_list[concat_output_node_id] self.node_list[concat_output_node_id].shape = concat_layer.output_shape new_conv_layer = get_conv_class(self.n_dim)( filters_start + filters_end, filters_end, 1 ) self._add_edge(new_conv_layer, concat_output_node_id, end_node_id) new_conv_layer.input = self.node_list[concat_output_node_id] new_conv_layer.output = self.node_list[end_node_id] self.node_list[end_node_id].shape = new_conv_layer.output_shape if self.weighted: filter_shape = (1,) * self.n_dim weights = np.zeros((filters_end, filters_end) + filter_shape) for i in range(filters_end): filter_weight = np.zeros((filters_end,) + filter_shape) center_index = (i,) + (0,) * self.n_dim filter_weight[center_index] = 1 weights[i, ...] = filter_weight weights = np.concatenate( (weights, np.zeros((filters_end, filters_start) + filter_shape)), axis=1 ) bias = np.zeros(filters_end) new_conv_layer.set_weights( (add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1]))) )
Add a concatenate skip-connection from after the start node to the end node; a 1x1 convolution after the concatenation restores the end layer's filter count. Args: start_id: The convolutional layer ID, after which to start the skip-connection. end_id: The convolutional layer ID, after which to end the skip-connection.
juraj-google-style
def pack_x_y_sample_weight(x, y=None, sample_weight=None): if y is None: if not isinstance(x, (tuple, list)): return x else: return (x,) elif sample_weight is None: return (x, y) else: return (x, y, sample_weight)
Packs user-provided data into a tuple. This is a convenience utility for packing data into the tuple formats that `Model.fit()` uses. Example: >>> x = ops.ones((10, 1)) >>> data = pack_x_y_sample_weight(x) >>> isinstance(data, ops.Tensor) True >>> y = ops.ones((10, 1)) >>> data = pack_x_y_sample_weight(x, y) >>> isinstance(data, tuple) True >>> x, y = data Args: x: Features to pass to `Model`. y: Ground-truth targets to pass to `Model`. sample_weight: Sample weight for each element. Returns: Tuple in the format used in `Model.fit()`.
github-repos
def create_effect(self, label: str, name: str, *args, **kwargs) -> Effect: effect_cls = effects.find_effect_class(name) effect = effect_cls(*args, **kwargs) effect._label = label if label in self._effects: raise ValueError("An effect with label '{}' already exists".format(label)) self._effects[label] = effect return effect
Create an effect instance adding it to the internal effects dictionary using the label as key. Args: label (str): The unique label for the effect instance name (str): Name or full python path to the effect class we want to instantiate args: Positional arguments to the effect initializer kwargs: Keyword arguments to the effect initializer Returns: The newly created Effect instance
juraj-google-style
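A hedged usage sketch of the method above; `project` stands in for whichever object exposes `create_effect`, and the label and effect-name strings are made up for illustration.

# Register two instances of a (hypothetical) 'cube' effect under unique labels.
intro = project.create_effect('intro_cube', 'cube', size=2.0)
outro = project.create_effect('outro_cube', 'cube', size=0.5)

# Labels must be unique, so reusing one raises ValueError.
try:
    project.create_effect('intro_cube', 'cube')
except ValueError as exc:
    print(exc)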
def get_index(uid, i): return _SHARED_SEQUENCES[uid][i]
Get the value from the PyDataset `uid` at index `i`. To allow multiple PyDatasets to be used at the same time, we use `uid` to get a specific one. A single PyDataset would cause the validation to overwrite the training PyDataset. This method is called from worker threads. Args: uid: int, PyDataset identifier i: index Returns: The value at index `i`.
github-repos
def WriteFileEntry(self, path): string = '{0:s}\n'.format(path) encoded_string = self._EncodeString(string) self._file_object.write(encoded_string)
Writes the file path to file. Args: path (str): path of the file.
codesearchnet
def _duplicate_example(self, request): index = int(request.args.get('index')) if (index >= len(self.examples)): return http_util.Respond(request, {'error': 'invalid index provided'}, 'application/json', code=400) new_example = self.example_class() new_example.CopyFrom(self.examples[index]) self.examples.append(new_example) self.updated_example_indices.add((len(self.examples) - 1)) self.generate_sprite([ex.SerializeToString() for ex in self.examples]) return http_util.Respond(request, {}, 'application/json')
Duplicates the specified example. Args: request: A request that should contain 'index'. Returns: An empty response.
codesearchnet
def set_metadata(self, key: str, value: str): if ((not isinstance(key, str)) or (not isinstance(value, str))): raise TypeError("'key' and 'value' of metadata MUST be strings") self.metadata[key] = value
Add a new metadata entry to the message. Args: key (str): name of the metadata value (str): value of the metadata
codesearchnet
def image(array, domain=None, width=None, format='png', **kwargs): image_data = serialize_array(array, fmt=format, domain=domain) image = IPython.display.Image(data=image_data, format=format, width=width) IPython.display.display(image)
Display an image. Args: array: NumPy array representing the image domain: Domain of pixel values, inferred from min & max values if None width: Width of output image, scaled using nearest neighbor interpolation. Size unchanged if None format: Image format e.g. png, jpeg
codesearchnet
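A short notebook-style sketch, assuming the helper above is in scope as `image` and that `serialize_array` handles float arrays scaled by the given domain:

import numpy as np

rgb = np.random.rand(64, 64, 3)                      # random RGB image in [0, 1]
image(rgb, domain=(0, 1), width=256, format='png')   # rendered inline in the notebook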
def make_multiscale(image, resolutions, resize_method=tf.image.ResizeMethod.BICUBIC, num_channels=3): scaled_images = [] for height in resolutions: scaled_image = tf.image.resize_images( image, size=[height, height], method=resize_method) scaled_image = tf.to_int64(scaled_image) scaled_image.set_shape([height, height, num_channels]) scaled_images.append(scaled_image) return scaled_images
Returns list of scaled images, one for each resolution. Args: image: Tensor of shape [height, height, num_channels]. resolutions: List of heights that image's height is resized to. resize_method: tf.image.ResizeMethod. num_channels: Number of channels in image. Returns: List of Tensors, one for each resolution with shape given by [resolutions[i], resolutions[i], num_channels].
juraj-google-style
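A rough TF1-style sketch of calling the helper above; the input tensor and resolution list are arbitrary:

import tensorflow as tf

img = tf.random_uniform([256, 256, 3], maxval=255.0)        # dummy image tensor
pyramid = make_multiscale(img, resolutions=[64, 128, 256])
# pyramid is a list of int64 tensors shaped [64, 64, 3], [128, 128, 3], [256, 256, 3].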
def call(self, inputs): del inputs with tf.compat.v1.name_scope(self._name): return tfd.MultivariateNormalDiag(self.loc, self.scale_diag)
Runs the model to generate multivariate normal distribution. Args: inputs: Unused. Returns: A MultivariateNormalDiag distribution with event shape [dimensions], batch shape [], and sample shape [sample_shape, dimensions].
codesearchnet
def remove_tree_by_path(self, path): with transaction.manager: trees = self.path_db.get(path, None) if not trees: return for tree in trees: return self._remove_tree(tree)
Remove the tree from database by given `path`. Args: path (str): Path of the tree.
juraj-google-style
def _unable_to_call_layer_due_to_serialization_issue(layer, *unused_args, **unused_kwargs):
    # The error message (including the full URL) is truncated in the source;
    # the string is closed and the format arguments reconstructed here so the
    # function is runnable.
    raise ValueError(
        'Cannot call custom layer {} of type {}, because the call function was '
        'not serialized to the SavedModel. Please try one of the following '
        'methods to fix this issue:\n\n(1) Implement `get_config` and '
        '`from_config` in the layer/model class, and pass the object to the '
        '`custom_objects` argument when loading the model. For more details, '
        'see: https:'.format(layer.name, type(layer)))
Replaces the `layer.call` if the layer was not fully serialized. Keras Model/Layer serialization is relatively relaxed because SavedModels are not always loaded back as keras models. Thus, when there is an issue tracing a non-signature function, a warning is logged instead of raising an error. This results in a SavedModel where the model's call function is saved, but the internal layer call functions are not. When deserialized with `tf.keras.models.load_model`, the internal layers which do not have serialized call functions should raise an error when called. Args: layer: Layer without the serialized call function. Raises: ValueError
github-repos
def add_signature(key, inputs, outputs): _check_dict_maps_to_tensors_or_sparse_tensors(inputs) _check_dict_maps_to_tensors_or_sparse_tensors(outputs) input_info = {input_name: tf_v1.saved_model.utils.build_tensor_info(tensor) for (input_name, tensor) in inputs.items()} output_info = {output_name: tf_v1.saved_model.utils.build_tensor_info(tensor) for (output_name, tensor) in outputs.items()} signature = tf_v1.saved_model.signature_def_utils.build_signature_def(input_info, output_info) tf_v1.add_to_collection(_SIGNATURE_COLLECTION, (key, signature))
Adds a signature to current graph. Args: key: Signature key as a string. inputs: Signature inputs as a map from string to Tensor or SparseTensor. outputs: Signature outputs as a map from string to Tensor or SparseTensor. (Recall that a Variable is not a Tensor, but Variable.value() is.) Raises: TypeError: if the arguments have the wrong types.
codesearchnet
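A hedged sketch of how a signature is usually registered from inside a TF-Hub style module_fn; the placeholder and dense layer are illustrative only:

import tensorflow.compat.v1 as tf

def module_fn():
    x = tf.placeholder(tf.float32, shape=[None, 4], name='x')
    y = tf.layers.dense(x, 2, name='logits')
    add_signature('default', inputs={'x': x}, outputs={'y': y})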
def get_segment(neuron, section_id, segment_id):
    sec = neuron.sections[section_id]
    return sec.points[segment_id:segment_id + 2][:, COLS.XYZR]
Get a segment given a section and segment id Returns: array of two [x, y, z, r] points defining segment
codesearchnet
def __init__(self, channel): self.CreateCluster = channel.unary_unary( "/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster", request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.CreateClusterRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.UpdateCluster = channel.unary_unary( "/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster", request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.UpdateClusterRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.DeleteCluster = channel.unary_unary( "/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster", request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.DeleteClusterRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.GetCluster = channel.unary_unary( "/google.cloud.dataproc.v1beta2.ClusterController/GetCluster", request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.GetClusterRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.Cluster.FromString, ) self.ListClusters = channel.unary_unary( "/google.cloud.dataproc.v1beta2.ClusterController/ListClusters", request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.ListClustersRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.ListClustersResponse.FromString, ) self.DiagnoseCluster = channel.unary_unary( "/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster", request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.DiagnoseClusterRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def query(self, attributes=None, filters=None, only_unique=True, use_attr_names=False, dtypes=None): root = ElementTree.Element('Query') root.set('virtualSchemaName', self._virtual_schema) root.set('formatter', 'TSV') root.set('header', '1') root.set('uniqueRows', native_str(int(only_unique))) root.set('datasetConfigVersion', '0.6') dataset = ElementTree.SubElement(root, 'Dataset') dataset.set('name', self.name) dataset.set('interface', 'default') if (attributes is None): attributes = list(self.default_attributes.keys()) for name in attributes: try: attr = self.attributes[name] self._add_attr_node(dataset, attr) except KeyError: raise BiomartException('Unknown attribute {}, check dataset attributes for a list of valid attributes.'.format(name)) if (filters is not None): for (name, value) in filters.items(): try: filter_ = self.filters[name] self._add_filter_node(dataset, filter_, value) except KeyError: raise BiomartException('Unknown filter {}, check dataset filters for a list of valid filters.'.format(name)) response = self.get(query=ElementTree.tostring(root)) if ('Query ERROR' in response.text): raise BiomartException(response.text) try: result = pd.read_csv(StringIO(response.text), sep='\t', dtype=dtypes) except TypeError as err: raise ValueError('Non valid data type is used in dtypes') if use_attr_names: column_map = {self.attributes[attr].display_name: attr for attr in attributes} result.rename(columns=column_map, inplace=True) return result
Queries the dataset to retrieve the contained data. Args: attributes (list[str]): Names of attributes to fetch in query. Attribute names must correspond to valid attributes. See the attributes property for a list of valid attributes. filters (dict[str,any]): Dictionary of filters --> values to filter the dataset by. Filter names and values must correspond to valid filters and filter values. See the filters property for a list of valid filters. only_unique (bool): Whether to return only rows containing unique values (True) or to include duplicate rows (False). use_attr_names (bool): Whether to use the attribute names as column names in the result (True) or the attribute display names (False). dtypes (dict[str,any]): Dictionary of attributes --> data types to describe to pandas how the columns should be handled Returns: pandas.DataFrame: DataFrame containing the query results.
codesearchnet
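A hedged end-to-end sketch in the pybiomart style this method comes from; the mart/dataset names and attribute/filter identifiers are the usual Ensembl ones and should be checked against the dataset's `attributes` and `filters` properties:

from pybiomart import Server

server = Server(host='http://www.ensembl.org')
dataset = server.marts['ENSEMBL_MART_ENSEMBL'].datasets['hsapiens_gene_ensembl']

df = dataset.query(attributes=['ensembl_gene_id', 'external_gene_name'],
                   filters={'chromosome_name': ['1']},
                   use_attr_names=True)
print(df.head())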
def apply( self, func, num_splits=None, other_axis_partition=None, maintain_partitioning=True, **kwargs ): import dask if num_splits is None: num_splits = len(self.list_of_blocks) if other_axis_partition is not None: return [ DaskFramePartition(dask.delayed(obj)) for obj in deploy_func_between_two_axis_partitions( self.axis, func, num_splits, len(self.list_of_blocks), kwargs, *dask.compute( *tuple( self.list_of_blocks + other_axis_partition.list_of_blocks ) ) ) ] args = [self.axis, func, num_splits, kwargs, maintain_partitioning] args.extend(dask.compute(*self.list_of_blocks)) return [ DaskFramePartition(dask.delayed(obj)) for obj in deploy_axis_func(*args) ]
Applies func to the object. See notes in Parent class about this method. Args: func: The function to apply. num_splits: The number of times to split the result object. other_axis_partition: Another `DaskFrameAxisPartition` object to apply to func with this one. Returns: A list of `DaskFramePartition` objects.
juraj-google-style
def create(self, interface, vrid, **kwargs): if ('enable' not in kwargs): kwargs['enable'] = False return self._vrrp_set(interface, vrid, **kwargs)
Creates a vrrp instance from an interface Note: This method will attempt to create a vrrp in the node's operational config. If the vrrp already exists on the interface, then this method will set the properties of the existing vrrp to those that have been passed in, if possible. Args: interface (string): The interface to configure. vrid (integer): The vrid number for the vrrp to be created. kwargs (dict): A dictionary specifying the properties to be applied to the new vrrp instance. See library documentation for available keys and values. Returns: True if the vrrp could be created otherwise False (see Node)
codesearchnet
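A hedged pyeapi-style sketch of creating a VRRP instance through the method above; the connection profile name, interface, and property values are placeholders:

import pyeapi

node = pyeapi.connect_to('veos01')        # profile name from eapi.conf (assumed)
vrrp = node.api('vrrp')
ok = vrrp.create('Vlan10', 10, primary_ip='10.10.10.1', priority=200, enable=True)
print('vrrp 10 configured' if ok else 'configuration failed')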
def addBorrowers(self, *borrowers): self._borrowers.extend(borrowers) ((debug.logger & debug.flagCompiler) and debug.logger(('current MIB borrower(s): %s' % ', '.join([str(x) for x in self._borrowers])))) return self
Add more transformed MIBs repositories to borrow MIBs from. Whenever MibCompiler.compile encounters MIB module which neither of the *searchers* can find or fetched ASN.1 MIB module can not be parsed (due to syntax errors), these *borrowers* objects will be invoked in order of their addition asking each if already transformed MIB can be fetched (borrowed). Args: borrowers: borrower object(s) Returns: reference to itself (can be used for call chaining)
codesearchnet
def get_csv_row_count(filename: str) -> int: row_count = 0 with open(filename, 'r') as f: for _ in f: row_count += 1 if row_count != 0: row_count -= 1 return row_count
Quickly count number of rows in the given csv file. Args: * filename: Path to CSV file Returns: * number of rows, minus header
github-repos
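A quick self-contained check of the counter above, writing a two-row CSV to a temporary file:

import csv
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'rows.csv')
with open(path, 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['id', 'name'])            # header
    writer.writerows([[1, 'a'], [2, 'b']])     # two data rows

assert get_csv_row_count(path) == 2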
def save(self, checkpoint_dir=None): checkpoint_dir = os.path.join(checkpoint_dir or self.logdir, "checkpoint_{}".format(self._iteration)) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) checkpoint = self._save(checkpoint_dir) saved_as_dict = False if isinstance(checkpoint, string_types): if (not checkpoint.startswith(checkpoint_dir) or checkpoint == checkpoint_dir): raise ValueError( "The returned checkpoint path must be within the " "given checkpoint dir {}: {}".format( checkpoint_dir, checkpoint)) if not os.path.exists(checkpoint): raise ValueError( "The returned checkpoint path does not exist: {}".format( checkpoint)) checkpoint_path = checkpoint elif isinstance(checkpoint, dict): saved_as_dict = True checkpoint_path = os.path.join(checkpoint_dir, "checkpoint") with open(checkpoint_path, "wb") as f: pickle.dump(checkpoint, f) else: raise ValueError( "`_save` must return a dict or string type: {}".format( str(type(checkpoint)))) with open(checkpoint_path + ".tune_metadata", "wb") as f: pickle.dump({ "experiment_id": self._experiment_id, "iteration": self._iteration, "timesteps_total": self._timesteps_total, "time_total": self._time_total, "episodes_total": self._episodes_total, "saved_as_dict": saved_as_dict }, f) return checkpoint_path
Saves the current model state to a checkpoint. Subclasses should override ``_save()`` instead to save state. This method dumps additional metadata alongside the saved path. Args: checkpoint_dir (str): Optional dir to place the checkpoint. Returns: Checkpoint path that may be passed to restore().
juraj-google-style
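A minimal hedged sketch of a Trainable subclass that exercises this save path; it targets the older Ray Tune API the code above belongs to (`_setup`/`_train`/`_save`/`_restore`), and the counter state is a dummy:

from ray.tune import Trainable

class Counter(Trainable):
    def _setup(self, config):
        self.count = 0

    def _train(self):
        self.count += 1
        return {'count': self.count}

    def _save(self, checkpoint_dir):
        return {'count': self.count}       # dict form: pickled by save() above

    def _restore(self, checkpoint):
        self.count = checkpoint['count']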
def _open_config_files(self, command_line_args): config_files = [open(f) for files in map(glob.glob, map(os.path.expanduser, self._default_config_files)) for f in files] user_config_file_arg_actions = [ a for a in self._actions if getattr(a, "is_config_file_arg", False)] if not user_config_file_arg_actions: return config_files for action in user_config_file_arg_actions: arg_parser = argparse.ArgumentParser( prefix_chars=self.prefix_chars, add_help=False) arg_parser._add_action(action) def error_method(self, message): pass arg_parser.error = types.MethodType(error_method, arg_parser) parsed_arg = arg_parser.parse_known_args(args=command_line_args) if not parsed_arg: continue namespace, _ = parsed_arg user_config_file = getattr(namespace, action.dest, None) if not user_config_file: continue user_config_file = os.path.expanduser(user_config_file) if not os.path.isfile(user_config_file): self.error('File not found: %s' % user_config_file) config_files += [open(user_config_file)] return config_files
Tries to parse config file path(s) from within command_line_args. Returns a list of opened config files, including files specified on the commandline as well as any default_config_files specified in the constructor that are present on disk. Args: command_line_args: List of all args (already split on spaces)
juraj-google-style
def get_interpolated_value(self, energy): f = {} for spin in self.densities.keys(): f[spin] = get_linear_interpolated_value(self.energies, self.densities[spin], energy) return f
Returns interpolated density for a particular energy. Args: energy: Energy to return the density for.
juraj-google-style
def LoadFromStorage(cls, path=None): if path is None: path = os.path.join(os.path.expanduser('~'), 'googleads.yaml') return cls(**googleads.common.LoadFromStorage( path, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES, cls._OPTIONAL_INIT_VALUES))
Creates an AdWordsClient with information stored in a yaml file. Args: [optional] path: The path string to the file containing cached AdWords data. Returns: An AdWordsClient initialized with the values cached in the file. Raises: A GoogleAdsValueError if the given yaml file does not contain the information necessary to instantiate a client object - either a required key was missing or an OAuth2 key was missing.
juraj-google-style
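A hedged sketch of the usual googleads bootstrap around this classmethod; the service name and API version are just common examples:

from googleads import adwords

# Reads the developer token and OAuth2 credentials from ~/googleads.yaml by default.
client = adwords.AdWordsClient.LoadFromStorage()
campaign_service = client.GetService('CampaignService', version='v201809')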
def bessel_k1e(x, name=None): with ops.name_scope(name, 'bessel_k1e', [x]): return gen_special_math_ops.bessel_k1e(x)
Computes the Bessel k1e function of `x` element-wise. Modified Bessel function of order 1. >>> tf.math.special.bessel_k1e([0.5, 1., 2., 4.]).numpy() array([2.73100971, 1.63615349, 1.03347685, 0.68157595], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.k1e @end_compatibility
github-repos
def _GetTextInside(text, start_pattern): matching_punctuation = {'(': ')', '{': '}', '[': ']'} closing_punctuation = set(itervalues(matching_punctuation)) match = re.search(start_pattern, text, re.M) if (not match): return None start_position = match.end(0) assert (start_position > 0), 'start_pattern must ends with an opening punctuation.' assert (text[(start_position - 1)] in matching_punctuation), 'start_pattern must ends with an opening punctuation.' punctuation_stack = [matching_punctuation[text[(start_position - 1)]]] position = start_position while (punctuation_stack and (position < len(text))): if (text[position] == punctuation_stack[(- 1)]): punctuation_stack.pop() elif (text[position] in closing_punctuation): return None elif (text[position] in matching_punctuation): punctuation_stack.append(matching_punctuation[text[position]]) position += 1 if punctuation_stack: return None return text[start_position:(position - 1)]
r"""Retrieves all the text between matching open and close parentheses. Given a string of lines and a regular expression string, retrieve all the text following the expression and between opening punctuation symbols like (, [, or {, and the matching close-punctuation symbol. This properly nested occurrences of the punctuations, so for the text like printf(a(), b(c())); a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'. start_pattern must match string having an open punctuation symbol at the end. Args: text: The lines to extract text. Its comments and strings must be elided. It can be single line and can span multiple lines. start_pattern: The regexp string indicating where to start extracting the text. Returns: The extracted text. None if either the opening string or ending punctuation could not be found.
codesearchnet
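A direct check of the example given in the docstring above, assuming `_GetTextInside` (and the `re`/`itervalues` imports it relies on) are in scope:

text = 'printf(a(), b(c()));'
print(_GetTextInside(text, r'printf\('))    # -> a(), b(c())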
def establish_ssh_connection(ip, ssh_private_key_file, ssh_user, port, attempts=5, timeout=None):
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    while attempts:
        try:
            client.connect(ip, port=port, username=ssh_user, key_filename=ssh_private_key_file, timeout=timeout)
        except Exception:
            attempts -= 1
            time.sleep(10)
        else:
            return client
    raise IpaSSHException('Failed to establish SSH connection to instance.')
Establish ssh connection and return paramiko client. Raises: IpaSSHException: If connection cannot be established in given number of attempts.
codesearchnet
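A hedged usage sketch of the helper above; the host, key path, and command are placeholders:

client = establish_ssh_connection(
    '203.0.113.10',
    ssh_private_key_file='/home/user/.ssh/id_rsa',
    ssh_user='ec2-user',
    port=22,
    timeout=30)

_, stdout, _ = client.exec_command('uname -a')
print(stdout.read().decode())
client.close()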