Columns: code (string, 20 to 4.93k chars), docstring (string, 33 to 1.27k chars), source (one of 3 classes: codesearchnet, juraj-google-style, github-repos).
def to_json_str(self):
    adict = dict(vars(self), sort_keys=True)
    adict['type'] = self.__class__.__name__
    return json.dumps(adict)

Convert data to json string representation.

Returns:
    json representation as string.
codesearchnet
def get_organisation(self, **query_params):
    organisation_json = self.get_organisations_json(self.base_uri, query_params=query_params)
    return self.create_organisation(organisation_json)

Get the Organisation for this board.

Returns:
    Organisation: The organisation attached to this board.
codesearchnet
def get_airport_details(self, iata, page=1, limit=100):
    url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
    details = self._fr24.get_airport_details(url)
    weather = self._fr24.get_airport_weather(url)
    details['position']['elevation'] = weather['elevation']
    return details

Retrieve the details of an airport.

Given the IATA code of an airport, this method returns detailed
information like lat/lon, full name, URL, codes etc.

Args:
    iata (str): The IATA code for an airport, e.g. HYD
    page (int): Optional page number; users on a paid flightradar24 plan
        can pass higher page numbers to get more data.
    limit (int): Optional limit on number of records returned.

Returns:
    A list of dicts with the data; one dict for each row of data from
    flightradar24.

Example::

    from pyflightdata import FlightData
    f = FlightData()
    # optional login
    f.login(myemail, mypassword)
    f.get_airport_details('HYD')
    f.get_airport_details('HYD', page=1, limit=10)
codesearchnet
def create(self, name, passphrase=None, wallet_data=None):
    if not self.application:
        raise RoundError('User accounts are limited to one wallet. Make an account or shoot us an email <dev@gem.co> if you have a compelling use case for more.')
    if not passphrase and not wallet_data:
        raise ValueError('Usage: wallets.create(name, passphrase [, wallet_data])')
    elif passphrase:
        wallet_data = generate(
            passphrase,
            trees=['primary', 'backup'] if self.application else ['primary'])

    wallet = dict(primary_private_seed=wallet_data['primary']['encrypted_seed'],
                  primary_public_seed=wallet_data['primary']['public_seed'],
                  name=name)
    if self.application:
        wallet['backup_public_seed'] = wallet_data['backup']['public_seed']

    resource = self.resource.create(wallet)
    wallet = self.wrap(resource)
    return ((wallet_data['backup']['private_seed'], self.add(wallet))
            if self.application else self.add(wallet))

Create a new Wallet object and add it to this Wallets collection.

This is only available in this library for Application wallets. Users
must add additional wallets in their User Console.

Args:
    name (str): wallet name
    passphrase (str, optional): A passphrase with which to encrypt a user
        wallet. If not supplied, wallet_data is mandatory.
    wallet_data (dict): Output from wallets.generate. For User Wallets,
        only the primary tree is used. For Application Wallets, the
        primary and backup trees are used.

Returns:
    A tuple of the form (backup_private_seed, round.Wallet).
codesearchnet
def merge(self, workdir, pot_files, out_dvdb, delete_source=True):
    pot_files = [os.path.abspath(s) for s in list_strings(pot_files)]
    if not os.path.isabs(out_dvdb):
        out_dvdb = os.path.join(os.path.abspath(workdir), os.path.basename(out_dvdb))

    if self.verbose:
        print('Will merge %d files into output DVDB %s' % (len(pot_files), out_dvdb))
        for i, f in enumerate(pot_files):
            print(' [%d] %s' % (i, f))

    # A single file does not need merging; just copy it over.
    if len(pot_files) == 1:
        with open(pot_files[0], 'r') as inh, open(out_dvdb, 'w') as out:
            for line in inh:
                out.write(line)
        return out_dvdb

    self.stdin_fname, self.stdout_fname, self.stderr_fname = map(
        os.path.join, 3 * [os.path.abspath(workdir)],
        ['mrgdvdb.stdin', 'mrgdvdb.stdout', 'mrgdvdb.stderr'])

    inp = StringIO()
    inp.write(out_dvdb + '\n')             # Name of the output file.
    inp.write(str(len(pot_files)) + '\n')  # Number of input POT files.
    for fname in pot_files:
        inp.write(fname + '\n')

    self.stdin_data = [s for s in inp.getvalue()]
    with open(self.stdin_fname, 'wt') as fh:
        fh.writelines(self.stdin_data)
        fh.flush()
        os.fsync(fh.fileno())

    retcode = self.execute(workdir)
    if retcode == 0 and delete_source:
        for f in pot_files:
            try:
                os.remove(f)
            except IOError:
                pass

    return out_dvdb

Merge POT files containing the 1st order DFPT potential and return the
absolute path of the new database in workdir.

Args:
    delete_source: True if POT1 files should be removed after a
        (successful) merge.
codesearchnet
def kill_raylet_monitor(self, check_alive=True):
    self._kill_process_type(ray_constants.PROCESS_TYPE_RAYLET_MONITOR,
                            check_alive=check_alive)

Kill the raylet monitor.

Args:
    check_alive (bool): Raise an exception if the process was already dead.
codesearchnet
def period_end_day(self, value=None):
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} needs to be of type str for field `period_end_day`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma for field `period_end_day`')
    self._period_end_day = value

Corresponds to IDD Field `period_end_day`.

Args:
    value (str): value for IDD Field `period_end_day`. If `value` is None
        it will not be checked against the specification and is assumed
        to be a missing value.

Raises:
    ValueError: if `value` is not a valid value
codesearchnet
def label(self, name):
    if isinstance(name, str):
        self._label = name
    else:
        raise TypeError('label expects a string')

Set snapshot label to name.

Args:
    name (str or None): label to assign unitary

Raises:
    TypeError: name is not string or None.
codesearchnet
def get_unique_variable(name):
    candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)
    if not candidates:
        raise ValueError("Couldn't find variable %s" % name)
    for candidate in candidates:
        if candidate.op.name == name:
            return candidate
    raise ValueError('Variable %s does not uniquely identify a variable' % name)

Gets the variable uniquely identified by that name.

Args:
    name: a name that uniquely identifies the variable.

Returns:
    a tensorflow variable.

Raises:
    ValueError: if no variable uniquely identified by the name exists.
juraj-google-style
def save(self, output_saved_model_dir):
    assert self._converted
    if self._need_calibration:
        assert self._calibration_data_collected
    if self._input_graph_def:
        raise ValueError('Not able to save to a SavedModel since input is a GraphDef')

    def _restore_collections(dest_graph, src_meta_graph_def, collection_keys):
        scope = ''
        for key in collection_keys:
            collection_def = src_meta_graph_def.collection_def[key]
            kind = collection_def.WhichOneof('kind')
            if kind is None:
                logging.error('Cannot identify data type for collection %s. Skipping.', key)
                continue
            from_proto = ops.get_from_proto_function(key)
            if from_proto and kind == 'bytes_list':
                proto_type = ops.get_collection_proto_type(key)
                for value in collection_def.bytes_list.value:
                    proto = proto_type()
                    proto.ParseFromString(value)
                    try:
                        new_value = from_proto(proto, import_scope=scope)
                    except:
                        continue
                    dest_graph.add_to_collection(key, new_value)
            else:
                field = getattr(collection_def, kind)
                if kind == 'node_list':
                    for value in field.value:
                        name = ops.prepend_name_scope(value, scope)
                        try:
                            col_op = dest_graph.as_graph_element(name)
                        except (TypeError, ValueError, KeyError):
                            continue
                        dest_graph.add_to_collection(key, col_op)
                elif kind == 'int64_list':
                    for value in field.value:
                        dest_graph.add_to_collection(key, int(value))
                else:
                    for value in field.value:
                        dest_graph.add_to_collection(
                            key, ops.prepend_name_scope(value, scope))

    saved_model_builder = builder.SavedModelBuilder(output_saved_model_dir)
    with ops.Graph().as_default():
        importer.import_graph_def(self._converted_graph_def, name='')
        _restore_collections(
            ops.get_default_graph(), self._grappler_meta_graph_def,
            self._collections_to_keep(self._grappler_meta_graph_def.collection_def))
        with session.Session() as sess:
            saved_model_builder.add_meta_graph_and_variables(
                sess, self._input_saved_model_tags,
                signature_def_map=self._grappler_meta_graph_def.signature_def)
    saved_model_builder.save()

Save the converted graph as a SavedModel.

Args:
    output_saved_model_dir: construct a SavedModel using the converted
        GraphDef and save it to the specified directory. This option only
        works when the input graph is loaded from a SavedModel, i.e. when
        input_saved_model_dir is specified and input_graph_def is None in
        __init__().

Raises:
    ValueError: if the input to the converter is a GraphDef instead of a
        SavedModel.
github-repos
def track(self, event_key, user_id, attributes=None, event_tags=None):
    if not self.is_valid:
        self.logger.error(enums.Errors.INVALID_DATAFILE.format('track'))
        return
    if not validator.is_non_empty_string(event_key):
        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('event_key'))
        return
    if not isinstance(user_id, string_types):
        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
        return
    if not self._validate_user_inputs(attributes, event_tags):
        return

    event = self.config.get_event(event_key)
    if not event:
        self.logger.info('Not tracking user "%s" for event "%s".' % (user_id, event_key))
        return

    conversion_event = self.event_builder.create_conversion_event(
        event_key, user_id, attributes, event_tags)
    self.logger.info('Tracking event "%s" for user "%s".' % (event_key, user_id))
    self.logger.debug('Dispatching conversion event to URL %s with params %s.' % (
        conversion_event.url, conversion_event.params))
    try:
        self.event_dispatcher.dispatch_event(conversion_event)
    except:
        self.logger.exception('Unable to dispatch conversion event!')
    self.notification_center.send_notifications(
        enums.NotificationTypes.TRACK, event_key, user_id, attributes,
        event_tags, conversion_event)

Send conversion event to Optimizely.

Args:
    event_key: Event key representing the event which needs to be recorded.
    user_id: ID for user.
    attributes: Dict representing visitor attributes and values which need
        to be recorded.
    event_tags: Dict representing metadata associated with the event.
codesearchnet
def GetCodeObjectAtLine(module, line):
    if not hasattr(module, '__file__'):
        return (False, (None, None))

    prev_line = 0
    next_line = six.MAXSIZE
    for code_object in _GetModuleCodeObjects(module):
        for co_line_number in _GetLineNumbers(code_object):
            if co_line_number == line:
                return (True, code_object)
            elif co_line_number < line:
                prev_line = max(prev_line, co_line_number)
            elif co_line_number > line:
                next_line = min(next_line, co_line_number)
                break

    prev_line = None if prev_line == 0 else prev_line
    next_line = None if next_line == six.MAXSIZE else next_line
    return (False, (prev_line, next_line))

Searches for a code object at the specified line in the specified module.

Args:
    module: module to explore.
    line: 1-based line number of the statement.

Returns:
    (True, Code object) on success or (False, (prev_line, next_line)) on
    failure, where prev_line and next_line are the closest lines with code
    above and below the specified line, or None if they do not exist.
codesearchnet
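A minimal usage sketch for the lookup above; the module and line number here are illustrative placeholders:

import my_module  # hypothetical module to inspect

found, result = GetCodeObjectAtLine(my_module, 42)
if found:
    code_object = result  # code object with a statement at line 42
else:
    prev_line, next_line = result  # nearest lines with code, or None
    print('No code at line 42; closest lines: %s, %s' % (prev_line, next_line))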
def __init__(self, identifier, text_format=False):
    super(FormatSpecification, self).__init__()
    self._text_format = text_format
    self.identifier = identifier
    self.signatures = []

Initializes a format specification.

Args:
    identifier (str): unique name for the format.
    text_format (Optional[bool]): True if the format is a text format,
        False otherwise.
juraj-google-style
def _get_column_alias(builder: column_expression_builder.ColumnExpressionBuilder) -> str:
    if builder.column_name:
        return builder.column_name
    else:
        invoke_node = builder.node
        # Walk up the expression tree until a node with an identifier is found.
        while invoke_node and (not hasattr(invoke_node, 'identifier')
                               or not invoke_node.identifier):
            invoke_node = invoke_node.parent_node
        if _fhir_path_data_types.returns_collection(invoke_node.return_type):
            return f'{invoke_node.identifier}_element_'
        else:
            return invoke_node.identifier

Determine the column alias based on the builder's state.

Args:
    builder: A ColumnExpressionBuilder object.

Returns:
    A string representing the column alias.
github-repos
def CreateBudget(client):
    budget_service = client.GetService('BudgetService', version='v201809')
    budget = {
        'name': 'Interplanetary Cruise App Budget #%s' % uuid.uuid4(),
        'amount': {
            'microAmount': '50000000'
        },
        'deliveryMethod': 'STANDARD',
        'isExplicitlyShared': False
    }
    budget_operations = [{
        'operator': 'ADD',
        'operand': budget
    }]
    budget_id = budget_service.mutate(budget_operations)['value'][0]['budgetId']
    return budget_id

Creates a budget and returns its budgetId.

Args:
    client: An AdWordsClient instance.

Returns:
    An int budgetId for the created Budget.
juraj-google-style
def setup(__pkg: ModuleType) -> Tuple[Callable[[str], str], Callable[[str, str, int], str]]:
    package_locale = path.join(path.dirname(__pkg.__file__), 'locale')
    gettext.install(__pkg.__name__, package_locale)
    return gettext.gettext, gettext.ngettext

Configure ``gettext`` for given package.

Args:
    __pkg: Package to use as location for :program:`gettext` files

Returns:
    :program:`gettext` functions for singular and plural translations
juraj-google-style
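A minimal usage sketch, assuming a package `mypkg` that ships a `locale/` directory alongside its `__init__.py`:

import mypkg  # hypothetical package with bundled gettext catalogues

_, ngettext = setup(mypkg)
print(_('Hello'))                             # translated if a catalogue matches
print(ngettext('%d file', '%d files', 3) % 3)  # '3 files' when no catalogue is found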
def minutes(start, end=None):
    return iterate.between(start, datetime.timedelta(minutes=1), end)

Iterate over the minutes between the given datetime_tzs.

Args:
    start: datetime_tz to start from.
    end: (Optional) Date to end at. If not given the iterator will never
        terminate.

Returns:
    An iterator which generates datetime_tz objects a minute apart.
codesearchnet
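A short usage sketch, assuming the datetime_tz package that this helper wraps; islice is needed because the iterator never terminates without `end`:

import itertools
import datetime_tz  # assumed dependency of the module above

start = datetime_tz.datetime_tz.now()
for tick in itertools.islice(minutes(start), 5):
    print(tick)  # five consecutive minute marks starting at `start`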
def _GetClientLibCallback(args, client_func=_GetClientLib):
    client_paths = client_func(
        args.service, args.language, args.output, args.build_system,
        hostname=args.hostname, application_path=args.application)
    for client_path in client_paths:
        print 'API client library written to %s' % client_path

Generate discovery docs and client libraries to files.

Args:
    args: An argparse.Namespace object to extract parameters from.
    client_func: A function that generates client libraries and stores them
        to files, accepting a list of service names, a client library
        language, an output directory, a build system for the client
        library language, and a hostname.
juraj-google-style
def _bfd_rx(self, **kwargs):
    method_name = ('rbridge_id_router_router_bgp_router_bgp_attributes_'
                   'bfd_interval_min_rx')
    bfd_rx = getattr(self._rbridge, method_name)
    config = bfd_rx(**kwargs)
    if kwargs['delete']:
        tag = 'min-rx'
        config.find('.//*%s' % tag).set('operation', 'delete')
    return config

Return the BFD minimum receive interval XML.

You should not use this method. You probably want `BGP.bfd`.

Args:
    min_rx (str): BFD receive interval in milliseconds (300, 500, etc)
    delete (bool): Remove the configuration if ``True``.

Returns:
    XML to be passed to the switch.

Raises:
    None
juraj-google-style
def GetSystemConfigurationArtifact(self, session_identifier=CURRENT_SESSION):
    system_configuration = artifacts.SystemConfigurationArtifact()
    system_configuration.code_page = self.GetValue(
        'codepage', default_value=self._codepage)
    system_configuration.hostname = self._hostnames.get(session_identifier, None)
    system_configuration.keyboard_layout = self.GetValue('keyboard_layout')
    system_configuration.operating_system = self.GetValue('operating_system')
    system_configuration.operating_system_product = self.GetValue(
        'operating_system_product')
    system_configuration.operating_system_version = self.GetValue(
        'operating_system_version')

    date_time = datetime.datetime(2017, 1, 1)
    time_zone = self._time_zone.tzname(date_time)
    if time_zone and isinstance(time_zone, py2to3.BYTES_TYPE):
        time_zone = time_zone.decode('ascii')
    system_configuration.time_zone = time_zone

    user_accounts = self._user_accounts.get(session_identifier, {})
    system_configuration.user_accounts = list(user_accounts.values())
    return system_configuration

Retrieves the knowledge base as a system configuration artifact.

Args:
    session_identifier (Optional[str]): session identifier, where
        CURRENT_SESSION represents the active session.

Returns:
    SystemConfigurationArtifact: system configuration artifact.
juraj-google-style
def build_info(self):
    if self.is_bootloader:
        self.log.error('Device is in fastboot mode, could not get build info.')
        return
    if self._build_info is None or self._is_rebooting:
        info = {}
        build_info = self.adb.getprops(CACHED_SYSTEM_PROPS)
        for build_info_constant in BuildInfoConstants:
            info[build_info_constant.build_info_key] = build_info.get(
                build_info_constant.system_prop_key, '')
        self._build_info = info
        return info
    return self._build_info

Gets the build info of this Android device, including build id and type.

This is not available if the device is in bootloader mode.

Returns:
    A dict with the build info of this Android device, or None if the
    device is in bootloader mode.
github-repos
def get_dihedral(self, i: int, j: int, k: int, l: int) -> float:
    v1 = self[k].coords - self[l].coords
    v2 = self[j].coords - self[k].coords
    v3 = self[i].coords - self[j].coords
    v23 = np.cross(v2, v3)
    v12 = np.cross(v1, v2)
    return math.degrees(math.atan2(
        np.linalg.norm(v2) * np.dot(v1, v23), np.dot(v12, v23)))

Returns dihedral angle specified by four sites.

Args:
    i: Index of first site
    j: Index of second site
    k: Index of third site
    l: Index of fourth site

Returns:
    Dihedral angle in degrees.
codesearchnet
def parse_received(received):
    values_by_clause = {}
    for pattern in RECEIVED_COMPILED_LIST:
        matches = [match for match in pattern.finditer(received)]
        if len(matches) == 0:
            log.debug("No matches found for %s in %s" % (pattern.pattern, received))
            continue
        elif len(matches) > 1:
            msg = "More than one match found for %s in %s" % (pattern.pattern, received)
            log.error(msg)
            raise MailParserReceivedParsingError(msg)
        else:
            log.debug("Found one match for %s in %s" % (pattern.pattern, received))
            match = matches[0].groupdict()
            if six.PY2:
                values_by_clause[match.keys()[0]] = match.values()[0]
            elif six.PY3:
                key = list(match.keys())[0]
                value = list(match.values())[0]
                values_by_clause[key] = value
    if len(values_by_clause) == 0:
        msg = "Unable to match any clauses in %s" % (received)
        log.error(msg)
        raise MailParserReceivedParsingError(msg)
    return values_by_clause

Parse a single received header. Return a dictionary of values by clause.

Arguments:
    received {str} -- single received header

Raises:
    MailParserReceivedParsingError -- Raised when a received header cannot
        be parsed

Returns:
    dict -- values by clause
juraj-google-style
def mknod(self, filename, mode=None, device=None, dir_fd=None):
    if self.filesystem.is_windows_fs:
        raise AttributeError("module 'os' has no attribute 'mknode'")
    if mode is None:
        mode = S_IFREG | 0o600
    if device or (not (mode & S_IFREG) and not is_root()):
        self.filesystem.raise_os_error(errno.EPERM)

    filename = self._path_with_dir_fd(filename, self.mknod, dir_fd)
    head, tail = self.path.split(filename)
    if not tail:
        if self.filesystem.exists(head, check_link=True):
            self.filesystem.raise_os_error(errno.EEXIST, filename)
        self.filesystem.raise_os_error(errno.ENOENT, filename)
    if tail in (b'.', u'.', b'..', u'..'):
        self.filesystem.raise_os_error(errno.ENOENT, filename)
    if self.filesystem.exists(filename, check_link=True):
        self.filesystem.raise_os_error(errno.EEXIST, filename)
    try:
        self.filesystem.add_object(
            head, FakeFile(tail, mode & ~self.filesystem.umask,
                           filesystem=self.filesystem))
    except IOError as e:
        self.filesystem.raise_os_error(e.errno, filename)

Create a filesystem node named 'filename'.

Does not support device special files or named pipes as the real os
module does.

Args:
    filename: (str) Name of the file to create
    mode: (int) Permissions to use and type of file to be created.
        Default permissions are 0o666. Only the stat.S_IFREG file type
        is supported by the fake implementation. The umask is applied
        to this mode.
    device: not supported in fake implementation
    dir_fd: If not `None`, the file descriptor of a directory, with
        `filename` being relative to this directory. New in Python 3.3.

Raises:
    OSError: if called with unsupported options or the file can not be
        created.
codesearchnet
def add_business_days(self, date_tensor, num_days,
                      roll_convention=constants.BusinessDayConvention.NONE):
    pass

Adds given number of business days to given dates.

Note that this is different from calling `add_period_and_roll` with
PeriodType.DAY. For example, adding 5 business days to Monday gives the
next Monday (unless there are holidays on this week or next Monday).
Adding 5 days and rolling means landing on Saturday and then rolling
either to next Monday or to Friday of the same week, depending on the
roll convention.

If any of the dates in `date_tensor` are not business days, they will be
rolled to business days before doing the addition. If `roll_convention`
is `NONE`, and any dates are not business days, an exception is raised.

Args:
    date_tensor: DateTensor of dates to advance from.
    num_days: Tensor of int32 type broadcastable to `date_tensor`.
    roll_convention: BusinessDayConvention. Determines how to roll a date
        that falls on a holiday.

Returns:
    The resulting DateTensor.
github-repos
def assert_corofunction(**kw):
    for name, value in kw.items():
        if not asyncio.iscoroutinefunction(value):
            raise TypeError(
                'paco: {} must be a coroutine function'.format(name))

Asserts that the given values are coroutine functions.

Arguments:
    **kw (mixed): values to check.

Raises:
    TypeError: if the assertion fails.
juraj-google-style
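A quick usage sketch of the assertion above:

import asyncio

async def fetch():
    return 42

assert_corofunction(coro=fetch)       # passes silently

def not_a_coro():
    return 42

assert_corofunction(coro=not_a_coro)  # raises TypeError: 'paco: coro must be a coroutine function'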
def gen_pdf(rst_content, style_text, header=None, footer=FOOTER):
    out_file_obj = StringIO()
    with NamedTemporaryFile() as f:
        f.write(style_text)
        f.flush()
        pdf = _init_pdf(f.name, header, footer)
        pdf.createPdf(text=rst_content, output=out_file_obj, compressed=True)
    out_file_obj.seek(0)
    return out_file_obj

Create PDF file from `rst_content` using `style_text` as style.

Optionally, add `header` or `footer`.

Args:
    rst_content (str): Content of the PDF file in restructured text markup.
    style_text (str): Style for the :mod:`rst2pdf` module.
    header (str, default None): Header which will be rendered to each page.
    footer (str, default FOOTER): Footer, which will be rendered to each
        page. See :attr:`FOOTER` for details.

Returns:
    obj: StringIO file instance containing PDF file.
codesearchnet
def _check_root_tag(self, root):
    supported = self.supported_tags()
    if root.tag in supported:
        return
    error = "Document root element ({0}) not one of ({1})"
    raise UnsupportedRootElementError(
        message=error.format(root.tag, supported),
        expected=supported,
        found=root.tag,
    )

Check that the XML element tree has a supported root element.

Args:
    root (etree.Element)

Raises:
    UnsupportedRootElementError
juraj-google-style
def get_instances_with_configs(configs):
    results = []
    for c in configs:
        try:
            serial = c.pop('serial')
        except KeyError:
            raise Error(
                'Required value "serial" is missing in AndroidDevice config %s.' % c)
        is_required = c.get(KEY_DEVICE_REQUIRED, True)
        try:
            ad = AndroidDevice(serial)
            ad.load_config(c)
        except Exception:
            if is_required:
                raise
            ad.log.exception('Skipping this optional device due to error.')
            continue
        results.append(ad)
    return results

Create AndroidDevice instances from a list of dict configs.

Each config should have the required key-value pair 'serial'.

Args:
    configs: A list of dicts each representing the configuration of one
        android device.

Returns:
    A list of AndroidDevice objects.
juraj-google-style
def run(self, args):
    jlink = pylink.JLink()
    if args.test:
        if jlink.test():
            print('Self-test succeeded.')
        else:
            print('Self-test failed.')
    elif args.list is None or args.list in ['usb', 'ip']:
        host = pylink.JLinkHost.USB_OR_IP
        if args.list == 'usb':
            host = pylink.JLinkHost.USB
        elif args.list == 'ip':
            host = pylink.JLinkHost.IP
        emulators = jlink.connected_emulators(host)
        for index, emulator in enumerate(emulators):
            if index > 0:
                print('')
            print('Product Name: %s' % emulator.acProduct.decode())
            print('Serial Number: %s' % emulator.SerialNumber)
            usb = bool(emulator.Connection)
            if not usb:
                print('Nickname: %s' % emulator.acNickname.decode())
            print('Firmware: %s' % emulator.acFWString.decode())
            print('Connection: %s' % ('USB' if usb else 'IP'))
            if not usb:
                print('IP Address: %s' % emulator.aIPAddr)
    elif args.supported is not None:
        device = args.supported[0]
        num_supported_devices = jlink.num_supported_devices()
        for i in range(num_supported_devices):
            found_device = jlink.supported_device(i)
            if device.lower() == found_device.name.lower():
                print('Device Name: %s' % device)
                print('Core ID: %s' % found_device.CoreId)
                print('Flash Address: %s' % found_device.FlashAddr)
                print('Flash Size: %s bytes' % found_device.FlashSize)
                print('RAM Address: %s' % found_device.RAMAddr)
                print('RAM Size: %s bytes' % found_device.RAMSize)
                print('Manufacturer: %s' % found_device.manufacturer)
                break
        else:
            print('%s is not supported :(' % device)
    return None

Runs the emulator command.

Args:
    self (EmulatorCommand): the ``EmulatorCommand`` instance
    args (Namespace): arguments to parse

Returns:
    ``None``
codesearchnet
def expected_h(nvals, fit='RANSAC'):
    rsvals = [expected_rs(n) for n in nvals]
    poly = poly_fit(np.log(nvals), np.log(rsvals), 1, fit=fit)
    return poly[0]

Uses expected_rs to calculate the expected value for the Hurst exponent h
based on the values of n used for the calculation.

Args:
    nvals (iterable of int): the values of n used to calculate the
        individual (R/S)_n

Kwargs:
    fit (str): the fitting method to use for the line fit, either 'poly'
        for normal least squares polynomial fitting or 'RANSAC' for
        RANSAC-fitting which is more robust to outliers

Returns:
    float: expected h for white noise
codesearchnet
def mixture_stddev(mixture_weight_vector, mean_vector, stddev_vector):
    tensorshape_util.assert_has_rank(mixture_weight_vector.shape, 2)
    if not tensorshape_util.is_compatible_with(mean_vector.shape,
                                               mixture_weight_vector.shape):
        raise ValueError('Expecting means to have same shape as mixture weights.')
    if not tensorshape_util.is_compatible_with(stddev_vector.shape,
                                               mixture_weight_vector.shape):
        raise ValueError('Expecting stddevs to have same shape as mixture weights.')

    # Reshape the distribution parameters for batched vectorized dot products.
    pi_for_dot_prod = tf.expand_dims(mixture_weight_vector, axis=1)
    mu_for_dot_prod = tf.expand_dims(mean_vector, axis=2)
    sigma_for_dot_prod = tf.expand_dims(stddev_vector, axis=2)

    mean_wa = tf.matmul(pi_for_dot_prod, mu_for_dot_prod)
    mean_wa = tf.reshape(mean_wa, (-1,))
    var_wa = tf.matmul(pi_for_dot_prod, tf.square(sigma_for_dot_prod))
    var_wa = tf.reshape(var_wa, (-1,))
    sq_mean_wa = tf.matmul(pi_for_dot_prod, tf.square(mu_for_dot_prod))
    sq_mean_wa = tf.reshape(sq_mean_wa, (-1,))
    mixture_variance = var_wa + sq_mean_wa - tf.square(mean_wa)
    return tf.sqrt(mixture_variance)

Computes the standard deviation of a mixture distribution.

This function works regardless of the component distribution, so long as
each component's mean and standard deviation can be provided.

Args:
    mixture_weight_vector: A 2D tensor with shape
        [batch_size, num_components]
    mean_vector: A 2D tensor of mixture component means. Has shape
        `[batch_size, num_components]`.
    stddev_vector: A 2D tensor of mixture component standard deviations.
        Has shape `[batch_size, num_components]`.

Returns:
    A 1D tensor of shape `[batch_size]` representing the standard deviation
    of the mixture distribution with given weights and component means and
    standard deviations.

Raises:
    ValueError: If the shapes of the input tensors are not as expected.
codesearchnet
def get_special_tokens_mask(self, token_ids_0: List[int],
                            token_ids_1: Optional[List[int]] = None,
                            already_has_special_tokens: bool = False) -> List[int]:
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1,
            already_has_special_tokens=True)
    if token_ids_1 is not None:
        return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1)
    return [1] + [0] * len(token_ids_0)

Retrieve sequence ids from a token list that has no special tokens added.
This method is called when adding special tokens using the tokenizer
`prepare_for_model` method.

Args:
    token_ids_0 (`List[int]`):
        List of IDs.
    token_ids_1 (`List[int]`, *optional*):
        Optional second list of IDs for sequence pairs.
    already_has_special_tokens (`bool`, *optional*, defaults to `False`):
        Whether or not the token list is already formatted with special
        tokens for the model.

Returns:
    `List[int]`: A list of integers in the range [0, 1]: 1 for a special
    token, 0 for a sequence token.
github-repos
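A worked example of the mask layout this method produces, read directly from the return expressions above; `tokenizer` stands in for an instance of the defining class and the token ids are arbitrary placeholders:

mask = tokenizer.get_special_tokens_mask([5, 6, 7])
# [1, 0, 0, 0] -- one leading special token, then three sequence tokens

mask = tokenizer.get_special_tokens_mask([5, 6], [8, 9, 10])
# [1, 0, 0, 1, 0, 0, 0] -- a special token prefixes each segment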
def _parse_config(self):
    config = self.get_block('mlag configuration')
    cfg = dict()
    cfg.update(self._parse_domain_id(config))
    cfg.update(self._parse_local_interface(config))
    cfg.update(self._parse_peer_address(config))
    cfg.update(self._parse_peer_link(config))
    cfg.update(self._parse_shutdown(config))
    return dict(config=cfg)

Parses the mlag global configuration.

Returns:
    dict: A dict object that is intended to be merged into the resource
    dict
codesearchnet
def _register_notification_callback(self, connection_handle, attribute_handle,
                                    callback, once=False):
    notification_id = (connection_handle, attribute_handle)
    with self.notification_callbacks_lock:
        self.notification_callbacks[notification_id] = (callback, once)

Register a callback as a notification callback. It will be called if a
notification with the matching connection_handle and attribute_handle is
received.

Args:
    connection_handle (int): The connection handle to watch
    attribute_handle (int): The attribute handle to watch
    callback (func): The callback function to call once the notification
        has been received
    once (bool): Should the callback only be called once (and then removed
        from the notification callbacks)
codesearchnet
def derivative_extraction(feat, DeltaWindows):
    rows, cols = feat.shape
    DIF = np.zeros(feat.shape, dtype=feat.dtype)
    Scale = 0
    # Pad only along the frame axis, replicating the edge frames.
    FEAT = np.lib.pad(feat, ((0, 0), (DeltaWindows, DeltaWindows)), 'edge')
    for i in range(DeltaWindows):
        offset = DeltaWindows
        Range = i + 1
        dif = Range * FEAT[:, offset + Range:offset + Range + cols] \
            - FEAT[:, offset - Range:offset - Range + cols]
        Scale += 2 * np.power(Range, 2)
        DIF += dif
    return DIF / Scale

This function computes the derivative features.

Args:
    feat (array): The main feature vector (for returning the second order
        derivative it can be the first-order derivative).
    DeltaWindows (int): The value of DeltaWindows is set using the
        configuration parameter DELTAWINDOW.

Returns:
    array: Derivative feature vector - A NUMFRAMESxNUMFEATURES numpy array
    which is the derivative features along the features.
juraj-google-style
def datasets_insert(self, dataset_name, friendly_name=None, description=None):
    url = Api._ENDPOINT + (Api._DATASETS_PATH % (dataset_name.project_id, ''))
    data = {
        'kind': 'bigquery#dataset',
        'datasetReference': {
            'projectId': dataset_name.project_id,
            'datasetId': dataset_name.dataset_id,
        },
    }
    if friendly_name:
        data['friendlyName'] = friendly_name
    if description:
        data['description'] = description
    return datalab.utils.Http.request(url, data=data, credentials=self._credentials)

Issues a request to create a dataset.

Args:
    dataset_name: the name of the dataset to create.
    friendly_name: (optional) the friendly name for the dataset
    description: (optional) a description for the dataset

Returns:
    A parsed result object.

Raises:
    Exception if there is an error performing the operation.
codesearchnet
def options(self, section):
    if not self.has_section(section):
        raise NoSectionError(section) from None
    return self.__getitem__(section).options()

Returns list of configuration options for the named section.

Args:
    section (str): name of section

Returns:
    list: list of option names
juraj-google-style
def call_remoteckan(self, *args, **kwargs):
    requests_kwargs = kwargs.get('requests_kwargs', dict())
    credentials = self._get_credentials()
    if credentials:
        requests_kwargs['auth'] = credentials
    kwargs['requests_kwargs'] = requests_kwargs
    apikey = kwargs.get('apikey', self.get_api_key())
    kwargs['apikey'] = apikey
    return self.remoteckan().call_action(*args, **kwargs)

Calls the remote CKAN.

Args:
    *args: Arguments to pass to remote CKAN call_action method
    **kwargs: Keyword arguments to pass to remote CKAN call_action method

Returns:
    Dict: The response from the remote CKAN call_action method
juraj-google-style
def export_model(module_spec, class_count, saved_model_dir):
    sess, in_image, _, _, _, _ = build_eval_session(module_spec, class_count)
    with sess.graph.as_default() as graph:
        tf.saved_model.simple_save(
            sess, saved_model_dir,
            inputs={'image': in_image},
            outputs={'prediction': graph.get_tensor_by_name('final_result:0')},
            legacy_init_op=tf.group(tf.tables_initializer(), name='legacy_init_op'))

Exports model for serving.

Args:
    module_spec: The hub.ModuleSpec for the image module being used.
    class_count: The number of classes.
    saved_model_dir: Directory in which to save exported model and
        variables.
codesearchnet
def SignBuffer(self, in_buffer):
    precondition.AssertType(in_buffer, bytes)
    with tempfile.NamedTemporaryFile() as temp_in:
        temp_in.write(in_buffer)
        temp_in.seek(0)
        outfile = self.SignFile(temp_in.name)
    with io.open(outfile, 'rb') as filedesc:
        return filedesc.read()

Sign a buffer via temp files.

Our signing tool can't sign a buffer, so we work around it using
temporary files.

Args:
    in_buffer: data to sign

Returns:
    signed data
codesearchnet
def _parse_description(self, config):
    value = None
    match = re.search(r'description (.+)$', config, re.M)
    if match:
        value = match.group(1)
    return dict(description=value)

Scans the specified config block and returns the description value.

Args:
    config (str): The interface config block to scan

Returns:
    dict: Returns a dict object with the description value retrieved from
    the config block. If the description value is not configured, None is
    returned as the value. The returned dict is intended to be merged into
    the interface resource dict.
juraj-google-style
def load_checkpoints(self, checkpointDirs):
    self.memo_lookup_table = None
    if not checkpointDirs:
        return {}
    if type(checkpointDirs) is not list:
        raise BadCheckpoint('checkpointDirs expects a list of checkpoints')
    return self._load_checkpoints(checkpointDirs)

Load checkpoints from the checkpoint files into a dictionary.

The results are used to pre-populate the memoizer's lookup_table.

Kwargs:
    - checkpointDirs (list): List of run folders to use as checkpoints,
      e.g. ['runinfo/001', 'runinfo/002']

Returns:
    - dict containing hashed -> future mappings
codesearchnet
def indicators(self, indicator_type=None, filters=None, params=None):
    indicator = self._tcex.ti.indicator(indicator_type)
    for i in self.tc_requests.indicators_from_tag(
            indicator, self.name, filters=filters, params=params):
        yield i

Gets all indicators from a tag.

Args:
    indicator_type:
    filters:
    params:
codesearchnet
def sever_sink_ports(self, context, ports, connected_to=None):
    if connected_to:
        source_port_lookup = self._source_port_lookup(
            ports.get(connected_to, []))
    else:
        source_port_lookup = True
    sink_ports = self._get_flowgraph_ports(ports, SinkPortInfo)
    if sink_ports and source_port_lookup:
        child = context.block_view(self.mri)
        attribute_values = {}
        for name, port_info in sink_ports.items():
            if source_port_lookup is True or source_port_lookup.get(
                    child[name].value, None) == port_info.port:
                attribute_values[name] = port_info.disconnected_value
        child.put_attribute_values(attribute_values)

Conditionally sever Sink Ports of the child. If connected_to is None then
sever all, otherwise restrict to connected_to's Source Ports.

Args:
    context (Context): The context to use
    ports (dict): {part_name: [PortInfo]}
    connected_to (str): Restrict severing to this part
juraj-google-style
def get_nn(self, structure, n):
    return [e['site'] for e in self.get_nn_info(structure, n)]

Get near neighbors of site with index n in structure.

Args:
    structure (Structure): input structure.
    n (integer): index of site in structure for which to determine
        neighbors.

Returns:
    sites (list of Site objects): near neighbors.
juraj-google-style
def tflite_to_tosa_bytecode(flatbuffer, bytecode, use_external_constant=False,
                            ordered_input_arrays=None, ordered_output_arrays=None):
    pywrap_mlir.experimental_tflite_to_tosa_bytecode(
        flatbuffer, bytecode, use_external_constant,
        ordered_input_arrays, ordered_output_arrays)

Converts TFLite flatbuffer to TOSA dialect in MLIR bytecode.

Args:
    flatbuffer: Path to flatbuffer.
    bytecode: Path to output bytecode.
    use_external_constant: Whether to create `tfl.external_const` instead
        of `tfl.const`.
    ordered_input_arrays:
    ordered_output_arrays: If ordered_output_arrays is not empty, then the
        function will only return nodes in ordered_output_arrays in the
        same order
github-repos
def op_priority(op_type):
    if op_type in ('Const', 'Shape', 'BroadcastGradientArgs', 'Range',
                   'VariableShape', 'Fill', 'OneHot', 'ShapeN'):
        return 7
    if op_type in ('Identity', 'Cast', 'Reshape', 'ExpandDims', 'StopGradient',
                   'PreventGradient', 'Squeeze', 'Gather', 'GatherNd'):
        return 6
    if op_type in ('ConcatV2', 'Concat', 'StridedSlice', 'Slice', 'Pack', 'Tile',
                   'CollectivePermute', 'SplitV', 'DynamicPartition'):
        return 5
    if op_type in ('Pad', 'RandomUniformInt', 'GreaterEqual'):
        return 4
    if op_type in ('Sum', 'AddV2', 'Add', 'AddN', 'BiasAdd', 'CrossReplicaSum'):
        return 3
    if op_type in ('Neg', 'Sub'):
        return 2
    if op_type in ('Mul', 'Square', 'MatMul', 'RandomUniform', 'Select',
                   'Maximum', 'Mean', 'Variance', 'Exp', 'Rsqrt'):
        return 1
    return 2

Returns the priority of the op.

If the priority of the op is k, it will be traced if trace_level >= k.

Args:
    op_type: String name of the operation type.

Returns:
    Integer value corresponding to the priority of the op.
github-repos
def verify_exhausted_iterator(self, ds_fn, num_outputs, sparse_tensors=False,
                              assert_items_equal=False):
    del assert_items_equal  # Unused by this verification.
    self.gen_outputs(ds_fn, [], num_outputs, verify_exhausted=True,
                     sparse_tensors=sparse_tensors)
    actual = self.gen_outputs(ds_fn, [], 0, ckpt_saved=True,
                              verify_exhausted=True,
                              sparse_tensors=sparse_tensors)
    self.assertLen(actual, 0)

Verifies that saving and restoring an exhausted iterator works.

An exhausted iterator is one which has returned an OutOfRange error.

Args:
    ds_fn: 0-argument function that returns a Dataset.
    num_outputs: Total number of outputs expected from this Dataset.
    sparse_tensors: Whether dataset is built from SparseTensor(s).
    assert_items_equal: Tests the output has the expected elements
        regardless of order.

Raises:
    AssertionError if any test fails.
github-repos
def _catch_errors(a_func, to_catch):
    def inner(*args, **kwargs):
        try:
            return a_func(*args, **kwargs)
        except tuple(to_catch) as exception:
            utils.raise_with_traceback(
                gax.errors.create_error('RPC failed', cause=exception))
    return inner

Updates a_func to wrap exceptions with GaxError.

Args:
    a_func (callable): A callable.
    to_catch (list[Exception]): Configures the exceptions to wrap.

Returns:
    Callable: A function that will wrap certain exceptions with GaxError
juraj-google-style
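A brief usage sketch; the failing RPC stub here is hypothetical:

def flaky_rpc():
    raise ConnectionError('connection reset')  # stand-in for a transport error

wrapped = _catch_errors(flaky_rpc, [ConnectionError])
wrapped()  # raises a GaxError ('RPC failed') with the ConnectionError as its cause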
def multithread_predict_dataflow(dataflows, model_funcs):
    num_worker = len(model_funcs)
    assert len(dataflows) == num_worker
    if num_worker == 1:
        return predict_dataflow(dataflows[0], model_funcs[0])
    kwargs = {'thread_name_prefix': 'EvalWorker'} if sys.version_info.minor >= 6 else {}
    with ThreadPoolExecutor(max_workers=num_worker, **kwargs) as executor, \
            tqdm.tqdm(total=sum([df.size() for df in dataflows])) as pbar:
        futures = []
        for dataflow, pred in zip(dataflows, model_funcs):
            futures.append(executor.submit(predict_dataflow, dataflow, pred, pbar))
        all_results = list(itertools.chain(*[fut.result() for fut in futures]))
    return all_results

Running multiple `predict_dataflow` in multiple threads, and aggregate the
results.

Args:
    dataflows: a list of DataFlow to be used in :func:`predict_dataflow`
    model_funcs: a list of callable to be used in :func:`predict_dataflow`

Returns:
    list of dict, in the format used by
    `DetectionDataset.eval_or_save_inference_results`
juraj-google-style
def get_blob(profile, sha):
    resource = '/blobs/' + sha
    data = api.get_request(profile, resource)
    return prepare(data)

Fetch a blob.

Args:
    profile: A profile generated from ``simplygithub.authentication.profile``.
        Such profiles tell this module (i) the ``repo`` to connect to, and
        (ii) the ``token`` to connect with.
    sha: The SHA of the blob to fetch.

Returns:
    A dict with data about the blob.
codesearchnet
def __init__(self, **kwargs):
    prefix_chars = kwargs.get('prefix_chars', '-')
    if prefix_chars != '-':
        raise ValueError(
            'argparse_flags.ArgumentParser only supports "-" as the prefix '
            'character, found "{}".'.format(prefix_chars))

    # Remove inherited_absl_flags before calling the argparse constructor,
    # which does not know about it.
    self._inherited_absl_flags = kwargs.pop('inherited_absl_flags', flags.FLAGS)
    super(ArgumentParser, self).__init__(**kwargs)

    if self.add_help:
        self.add_argument(
            '--helpshort', action='help',
            default=argparse.SUPPRESS, help=argparse.SUPPRESS)
        self.add_argument(
            '--helpfull', action=_HelpFullAction,
            default=argparse.SUPPRESS, help='show full help message and exit')

    if self._inherited_absl_flags:
        self.add_argument('--undefok', help=argparse.SUPPRESS)
        self._define_absl_flags(self._inherited_absl_flags)

Initializes ArgumentParser.

Args:
    **kwargs: same as argparse.ArgumentParser, except:
        1. It also accepts `inherited_absl_flags`: the absl flags to
           inherit. The default is the global absl.flags.FLAGS instance.
           Pass None to ignore absl flags.
        2. The `prefix_chars` argument must be the default value '-'.

Raises:
    ValueError: Raised when prefix_chars is not '-'.
juraj-google-style
def char(self, c: str) -> None:
    if self.peek() == c:
        self.offset += 1
    else:
        raise UnexpectedInput(self, f"char '{c}'")

Parse the specified character.

Args:
    c: One-character string.

Raises:
    EndOfInput: If past the end of `self.input`.
    UnexpectedInput: If the next character is different from `c`.
juraj-google-style
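A short sketch of how such a parser method is typically driven; `Parser` stands in for whatever class defines char() and peek():

p = Parser('{"key": 1}')  # hypothetical parser over this input
p.char('{')               # consumes '{' and advances the offset
p.char('[')               # raises UnexpectedInput: the next character is '"'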
def _transform(transformer_chain: Sequence[Tuple[DataTransformer, Type]],
               data: S, context: PipelineContext = None) -> T:
    for transformer, target_type in transformer_chain:
        data = transformer.transform(target_type, data, context)
    return data

Transform data to a new type.

Args:
    transformer_chain: A sequence of (transformer, type) pairs to convert
        the data.
    data: The data to be transformed.
    context: The context of the transformations (mutable).

Returns:
    The transformed data.
codesearchnet
def task_address(self, job_name, task_index):
    try:
        job = self._cluster_spec[job_name]
    except KeyError:
        raise ValueError('No such job in cluster: %r' % job_name)
    try:
        return job[task_index]
    except KeyError:
        raise ValueError('No task with index %r in job %r' % (task_index, job_name))

Returns the address of the given task in the given job.

Args:
    job_name: The string name of a job in this cluster.
    task_index: A non-negative integer.

Returns:
    The address of the given task in the given job.

Raises:
    ValueError: If `job_name` does not name a job in this cluster, or no
        task with index `task_index` is defined in that job.
github-repos
def has_open_file(self, file_object):
    return file_object in [wrappers[0].get_object()
                           for wrappers in self.open_files if wrappers]

Return True if the given file object is in the list of open files.

Args:
    file_object: The FakeFile object to be checked.

Returns:
    `True` if the file is open.
codesearchnet
def _submitQuery(self, gitquery, gitvars={}, verbose=False, rest=False):
    errOut = DEVNULL if not verbose else None
    authhead = 'Authorization: bearer ' + self.__githubApiToken

    bashcurl = ('curl -iH TMPauthhead -X POST -d TMPgitquery https://api.github.com/graphql'
                if not rest else 'curl -iH TMPauthhead https://api.github.com' + gitquery)
    bashcurl_list = bashcurl.split()
    bashcurl_list[2] = authhead
    if not rest:
        gitqueryJSON = json.dumps({'query': gitquery, 'variables': json.dumps(gitvars)})
        bashcurl_list[6] = gitqueryJSON

    fullResponse = check_output(bashcurl_list, stderr=errOut).decode()
    _vPrint(verbose, '\n' + fullResponse)
    fullResponse = fullResponse.split('\r\n\r\n')
    heads = fullResponse[0].split('\r\n')
    if len(fullResponse) > 1:
        result = fullResponse[1]
    else:
        result = ''
    http = heads[0].split()
    statusNum = int(http[1])

    # Parse the response headers into a dictionary.
    headDict = {}
    headDict['http'] = heads[0]
    for header in heads[1:]:
        h = header.split(': ')
        headDict[h[0]] = h[1]

    # Parse any Link header for pagination data.
    linkDict = None
    if 'Link' in headDict:
        linkProperties = headDict['Link'].split(', ')
        propDict = {}
        for item in linkProperties:
            divided = re.split(r'<(https://[^>]*)>; rel="([^"]*)"', item)
            propDict[divided[2]] = divided[1]
        linkDict = propDict

    return {'statusNum': statusNum, 'headDict': headDict,
            'linkDict': linkDict, 'result': result}

Send a curl request to GitHub.

Args:
    gitquery (str): The query or endpoint itself.
        Examples:
            query: 'query { viewer { login } }'
            endpoint: '/user'
    gitvars (Optional[Dict]): All query variables. Defaults to empty.
    verbose (Optional[bool]): If False, stderr prints will be suppressed.
        Defaults to False.
    rest (Optional[bool]): If True, uses the REST API instead of GraphQL.
        Defaults to False.

Returns:
    {
        'statusNum' (int): The HTTP status code.
        'headDict' (Dict[str]): The response headers.
        'linkDict' (Dict[int]): Link based pagination data.
        'result' (str): The body of the response.
    }
codesearchnet
def basis(sample_paths):
    samples = tf.convert_to_tensor(sample_paths)
    dim = samples.shape.as_list()[-1]
    grid = tf.range(0, degree + 1, dtype=samples.dtype)
    samples_centered = samples - tf.math.reduce_mean(samples, axis=0)
    samples_centered = tf.expand_dims(samples_centered, -2)
    # Build the grid of all exponent combinations in {0, ..., degree}^dim.
    grid = tf.meshgrid(*dim * [grid])
    grid = tf.reshape(tf.stack(grid, -1), [-1, dim])
    # Evaluate every monomial x_1**i_1 * ... * x_dim**i_dim at each sample.
    basis_expansion = tf.reduce_prod(samples_centered ** grid, -1)
    return tf.transpose(basis_expansion)

Computes polynomial basis expansion at the given sample points.

Args:
    sample_paths: A `Tensor` of either `float32` or `float64` dtype and of
        shape `[num_samples, dim]` where `dim` has to be statically known.

Returns:
    A `Tensor` of shape `[(degree + 1)**dim, num_samples]`.
github-repos
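A worked sketch of the exponent grid, assuming `degree = 2` is visible to `basis` (in the library it is a closure variable of the enclosing factory function):

import tensorflow as tf

# With degree = 2 and dim = 2, the meshgrid yields exponent rows
# (0, 0), (0, 1), (0, 2), (1, 0), ..., (2, 2): (degree + 1)**dim = 9 rows.
# Each output row k holds x[s, 0]**i_k * x[s, 1]**j_k over all samples s.
degree = 2
sample_paths = tf.random.normal([1000, 2], dtype=tf.float64)
print(basis(sample_paths).shape)  # (9, 1000)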
def register_views(self, app):
    self.add_resource(LoginRedirectView, '/auth/login')
    self.add_resource(LogoutRedirectView, '/auth/logout')

    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.auth']['plugins']:
        cls = entry_point.load()
        app.available_auth_systems[cls.name] = cls
        if app.register_auth_system(cls):
            for vcls in cls.views:
                self.add_resource(vcls, *vcls.URLS)
                logger.debug('Registered auth system view {} for paths: {}'.format(
                    cls.__name__, ', '.join(vcls.URLS)))

    if not app.active_auth_system:
        logger.error('No auth systems active, please enable an auth system and then start the system again')
        sys.exit(-1)

    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.views']['plugins']:
        view = entry_point.load()
        self.add_resource(view, *view.URLS)
        app.register_menu_item(view.MENU_ITEMS)
        logger.debug('Registered view {} for paths: {}'.format(
            view.__name__, ', '.join(view.URLS)))

Iterates all entry points for views and auth systems, dynamically loading
them and registering the routes with Flask.

Args:
    app (`CINQFlask`): CINQFlask object to register views for

Returns:
    `None`
codesearchnet
def Match(self, event):
    if not self._matcher:
        return True
    self._decision = self._matcher.Matches(event)
    return self._decision

Determines if an event matches the filter.

Args:
    event (EventObject): an event.

Returns:
    bool: True if the event matches the filter.
codesearchnet
def match_pattern(expr_or_pattern: object, expr: object) -> MatchDict:
    try:
        return expr_or_pattern.match(expr)
    except AttributeError:
        if expr_or_pattern == expr:
            return MatchDict()
        else:
            res = MatchDict()
            res.success = False
            res.reason = "Expressions '%s' and '%s' are not the same" % (
                repr(expr_or_pattern), repr(expr))
            return res

Recursively match `expr` with the given `expr_or_pattern`.

Args:
    expr_or_pattern: either a direct expression (equal to `expr` for a
        successful match), or an instance of :class:`Pattern`.
    expr: the expression to be matched
juraj-google-style
def eigvals(tensor, name=None):
    if tensor.dtype == dtypes.float32 or tensor.dtype == dtypes.complex64:
        out_dtype = dtypes.complex64
    elif tensor.dtype == dtypes.float64 or tensor.dtype == dtypes.complex128:
        out_dtype = dtypes.complex128
    e, _ = gen_linalg_ops.eig(tensor, Tout=out_dtype, compute_v=False, name=name)
    return e

Computes the eigenvalues of one or more matrices.

Note: If your program backpropagates through this function, you should
replace it with a call to tf.linalg.eig (possibly ignoring the second
output) to avoid computing the eigen decomposition twice. This is because
the eigenvectors are used to compute the gradient w.r.t. the eigenvalues.
See _SelfAdjointEigV2Grad in linalg_grad.py.

Args:
    tensor: `Tensor` of shape `[..., N, N]`.
    name: string, optional name of the operation.

Returns:
    e: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains
    the `N` eigenvalues of `tensor[..., :, :]`.
github-repos
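A quick check via the public wrapper tf.linalg.eigvals, using a matrix with known eigenvalues; note the complex output dtype even for real input:

import tensorflow as tf

m = tf.constant([[2.0, 0.0], [0.0, 3.0]])
print(tf.linalg.eigvals(m))  # [2.+0.j 3.+0.j], dtype complex64 for float32 input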
def return_type(type_name, formatter=None):
    def _returns(func):
        annotated(func)
        func.metadata.typed_returnvalue(type_name, formatter)
        return func
    return _returns

Specify that this function returns a typed value.

Args:
    type_name (str): A type name known to the global typedargs type system
    formatter (str): An optional name of a formatting function specified
        for the type given in type_name.
juraj-google-style
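A minimal decorator usage sketch; 'integer' and 'hex' are placeholder names for a type and formatter assumed to be registered with the typedargs type system:

@return_type('integer', formatter='hex')
def read_register(addr):
    return 0x2A  # displayed using the 'hex' formatter of the 'integer' type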
def _ConvertCollectionsCounterToDict(cls, collections_counter):
    if not isinstance(collections_counter, collections.Counter):
        raise TypeError

    json_dict = {'__type__': 'collections.Counter'}
    for attribute_name, attribute_value in iter(collections_counter.items()):
        if attribute_value is None:
            continue
        if isinstance(attribute_value, py2to3.BYTES_TYPE):
            attribute_value = {
                '__type__': 'bytes',
                'stream': '{0:s}'.format(binascii.b2a_qp(attribute_value))}
        json_dict[attribute_name] = attribute_value

    return json_dict

Converts a collections.Counter object into a JSON dictionary.

The resulting dictionary of the JSON serialized objects consists of:
{
    '__type__': 'collections.Counter'
    ...
}

Here '__type__' indicates the object base type. In this case
'collections.Counter'. The rest of the elements of the dictionary make up
the collections.Counter object attributes.

Args:
    collections_counter (collections.Counter): counter.

Returns:
    dict[str, object]: JSON serialized objects.

Raises:
    TypeError: if not an instance of collections.Counter.
codesearchnet
def get_top_docs(self, question_hidden_states: np.ndarray,
                 n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
    raise NotImplementedError

For each query in the batch, retrieves `n_docs` documents.

Args:
    question_hidden_states (`np.ndarray` of shape
        `(batch_size, vector_size)`): An array of query vectors.
    n_docs (`int`): The number of docs retrieved per query.

Returns:
    `np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of
    retrieved documents.
    `np.ndarray` of shape `(batch_size, vector_size)`: A tensor of vector
    representations of retrieved documents.
github-repos
def _set_advertising_data(self, packet_type, data):
    payload = struct.pack("<BB%ss" % len(data), packet_type, len(data), bytes(data))
    response = self._send_command(6, 9, payload)
    result, = unpack("<H", response.payload)
    if result != 0:
        return False, {'reason': 'Error code from BLED112 setting advertising data',
                       'code': result}
    return True, None

Set the advertising data for advertisements sent out by this bled112.

Args:
    packet_type (int): 0 for advertisement, 1 for scan response data
    data (bytearray): the data to set
juraj-google-style
def _StartAnalysisProcesses(self, storage_writer, analysis_plugins):
    logger.info('Starting analysis plugins.')
    for analysis_plugin in analysis_plugins.values():
        self._analysis_plugins[analysis_plugin.NAME] = analysis_plugin
        process = self._StartWorkerProcess(analysis_plugin.NAME, storage_writer)
        if not process:
            logger.error('Unable to create analysis process: {0:s}'.format(
                analysis_plugin.NAME))
    logger.info('Analysis plugins running')

Starts the analysis processes.

Args:
    storage_writer (StorageWriter): storage writer.
    analysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that
        should be run and their names.
juraj-google-style
def seek(self, offset, whence=os.SEEK_SET):
    if not self._is_open:
        raise IOError('Not opened.')
    self._vslvm_logical_volume.seek(offset, whence)

Seeks to an offset within the file-like object.

Args:
    offset (int): offset to seek to.
    whence (Optional(int)): value that indicates whether offset is an
        absolute or relative position within the file.

Raises:
    IOError: if the seek failed.
    OSError: if the seek failed.
juraj-google-style
def __generate_localization_dictionary_from_file(file_path, localization_entry_attribute_name_for_key):
    localization_dictionary = {}
    f = open_strings_file(file_path, "r+")
    header_comment_key_value_tuples = extract_header_comment_key_value_tuples_from_file(f)
    if len(header_comment_key_value_tuples) == 0:
        logging.warning("Couldn't find any strings in file '%s'. Check encoding and format." % file_path)
    for header_comment, comments, key, value in header_comment_key_value_tuples:
        localization_entry = LocalizationEntry(comments, key, value)
        localization_dictionary[
            localization_entry.__getattribute__(localization_entry_attribute_name_for_key)] = localization_entry
    f.close()
    return localization_dictionary

Generates a dictionary mapping between keys (defined by the given
attribute name) and localization entries.

Args:
    file_path (str): The strings file path.
    localization_entry_attribute_name_for_key: The name of the attribute
        of LocalizationEntry to use as key.

Returns:
    dict: A dictionary mapping between keys (defined by the given
    attribute name) and localization entries.
juraj-google-style
def GetMissingChunks(self, fd, length, offset):
    start_chunk = offset // fd.chunksize
    end_chunk = (offset + length - 1) // fd.chunksize
    relevant_chunks = range(start_chunk, end_chunk + 1)

    missing_chunks = set(relevant_chunks)
    for idx, metadata in iteritems(fd.ChunksMetadata(relevant_chunks)):
        if not self.DataRefreshRequired(last=metadata.get('last', None)):
            missing_chunks.remove(idx)

    return sorted(missing_chunks)

Return which chunks a file doesn't have.

Specifically, we return a list of the chunks specified by a length-offset
range which are not in the datastore.

Args:
    fd: The database object to read chunks from.
    length: Length to read.
    offset: File offset to read from.

Returns:
    A list of chunk numbers.
codesearchnet
def description(self, force_refresh=False):
    if force_refresh:
        self.clear_cache()
    if not self._tuning_job_describe_result:
        self._tuning_job_describe_result = \
            self._sage_client.describe_hyper_parameter_tuning_job(
                HyperParameterTuningJobName=self.name)
    return self._tuning_job_describe_result

Call ``DescribeHyperParameterTuningJob`` for the hyperparameter tuning job.

Args:
    force_refresh (bool): Set to True to fetch the latest data from
        SageMaker API.

Returns:
    dict: The Amazon SageMaker response for
    ``DescribeHyperParameterTuningJob``.
juraj-google-style
def add_field_with_label(self, key, label_description, field):
    self.inputs[key] = field
    label = Label(label_description)
    label.style['margin'] = '0px 5px'
    label.style['min-width'] = '30%'
    container = HBox()
    container.style.update({'justify-content': 'space-between',
                            'overflow': 'auto', 'padding': '3px'})
    container.append(label, key='lbl' + key)
    container.append(self.inputs[key], key=key)
    self.container.append(container, key=key)

Adds a field to the dialog together with a descriptive label and a unique
identifier.

Note:
    You can access the fields content calling the function
    GenericDialog.get_field(key).

Args:
    key (str): The unique identifier for the field.
    label_description (str): The string content of the description label.
    field (Widget): The instance of the field Widget. It can be for
        example a TextInput or maybe a custom widget.
codesearchnet
def draw_mask(im, mask, alpha=0.5, color=None):
    if color is None:
        color = PALETTE_RGB[np.random.choice(len(PALETTE_RGB))][::-1]
    im = np.where(np.repeat((mask > 0)[:, :, None], 3, axis=2),
                  im * (1 - alpha) + color * alpha, im)
    im = im.astype('uint8')
    return im

Overlay a mask on top of the image.

Args:
    im: a 3-channel uint8 image in BGR
    mask: a binary 1-channel image of the same size
    color: if None, will choose automatically
juraj-google-style
def stop(self, timeout_s=None):
    self.stopped.set()
    if self.thread:
        self.thread.join(timeout_s)
        return not self.thread.isAlive()
    else:
        return True

Stops the interval.

If a timeout is provided and stop returns False then the thread is
effectively abandoned in whatever state it was in (presumably
dead-locked).

Args:
    timeout_s: The time in seconds to wait on the thread to finish. By
        default it's forever.

Returns:
    False if a timeout was provided and we timed out.
codesearchnet
def with_input_types(self, input_type_hint):
    input_type_hint = native_type_compatibility.convert_to_beam_type(input_type_hint)
    validate_composite_type_param(input_type_hint, 'Type hints for a PTransform')
    return super().with_input_types(input_type_hint)

Annotates the input type of a :class:`PTransform` with a type-hint.

Args:
    input_type_hint (type): An instance of an allowed built-in type, a
        custom class, or an instance of a
        :class:`~apache_beam.typehints.typehints.TypeConstraint`.

Raises:
    TypeError: If **input_type_hint** is not a valid type-hint. See
        :obj:`apache_beam.typehints.typehints.validate_composite_type_param()`
        for further details.

Returns:
    PTransform: A reference to the instance of this particular
    :class:`PTransform` object. This allows chaining type-hinting related
    methods.
github-repos
def _SetPath(self, path):
    old_path = self._path
    if old_path and not io_wrapper.IsCloudPath(old_path):
        try:
            # Record the final size of the old file before moving on.
            size = tf.io.gfile.stat(old_path).length
            logger.debug('Setting latest size of %s to %d', old_path, size)
            self._finalized_sizes[old_path] = size
        except tf.errors.OpError as e:
            logger.error('Unable to get size of %s: %s', old_path, e)
    self._path = path
    self._loader = self._loader_factory(path)

Sets the current path to watch for new events.

This also records the size of the old path, if any. If the size can't be
found, an error is logged.

Args:
    path: The full path of the file to watch.
codesearchnet
def sym_get(self, path: Union[utils.KeyPath, str, int], default: Any=RAISE_IF_NOT_FOUND, use_inferred: bool=False) -> Any: path = utils.KeyPath.from_value(path) if default is RAISE_IF_NOT_FOUND: return path.query(self, use_inferred=use_inferred) else: return path.get(self, default, use_inferred=use_inferred)
Returns a sub-node by path. NOTE: there is no `sym_set`, use `sym_rebind`. Args: path: A KeyPath object or equivalence. default: Default value if path does not exist. If absent, `KeyError` will be thrown. use_inferred: If True, return inferred value instead of the symbolic form of `pg.Inferential` objects. Returns: Value of symbolic attribute specified by path if found, otherwise the default value if it's specified. Raises: KeyError if `path` does not exist and `default` is not specified.
github-repos
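A sketch assuming PyGlove's symbolic containers; sym_get accepts a dotted string path as a KeyPath equivalent:

import pyglove as pg

d = pg.Dict(a=pg.Dict(b=1))
assert d.sym_get('a.b') == 1
assert d.sym_get('a.c', default=None) is None  # no KeyError once a default is given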
def _get_relationships(self, dna: pg.DNA) -> Tuple[List[pg.DNA], List[Optional[pg.DNA]], List[Optional[int]]]: def is_mutable_node(obj): return self._is_mutable_node(obj) results = pg.query(dna, where=is_mutable_node, enter_selected=True) child_nodes = list(results.values()) parent_nodes = [n.parent_dna for n in child_nodes] child_indexes = [n.sym_path.key if n.parent_dna else None for n in child_nodes] return (child_nodes, parent_nodes, child_indexes)
Extracts the parent-child node relationships in a DNA. Note that PyGlove represents the nodes in a DNA instance as DNA instances themselves. Args: dna: the DNA that will be mutated. Returns: A tuple of 3 lists of the same length with corresponding elements: - child_nodes: a list of every node in the DNA. - parent_nodes: a list of the parent node of the corresponding node in `child_nodes`. - child_indexes: a list of indexes. For all j, child_nodes[j] is the i-th child of parent_nodes[j], where i = child_indexes[j]. Note that the root is included as a "child" with a `None` parent.
github-repos
def patch_deepCopy(self, patches): patchesCopy = [] for patch in patches: patchCopy = patch_obj() patchCopy.diffs = patch.diffs[:] patchCopy.start1 = patch.start1 patchCopy.start2 = patch.start2 patchCopy.length1 = patch.length1 patchCopy.length2 = patch.length2 patchesCopy.append(patchCopy) return patchesCopy
Given an array of patches, return another array that is identical. Args: patches: Array of Patch objects. Returns: Array of Patch objects.
codesearchnet
def _FormatDateTime(self, event): if (not event.timestamp): return 'N/A' date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=event.timestamp) (year, month, day_of_month) = date_time.GetDate() (hours, minutes, seconds) = date_time.GetTimeOfDay() try: return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'.format(year, month, day_of_month, hours, minutes, seconds) except (TypeError, ValueError): self._ReportEventError(event, 'unable to copy timestamp: {0!s} to a human readable date and time. Defaulting to: "0000-00-00 00:00:00"'.format(event.timestamp)) return '0000-00-00 00:00:00'
Formats the date and time. Args: event (EventObject): event. Returns: str: date and time string or "N/A" if no event timestamp is available.
codesearchnet
def _get_common_params(self, user_id, attributes): commonParams = {} commonParams[self.EventParams.PROJECT_ID] = self._get_project_id() commonParams[self.EventParams.ACCOUNT_ID] = self._get_account_id() visitor = {} visitor[self.EventParams.END_USER_ID] = user_id visitor[self.EventParams.SNAPSHOTS] = [] commonParams[self.EventParams.USERS] = [] commonParams[self.EventParams.USERS].append(visitor) commonParams[self.EventParams.USERS][0][self.EventParams.ATTRIBUTES] = self._get_attributes(attributes) commonParams[self.EventParams.SOURCE_SDK_TYPE] = 'python-sdk' commonParams[self.EventParams.ENRICH_DECISIONS] = True commonParams[self.EventParams.SOURCE_SDK_VERSION] = version.__version__ commonParams[self.EventParams.ANONYMIZE_IP] = self._get_anonymize_ip() commonParams[self.EventParams.REVISION] = self._get_revision() return commonParams
Get params that are common to both conversion and impression events. Args: user_id: ID for user. attributes: Dict representing user attributes and values which need to be recorded. Returns: Dict consisting of parameters common to both impression and conversion events.
juraj-google-style
def SplitKeyPath(key_path, path_separator=definitions.KEY_PATH_SEPARATOR): return list(filter(None, key_path.split(path_separator)))
Splits the key path into path segments. Args: key_path (str): key path. path_separator (Optional[str]): path separator. Returns: list[str]: key path segments without the root path segment, which is an empty string.
codesearchnet
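A quick example; the filter(None, ...) drops empty segments, so a leading separator (the root) does not yield an empty first element:

SplitKeyPath('\\HKEY_LOCAL_MACHINE\\Software\\Microsoft', path_separator='\\')
# -> ['HKEY_LOCAL_MACHINE', 'Software', 'Microsoft']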
def create_channels(self, dataset, token, new_channels_data): channels = {} for channel_new in new_channels_data: self._check_channel(channel_new.name) if channel_new.channel_type not in ['image', 'annotation']: raise ValueError('Channel type must be ' + 'neuroRemote.IMAGE or ' + 'neuroRemote.ANNOTATION.') if channel_new.readonly * 1 not in [0, 1]: raise ValueError("readonly must be 0 (False) or 1 (True).") channels[channel_new.name] = { "channel_name": channel_new.name, "channel_type": channel_new.channel_type, "datatype": channel_new.dtype, "readonly": channel_new.readonly * 1 } # `channels` is already a dict; wrapping it in a set literal would raise # TypeError (dicts are unhashable), so pass it directly req = requests.post(self.url("/{}/project/".format(dataset) + "{}".format(token)), json={"channels": channels}, verify=False) # `is not` compares identity, not value; use != for the status code if req.status_code != 201: raise RemoteDataUploadError('Could not upload {}'.format(req.text)) return True
Creates channels given a dictionary of new channel data in 'new_channels_data', a 'dataset' name, and a 'token' (project) name. Arguments: token (str): Token to identify project dataset (str): Dataset name to identify dataset to download from new_channels_data (dict): New channel data to upload into new channels Returns: bool: Process completed successfully or not
juraj-google-style
def Instance(reactor=None): if (NodeLeader._LEAD is None): NodeLeader._LEAD = NodeLeader(reactor) return NodeLeader._LEAD
Get the local node instance. Args: reactor: (optional) custom reactor to use in NodeLeader. Returns: NodeLeader: instance.
codesearchnet
def supported_features_mapping(*supported_features: str, onnx_config_cls: Optional[str]=None) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]: if onnx_config_cls is None: raise ValueError('A OnnxConfig class must be provided') config_cls = transformers for attr_name in onnx_config_cls.split('.'): config_cls = getattr(config_cls, attr_name) mapping = {} for feature in supported_features: if '-with-past' in feature: task = feature.replace('-with-past', '') mapping[feature] = partial(config_cls.with_past, task=task) else: mapping[feature] = partial(config_cls.from_model_config, task=feature) return mapping
Generate the mapping between the supported features and their corresponding OnnxConfig for a given model. Args: *supported_features: The names of the supported features. onnx_config_cls: The OnnxConfig full name corresponding to the model. Returns: The dictionary mapping a feature to an OnnxConfig constructor.
github-repos
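A usage sketch based on how transformers wires its ONNX feature table; the config class path is illustrative and may differ across versions:

mapping = supported_features_mapping(
    'default', 'default-with-past',
    onnx_config_cls='models.bert.BertOnnxConfig')
# mapping['default'] is a partial that builds an OnnxConfig from a model config:
# onnx_config = mapping['default'](model.config)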
def ToJson(self, index): return { 'n': index, 'asset': self.AssetId.To0xString(), 'value': self.Value.ToNeoJsonString(), 'address': self.Address }
Convert object members to a dictionary that can be parsed as JSON. Args: index (int): The index of the output in a transaction Returns: dict:
juraj-google-style
def __init__( self, location=None, parent=None, part_index=None, start_offset=None, **kwargs): if not parent: raise ValueError('Missing parent value.') super(TSKPartitionPathSpec, self).__init__(parent=parent, **kwargs) self.location = location self.part_index = part_index self.start_offset = start_offset
Initializes a path specification. Note that the TSK partition path specification must have a parent. Args: location (Optional[str]): location. parent (Optional[PathSpec]): parent path specification. part_index (Optional[int]): part index. start_offset (Optional[int]): start offset. Raises: ValueError: when parent is not set.
juraj-google-style
def __init__(self, *args, allow_comments=False, directory=None, **kwargs): super().__init__(*args, **kwargs) self.allow_comments = allow_comments self.dir = directory
Constructor. Also see Entry.__init__. Args: allow_comments (bool): Whether to allow comments. Default False. directory (str): Optional. If the page should live in a subdirectory instead of at the web root, specify it here instead of making it part of the slug.
juraj-google-style
def open_repository(path, spor_dir='.spor'): root = _find_root_dir(path, spor_dir) return Repository(root, spor_dir)
Open an existing repository. Args: path: Path to any file or directory within the repository. spor_dir: The name of the directory containing spor data. Returns: A `Repository` instance. Raises: ValueError: No repository is found.
codesearchnet
def zoom_blur(x, severity=1): c = [ np.arange(1, 1.11, 0.01), np.arange(1, 1.16, 0.01), np.arange(1, 1.21, 0.02), np.arange(1, 1.26, 0.02), np.arange(1, 1.31, 0.03) ][severity - 1] x = (np.array(x) / 255.).astype(np.float32) out = np.zeros_like(x) for zoom_factor in c: out += clipped_zoom(x, zoom_factor) x = (x + out) / (len(c) + 1) x_clip = np.clip(x, 0, 1) * 255 return around_and_astype(x_clip)
Apply zoom blur to an image by averaging progressively zoomed-in crops of its central region. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Applied zoom blur.
juraj-google-style
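A minimal usage sketch, assuming a square uint8 RGB input and that the clipped_zoom and around_and_astype helpers from the same corruption module are in scope:

import numpy as np

img = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
corrupted = zoom_blur(img, severity=3)  # uint8 image of the same shape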
def console_get_height_rect(con: tcod.console.Console, x: int, y: int, w: int, h: int, fmt: str) -> int: return int(lib.TCOD_console_get_height_rect_fmt(_console(con), x, y, w, h, _fmt(fmt)))
Return the height of this text once word-wrapped into this rectangle. Returns: int: The number of lines of text once word-wrapped. .. deprecated:: 8.5 Use :any:`Console.get_height_rect` instead.
codesearchnet
def inspect(self, nids=None, wslice=None, **kwargs): figs = [] for task in self.select_tasks(nids=nids, wslice=wslice): if hasattr(task, "inspect"): fig = task.inspect(**kwargs) if fig is None: cprint("Cannot inspect Task %s" % task, color="blue") else: figs.append(fig) else: cprint("Task %s does not provide an inspect method" % task, color="blue") return figs
Inspect the tasks (SCF iterations, Structural relaxation ...) and produces matplotlib plots. Args: nids: List of node identifiers. wslice: Slice object used to select works. kwargs: keyword arguments passed to `task.inspect` method. .. note:: nids and wslice are mutually exclusive. If nids and wslice are both None, all tasks in self are inspected. Returns: List of `matplotlib` figures.
juraj-google-style
def get_broker() -> 'Broker': global global_broker if (global_broker is None): from .brokers.rabbitmq import RabbitmqBroker set_broker(RabbitmqBroker(host='127.0.0.1', port=5672, heartbeat=5, connection_attempts=5, blocked_connection_timeout=30)) return global_broker
Get the global broker instance. If no global broker is set, this initializes a RabbitmqBroker and returns it. Returns: Broker: The default Broker.
codesearchnet
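A usage sketch of the lazy-singleton behavior; calling set_broker() beforehand (e.g. with a stub broker in tests) bypasses the RabbitMQ default:

broker = get_broker()          # first call builds the default RabbitmqBroker
assert get_broker() is broker  # later calls return the same instance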
def MetaGraph(self): if (self._meta_graph is None): raise ValueError('There is no metagraph in this EventAccumulator') meta_graph = meta_graph_pb2.MetaGraphDef() meta_graph.ParseFromString(self._meta_graph) return meta_graph
Return the metagraph definition, if there is one. Raises: ValueError: If there is no metagraph for this run. Returns: The `meta_graph_def` proto.
codesearchnet
def get_snpeff_info(snpeff_string, snpeff_header): snpeff_annotations = [dict(zip(snpeff_header, snpeff_annotation.split('|'))) for snpeff_annotation in snpeff_string.split(',')] return snpeff_annotations
Turn the snpeff ANN annotations into dictionaries. A snpeff dictionary has the snpeff column names as keys and the annotation fields as values. The dictionaries are stored in a list, one dictionary per transcript. Args: snpeff_string (string): A string with the ANN annotation snpeff_header (list): A list with the snpeff (ANN) header Return: snpeff_annotations (list): A list of snpeff dicts
codesearchnet
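A worked example: transcript annotations are ','-separated and fields within each annotation are '|'-separated, so two transcripts yield two dicts:

header = ['Allele', 'Annotation', 'Impact']
ann = 'A|missense_variant|MODERATE,A|synonymous_variant|LOW'
get_snpeff_info(ann, header)
# -> [{'Allele': 'A', 'Annotation': 'missense_variant', 'Impact': 'MODERATE'},
#     {'Allele': 'A', 'Annotation': 'synonymous_variant', 'Impact': 'LOW'}]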
def __init__(self, n, key=None, reverse=False): super().__init__() self._n = n self._key = key self._reverse = reverse
Creates a global Top operation. The arguments 'key' and 'reverse' may be passed as keyword arguments, and have the same meaning as for Python's sort functions. Args: n: number of elements to extract from pcoll. key: (optional) a mapping of elements to a comparable key, similar to the key argument of Python's sorting methods. reverse: (optional) whether to order things smallest to largest, rather than largest to smallest
github-repos
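A usage sketch via Apache Beam's combiners, which build on this operation; Top.Largest(2) emits a single list holding the two largest elements:

import apache_beam as beam
from apache_beam.transforms import combiners

with beam.Pipeline() as p:
    _ = (p
         | beam.Create([3, 1, 4, 1, 5])
         | combiners.Top.Largest(2))  # -> one element: [5, 4]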
def __toString(self, values): for key in values: # `values[key] is str` compared the value against the type object # itself; isinstance is the intended check if not isinstance(values[key], str): values[key] = str(values[key]) return values
Replace every dict value with its string representation. Args: values (dict): Dictionary of values Returns: Updated values dict
codesearchnet
def path_of_module(self, mod: nn.Module) -> str: try: return super().path_of_module(mod) except NameError as e: if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and (len(list(mod.buffers())) == 0): path = self._insert_module_as_submodule(mod) return path raise e
Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root` has a submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return the string "foo.bar". Args: mod (nn.Module): The `Module` to retrieve the qualified name for.
github-repos
def _process_debug_op_state_changes(self, event_reply=None): if event_reply is None: event_reply = debug_service_pb2.EventReply() while not self._debug_ops_state_change_queue.empty(): state_change = self._debug_ops_state_change_queue.get() debug_node_key = (state_change.node_name, state_change.output_slot, state_change.debug_op) if state_change.state == debug_service_pb2.EventReply.DebugOpStateChange.READ_WRITE: logging.info('Adding breakpoint %s:%d:%s', state_change.node_name, state_change.output_slot, state_change.debug_op) self._breakpoints.add(debug_node_key) elif state_change.state == debug_service_pb2.EventReply.DebugOpStateChange.READ_ONLY: logging.info('Adding watchpoint %s:%d:%s', state_change.node_name, state_change.output_slot, state_change.debug_op) if debug_node_key in self._breakpoints: self._breakpoints.discard(debug_node_key) elif state_change.state == debug_service_pb2.EventReply.DebugOpStateChange.DISABLED: logging.info('Removing watchpoint or breakpoint: %s:%d:%s', state_change.node_name, state_change.output_slot, state_change.debug_op) if debug_node_key in self._breakpoints: self._breakpoints.discard(debug_node_key) else: # logging.warn is a deprecated alias for logging.warning logging.warning('Attempting to remove a non-existent debug node key: %s', debug_node_key) new_state_change = event_reply.debug_op_state_changes.add() new_state_change.CopyFrom(state_change) return event_reply
Dequeue and process all the queued debug-op state change protos. Include all the debug-op state change protos in a `EventReply` proto. Args: event_reply: An `EventReply` to add the `DebugOpStateChange` protos to, or `None`. Returns: An `EventReply` proto with the dequeued `DebugOpStateChange` protos (if any) added.
github-repos