Columns: content (string, lengths 22 to 815k characters), id (int64, 0 to 4.91M).
def straightenImage(im, imextent, mvx=1, mvy=None, verbose=0, interpolation=cv2_interpolation):
    """ Scale image to make square pixels

    Arguments
    ---------
    im: array
        input image
    imextent: list of 4 floats
        coordinates of image region (x0, x1, y0, y1)
    mvx, mvy : float
        number of mV per pixel requested

    Returns
    -------
    ims: numpy array
        transformed image
    (fw, fh, mvx, mvy, H) : data
        H is the homogeneous transform from original to straightened image
    """
    if cv2 is None:
        raise Exception('opencv is not installed, method straightenImage is not available')

    dxmv = imextent[1] - imextent[0]
    dymv = imextent[3] - imextent[2]

    dx = im.shape[1]
    dy = im.shape[0]
    mvx0 = dxmv / float(dx - 1)  # mv/pixel
    mvy0 = dymv / float(dy - 1)

    if mvy is None:
        mvy = mvx

    fw = np.abs((float(mvx0) / mvx))
    fh = np.abs((float(mvy0) / mvy))

    if fw < .5:
        fwx = fw
        fac = 1
        ims = im
        while (fwx < .5):
            ims = cv2.resize(
                ims, None, fx=.5, fy=1, interpolation=cv2.INTER_LINEAR)
            fwx *= 2
            fac *= 2
        ims = cv2.resize(
            ims, None, fx=fac * fw, fy=fh, interpolation=interpolation)
    else:
        ims = cv2.resize(im, None, fx=fw, fy=fh, interpolation=interpolation)

    if verbose:
        print('straightenImage: size %s fx %.4f fy %.4f' % (im.shape, fw, fh))
        print('straightenImage: result size %s mvx %.4f mvy %.4f' % (ims.shape, mvx, mvy))

    H = pgeometry.pg_transl2H([-.5, -.5]).dot(np.diag([fw, fh, 1]).dot(pgeometry.pg_transl2H([.5, .5])))

    return ims, (fw, fh, mvx, mvy, H)
8,300
def H(r2, H_s, H_d, a_s, a_d, gamma_s, gamma_d, G, v):
    """Return the 2x2 response matrix H at squared radial distance r2:
    the first row holds the radial components and the second row the
    vertical components for the two sources (suffixes _s and _d)."""
    pi = math.pi
    sqrt = math.sqrt
    r = sqrt(r2)
    H2_s = H_s**2
    H2_d = H_d**2
    R2_s = r2 + H2_s
    R2_d = r2 + H2_d
    alpha_s = 1.0 if gamma_s == 1.0 else 4 * H2_s / (pi*R2_s)
    alpha_d = 1.0 if gamma_d == 1.0 else 4 * H2_d / (pi*R2_d)
    f_s = a_s**3 * alpha_s * (1-v) / (G * (H2_s+r2)**1.5)
    f_d = a_d**3 * alpha_d * (1-v) / (G * (H2_d+r2)**1.5)
    H = [
        [r*f_s,   r*f_d],    # the radial H
        [H_s*f_s, H_d*f_d]   # the vertical H
    ]
    return H
8,301
def refine_resolution(src_tif_path, dst_tif_path, dst_resolution, resample_alg='near'):
    """Resample the source raster to the given resolution with gdal.Warp.

    Supported values for resample_alg:
        near: nearest neighbour resampling (default, fastest algorithm, worst interpolation quality).
        bilinear: bilinear resampling.
        cubic: cubic resampling.
        cubicspline: cubic spline resampling.
        lanczos: Lanczos windowed sinc resampling.
        average: average resampling, computes the weighted average of all non-NODATA contributing pixels.
        mode: mode resampling, selects the value which appears most often of all the sampled points.
    """
    result = gdal.Warp(dst_tif_path, src_tif_path, xRes=dst_resolution,
                       yRes=dst_resolution, resampleAlg=resample_alg)
    # Dereference the dataset so GDAL flushes it to disk and closes the file handle.
    result = None
8,302
def test_mystery_3b_4() -> None:
    """Test mystery_3b for an expected return value of 4."""
    expected = 4
    actual = mystery_3b(-1, 0, -1)
    assert actual == expected
8,303
def parse_time(event_time):
    """Take a string representation of time from the blockchain, and parse it into datetime object."""
    return datetime.strptime(event_time, '%Y-%m-%dT%H:%M:%S')
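A minimal usage sketch for parse_time above:

ts = parse_time('2021-05-01T12:30:45')
print(ts)        # 2021-05-01 12:30:45
print(ts.year)   # 2021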
8,304
def do_pdftk_cat_first_page(pdf_file):
    """The command `magick identify` is very slow on large PDF files because it
    examines every page. We extract the first page to get some information
    about the dimensions of the PDF file."""
    output_file = os.path.join(tmp_dir, 'identify.pdf')
    cmd_args = ['pdftk', str(pdf_file), 'cat', '1', 'output', output_file]
    run.run(cmd_args)
    return output_file
8,305
def addBenchmark(df):
    """Add benchmark to df."""
    # Compute the inverse of the distance
    distance_inv = (1. / df.filter(regex='^distance*', axis=1)).values
    # Extract the station values
    values = df.filter(regex='value_*', axis=1)
    # Compute the numerator and denominator of the inverse-distance weighted average
    numer = (distance_inv * values).sum(axis=1)
    denom = (distance_inv * (values != 0)).sum(axis=1)
    # Compute the benchmark
    benchmark = numer / denom
    df["Benchmark"] = benchmark
    return df
8,306
def is_file_like(f):
    """Check to see if ``f`` has a ``read()`` method."""
    return hasattr(f, 'read') and callable(f.read)
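A quick usage sketch for is_file_like above:

import io

print(is_file_like(io.StringIO("hello")))   # True  -- has a callable read()
print(is_file_like("/tmp/somefile.txt"))    # False -- a path string has no read()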
8,307
def before_trading_start(context, data): """ Called every day before market open. This is where we get our stocks that made it through the pipeline. """ #pipeline_output returns a pandas dataframe that has columns for each factor we added to the pipeline, using pipe.add(), and has a row for each security that made it through context.output = pipeline_output('mean_reversion_algo') context.results = pipeline_output('mean_reversion_algo').iloc[:200] log.info(context.results.iloc[:5]) #get the securities we want to long from our pipeline output #note on python syntax. context.output[] returns the pandas dataframe. context.output[context.output['column']] returns a particular column. context.long_secs = context.output[context.output['low_returns']] #get our shorts securities context.short_secs = context.output[context.output['high_returns']] #get our list of securities for the day context.security_list = context.short_secs.index.union(context.long_secs.index).tolist() #convert them to a set, for faster lookup context.security_set = set(context.security_list)
8,308
def _string_to_days_since_date(dateStrings, referenceDate='0001-01-01'):
    """
    Turn an array-like of date strings into the number of days since the
    reference date
    """
    dates = [_string_to_datetime(string) for string in dateStrings]
    days = _datetime_to_days(dates, referenceDate=referenceDate)
    days = np.array(days)
    return days
8,309
def safe_as_int(val, atol=1e-3): """ Attempt to safely cast values to integer format. Parameters ---------- val : scalar or iterable of scalars Number or container of numbers which are intended to be interpreted as integers, e.g., for indexing purposes, but which may not carry integer type. atol : float Absolute tolerance away from nearest integer to consider values in ``val`` functionally integers. Returns ------- val_int : NumPy scalar or ndarray of dtype `cupy.int64` Returns the input value(s) coerced to dtype `cupy.int64` assuming all were within ``atol`` of the nearest integer. Notes ----- This operation calculates ``val`` modulo 1, which returns the mantissa of all values. Then all mantissas greater than 0.5 are subtracted from one. Finally, the absolute tolerance from zero is calculated. If it is less than ``atol`` for all value(s) in ``val``, they are rounded and returned in an integer array. Or, if ``val`` was a scalar, a NumPy scalar type is returned. If any value(s) are outside the specified tolerance, an informative error is raised. Examples -------- >>> safe_as_int(7.0) 7 >>> safe_as_int([9, 4, 2.9999999999]) array([9, 4, 3]) >>> safe_as_int(53.1) Traceback (most recent call last): ... ValueError: Integer argument required but received 53.1, check inputs. >>> safe_as_int(53.01, atol=0.01) 53 """ mod = np.asarray(val) % 1 # Extract mantissa # Check for and subtract any mod values > 0.5 from 1 if mod.ndim == 0: # Scalar input, cannot be indexed if mod > 0.5: mod = 1 - mod else: # Iterable input, now ndarray mod[mod > 0.5] = 1 - mod[mod > 0.5] # Test on each side of nearest int try: np.testing.assert_allclose(mod, 0, atol=atol) except AssertionError: raise ValueError( "Integer argument required but received " "{0}, check inputs.".format(val) ) return np.around(val).astype(np.int64)
8,310
def check_protocol(protocol):
    """
    Check if a given protocol works by computing the qubit excitation probabilities
    """
    qubit_weight = {}
    qubit_weight[protocol[0][0][0]] = 1.0
    for pair_set in protocol:
        for i, j, p in pair_set:
            qubit_weight[j] = qubit_weight[i] * (1.0 - p)
            qubit_weight[i] *= p
    return qubit_weight
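A small worked example for check_protocol above; the protocol is read here as a list of pair sets, each pair given as a tuple (i, j, p) where qubit i keeps a fraction p of its weight and passes the rest to qubit j:

protocol = [
    [(0, 1, 0.5)],    # first pair set: qubit 0 shares half its weight with qubit 1
    [(0, 2, 0.25)],   # second pair set: qubit 0 shares the remainder with qubit 2
]
print(check_protocol(protocol))
# {0: 0.125, 1: 0.5, 2: 0.375} -- the weights sum to 1.0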
8,311
def detachVolume(**kargs): """ detachVolume your Additional Volume * Args: - zone(String, Required) : [KR-CA, KR-CB, KR-M, KR-M2] - id(String, Required) : Volume disk ID * Examples : print(server.detachVolume(zone='KR-M', id='7f933f86-e8bf-4600-9423-09e8f1c84460')) """ my_apikey, my_secretkey = c.read_config() if not 'zone' in kargs: return c.printZoneHelp() if not 'id' in kargs: return '[ktcloud] Missing required argument \"id\" (disk volume id)' kargs['zoneid'] = c.getzoneidbyhname(kargs['zone']) M2Bool = c.IsM2(kargs['zone']) del kargs['zone'] baseurl = c.geturl(ctype='server', m2=M2Bool) kargs['command'] = 'detachVolume' kargs['response'] = 'json' kargs['apikey'] = my_apikey return c.makerequest(kargs, baseurl, my_secretkey)
8,312
def observe(metric: str, accept_on: List[Type[Exception]] = [], # pylint: disable=E1136 decline_on: List[Type[Exception]] = [], # pylint: disable=E1136 static_tags: List[str] = [], # pylint: disable=E1136 tags_from: Optional[Dict[str, List[str]]] = None, # pylint: disable=E1136 trace_id_from: Optional[Dict[str, str]] = None) -> Any: # pylint: disable=E1136 """This operator will, based on the provided setup generate logs, metrics, notifications on each call for that execution. Args: metric (str): The root-metric which will be updated during execution in e.g. DogStatsd. accept_on (Optional[List[Exception]], optional): A list of exceptions on which the message will be acknowledged. decline_on (Optional[List[Exception]], optional): A list of exceptions on which the message will be declined. static_tags (Optional[List[str]], optional): A list of tags to be appended on each metric update. tags_from (Optional[Dict[str, List[str]]], optional): A list of tags to be dynamically extracted from the key dictionary. trace_id_from (Optional[Dict[str, str]], optional): A trace_id to be appended on each log from the key dictionary. """ def arrange(func: Callable[..., Any]): @wraps(func) def inner(*args: Any, **kwargs: Any) -> Any: # setup tracing and tags trace_id = Resolver.resolve_trace_id(trace_id_from=trace_id_from, **kwargs) identity = Resolver.resolve_identity(*args, func=func, trace_id=trace_id) additional_tags = Resolver.resolve_tags_from(tags_from=tags_from, **kwargs) all_tags = additional_tags + static_tags imetric = Provider.get_metric(*args) # start timing time_start: float = time.monotonic() try: # actual function execution response: Any = func(*args, **kwargs) # calculate process time process_time = int(time.monotonic() - time_start) * 1000 # append extra tags all_tags.append(Resolver.resolve_observed_sli_tag(process_time=process_time)) # send metrics, finished successfully imetric.timing("%s.time.finished" % metric, process_time, all_tags) imetric.gauge("%s.time_gauge.finished" % metric, process_time, all_tags) imetric.increment("%s.finished" % metric, 1, all_tags) except Exception as ex: # calculate process time process_time = int(time.monotonic() - time_start) * 1000 # append extra tags all_tags.append('exception:%s' % type(ex).__name__) all_tags.append(Resolver.resolve_observed_sli_tag(process_time=process_time)) # accept on, returns True if type(ex) in accept_on: # log warning Provider.get_logger(*args).warning("%s: %s(%s) during '%s' accepted.\n%s" % ( identity, type(ex).__name__, ex, func.__name__, traceback.format_exc())) # send metrics, raised but accepted imetric.timing("%s.time.accepted" % metric, process_time, all_tags) imetric.gauge("%s.time_gauge.accepted" % metric, process_time, all_tags) imetric.increment('%s.exception.accepted' % metric, 1, all_tags) # return truthy, to be acknowledged return True # decline on, returns False if type(ex) in decline_on: # log error Provider.get_logger(*args).error("%s: %s(%s) during '%s' declined.\n%s" % ( identity, type(ex).__name__, ex, func.__name__, traceback.format_exc())) # send metrics, raised but declined imetric.timing("%s.time.declined" % metric, process_time, all_tags) imetric.gauge("%s.time_gauge.declined" % metric, process_time, all_tags) imetric.increment('%s.exception.declined' % metric, 1, all_tags) # return falsy, not to be acknowledged return False # unhandled exception, log error Provider.get_logger(*args).error("%s: %s(%s) during '%s' raised.\n%s" % ( identity, type(ex).__name__, ex, func.__name__, traceback.format_exc())) # 
send metrics, raised and unhandled imetric.timing("%s.time.raised" % metric, process_time, all_tags) imetric.gauge("%s.time_gauge.raised" % metric, process_time, all_tags) imetric.increment('%s.exception.raised' % metric, 1, all_tags) # check if notification client available slack = Provider.get_slack(*args) if slack: # notify slack.error(header=identity, title=type(ex).__name__, text=f"{ex}\n{traceback.format_exc()}") # re-raise raise ex finally: # send metric, start imetric.increment("%s.start" % metric, 1, all_tags) # return actual response of the function return response return inner return arrange
8,313
def action_load_more_reviews(move_down=False): """Load more reviews.""" print 'loading more requests.' interface = get_interface(_rboard_url) review_requests = interface.get_review_requests(current_lineval()) current_buffer = vim.current.buffer for review_request in review_requests: changenum = review_request['changenum'] changenum = changenum and ('[%s]' % changenum) or '' summary = ' '.join(review_request['summary'].splitlines()) line = "%07s %07s %12s -- %s" % ( review_request['id'], review_request['status'], review_request.links['submitter']['title'], (summary + changenum)[:140]) current_buffer.append(line) if int(move_down) == 1: vim.command('silent! norm! G')
8,314
def append_subdirs_to_mypy_paths(root_directory: str) -> str:
    """ Appends all immediate subdirs of the root_directory to the MYPYPATH, separated by colon ':'
    TODO: Windows ?
    In order to be able to use that in a shellscript (because the ENV of the subshell gets lost)
    we also return it as a string. This is already in preparation to replace the testloop shellscript
    with a python script.

    >>> # Setup
    >>> save_mypy_path = get_env_data(env_variable='MYPYPATH')

    >>> # Test
    >>> append_subdirs_to_mypy_paths(str(pathlib.Path(__file__).parent.parent.resolve()))
    '...'
    >>> assert str(pathlib.Path(__file__).parent.resolve()) in get_env_data(env_variable='MYPYPATH')
    >>> append_subdirs_to_mypy_paths('non_existing')
    ''

    >>> # Teardown
    >>> set_env_data(env_variable='MYPYPATH', env_str=save_mypy_path)

    """
    path_root_directory = pathlib.Path(root_directory).resolve()
    if not path_root_directory.is_dir():
        logger.warning(f'add mypy paths : the given root directory "{path_root_directory}" does not exist')
        return ''
    l_subdirs = [str(path_root_directory / _dir) for _dir in next(os.walk(path_root_directory))[1]]
    str_current_mypy_paths = get_env_data(env_variable='MYPYPATH')
    if str_current_mypy_paths:
        l_subdirs.insert(0, str_current_mypy_paths)
    str_new_mypy_paths = ':'.join(l_subdirs)
    set_env_data(env_variable='MYPYPATH', env_str=str_new_mypy_paths)
    return str_new_mypy_paths
8,315
def test__upsert_service_info_insert(): """Test for creating service info document in database.""" app = Flask(__name__) app.config['FOCA'] = Config( db=MongoConfig(**MONGO_CONFIG), endpoints=ENDPOINT_CONFIG, ) app.config['FOCA'].db.dbs['drsStore'].collections['service_info'] \ .client = mongomock.MongoClient().db.collection data = deepcopy(SERVICE_INFO_CONFIG) del data['contactUrl'] with app.app_context(): service_info = RegisterServiceInfo() service_info._upsert_service_info(data=data) assert service_info.get_service_info() == data assert service_info.get_service_info() != SERVICE_INFO_CONFIG
8,316
def spherical_to_cartesian(lons, lats, depths): """ Return the position vectors (in Cartesian coordinates) of list of spherical coordinates. For equations see: http://mathworld.wolfram.com/SphericalCoordinates.html. Parameters are components of spherical coordinates in a form of scalars, lists or numpy arrays. ``depths`` can be ``None`` in which case it's considered zero for all points. :returns: ``numpy.array`` of 3d vectors representing points' coordinates in Cartesian space. The array has the same shape as parameter arrays. In particular it means that if ``lons`` and ``lats`` are scalars, the result is a single 3d vector. Vector of length ``1`` represents distance of 1 km. See also :func:`cartesian_to_spherical`. """ phi = numpy.radians(lons) theta = numpy.radians(lats) if depths is None: rr = EARTH_RADIUS else: rr = EARTH_RADIUS - numpy.array(depths) cos_theta_r = rr * numpy.cos(theta) xx = cos_theta_r * numpy.cos(phi) yy = cos_theta_r * numpy.sin(phi) zz = rr * numpy.sin(theta) vectors = numpy.array([xx.transpose(), yy.transpose(), zz.transpose()]) \ .transpose() return vectors
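A short usage sketch for spherical_to_cartesian above; EARTH_RADIUS is a module-level constant not shown in this row, assumed here to be the mean Earth radius of roughly 6371 km:

v = spherical_to_cartesian(0.0, 0.0, None)
print(v)   # roughly [6371., 0., 0.] -- a surface point at lon=0, lat=0 lies on the x axis

v = spherical_to_cartesian([0.0, 0.0], [0.0, 90.0], [10.0, 0.0])
print(v.shape)   # (2, 3); the first point sits 10 km below the surface along x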
8,317
def get_remote_webdriver(hub_url, browser, browser_ver, test_name): """ This functions returns remote web-driver instance created in selenoid machine. :param hub_url :param browser: browser name :param browser_ver: version for browser :param test_name: test name :return: remote web-driver instance for specified browser """ test_name = browser + browser_ver + "_" + test_name + "-" + time.strftime( "%m_%d_%y_%H_%M_%S", time.localtime()) driver_local = None desired_capabilities = { "version": browser_ver, "enableVNC": True, "enableVideo": True, "enableLog": True, "videoName": test_name + ".mp4", "logName": test_name + ".log", "name": test_name, "timeZone": "Asia/Kolkata", "sessionTimeout": "180s" } if browser == 'firefox': profile = webdriver.FirefoxProfile() profile.set_preference("dom.disable_beforeunload", True) desired_capabilities["browserName"] = "firefox" desired_capabilities["requireWindowFocus"] = True desired_capabilities["enablePersistentHover"] = False driver_local = webdriver.Remote( command_executor=hub_url, desired_capabilities=desired_capabilities, browser_profile=profile) elif browser == 'chrome': options = Options() options.add_argument("--window-size=1280,1024") desired_capabilities["browserName"] = "chrome" driver_local = webdriver.Remote( command_executor=hub_url, desired_capabilities=desired_capabilities, options=options) else: print("Specified browser does not exist.") # maximize browser window driver_local.maximize_window() # driver_local.implicitly_wait(2) return driver_local
8,318
def process_images(rel_root_path, item_type, item_ids, skip_test, split_attr, gen_image_specs_func, trafo_image_func, trafo_image_extra_kwargs=None, img_obj_type=None, img_attr=None, dimensions=(256, 256), max_valset_size=10000): """ This function downloads all photos which are part of the dataset. This is a general function which can be used for lots of different layers. It returns a dictionary which contains the downloaded image paths. Key: dataset split identifier, can be 'E', 'V', 'R' Value: tuple of (item indexes in the item_ids array, corresponding image paths) :param rel_root_path: The root path of the photos and generated training files relative to the Caffe root path. :param item_type: The type of the model class for the items which are classified (e.g. FgPhoto). This class should have 'photo', 'matclass_dataset_split' attributes/properties. The photo attribute should have most of the Photo model's fields. It is advised to use an actual Photo instance here. The matclass_dataset_split attribute should indicate in which dataset split this item is in. The possible dataset splits are 'E' (test), 'V' (validation), 'R' (training). :param item_ids: List (or numpy array) of ids into the :ref:`item_type` table. It should contain the training, validation and test set. :param skip_test: If true, skip generating file and downloading images for the test split. :param split_attr: The attribute name which represents the dataset split in the database. It should be one character, 'E' meaning test, 'V' meaning validation, 'R' meaning training. :param gen_image_specs_func: Function which generates an id, photo id, image path triplet for each item which we later use to download the images. :param trafo_image_func: If None, we don't apply any transformation on the images. Function which transforms an image given the image path and the extra parameters, it should return the path of the transformed image, which can be the original image path or a new path. :ref:`trafo_image_extra_kwargs` will be passed as extra parameters to this function. :param trafo_image_extra_kwargs: Extra keyword arguments which will be passed to :ref:`trafo_image_func` function. All of them should be a list which has the same order as :ref:`item_ids`. :param img_obj_type: The type of the model class which holds an image. :param img_attr: The attribute of `img_obj_type` which holds the image. :param dimensions: The dimensions to resize the downloaded images to. If None, keep the image as original size. :param max_valset_size: The maximum size for the validation set. """ item_id_to_idx = {id: idx for idx, id in enumerate(item_ids)} abbr, fnames = get_abbr_fname(skip_test) # The return value image_data = {} for mc_ds_s, fname in zip(abbr, fnames): data_path = os.path.join(rel_root_path, 'data') ensuredir(os.path.join(settings.CAFFE_ROOT, data_path)) print 'Generating split file and downloading images for {} split...'.format(fname) print 'Generating a list of images to download...' image_specs = [] for item_ids_batch in progress_bar(iter_batch(item_ids, 10000)): # Note that the order is not going to be the same as # item_ids_batch, so we expect the data layer to shuffle the data! items_split = ( item_type.objects. filter(**{split_attr: mc_ds_s}). filter(id__in=item_ids_batch). 
order_by() ) # A list of item_id, image_url, image_path tuples image_specs += gen_image_specs_func(data_path, items_split) if not image_specs: image_data[mc_ds_s] = ([], []) continue # We want the validation step to finish in tractable time, so we have a # maximum threshold on the validation set size if mc_ds_s == 'V' and len(image_specs) > max_valset_size: print 'Sampling {} images to reduce the size of the validation set...'.format(max_valset_size) # For reproducibility random.seed(125) image_specs = random.sample(image_specs, max_valset_size) item_ids_perm, img_obj_ids, image_paths_list = zip(*image_specs) # A corresponding list of indices into the item_ids array item_idxs = [item_id_to_idx[item_id] for item_id in item_ids_perm] # Add caffe root to all paths for downloading full_image_paths_list = [ [ os.path.join(settings.CAFFE_ROOT, ip) for ip in ipl ] for ipl in image_paths_list ] # Downloading images download_images( item_type=img_obj_type, item_ids=list(itertools.chain.from_iterable(img_obj_ids)), img_attr=img_attr, image_paths=list(itertools.chain.from_iterable(full_image_paths_list)), format='JPEG', dimensions=dimensions, ) if trafo_image_func: print 'Transforming images...' new_image_paths_list = [] new_item_idxs = [] for item_idx, image_paths, full_image_paths in progress_bar(zip(item_idxs, image_paths_list, full_image_paths_list)): new_image_paths = trafo_image_func( image_paths, full_image_paths, **index_kwargs(trafo_image_extra_kwargs, item_idx) ) if not new_image_paths: print ':( {}'.format(full_image_paths) continue new_image_paths_list.append(new_image_paths) new_item_idxs.append(item_idx) image_paths_list = new_image_paths_list item_idxs = new_item_idxs image_data[mc_ds_s] = (item_idxs, image_paths_list) return image_data
8,319
def evaluate(config: Config) -> typing.Dict[str, float]: """ Load and evaluate model on a list generator Return: dict of metrics for the model run """ logger.info('Running evaluation process...') net_name = config.net_name pp_dir = config.paths['preprocess_dir'] pr_dir = config.paths['processed_dir'] model_path = os.path.join(pr_dir, f"{config.net_name}.h5") logger.info('Loading model...') custom_object = {} custom_object['rank_hinge_loss'] = losses.rank_hinge_loss model = load_model(model_path, custom_objects=custom_object) logger.info('Loading preprocessed test data...') processed_test = datapack.load_datapack(pp_dir, name=net_name + "_test") generator_test = generators.ListGenerator(processed_test, stage='train') res = {} res['MAP'] = 0.0 res['NCDG@3'] = 0.0 res['NCDG@5'] = 0.0 num_valid = 0 logger.info('Evaluating model...') for i in range(len(generator_test)): input_data, y_true = generator_test[i] y_pred = model.predict(input_data, batch_size=len(y_true), verbose=0) res['MAP'] += mean_average_precision(y_true, y_pred) res['NCDG@3'] += ndcg(3)(y_true, y_pred) res['NCDG@5'] += ndcg(5)(y_true, y_pred) num_valid += 1 logger.info('\t'.join( [f"{k}={v / num_valid:.3f}" for k, v in res.items()])) return res
8,320
def get_transform_dict(args, strong_aug: Callable): """ Generates dictionary with transforms for all datasets Parameters ---------- args: argparse.Namespace Namespace object that contains all command line arguments with their corresponding values strong_aug: Callable Callable object implementing the applied strong augmentation strategy, i.e. RandAugment or CTAugment (not implemented yet). Returns ------- transform_dict: Dict Dictionary containing transforms for the labeled train set, unlabeled train set and the validation / test set """ img_size = IMG_SIZE[args.dataset] padding = int(0.125 * img_size) return { "train": FixMatchTransform.labeled(args.dataset, img_size, padding), "train_unlabeled": FixMatchTransform.unlabeled(args.dataset, strong_aug, img_size, padding), "test": get_normalizer(args.dataset), }
8,321
def choose_first_not_none(*args):
    """ Choose first non None alternative in args.
    :param args: alternative list
    :return: the first non None alternative.
    """
    for a in args:
        if a is not None:
            return a
    return None
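A usage sketch for choose_first_not_none above; note that falsy values such as '' or 0 still count, only None is skipped:

print(choose_first_not_none(None, '', 0, 'fallback'))   # '' (empty string is not None)
print(choose_first_not_none(None, None, 42))            # 42
print(choose_first_not_none(None, None))                # None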
8,322
def _format_compact(value, short=True):
    """Compact number formatting using proper suffixes based on magnitude.

    Compact number formatting has slightly idiosyncratic behavior mainly due to
    two rules. First, if the value is below 1000, the formatting should just be
    a 2 digit decimal formatting. Second, the number is always truncated to
    leave at least 2 digits. This means that a number with one digit more than
    the magnitude, such as 1250, is still left with 1.2K, whereas one more digit
    would leave it without the decimal, such as 12500 becoming 12K.

    Args:
      value: The value to format.
      short: Whether to use the short form suffixes or long form suffixes.

    Returns:
      A formatted number as a string.
    """
    if value < 1000:
        return '{0:.2f}'.format(value).rstrip('0').rstrip('.')

    suffixes = _SHORT_SUFFIXES if short else _LONG_SUFFIXES
    for key, suffix in sorted(suffixes.items(), reverse=True):
        if value >= key:
            value = value / float(key)
            if value >= 10:
                pattern = '{0:,.0f}' + suffix
            else:
                pattern = '{0:.1f}' + suffix
            return pattern.format(value)
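To make the two rules in the docstring concrete, here is a small sketch; the module-level suffix tables are not shown in this row, so the dictionaries below are assumed stand-ins:

_SHORT_SUFFIXES = {10**3: 'K', 10**6: 'M', 10**9: 'B'}   # hypothetical, for illustration only
_LONG_SUFFIXES = {10**3: ' thousand', 10**6: ' million', 10**9: ' billion'}

print(_format_compact(950))       # '950'   -- below 1000: plain decimal, trailing zeros stripped
print(_format_compact(1250))      # '1.2K'  -- one digit past the magnitude keeps one decimal
print(_format_compact(12500))     # '12K'   -- two digits past the magnitude drops the decimal
print(_format_compact(2500000, short=False))   # '2.5 million'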
8,323
def matrix(mat,nrow=1,ncol=1,byrow=False): """Given a two dimensional array, write the array in a matrix form""" nr=len(mat) rscript='m<-matrix(data=c(' try: nc=len(mat[0]) for m in mat: rscript+=str(m)[1:-1]+ ', ' rscript=rscript[:-2]+'), nrow=%d, ncol=%d, byrow=TRUE,' %(nr,nc) except TypeError: rscript+=str(mat)[1:-1]+',' rscript=rscript[:-1]+'), nrow=%d, ncol=%d,' %(nrow,ncol) if byrow: rscript+='byrow=TRUE,' rscript=rscript[:-1]+')\n' return rscript
8,324
async def test_create_event_invalid_date( client: _TestClient, mocker: MockFixture, token: MockFixture, event: dict ) -> None: """Should return 400 Bad request.""" ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95" mocker.patch( "event_service.services.events_service.create_id", return_value=ID, ) mocker.patch( "event_service.adapters.events_adapter.EventsAdapter.create_event", return_value=ID, ) event_invalid_date = deepcopy(event) event_invalid_date["date_of_event"] = "9999-99-99" headers = { hdrs.CONTENT_TYPE: "application/json", hdrs.AUTHORIZATION: f"Bearer {token}", } with aioresponses(passthrough=["http://127.0.0.1"]) as m: m.post("http://example.com:8081/authorize", status=204) resp = await client.post("/events", headers=headers, json=event_invalid_date) assert resp.status == 400
8,325
def all(x, axis=None, keepdims=False):
    """Bitwise reduction (logical AND).
    """
    return T.all(x, axis=axis, keepdims=keepdims)
8,326
def config_find_permitted(cookie, dn, in_class_id, in_filter, in_hierarchical=YesOrNo.FALSE): """ Auto-generated UCS XML API Method. """ method = ExternalMethod("ConfigFindPermitted") method.cookie = cookie method.dn = dn method.in_class_id = in_class_id method.in_filter = in_filter method.in_hierarchical = (("false", "true")[in_hierarchical in ucsgenutils.AFFIRMATIVE_LIST]) xml_request = method.to_xml(option=WriteXmlOption.DIRTY) return xml_request
8,327
def reformat_medication_statuses(data: FilteredData) -> FilteredData: """ Reformats medication statuses to binary indicators. Args: data: The data containing medication statuses to reformat. Returns: Data with reformatted medication statuses. """ for j in data.medications.columns[data.medications.columns.str.contains( '_status$')]: data.medications[j] = (~(data.medications[j].isin( ['NONE', 'NEW']))).astype(int) return data
8,328
def segmentwidget(img, params=None, alg=None): """Generate GUI. Produce slider for each parameter for the current segmentor. Show both options for the masked image. Keyword arguments: img -- original image gmask -- ground truth segmentation mask for the image params -- list of parameter options alg -- algorithm to search parameters over """ if params: if alg: params['algorithm'] = alg; seg = segmentor.algoFromParams(params) else: if alg: algorithm_gen = segmentor.algorithmspace[alg] seg = algorithm_gen() else: seg = segmentor() widg = dict() widglist = [] for ppp, ind in zip(seg.paramindexes, range(len(seg.paramindexes))): thislist = seg.params.ranges[ppp] name = ppp current_value = seg.params[ppp] if not current_value in thislist: #TODO: We should find the min distance between current_value and this list and use that instead. current_value = thislist[0] thiswidg = widgets.SelectionSlider(options=tuple(thislist), disabled=False, description=name, value=current_value, continuous_update=False, orientation='horizontal', readout=True ) widglist.append(thiswidg) widg[ppp] = thiswidg def func(**kwargs): """Find mask and fitness for current algorithm. Show masked image.""" print(seg.params["algorithm"]) for k in kwargs: seg.params[k] = kwargs[k] mask = seg.evaluate(img) #fit = Segmentors.FitnessFunction(mask, gmask) fig = showtwo(img, mask) # I like the idea of printing the sharepython but it should be below the figures. #print(seg.sharepython(img)) # plt.title('Fitness Value: ' + str(fit[0])) layout = widgets.Layout(grid_template_columns='1fr 1fr 1fr') u_i = widgets.GridBox(widglist, layout=layout) out = widgets.interactive_output(func, widg) display(u_i, out) return seg.params
8,329
def list_daemons(dut): """Get daemon table from ovsdb-server.""" daemon_list = {} c = ovs_vsctl + "--format json list daemon" out = dut(c, shell="bash") json_out = json.loads(out)['data'] # The output is in the following format # [["uuid","19b943b0-096c-4d7c-bc0c-5b6ac2f83014"],0,true,"ops-pmd"] for item in json_out: daemon_list[item[3]] = {'is_hw_handler': item[2]} return daemon_list
8,330
def Cleanse(obj, encoding="utf-8"): """Makes Python object appropriate for JSON serialization. - Replaces instances of Infinity/-Infinity/NaN with strings. - Turns byte strings into unicode strings. - Turns sets into sorted lists. - Turns tuples into lists. Args: obj: Python data structure. encoding: Charset used to decode byte strings. Returns: Unicode JSON data structure. """ if isinstance(obj, int): return obj elif isinstance(obj, float): if obj == _INFINITY: return "Infinity" elif obj == _NEGATIVE_INFINITY: return "-Infinity" elif math.isnan(obj): return "NaN" else: return obj elif isinstance(obj, bytes): return obj.decode(encoding) elif isinstance(obj, (list, tuple)): return [Cleanse(i, encoding) for i in obj] elif isinstance(obj, set): return [Cleanse(i, encoding) for i in sorted(obj)] elif isinstance(obj, dict): return collections.OrderedDict( (Cleanse(k, encoding), Cleanse(v, encoding)) for k, v in obj.items() ) else: return obj
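The branch structure of Cleanse above is easiest to see on a small mixed payload; _INFINITY and _NEGATIVE_INFINITY are module-level constants not shown in this row, assumed here to be plus and minus float('inf'):

raw = {'loss': float('nan'), 'limit': float('inf'), b'tags': {'b', 'a'}, 'pair': (1, 2)}
print(Cleanse(raw))
# OrderedDict([('loss', 'NaN'), ('limit', 'Infinity'), ('tags', ['a', 'b']), ('pair', [1, 2])])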
8,331
def wait_no_exception(lfunction, exc_class=None, exc_matcher=None): """Stops waiting on success.""" start_time = time.time() if exc_matcher is not None: exc_class = boto.exception.BotoServerError if exc_class is None: exc_class = BaseException while True: result = None try: result = lfunction() LOG.info('No Exception in %d second', time.time() - start_time) return result except exc_class as exc: if exc_matcher is not None: res = exc_matcher.match(exc) if res is not None: LOG.info(res) raise exc # Let the other exceptions propagate dtime = time.time() - start_time if dtime > default_timeout: raise TestCase.failureException("Wait timeout exceeded! (%ds)" % dtime) time.sleep(default_check_interval)
8,332
def testapp(app, init_database): """Create Webtest app.""" testapp = TestApp(app) #testapp = TestApp(app, extra_environ=dict(REMOTE_USE='test')) # testapp.set_authorization(('Basic', (app.config['USERNAME'],app.config['PASSWORD']))) # testapp.get_authorization() return testapp
8,333
def dois(self, key, value): """Translates dois fields.""" _identifiers = self.get("identifiers", []) for v in force_list(value): material = mapping( MATERIALS, clean_val("q", v, str, transform="lower"), raise_exception=True, ) doi = { "value": clean_val("a", v, str, req=True), "material": material, "source": clean_val("9", v, str), "scheme": "DOI", } if doi not in _identifiers: _identifiers.append(doi) return _identifiers
8,334
def transition(src, dest, state=None, permissions=None, required=None, commit_record=True, **kwargs): """Decorator that marks the wrapped function as a state transition. :params parameters for transition object, see documentation for details. :returns: A wrapper around a wrapped function, with added `_fsm` field containing the `Transition` spec. """ if permissions is not None and not isinstance(permissions, (list, tuple)): permissions = [permissions] if required is not None and not isinstance(required, (list, tuple)): required = [required] if not isinstance(src, (list, tuple)): src = [src] t = Transition( src=src, dest=dest, state=state, permissions=permissions, required=required, commit_record=commit_record, **kwargs ) def inner(f): @has_required_params(t) def wrapper(self, *args, **kwargs): record = self t.check_valid_state(record) t.check_permissions(record) t.execute(record=record, **kwargs) return f(self, *args, **kwargs) wrapper._fsm = t t.function = wrapper t.original_function = f return wrapper return inner
8,335
def get_html(url): """Returns HTML object based on given Gumtree URL. :param url: Offer URL. :return: Offer HTML object. """ session = HTMLSession() try: r = session.get(url) return r.html except ParserError: return None
8,336
def test_default_skorecard_class(df): """Test a workflow, when no bucketer is defined.""" X = df.drop("default", axis=1) y = df["default"] features = ["LIMIT_BAL", "BILL_AMT1"] skorecard_model = Skorecard(verbose=0, variables=features) skorecard_model.fit(X, y) assert isinstance(skorecard_model.bucketing_, BucketingProcess) bucketer = None expected_probas = np.array([[0.862, 0.138], [0.748, 0.252]]) run_checks(X, y, bucketer, features, expected_probas) bucketer = None features = [] expected_probas = np.array([[0.895, 0.105], [0.752, 0.248]]) run_checks(X, y, bucketer, features, expected_probas)
8,337
def markovtalk_learn(text_line): """ this is the function were a text line gets learned """ text_line = msg_to_array(text_line) length = len(text_line) order = [TOKEN, ] * ORDER_K for i in range(length-1): order.insert(0, text_line[i]) order = order[:ORDER_K] next_word = text_line[i+1] key = markovchains.setdefault(o2i(order), []) if not next_word in key: key.append(mw(next_word))
8,338
def _get_lattice_parameters(lattice):
    """Return basis vector lengths

    Parameters
    ----------
    lattice : array_like
        Basis vectors given as column vectors
        shape=(3, 3), dtype='double'

    Returns
    -------
    ndarray, shape=(3,), dtype='double'

    """
    return np.array(np.sqrt(np.dot(lattice.T, lattice).diagonal()),
                    dtype='double')
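A quick check of _get_lattice_parameters above on a diagonal lattice whose basis vectors have obvious lengths:

import numpy as np

lattice = np.diag([2.0, 3.0, 4.0])        # column vectors of length 2, 3 and 4
print(_get_lattice_parameters(lattice))   # [2. 3. 4.]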
8,339
def save_batches(current_memory, id_tmp_dir, batch_num): """ batch_num : corresponds to the gradient update number """ target_csv = id_tmp_dir + "/batch" + str(batch_num) + ".csv" obs_copy = deepcopy(current_memory['current_obs']) reward_copy = deepcopy(current_memory['rewards']) current_obs_batch = obs_copy.cpu().numpy() obs_x = current_obs_batch[:,0] obs_y = current_obs_batch[:,1] reward_batch = reward_copy.cpu().numpy() batch_list = np.column_stack((obs_x, obs_y, reward_batch)) fileheader = 'X-Position, Y-Position, Reward' np.savetxt(target_csv, batch_list, delimiter=' ', header=fileheader) return 0
8,340
def ikrvea_mm( reference_point: np.ndarray, individuals: np.ndarray, objectives: np.ndarray, uncertainity: np.ndarray, problem: MOProblem, u: int) -> float: """ Selects the solutions that need to be reevaluated with the original functions. This model management is based on the following papaer: 'P. Aghaei Pour, T. Rodemann, J. Hakanen, and K. Miettinen, “Surrogate assisted interactive multiobjective optimization in energy system design of buildings,” Optimization and Engineering, 2021.' Args: reference_front (np.ndarray): The reference front that the current front is being compared to. Should be an one-dimensional array. individuals (np.ndarray): Current individuals generated by using surrogate models objectives (np.ndarray): Current objectives generated by using surrogate models uncertainity (np.ndarray): Current Uncertainty values generated by using surrogate models problem : the problem class Returns: float: the new problem object that has an updated archive. """ nd = remove_duplicate(individuals, problem.archive.drop( problem.objective_names, axis=1).to_numpy()) #removing duplicate solutions if len(nd) == 0: return problem else: non_duplicate_dv = individuals[nd] non_duplicate_obj = objectives[nd] non_duplicate_unc = uncertainity[nd] # Selecting solutions with lowest ASF values asf_solutions = SimpleASF([1]*problem.n_of_objectives).__call__(non_duplicate_obj, reference_point) idx = np.argpartition(asf_solutions, 2*u) asf_unc = np.max(non_duplicate_unc [idx[0:2*u]], axis= 1) # index of solutions with lowest Uncertainty lowest_unc_index = np.argpartition(asf_unc, u)[0:u] # evaluating the solutions in asf_unc with lowest uncertainty. The archive will get update in problem.evaluate() problem.evaluate(non_duplicate_dv[lowest_unc_index], use_surrogate=False)[0] problem.train(models=GaussianProcessRegressor,\ model_parameters={'kernel': Matern(nu=1.5)}) return problem
8,341
def main(): """ Call appropriate drawing functions depending on command-line arguments. """ data_path = os.path.dirname(args.measurement_filenames[0]) data_all_files = graph_common.read_latencies_files( args.measurement_filenames) if not args.no_histograms: draw_histograms(data_all_files, args.histogram_merge_all_files, args.fast, args.cutoff_time_ms, data_path, args.output_postfix) if not args.no_timeseries: draw_timeseries(data_all_files, args.cutoff_time_ms, data_path, args.output_postfix) if not args.noninteractive: plt.show()
8,342
def pylint():
    """pylint"""
    sh("pylint ./pyfuzzy/")
8,343
def homepage(request): """Main view of app. We will display page with few step CTA links? :param request: WSGIRequest instance """ if logged_as_admin(request): offers = Offer.objects.get_for_administrator() else: offers = Offer.objects.get_weightened() return render( request, 'homepage.html', { 'offers': offers, 'MEDIA_URL': settings.MEDIA_URL, } )
8,344
def add_dict(dct1, dct2):
    """Returns a new dictionary where the contents of the dictionaries
    `dct1` and `dct2` are merged together."""
    result = dct1.copy()
    result.update(dct2)
    return result
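A usage sketch for add_dict above; on key collisions the value from dct2 wins, and the inputs are left untouched:

a = {'host': 'localhost', 'port': 8000}
b = {'port': 9000, 'debug': True}
print(add_dict(a, b))   # {'host': 'localhost', 'port': 9000, 'debug': True}
print(a)                # unchanged: {'host': 'localhost', 'port': 8000}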
8,345
def _handle_cam_removal(serial): """ Handles removing a camera """ print(serial + ' - removed') # Remove cam stuff from dict _SERIAL_DICT.pop(serial, None)
8,346
def edit_temp(contents="", name=""): """ Create a temporary file and open it in the system's default editor for the user to edit. The saved contents of the file will be returned when the editor is closed. :param contents: Pre-fill the file with the given text. :param name: Ensure that the temp filename has the given name. :return: Contents of the file when the editor is closed. """ # Create a temp file, ensure it has requested name and contents td = tempfile.TemporaryDirectory() tfpath = Path(td.name) / (name or DEFAULT_TEMPFILE) write_file(tfpath, contents) # Edit interactively return edit(tfpath)
8,347
def _new_primitive_control( rabi_rotation=None, azimuthal_angle=0., maximum_rabi_rate=2. * np.pi, **kwargs): """ Primitive driven control. Parameters ---------- rabi_rotation : float, optional The total rabi rotation to be performed by the driven control. maximum_rabi_rate : float, optional Defaults to 2.*np.pi The maximum rabi frequency for the driven control. azimuthal_angle : float, optional The azimuthal position of the driven control. kwargs : dict Other keywords required to make a qctrlopencontrols.DrivenControls. Returns ------- qctrlopencontrols.DrivenControl The driven control. """ (maximum_rabi_rate, rabi_rotation, azimuthal_angle) = _predefined_common_attributes( maximum_rabi_rate, rabi_rotation, azimuthal_angle) return DrivenControl( rabi_rates=[maximum_rabi_rate], azimuthal_angles=[azimuthal_angle], detunings=[0], durations=[rabi_rotation/maximum_rabi_rate], **kwargs)
8,348
def check_fit_params( X: TwoDimArrayLikeType, y: OneDimArrayLikeType, sample_weight: Optional[OneDimArrayLikeType] = None, estimator: Optional[BaseEstimator] = None, **kwargs: Any ) -> Tuple[TwoDimArrayLikeType, OneDimArrayLikeType, OneDimArrayLikeType]: """Check `X`, `y` and `sample_weight`. Parameters ---------- X Data. y Target. sample_weight Weights of data. estimator Object to use to fit the data. **kwargs Other keywords passed to `sklearn.utils.check_array`. Returns ------- X Converted and validated data. y Converted and validated target. sample_weight Converted and validated weights of data. """ X = check_X(X, estimator=estimator, **kwargs) if not isinstance(y, pd.Series): y = column_or_1d(y, warn=True) _assert_all_finite(y) if is_classifier(estimator): check_classification_targets(y) if sample_weight is None: n_samples = _num_samples(X) sample_weight = np.ones(n_samples) sample_weight = np.asarray(sample_weight) class_weight = getattr(estimator, "class_weight", None) if class_weight is not None: sample_weight *= compute_sample_weight(class_weight, y) check_consistent_length(X, y, sample_weight) return X, y, sample_weight
8,349
async def example_quest(request: web.Request) -> web.Response: """ Example quest handler that handles a POST request with a computer science trivia question :param request: The request object """ # Verify that it is a POST request, since that's what this quest is supposed to handle if request.method == 'POST': # We will always get JSON from the server, so convert it to a Python dict data = await request.json() # Let's see what the server is asking print(f'Server sent POST to /my-simple-quest:', data) # Ok so we know that the question is "Who invented C++?" # The request always contains a "msg" field, and the response always expects an "answer" field response = { 'answer': 'bjarne stroustrup' } # The server always expects a JSON response return web.json_response(response) else: Log.error('This quest is supposed to handle POST requests')
8,350
def load_decrypt_bs_data(filepath, shape): """ load the encrypted data and reconstruct """ part_readers = [] for id in six.moves.range(3): part_readers.append(mpc_du.load_shares(filepath, id=id, shape=shape)) mpc_share_reader = paddle.reader.compose(part_readers[0], part_readers[1], part_readers[2]) for instance in mpc_share_reader(): p = np.bitwise_xor(np.array(instance[0]), np.array(instance[1])) p = np.bitwise_xor(p, np.array(instance[2])) logger.info(p)
8,351
def generate_token_backend_conf(fout): """ Generate token backend """ with open('./conf_templates/token_backend.template') as fin: template = ConfTemplate(fin.read()) print(template.substitute(options), file=fout)
8,352
def view_api_image(image_type, catalog_name, source_id): """Source spectrum image.""" import matplotlib.pyplot as plt catalog = source_catalogs[catalog_name] source = catalog[source_id] plt.style.use('fivethirtyeight') if image_type == 'spectrum': fig, ax = plt.subplots() source.plot_spectrum(ax=ax) if image_type == 'lightcurve': fig, ax = plt.subplots() source.plot_lightcurve(ax=ax) elif image_type == 'test': fig, ax = plt.subplots() ax.plot([2, 4, 3]) else: raise ValueError('Invalid image_type: {}'.format(image_type)) fig.tight_layout() # fig.canvas.draw() img = BytesIO() fig.savefig(img) img.seek(0) del fig, ax return send_file(img, mimetype='image/png')
8,353
def send_interface_connection_table(dispatcher, connections, filter_type, value): """Send request large table to Slack Channel.""" header = ["Device A", "Interface A", "Device B", "Interface B", "Connection Status"] rows = [ ( add_asterisk(connection._termination_a_device, filter_type, value), # pylint: disable=protected-access str(connection.termination_a), add_asterisk(connection._termination_b_device, filter_type, value), # pylint: disable=protected-access str(connection.termination_b), str(connection.status), ) for connection in connections ] rows = list(sorted(set(rows))) dispatcher.send_large_table(header, rows)
8,354
def get_record_base_model(type_enum): """Return the dimension model class for a DimensionType.""" dim_model = _DIMENSION_TO_MODEL.get(type_enum) if dim_model is None: raise DSGInvalidDimension(f"no mapping for {type_enum}") return dim_model
8,355
def cmd(command): """ Run a command and return its stdout """ try: completed = subprocess.run( command.split(" "), stdout=subprocess.PIPE, ) except FileNotFoundError: panic(f"Command `{command}` not found.") if completed.returncode > 0: panic(f"Command `{command}` returned a non 0 status code.") return completed.stdout.decode('utf-8').rstrip()
8,356
def test_test_image(): """Make sure our test image, face.jpg, is white on the edges""" img = _get_test_img() all = np.all(img[::, :10:] > 240, axis=2) assert np.all(all)
8,357
def copy_file(from_path, to_path):
    """
    Copy file from one path to other, creating the target directory if not exists.

    Arguments:
        from_path : str
        to_path : str

    Returns:
        None
    """
    create_dir(os.path.dirname(to_path))
    shutil.copyfile(from_path, to_path)
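A usage sketch for copy_file above; create_dir is assumed to be the module's own helper that creates the directory when it is missing:

copy_file('config/defaults.yaml', 'backup/2024/defaults.yaml')
# 'backup/2024/' is created if it does not exist, then the file is copied into it.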
8,358
def sas_to_pandas(sas_code, wrds_id, fpath): """Function that runs SAS code on WRDS or local server and returns a Pandas data frame.""" p = get_process(sas_code, wrds_id, fpath) if wrds_id: df = pd.read_csv(StringIO(p.read().decode('utf-8'))) else: df = pd.read_csv(StringIO(p.read())) df.columns = map(str.lower, df.columns) p.close() return(df)
8,359
def subprocess_run(*popenargs, input=None, timeout=None, check=False, **kwargs): # pylint: disable=redefined-builtin """Run command with arguments and return a CompletedProcess instance. The returned instance will have attributes args, returncode, stdout and stderr. By default, stdout and stderr are not captured, and those attributes will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them. If check is True and the exit code was non-zero, it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute, and output & stderr attributes if those streams were captured. If timeout is given, and the process takes too long, a TimeoutExpired exception will be raised. There is an optional argument "input", allowing you to pass a string to the subprocess's stdin. If you use this argument you may not also use the Popen constructor's "stdin" argument, as it will be used internally. The other arguments are the same as for the Popen constructor. If universal_newlines=True is passed, the "input" argument must be a string and stdout/stderr in the returned object will be strings rather than bytes. """ if input is not None: if 'stdin' in kwargs: raise ValueError('stdin and input arguments may not both be used.') kwargs['stdin'] = subprocess.PIPE with subprocess.Popen(*popenargs, **kwargs) as process: try: stdout, stderr = process.communicate(input, timeout=timeout) except subprocess.TimeoutExpired: process.kill() stdout, stderr = process.communicate() raise subprocess.TimeoutExpired(process.args, timeout, output=stdout, stderr=stderr) except: process.kill() process.wait() raise retcode = process.poll() if check and retcode: raise subprocess.CalledProcessError(retcode, process.args, output=stdout) return _CompletedProcess(process.args, retcode, stdout, stderr)
8,360
def _client_ip_address(request): """Return client ip address for flask `request`. """ if request.headers.getlist("X-PNG-Query-For"): ip_addr = request.headers.getlist("X-PNG-Query-For")[0] if ip_addr.startswith('::ffff:'): ip_addr = ip_addr[7:] elif request.headers.getlist("X-Forwarded-For"): ip_addr = request.headers.getlist("X-Forwarded-For")[0] if ip_addr.startswith('::ffff:'): ip_addr = ip_addr[7:] else: ip_addr = request.remote_addr return ip_addr
8,361
def rollout_and_save( path: str, policy: AnyPolicy, venv: VecEnv, sample_until: GenTrajTerminationFn, *, unwrap: bool = True, exclude_infos: bool = True, verbose: bool = True, **kwargs, ) -> None: """Generate policy rollouts and save them to a pickled list of trajectories. The `.infos` field of each Trajectory is set to `None` to save space. Args: path: Rollouts are saved to this path. policy: Can be any of the following: 1) A stable_baselines3 policy or algorithm trained on the gym environment. 2) A Callable that takes an ndarray of observations and returns an ndarray of corresponding actions. 3) None, in which case actions will be sampled randomly. venv: The vectorized environments. sample_until: End condition for rollout sampling. unwrap: If True, then save original observations and rewards (instead of potentially wrapped observations and rewards) by calling `unwrap_traj()`. exclude_infos: If True, then exclude `infos` from pickle by setting this field to None. Excluding `infos` can save a lot of space during pickles. verbose: If True, then print out rollout stats before saving. **kwargs: Passed through to `generate_trajectories`. """ trajs = generate_trajectories(policy, venv, sample_until, **kwargs) if unwrap: trajs = [unwrap_traj(traj) for traj in trajs] if exclude_infos: trajs = [dataclasses.replace(traj, infos=None) for traj in trajs] if verbose: stats = rollout_stats(trajs) logging.info(f"Rollout stats: {stats}") types.save(path, trajs)
8,362
def plot_marker(id_marker_ref, id_marker: int, ocp: OptimalControlProgram, nlp: list[NonLinearProgram]): """ plot the markers posiions Parameters ---------- id_marker_ref: int The marker's id hidden in the cost function id_marker: int The marker's id ocp: OptimalControlProgram Optimal control program nlp: list[NonLinearProgram] """ ocp.add_plot( f"{'Marker'} {nlp[0].model.markerNames()[id_marker].to_string()}", lambda t, x, u, p: marker_ref(t, nlp[0], id_marker_ref), legend=[f"Marker {id_marker} x", f"Marker {id_marker} y", f"Marker {id_marker} z"], plot_type=PlotType.STEP, node_idx=[nlp[0].dt * i for i in range(0, nlp[0].ns + 1)], ) ocp.add_plot( f"{'Marker'} {nlp[0].model.markerNames()[id_marker].to_string()}", lambda t, x, u, p: marker_model(x, nlp[0], id_marker), legend=[f"Marker {id_marker} x", f"Marker {id_marker} y", f"Marker {id_marker} z"], plot_type=PlotType.PLOT, node_idx=None, )
8,363
def create_bb_points(vehicle): """ Extract the eight vertices of the bounding box from the vehicle. Parameters ---------- vehicle : opencda object Opencda ObstacleVehicle that has attributes. Returns ------- bbx : np.ndarray 3d bounding box, shape:(8, 4). """ bbx = np.zeros((8, 4)) extent = vehicle.bounding_box.extent bbx[0, :] = np.array([extent.x, extent.y, -extent.z, 1]) bbx[1, :] = np.array([-extent.x, extent.y, -extent.z, 1]) bbx[2, :] = np.array([-extent.x, -extent.y, -extent.z, 1]) bbx[3, :] = np.array([extent.x, -extent.y, -extent.z, 1]) bbx[4, :] = np.array([extent.x, extent.y, extent.z, 1]) bbx[5, :] = np.array([-extent.x, extent.y, extent.z, 1]) bbx[6, :] = np.array([-extent.x, -extent.y, extent.z, 1]) bbx[7, :] = np.array([extent.x, -extent.y, extent.z, 1]) return bbx
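A minimal sketch of calling create_bb_points above with a stand-in object, since only the bounding_box.extent attribute of the OpenCDA vehicle is used:

from types import SimpleNamespace

extent = SimpleNamespace(x=2.0, y=1.0, z=0.75)   # half-lengths of the bounding box
vehicle = SimpleNamespace(bounding_box=SimpleNamespace(extent=extent))

bbx = create_bb_points(vehicle)
print(bbx.shape)   # (8, 4) -- eight corners in homogeneous coordinates (w = 1)
print(bbx[0])      # first corner: (x, y, -z, 1), i.e. [2., 1., -0.75, 1.]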
8,364
def _ugly_fix_invalid_coords_inplace(invalid_rows_df, tofix_df): """note: used global variable concat_no_invalid_rows""" concat_no_invalid_rows = raw_concat_traintest_df[ raw_concat_traintest_df['Y'] != 90.0 ] for row_idx, row in invalid_rows_df.iterrows(): addr_occurences_in_concat = concat_no_invalid_rows[ concat_no_invalid_rows['Address'] == row['Address'] ] if addr_occurences_in_concat.shape[0]: # Fix longtitude tofix_df.iloc[row_idx, tofix_df.columns.get_loc('X')] = addr_occurences_in_concat['X'].iloc[0] # Fix latitude tofix_df.iloc[row_idx, tofix_df.columns.get_loc('Y')] = addr_occurences_in_concat['Y'].iloc[0]
8,365
def LockAllLiveGroups(): """ Locks the contents of all LiveGroup nodes that are non-editable, starting from the specified node. @type node: C{NodegraphAPI.Node} @param node: The node at which to start locking contents of LiveGroup nodes. """ pass
8,366
def mkstemp( open_kwargs=None, # type: Optional[Dict[Text, Any]] text=True, # type: bool name_only=False, # type: bool *args, **kwargs): # type: (...) -> Union[(IO[AnyStr], Text), Text] """ WARNING: the returned file object is strict about its input type, make sure to feed it binary/text input in correspondence to the ``text`` argument :param open_kwargs: keyword arguments for ``io.open`` :param text: open in text mode :param name_only: close the file and return its name :param args: tempfile.mkstemp args :param kwargs: tempfile.mkstemp kwargs """ fd, name = tempfile.mkstemp(text=text, *args, **kwargs) mode = 'w+' if not text: mode += 'b' if name_only: os.close(fd) return name return io.open(fd, mode, **open_kwargs or {}), name
8,367
def greater_than_or_equal_to_zero(param, *args):
    # type: (int, *int) -> None
    """
    Fails if the input integer parameter is negative.
    """
    if args:
        # adaptation for attr library
        param = args[1]
    if param < 0:
        raise ValueError('{} < 0'.format(get_name_from_param(param)))
8,368
def test_calculate_data_subject_rights(data_subject_rights: dict) -> None: """Tests different strategy options for returning data subject rights.""" rights = DataSubjectRights(**data_subject_rights) return_str_value = export_helpers.calculate_data_subject_rights(rights.dict()) assert return_str_value is not None if data_subject_rights["strategy"] in ["INCLUDE", "EXCLUDE"]: assert return_str_value == "Informed, Erasure"
8,369
def _create_local_dt_indices(time_series, key_prefix): """ local_dt is an embedded document, but we will query it using the individual fields """ time_series.create_index([("%s.year" % key_prefix, pymongo.DESCENDING)], sparse=True) time_series.create_index([("%s.month" % key_prefix, pymongo.DESCENDING)], sparse=True) time_series.create_index([("%s.day" % key_prefix, pymongo.DESCENDING)], sparse=True) time_series.create_index([("%s.hour" % key_prefix, pymongo.DESCENDING)], sparse=True) time_series.create_index([("%s.minute" % key_prefix, pymongo.DESCENDING)], sparse=True) time_series.create_index([("%s.second" % key_prefix, pymongo.DESCENDING)], sparse=True) time_series.create_index([("%s.weekday" % key_prefix, pymongo.DESCENDING)], sparse=True)
8,370
def featuredrep_set_groups(sender, **kwargs): """Set permissions to groups.""" app_label = sender.label if (isinstance(app_label, basestring) and app_label != 'featuredrep'): return True perms = {'can_edit_featured': ['Admin', 'Council', 'Peers'], 'can_delete_featured': ['Admin', 'Council', 'Peers']} add_permissions_to_groups('featuredrep', perms)
8,371
def download(loc, rem): """download rem to loc""" # does the remote file exist if not rem.exists(): return ReturnCode.NO_SOURCE # does the local file exist # if it doesnt, copy rem to loc, isLogged = False if not loc.is_file(): return do_copy(rem, loc, False) # is the local file older than remote if not is_older_than(loc, rem): return ReturnCode.NOT_OLDER if outs.question_override(rem, loc): return do_copy(rem, loc, False) else: return ReturnCode.USER_CANCEL
8,372
def read_dict (conf_dict = {}, filename = "SWIM_config"): """ Open and read a dictionary of key-value pairs from the file given by filename. Use the read-in values to augment or update the dictionary passed in, then return the new dictionary. """ from utils import publish_event try: config_file = open(filename, "r") if config_file: line = config_file.readline().strip() else: line = "" except: message = "Unable to open config file " + filename publish_event(message, topic = FSP_log, action = "halt_run") print message raise IOError, "Unable to open config file in read_dict" try: while line: name, val = line.split("=") name = name.strip() val = val.strip() conf_dict[name] = val if config_file: line = config_file.readline().strip() else: line = "" config_file.close() return conf_dict except Exception, ex: print "Unable to augment conf_dict in read_dict: %s" % ex raise IOError, "Unable to augment conf_dict in read_dict"
8,373
def ordinals_to_ordinals(s):
    """
    Example: 'third' -> '3rd'
    Up to 31st (intended for dates)
    """
    for val in ordinals.keys():
        s = s.replace(val, ordinals[val])
    return s
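A usage sketch for ordinals_to_ordinals above; the module-level ordinals mapping is not shown in this row, so a small assumed stand-in is used. Because plain str.replace is applied key by key, longer forms such as 'twenty-first' need to come before their substrings ('first') in the mapping:

ordinals = {'twenty-first': '21st', 'first': '1st', 'second': '2nd', 'third': '3rd'}   # assumed

print(ordinals_to_ordinals('the third of May'))         # 'the 3rd of May'
print(ordinals_to_ordinals('the twenty-first of May'))  # 'the 21st of May'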
8,374
def create_dataframe_schema():
    """ Create dataframe schema """
    return pd.DataFrame(columns=['Station_id', 'Name'])
8,375
def shutdown_check_handler(): """This checks the AWS instance data URL to see if there's a pending shutdown for the instance. This is useful for AWS spot instances. If there is a pending shutdown posted to the instance data URL, we'll use the result of this function break out of the processing loop and shut everything down ASAP before the instance dies. Returns ------- bool - True if the instance is going to die soon. - False if the instance is still safe. """ url = 'http://169.254.169.254/latest/meta-data/spot/instance-action' try: resp = requests.get(url, timeout=1.0) resp.raise_for_status() stopinfo = resp.json() if 'action' in stopinfo and stopinfo['action'] in ('stop', 'terminate', 'hibernate'): stoptime = stopinfo['time'] LOGWARNING('instance is going to %s at %s' % (stopinfo['action'], stoptime)) resp.close() return True else: resp.close() return False except HTTPError: resp.close() return False except Exception: resp.close() return False
8,376
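# Typical use inside a long-running processing loop on a spot instance. The task
# list and the process/save_checkpoint helpers are placeholders standing in for
# the caller's own work, not part of the original module.
tasks = range(5)

def process(task):
    print("processing", task)

def save_checkpoint():
    print("saving checkpoint before shutdown")

for task in tasks:
    if shutdown_check_handler():
        save_checkpoint()
        break
    process(task)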
def cal_d(date=cal_date.today(), zf=True):
    """
    Day of month, optionally left-padded with zeroes (default: pad)
    """
    # note: the default argument is evaluated once, at import time, so long-running
    # callers should pass an explicit date; "%-d" is not supported by Windows strftime
    day_num = "d" if zf else "-d"  # optionally zero fill
    return date.strftime(f"%{day_num}")
8,377
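# Example output, assuming `cal_date` is datetime.date imported under that name
# (as the default argument suggests).
from datetime import date

cal_d(date(2024, 3, 7))            # -> '07'
cal_d(date(2024, 3, 7), zf=False)  # -> '7' on platforms whose strftime accepts "%-d"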
def get_max_total(map_of_maps: Mapping[Sequence[str], Mapping[Tuple, float]]) -> float: """ >>> df = get_sample_df() >>> get_max_total(calculate_kls_for_attackers(df, [1])) 1.3861419037664793 >>> get_max_total(calculate_kls_for_attackers(df)) 3.0817041659455104 """ return max(get_map_of_totals(map_of_maps).values())
8,378
def get_keeper_token(host: str, username: str, password: str) -> str: """Get a temporary auth token from LTD Keeper. Parameters ---------- host : `str` Hostname of the LTD Keeper API (e.g., ``'https://keeper.lsst.codes'``). username : `str` Username. password : `str` Password. Returns ------- token : `str` LTD Keeper API token. Raises ------ KeeperError Raised if the LTD Keeper API cannot return a token. """ token_endpoint = urljoin(host, "/token") r = requests.get(token_endpoint, auth=(username, password)) if r.status_code != 200: raise KeeperError( "Could not authenticate to {0}: error {1:d}\n{2}".format( host, r.status_code, r.json() ) ) return r.json()["token"]
8,379
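# Hypothetical usage; the host, username and password are placeholders. The token
# is typically sent back as the basic-auth username (with an empty password) on
# subsequent Keeper API calls, though that convention is an assumption here.
import requests
from urllib.parse import urljoin

host = "https://keeper.lsst.codes"
token = get_keeper_token(host, "svc-user", "secret")
r = requests.get(urljoin(host, "/products"), auth=(token, ""))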
def edit_comment(post_id, comment_id): """Edit a comment from a specific post""" post = posts.get(post_id) if not post: return json.dumps({"error": "Post Not Found"}), 404 comments = post["comments"] comment = comments.get(comment_id) if not comment: return json.dumps({"error": "Comment Not Found"}), 404 body = json.loads(request.data) text = body.get("text") if not text: return json.dumps({"error": "Missing fields in the body"}), 400 comment["text"] = text return json.dumps(comment), 200
8,380
def update_transition_dirichlet(
    pB, B, actions, qs, qs_prev, lr=1.0, return_numpy=True, factors="all"
):
    """
    Update Dirichlet parameters that parameterize the transition model of the generative model
    (describing the probabilistic mapping between hidden states over time).

    Parameters
    -----------
    - pB [numpy nd.array, array-of-arrays (with np.ndarray entries), or Dirichlet (either single-modality or AoA)]:
        The prior Dirichlet parameters of the generative model, parameterizing the agent's beliefs about the transition likelihood.
    - B [numpy nd.array, object-like array of arrays, or Categorical (either single-modality or AoA)]:
        The transition likelihood of the generative model.
    - actions [tuple]:
        A tuple containing the action(s) performed at a given timestep.
    - qs [numpy 1D array, array-of-arrays (where each entry is a numpy 1D array), or Categorical (either single-factor or AoA)]:
        Current marginal posterior beliefs about hidden state factors
    - qs_prev [numpy 1D array, array-of-arrays (where each entry is a numpy 1D array), or Categorical (either single-factor or AoA)]:
        Past marginal posterior beliefs about hidden state factors
    - lr [float, optional]:
        Learning rate.
    - return_numpy [bool, optional]:
        Logical flag to determine whether output is a numpy array or a Dirichlet
    - factors [list, optional]:
        Indices (in terms of range(Nf)) of the hidden state factors to include in learning.
        Defaults to 'all', meaning that transition likelihood matrices for all hidden state factors
        are updated as a function of transitions in the different control factors (i.e. actions)
    """
    pB = utils.to_numpy(pB)

    if utils.is_arr_of_arr(pB):
        n_factors = len(pB)
    else:
        n_factors = 1

    if return_numpy:
        pB_updated = pB.copy()
    else:
        pB_updated = utils.to_dirichlet(pB.copy())

    if not utils.is_distribution(qs):
        qs = utils.to_categorical(qs)

    if factors == "all":
        if n_factors == 1:
            db = qs.cross(qs_prev, return_numpy=True)
            db = db * (B[:, :, actions[0]] > 0).astype("float")
            pB_updated = pB_updated + (lr * db)

        elif n_factors > 1:
            for f in range(n_factors):
                db = qs[f].cross(qs_prev[f], return_numpy=True)
                db = db * (B[f][:, :, actions[f]] > 0).astype("float")
                pB_updated[f] = pB_updated[f] + (lr * db)
    else:
        for f_idx in factors:
            db = qs[f_idx].cross(qs_prev[f_idx], return_numpy=True)
            db = db * (B[f_idx][:, :, actions[f_idx]] > 0).astype("float")
            pB_updated[f_idx] = pB_updated[f_idx] + (lr * db)

    return pB_updated
8,381
def MRP2Euler231(q): """ MRP2Euler231(Q) E = MRP2Euler231(Q) translates the MRP vector Q into the (2-3-1) euler angle vector E. """ return EP2Euler231(MRP2EP(q))
8,382
def _convert_from_node_attribute( G, attr_name, node_types, node_type_name=None, node_type_default=None, dtype="f" ): """ Transform the node attributes to feature vectors, for use with machine learning models. Each node is assumed to have a numeric array stored in the attribute_name and which is suitable for use in machine learning models. Args: G: NetworkX graph attr_name: Name of node attribute to use for conversion node_types: Node types in graph node_type_name: (optional) The name of the node attribute specifying the type. node_type_default: (optional) The node type of nodes without explicit type. dtype: (optional) The numpy datatype to create the features array. Returns: index_map: a dictionary of node_type -> {node_id: node_index} attribute_arrays: a dictionary of node_type -> numpy array storing the features """ attribute_arrays = {} node_index_map = {} # Enumerate all nodes in graph nodes_by_type = { # XXX: This lookup does not really make sense if node_type_name is not specified - why is it optional? nt: [ n for n, ndata in G.nodes(data=True) if ndata.get(node_type_name, node_type_default) == nt ] for nt in node_types } # Get the target values for each node type for nt in node_types: nt_node_list = nodes_by_type[nt] # Add None to node list as ID of unknown nodes nt_node_list.append(None) # Create map between node id and index (including None) node_index_map[nt] = {nid: ii for ii, nid in enumerate(nt_node_list)} # The node data attr_data = [ v if v is None else G.nodes[v].get(attr_name) for v in nt_node_list ] # Get the size of the features data_sizes = { np.size(G.nodes[v].get(attr_name, [])) for v in nt_node_list if v is not None } # Warn if nodes don't have the attribute if 0 in data_sizes: print( "Warning: Some nodes have no value for attribute '{}', " "using default value.".format(attr_name) ) data_sizes.discard(0) # Check all are the same for this node type if len(data_sizes) > 1: raise ValueError( "Data sizes in nodes of type {} are inconsistent " "for the attribute '{}' ".format(nt, attr_name) ) # If some node_type have no nodes with the attribute, skip them if len(data_sizes) == 0: continue # Create zero attribute array data_size = data_sizes.pop() # Dummy feature/target value for invalid nodes, # this will be inserted into the array in two cases: # 1. node ID of None (representing sampling for a missing neighbour) # 2. node with no attribute # TODO: Make these two cases more explicit, allow custom values. default_value = np.zeros(data_size) # Convert to numpy array attribute_arrays[nt] = np.asarray( [x if x is not None else default_value for x in attr_data] ) return node_index_map, attribute_arrays
8,383
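# Small, self-contained illustration of _convert_from_node_attribute: two "paper"
# nodes carrying a 2-dimensional feature vector. The extra row in the output array
# is the zero vector reserved for the None entry (missing neighbours / nodes
# without the attribute). Node names and attribute keys are made up for the example.
import networkx as nx
import numpy as np

G = nx.Graph()
G.add_node("a", label="paper", feat=np.array([1.0, 0.0]))
G.add_node("b", label="paper", feat=np.array([0.0, 1.0]))

index_map, arrays = _convert_from_node_attribute(
    G, attr_name="feat", node_types=["paper"], node_type_name="label"
)
# index_map["paper"] == {"a": 0, "b": 1, None: 2}
# arrays["paper"].shape == (3, 2)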
def handle_args(parser: argparse.ArgumentParser, section: Text) -> argparse.Namespace: """ Verify default arguments """ hostname = socket.gethostname() hostname_short = socket.gethostname().split(".")[0] host_config_name = f"{CONFIG_NAME}-{hostname}" host_short_config_name = f"{CONFIG_NAME}-{hostname_short}" if (Path(caep.get_config_dir(CONFIG_ID)) / host_config_name).is_file(): config_name = host_config_name elif (Path(caep.get_config_dir(CONFIG_ID)) / host_short_config_name).is_file(): config_name = host_short_config_name else: config_name = CONFIG_NAME args = caep.handle_args(parser, CONFIG_ID, config_name, section) setup_logging(args.loglevel, args.logfile) info(f"args: {args}") info(f"config: {CONFIG_ID}/{config_name}") args.chat_prefix = args.chat_prefix.strip() if not args.server: fatal("--server not specified") if not args.user: fatal("--user not specified") if args.no_verify: requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning) if args.password_pass_entry: args.password = gettpassentry(args.password_pass_entry) if not args.password: fatal("Must specify either --password or --password-pass-entry") return args
8,384
def get_entry_or_none(base: dict, target, var_type=None): """Helper function that returns an entry or None if key is missing. :param base: dictionary to query. :param target: target key. :param var_type: Type of variable this is supposed to be (for casting). :return: entry or None. """ if target not in base: return None if var_type is not None: return var_type(base[target]) return base[target]
8,385
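# Straightforward examples; var_type is only applied when the key is present.
cfg = {"port": "8080", "debug": "1"}
get_entry_or_none(cfg, "port", int)   # -> 8080
get_entry_or_none(cfg, "debug")       # -> '1'
get_entry_or_none(cfg, "host")        # -> None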
def group_list(request):
    """
    List all groups, or create a new group.
    """
    if request.method == 'GET':
        tasks = Group.objects.all()
        serializer = GroupSerializer(tasks, many=True)
        return Response(serializer.data)

    elif request.method == 'POST':
        unique_name = request.data.get("unique_name")
        display_name = request.data.get("display_name")
        if unique_name and display_name:
            existing_group = Group.objects.filter(unique_name=unique_name).first()
            if existing_group:
                res = {"code": 400, "message": "Oops! Unique name already exists"}
                return Response(data=res, status=400)
        else:
            res = {"code": 400, "message": "Oops! Unique name and display name can't be null"}
            return Response(data=res, status=400)
        group = Group.create(unique_name, display_name)
        group.save()
        serializer = GroupSerializer(group, many=False)
        return JsonResponse(serializer.data, safe=False)
8,386
def CreateBooleanSplit(meshesToSplit, meshSplitters, multiple=False): """ Splits a set of meshes with another set. Args: meshesToSplit (IEnumerable<Mesh>): A list, an array, or any enumerable set of meshes to be split. If this is null, None will be returned. meshSplitters (IEnumerable<Mesh>): A list, an array, or any enumerable set of meshes that cut. If this is null, None will be returned. Returns: Mesh[]: A new mesh array, or None on error. """ url = "rhino/geometry/mesh/createbooleansplit-mesharray_mesharray" if multiple: url += "?multiple=true" args = [meshesToSplit, meshSplitters] if multiple: args = list(zip(meshesToSplit, meshSplitters)) response = Util.ComputeFetch(url, args) response = Util.DecodeToCommonObject(response) return response
8,387
def template14(): """Simple ML workflow""" script = """ ## (Enter,datasets) << host = chemml << function = load_cep_homo >> smiles 0 >> homo 4 ## (Store,file) << host = chemml << function = SaveFile << format = smi << header = False << filename = smiles >> 0 df >> filepath 1 ## (Represent,molecular descriptors) << host = chemml << function = RDKitFingerprint >> 1 molfile >> df 2 >> df 3 ## (Store,file) << host = chemml << function = SaveFile << filename = fps_rdkfp >> 2 df ## (Prepare,split) << host = sklearn << function = train_test_split >> 3 dfx >> 4 dfy >> dfx_train 5 >> dfy_train 6 >> dfx_test 8 >> dfy_test 11 ## (Model,regression) << host = sklearn << function = MLPRegressor << func_method = fit >> 5 dfx >> 6 dfy >> api 7 ## (Model,regression) << host = sklearn << function = MLPRegressor << func_method = predict >> 7 api >> 8 dfx >> dfy_predict 9 >> dfy_predict 10 ## (Store,file) << host = chemml << function = SaveFile << filename = dfy_predict >> 9 df ## (Visualize,plot) << host = chemml << function = scatter2D << x = 0 << y = 0 >> 10 dfx >> 11 dfy >> fig 12 ## (Store,figure) << host = chemml << function = SavePlot << filename = dfy_actual_vs_dfy_predict << output_directory = . >> 13 fig ## (Visualize,artist) << host = chemml << function = decorator << title = true vs. predicted HOMO energy << xlabel = predicted HOMO energy (eV) << ylabel = true HOMO energy (eV) << grid = True << grid_color = g << size = 18 >> 12 fig >> fig 13 """ return script.strip().split('\n')
8,388
def get_group_selector(*args): """ get_group_selector(grpsel) -> sel_t Get common selector for a group of segments. @param grpsel: selector of group segment (C++: sel_t) @return: common selector of the group or 'grpsel' if no such group is found """ return _ida_segment.get_group_selector(*args)
8,389
def wrap(text=cert_text) -> str:
    """Wraps the given text using '\\n' to fit the desired width."""
    wrapped_text = textwrap.fill(text, fit_char())
    return wrapped_text
8,390
def encode(data): """calls simplejson's encoding stuff with our needs""" return simplejson.dumps( data, cls=CahootsEncoder, ensure_ascii=False, encoding='utf8', indent=4 )
8,391
def SE_HRNet_W48_C(pretrained=False, use_ssld=False, **kwargs): """ SE_HRNet_W48_C Args: pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. If str, means the path of the pretrained model. use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. Returns: model: nn.Layer. Specific `SE_HRNet_W48_C` model depends on args. """ model = HRNet(width=48, has_se=True, **kwargs) _load_pretrained(pretrained, model, MODEL_URLS["SE_HRNet_W48_C"], use_ssld) return model
8,392
def manifest(argument: Union[Any, List[Any]], data: bytearray) -> Union[Any, List[Any]]: """ Returns the manifestation of a `refinery.lib.argformats.LazyEvaluation` on the given data. This function can change the data. """ if isinstance(argument, (list, tuple)): return [manifest(x, data) for x in argument] return argument(data) if isinstance(argument, LazyEvaluation) else argument
8,393
def get_child_hwnd_by_class(hwnd: int, window_class: str) -> int: """Enumerates the child windows that belong to the specified parent window by passing the handle to each child window. :param hwnd: HWND in decimal :param window_class: window class name :return: window handle (HWND) """ def callback(hwnd, extra): if extra['equals'] == win32gui.GetClassName(hwnd): extra['res'] = hwnd extra = {'res': None, 'equals': window_class} win32gui.EnumChildWindows(hwnd, callback, extra) return extra['res']
8,394
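# Example (Windows only): locate the classic Notepad edit control. The window title
# and the "Edit" class name are assumptions about the target application; newer
# Notepad builds use different control classes.
import win32gui

parent = win32gui.FindWindow(None, "Untitled - Notepad")
edit_hwnd = get_child_hwnd_by_class(parent, "Edit")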
def uniq(lst): """ this is like list(set(lst)) except that it gets around unhashability by stringifying everything. If str(a) == str(b) then this will get rid of one of them. """ seen = {} result = [] for item in lst: if str(item) not in seen: result.append(item) seen[str(item)]=True return result
8,395
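# Examples; note that stringification also collapses values of different types
# that print the same, e.g. 1 and "1".
uniq([1, "1", [2, 3], [2, 3]])   # -> [1, [2, 3]]
uniq(["a", "b", "a"])            # -> ['a', 'b']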
async def test_dhcp_flow(hass: HomeAssistant) -> None: """Test that DHCP discovery works.""" with patch( "homeassistant.components.qnap_qsw.QnapQswApi.get_live", return_value=LIVE_MOCK, ): result = await hass.config_entries.flow.async_init( DOMAIN, data=DHCP_SERVICE_INFO, context={"source": config_entries.SOURCE_DHCP}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "discovered_connection" with patch( "homeassistant.components.qnap_qsw.async_setup_entry", return_value=True, ) as mock_setup_entry, patch( "homeassistant.components.qnap_qsw.QnapQswApi.get_live", return_value=LIVE_MOCK, ), patch( "homeassistant.components.qnap_qsw.QnapQswApi.get_system_board", return_value=SYSTEM_BOARD_MOCK, ), patch( "homeassistant.components.qnap_qsw.QnapQswApi.post_users_login", return_value=USERS_LOGIN_MOCK, ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], { CONF_USERNAME: TEST_USERNAME, CONF_PASSWORD: TEST_PASSWORD, }, ) assert result2["type"] == "create_entry" assert result2["data"] == { CONF_USERNAME: TEST_USERNAME, CONF_PASSWORD: TEST_PASSWORD, } assert len(mock_setup_entry.mock_calls) == 1
8,396
async def test_trigger_flow(server):
    """
    test cascading async trigger flow from client to server and back
    Request the server to call us back later
    """
    async with WebSocketRpcClient(uri,
                                  ClientMethods(),
                                  default_response_timeout=4) as client:
        time_delta = 0.5
        name = "Logan Nine Fingers"
        # Ask for a wake up call
        await client.other.register_wake_up_call(time_delta=time_delta, name=name)
        # Wait for our wake-up call (or fail on timeout)
        await asyncio.wait_for(client.methods.woke_up_event.wait(), 5)
        # Note: each channel has its own copy of the methods object
        assert client.channel.methods.name == name
        assert client.channel.methods.message == MESSAGE
8,397
def write_version_to_file(version_number: str) -> None: """ Writes the version to the VERSION.txt file. Args: version_number: (str) the version to be written to the file Returns: None """ version_file_path: str = str(pathlib.Path(__file__).parent.absolute()) + "/monolithcaching/version.py" if os.path.exists(version_file_path): os.remove(version_file_path) with open(version_file_path, "w") as f: f.write(f"VERSION='{version_number}'")
8,398
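# Example: after this call, monolithcaching/version.py (resolved relative to the
# module that defines write_version_to_file, which must already exist on disk)
# contains the single line VERSION='1.2.3'.
write_version_to_file("1.2.3")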
def as_datetime(dct):
    """Decode datetime objects in data responses while decoding json."""
    try:
        kind, val = dct['__jsonclass__']
        if kind == 'datetime':
            # trac doesn't specify an offset for its timestamps, assume UTC
            return dateparse(val).astimezone(utc)
    except KeyError:
        pass
    # not a recognised __jsonclass__ payload: return the dict unchanged
    return dct
8,399
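# Usage as a json object_hook, assuming the module imports dateparse (e.g.
# dateutil.parser.parse) and a `utc` tzinfo as referenced above. The payload is a
# made-up example of trac's __jsonclass__ encoding.
import json

payload = '{"ts": {"__jsonclass__": ["datetime", "2024-01-02T03:04:05"]}}'
data = json.loads(payload, object_hook=as_datetime)
# data["ts"] is now a timezone-aware datetime object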