def xreplace_constrained(exprs, make, rule=None, costmodel=lambda e: True, repeat=False):
    """
    Unlike ``xreplace``, which replaces all objects specified in a mapper,
    this function replaces all objects satisfying two criteria: ::

        * The "matching rule" -- a function returning True if a node within ``expr``
          satisfies a given property, and as such should be replaced;
        * A "cost model" -- a function triggering replacement only if a certain
          cost (e.g., operation count) is exceeded. This function is optional.

    Note that there is not necessarily a relationship between the set of nodes
    for which the matching rule returns True and those nodes passing the cost
    model check. It might happen for example that, given the expression ``a + b``,
    all of ``a``, ``b``, and ``a + b`` satisfy the matching rule, but only
    ``a + b`` satisfies the cost model.

    :param exprs: The target SymPy expression, or a collection of SymPy expressions.
    :param make: Either a mapper M: K -> V, indicating how to replace an expression
                 in K with a symbol in V, or a function, used to construct new,
                 unique symbols. Such a function should take as input a parameter,
                 used to enumerate the new symbols.
    :param rule: The matching rule (a lambda function). May be left unspecified if
                 ``make`` is a mapper.
    :param costmodel: The cost model (a lambda function, optional).
    :param repeat: Repeatedly apply ``xreplace`` until no more replacements are
                   possible (optional, defaults to False).
    """
    found = OrderedDict()
    rebuilt = []

    # Define /replace()/ based on the user-provided /make/
    if isinstance(make, dict):
        rule = rule if rule is not None else (lambda i: i in make)
        replace = lambda i: make[i]
    else:
        assert callable(make) and callable(rule)

        def replace(expr):
            if isinstance(make, dict):
                return make[expr]
            temporary = found.get(expr)
            if temporary:
                return temporary
            else:
                temporary = make(replace.c)
                found[expr] = temporary
                replace.c += 1
                return temporary
        replace.c = 0  # Unique identifier for new temporaries

    def run(expr):
        if expr.is_Atom or expr.is_Indexed:
            return expr, rule(expr)
        elif expr.is_Pow:
            base, flag = run(expr.base)
            if flag and costmodel(base):
                return expr.func(replace(base), expr.exp, evaluate=False), False
            else:
                return expr.func(base, expr.exp, evaluate=False), flag
        else:
            children = [run(a) for a in expr.args]
            matching = [a for a, flag in children if flag]
            other = [a for a, _ in children if a not in matching]
            if matching:
                matched = expr.func(*matching, evaluate=False)
                if len(matching) == len(children) and rule(expr):
                    # Go look for longer expressions first
                    return matched, True
                elif rule(matched) and costmodel(matched):
                    # Replace what I can replace, then give up
                    rebuilt = expr.func(*(other + [replace(matched)]), evaluate=False)
                    return rebuilt, False
                else:
                    # Replace flagged children, then give up
                    replaced = [replace(e) for e in matching if costmodel(e)]
                    unreplaced = [e for e in matching if not costmodel(e)]
                    rebuilt = expr.func(*(other + replaced + unreplaced), evaluate=False)
                    return rebuilt, False
            return expr.func(*other, evaluate=False), False

    # Process the provided expressions
    for expr in as_tuple(exprs):
        assert expr.is_Equality
        root = expr.rhs
        while True:
            ret, _ = run(root)
            if repeat and ret != root:
                root = ret
            else:
                rebuilt.append(expr.func(expr.lhs, ret))
                break

    # Post-process the output
    found = [Eq(v, k) for k, v in found.items()]

    return found + rebuilt, found
5,800
def save_melspectrogram(directory_path, file_name, sampling_rate=44100):
    """ Will save spectrogram into current directory"""
    path_to_file = os.path.join(directory_path, file_name)
    data, sr = librosa.load(path_to_file, sr=sampling_rate, mono=True)
    data = scale(data)
    melspec = librosa.feature.melspectrogram(y=data, sr=sr, n_mels=128)
    # Convert to log scale (dB) using the peak power (max) as reference
    # per suggestion from Librosa: https://librosa.github.io/librosa/generated/librosa.feature.melspectrogram.html
    log_melspec = librosa.power_to_db(melspec, ref=np.max)
    librosa.display.specshow(log_melspec, sr=sr)

    # create saving directory
    directory = './melspectrograms'
    if not os.path.exists(directory):
        os.makedirs(directory)

    # strip only the extension (str.strip would drop any leading/trailing '.', 'w', 'a', 'v' characters)
    plt.savefig(directory + '/' + os.path.splitext(file_name)[0] + '.png')
5,801
def get_comp_rules() -> str:
    """
    Download the comp rules from Wizards site and return it
    :return: Comp rules text
    """
    response = download_from_wizards(COMP_RULES)

    # Get the comp rules from the website (as it changes often)
    # Also split up the regex find so we only have the URL
    comp_rules_url: str = re.findall(r"href=\".*\.txt\"", response)[0][6:-1]
    response = download_from_wizards(comp_rules_url).replace("’", "'")

    return response
5,802
def delete_vpc(vpc_id):
    """Delete a VPC."""
    client = get_client("ec2")
    params = {}
    params["VpcId"] = vpc_id
    return client.delete_vpc(**params)
5,803
def test_network_xor(alpha=0.1, iterations=1000):
    """Creates and trains a network against the XOR/XNOR data"""
    n, W, B = network_random_gaussian([2, 2, 2])
    X, Y = xor_data()
    return n.iterate_network(X, Y, alpha, iterations)
5,804
def assemble_book(draft__dir: Path, work_dir: Path, text_dir: Path) -> Path:
    """Merge contents of draft book skeleton with test-specific files for
    the book contents.
    """
    book_dir = work_dir / "test-book"
    # Copy skeleton from draft__dir
    shutil.copytree(draft__dir, book_dir)

    # Add metadata and text files for test book
    if (text_dir / "content.opf").is_file():
        shutil.copy(text_dir / "content.opf", book_dir / "src" / "epub")
    for file in text_dir.glob("*.xhtml"):
        shutil.copy(file, book_dir / "src" / "epub" / "text")

    # Rebuild file metadata
    must_run(f"se print-manifest-and-spine --in-place {book_dir}")
    must_run(f"se print-toc --in-place {book_dir}")
    return book_dir
5,805
def merid_advec_spharm(arr, v, radius):
    """Meridional advection using spherical harmonics."""
    _, d_dy = horiz_gradient_spharm(arr, radius)
    return v * d_dy
5,806
def run_win_pct(team_name, df):
    """
    Function that calculates a teams winning percentage Year over Year (YoY)

    Calculation: Number of wins by the total number of competitions. Then multiply
    by 100 = win percentage. Number of loses by the total number of competitions.
    Then multiply by 100 = loss percentage.

    This function also takes into account the home and away win/loss percentages.

    :param team_name: Takes in the state of the team_names dropdown
    :return: a dataframe That returns percentages for specific teams
    """
    df['home_team'] = df['home_team'].str.lower()
    df['away_team'] = df['away_team'].str.lower()
    team_name = team_name.lower()

    df_home = df[df['home_team'] == team_name]
    df_away = df[df['away_team'] == team_name]

    frames = [df_home, df_away]
    df_fill = pd.concat(frames)

    df = home_vs_away(df_fill, team_name)

    home_matches = df[df['home_team'] == team_name]
    away_matches = df[df['away_team'] == team_name]

    home_matches = home_matches.drop(columns=['away_team'])
    away_matches = away_matches.drop(columns=['home_team'])

    # wins per season
    home_team_win = home_matches.groupby(["home_team", "dateYear"])["outcome"].apply(
        lambda x: x[x.str.contains("win")].count()).reset_index()
    away_team_win = away_matches.groupby(['away_team', 'dateYear'])['outcome'].apply(
        lambda x: x[x.str.contains('win')].count()).reset_index()

    home_team_loss = home_matches.groupby(['home_team', 'dateYear'])['outcome'].apply(
        lambda x: x[x.str.contains('lose')].count()).reset_index()
    away_team_loss = away_matches.groupby(['away_team', 'dateYear'])['outcome'].apply(
        lambda x: x[x.str.contains('lose')].count()).reset_index()

    home_team_tie = home_matches.groupby(['home_team', 'dateYear'])['outcome'].apply(
        lambda x: x[x.str.contains('draw')].count()).reset_index()
    away_team_tie = away_matches.groupby(['away_team', 'dateYear'])['outcome'].apply(
        lambda x: x[x.str.contains('draw')].count()).reset_index()

    # matches played per season
    searchFor = ['win', 'lose', 'draw']
    matches_home = home_matches.groupby(['home_team', 'dateYear'])['outcome'].apply(
        lambda x: x[x.str.contains('|'.join(searchFor))].count()).reset_index()
    matches_away = away_matches.groupby(['away_team', 'dateYear'])['outcome'].apply(
        lambda x: x[x.str.contains('|'.join(searchFor))].count()).reset_index()

    # goals for and against
    match_numbers = matches_home.merge(matches_away, how='left', left_on='dateYear', right_on='dateYear')
    loss_merge = home_team_loss.merge(away_team_loss, how='left', left_on='dateYear', right_on='dateYear')
    tie_merge = home_team_tie.merge(away_team_tie, how='left', left_on='dateYear', right_on='dateYear')

    fin = home_team_win.merge(away_team_win, how='left', left_on='dateYear', right_on='dateYear')
    fin['Total Wins'] = fin['outcome_x'] + fin['outcome_y']
    fin['Total Losses'] = loss_merge['outcome_x'] + loss_merge['outcome_y']
    fin['Total Draws'] = tie_merge['outcome_x'] + tie_merge['outcome_y']
    fin['Total Matches'] = match_numbers['outcome_x'] + match_numbers['outcome_y']

    fin['Win PCT'] = (fin['Total Wins'] / fin['Total Matches'] * 100).round(2)
    fin['Loss PCT'] = (fin['Total Losses'] / fin['Total Matches'] * 100).round(2)
    fin['Draw PCT'] = (fin['Total Draws'] / fin['Total Matches'] * 100).round(2)

    # home match percentage
    fin['Home Win PCT'] = (home_team_win['outcome'] / matches_home['outcome'] * 100).round(2)
    fin['Away Win PCT'] = (away_team_win['outcome'] / matches_away['outcome'] * 100).round(2)
    fin['Home Loss PCT'] = (home_team_loss['outcome'] / matches_home['outcome'] * 100).round(2)
    fin['Away Loss PCT'] = (away_team_loss['outcome'] / matches_away['outcome'] * 100).round(2)

    return fin
5,807
def rbbh(args):
    """
    %prog rbbh A_vs_B.blast B_vs_A.blast

    Identify the reciprocal best blast hit for each query sequence in set A
    when compared to set B.

    This program assumes that the BLAST results have already been filtered
    based on a combination of %id, %cov, e-value cutoffs. BLAST output should
    be in tabular `-m 8` format.
    """
    p = OptionParser(rbbh.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    abfile, bafile, = args
    ab = Blast(abfile)
    ba = Blast(bafile)

    ab_hits = ab.best_hits
    ba_hits = ba.best_hits

    for aquery in ab_hits:
        ahit = ab_hits[aquery].subject
        ba_bline = ba_hits.get(ahit)
        if ba_bline:
            bhit = ba_bline.subject
            if bhit == aquery:
                print("\t".join(str(x) for x in (aquery, ahit)))
5,808
def get_container_info(pi_status):
    """
    Expects a dictionary data structure that include keys and values of the
    parameters that describe the containers running in a Raspberry Pi computer.
    Returns the input dictionary populated with values measured from the
    current status of one or more containers running in the Pi.
    """
    pi_status['containers'] = []
    if len(client.containers()) == 0:
        print('No container running')
        new_container = {
            'id': 'None',
            'cpuUsage': '0.0',
            'memUsage': '0.0',
            'name': 'None',         # the client.container() returns a list of names.
            'status': 'None',       # as a temporary solution, I take the first name
            'image': 'None',        # of the list.
            'port_host': '0',       # the client.container() returns a list of ports
            'port_container': '0'}  # getting the first, is a tmp solution
        pi_status['containers'].append(new_container)
    else:
        print('num container %d' % len(client.containers()))
        for container in client.containers():
            cmd = "docker stats %s --no-stream | grep %s | awk \'{print $2}\' " % (container['Id'], container['Id'])
            cpuUsage = system_call(cmd)
            cpuUsage_str = cpuUsage.replace("\n", "")
            cpuUsage_str = cpuUsage_str.replace("%", "")
            cmd = "docker stats %s --no-stream | grep %s | awk \'{print $6}\' " % (container['Id'], container['Id'])
            memUsage = system_call(cmd)
            memUsage_str = memUsage.replace("\n", "")
            memUsage_str = memUsage_str.replace("%", "")
            #dict_port_host= container['Ports'][0]
            #p_int=dict_port_host['PublicPort']
            #port_host_str= str(p_int).replace("\n", "")
            new_container = {
                'id': container['Id'],
                'cpuUsage': cpuUsage_str,
                'memUsage': memUsage_str,
                'name': container['Names'][0],   # the client.container() returns a list of names.
                'status': container['Status'],   # as a temporary solution, I take the first name
                'image': container['Image'],     # of the list.
                'port_host': '80',               # the client.container() returns a list of ports
                'port_container': '8000'}        # getting the first, is a tmp solution
            pi_status['containers'].append(new_container)
    return (len((pi_status['containers'])))
5,809
def formatSI(n: float) -> str:
    """Format the integer or float n to 3 significant digits + SI prefix."""
    s = ''
    if n < 0:
        n = -n
        s += '-'
    if type(n) is int and n < 1000:
        s = str(n) + ' '
    elif n < 1e-22:
        s = '0.00 '
    else:
        assert n < 9.99e26
        log = int(math.floor(math.log10(n)))
        i, j = divmod(log, 3)
        for _try in range(2):
            templ = '%.{}f'.format(2 - j)
            val = templ % (n * 10 ** (-3 * i))
            if val != '1000':
                break
            i += 1
            j = 0
        s += val + ' '
        if i != 0:
            s += 'yzafpnum kMGTPEZY'[i + 8]
    return s
5,810
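A short usage sketch for formatSI above; the calls and expected strings are illustrative and assume the indentation reconstructed here (they are not from the original module):

import math

print(formatSI(1234))          # '1.23 k'
print(formatSI(0.000001234))   # '1.23 u'
print(formatSI(5))             # '5 ' (small integers are returned without a prefix)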
def pemp(stat, stat0):
    """ Computes empirical values identically to bioconductor/qvalue empPvals """
    assert len(stat0) > 0
    assert len(stat) > 0

    stat = np.array(stat)
    stat0 = np.array(stat0)

    m = len(stat)
    m0 = len(stat0)

    statc = np.concatenate((stat, stat0))
    v = np.array([True] * m + [False] * m0)
    perm = np.argsort(-statc, kind="mergesort")  # reversed sort, mergesort is stable
    v = v[perm]

    u = np.where(v)[0]
    p = (u - np.arange(m)) / float(m0)

    # ranks can be fractional, we round down to the next integer, ranking returns values starting
    # with 1, not 0:
    ranks = np.floor(scipy.stats.rankdata(-stat)).astype(int) - 1
    p = p[ranks]
    p[p <= 1.0 / m0] = 1.0 / m0

    return p
5,811
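A hand-made illustration of pemp above; the inputs are invented for this example and are not from the original test data:

import numpy as np
import scipy.stats

stat = np.array([3.0, 1.0])             # observed statistics
stat0 = np.array([0.5, 1.5, 2.5, 3.5])  # null / permutation statistics
print(pemp(stat, stat0))                # [0.25 0.75]
# 3.0 is matched or exceeded by 1 of 4 null values -> 1/4; 1.0 by 3 of 4 -> 3/4.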
def help_message() -> str: """ Return help message. Returns ------- str Help message. """ msg = f"""neocities-sync Sync local directories with neocities.org sites. Usage: neocities-sync options] [--dry-run] [-c CONFIG] [-s SITE1] [-s SITE2] ... Options: -C CONFIG_FILE Path to the config file to use. (defaults to "{config_file_path_unexpanded}".) -s SITE Which site to sync (as specified in the config file). The default is to sync all sites in the config file. --dry-run Do not actually upload anything. -v Verbose output. -q Quiet output. -h, --help Show this help message and exit. Config file: The config file is an ini file, located at "{config_file_path_unexpanded}". Each section of the config file describes a different site (the name of the section doesn't need to be the same as the site's domain, since the api_key suffices to identify the site). The keys of the config file are: api_key (str) [required] The api key of the site. root_dir (path) [required] The local directory to sync. sync_disallowed (yes/no) [default: no] Whether to sync files that are only allowed for paying users. sync_hidden (yes/no) [default: no] Whether to sync hidden files. sync_vcs (yes/no) [default: no] Whether to sync version control files. allowed_extensions (list of str) [default: not set] Which file extensions to sync. If not set, all files are synced. remove_empty_dirs (yes/no) [default: yes] Whether to remove empty directories after sync. Example config: [site1] api_key = 6b9b522e7d8d93e88c464aafc421a61b root_dir = ~/path/to/site1 allowed_extensions = .html .css .js remove_empty_dirs = no [site2] api_key = 78559e6ebc35fe33eec21de05666a243 root_dir = /var/www/path/to/site2 allowed_extensions = .html .css .js .woff2 .neocitiesignore In any subdirectory of the root directory, a file named ".neocitiesignore" can be used to specify which files to ignore. The syntax is the same as the one for ".gitignore". Credits: This software was developed by Andre Kugland <kugland@gmail.com>.""" return msg
5,812
def list_videos(plugin, item_id, page, **kwargs):
    """Build videos listing"""
    resp = urlquick.get(URL_REPLAY % page)
    root = resp.parse()

    for video_datas in root.iterfind(".//a"):
        if video_datas.get('href') is not None:
            video_title = video_datas.find('.//h3').text
            video_image = video_datas.find('.//img').get('src')
            video_url = URL_ROOT + video_datas.get('href')

            item = Listitem()
            item.label = video_title
            item.art['thumb'] = item.art['landscape'] = video_image

            if 'overlayDescription' in video_datas:
                date_value = video_datas['overlayDescription'].split('|')[0]
                item.info.date(date_value, '%d/%m/%Y')

            item.set_callback(get_video_url, item_id=item_id, video_url=video_url)
            item_post_treatment(item, is_playable=True, is_downloadable=True)
            yield item

    yield Listitem.next_page(item_id=item_id, page=str(int(page) + 1))
5,813
def create_folder(folder_path):
    """ if folder does not exist, create it
    :param folder_path:
    """
    if not os.path.exists(folder_path):
        try:
            os.makedirs(folder_path)
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
5,814
def fuzzyCompareDouble(p1, p2):
    """
    Compare two doubles for approximate (relative) equality.
    """
    return abs(p1 - p2) * 100000. <= min(abs(p1), abs(p2))
5,815
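A couple of illustrative calls to fuzzyCompareDouble above; the values are chosen by hand and are not from the original test suite:

assert fuzzyCompareDouble(1.0, 1.000001)   # relative difference of ~1e-6 is accepted
assert not fuzzyCompareDouble(1.0, 1.1)    # relative difference of ~10% is rejected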
def test_get_map_not_found(client, session, map_fixtures):
    """Test if the response of /map is 404."""
    response = client.get("/maps/404")
    assert response.status_code == 404
5,816
def filter_date_df(date_time, df, var="date"):
    """Filter a dataframe for a given list of dates.

    Parameters
    ----------
    date_time: list
        list with dates.
    df: pandas.Dataframe
    var: str
        column to filter, default value is "date" but can be adaptable
        for other ones.

    Returns
    -------
    df_filter: pandas.Dataframe

    Examples
    --------
    >>> file1 = './data/WIN$N_1M_2015.08.12_2015.12.30_.csv',
    >>> file2 = './data/WIN$N_10M_2013.11.08_2021.01.22_.csv'
    >>> dates = filter_overlapping_dates(file1, file2)
    >>> df1 = pandas.read_csv(file1)
    >>> filter_date_df(dates_overlapping, df1).head()
             date      hour   open   high    low  close  real_volume  tick_volume
    0  2015.08.12  09:00:00  50280  50430  50255  50405          976          217
    1  2015.08.12  09:01:00  50405  50440  50335  50400         1589          445
    2  2015.08.12  09:02:00  50395  50410  50355  50355          465          102
    3  2015.08.12  09:03:00  50350  50360  50320  50325          474          150
    4  2015.08.12  09:04:00  50325  50330  50090  50190         2078          747
    """
    filters = [True if date in date_time else False for date in df[var]]
    df_filter = df[filters]
    df_filter = df_filter.drop(columns=["spread"], errors="ignore")
    df_filter = df_filter.dropna().drop_duplicates()
    df_filter = df_filter.sort_values(by=["date", "hour"])
    df_filter = df_filter.reset_index(drop=True)
    df_filter = format_hour(df_filter)
    return df_filter
5,817
def mcmc_fit(x, y, yerr, p_init, p_max, id, RESULTS_DIR, truths, burnin=500, nwalkers=12, nruns=10, full_run=500, diff_threshold=.5, n_independent=1000): """ Run the MCMC """ try: print("Total number of points = ", sum([len(i) for i in x])) print("Number of light curve sections = ", len(x)) except TypeError: print("Total number of points = ", len(x)) theta_init = np.log([np.exp(-12), np.exp(7), np.exp(-1), np.exp(-17), p_init]) runs = np.zeros(nruns) + full_run ndim = len(theta_init) print("p_init = ", p_init, "days, log(p_init) = ", np.log(p_init), "p_max = ", p_max) args = (x, y, yerr, np.log(p_init), p_max) # Time the LHF call. start = time.time() mod = MyModel(x, y, yerr, np.log(p_init), p_max) print("lnlike = ", mod.lnlike_split(theta_init), "lnprior = ", mod.Glnprior(theta_init), "\n") end = time.time() tm = end - start print("1 lhf call takes ", tm, "seconds") print("burn in will take", tm * nwalkers * burnin, "s") print("each run will take", tm * nwalkers * runs[0]/60, "mins") print("total = ", (tm * nwalkers * np.sum(runs) + tm * nwalkers * burnin)/60, "mins") # Run MCMC. mod = MyModel(x, y, yerr, np.log(p_init), p_max) model = emcee3.SimpleModel(mod.lnlike_split, mod.Glnprior) p0 = [theta_init + 1e-4 * np.random.rand(ndim) for i in range(nwalkers)] ensemble = emcee3.Ensemble(model, p0) # moves = emcee3.moves.KDEMove() # sampler = emcee3.Sampler(moves) sampler = emcee3.Sampler() print("burning in...") total_start = time.time() ensemble = sampler.run(ensemble, burnin) flat = sampler.get_coords(flat=True) logprob = sampler.get_log_probability(flat=True) ensemble = emcee3.Ensemble(model, p0) # repeating MCMC runs. autocorr_times, mean_ind, mean_diff = [], [], [] sample_array = np.zeros((nwalkers, sum(runs), ndim)) for i, run in enumerate(runs): print("run {0} of {1}".format(i, len(runs))) print("production run, {0} steps".format(int(run))) start = time.time() ensemble = sampler.run(ensemble, run) end = time.time() print("time taken = ", (end - start)/60, "minutes") f = h5py.File(os.path.join(RESULTS_DIR, "{0}.h5".format(id)), "w") data = f.create_dataset("samples", np.shape(sampler.get_coords(flat=True))) data[:, :] = sampler.get_coords(flat=True) f.close() print("samples = ", np.shape(sampler.get_coords(flat=True))) results = make_plot(sampler, x, y, yerr, id, RESULTS_DIR, truths, traces=True, tri=True, prediction=True) nsteps, _ = np.shape(sampler.get_coords(flat=True)) conv, autocorr_times, ind_samp, diff = \ evaluate_convergence(sampler.get_coords(flat=True), autocorr_times, diff_threshold, n_independent) mean_ind.append(ind_samp) mean_diff.append(diff) print("Converged?", conv) if conv: break total_end = time.time() total_time = total_end - total_start print("Total time taken = ", total_time/60., "minutes", total_time/3600., "hours") with open(os.path.join(RESULTS_DIR, "{0}_time.txt".format(id)), "w") as f: f.write("{}".format(total_time)) # col = "b" # if conv: # col = "r" # if autocorr_times: # plt.clf() # plt.plot(autocorr_times, color=col) # plt.savefig(os.path.join(RESULTS_DIR, "{0}_acorr".format(id))) # plt.clf() # plt.plot(mean_ind, color=col) # plt.savefig(os.path.join(RESULTS_DIR, "{0}_ind".format(id))) # plt.clf() # plt.plot(mean_diff, color=col) # plt.savefig(os.path.join(RESULTS_DIR, "{0}_diff".format(id))) return
5,818
def db_list(config: str, verbose: bool):
    """
    List the DBs found in the database directory.
    """
    m = CVDUpdate(config=config, verbose=verbose)
    m.db_list()
5,819
def set_transparent_color(rgb: tuple[int, int, int] = (0, 0, 0)) -> None:
    """Applies 100% transparency to <rgb>. An application window using this
    value as its clear color ("background") will also be transparent. This may
    cause other renders of the same color within the application window to
    also be transparent.

    Args:
        rgb (tuple[int, int, int]): The color that will be transparent.

    # NOTE: This will bork the functionality of the window title bar, and does
    not hide it. Calling `unset_transparent_color` should restore functionality.

    # NOTE: Make certain that this call is scheduled AFTER your application/
    viewport has been made and is showing.
    """
    global _LWA_COLORKEY, _is_transparent
    _is_transparent = True
    SetLayeredWindowAttributes(_APP_HANDLE, rgb + (255,), _LWA_COLORKEY)
5,820
def pproxy_desired_access_log_line(url):
    """Return a desired pproxy log entry given a url."""
    qe_url_parts = urllib.parse.urlparse(url)
    protocol_port = '443' if qe_url_parts.scheme == 'https' else '80'
    return 'http {}:{}'.format(qe_url_parts.hostname, protocol_port)
5,821
def unused_port() -> int:
    """Return a port that is unused on the current host."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("127.0.0.1", 0))
        return s.getsockname()[1]
5,822
def get_axioma_risk_free_rate(conn):
    """
    Get the USD risk free rate provided by Axioma and convert it into a daily
    risk free rate assuming a 252-day trading calendar.
    """
    query = """
        select data_date, Risk_Free_Rate
        from axioma_currency
        where currencycode = 'USD'
        order by data_date
    """
    df = pd.read_sql_query(query, conn.sql.CONN)
    df['Risk_Free_Rate'] = df['Risk_Free_Rate'].astype('float32')
    df[RFR] = (1 + df['Risk_Free_Rate']) ** (1.0/252.0) - 1
    df.drop(columns=['Risk_Free_Rate'], inplace=True)
    return df
5,823
def update_user_count_estimated(set_of_contributors, anonymous_coward_comments_counter):
    """
    Total user count estimate update in the presence of anonymous users.

    Currently we use a very simplistic model for estimating the full user count.

    Inputs:  - set_of_contributors: A python set of user ids.
             - anonymous_coward_comments_counter: The number of comments posted by anonymous user(s).

    Output:  estimated_anonymous_contributor_count: The estimated number of users active in the information cascade.
    """
    eponymous_user_count = len(set_of_contributors)

    if anonymous_coward_comments_counter > 0:
        # TODO: Of course, I can use a much more sophisticated model.
        estimated_anonymous_user_count = (1 + anonymous_coward_comments_counter)/2
    else:
        estimated_anonymous_user_count = 0.0

    estimated_user_count = eponymous_user_count + estimated_anonymous_user_count

    return estimated_user_count
5,824
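As a worked illustration of the estimate in update_user_count_estimated above (numbers invented for this example): with 10 eponymous contributors and 5 anonymous comments, the function returns 10 + (1 + 5)/2 = 13.0 estimated users.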
def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, return_n_test_samples=False, return_times=False, return_estimator=False, split_progress=None, candidate_progress=None, error_score=np.nan): """override the sklearn.model_selection._validation._fit_and_score Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None The target variable to try to predict in the case of supervised learning. scorer : A single callable or dict mapping scorer name to the callable If it is a single callable, the return value for ``train_scores`` and ``test_scores`` is a single float. For a dict, it should be one mapping the scorer name to the scorer callable object / function. The callable object / fn should have signature ``scorer(estimator, X, y)``. train : array-like of shape (n_train_samples,) Indices of training samples. test : array-like of shape (n_test_samples,) Indices of test samples. verbose : int The verbosity level. error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : bool, default=False Compute and return score on training set. return_parameters : bool, default=False Return parameters that has been used for the estimator. split_progress : {list, tuple} of int, default=None A list or tuple of format (<current_split_id>, <total_num_of_splits>). candidate_progress : {list, tuple} of int, default=None A list or tuple of format (<current_candidate_id>, <total_number_of_candidates>). return_n_test_samples : bool, default=False Whether to return the ``n_test_samples``. return_times : bool, default=False Whether to return the fit/score times. return_estimator : bool, default=False Whether to return the fitted estimator. Returns ------- result : dict with the following attributes train_scores : dict of scorer name -> float Score on training set (for all the scorers), returned only if `return_train_score` is `True`. test_scores : dict of scorer name -> float Score on testing set (for all the scorers). n_test_samples : int Number of test samples. fit_time : float Time spent for fitting in seconds. score_time : float Time spent for scoring in seconds. parameters : dict or None The parameters that have been evaluated. estimator : estimator object The fitted estimator. fit_failed : bool The estimator failed to fit. """ if estimator.__class__.__name__ != 'KerasGBatchClassifier': return _sk_fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=return_train_score, return_parameters=return_parameters, return_n_test_samples=return_n_test_samples, return_times=return_times, return_estimator=return_estimator, split_progress=split_progress, candidate_progress=candidate_progress, error_score=error_score) if not isinstance(error_score, numbers.Number) and error_score != 'raise': raise ValueError( "error_score must be the string 'raise' or a numeric value. 
" "(Hint: if using 'raise', please make sure that it has been " "spelled correctly.)" ) progress_msg = "" if verbose > 2: if split_progress is not None: progress_msg = f" {split_progress[0]+1}/{split_progress[1]}" if candidate_progress and verbose > 9: progress_msg += (f"; {candidate_progress[0]+1}/" f"{candidate_progress[1]}") if verbose > 1: if parameters is None: params_msg = '' else: sorted_keys = sorted(parameters) # Ensure deterministic o/p params_msg = (', '.join(f'{k}={parameters[k]}' for k in sorted_keys)) if verbose > 9: start_msg = f"[CV{progress_msg}] START {params_msg}" print(f"{start_msg}{(80 - len(start_msg)) * '.'}") # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = _check_fit_params(X, fit_params, train) if parameters is not None: # clone after setting parameters in case any parameters # are estimators (like pipeline steps) # because pipeline doesn't clone steps in fit cloned_parameters = {} for k, v in parameters.items(): cloned_parameters[k] = clone(v, safe=False) estimator = estimator.set_params(**cloned_parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) result = {} try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception: # Note fit time as time until error fit_time = time.time() - start_time score_time = 0.0 if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): if isinstance(scorer, dict): test_scores = {name: error_score for name in scorer} if return_train_score: train_scores = test_scores.copy() else: test_scores = error_score if return_train_score: train_scores = error_score warnings.warn("Estimator fit failed. The score on this train-test" " partition for these parameters will be set to %f. " "Details: \n%s" % (error_score, format_exc()), FitFailedWarning) result["fit_failed"] = True else: result["fit_failed"] = False fit_time = time.time() - start_time test_scores = estimator.evaluate(X_test, y_test, scorer, error_score) score_time = time.time() - start_time - fit_time if return_train_score: train_scores = estimator.evaluate( X_train, y_train, scorer, error_score ) if verbose > 1: total_time = score_time + fit_time end_msg = f"[CV{progress_msg}] END " result_msg = params_msg + (";" if params_msg else "") if verbose > 2: if isinstance(test_scores, dict): for scorer_name in sorted(test_scores): result_msg += f" {scorer_name}: (" if return_train_score: scorer_scores = train_scores[scorer_name] result_msg += f"train={scorer_scores:.3f}, " result_msg += f"test={test_scores[scorer_name]:.3f})" else: result_msg += ", score=" if return_train_score: result_msg += (f"(train={train_scores:.3f}, " f"test={test_scores:.3f})") else: result_msg += f"{test_scores:.3f}" result_msg += f" total time={logger.short_format_time(total_time)}" # Right align the result_msg end_msg += "." * (80 - len(end_msg) - len(result_msg)) end_msg += result_msg print(end_msg) result["test_scores"] = test_scores if return_train_score: result["train_scores"] = train_scores if return_n_test_samples: result["n_test_samples"] = _num_samples(X_test) if return_times: result["fit_time"] = fit_time result["score_time"] = score_time if return_parameters: result["parameters"] = parameters if return_estimator: result["estimator"] = estimator return result
5,825
def prettify_eval(set_: str, accuracy: float, correct: int, avg_loss: float, n_instances: int,
                  stats: Dict[str, List[int]]):
    """Returns string with prettified classification results"""
    table = 'problem_type accuracy\n'
    for k in sorted(stats.keys()):
        accuracy_ = stats[k][0]/stats[k][1]
        accuracy_ = accuracy_*100
        table += k
        table += ' '
        table += '{:.2f}%\n'.format(accuracy_)
    return '\n' + set_ + ' set average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        avg_loss, correct, n_instances, accuracy) + table + '\n'
5,826
def GetRecentRevisions(repository, project=None, num_revisions=20):
    """Get Recent Revisions.

    Args:
      repository: models.Repository, the repository whose revisions we ought to get.
      project: models.Project, restrict the query to a given project.
      num_revisions: int, maximum number of revisions to fetch.

    Returns:
      list of models.Revisions
    """
    q = db.Query(models.Revision).filter('repository_name =', repository.name)
    # TODO(nicksantos): filter by project once the revisions have projects.
    # But talk to dbentley to make sure that we really want to do this.
    # if project:
    #     q.filter('project =', project)
    # TODO(dbentley): eventually, it would be great to use the partial
    # order implied in the actual VCS.
    q.order('-time')
    q.order('-first_seen')
    return list(q.fetch(num_revisions))
5,827
def load_location(doc_name):
    """Load a location from db by name."""
    doc_ref = get_db().collection("locations").document(doc_name)
    doc = doc_ref.get()
    if not doc.exists:
        return None
    else:
        return doc.to_dict()
5,828
def test_sanity(tmpdir, manifest_file, manifest):
    """
    Does our mock manifest contents evaluate the same as a file?
    """
    _file = tmpdir.join('manifest.yaml')
    _file.write(manifest_file)
    assert get_manifest_from_path(str(_file)).contents == manifest.contents
5,829
def refresh_jwt_public_keys(user_api=None, logger=None): """ Update the public keys that the Flask app is currently using to validate JWTs. The get_keys_url helper function will prefer the user_api's .well-known/openid-configuration endpoint, but if no jwks_uri is found, will default to /jwt/keys. In the latter case, the response from ``/jwt/keys`` should look like this: .. code-block:: javascript { "keys": [ [ "key-id-01", "-----BEGIN PUBLIC KEY---- ... -----END PUBLIC KEY-----\n" ], [ "key-id-02", "-----BEGIN PUBLIC KEY---- ... -----END PUBLIC KEY-----\n" ] ] } In either case, the keys are put into a dictionary and assigned to ``flask.current_app.jwt_public_keys`` with user_api as the key. Keys are serialized to PEM if not already. Args: user_api (Optional[str]): the URL of the user API to get the keys from; default to whatever the flask app is configured to use logger (Optional[Logger]): the logger; default to app's parent logger Return: None Side Effects: - Reassign ``flask.current_app.jwt_public_keys[user_api]`` to the keys obtained from ``get_jwt_public_keys``, as a dictionary. Raises: ValueError: if user_api is not provided or set in app config """ logger = logger or get_logger(__name__, log_level="info") # First, make sure the app has a ``jwt_public_keys`` attribute set up. missing_public_keys = ( not hasattr(flask.current_app, "jwt_public_keys") or not flask.current_app.jwt_public_keys ) if missing_public_keys: flask.current_app.jwt_public_keys = {} user_api = user_api or flask.current_app.config.get("USER_API") if not user_api: raise ValueError("no URL(s) provided for user API") path = get_keys_url(user_api) try: jwt_public_keys = httpx.get(path).json()["keys"] except: raise JWTError( "Attempted to refresh public keys for {}," "but could not get keys from path {}.".format(user_api, path) ) logger.info("Refreshing public key cache for issuer {}...".format(user_api)) logger.debug( "Received public keys:\n{}".format(json.dumps(str(jwt_public_keys), indent=4)) ) issuer_public_keys = {} for key in jwt_public_keys: if "kty" in key and key["kty"] == "RSA": logger.debug( "Serializing RSA public key (kid: {}) to PEM format.".format(key["kid"]) ) # Decode public numbers https://tools.ietf.org/html/rfc7518#section-6.3.1 n_padded_bytes = base64.urlsafe_b64decode( key["n"] + "=" * (4 - len(key["n"]) % 4) ) e_padded_bytes = base64.urlsafe_b64decode( key["e"] + "=" * (4 - len(key["e"]) % 4) ) n = int.from_bytes(n_padded_bytes, "big", signed=False) e = int.from_bytes(e_padded_bytes, "big", signed=False) # Serialize and encode public key--PyJWT decode/validation requires PEM rsa_public_key = rsa.RSAPublicNumbers(e, n).public_key(default_backend()) public_bytes = rsa_public_key.public_bytes( serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo, ) # Cache the encoded key by issuer issuer_public_keys[key["kid"]] = public_bytes else: logger.debug( "Key type (kty) is not 'RSA'; assuming PEM format. Skipping key serialization. (kid: {})".format( key[0] ) ) issuer_public_keys[key[0]] = key[1] flask.current_app.jwt_public_keys.update({user_api: issuer_public_keys}) logger.info("Done refreshing public key cache for issuer {}.".format(user_api))
5,830
def save_data():
    """@Numpy Examples
    Saving and loading data

    Examples:
        # save a single array
        >>> a = np.asarray([1, 2, 3])
        >>> fp = r'./array.npy'  # the extension is .npy
        >>> np.save(fp, a)
        >>> _a = np.load(fp)
        >>> assert (a == _a).all()
        >>> _ = os.system(f'rm {fp}')

        # save several objects, not limited to arrays
        >>> a = np.asarray([1, 2, 3])
        >>> b = 'test'
        >>> fp = r'./data.npz'  # the extension is .npz
        >>> np.savez(fp, a=a, b=b)
        >>> d = np.load(fp)
        >>> assert (a == d['a']).all() and b == d['b']
        >>> _ = os.system(f'rm {fp}')
    """
5,831
def get_file(ctx, file_path):
    """
    Get content of a file from OneDrive
    """
    tokens = auth.ensure_tokens(ctx.client_id, ctx.tokens)
    ctx.save(tokens=tokens)
    session = auth.get_request_session(tokens)

    # Calling helper method to get the file
    api_util.get_file(session, file_path)
5,832
def plotMultiROC(y_true,         # list of true labels
                 y_scores,       # array of scores for each class of shape [n_samples, n_classes]
                 title='Multiclass ROC Plot',
                 n_points=100,   # reinterpolates to have exactly N points
                 labels=None,    # list of labels for each class
                 threshdot=None,
                 plot=True,      # 1/0. If 0, returns plotly json object, but doesnt plot
                 ):
    """
    Makes a multiclass ROC plot. Can also be used for binary ROC plot
    """
    y_true = np.array(y_true)
    y_scores = np.array(y_scores)
    if y_scores.ndim == 1:  # convert to [n_samples, n_classes] even if 1 class
        y_scores = np.atleast_2d(y_scores).T
    N, n_classes = y_scores.shape
    if n_classes == 1:  # needed to avoid inverting when doing binary classification
        y_scores *= -1
        if threshdot is not None:
            threshdot *= -1

    # calc ROC curves & AUC
    fpr = dict()
    tpr = dict()
    thresh = dict()
    thresh_txt = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], thresh[i] = sk.metrics.roc_curve(y_true == i, y_scores[:, i])
        roc_auc[i] = sk.metrics.auc(fpr[i], tpr[i])
        if n_points is not None:
            x = np.linspace(0, 1, n_points)
            indxs = np.searchsorted(tpr[i], x)
            tpr[i] = tpr[i][indxs]
            fpr[i] = fpr[i][indxs]
            thresh[i] = thresh[i][indxs]
        thresh_txt[i] = ['T=%.4f' % t for t in thresh[i]]

    if labels is None:
        labels = ['C%d' % n for n in range(1, n_classes+1)]
    labels = [str(x) for x in labels]  # convert labels to str

    # make traces
    traces = []
    [traces.append(go.Scatter(y=tpr[i], x=fpr[i],
                              name=labels[i] + '. AUC= %.2f' % (roc_auc[i]),
                              text=thresh_txt[i],
                              legendgroup=str(i),
                              line={'width': 1}))
     for i in range(n_classes)]
    traces += [go.Scatter(y=[0, 1], x=[0, 1],
                          name='Random classifier',
                          line={'width': 1, 'dash': 'dot'})]

    if threshdot is not None:
        for i in range(n_classes):
            c_indx = (np.abs(thresh[i]-threshdot)).argmin()
            traces += [go.Scatter(x=[fpr[i][c_indx]]*2, y=[tpr[i][c_indx]]*2,
                                  mode='markers', name='Threshold',
                                  legendgroup=str(i), showlegend=False)]

    # make layout
    layout = go.Layout(title=title,
                       xaxis={'title': 'FPR'},
                       yaxis={'title': 'TPR'},
                       legend=dict(x=1),
                       hovermode='closest',
                       )

    fig = go.Figure(data=traces, layout=layout)

    return plotOut(fig, plot)
5,833
def fasta_to_dict(fasta_file):
    """Consolidate deflines and sequences from FASTA as dictionary"""
    deflines = []
    sequences = []
    sequence = ""
    with open(fasta_file, "r") as file:
        for line in file:
            if line.startswith(">"):
                deflines.append(line.rstrip().lstrip('>'))
                if sequence:
                    sequences.append(sequence)
                    sequence = ""
            else:
                sequence += line.rstrip()
    sequences.append(sequence)
    fasta_dict = {}
    for x, defline in enumerate(deflines):
        fasta_dict[defline] = sequences[x]
    return fasta_dict
5,834
def find_roots(graph):
    """ return nodes which you can't traverse down any further """
    return [n for n in graph.nodes() if len(list(graph.predecessors(n))) == 0]
5,835
def _is_test_file(filesystem, dirname, filename):
    """Return true if the filename points to a test file."""
    return (_has_supported_extension(filesystem, filename) and
            not is_reference_html_file(filename))
5,836
def kitchen_door_device() -> Service:
    """Build the kitchen door device."""
    transitions: TransitionFunction = {
        "unique": {
            "open_door_kitchen": "unique",
            "close_door_kitchen": "unique",
        },
    }
    final_states = {"unique"}
    initial_state = "unique"
    return build_deterministic_service_from_transitions(transitions, initial_state, final_states)
5,837
def show_image(txt):
    """ Print ASCII art (saved as a txt file). """
    with open(txt, "r") as f:
        for line in f.readlines():
            print(line, end="")
            sleep(0.01)
    return
5,838
def create_csv(file_path: str, params_names: dict, site_number: str):
    """
    Function that creates the final version of the CSV files

    Parameters
    ----------
    file_path : str
        [description]
    params_names : dict
        [description]
    site_number : str
        [description]
    """
    df = pd.read_csv(file_path, sep="\t")
    for key, value in params_names.items():
        df[value] = df[key]
    df.to_csv(site_number + "_flow_data.csv")
5,839
def test_json_file_is_valid(path):
    """Tests whether YAML data file is a valid YAML document."""
    with path.open() as f:
        assert yaml.safe_load(f)
5,840
def sql_connection_delete(
    request: http.HttpRequest,
    pk: int,
) -> http.JsonResponse:
    """AJAX processor for the delete SQL connection operation.

    :param request: AJAX request

    :param pk: primary key for the connection

    :return: AJAX response to handle the form
    """
    conn = models.SQLConnection.objects.filter(pk=pk).first()
    if not conn:
        # The view is not there. Redirect to workflow detail
        return http.JsonResponse({'html_redirect': reverse('home')})

    return services.delete(
        request,
        conn,
        reverse('connection:sqlconn_delete', kwargs={'pk': conn.id}))
5,841
def simpson_integration( title = text_control('<h2>Simpson integration</h2>'), f = input_box(default = 'x*sin(x)+x+1', label='$f(x)=$'), n = slider(2,100,2,6, label='# divisions'), interval_input = selector(['from slider','from keyboard'], label='Integration interval', buttons=True), interval_s = range_slider(-10,10,default=(0,10), label="slider: "), interval_g = input_grid(1,2,default=[[0,10]], label="keyboard: "), output_form = selector(['traditional','table','none'], label='Computations form', buttons=True)): """ Interact explaining the simpson method for definite integrals, based on work by Lauri Ruotsalainen, 2010 (based on the application "Numerical integrals with various rules" by Marshall Hampton and Nick Alexander) INPUT: - ``f`` -- function of variable x to integrate - ``n`` -- number of divisions (mult. of 2) - ``interval_input`` -- swithes the input for interval between slider and keyboard - ``interval_s`` -- slider for interval to integrate - ``interval_g`` -- input grid for interval to integrate - ``output_form`` -- the computation is formatted in a traditional form, in a table or missing EXAMPLES: Invoked in the notebook, the following command will produce the fully formatted interactive mathlet. In the command line, it will simply return the underlying HTML and Sage code which creates the mathlet:: sage: interacts.calculus.simpson_integration() <html>...</html> """ x = SR.var('x') f = symbolic_expression(f).function(x) if interval_input == 'from slider': interval = interval_s else: interval = interval_g[0] def parabola(a, b, c): from sage.all import solve A, B, C = SR.var("A, B, C") K = solve([A*a[0]**2+B*a[0]+C==a[1], A*b[0]**2+B*b[0]+C==b[1], A*c[0]**2+B*c[0]+C==c[1]], [A, B, C], solution_dict=True)[0] f = K[A]*x**2+K[B]*x+K[C] return f xs = []; ys = [] dx = float(interval[1]-interval[0])/n for i in range(n+1): xs.append(interval[0] + i*dx) ys.append(f(x=xs[-1])) parabolas = Graphics() lines = Graphics() for i in range(0, n-1, 2): p = parabola((xs[i],ys[i]),(xs[i+1],ys[i+1]),(xs[i+2],ys[i+2])) parabolas += plot(p(x=x), (x, xs[i], xs[i+2]), color="red") lines += line([(xs[i],ys[i]), (xs[i],0), (xs[i+2],0)],color="red") lines += line([(xs[i+1],ys[i+1]), (xs[i+1],0)], linestyle="-.", color="red") lines += line([(xs[-1],ys[-1]), (xs[-1],0)], color="red") html(r'Function $f(x)=%s$'%latex(f(x))) show(plot(f(x),x,interval[0],interval[1]) + parabolas + lines, xmin = interval[0], xmax = interval[1]) numeric_value = integral_numerical(f,interval[0],interval[1])[0] approx = dx/3 *(ys[0] + sum([4*ys[i] for i in range(1,n,2)]) + sum([2*ys[i] for i in range(2,n,2)]) + ys[n]) html(r'Integral value to seven decimal places is: $\displaystyle\int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x} = %.6f$'% (interval[0],interval[1], N(numeric_value,digits=7))) if output_form == 'traditional': sum_formula_html = r"\frac{d}{3} \cdot \left[ f(x_0) + %s + f(x_{%s})\right]" % ( ' + '.join([ r"%s \cdot f(x_{%s})" %(i%2*(-2)+4, i+1) for i in range(0,n-1)]), n ) sum_placement_html = r"\frac{%.2f}{3} \cdot \left[ f(%.2f) + %s + f(%.2f)\right]" % ( dx, N(xs[0],digits=5), ' + '.join([ r"%s \cdot f(%.2f)" %(i%2*(-2)+4, N(xk, digits=5)) for i, xk in enumerate(xs[1:-1])]), N(xs[n],digits=5) ) sum_values_html = r"\frac{%.2f}{3} \cdot \left[ %s %s %s\right]" %( dx, "%.2f + "%N(ys[0],digits=5), ' + '.join([ r"%s \cdot %.2f" %(i%2*(-2)+4, N(yk, digits=5)) for i, yk in enumerate(ys[1:-1])]), " + %.2f"%N(ys[n],digits=5) ) html(r''' <div class="math"> \begin{align*} \int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x} & \approx %s \\ & = 
%s \\ & = %s \\ & = %.6f \end{align*} </div> ''' % ( interval[0], interval[1], sum_formula_html, sum_placement_html, sum_values_html, N(approx,digits=7) )) elif output_form == 'table': s = [['$i$','$x_i$','$f(x_i)$','$m$','$m\cdot f(x_i)$']] for i in range(0,n+1): if i==0 or i==n: j = 1 else: j = (i+1)%2*(-2)+4 s.append([i, xs[i], ys[i],j,N(j*ys[i])]) s.append(['','','','$\sum$','$%s$'%latex(3/dx*approx)]) pretty_print(table(s, header_row=True)) html(r'$\int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x}\approx\frac {%.2f}{3}\cdot %s=%s$'% (interval[0], interval[1],dx,latex(3/dx*approx),latex(approx)))
5,842
def object_reactions_form_target(object):
    """
    Get the target URL for the object reaction form.

    Example::

        <form action="{% object_reactions_form_target object %}" method="post">
    """
    ctype = ContentType.objects.get_for_model(object)
    return reverse("comments-ink-react-to-object", args=(ctype.id, object.id))
5,843
def check_args(**kwargs):
    """
    Check arguments for themis load function

    Parameters:
        **kwargs : a dictionary of arguments
            Possible arguments are: probe, level
            The arguments can be: a string or a list of strings
            Invalid arguments are ignored (e.g. probe = 'g', level='l0', etc.)
            Invalid argument names are ignored (e.g. 'probes', 'lev', etc.)

    Returns: list
        Prepared arguments in the same order as the inputs

    Examples:
        res_probe = check_args(probe='a')
        (res_probe, res_level) = check_args(probe='a b', level='l2')
        (res_level, res_probe) = check_args(level='l1', probe=['a', 'b'])

        # With incorrect argument probes:
        res = check_args(probe='a', level='l2', probes='a b') : res = [['a'], ['l2']]
    """
    valid_keys = {'probe', 'level'}
    valid_probe = {'a', 'b', 'c', 'd', 'e'}
    valid_level = {'l1', 'l2'}

    # Return list of values from arg_list that are only included in valid_set
    def valid_list(arg_list, valid_set):
        valid_res = []
        for arg in arg_list:
            if arg in valid_set:
                valid_res.append(arg)
        return valid_res

    # Return list
    res = []
    for key, values in kwargs.items():
        if key.lower() not in valid_keys:
            continue

        # resulting list
        arg_values = []

        # convert string into list, or ignore the argument
        if isinstance(values, str):
            values = [values]
        elif not isinstance(values, list):
            continue

        for value in values:
            arg_values.extend(value.strip().lower().split())

        # simple validation of the arguments
        if key.lower() == 'probe':
            arg_values = valid_list(arg_values, valid_probe)
        if key.lower() == 'level':
            arg_values = valid_list(arg_values, valid_level)

        res.append(arg_values)

    return res
5,844
def make_theta_mask(aa):
    """ Gives the theta of the bond originating each atom. """
    mask = np.zeros(14)
    # backbone
    mask[0] = BB_BUILD_INFO["BONDANGS"]['ca-c-n']   # nitrogen
    mask[1] = BB_BUILD_INFO["BONDANGS"]['c-n-ca']   # c_alpha
    mask[2] = BB_BUILD_INFO["BONDANGS"]['n-ca-c']   # carbon
    mask[3] = BB_BUILD_INFO["BONDANGS"]['ca-c-o']   # oxygen
    # sidechain
    for i, theta in enumerate(SC_BUILD_INFO[aa]['angles-vals']):
        mask[4 + i] = theta
    return mask
5,845
def create_keypoint(n, *args):
    """
    Parameters:
    -----------

    n : int
        Keypoint number

    *args: tuple, int, float
        *args must be a tuple of (x,y,z) coordinates or
        x, y and z coordinates as arguments.

    ::

        # Example
        kp1 = 1
        kp2 = 2
        create_keypoint(kp1,(0,0,0)) # x,y,z as tuple
        create_keypoint(kp2,1,1,1) # x,y,z as arguments

    """
    if len(args) == 1 and isinstance(args[0], tuple):
        x, y, z = args[0][0], args[0][1], args[0][2]
    else:
        x, y, z = args[0], args[1], args[2]
    _kp = "K,%g,%g,%g,%g" % (n, x, y, z)
    return _kp
5,846
def wait_for_sidekiq(gl):
    """
    Return a helper function to wait until there are no busy sidekiq processes.

    Use this with asserts for slow tasks (group/project/user creation/deletion).
    """
    def _wait(timeout=30, step=0.5):
        for _ in range(timeout):
            time.sleep(step)
            busy = False
            processes = gl.sidekiq.process_metrics()["processes"]
            for process in processes:
                if process["busy"]:
                    busy = True
            if not busy:
                return True
        return False

    return _wait
5,847
def ldd(file):
    """
    Given a file return all the libraries referenced by the file

    @type file: string
    @param file: Full path to the file

    @return: List containing linked libraries required by the file
    @rtype: list
    """
    rlist = []
    if os.path.exists(file) and shutil.which("ldd") is not None:
        process = subprocess.Popen(["ldd", file], shell=False,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        for line in process.stdout.readlines():
            tokens = line.split(b"=>")
            if len(tokens) == 2:
                lib_loc = ((tokens[1].strip()).split(b" "))[0].strip()
                if os.path.exists(lib_loc):
                    rlist.append(os.path.abspath(lib_loc).decode("utf-8"))
    return rlist
5,848
def test_list_unsigned_long_min_length_4_nistxml_sv_iv_list_unsigned_long_min_length_5_4(mode, save_output, output_format):
    """
    Type list/unsignedLong is restricted by facet minLength with value 10.
    """
    assert_bindings(
        schema="nistData/list/unsignedLong/Schema+Instance/NISTSchema-SV-IV-list-unsignedLong-minLength-5.xsd",
        instance="nistData/list/unsignedLong/Schema+Instance/NISTXML-SV-IV-list-unsignedLong-minLength-5-4.xml",
        class_name="NistschemaSvIvListUnsignedLongMinLength5",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
5,849
def insert_node_after(new_node, insert_after):
    """Insert new_node into buffer after insert_after."""
    next_element = insert_after['next']
    next_element['prev'] = new_node
    new_node['next'] = insert_after['next']
    insert_after['next'] = new_node
    new_node['prev'] = insert_after
    return new_node
5,850
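A minimal sketch of the dict-based doubly linked nodes that insert_node_after above operates on; the node layout is inferred from the function body and the example values are illustrative:

# Two existing nodes linked as a <-> b
a = {'prev': None, 'next': None}
b = {'prev': None, 'next': None}
a['next'], b['prev'] = b, a

new = {'prev': None, 'next': None}
insert_node_after(new, a)

# The buffer now reads a <-> new <-> b
assert a['next'] is new and new['prev'] is a
assert new['next'] is b and b['prev'] is new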
def test_random_game_json_identity(base):
    """Test random game to/from json identity"""
    game = random_game(base)
    jgame = json.dumps(game.to_json())
    copy = paygame.game_json(json.loads(jgame))
    assert game == copy
5,851
def test_rules_check_dependencies(mocker, rules):
    """ Test the dependencies in process rules. """
    mocked_hash = mocker.patch('supvisors.process.ProcessRules.check_hash_nodes')
    mocked_auto = mocker.patch('supvisors.process.ProcessRules.check_autorestart')
    mocked_start = mocker.patch('supvisors.process.ProcessRules.check_start_sequence')
    mocked_stop = mocker.patch('supvisors.process.ProcessRules.check_stop_sequence')
    # test with no hash
    rules.hash_node_names = []
    # check dependencies
    rules.check_dependencies('dummy')
    # test calls
    assert mocked_start.call_args_list == [call('dummy')]
    assert mocked_stop.call_args_list == [call('dummy')]
    assert mocked_auto.call_args_list == [call('dummy')]
    assert not mocked_hash.called
    # reset mocks
    mocker.resetall()
    # test with hash
    rules.hash_node_names = ['*']
    # check dependencies
    rules.check_dependencies('dummy')
    # test calls
    assert mocked_start.call_args_list == [call('dummy')]
    assert mocked_stop.call_args_list == [call('dummy')]
    assert mocked_auto.call_args_list == [call('dummy')]
    assert mocked_hash.call_args_list == [call('dummy')]
5,852
def apply_wavelet_decomposition(mat, wavelet_name, level=None):
    """
    Apply 2D wavelet decomposition.

    Parameters
    ----------
    mat : array_like
        2D array.
    wavelet_name : str
        Name of a wavelet. E.g. "db5"
    level : int, optional
        Decomposition level. It is constrained to return an array with
        a minimum size of larger than 16 pixels.

    Returns
    -------
    list
        The first element is an 2D-array, next elements are tuples of three
        2D-arrays. i.e [mat_n, (cH_level_n, cV_level_n, cD_level_n), ...,
        (cH_level_1, cV_level_1, cD_level_1)]
    """
    (nrow, ncol) = mat.shape
    max_level = int(
        min(np.floor(np.log2(nrow / 16.0)), np.floor(np.log2(ncol / 16.0))))
    if (level is None) or (level > max_level) or (level < 1):
        level = max_level
    return pywt.wavedec2(mat, wavelet_name, level=level)
5,853
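A hypothetical usage sketch for apply_wavelet_decomposition above; the input size is illustrative and assumes numpy and pywt are available:

import numpy as np
import pywt

mat = np.random.rand(256, 256)
coeffs = apply_wavelet_decomposition(mat, "db5")
# coeffs[0] is the coarsest approximation; each following element is a
# (cH, cV, cD) tuple of detail coefficients, finest level last.
print(len(coeffs) - 1)  # number of decomposition levels (4 for a 256x256 input)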
def ACE(img, ratio=4, radius=300):
    """The implementation of ACE"""
    global para
    para_mat = para.get(radius)
    if para_mat is not None:
        pass
    else:
        size = radius * 2 + 1
        para_mat = np.zeros((size, size))
        for h in range(-radius, radius + 1):
            for w in range(-radius, radius + 1):
                if not h and not w:
                    continue
                para_mat[radius + h, radius + w] = 1.0 / \
                    math.sqrt(h ** 2 + w ** 2)
        para_mat /= para_mat.sum()
        para[radius] = para_mat

    h, w = img.shape[:2]
    p_h, p_w = [0] * radius + list(range(h)) + [h - 1] * radius, \
               [0] * radius + list(range(w)) + [w - 1] * radius
    temp = img[np.ix_(p_h, p_w)]

    res = np.zeros(img.shape)
    for i in range(radius * 2 + 1):
        for j in range(radius * 2 + 1):
            if para_mat[i][j] == 0:
                continue
            res += (para_mat[i][j] *
                    np.clip((img - temp[i:i + h, j:j + w]) * ratio, -1, 1))
    return res
5,854
def classname(object, modname):
    """Get a class name and qualify it with a module name if necessary."""
    name = object.__name__
    if object.__module__ != modname:
        name = object.__module__ + '.' + name
    return name
5,855
def demc_block(y, pars, pmin, pmax, stepsize, numit, sigma, numparams, cummodels, functype, myfuncs, funcx, iortholist, fits, gamma=None, isGR=True, ncpu=1): """ This function uses a differential evolution Markov chain with block updating to assess uncertainties. PARAMETERS ---------- y: Array containing dependent data Params: Array of initial guess for parameters #Pmin: Array of parameter minimum values #Pmax: Array of parameter maximum values stepsize: Array of 1-sigma change in parameter per iteration Numit: Number of iterations to perform Sigma: Standard deviation of data noise in y Numparams: Number of parameters for each model Cummodels: Cumulative number of models used Functype: Define function type (eclipse, ramp, ip, etc), see models.py Myfuncs: Pointers to model functions Funcx: Array of x-axis values for myfuncs fit: List of fit objects gamma: Multiplcation factor in parameter differential, establishes acceptance rate OUTPUTS ------- This function returns an array of the best fitting parameters, an array of all parameters over all iterations, and numaccept. REFERENCES ---------- Cajo J. F. Ter Braak, "Genetic algorithms and Markov Chain Monte Carlo: Differential Evolution Markov Chain makes Bayesian computing easy," Biometrics, 2006. HISTORY ------- Adapted from mcmc.py Kevin Stevenson, UChicago August 2012 """ global fit fit = fits params = np.copy(pars) nchains, nump = params.shape nextp = np.copy(params) #Proposed parameters bestp = np.copy(params[0]) #Best-fit parameters pedit = np.copy(params) #Editable parameters numaccept = 0 allparams = np.zeros((nump, nchains, numit)) inotfixed = np.where(stepsize != 0)[0] ishare = np.where(stepsize < 0)[0] #ifree = np.where(stepsize > 0)[0] outside = np.zeros((nchains, nump)) numevents = len(fit) intsteps = np.min((numit/5,1e5)) isrednoise = False wavelet = None noisefunc = None #UPDATE PARAMTER(S) EQUAL TO OTHER PARAMETER(S) if (ishare.size > 0): for s in range(ishare.size): params[:,ishare[s]] = params[:,int(abs(stepsize[ishare[s]])-1)] #Define blocks blocks = [] for j in range(numevents): #Build list of blocks blocks = np.concatenate((blocks, fit[j].blocks)) for i in range(cummodels[j],cummodels[j+1]): if functype[i] == 'noise': # Set up for modified chi-squared calculation using correlated noise isrednoise = True wavelet = fit[j].etc[k] noisefunc = myfuncs[i] blocks = blocks.astype(int) iblocks = [] eps = [] numblocks = blocks.max() + 1 numbp = np.zeros(numblocks) ifree = [[] for i in range(numblocks)] for b in range(numblocks): #Map block indices whereb = np.where(blocks == b)[0] iblocks.append(whereb) #Locate indices of free parameters in each block for w in whereb: ifree[b] = np.concatenate((ifree[b],numparams[w]+np.where(stepsize[numparams[w]:numparams[w+1]] > 0)[0])).astype(int) #Calculate number of free parameters per block numbp[b] += len(ifree[b]) eps.append(npr.normal(0, stepsize[ifree[b]]/100., [numit,numbp[b]])) print("Number of free parameters per block:") print(numbp) numa = np.zeros(numblocks) if gamma == None: gamma = 2.38/np.sqrt(2.*numbp) print("gamma:") print(gamma) #Calc chi-squared for model type using current params currchisq = np.zeros(nchains) currmodel = [[] for i in range(numevents)] for j in range(numevents): currmodel[j], noisepars = calcModel(nchains, functype, myfuncs, pedit, params, iortholist[j], funcx, cummodels, numparams, j) currchisq += calcChisq(y[j], sigma[j], currmodel[j], nchains, params, j, noisepars, isrednoise, wavelet, noisefunc) bestchisq = currchisq[0] #GENERATE RANDOM NUMBERS FOR MCMC 
numnotfixed = len(inotfixed) unif = npr.rand(numit,nchains) randchains = npr.randint(0,nchains,[numit,nchains,2]) #START TIMER clock = timer.Timer(numit,progress = np.arange(0.05,1.01,0.05)) #Run Differential Evolution Monte Carlo algorithm 'numit' times for m in range(numit): #Select next event (block) to update b = m % numblocks #Remove model component(s) that are taking a step pedit = np.copy(params) nextmodel = currmodel[:] for j in range(numevents): ymodels, noisepars = calcModel(nchains, functype, myfuncs, pedit, params, iortholist[j], funcx, cummodels, numparams, j, iblocks[b]) nextmodel[j] = np.divide(currmodel[j],ymodels) #Generate next step using differential evolution for n in range(nchains): rand1, rand2 = randchains[m,n] while rand1 == n or rand2 == n or rand1 == rand2: rand1, rand2 = npr.randint(0,nchains,2) nextp[n,ifree[b]] = params[n,ifree[b]] + gamma[b]*(params[rand1,ifree[b]]-params[rand2,ifree[b]]) + eps[b][m] #CHECK FOR NEW STEPS OUTSIDE BOUNDARIES ioutside = np.where(np.bitwise_or(nextp[n] < pmin, nextp[n] > pmax))[0] if (len(ioutside) > 0): nextp[n,ioutside] = np.copy(params[n,ioutside]) outside[n,ioutside] += 1 #UPDATE PARAMTER(S) EQUAL TO OTHER PARAMETER(S) if (ishare.size > 0): for s in range(ishare.size): nextp[:,ishare[s]] = nextp[:,int(abs(stepsize[ishare[s]])-1)] #COMPUTE NEXT CHI SQUARED AND ACCEPTANCE VALUES pedit = np.copy(nextp) nextchisq = np.zeros(nchains) for j in range(numevents): ymodels, noisepars = calcModel(nchains, functype, myfuncs, pedit, params, iortholist[j], funcx, cummodels, numparams, j, iblocks[b]) nextmodel[j] = np.multiply(nextmodel[j],ymodels) nextchisq += calcChisq(y[j], sigma[j], nextmodel[j], nchains, params, j, noisepars, isrednoise, wavelet, noisefunc) #CALCULATE ACCEPTANCE PROBABILITY accept = np.exp(0.5 * (currchisq - nextchisq)) #print(b,currchisq[0], nextchisq[0], accept[0]) for n in range(nchains): if accept[n] >= 1: #ACCEPT BETTER STEP numaccept += 1 numa[b] += 1 params[n] = np.copy(nextp[n]) currchisq[n] = np.copy(nextchisq[n]) if (currchisq[n] < bestchisq): bestp = np.copy(params[n]) bestchisq = np.copy(currchisq[n]) elif unif[m,n] <= accept[n]: #ACCEPT WORSE STEP numaccept += 1 numa[b] += 1 params[n] = np.copy(nextp[n]) currchisq[n] = np.copy(nextchisq[n]) allparams[:,:,m] = params.T #PRINT INTERMEDIATE INFO if ((m+1) % intsteps == 0) and (m > 0): print("\n" + time.ctime()) #print("Number of times parameter tries to step outside its prior:") #print(outside) print("Current Best Parameters: ") print(bestp) #Apply Gelman-Rubin statistic if isGR: #Check for no accepted steps in each chain #stdev = np.std(allparams[inotfixed],axis=1) #ichain = np.where(stdev > 0.)[0] #Call test #psrf, meanpsrf = gr.convergetest(allparams[inotfixed,ichain,:m+1], len(ichain)) psrf, meanpsrf = gr.convergetest(allparams[inotfixed,:,:m+1], nchains) numconv = np.sum(np.bitwise_and(psrf < 1.01, psrf >= 1.00)) print("Gelman-Rubin statistic for free parameters:") print(psrf) if numconv == numnotfixed: #and m >= 1e4: print("All parameters have converged to within 1% of unity. Halting MCMC.") allparams = allparams[:,:,:m+1] break clock.check(m+1) print("Acceptance rate per block (%):") print(100.*numa*numblocks/numit/nchains) allparams = np.reshape(allparams,(nump, (m+1)*nchains)) return allparams, bestp, numaccept, (m+1)*nchains
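The block update above is Ter Braak's differential-evolution proposal followed by a Metropolis accept/reject. Below is a minimal, self-contained sketch of that core step on a toy Gaussian target; demc_step, log_prob and every other name in it are illustrative only and are not part of this pipeline.

import numpy as np

def demc_step(chains, log_prob, gamma, eps_scale, rng):
    """One DE-MC sweep: x' = x + gamma*(x_r1 - x_r2) + eps, then Metropolis."""
    nchains, ndim = chains.shape
    for n in range(nchains):
        others = [i for i in range(nchains) if i != n]
        r1, r2 = rng.choice(others, size=2, replace=False)
        proposal = (chains[n] + gamma * (chains[r1] - chains[r2])
                    + rng.normal(0.0, eps_scale, size=ndim))
        # Accept with probability min(1, exp(logp(proposal) - logp(current)))
        if np.log(rng.uniform()) < log_prob(proposal) - log_prob(chains[n]):
            chains[n] = proposal
    return chains

rng = np.random.default_rng(0)
ndim, nchains = 3, 8
gamma = 2.38 / np.sqrt(2.0 * ndim)        # same scaling used above
log_prob = lambda x: -0.5 * np.sum(x**2)  # standard-normal toy target
chains = rng.normal(size=(nchains, ndim))
for _ in range(1000):
    chains = demc_step(chains, log_prob, gamma, 1e-4, rng)
print(chains.mean(axis=0))                # should be roughly zero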
5,856
def test_construction_resistance_low(): """Calculate properties of a low capacitance wall.""" con = ConstructionLayered( materials=[ MaterialResistanceOnly(thermal_resistance=0.060), mats.Aluminium(thickness=1 / 1000), MaterialResistanceOnly(thermal_resistance=0.020), ], timestep=3600, ) # initialise values qe = np.zeros(24) qo = np.zeros(24) temp_in = np.full_like(SOL_AIR, 273.15 + 24) temp_out = SOL_AIR # use a large number of iterations for convergence iterate_heat_transfer(con, temp_in, temp_out, qe, qo, iterations=100) # check inside and outside heat transfer assert np.allclose( np.sum(qe) + np.sum(qo), 0.0, rtol=0.0, atol=HEAT_TRANSFER_SUM_EPS ) # check all the ctf sums are close to K assert np.allclose(_calculate_sums(con._ctfs), con.thermal_transmittance)
5,857
def shape5d(a, data_format="NDHWC"):
    """
    Ensure a 5D shape, to use with 5D symbolic functions.

    Args:
        a: an int or a tuple/list of length 3

    Returns:
        list: of length 5. if ``a`` is an int, return ``[1, a, a, a, 1]``
            or ``[1, 1, a, a, a]`` depending on data_format "NDHWC" or "NCDHW".
    """
    s3d = shape3d(a)
    if data_format == "NDHWC":
        return [1] + s3d + [1]
    else:
        return [1, 1] + s3d
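A usage sketch, assuming shape5d as defined above and a shape3d helper with the usual semantics (an int a becomes [a, a, a], a length-3 sequence is returned as a list); the stub below exists only to make the example self-contained.

def shape3d(a):
    # illustrative stand-in for the real helper
    if isinstance(a, int):
        return [a, a, a]
    assert len(a) == 3
    return list(a)

assert shape5d(3) == [1, 3, 3, 3, 1]
assert shape5d(3, data_format="NCDHW") == [1, 1, 3, 3, 3]
assert shape5d((2, 3, 4)) == [1, 2, 3, 4, 1]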
5,858
def replace_bytes(fbin, start_addr, new_bytes, fout=None):
    """Replace bytes starting at a given address in a binary image.

    This function replaces a variable number of bytes of a binary image
    and saves the result as a new image.

    :param fbin: input image file
    :param start_addr: start address at which to replace bytes
    :param new_bytes: new bytes to write into the image
    :param fout: output image filename, optional. Default is fbin_replaced.bin
    """
    if fout is None:
        fout = os.path.splitext(fbin)[0] + "_replaced.bin"

    with open(fbin, 'rb') as f1, open(fout, 'wb') as f2:
        f1.seek(0)
        f2.write(f1.read(start_addr))
        f2.write(new_bytes)
        f1.seek(start_addr + len(new_bytes))
        f2.write(f1.read())
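A quick usage sketch of replace_bytes on a throwaway 16-byte image, patching 4 bytes at offset 2 and checking that the surrounding bytes and total length are preserved.

import os
import tempfile

tmpdir = tempfile.mkdtemp()
fbin = os.path.join(tmpdir, "image.bin")
with open(fbin, "wb") as f:
    f.write(bytes(range(16)))

replace_bytes(fbin, 2, b"\xde\xad\xbe\xef")  # writes image_replaced.bin

with open(os.path.join(tmpdir, "image_replaced.bin"), "rb") as f:
    data = f.read()

assert data[:2] == bytes([0, 1])
assert data[2:6] == b"\xde\xad\xbe\xef"
assert data[6:] == bytes(range(6, 16))
assert len(data) == 16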
5,859
def _compute_node_to_inventory_dict(compute_node): """Given a supplied `objects.ComputeNode` object, return a dict, keyed by resource class, of various inventory information. :param compute_node: `objects.ComputeNode` object to translate """ result = {} # NOTE(jaypipes): Ironic virt driver will return 0 values for vcpus, # memory_mb and disk_gb if the Ironic node is not available/operable # WRS: allow max_unit to be number of vcpus * allocation ratio to allow # for instances with dedicated cpu_policy to allocate correctly. Given # change to max unit have to set allocation ratio in resource inventory # to 1 so capacity check is correct. if compute_node.vcpus > 0: result[VCPU] = { 'total': int(compute_node.vcpus * compute_node.cpu_allocation_ratio), 'reserved': CONF.reserved_host_cpus, 'min_unit': 1, 'max_unit': int(compute_node.vcpus * compute_node.cpu_allocation_ratio), 'step_size': 1, 'allocation_ratio': 1, } if compute_node.memory_mb > 0: result[MEMORY_MB] = { 'total': compute_node.memory_mb, 'reserved': CONF.reserved_host_memory_mb, 'min_unit': 1, 'max_unit': compute_node.memory_mb, 'step_size': 1, 'allocation_ratio': compute_node.ram_allocation_ratio, } if compute_node.local_gb > 0: # TODO(johngarbutt) We should either move to reserved_host_disk_gb # or start tracking DISK_MB. reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb( CONF.reserved_host_disk_mb) result[DISK_GB] = { 'total': compute_node.local_gb, 'reserved': reserved_disk_gb, 'min_unit': 1, 'max_unit': compute_node.local_gb, 'step_size': 1, 'allocation_ratio': compute_node.disk_allocation_ratio, } return result
5,860
def test_octagonal_qubit_index(): """test that OctagonalQubit properly calculates index and uses it for comparison""" qubit0 = OctagonalQubit(0) assert qubit0.index == 0 assert OctagonalQubit(1) > qubit0
5,861
def resnet152(pretrained=False, num_classes=1000, ifmask=True, **kwargs): """Constructs a ResNet-152 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ block = Bottleneck model = ResNet(block, [3, 8, 36, 3], num_classes=1000, **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) model.fc = nn.Linear(512 * block.expansion, num_classes) if ifmask: model.lmask = LearnableMaskLayer(feature_dim=512* block.expansion, num_classes=num_classes) return model
5,862
def register_driver(cls):
    """
    Registers a driver class

    Args:
        cls (object): Driver class.

    Returns:
        name: driver name
    """
    _discover_on_demand()

    if not issubclass(cls, BaseDriver):
        raise QiskitChemistryError(
            'Could not register class {}: it is not a subclass of BaseDriver'.format(cls))

    return _register_driver(cls)
5,863
def ceil(array, value):
    """
    Returns the smallest index i such that array[i] >= value, assuming
    ``array`` is sorted in non-decreasing order. If every element is smaller
    than ``value``, returns ``len(array)`` (the behaviour of
    ``bisect.bisect_left``).
    """
    l = 0
    r = len(array) - 1
    i = r + 1
    while l <= r:
        m = l + int((r - l) / 2)
        if array[m] >= value:
            # This mid index is a candidate for the index we are searching for
            # so save it, and continue searching for a smaller candidate on the
            # left side.
            i = m
            r = m - 1
        else:
            # This mid index is not a candidate so continue searching the right
            # side.
            l = m + 1

    return i
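On a sorted input the function behaves like the standard library's bisect_left, which gives a convenient cross-check (assumes ceil as defined above).

import bisect

data = [1, 3, 3, 7, 9]
for v in [0, 1, 2, 3, 4, 9, 10]:
    assert ceil(data, v) == bisect.bisect_left(data, v)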
5,864
def FindOrgByUnionEtIntersection(Orgs): """Given a set of organizations considers all the possible unions and intersections to find all the possible organizations""" NewNewOrgs=set([]) KnownOrgs=copy.deepcopy(Orgs) for h in combinations(Orgs,2): #checks only if one is not contained in the other NewNewOrgs|=frozenset([OrgLibrary.check(h[0]|h[1])]) #checks only if one is not contained in the other NewNewOrgs|=frozenset([OrgLibrary.check(h[0]&h[1])]) FoundOrgs=NewNewOrgs NewOrgs=NewNewOrgs-KnownOrgs while NewOrgs: NewNewOrgs=set([]) for h in combinations(NewOrgs,2): #checks only if one is not contained in the other NewNewOrgs|=frozenset([OrgLibrary.check(h[0]|h[1])]) #checks only if one is not contained in the other NewNewOrgs|=frozenset([OrgLibrary.check(h[0]&h[1])]) for h in NewOrgs: for t in KnownOrgs: #checks only if one is not contained in the other NewNewOrgs|=frozenset([OrgLibrary.check(h|t)]) #checks only if one is not contained in the other NewNewOrgs|=frozenset([OrgLibrary.check(h&t)]) KnownOrgs|=NewOrgs NewOrgs=NewNewOrgs-KnownOrgs#NewOrgs is what we actually found KnownOrgs-=Orgs return KnownOrgs
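The same closure-under-union-and-intersection idea in a self-contained form, without the OrgLibrary.check canonicalisation step (here every candidate set is kept as-is), which makes the fixed-point structure of the loop above easier to see.

from itertools import combinations

def close_under_union_and_intersection(sets):
    known = {frozenset(s) for s in sets}
    while True:
        new = set()
        for a, b in combinations(known, 2):
            for candidate in (a | b, a & b):
                if candidate not in known:
                    new.add(candidate)
        if not new:
            return known
        known |= new

base = [{1, 2}, {2, 3}, {3, 4}]
closure = close_under_union_and_intersection(base)
assert frozenset({2}) in closure           # {1,2} & {2,3}
assert frozenset({1, 2, 3, 4}) in closure  # union of all three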
5,865
def get_sf_fa( constraint_scale: float = 1 ) -> pyrosetta.rosetta.core.scoring.ScoreFunction: """ Get score function for full-atom minimization and scoring """ sf = pyrosetta.create_score_function('ref2015') sf.set_weight( pyrosetta.rosetta.core.scoring.ScoreType.atom_pair_constraint, 5.0 * constraint_scale) sf.set_weight(pyrosetta.rosetta.core.scoring.ScoreType.dihedral_constraint, 1.0 * constraint_scale) sf.set_weight(pyrosetta.rosetta.core.scoring.ScoreType.angle_constraint, 1.0 * constraint_scale) return sf
5,866
def py_lines_with_regions(): """Find lines with regions.""" lines, regions = {}, ev('s:R()') specific_line, rev = evint('l:specific_line'), evint('a:reverse') for r in regions: line = int(r['l']) #called for a specific line if specific_line and line != specific_line: continue #add region index to indices for that line lines.setdefault(line, []) lines[line].append(int(r['index'])) for line in lines: #sort list so that lower indices are put farther in the list if len(lines[line]) > 1: lines[line].sort(reverse=rev) let('lines', lines)
5,867
def print_red(content: str):
    """Prints the content in red

    :param content: the content to print
    :type content: str
    """
    print(Fore.RED + content + Fore.RESET)
5,868
async def test_flow_user(hass: HomeAssistant): """Test user initialized flow.""" port = com_port() port_select = usb.human_readable_device_name( port.device, port.serial_number, port.manufacturer, port.description, port.vid, port.pid, ) with patch_config_flow_modem(), _patch_setup(): result = await hass.config_entries.flow.async_init( DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={CONF_DEVICE: port_select}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["data"] == {CONF_DEVICE: port.device} result = await hass.config_entries.flow.async_init( DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={CONF_DEVICE: port_select}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "no_devices_found"
5,869
def make_keypoint(class_name: str, x: float, y: float, subs: Optional[List[SubAnnotation]] = None) -> Annotation: """ Creates and returns a keypoint, aka point, annotation. Parameters ---------- class_name : str The name of the class for this ``Annotation``. x : float The ``x`` value of the point. y : float The ``y`` value of the point. subs : Optional[List[SubAnnotation]], default: None List of ``SubAnnotation``s for this ``Annotation``. Returns ------- Annotation A point ``Annotation``. """ return Annotation(AnnotationClass(class_name, "keypoint"), {"x": x, "y": y}, subs or [])
5,870
def plane_mean(window): """Plane mean kernel to use with convolution process on image Args: window: the window part to use from image Returns: Normalized residual error from mean plane Example: >>> from ipfml.filters.kernels import plane_mean >>> import numpy as np >>> window = np.arange(9).reshape([3, 3]) >>> result = plane_mean(window) >>> (result < 0.0001) True """ window = np.array(window) width, height = window.shape # prepare data nb_elem = width * height xs = [int(i / height) for i in range(nb_elem)] ys = [i % height for i in range(nb_elem)] zs = np.array(window).flatten().tolist() # get residual (error) from mean plane computed tmp_A = [] tmp_b = [] for i in range(len(xs)): tmp_A.append([xs[i], ys[i], 1]) tmp_b.append(zs[i]) b = np.matrix(tmp_b).T A = np.matrix(tmp_A) fit = (A.T * A).I * A.T * b errors = b - A * fit residual = np.linalg.norm(errors) return residual
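The same residual can be computed without the legacy np.matrix API; the sketch below fits the plane with numpy.linalg.lstsq and, like the docstring example, gives a residual of essentially zero for an exactly planar window. It is an alternative formulation for reference, not a drop-in replacement for the kernel above.

import numpy as np

def plane_residual(window):
    window = np.asarray(window, dtype=float)
    rows, cols = np.indices(window.shape)
    A = np.column_stack([rows.ravel(), cols.ravel(), np.ones(window.size)])
    coeffs, _, _, _ = np.linalg.lstsq(A, window.ravel(), rcond=None)
    return np.linalg.norm(window.ravel() - A @ coeffs)

window = np.arange(9).reshape(3, 3)
assert plane_residual(window) < 1e-10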
5,871
def test_get_eth2_staking_deposits_fetch_from_db( # pylint: disable=unused-argument ethereum_manager, call_order, ethereum_manager_connect_at_start, inquirer, price_historian, freezer, ): """ Test new on-chain requests for existing addresses requires a difference of REQUEST_DELTA_TS since last used query range `end_ts`. """ freezer.move_to(datetime.fromtimestamp(EXPECTED_DEPOSITS[0].timestamp)) ts_now = int(datetime.now().timestamp()) # 1604506685 database = MagicMock() database.get_used_query_range.side_effect = [ (Timestamp(ts_now - (2 * REQUEST_DELTA_TS)), Timestamp(ts_now)), (Timestamp(ts_now - (2 * REQUEST_DELTA_TS)), Timestamp(ts_now)), (Timestamp(ts_now - (2 * REQUEST_DELTA_TS)), Timestamp(ts_now)), ] database.get_eth2_deposits.side_effect = [ [], # no on-chain request, nothing in DB [], # no on-chain request, nothing in DB [EXPECTED_DEPOSITS[0]], # on-chain request, deposit in DB ] with patch( 'rotkehlchen.chain.ethereum.eth2._get_eth2_staking_deposits_onchain', ) as mock_get_eth2_staking_deposits_onchain: # 3rd call return mock_get_eth2_staking_deposits_onchain.return_value = [EXPECTED_DEPOSITS[0]] wait_until_all_nodes_connected( ethereum_manager_connect_at_start=ethereum_manager_connect_at_start, ethereum=ethereum_manager, ) message_aggregator = MessagesAggregator() # First call deposit_results_onchain = get_eth2_staking_deposits( ethereum=ethereum_manager, addresses=[ADDR1], msg_aggregator=message_aggregator, database=database, ) assert deposit_results_onchain == [] mock_get_eth2_staking_deposits_onchain.assert_not_called() # NB: Move time to ts_now + REQUEST_DELTA_TS - 1s freezer.move_to(datetime.fromtimestamp(ts_now + REQUEST_DELTA_TS - 1)) # Second call deposit_results_onchain = get_eth2_staking_deposits( ethereum=ethereum_manager, addresses=[ADDR1], msg_aggregator=message_aggregator, database=database, ) assert deposit_results_onchain == [] mock_get_eth2_staking_deposits_onchain.assert_not_called() # NB: Move time to ts_now + REQUEST_DELTA_TS (triggers request) freezer.move_to(datetime.fromtimestamp(ts_now + REQUEST_DELTA_TS)) # Third call deposit_results_onchain = get_eth2_staking_deposits( ethereum=ethereum_manager, addresses=[ADDR1], msg_aggregator=message_aggregator, database=database, ) assert deposit_results_onchain == [EXPECTED_DEPOSITS[0]] mock_get_eth2_staking_deposits_onchain.assert_called_with( ethereum=ethereum_manager, addresses=[ADDR1], msg_aggregator=message_aggregator, from_ts=Timestamp(ts_now), to_ts=Timestamp(ts_now + REQUEST_DELTA_TS), )
5,872
def test_list_unsigned_short_length_3_nistxml_sv_iv_list_unsigned_short_length_4_2(mode, save_output, output_format): """ Type list/unsignedShort is restricted by facet length with value 8. """ assert_bindings( schema="nistData/list/unsignedShort/Schema+Instance/NISTSchema-SV-IV-list-unsignedShort-length-4.xsd", instance="nistData/list/unsignedShort/Schema+Instance/NISTXML-SV-IV-list-unsignedShort-length-4-2.xml", class_name="NistschemaSvIvListUnsignedShortLength4", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
5,873
def _eval_field_amplitudes(lat, k=5, n=1, amp=1e-5, field='v', wave_type='Rossby', parameters=Earth): """ Evaluates the latitude dependent amplitudes at a given latitude point. Parameters ---------- lat : Float, array_like or scalar latitude(radians) k : Integer, scalar spherical wave-number (dimensionless) Default : 5 n : Integer, scaler wave-mode (dimensionless) Default : 1 amp : Float, scalar wave amplitude(m/sec) Default : 1e-5 field : str pick 'phi' for geopotential height, 'u' for zonal velocity and 'v' for meridional velocity Defualt : 'v' wave_type: str choose Rossby waves or WIG waves or EIG waves. Defualt: Rossby parameters: dict planetary parameters dict with keys: angular_frequency: float, (rad/sec) gravitational_acceleration: float, (m/sec^2) mean_radius: float, (m) layer_mean_depth: float, (m) Defualt: Earth's parameters defined above Returns ------- Either u_hat(m/sec), v_hat(m/sec) or p_hat(m^2/sec^2) : Float, array_like or scalar Evaluation of the amplitudes for the zonal velocity, or meridional velocity or the geopotential height respectivly. Notes ----- This function supports k>=1 and n>=1 inputs only. Special treatments are required for k=0 and n=-1,0/-. """ if not isinstance(wave_type, str): raise TypeError(str(wave_type) + ' should be string...') # unpack dictionary into vars: OMEGA = _unpack_parameters(parameters, 'angular_frequency') G = _unpack_parameters(parameters, 'gravitational_acceleration') A = _unpack_parameters(parameters, 'mean_radius') H0 = _unpack_parameters(parameters, 'layer_mean_depth') # Lamb's parameter: Lamb = (2. * OMEGA * A)**2 / (G * H0) # evaluate wave frequency: all_omegas = _eval_omega(k, n, parameters) # check for validity of wave_type: if wave_type not in all_omegas: raise KeyError(wave_type + ' should be Rossby, EIG or WIG...') omega = all_omegas[wave_type] # evaluate the meridional velocity amp first: v_hat = _eval_meridional_velocity(lat, Lamb, n, amp) # evaluate functions for u and phi: v_hat_plus_1 = _eval_meridional_velocity(lat, Lamb, n + 1, amp) v_hat_minus_1 = _eval_meridional_velocity(lat, Lamb, n - 1, amp) # Eq. (6a) in the text if field == 'v': return v_hat # Eq. (6b) in the text elif field == 'u': u_hat = (- ((n + 1) / 2.0)**0.5 * (omega / (G * H0)**0.5 + k / A) * v_hat_plus_1 - ((n) / 2.0)**0.5 * (omega / (G * H0)**0.5 - k / A) * v_hat_minus_1) # pre-factors u_hat = G * H0 * Lamb**0.25 / \ (1j * A * (omega**2 - G * H0 * (k / A)**2)) * u_hat return u_hat # Eq. (6c) in the text elif field == 'phi': p_hat = (- ((n + 1) / 2.0)**0.5 * (omega + (G * H0)**0.5 * k / A) * v_hat_plus_1 + ((n) / 2.0)**0.5 * (omega - (G * H0)**0.5 * k / A) * v_hat_minus_1) p_hat = G * H0 * Lamb**0.25 / \ (1j * A * (omega**2 - G * H0 * (k / A)**2)) * p_hat return p_hat else: raise KeyError('field must be u, v or phi')
5,874
def assign(dest, src, transpose_on_convert=None): """Resizes the destination and copies the source.""" src = as_torch(src, transpose_on_convert) if isinstance(dest, Variable): dest.data.resize_(*shp(src)).copy_(src) elif isinstance(dest, torch.Tensor): dest.resize_(*shp(src)).copy_(src) else: raise ValueError("{}: unknown type".format(type(dest)))
5,875
def get_uframe_info(): """ Get uframe configuration information. (uframe_url, uframe timeout_connect and timeout_read.) """ uframe_url = current_app.config['UFRAME_URL'] + current_app.config['UFRAME_URL_BASE'] timeout = current_app.config['UFRAME_TIMEOUT_CONNECT'] timeout_read = current_app.config['UFRAME_TIMEOUT_READ'] return uframe_url, timeout, timeout_read
5,876
def main(): """ Set the client extensions of an open Trade in an Account """ parser = argparse.ArgumentParser() # # Add the command line argument to parse to the v20 config # common.config.add_argument(parser) parser.add_argument( "orderid", help=( "The ID of the Order to get. If prepended " "with an '@', this will be interpreted as a client Order ID" ) ) extnArgs = OrderArguments(parser) extnArgs.add_client_order_extensions() extnArgs.add_client_trade_extensions() args = parser.parse_args() # # Create the api context based on the contents of the # v20 config file # api = args.config.create_context() extnArgs.parse_arguments(args) # # Submit the request to create the Market Order # response = api.order.set_client_extensions( args.config.active_account, args.orderid, **extnArgs.parsed_args ) print("Response: {} ({})".format(response.status, response.reason)) print("") print(response.get( "orderClientExtensionsModifyTransaction", 200 ))
5,877
def yd_process_results( mentions_dataset, predictions, processed, sentence2ner, include_offset=False, mode='default', rank_pred_score=True, ): """ Function that can be used to process the End-to-End results. :return: dictionary with results and document as key. """ assert mode in ['best_candidate', 'remove_invalid', 'default'] res = {} for doc in mentions_dataset: if doc not in predictions: # No mentions found, we return empty list. continue pred_doc = predictions[doc] ment_doc = mentions_dataset[doc] text = processed[doc][0] res_doc = [] for pred, ment in zip(pred_doc, ment_doc): sent = ment["sentence"] idx = ment["sent_idx"] start_pos = ment["pos"] mention_length = int(ment["end_pos"] - ment["pos"]) if pred["prediction"] != "NIL": candidates = [ { 'cand_rank': cand_rank, 'cand_name': cand_name, 'cand_score': cand_score, } for cand_rank, (cand_name, cand_mask, cand_score) in enumerate(zip(pred['candidates'], pred['masks'], pred['scores'])) if float(cand_mask) == 1 ] if rank_pred_score: candidates = sorted(candidates, key=lambda x: float(x['cand_score']), reverse=True) # make sure that ed_model predict is always in the first place. for cand_index, candidate in enumerate(candidates): if candidate['cand_name'] == pred['prediction']: if cand_index != 0: candidates[0], candidates[cand_index] = candidates[cand_index], candidates[0] break if len(candidates) == 1: temp = ( start_pos, mention_length, pred["prediction"], ment["ngram"], pred["conf_ed"], ment["conf_md"] if "conf_md" in ment else 0.0, ment["tag"] if "tag" in ment else "NULL", [tmp_candidate['cand_name'] for tmp_candidate in candidates], ) res_doc.append(temp) else: if mode == 'best_candidate': for cand_index, candidate in enumerate(candidates): tmp_cand_name = candidate['cand_name'].replace('_', ' ') if sentence2ner is not None and \ tmp_cand_name in sentence2ner and \ ment["tag"] != sentence2ner[tmp_cand_name]: continue else: temp = ( start_pos, mention_length, candidate['cand_name'], ment["ngram"], pred["conf_ed"], ment["conf_md"] if "conf_md" in ment else 0.0, ment["tag"] if "tag" in ment else "NULL", [tmp_candidate['cand_name'] for tmp_candidate in candidates], ) res_doc.append(temp) break elif mode == 'remove_invalid': tmp_cand_name = pred["prediction"].replace('_', '') if sentence2ner is not None and \ tmp_cand_name in sentence2ner and \ ment["tag"] != sentence2ner[tmp_cand_name]: pass else: temp = ( start_pos, mention_length, pred["prediction"], ment["ngram"], pred["conf_ed"], ment["conf_md"] if "conf_md" in ment else 0.0, ment["tag"] if "tag" in ment else "NULL", [tmp_candidate['cand_name'] for tmp_candidate in candidates], ) res_doc.append(temp) elif mode == 'default': temp = ( start_pos, mention_length, pred["prediction"], ment["ngram"], pred["conf_ed"], ment["conf_md"] if "conf_md" in ment else 0.0, ment["tag"] if "tag" in ment else "NULL", [tmp_candidate['cand_name'] for tmp_candidate in candidates], ) res_doc.append(temp) res[doc] = res_doc return res
5,878
def valid(f):
    """Formula f is valid if and only if it has no numbers with leading
    zeros and it evaluates to True."""
    try:
        return not re.search(r'\b0[0-9]', f) and eval(f) is True
    except ArithmeticError:
        return False
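Typical calls, assuming valid as defined above together with import re; the leading-zero test short-circuits before eval, and division by zero is swallowed by the ArithmeticError handler.

assert valid('1 + 2 == 3')       # true equation, no leading zeros
assert not valid('1 + 2 == 4')   # evaluates to False
assert not valid('01 + 2 == 3')  # leading zero is rejected before eval
assert not valid('1 / 0 == 1')   # ZeroDivisionError -> ArithmeticError -> False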
5,879
def user_info():
    """
    Render the personal center (user profile) page.
    :return:
    """
    user = g.user
    if not user:
        return redirect('/')

    data = {
        "user_info": user.to_dict()
    }

    return render_template("news/user.html", data=data)
5,880
def _check_X(X, n_components=None, n_features=None, ensure_min_samples=1): """Check the input data X. See https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/_base.py . Parameters ---------- X : array-like, shape (n_samples, n_features) n_components : integer Returns ------- X : array, shape (n_samples, n_features) """ X = check_array(X, dtype=[np.float64, np.float32], ensure_min_samples=ensure_min_samples) if n_components is not None and X.shape[0] < n_components: raise ValueError('Expected n_samples >= n_components ' 'but got n_components = %d, n_samples = %d' % (n_components, X.shape[0])) if n_features is not None and X.shape[1] != n_features: raise ValueError("Expected the input data X have %d features, " "but got %d features" % (n_features, X.shape[1])) return X
5,881
def read_viirs_geo (filelist, ephemeris=False, hgt=False): """ Read JPSS VIIRS Geo files and return Longitude, Latitude, SatelliteAzimuthAngle, SatelliteRange, SatelliteZenithAngle. if ephemeris=True, then return midTime, satellite position, velocity, attitude """ if type(filelist) is str: filelist = [filelist] if len(filelist) ==0: return None # Open user block to read Collection_Short_Name with h5py.File(filelist[0], 'r') as fn: user_block_size = fn.userblock_size with open(filelist[0], 'rU') as fs: ub_text = fs.read(user_block_size) ub_xml = etree.fromstring(ub_text.rstrip('\x00')) #print(ub_text) #print(etree.tostring(ub_xml)) CollectionName = ub_xml.find('Data_Product/N_Collection_Short_Name').text+'_All' #print(CollectionName) # read the data geos = [h5py.File(filename, 'r') for filename in filelist] if not ephemeris: Latitude = np.concatenate([f['All_Data'][CollectionName]['Latitude'][:] for f in geos]) Longitude = np.concatenate([f['All_Data'][CollectionName]['Longitude'][:] for f in geos]) SatelliteAzimuthAngle = np.concatenate([f['All_Data'][CollectionName]['SatelliteAzimuthAngle'][:] for f in geos]) SatelliteRange = np.concatenate([f['All_Data'][CollectionName]['SatelliteRange'][:] for f in geos]) SatelliteZenithAngle = np.concatenate([f['All_Data'][CollectionName]['SatelliteZenithAngle'][:] for f in geos]) Height = np.concatenate([f['All_Data'][CollectionName]['Height'][:] for f in geos]) if hgt: return Longitude, Latitude, SatelliteAzimuthAngle, SatelliteRange, SatelliteZenithAngle, Height else: return Longitude, Latitude, SatelliteAzimuthAngle, SatelliteRange, SatelliteZenithAngle if ephemeris: MidTime = np.concatenate([f['All_Data'][CollectionName]['MidTime'] [:] for f in geos]) SCPosition = np.concatenate([f['All_Data'][CollectionName]['SCPosition'][:] for f in geos]) SCVelocity = np.concatenate([f['All_Data'][CollectionName]['SCVelocity'][:] for f in geos]) SCAttitude = np.concatenate([f['All_Data'][CollectionName]['SCAttitude'][:] for f in geos]) return MidTime, SCPosition, SCVelocity, SCAttitude
5,882
def test_default_configs_override(): """ Test CLI warning when project create invoked for the first time without specifying a template """ cmd = FakeCommand2(None, None, cmd_name='my_plugin fake_command') configs = {'foo': 'bar'} assert cmd.default_configs == configs
5,883
def percent_cb(name, complete, total): """ Callback for updating target progress """ logger.debug( "{}: {} transferred out of {}".format( name, sizeof_fmt(complete), sizeof_fmt(total) ) ) progress.update_target(name, complete, total)
5,884
def test_Sampler_start_text(abc_sampler_config: sampler_pb2.Sampler): """Test that start_text is set from Sampler proto.""" s = samplers.Sampler(abc_sampler_config) assert s.start_text == abc_sampler_config.start_text
5,885
def _in_terminal(): """ Detect if Python is running in a terminal. Returns ------- bool ``True`` if Python is running in a terminal; ``False`` otherwise. """ # Assume standard Python interpreter in a terminal. if "get_ipython" not in globals(): return True ip = globals()["get_ipython"]() # IPython as a Jupyter kernel. if hasattr(ip, "kernel"): return False return True
5,886
def test_models(app: Teal, db: SQLAlchemy): """Checks that the models used in the fixture work.""" DeviceDef, ComponentDef, ComputerDef = \ app.config['RESOURCE_DEFINITIONS'] # type: Tuple[ResourceDef] Component = ComponentDef.MODEL Computer = ComputerDef.MODEL component = Component(id=1, model='foo') pc = Computer(id=2, model='bar', components=[component]) with app.app_context(): db.session.add(pc) queried_pc = Computer.query.first() assert pc == queried_pc
5,887
def create_text_pipeline(documents): """ Create the full text pre-processing pipeline using spaCy that first cleans the texts using the cleaning utility functions and then also removes common stopwords and corpus specific stopwords. This function is used specifically on abstracts. :param documents: A list of textual documents to pre-process. :return cleaned_docs: Pre-processed textual documents. """ # Load all the documents into a spaCy pipe. docs = nlp.pipe(documents, disable=["ner"]) cleaned_docs = [] # Lowercase + custom stopwords list + remove one character tokens + remove symbolical and punctuation tokens. for doc in docs: lowercased_sents_without_stops = [] for sent in doc.sents: lowercased_lemmas_one_sent = [] for token in sent: if not token.pos_ in {"SYM", "PUNCT"} \ and len(token) > 1 \ and not has_links(token.lower_) \ and not check_for_mostly_numeric_string(token.lower_) \ and not re.sub(r'[^\w\s]', '', token.lemma_) in CUSTOM_STOPS: lowercased_lemmas_one_sent.append(token.lower_) sentence = ' '.join(lowercased_lemmas_one_sent) lowercased_sents_without_stops.append(sentence) cleaned_docs.append([s for s in lowercased_sents_without_stops]) return cleaned_docs
5,888
def giou_dist(tlbrs1, tlbrs2): """Computes pairwise GIoU distance.""" assert tlbrs1.ndim == tlbrs2.ndim == 2 assert tlbrs1.shape[1] == tlbrs2.shape[1] == 4 Y = np.empty((tlbrs1.shape[0], tlbrs2.shape[0])) for i in nb.prange(tlbrs1.shape[0]): area1 = area(tlbrs1[i, :]) for j in range(tlbrs2.shape[0]): iou = 0. area_union = area1 + area(tlbrs2[j, :]) iw = min(tlbrs1[i, 2], tlbrs2[j, 2]) - max(tlbrs1[i, 0], tlbrs2[j, 0]) + 1 ih = min(tlbrs1[i, 3], tlbrs2[j, 3]) - max(tlbrs1[i, 1], tlbrs2[j, 1]) + 1 if iw > 0 and ih > 0: area_inter = iw * ih area_union -= area_inter iou = area_inter / area_union ew = max(tlbrs1[i, 2], tlbrs2[j, 2]) - min(tlbrs1[i, 0], tlbrs2[j, 0]) + 1 eh = max(tlbrs1[i, 3], tlbrs2[j, 3]) - min(tlbrs1[i, 1], tlbrs2[j, 1]) + 1 area_encls = ew * eh giou = iou - (area_encls - area_union) / area_encls Y[i, j] = (1. - giou) * 0.5 return Y
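A hand-checked example of the distance, assuming the module's area helper (tlbr area with the same +1 width/height convention) and numba are available; the expected value is worked out directly from the GIoU definition.

import numpy as np

a = np.array([[0., 0., 9., 9.]])    # 10 x 10 box, area 100
b = np.array([[5., 5., 14., 14.]])  # same size, shifted by (5, 5)

# intersection 5 x 5 = 25, union 175, enclosing box 15 x 15 = 225
iou = 25. / 175.
giou = iou - (225. - 175.) / 225.
expected = (1. - giou) * 0.5

assert np.isclose(giou_dist(a, b)[0, 0], expected)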
5,889
def easter(date):
    """Calculate the date of easter.

    Requires a datetime type object. Returns a datetime.date object with the
    date of easter for the passed object's year.
    """
    if 1583 <= date.year < 10000:
        # Delambre's method (Gregorian calendar)
        b = date.year // 100  # Take the first two digits of the year.
        h = (((19 * (date.year % 19) + b - (b // 4)) -
              ((b - ((b + 8) // 25) + 1) // 3) + 15) % 30)
        k = ((32 + 2 * (b % 4) + 2 * ((date.year % 100) // 4) - h -
              ((date.year % 100) % 4)) % 7)
        m = ((date.year % 19) + 11 * h + 22 * k) // 451
        return datetime.date(date.year, (h + k - 7 * m + 114) // 31,
                             ((h + k - 7 * m + 114) % 31) + 1)
    elif 1 <= date.year < 1583:
        # Julian calendar
        d = (19 * (date.year % 19) + 15) % 30
        e = (2 * (date.year % 4) + 4 * (date.year % 7) - d + 34) % 7
        return datetime.date(date.year, (d + e + 114) // 31,
                             ((d + e + 114) % 31) + 1)
    else:
        # Year out of range (e.g. zero or negative)
        raise ValueError("Invalid year: %d." % date.year)
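A quick check of the Gregorian branch against known Easter dates (assumes the module-level import datetime used by the function): Easter fell on 31 March in 2024 and on 20 April in 2025.

import datetime

assert easter(datetime.date(2024, 1, 1)) == datetime.date(2024, 3, 31)
assert easter(datetime.date(2025, 6, 15)) == datetime.date(2025, 4, 20)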
5,890
def state_mahalanobis(od: Mahalanobis) -> Dict: """ Mahalanobis parameters to save. Parameters ---------- od Outlier detector object. """ state_dict = {'threshold': od.threshold, 'n_components': od.n_components, 'std_clip': od.std_clip, 'start_clip': od.start_clip, 'max_n': od.max_n, 'cat_vars': od.cat_vars, 'ohe': od.ohe, 'd_abs': od.d_abs, 'clip': od.clip, 'mean': od.mean, 'C': od.C, 'n': od.n} return state_dict
5,891
def setup_module(): """Setup test environment for the module: - Adds dummy home dir tree """ # Do not mask exceptions here. In particular, catching WindowsError is a # problem because that exception is only defined on Windows... (Path.cwd() / IP_TEST_DIR).mkdir(parents=True)
5,892
def show_table(table, **options): """ Displays a table without asking for input from the user. :param table: a :class:`Table` instance :param options: all :class:`Table` options supported, see :class:`Table` documentation for details :return: None """ return table.show_table(**options)
5,893
def refill_vaddresses():
    """
    Ensures that enough withdrawal addresses are always available
    """
    while True:
        try:
            data = json.dumps({"password": publicserverpassword})
            r = post(url + "len/vaddress", data).text
            if int(r) < 100:
                vaddress = versum.getnewaddress()
                versum.grant(vaddress, "send")
                versum.grant(vaddress, "receive")
                data = json.dumps({"password": publicserverpassword,
                                   "vaddress": vaddress})
                r = post(url + "set/vaddress", data).text
        except Exception:
            pass
        time.sleep(60)
5,894
def create_client(name, func): """Creating resources/clients for all needed infrastructure: EC2, S3, IAM, Redshift Keyword arguments: name -- the name of the AWS service resource/client func -- the boto3 function object (e.g. boto3.resource/boto3.client) """ print("Creating client for", name) return func(name, region_name=DWH_REGION, aws_access_key_id=KEY, aws_secret_access_key=SECRET)
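A usage sketch, assuming boto3 is installed and that KEY, SECRET and DWH_REGION are populated from the surrounding configuration; it only shows which factory goes with which service, not a full setup.

import boto3

ec2 = create_client('ec2', boto3.resource)
s3 = create_client('s3', boto3.resource)
iam = create_client('iam', boto3.client)
redshift = create_client('redshift', boto3.client)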
5,895
def hamming(s0, s1): """ >>> hamming('ABCD', 'AXCY') 2 """ assert len(s0) == len(s1) return sum(c0 != c1 for c0, c1 in zip(s0, s1))
5,896
def load_embeddings(topic): """ Load TSNE 2D Embeddings generated from fitting BlazingText on the news articles. """ print(topic) embeddings = pickle.load( open(f'covidash/data/{topic}/blazing_text/embeddings.pickle', 'rb')) labels = pickle.load( open(f'covidash/data/{topic}/blazing_text/labels.pickle', 'rb')) if '</s>' in labels: labels.remove('</s>') embeddings = embeddings[:len(labels), :] return embeddings, labels
5,897
def get_EAC_macro_log(year, DOY, dest_path):
    """
    Copy the EAC macro processor log

    This gets the macro processor log which is created by the 'at' script
    which starts the macro processor.

    Notes
    =====
    This uses find_EAC_macro_log() to get the log names.

    @param year : Year of observation

    @param DOY : Day of observation

    @param dest_path : Full path to the destination directory.

    @return: list
      EAC macro processor logs copied.
    """
    print("Entered get_EAC_macro_log for", year, DOY, dest_path)
    pm_logs = find_EAC_macro_log(year, DOY)
    if pm_logs is not None:
        # We found one or more logs
        for f in pm_logs:
            try:
                shutil.copy(f, dest_path)
                print(os.path.basename(f), "copied to", dest_path)
            except Exception:
                print("Could not copy", os.path.basename(f), "because",
                      sys.exc_info()[0])
    return pm_logs
5,898
def visualize_dimensionality_reduction(cell_data, columns, category, color_map="Spectral", algorithm="UMAP", dpi=None, save_dir=None): """Plots the dimensionality reduction of specified population columns Args: cell_data (pandas.DataFrame): Dataframe containing columns for dimensionality reduction and category columns (list): List of column names that are included for dimensionality reduction category (str): Name of column in dataframe containing population or patient data color_map (str): Name of MatPlotLib ColorMap used algorithm (str): Name of dimensionality reduction algorithm, must be UMAP, PCA, or tSNE dpi (float): The resolution of the image to save, ignored if save_dir is None save_dir (str): Directory to save plots, default is None """ cell_data = cell_data.dropna() dim_reduction_algos = ["UMAP", "PCA", "tSNE"] misc_utils.verify_in_list(algorithm=algorithm, dimensionality_reduction_algorithms=dim_reduction_algos) graph_title = "%s projection of data" % algorithm if algorithm == "UMAP": reducer = umap.UMAP() column_data = cell_data[columns].values scaled_column_data = StandardScaler().fit_transform(column_data) embedding = reducer.fit_transform(scaled_column_data) plot_dim_reduced_data(embedding[:, 0], embedding[:, 1], fig_id=1, hue=cell_data[category], cell_data=cell_data, title=graph_title, dpi=dpi, save_dir=save_dir, save_file="UMAPVisualization.png") elif algorithm == "PCA": pca = PCA() pca_result = pca.fit_transform(cell_data[columns].values) plot_dim_reduced_data(pca_result[:, 0], pca_result[:, 1], fig_id=2, hue=cell_data[category], cell_data=cell_data, title=graph_title, dpi=dpi, save_dir=save_dir, save_file="PCAVisualization.png") elif algorithm == "tSNE": tsne = TSNE() tsne_results = tsne.fit_transform(cell_data[columns].values) plot_dim_reduced_data(tsne_results[:, 0], tsne_results[:, 1], fig_id=3, hue=cell_data[category], cell_data=cell_data, title=graph_title, dpi=dpi, save_dir=save_dir, save_file="tSNEVisualization.png")
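A smoke-test style usage sketch on synthetic data; it assumes the helpers this function relies on (plot_dim_reduced_data, misc_utils, umap, scikit-learn) are importable, and the marker and population column names below are made up for illustration.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
cell_data = pd.DataFrame({
    "marker1": rng.normal(size=200),
    "marker2": rng.normal(size=200),
    "marker3": rng.normal(size=200),
    "population": rng.choice(["A", "B"], size=200),
})

visualize_dimensionality_reduction(
    cell_data,
    columns=["marker1", "marker2", "marker3"],
    category="population",
    algorithm="PCA",   # cheapest of the three for a quick check
    save_dir=None,     # display instead of saving
)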
5,899