def arraySeries(dates, values, observedproperty, unit):
    """
    Display time series in tabular format.

    arguments:
    >>> dates: list of 'datetime.datetime' objects containing entries of time series date and time column.
    >>> values: list of float objects containing entries of time series value column.
    >>> observedproperty: (string).
    >>> unit: unit of measured observed property (string).
    """
    if dates == [] or values == []:
        pass
    else:
        global view
        view = QtGui.QTableView()
        model = QtGui.QStandardItemModel(len(values), 2)
        model.setHorizontalHeaderItem(0, QtGui.QStandardItem("Date"))
        model.setHorizontalHeaderItem(
            1, QtGui.QStandardItem(observedproperty + "(" + unit + ")"))
        for i in range(len(values)):
            model.setItem(i, 0, QtGui.QStandardItem(str(dates[i])))
            model.setItem(i, 1, QtGui.QStandardItem(str(values[i])))
        view.setModel(model)
        view.setWindowTitle('Time series table view')
        view.show()
6,800
def download(auth, url, headers, output_path, size, overwrite, f_name=None, ext=None, block_size=4096, callback=None): """ Call GET for a file stream. :Args: - auth (:class:`.Credentials`): The session credentials object. - url (str): The complete endpoint URL. - headers (dict): The headers to be used in the request. - output_path (str): Full file path to download the data to. - size (int): File size of the file to be downloaded as retrieved by a HEAD request. - overwrite (bool): If ``True``, download the new data over an existing file. :Kwargs: - f_name (str): Used to specify a filename if one is not already included in the URL. The default is ``None``. - ext (str): Used to specify a file extension if one is not already included in the URL. The default is ``None``. - block_size (int): Used to vary the upload chunk size. The default is 4096 bytes. Determines the frequency with which the callback is called. - callback (func): A function to be called to report download progress. The function must take three arguments: the percent downloaded (float), the bytes downloaded (float), and the total bytes to be downloaded (float). :Returns: - The raw server response. :Raises: - :exc:`.RestCallException` is the call failed, a file operation failed, or returned a non-200 status. """ filename = filename_from_url(url, ext) if not f_name else f_name downloadfile = os.path.join(output_path, filename) if os.path.exists(downloadfile) and not overwrite: LOG.warning( "File {0} already exists. Not overwriting.".format(downloadfile)) return True LOG.debug("GET call URL: {0}, callback: {1}, file: " "{2}, size: {3}, overwrite: {4}, block_size: {5}".format(url, callback, downloadfile, size, overwrite, block_size)) LOG.info("Starting download to {0}".format(downloadfile)) if size > 0: data_downloaded = float(0) use_callback = hasattr(callback, "__call__") try: with open(downloadfile, "wb") as handle: response = _call(auth, 'GET', url, headers=headers, stream=True) for block in response.iter_content(block_size): if not block: LOG.info("Download complete") break handle.write(block) if size > 0 and use_callback: data_downloaded += len(block) callback(float(data_downloaded/size*100), data_downloaded, float(size)) return response except RestCallException: try: os.remove(downloadfile) except: pass raise except EnvironmentError as exp: try: os.remove(downloadfile) except: pass raise RestCallException(type(exp), str(exp), exp)
6,801
def to_int(matrix):
    """ Function to convert each element of the matrix to int """
    for row in range(rows(matrix)):
        for col in range(cols(matrix)):
            for j in range(3):
                matrix[row][col][j] = int(matrix[row][col][j])
    return matrix
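A small worked example of the expected data shape may help; it assumes the helper functions rows() and cols() return the matrix dimensions and that each cell holds a 3-component (RGB-like) value:

# Hypothetical 1x2 "matrix" of float triples; to_int truncates each component.
pixels = [[[1.9, 2.2, 3.7], [0.0, 4.5, 5.1]]]
# to_int(pixels) would yield [[[1, 2, 3], [0, 4, 5]]]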
6,802
def create_tastypie_resource(class_inst):
    """
    Usage:
        url(r'^api/', include(create_tastypie_resource(UfsObjFileMapping).urls)),
    Access url: api/ufs_obj_file_mapping/?format=json
    :param class_inst:
    :return:
    """
    return create_tastypie_resource_class(class_inst)()
6,803
def make_collector(entries):
    """ Creates a function that collects the location data from openLCA. """
    def fn(loc):
        entry = [loc.getCode(), loc.getName(), loc.getRefId()]
        entries.append(entry)
    return fn
6,804
def apiname(funcname):
    """ Define what name the API uses, the short or the gl version. """
    if funcname.startswith('gl'):
        return funcname
    else:
        if funcname.startswith('_'):
            return '_gl' + funcname[1].upper() + funcname[2:]
        else:
            return 'gl' + funcname[0].upper() + funcname[1:]
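Illustrative behavior of the naming rule above (hypothetical inputs):

# apiname('Clear')   -> 'glClear'
# apiname('glClear') -> 'glClear'   (already prefixed, returned unchanged)
# apiname('_Begin')  -> '_glBegin'  (leading underscore preserved)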
6,805
def add_numeric_gene_pos(gene_info):
    """ Add numeric gene (start) genomic position to a gene_info dataframe """
    gene_chr_numeric = gene_info['chr']
    gene_chr_numeric = ['23' if x == 'X' else x for x in gene_chr_numeric]
    gene_chr_numeric = ['24' if x == 'Y' else x for x in gene_chr_numeric]
    gene_start_vec = gene_info['start']
    gene_start_vec = [str(x).zfill(10) for x in gene_start_vec]
    gene_pos_numeric = [x + '.' + y for x, y in zip(gene_chr_numeric, gene_start_vec)]
    gene_pos_numeric = np.array([float(x) for x in gene_pos_numeric])
    gene_info['genome_pos_numeric'] = gene_pos_numeric
    return gene_info
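A minimal usage sketch, assuming a pandas DataFrame with 'chr' and 'start' columns; the integer part of the resulting float encodes the chromosome and the fractional part the zero-padded start position:

import numpy as np
import pandas as pd

gene_info = pd.DataFrame({'chr': ['2', 'X'], 'start': [123456, 500]})
gene_info = add_numeric_gene_pos(gene_info)
# gene_info['genome_pos_numeric'] -> array([ 2.0000123456, 23.00000005 ])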
6,806
def read_list_from_file(filename: str) -> set:
    """Build a set from a simple multiline text file.

    Args:
        filename: name of the text file

    Returns:
        a set of the unique lines from the file
    """
    filepath = pathlib.Path(__file__).parent.joinpath(filename)
    lines = filepath.read_text().splitlines()
    return set(lines)
6,807
def test_machine_status(
    requests_mock: Mocker,
    mock_hqs_api_handler: HoneywellQAPI,
) -> None:
    """Test that we can retrieve the machine state via Honeywell endpoint."""
    machine_name = "HQS-LT-S1-APIVAL"
    mock_machine_state = "online"

    mock_url = f"https://qapi.honeywell.com/v1/machine/{machine_name}"
    requests_mock.register_uri(
        "GET",
        mock_url,
        json={"state": mock_machine_state},
        headers={"Content-Type": "application/json"},
    )

    assert mock_hqs_api_handler.status(machine_name) == mock_machine_state

    # Delete authentication tokens to clean them from the keyring
    mock_hqs_api_handler.delete_authentication()
6,808
def process_poc_output(poc_list, target, verbose, quiet):
    """Write the finalized netblocks to a CSV file.

    Args:
        poc_list: A list of point of contact information from ARIN.
        target: The company the PoC information was gathered for.
        verbose: A boolean that indicates whether verbose status messages should be printed.
        quiet: A boolean that indicates that all status messages should be disabled.
    """
    poc_list_unique = []
    poc_list = sorted(set(tuple(item) for item in poc_list))
    for contact in poc_list:
        poc_list_unique.append(list(contact))
    if poc_list_unique:
        try:
            with open(process_output_name(target) + '_contacts.csv', 'w') as output_file:
                writer = csv.writer(output_file, delimiter=',', lineterminator='\n')
                writer.writerow(['Name', 'Company', 'Address', 'Emails', 'Phone Numbers'])
                for contact in poc_list_unique:
                    writer.writerow(contact)
                print('\n[*] Point of contact data written to', process_output_name(target) + '_contacts.csv')
        except IOError:
            print('\n[!] WARNING: Error with filename', process_output_name(target) + '_contacts.csv')
            ## TODO: replace input() so that no user input is ever required
            output = str(input('[+] Please enter a new filename (no extension): '))
            if not output:  # fall back to a default name when nothing was entered
                output = 'retrieved_contacts'
            output = process_output_name(output)
            output += '.csv'
            with open(output, 'w', newline='') as output_file:
                writer = csv.writer(output_file, delimiter=',', lineterminator='\n')
                writer.writerow(['Name', 'Company', 'Address', 'Emails', 'Phone Numbers'])
                for contact in poc_list_unique:
                    writer.writerow(contact)
                print('\n[*] Point of contact data written to', output)
6,809
def parseAnswerA(answer, index, data):
    """
    parseAnswerA(data): Grab our IP address from an answer to an A query
    """
    retval = {}
    text = (str(answer[0]) + "." + str(answer[1]) + "."
            + str(answer[2]) + "." + str(answer[3]))
    retval["ip"] = text
    #
    # TODO: There may be pointers even for A responses. Will have to check into this later.
    #
    retval["sanity"] = []
    return(retval, text)
6,810
def calc_initial_conditions(state):
    """ Calculate dynamic enthalpy, etc. """
    vs = state.variables
    if npx.any(vs.salt < 0.0):
        raise RuntimeError("encountered negative salinity")
    vs.update(calc_initial_conditions_kernel(state))
6,811
def qlCleanCache(cloth):
    """Clean layback cache for given cloth. Accepts qlCloth object"""
    cmds.select(cloth)
    mel.eval('qlClearCache()')
6,812
def test_toggle_off_show_all_files(editorstack, outlineexplorer, test_files):
    """
    Test that toggling off the option to show all files in the Outline
    Explorer hides all root file items but the one corresponding to the
    currently selected Editor, and assert that the remaining root file item
    is expanded correctly.
    """
    editorstack = editorstack(test_files)
    treewidget = outlineexplorer.treewidget
    assert editorstack.get_stack_index() == 0

    # Untoggle show all files option.
    treewidget.toggle_show_all_files(False)
    results = [item.text(0) for item in treewidget.get_visible_items()]
    assert results == ['foo1.py', 'foo']
6,813
def _get_files(data_path, modality, img_or_label):
    """Gets files for the specified data modality and type.

    Args:
        data_path: String, path to the dataset directory.
        modality: String, data modality (e.g. 'CT', 'MR_T1').
        img_or_label: String, desired data ('image' or 'label').

    Returns:
        A list of sorted file names or None when getting label for test set.
    """
    if "CT" in modality:
        subject_path = os.path.join(data_path, _FOLDERS_MAP[img_or_label])
    elif "MR" in modality:
        subject_path = os.path.join(data_path, _MODALITY_MAP[modality][1],
                                    _FOLDERS_MAP[img_or_label])
        if "MR_T1" in modality and _FOLDERS_MAP[img_or_label] == _FOLDERS_MAP["image"]:
            subject_path = os.path.join(subject_path, _MODALITY_MAP[modality][2])
    else:
        raise ValueError("Unknown data modality")

    filenames = file_utils.get_file_list(subject_path,
                                         fileStr=_POSTFIX_MAP[modality][img_or_label],
                                         fileExt=[_DATA_FORMAT_MAP[img_or_label]],
                                         sort_files=True)
    return filenames
6,814
def _parse_args() -> argparse.Namespace:
    """Registers the script's arguments on an argument parser."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--source-root', type=Path, required=True,
                        help='Prefix to strip from the source files')
    parser.add_argument('sources', type=Path, nargs='*',
                        help='Files to mirror to the directory')
    parser.add_argument('--directory', type=Path, required=True,
                        help='Directory to which to mirror the sources')
    parser.add_argument('--path-file', type=Path,
                        help='File with paths to files to mirror')
    return parser.parse_args()
6,815
def stderr(string):
    """ Print the given ``string`` to stderr.

    This is equivalent to ``print >> sys.stderr, string``
    """
    print >> sys.stderr, string
6,816
def qlist(q):
    """Convenience function that converts asyncio.Queues into lists.

    This is inefficient and should not be used in real code.
    """
    l = []
    # get the messages out
    while not q.empty():
        l.append(q.get_nowait())
    # now put the messages back (since we popped them out)
    for i in l[::-1]:
        q.put_nowait(i)
    return l
6,817
def is_stdin(name):
    """Tell whether or not the given name represents stdin."""
    return name in STDINS
6,818
def filter_marker_y_padding(markers_y_indexes, padding_y_top, padding_y_bottom):
    """
    Filter the markers indexes for padding space in the top and bottom of answer sheet
    :param markers_y_indexes:
    :param padding_y_top:
    :param padding_y_bottom:
    :return:
    """
    return markers_y_indexes[(markers_y_indexes > padding_y_top)
                             & (markers_y_indexes < padding_y_bottom)]
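A quick illustrative call with NumPy (marker positions are hypothetical):

import numpy as np

markers_y = np.array([5, 40, 120, 380, 590])
filter_marker_y_padding(markers_y, 10, 400)   # -> array([ 40, 120, 380])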
6,819
def budget_italy(path):
    """Budget Shares for Italian Households

    a cross-section from 1973 to 1992

    *number of observations*: 1729

    *observation*: households

    *country*: Italy

    A dataframe containing:

    wfood
        food share
    whouse
        housing and fuels share
    wmisc
        miscellaneous share
    pfood
        food price
    phouse
        housing and fuels price
    pmisc
        miscellaneous price
    totexp
        total expenditure
    year
        year
    income
        income
    size
        household size
    pct
        cellule weight

    Bollino, Carlo Andrea, Frederico Perali and Nicola Rossi (2000)
    “Linear household technologies”, *Journal of Applied Econometrics*,
    **15(3)**, 253–274.

    Args:
        path: str.
            Path to directory which either stores file or otherwise file will
            be downloaded and extracted there.
            Filename is `budget_italy.csv`.

    Returns:
        Tuple of np.ndarray `x_train` with 1729 rows and 11 columns and
        dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd

    path = os.path.expanduser(path)
    filename = 'budget_italy.csv'
    if not os.path.exists(os.path.join(path, filename)):
        url = 'http://dustintran.com/data/r/Ecdat/BudgetItaly.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name='budget_italy.csv',
                                   resume=False)

    data = pd.read_csv(os.path.join(path, filename), index_col=0,
                       parse_dates=True)
    x_train = data.values
    metadata = {'columns': data.columns}
    return x_train, metadata
6,820
def hr_admin(request):
    """ Views for HR2 Admin page """
    template = 'hr2Module/hradmin.html'
    # searched employee
    query = request.GET.get('search')
    if(request.method == "GET"):
        if(query != None):
            emp = ExtraInfo.objects.filter(
                Q(user__first_name__icontains=query) |
                Q(user__last_name__icontains=query)
            ).distinct()
            emp = emp.filter(user_type="faculty")
        else:
            emp = ExtraInfo.objects.all()
            emp = emp.filter(user_type="faculty")
    else:
        emp = ExtraInfo.objects.all()
        emp = emp.filter(user_type="faculty")
    context = {'emps': emp}
    return render(request, template, context)
6,821
def create_aws_clients(region='us-east-1'):
    """Creates an S3, IAM, and Redshift client to interact with.

    Parameters
    ----------
    region : str
        The aws region to create each client (default 'us-east-1').

    Returns
    -------
    ec2
        A boto3 ec2 resource.
    s3
        A boto3 s3 resource.
    iam
        A boto3 iam client.
    redshift
        A boto3 redshift client.
    """
    ec2 = boto3.resource(
        'ec2',
        region_name=region,
        aws_access_key_id=KEY,
        aws_secret_access_key=SECRET
    )
    s3 = boto3.resource(
        's3',
        region_name=region,
        aws_access_key_id=KEY,
        aws_secret_access_key=SECRET
    )
    iam = boto3.client(
        'iam',
        region_name=region,
        aws_access_key_id=KEY,
        aws_secret_access_key=SECRET
    )
    redshift = boto3.client(
        'redshift',
        region_name=region,
        aws_access_key_id=KEY,
        aws_secret_access_key=SECRET
    )
    return ec2, s3, iam, redshift
6,822
def apply_function(f, *args, **kwargs):
    """ Apply a function or staticmethod/classmethod to the given arguments. """
    if callable(f):
        return f(*args, **kwargs)
    elif len(args) and hasattr(f, '__get__'):
        # support staticmethod/classmethod
        return f.__get__(None, args[0])(*args, **kwargs)
    else:
        assert False, "expected a function or staticmethod/classmethod"
6,823
def test_precursormz_match_tolerance2_array_ppm(): """Test with array and tolerance=2 and type=ppm.""" spectrum_1 = Spectrum(mz=numpy.array([], dtype="float"), intensities=numpy.array([], dtype="float"), metadata={"precursor_mz": 100.0}) spectrum_2 = Spectrum(mz=numpy.array([], dtype="float"), intensities=numpy.array([], dtype="float"), metadata={"precursor_mz": 101.0}) spectrum_a = Spectrum(mz=numpy.array([], dtype="float"), intensities=numpy.array([], dtype="float"), metadata={"precursor_mz": 99.99}) spectrum_b = Spectrum(mz=numpy.array([], dtype="float"), intensities=numpy.array([], dtype="float"), metadata={"precursor_mz": 98.0}) similarity_score = PrecursorMzMatch(tolerance=101.0, tolerance_type="ppm") scores = similarity_score.matrix([spectrum_1, spectrum_2], [spectrum_a, spectrum_b]) assert numpy.all(scores == numpy.array([[True, False], [False, False]])), "Expected different scores."
6,824
def join_data(ycom_county, census, land_area_data):
    """ Getting one dataframe from the three datasets """
    census['LogPopDensity'] = np.log10(census['TotalPop'] / land_area_data['LND110200D'])
    data = pd.concat(([ycom_county, census]), axis=1)
    return data
6,825
def _createController(config):
    """
    Creates the appropriate (hypervisor) controller based on the given
    configuration. This is the place where to perform particular
    initialization tasks for the particular hypervisor controller
    implementations.

    @param config: an instance of L{ConfigParser}
    """
    hv = config.get('hypervisor', 'name')
    hvMod = None
    logger.debug("Hypervisor specified in config: '%s'" % hv)
    fqHvName = "%s.%s" % (CONTROLLERS_PATH, hv)
    try:
        hvPkg = __import__(fqHvName, globals=globals(), level=-1)
        hvMod = getattr(hvPkg, hv)
    except ImportError, e:
        msg = "Hypervisor '%s' is not supported. Error: %s" % (hv, e)
        logger.fatal(msg)
        raise exceptions.ConfigError(msg)
    logger.info("Using %s as the HyperVisor" % hvMod.__name__)
    return hvMod
6,826
def main(): """ Paths from phone and bounder for Skyfall data set """ is_export_bounder_csv: bool = False # If true, save only episode start to end as csv # Concentrate on single station phone_id = "1637610021" # Load for all stations loc_fields = ['station_id', 'location_epoch_s', 'location_latitude', 'location_longitude', 'location_altitude', 'location_speed', 'location_horizontal_accuracy', 'barometer_epoch_s', 'barometer_wf_raw'] df_loc = sf_dw.dw_main(skyfall_config.tdr_load_method)[loc_fields] print(f'Dimensions (# of rows, # of columns): {df_loc.shape}') # Pick only the balloon station m_list = df_loc.index[df_loc['station_id'] == phone_id] m = m_list[0] phone_loc = df_loc.iloc[m] # Verify if phone_loc.size != df_loc.shape[1]: print(f"Error: Station selected does not match # of bounder columns; " f"station({phone_loc.size}) != bounder({df_loc.shape[1]})") raise ValueError("Station # columns does not match bounder # columns") # print(f'Verify that balloon station selected matches # of columns: {phone_loc.shape}') # Bounder data is a standard rectangular matrix if not os.path.exists(BOUNDER_PATH): print("Bounder input directory does not exist, check path:") print(BOUNDER_PATH) exit() if skyfall_config.is_rerun_bounder: bounder_data(BOUNDER_PATH, BOUNDER_FILE, BOUNDER_PQT_FILE) print('Constructing bounder.parquet file') # Load parquet with bounder data fields print('Load Bounder parquet:') bounder_loc = pd.read_parquet(os.path.join(BOUNDER_PATH, BOUNDER_PQT_FILE)) print(f'Dimensions (# of rows, # of columns): {bounder_loc.shape}') print(f'Available columns: {bounder_loc.columns}') # Remove bounder repeated values and NaNs # DataWindow should be cleared of nans # phone_loc = phone_loc[~phone_loc['location_epoch_s'].duplicated(keep='first')].dropna() bounder_loc = bounder_loc[~bounder_loc['Epoch_s'].duplicated(keep='first')].dropna() # Bounder clock, initial, and final conditions print('\nBounder Start Time:', bounder_loc['Datetime'].iloc[0]) print('Bounder Start Epoch s:', bounder_loc['Epoch_s'].iloc[0]) print('Bounder Start Lat:', bounder_loc['Lat_deg'].iloc[0]) print('Bounder Start Lon:', bounder_loc['Lon_deg'].iloc[0]) print('Bounder Start Alt:', bounder_loc['Alt_m'].iloc[0]) print('\nBounder End Time:', bounder_loc['Datetime'].iloc[-1]) print('Bounder Terminus Parameters (Ref):') print('Bounder Ref Epoch s:', bounder_loc['Epoch_s'].iloc[-1]) print('Bounder Ref Lat:', bounder_loc['Lat_deg'].iloc[-1]) print('Bounder Ref Lon:', bounder_loc['Lon_deg'].iloc[-1]) print('Bounder Ref Alt:', bounder_loc['Alt_m'].iloc[-1]) # Bounder sample interval and standard deviation bounder_sample_interval_s = np.mean(np.diff(bounder_loc['Epoch_s'])) bounder_interval_std_s = np.std(np.diff(bounder_loc['Epoch_s'])) print('Bounder sample interval, s:', bounder_sample_interval_s) print('Bounder standard dev, s:', bounder_interval_std_s) if is_export_bounder_csv: # Export Initial and Final states to CSV print(f"Export Bounder initial and final states to CSV. 
Path: " f"{os.path.join(BOUNDER_PATH, skyfall_config.event_name + '_bounder_start_end.csv')}") file_bounder_start_end_csv = os.path.join(BOUNDER_PATH, skyfall_config.event_name + '_bounder_start_end.csv') bounder_specs_to_csv(df=bounder_loc, csv_export_file=file_bounder_start_end_csv) # Compare to phone phone_datetime_start = dt.datetime_from_epoch_seconds_utc(phone_loc['location_epoch_s'][0]) phone_datetime_end = dt.datetime_from_epoch_seconds_utc(phone_loc['location_epoch_s'][-1]) print('Phone loc start:', phone_datetime_start) print('Phone loc end:', phone_datetime_end) # Use atmospheric pressure to construct an elevation model elevation_model = rpd_geo.bounder_model_height_from_pressure(pressure_kPa=bounder_loc['Pres_kPa']) plt.figure() plt.semilogx(bounder_loc['Pres_kPa'], bounder_loc['Alt_m']*METERS_TO_KM, label='data') plt.semilogx(bounder_loc['Pres_kPa'], elevation_model*METERS_TO_KM, '-.', label='polynomial') plt.legend() plt.ylabel('Height, km') plt.xlabel('Pressure, kPa') # plt.title('Bounder Pressure vs Height') # Compute ENU projections txyzuvw_phone = \ rpd_geo.compute_t_xyz_uvw(unix_s=phone_loc['location_epoch_s'], lat_deg=phone_loc['location_latitude'], lon_deg=phone_loc['location_longitude'], alt_m=phone_loc['location_altitude'], ref_unix_s=ref_epoch_s, ref_lat_deg=ref_latitude_deg, ref_lon_deg=ref_longitude_deg, ref_alt_m=ref_altitude_m) txyzuvw_bounder = \ rpd_geo.compute_t_xyz_uvw(unix_s=bounder_loc['Epoch_s'], lat_deg=bounder_loc['Lat_deg'], lon_deg=bounder_loc['Lon_deg'], alt_m=bounder_loc['Alt_m'], ref_unix_s=ref_epoch_s, ref_lat_deg=ref_latitude_deg, ref_lon_deg=ref_longitude_deg, ref_alt_m=ref_altitude_m) # Internal Bounder temperature is coarse, 1C steps plt.figure() plt.plot(txyzuvw_bounder['T_s']*SECONDS_TO_MINUTES, bounder_loc['Temp_C']) plt.title("Skyfall Bounder, Temperature vs Elapsed Time") plt.xlabel('Elapsed Time, minutes') plt.ylabel('Temp, C') # Scatter plots are cool scatter_dot_size = 24 scatter_colormap = 'inferno' # Phone plots # 3D scatter plot, LAT LON # title_str = "Skyfall Path, Phone" title_str = "" geo_scatter.location_3d(x=phone_loc['location_longitude'], y=phone_loc['location_latitude'], z=phone_loc['location_altitude']*METERS_TO_KM, color_guide=txyzuvw_phone['T_s']*SECONDS_TO_MINUTES, fig_title=title_str, x_label='Lat', y_label='Lon', z_label='Z, km', color_label='Elapsed time, minutes', dot_size=scatter_dot_size, color_map=scatter_colormap, azimuth_degrees=-134, elevation_degrees=30) # 3D speed quiver plot, velocity geo_scatter.loc_quiver_3d(x=txyzuvw_phone['X_m']*METERS_TO_KM, y=txyzuvw_phone['Y_m']*METERS_TO_KM, z=txyzuvw_phone['Z_m']*METERS_TO_KM, u=txyzuvw_phone['U_mps'], v=txyzuvw_phone['V_mps'], w=txyzuvw_phone['W_mps'], color_guide=txyzuvw_phone['T_s']*SECONDS_TO_MINUTES, fig_title=title_str, x_label='X, km', y_label='Y, km', z_label='Z, km', color_label='Elapsed time, minutes', dot_size=scatter_dot_size, color_map=scatter_colormap, azimuth_degrees=-134, elevation_degrees=30, arrow_length=0.05) # XYZ-T geo_scatter.location_3d(x=txyzuvw_bounder['X_m']*METERS_TO_KM, y=txyzuvw_bounder['Y_m']*METERS_TO_KM, z=txyzuvw_bounder['Z_m']*METERS_TO_KM, color_guide=txyzuvw_bounder['T_s']*SECONDS_TO_MINUTES, fig_title=title_str, x_label='X, km', y_label='Y, km', z_label='Z, km', color_label='Elapsed time, minutes', dot_size=scatter_dot_size, color_map=scatter_colormap, azimuth_degrees=-80, elevation_degrees=25) # # XYZ-P # title_str = "Skyfall, Bounder" title_str = "" geo_scatter.location_3d(x=txyzuvw_bounder['X_m']*METERS_TO_KM, 
y=txyzuvw_bounder['Y_m']*METERS_TO_KM, z=txyzuvw_bounder['Z_m']*METERS_TO_KM, color_guide=bounder_loc['Pres_kPa'], fig_title=title_str, x_label='X, km', y_label='Y, km', z_label='Z, km', color_label='Pressure, kPa', dot_size=scatter_dot_size, color_map=scatter_colormap, azimuth_degrees=-80, elevation_degrees=25) # XYZ-Speed geo_scatter.location_3d(x=txyzuvw_bounder['X_m']*METERS_TO_KM, y=txyzuvw_bounder['Y_m']*METERS_TO_KM, z=txyzuvw_bounder['Z_m']*METERS_TO_KM, color_guide=txyzuvw_bounder['Speed_mps'], fig_title=title_str, x_label='X, km', y_label='Y, km', z_label='Z, km', color_label='Speed, m/s', dot_size=scatter_dot_size, color_map=scatter_colormap, azimuth_degrees=-80, elevation_degrees=25) # Overlay # title_str = "Skyfall Path, Phone and Bounder" title_str = "" geo_scatter.loc_overlay_3d(x1=bounder_loc['Lon_deg'], y1=bounder_loc['Lat_deg'], z1=bounder_loc['Alt_m']*METERS_TO_KM, dot_size1=9, color1='grey', legend1='Bounder', alpha1=1, x2=phone_loc['location_longitude'], y2=phone_loc['location_latitude'], z2=phone_loc['location_altitude']*METERS_TO_KM, dot_size2=6, color2='b', legend2='Phone', alpha2=0.6, fig_title=title_str, x_label='Lat', y_label='Lon', z_label='Z, km', azimuth_degrees=-134, elevation_degrees=30) plt.show()
6,827
def path_graph():
    """Return a path graph of length three."""
    G = nx.path_graph(3, create_using=nx.DiGraph)
    G.graph["name"] = "path"
    nx.freeze(G)
    return G
6,828
def dataset_to_cfl(dir_out, file_name, suffix="", file_png=None, verbose=False): """ Convert ISMRMRD to CFL files in specified directory. Parameters ---------- dir_out : str Output directory to write CFL files file_name : str Name of ISMRMRD file suffix : str, optional Suffix to attach to output file names file_png : str, optional If not None, a png file will be written out verbose : bool, optional Turn on/off verbose print outs """ kspace, header = load_ismrmrd_to_np(file_name, verbose=verbose) if verbose: print("Transforming k-space data to image domain...") image = transform(kspace, header, verbose=verbose) num_phases = kspace.shape[0] num_echoes = kspace.shape[1] if verbose: print("Writing files...") for i_phase in range(num_phases): for i_echo in range(num_echoes): suffix_i = suffix if num_echoes > 1: suffix_i = "_" + ("echo%02d" % i_echo) + suffix_i if num_phases > 1: suffix_i = "_" + ("phase%02d" % i_phase) + suffix_i cfl.write(os.path.join(dir_out, "kspace" + suffix_i), kspace[i_phase, i_echo, :, :, :, :, :]) cfl.write(os.path.join(dir_out, "image" + suffix_i), image[i_phase, i_echo, :, :, :, :, :]) if file_png is not None: if os.path.splitext(file_png)[1] != ".png": file_png += ".png" if verbose: print("Writing example png ({})...".format(file_png)) energy = np.sum(np.abs(kspace) ** 2, axis=(-1, -2, -4)) i_phase, i_echo, i_slice, i_z = np.where(energy == energy.max()) image_out = image[i_phase[0], i_echo[0], i_slice[0], :, i_z[0], :, :] image_out = np.sqrt(np.sum(np.abs(image_out) ** 2, axis=0)) image_out = image_out / np.max(image_out) * np.iinfo(np.uint8).max imageio.imwrite(file_png, image_out.astype(np.uint8))
6,829
def dropStudentsWithEvents(df, events, saveDroppedAs=None, studentId='BookletNumber', eventId='Label', verbose=True): """ Drop students with certain events. It finds students with the events, and use dropStudents() to drop them. :param df: input data frame with data from multiple students :param events: a list of events. Each event is a string of event name :param saveDroppedAs: optionally saving the dropped data to a csv or pickle file. Remember to specify .csv or .pickle :param studentId: name of the column containing the student ID info; default ot "BookletNumber" :param eventId: name of the column containing the event name; default to "Label" :param verbose: default to True :return: a data frame with students having any of these events dropped. """ # error checks assert (isinstance(df, pd.DataFrame)) for v in [studentId, eventId]: assert (v in df.columns) studentsToDrop = df.loc[df[eventId].isin(events), studentId].unique() if verbose: print("\ndropStudentsWithEvents:") print(events) return dropStudents(df, studentsToDrop, saveDroppedAs, studentId, verbose)
6,830
def get_html_app_files_dirs(output_file):
    """
    Return a tuple of (parent_dir, dir_name) for a directory named after the
    `output_file` file object's base name (stripped of its extension) plus a
    `_files` suffix. Return empty strings if output is to stdout.
    """
    if is_stdout(output_file):
        return '', ''

    file_name = output_file.name
    parent_dir = os.path.dirname(file_name)
    dir_name = fileutils.file_base_name(file_name) + '_files'
    return parent_dir, dir_name
6,831
def create_slides(user, node, slideshow_data):
    """ Generate SlideshowSlides from data.

    Returns a collection of SlideshowSlide objects.
    """
    slides = []
    with transaction.atomic():
        for slide in slideshow_data:
            slide_obj = SlideshowSlide(
                contentnode=node,
                sort_order=slide.get("sort_order"),
                metadata={
                    "caption": slide.get('caption'),
                    "descriptive_text": slide.get('descriptive_text'),
                    "checksum": slide.get('checksum'),
                    "extension": slide.get('extension')
                }
            )
            slide_obj.save()
            slides.append(slide_obj)
    return slides
6,832
def is_valid(sequence):
    """
    A string is not valid if the knight moves onto a blank square, and the
    string cannot contain more than two vowels.
    """
    if any(letter == "_" for letter in sequence):
        return False

    # Check for vowels
    # Strings shorter than 3 letters are always ok, as they
    # can't contain more than two vowels
    if len(sequence) < 3:
        return True

    # Check longer sequences for number of vowels
    vowels = "AEIUO"
    num_vowels = len([v for v in sequence if v in vowels])
    if num_vowels > 2:
        return False

    # Check for duplicate characters.
    # The original question did not say anything about
    # repeated characters, but ignoring them would lead to infinite
    # sequences, such as AMAMAMA..., where the knight makes the same sequence
    # of moves over and over again
    if duplicate_characters(sequence):
        return False

    return True
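Illustrative calls, assuming duplicate_characters() flags any repeated letter:

# is_valid('A_')    -> False  (knight landed on a blank square)
# is_valid('AEIO')  -> False  (more than two vowels)
# is_valid('AMAM')  -> False  (repeated characters)
# is_valid('QWE')   -> True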
6,833
def batch_dl1_to_dl2( dict_paths, config_file, jobid_from_training, batch_config, logs, ): """ Function to batch the dl1_to_dl2 stage once the lstchain train_pipe batched jobs have finished. Parameters ---------- dict_paths : dict Core dictionary with {stage: PATHS} information config_file : str Path to a configuration file. If none is given, a standard configuration is applied jobid_from_training : str string containing the jobid from the jobs batched in the train_pipe stage, to be passed to the dl1_to_dl2 function (as a slurm dependency) batch_config : dict Dictionary containing the (full) source_environment and the slurm_account strings to be passed to dl1_dl2 function logs: dict Dictionary with logs files Returns ------- jobid_for_dl2_to_dl3 : str string containing the jobids to be passed to the next stage of the workflow (as a slurm dependency) """ log_dl1_to_dl2 = {} jobid_for_dl2_to_dl3 = [] debug_log = {} log.info("==== START {} ==== \n".format("batch dl1_to_dl2_workflow")) for paths in dict_paths: job_logs, jobid = dl1_to_dl2( paths["input"], paths["output"], path_models=paths["path_model"], config_file=config_file, wait_jobid_train_pipe=jobid_from_training, batch_configuration=batch_config, slurm_options=paths.get("slurm_options", None), ) log_dl1_to_dl2.update(job_logs) # Single particle dl1_dl2 jobid to be appended jobid_for_dl2_to_dl3.append(jobid) debug_log[jobid] = f"dl1_to_dl2 jobid that depends on : {jobid_from_training} training job" jobid_for_dl2_to_dl3 = ",".join(jobid_for_dl2_to_dl3) save_log_to_file(log_dl1_to_dl2, logs["log_file"], workflow_step="dl1_to_dl2") save_log_to_file(debug_log, logs["debug_file"], workflow_step="dl1_to_dl2") log.info("==== END {} ====".format("batch dl1_to_dl2_workflow")) return jobid_for_dl2_to_dl3
6,834
def runCommands(cmds, localTempDir, inPipes=None, outPipes=None, errPipes=None): """ Run commands from CMDS list. """ if inPipes is None: inPipes = [None] * len(cmds) if outPipes is None: outPipes = [None] * len(cmds) if errPipes is None: errPipes = [None] * len(cmds) for i, c in enumerate(cmds, 0): if inPipes[i] is None: sin = None else: sin = subprocess.PIPE if outPipes[i] is None: sout = None else: sout = subprocess.PIPE if errPipes[i] is None: serr = None else: serr = subprocess.PIPE p = subprocess.Popen(c, cwd=localTempDir, stdin=sin, stdout=sout, stderr=serr) if inPipes[i] is None: sin = None else: if not os.path.exists(inPipes[i]): raise IOError('Unable to locate inPipe file: %s for command %s' % (inPipes[i], ' '.join(c))) sin = open(inPipes[i], 'r').read() if outPipes[i] is None: pout, perr = p.communicate(sin) handleReturnCode(p.returncode, cmds[i]) else: with open(outPipes[i], 'w') as f: f.write(p.communicate(sin)[0]) handleReturnCode(p.returncode, cmds[i])
6,835
def podmanOcpRegistryLogin(ctx):
    """ Log into the default registry of an OpenShift cluster """
    ocLogin(ctx, ctx.cr.ocp.user)
    cmd = 'podman login'
    cmd += ' --tls-verify=false'
    cmd += ' -u $(oc whoami) -p $(oc whoami --show-token)'
    cmd += f' default-route-openshift-image-registry.apps.{ctx.cf.ocp.domain}'
    out = CmdShell().run(cmd).out
    if not out.startswith('Login Succeeded!'):
        fail('podman login failed')
6,836
def browse(): """ A browser for the bibmanager database. """ # Content of the text buffer: bibs = bm.load() keys = [bib.key for bib in bibs] compact_text = "\n".join(keys) expanded_text = "\n\n".join(bib.content for bib in bibs) # A list object, since I want this to be a global variable selected_content = [None] lex_style = style_from_pygments_cls( pygments.styles.get_style_by_name(cm.get('style'))) custom_style = Style.from_dict({ "status": "reverse", "status.position": "#aaaa00", "status.key": "#ffaa00", "shadow": "bg:#440044", "not-searching": "#888888", }) style = merge_styles([lex_style, custom_style]) def get_menubar_text(): return [ ("class:status", " ("), ("class:status.key", "enter"), ("class:status", ")select entry ("), ("class:status.key", "e"), ("class:status", ")xpand entry ("), ("class:status.key", "f"), ("class:status", ")ind ("), ("class:status.key", "s"), ("class:status", ")ave ("), ("class:status.key", "h"), ("class:status", ")elp ("), ("class:status.key", "q"), ("class:status", ")uit"), ] def get_menubar_right_text(): """Get index of entry under cursor.""" key = get_current_key(text_field.buffer.document, keys) return f" {keys.index(key) + 1} " def get_infobar_text(): """Get author-year-title of entry under cursor.""" key = get_current_key(text_field.buffer.document, keys) bib = bibs[keys.index(key)] year = '' if bib.year is None else bib.year title = 'NO_TITLE' if bib.title is None else bib.title return f"{bib.get_authors('ushort')}{year}: {title}" search_buffer = Buffer( completer=WordCompleter(keys), complete_while_typing=False, multiline=False) search_field = SearchToolbar( search_buffer=search_buffer, forward_search_prompt = "Search: ", backward_search_prompt = "Search backward: ", ignore_case=False) text_field = TextArea( text=compact_text, lexer=PygmentsLexer(BibTeXLexer), scrollbar=True, line_numbers=False, read_only=True, search_field=search_field, input_processors=[HighlightEntryProcessor()], ) text_field.buffer.name = 'text_area_buffer' text_field.is_expanded = False # Shortcut to HighlightEntryProcessor: for processor in text_field.control.input_processors: if processor.__class__.__name__ == 'HighlightEntryProcessor': text_field.bm_processor = processor # Do not highlight searched text: sp = text_field.control.default_input_processors[0] sp._classname = ' ' sp._classname_current = ' ' menu_bar = VSplit([ Window( FormattedTextControl(get_menubar_text), style="class:status"), Window( FormattedTextControl(get_menubar_right_text), style="class:status.right", width=9, align=WindowAlign.RIGHT), ], height=1, ) info_bar = Window( content=FormattedTextControl(get_infobar_text), height=D.exact(1), style="class:status", ) body = HSplit([ menu_bar, text_field, search_field, info_bar, ]) root_container = FloatContainer( content=body, floats=[ Float( xcursor=True, ycursor=True, content=CompletionsMenu(max_height=16, scroll_offset=1), ), ], ) # Key bindings: bindings = KeyBindings() text_focus = Condition( lambda: get_app().layout.current_window == text_field.window) dialog_focus = Condition( lambda: hasattr(get_app().layout.current_window, 'dialog')) @bindings.add("q", filter=text_focus) def _quit(event): event.app.exit() # Navigation: @bindings.add("g", filter=text_focus) def _go_to_first_line(event): event.current_buffer.cursor_position = 0 @bindings.add("G", filter=text_focus) def _go_to_last_line(event) -> None: event.current_buffer.cursor_position = len(event.current_buffer.text) @bindings.add("d", filter=text_focus) def _scroll_down(event): 
scroll_half_page_down(event) @bindings.add("u", filter=text_focus) def _scroll_up(event): scroll_half_page_up(event) @bindings.add("n", filter=text_focus) def _find_next(event): search_state = event.app.current_search_state event.current_buffer.apply_search( search_state, include_current_position=False, count=event.arg) @bindings.add("N", filter=text_focus) def _find_previous(event): search_state = event.app.current_search_state event.current_buffer.apply_search( ~search_state, include_current_position=False, count=event.arg) @bindings.add("h", filter=text_focus) def _show_help(event): show_message("Shortcuts", help_message) @bindings.add("f", filter=text_focus) def _start_search(event): search.start_search(direction=search.SearchDirection.FORWARD) @bindings.add("b", filter=text_focus) def _open_in_browser(event): key = get_current_key(event.current_buffer.document, keys) bib = bm.find(key=key, bibs=bibs) if bib.adsurl is not None: webbrowser.open(bib.adsurl, new=2) else: show_message("Message", f"Entry '{key}' does not have an ADS url.") @bindings.add("c-c", filter=dialog_focus) def _close_dialog(event): get_app().layout.current_window.dialog.future.set_result(None) @bindings.add("s", filter=text_focus) def _save_selected_to_file(event): selected = text_field.bm_processor.selected_entries if len(selected) == 0: show_message("Message", "Nothing to save.") return async def coroutine(): dialog = TextInputDialog( title="Save to File", label_text="\nEnter a file path or leave blank to quit " "and print to screen:\n(press Control-c to cancel)\n", completer=PathCompleter(), ) path = await show_dialog_as_float(dialog) content = '\n\n'.join( bibs[keys.index(key)].content for key in selected) if path == "": selected_content[0] = content # The program termination is in TextInputDialog() since I # need to close this coroutine first. return if path is not None: try: with open(path, "w") as f: f.write(content) except IOError as e: show_message("Error", str(e)) ensure_future(coroutine()) @bindings.add("enter", filter=text_focus) def _toggle_selected_entry(event): "Select/deselect entry pointed by the cursor." key = get_current_key(event.current_buffer.document, keys) text_field.bm_processor.toggle_selected_entry(key) @bindings.add("e", filter=text_focus) def _expand_collapse_entry(event): "Expand/collapse current entry." key, start_end, is_expanded = get_current_key( event.current_buffer.document, keys, get_start_end=True, get_expanded=True) bib = bm.find(key=key, bibs=bibs) if is_expanded: event.app.clipboard.set_text(bib.key) else: event.app.clipboard.set_text(bib.content + '\n') text_field.read_only = False event.current_buffer.cursor_position = start_end[0] event.current_buffer.delete(count=start_end[1] - start_end[0]) event.current_buffer.paste_clipboard_data( event.app.clipboard.get_data(), count=event.arg, paste_mode=PasteMode.VI_BEFORE) text_field.read_only = True if is_expanded: event.current_buffer.cursor_position = start_end[0] @bindings.add("E", filter=text_focus) def _expand_collapse_all(event): "Expand/collapse all entries." 
buffer = event.current_buffer key = get_current_key(buffer.document, keys) if text_field.is_expanded: text_field.text = compact_text else: text_field.text = expanded_text buffer.cursor_position = buffer.text.index(key) text_field.is_expanded = not text_field.is_expanded @bindings.add("o", filter=text_focus) def _open_pdf(event): buffer = event.current_buffer key = get_current_key(buffer.document, keys) bib = bm.find(key=key, bibs=bibs) has_pdf = bib.pdf is not None has_bibcode = bib.bibcode is not None is_missing = has_pdf and not os.path.exists(f'{u.BM_PDF()}{bib.pdf}') if not has_pdf and not has_bibcode: show_message("Message", f"BibTeX entry '{key}' does not have a PDF.") return if has_pdf and not is_missing: pm.open(key=key) #except Exception as e: # show_message("Message", textwrap.fill(str(e), width=70)) return if has_pdf and is_missing and not has_bibcode: show_message("Message", f"BibTeX entry has a PDF file: {bib.pdf}, but the file " "could not be found.") return # Need to fetch before opening: async def coroutine(): dialog = MessageDialog( "PDF file not found", "Fetch from ADS?\n(might take a few seconds ...)", asking=True) fetch = await show_dialog_as_float(dialog) if fetch: with io.StringIO() as buf, redirect_stdout(buf): fetched = pm.fetch(bib.bibcode, replace=True) fetch_output = buf.getvalue() if fetched is None: show_message("PDF fetch failed", fetch_output) else: show_message("PDF fetch succeeded.", fetch_output) pm.open(key=key) ensure_future(coroutine()) application = Application( layout=Layout(root_container, focused_element=text_field), key_bindings=bindings, enable_page_navigation_bindings=True, style=style, full_screen=True, ) application.run() if selected_content[0] is not None: tokens = list(pygments.lex(selected_content[0], lexer=BibTeXLexer())) print_formatted_text( PygmentsTokens(tokens), end="", style=lex_style, #output=create_output(sys.stdout), )
6,837
def read_lines_from_input(file):
    """
    Reads the provided file line by line to provide a list representation of the contained names.

    :param file: A text file containing one name per line. If it's None, the input is read from the standard input.
    :return: A list of the names contained in the provided text file
    """
    if file is None:
        file = sys.stdin
    return map(lambda l: l.strip(), file.readlines())
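A small usage sketch with an in-memory file object:

import io

names = read_lines_from_input(io.StringIO('Ada \nGrace\n'))
list(names)   # -> ['Ada', 'Grace']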
6,838
def diag(input_, k=0):
    """Wrapper of `numpy.diag`.

    Parameters
    ----------
    input_ : DTensor
        Input dense tensor.
    k : int, optional
        Offset to main diagonal, by default 0
    """
    pass
6,839
def is_dataproc_VM():
    """Check if this installation is being executed on a Google Compute Engine dataproc VM"""
    try:
        dataproc_metadata = urllib.request.urlopen(
            "http://metadata.google.internal/0.1/meta-data/attributes/dataproc-bucket").read()
        if dataproc_metadata.decode("UTF-8").startswith("dataproc"):
            return True
    except:
        pass
    return False
6,840
def restore_tmux(tmux_id):
    """
    Restore tmux sessions from the given backed-up Tmux id
    - check if there is a tmux running with the same session name
    - handle windows, panes ..
    """
    # validate given tmux_id
    LOG.info('loading backuped tmux sessions')
    jsonfile = os.path.join(config.BACKUP_PATH, tmux_id, tmux_id + '.json')
    LOG.debug('load json file:%s' % jsonfile)
    tmux = util.json_to_obj(jsonfile)
    LOG.debug('converted json file to Tmux object')
    LOG.info('backuped tmux sessions loaded')
    for sess in tmux.sessions:
        LOG.debug('processing session name %s' % sess.name)
        # check if session exists
        if cmd.has_tmux_server() and cmd.has_session(sess.name):
            LOG.warning('found session with same name in current tmux, \
                skip restoring the session:%s.' % sess.name)
            continue
        restore_session(sess, tmux_id)
    LOG.debug('check and kill dummy session')
    if DUMMY_SESSION:
        cmd.kill_session(DUMMY_SESSION)
    LOG.info('Backup %s is restored! run "tmux list-sessions" to see sessions and attach' % tmux_id)
6,841
def lookup_all(base):
    """Looks up all registered subclasses of a base class.

    Looks up the subclasses of the provided base class in the registry.
    Returns a list of registered subclasses if found, None otherwise.

    Args:
        base: The base class of the subclasses to be found.

    Returns:
        A list of subclasses of the given base class if found, None otherwise.
    """
    basename = base.__name__
    if basename not in _registries:
        return None
    registry = _registries[basename]
    output = []
    for name in registry.keys():
        init_args = registry[name][_INIT_ARGS]
        if init_args is not None:
            output.append(registry[name][_TYPE_TAG](**init_args))
        else:
            output.append(registry[name][_TYPE_TAG])
    return output
6,842
def makepyfile(testdir):
    """Fixture for making python files with single function and docstring."""
    def make(*args, **kwargs):
        func_name = kwargs.pop('func_name', 'f')
        # content in args and kwargs is treated as docstring
        wrap = partial(_wrap_docstring_in_func, func_name)
        args = map(wrap, args)
        kwargs = dict(zip(kwargs.keys(), map(wrap, kwargs.values())))
        return testdir.makepyfile(*args, **kwargs)
    return make
6,843
def _clean_environment(env_dir): """Remove problem elements in environmental directories. - Get rid of old history comment lines that cause parsing failures: https://github.com/bcbio/bcbio-nextgen/issues/2431 """ history_file = os.path.join(env_dir, "conda-meta", "history") if os.path.exists(history_file): has_problem = False cleaned_lines = [] with open(history_file) as in_handle: for line in in_handle: # Remove lines like `# create specs:` which have no information after colon if line.startswith("#") and len([x for x in line.strip().split(":") if x]) == 1: has_problem = True else: cleaned_lines.append(line) if has_problem: shutil.copy(history_file, history_file + ".orig") with open(history_file, "w") as out_handle: for line in cleaned_lines: out_handle.write(line)
6,844
def test_good_input3():
    """runs on good input"""
    run(rna, 'codons.dna', '-P-RPE-R---P--T-E')
6,845
def createTextWatermark(msg, size, loc, fontcolor='white', fontpath='arial.ttf', fontsize=18):
    """Creates a watermark image of the given text.

    Puts it at the given location in an RGBA image of the given size.
    Location should be a 2-tuple denoting the center location of the text."""
    from PIL import Image, ImageDraw, ImageFont
    im = Image.new('RGBA', size, (0, 0, 0, 0))
    draw = ImageDraw.Draw(im)
    font = ImageFont.truetype(fontpath, fontsize)
    tw, th = draw.textsize(msg, font=font)
    loc = (loc[0] - tw // 2, loc[1] - th // 2)
    draw.text(loc, msg, font=font, fill=fontcolor)
    return im
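A hedged usage sketch: composite the returned RGBA watermark onto a base image with Pillow's alpha compositing. It assumes a Pillow version that still provides ImageDraw.textsize, that arial.ttf is available, and hypothetical file names:

from PIL import Image

base = Image.open('photo.jpg').convert('RGBA')
wm = createTextWatermark('DRAFT', base.size, (base.width // 2, base.height // 2))
Image.alpha_composite(base, wm).convert('RGB').save('photo_watermarked.jpg')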
6,846
def generate_bookmarks(inputfile, sections, outputfile):
    """Operate on INPUTFILE, optionally filtering for WORKSPACES."""
    with open(inputfile, 'rb') as fh:
        rawdata = fh.read()
    if inputfile.endswith('.json'):
        data = json.loads(rawdata)
    elif inputfile.endswith('.txt'):
        data = yaml_load(rawdata)
    else:
        raise Exception("Expecting yaml or json Workona export files")
    output = make_bookmarks(data, filter_sections=sections)
    with open(outputfile, 'w') as fh:
        fh.write(output)
6,847
def _discover_on_demand():
    """ Attempts to discover operator modules, if not already discovered """
    global _DISCOVERED
    if not _DISCOVERED:
        _DISCOVERED = True
        discover_local_chemistry_operators()
        discover_preferences_chemistry_operators()
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("Found: chemistry operators {} ".format(local_chemistry_operators()))
6,848
def get_server_pull_config(config: dict):
    """
    Takes a config dictionary and returns the variables related to server
    deployment (pull from intersections). If there is any error in the
    configuration, returns a quadruple of -1 with a console output of the
    exception.
    """
    try:
        server = config["DataTransfer"]["server"]
        intersection = config["DataTransfer"]["intersection"]
        startHour = config["DataTransfer"]["StartTime_PullFromIntersections"]["hour"]
        startMinute = config["DataTransfer"]["StartTime_PullFromIntersections"]["minute"]
        return server, intersection, startHour, startMinute
    except Exception as e:
        print(e)
        return -1, -1, -1, -1
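A minimal sketch of the expected config layout and return value (the server and intersection values are hypothetical):

config = {
    "DataTransfer": {
        "server": "backend-01",
        "intersection": ["intersection-12", "intersection-14"],
        "StartTime_PullFromIntersections": {"hour": 2, "minute": 30},
    }
}
get_server_pull_config(config)
# -> ('backend-01', ['intersection-12', 'intersection-14'], 2, 30)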
6,849
def buff_push(item: BufferItem):
    """
    Add BufferItem to the buffer and execute if the buffer is full
    """
    q.put(item)
    make_dependencies(item)
    if q.full():
        return buff_empty_partial(q.maxsize - 1)
    return None
6,850
def installDirectory():
    """
    Return the software installation directory, by looking at location of this method.
    """
    # path = os.path.abspath(os.path.join(os.path.realpath(__file__), os.pardir))
    path = os.path.abspath(os.path.realpath(__file__))
    path = os.path.abspath(os.path.join(path, '../..'))
    # path = path.replace("EGG-INFO/scripts/smodels-config", "")
    # path = path.replace("installation.py", "")
    return path + "/"
6,851
def test_post_requests_fails_with_invalid_invalid_target_date_in_body(
    invalid_request_body_with_invalid_date, client, request_headers
):
    """
    Tests that response shows failure when request body has invalid target date.

    Args:
        invalid_request_body_with_invalid_date (dict): a request body with an invalid target date
        client (FlaskClient): a test client created by a fixture.
        request_headers (dict): a header created by a fixture.
    """
    res = client.post(
        requests_url(),
        headers=request_headers,
        json=invalid_request_body_with_invalid_date
    )
    response = res.get_json()

    assert not response['success']
    assert response['message'] == {
        'target_date': ['date can only be a later time in the future']
    }
    assert res.status_code == 400
6,852
def take(count: int) -> Callable[[Observable], Observable]:
    """Returns a specified number of contiguous elements from the start of
    an observable sequence.

    .. marble::
        :alt: take

        -----1--2--3--4----|
        [      take(2)     ]
        -----1--2-|

    Example:
        >>> op = take(5)

    Args:
        count: The number of elements to return.

    Returns:
        An operator function that takes an observable source and
        returns an observable sequence that contains the specified
        number of elements from the start of the input sequence.
    """
    from rx.core.operators.take import _take
    return _take(count)
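A short usage sketch with RxPY v3, whose operator API this signature matches:

import rx
from rx import operators as ops

rx.of(1, 2, 3, 4, 5).pipe(
    ops.take(2),
).subscribe(print)   # prints 1 and 2, then the sequence completes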
6,853
def stock(self):
    """Market search google"""
    search_term = self.query.split("for")[-1]
    url = "https://google.com/search?q=" + search_term
    webbrowser.get().open(url)
    speak("Here is what I found for " + search_term + " on google")
6,854
def test_getitem(seas_metadict):
    """ Test `MetaDict.__getitem__(...)` """
    assert seas_metadict['Norwegian'] == 'Europe'
    assert seas_metadict['BALTIC'] != 'arctic'
    with pytest.raises(KeyError):
        seas_metadict['key-not-exists']
6,855
def _Net_batch(self, blobs):
    """
    Batch blob lists according to net's batch size.

    Take
    blobs: Keys blob names and values are lists of blobs (of any length).
           Naturally, all the lists should have the same length.

    Give (yield)
    batch: {blob name: list of blobs} dict for a single batch.
    """
    num = len(next(iter(blobs.values())))
    batch_size = iter(self.blobs.values()).next().num
    remainder = num % batch_size
    num_batches = num / batch_size

    # Yield full batches.
    for b in range(num_batches):
        i = b * batch_size
        yield {name: blobs[name][i:i + batch_size] for name in blobs}

    # Yield last padded batch, if any.
    if remainder > 0:
        padded_batch = {}
        for name in blobs:
            padding = np.zeros((batch_size - remainder,) + blobs[name].shape[1:])
            padded_batch[name] = np.concatenate([blobs[name][-remainder:], padding])
        yield padded_batch
6,856
def format_fields_for_join(
    fields: List[Union[Field, DrivingKeyField]],
    table_1_alias: str,
    table_2_alias: str,
) -> List[str]:
    """Get formatted list of field names for SQL JOIN condition.

    Args:
        fields: Fields to be formatted.
        table_1_alias: Alias that should be used in the field on the left side
            of the equality sign.
        table_2_alias: Alias that should be used in the field on the right side
            of the equality sign.

    Returns:
        Fields list formatted for an SQL JOIN condition.
    """
    return [
        JOIN_CONDITION_SQL_TEMPLATE.format(
            field_name=field.name,
            table_1_alias=table_1_alias,
            table_2_alias=table_2_alias,
        )
        for field in fields
    ]
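A minimal sketch of the intended output, assuming JOIN_CONDITION_SQL_TEMPLATE expands to '{table_1_alias}.{field_name} = {table_2_alias}.{field_name}' and fields named customer_id and order_date:

# format_fields_for_join(fields, 'stg', 'hub')
# -> ['stg.customer_id = hub.customer_id', 'stg.order_date = hub.order_date']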
6,857
def get_payment_balance(currency):
    """
    Returns available balance for selected currency

    This method requires authorization.
    """
    result = get_data("/payment/balances", ("currency", currency))
    payment_balance = namedtuple("Payment_balance", get_namedtuple(result[0]))
    return [payment_balance(**element) for element in result]
6,858
def build_scheduler(optimizer, config):
    """ """
    scheduler = None
    config = config.__dict__
    sch_type = config.pop('type')
    if sch_type == 'LambdaLR':
        burn_in, steps = config['burn_in'], config['steps']

        # Learning rate setup
        def burnin_schedule(i):
            if i < burn_in:
                factor = pow(i / burn_in, 4)
            elif i < steps[0]:
                factor = 1.0
            elif i < steps[1]:
                factor = 0.1
            else:
                factor = 0.01
            return factor

        scheduler = optim.lr_scheduler.LambdaLR(optimizer, burnin_schedule)
    elif sch_type == 'StepLR':
        # Decay the learning rate at fixed intervals: multiply by `gamma`
        # every `step_size` steps (a step usually means one epoch).
        step_size, gamma = config['step_size'], config['gamma']
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
    elif sch_type == 'ReduceLROnPlateau':
        # Adjust the learning rate when a monitored metric stops improving,
        # e.g. reduce it when the validation loss no longer decreases or the
        # validation accuracy no longer increases.
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1,
                                                               patience=3, verbose=True,
                                                               threshold=1e-4)
    return scheduler
6,859
def calculate_z_caller(r, p, inv_t, actual):
    """ This calls the CUDA function and fills out the array """
    x = cuda.grid(1)
    actual[x] = formal_integral_cuda.calculate_z_cuda(r, p, inv_t)
6,860
def rpca_alm(X, lmbda=None, tol=1e-7, max_iters=1000, verbose=True, inexact=True): """ Augmented Lagrange Multiplier """ if lmbda is None: lmbda = 1.0 / np.sqrt(X.shape[0]) Y = np.sign(X) norm_two = svd(Y, 1)[1] norm_inf = np.abs(Y).max() / lmbda dual_norm = np.max([norm_two, norm_inf]) Y = Y / dual_norm A = np.zeros(Y.shape) E = np.zeros(Y.shape) dnorm = la.norm(X, ord='fro') tol_primal = 1e-6 * dnorm total_svd = 0 mu = 0.5 / norm_two rho = 6 sv = 5 n = Y.shape[0] for iter1 in xrange(max_iters): primal_converged = False sv = sv + np.round(n * 0.1) primal_iter = 0 while not primal_converged: Eraw = X - A + (1/mu) * Y Eupdate = np.maximum( Eraw - lmbda/mu, 0) + np.minimum(Eraw + lmbda / mu, 0) U, S, V = svd(X - Eupdate + (1 / mu) * Y, sv) svp = (S > 1/mu).sum() if svp < sv: sv = np.min([svp + 1, n]) else: sv = np.min([svp + round(.05 * n), n]) Aupdate = np.dot( np.dot(U[:, :svp], np.diag(S[:svp] - 1/mu)), V[:svp, :]) if primal_iter % 10 == 0 and verbose >= 2: print(la.norm(A - Aupdate, ord='fro')) if ((la.norm(A - Aupdate, ord='fro') < tol_primal and la.norm(E - Eupdate, ord='fro') < tol_primal) or (inexact and primal_iter > 5)): primal_converged = True A = Aupdate E = Eupdate primal_iter += 1 total_svd += 1 Z = X - A - E Y = Y + mu * Z mu *= rho if la.norm(Z, ord='fro') / dnorm < tol: if verbose: print('\nConverged at iteration {}'.format(iter1)) break if verbose: _verbose(A, E, X) return A, E
6,861
def dwt_embed(wmImage, hostImage, alpha, beta): """Embeds a watermark image into a host image, using the First Level Discrete Wavelet Transform and Alpha Blending.\n The formula used for the alpha blending is: resultLL = alpha * hostLL + beta * watermarkLL Arguments: wmImage (NumPy array) -- the image to be embedded hostImage (NumPy array) -- the image to be watermarked alpha (float) -- the first embedding strength factor beta (float) -- the second embedding strength factor Returns: NumPy array type -- the watermarked image, in float64 format """ # Take the dimensions of the host and watermark images wmHeight, wmWidth = wmImage.shape[:2] hostHeight, hostWidth = hostImage.shape[:2] # Resize the watermark image so that it is the same size as the host image if wmHeight > hostHeight or wmWidth > hostWidth: # Scale down the watermark image wmImage = cv2.resize(wmImage, (hostWidth, hostHeight), interpolation = cv2.INTER_AREA) elif wmHeight < hostHeight or wmWidth < hostWidth: # Scale up the watermark image wmImage = cv2.resize(wmImage, (hostWidth, hostHeight), interpolation = cv2.INTER_LINEAR) # Take the new dimensions of the watermark image wmHeight, wmWidth = wmImage.shape[:2] # Split both images into channels hostB, hostG, hostR = cv2.split(hostImage) wmB, wmG, wmR = cv2.split(wmImage) # Compute the first level bidimensional DWT for each channel of both images # (LL, (HL, LH, HH)) cAhostB, (cHhostB, cVhostB, cDhostB) = pywt.dwt2(hostB, 'db2') cAhostG, (cHhostG, cVhostG, cDhostG) = pywt.dwt2(hostG, 'db2') cAhostR, (cHhostR, cVhostR, cDhostR) = pywt.dwt2(hostR, 'db2') cAhostHeight, cAhostWidth = cAhostB.shape cAwmB, (cHwmB, cVwmB, cDwmB) = pywt.dwt2(wmB, 'db2') cAwmG, (cHwmG, cVwmG, cDwmG) = pywt.dwt2(wmG, 'db2') cAwmR, (cHwmR, cVwmR, cDwmR) = pywt.dwt2(wmR, 'db2') cAwmHeight, cAwmWidth = cAwmB.shape # Generate image matrix for containing all four host coefficients images coeffsHost = np.zeros((cAhostHeight * 2, cAhostWidth * 2, 3), dtype = 'float64') # Merge channels for each of A, H, V and D and build the host coefficients image cAhost = cv2.merge([cAhostB, cAhostG, cAhostR]) coeffsHost[0:cAhostHeight, 0:cAhostWidth] = cAhost cHhost = cv2.merge([cHhostB, cHhostG, cHhostR]) coeffsHost[0:cAhostHeight, cAhostWidth:cAhostWidth * 2] = cHhost cVhost = cv2.merge([cVhostB, cVhostG, cVhostR]) coeffsHost[cAhostHeight:cAhostHeight * 2, 0:cAhostWidth] = cVhost cDhost = cv2.merge([cDhostB, cDhostG, cDhostR]) coeffsHost[cAhostHeight:cAhostHeight * 2, cAhostWidth:cAhostWidth * 2] = cDhost # Display the host coefficients image temp = np.uint8(np.rint(coeffsHost)) cv2.imshow('Host DWT', temp) # Generate image matrix for containing all four watermark coefficients images coeffsWm = np.zeros((cAwmHeight * 2, cAwmWidth * 2, 3), dtype = 'float64') # Merge channels for each of A, H, V and D and build the wm coefficients image cAwm = cv2.merge([cAwmB, cAwmG, cAwmR]) coeffsWm[0:cAwmHeight, 0:cAwmWidth] = cAwm cHwm = cv2.merge([cHwmB, cHwmG, cHwmR]) coeffsWm[0:cAwmHeight, cAwmWidth:cAwmWidth * 2] = cHwm cVwm = cv2.merge([cVwmB, cVwmG, cVwmR]) coeffsWm[cAwmHeight:cAwmHeight * 2, 0:cAwmWidth] = cVwm cDwm = cv2.merge([cDwmB, cDwmG, cDwmR]) coeffsWm[cAwmHeight:cAwmHeight * 2, cAwmWidth:cAwmWidth * 2] = cDwm # Display the watermark coefficients image temp = np.uint8(np.rint(coeffsWm)) cv2.imshow('Watermark DWT', temp) # Apply the Alpha Blending Technique # wmImageLL = alpha * hostLL + beta * wmLL cAresult = alpha * cAhost + beta * cAwm cAresultB, cAresultG, cAresultR = cv2.split(cAresult) # Compute the channels of the 
watermarked image by applying the inverse DWT resultB = pywt.idwt2((cAresultB, (cHhostB, cVhostB, cDhostB)), 'db2') resultG = pywt.idwt2((cAresultG, (cHhostG, cVhostG, cDhostG)), 'db2') resultR = pywt.idwt2((cAresultR, (cHhostR, cVhostR, cDhostR)), 'db2') # Merge the channels and obtain the final watermarked image resultImage = cv2.merge([resultB, resultG, resultR]) return resultImage
6,862
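A minimal usage sketch for the dwt_embed function above. The file paths and blending strengths are illustrative assumptions; cv2, pywt and numpy are assumed to be importable as in the function body.

import cv2
import numpy as np

# Hypothetical input files; any pair of BGR images readable by OpenCV will do.
host = cv2.imread('host.png')
watermark = cv2.imread('watermark.png')

# Strong host contribution, weak watermark contribution (values are illustrative).
watermarked = dwt_embed(watermark, host, alpha=0.99, beta=0.01)

# The result is float64; clip and convert before saving or displaying.
cv2.imwrite('watermarked.png', np.uint8(np.clip(np.rint(watermarked), 0, 255)))
cv2.waitKey(0)  # needed so the cv2.imshow windows opened inside dwt_embed are rendered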
def process_file(file_path: str): """Reads a file and prints its C++ tokenization with nesting of groups.""" print() print('#' * 80) print('Finding nested C++ tokens in file', file_path, flush=True) cpp_source = CppSource(file_path=file_path) dump_grouped_tokens(cpp_source.grouped_cpp_tokens)
6,863
def make_file_iterator(filename): """Return an iterator over the contents of the given file name.""" # pylint: disable=C0103 with open(filename) as f: contents = f.read() return iter(contents.splitlines())
6,864
def pull_partner_statistics(partner_id): """ This method pulls partner statistics. Parameters: - partner_id : the partner ID Returns: None """ signature = "pull_partner_statistics(partner_id)" logginghelper.method_enter(logger, signature, partner_id) # Send request to HFPP network node request_to_node = urllib.request.Request(HFPP_NODE_HTTP_SERVICE_BASE_URL + '/general_service') request_to_node.add_header('Content-Type','application/xml;charset=utf-8') request_to_node.add_header('x-hfpp-username', HFPP_PARTNER_USERNAME) request_to_node.add_header('x-hfpp-password', HFPP_PARTNER_PASSWORD) request_xml = '<?xml version="1.0" encoding="utf-8"?>' \ '<PartnerStatisticsRequest>' \ '<PartnerID>{partner_id}</PartnerID>' \ '</PartnerStatisticsRequest>'.format(partner_id=partner_id) try: response_from_node = urllib.request.urlopen(request_to_node, request_xml.encode(), timeout=PARTNER_REQUEST_TIMEOUT, cafile=CA_CERTIFICATE_FILE, cadefault=CA_DEFAULT) resp_content = response_from_node.read().decode('utf-8') logger.debug('response:%s',resp_content) root = ElementTree.fromstring(resp_content) count_of_data_requests_received = int(root.findtext('./NumberOfDataRequestsReceived')) count_of_data_requests_sent = int(root.findtext('./NumberOfDataRequestsInitiated')) count_of_data_requests_responded = int(root.findtext('./NumberOfDataRequestsResponded')) - INITIAL_RESPONDED_REQUESTS_VALUE count_of_data_requests_declined = int(root.findtext('./NumberOfDataRequestsDeclined')) count_of_data_requests_pending = count_of_data_requests_received - count_of_data_requests_responded - count_of_data_requests_declined reciprocity = count_of_data_requests_responded * 1.0 / count_of_data_requests_received if count_of_data_requests_received > 0 else 0 Partner.objects.filter(hfpp_network_id=partner_id).update( count_of_data_requests_received=count_of_data_requests_received, count_of_data_requests_sent=count_of_data_requests_sent, count_of_data_requests_responded=count_of_data_requests_responded, count_of_data_requests_declined=count_of_data_requests_declined, count_of_data_requests_pending=count_of_data_requests_pending, reciprocity=reciprocity ) except urllib.error.HTTPError as e: # Parse response XML resp_content = e.read().decode('utf-8') logger.debug('response:%s',resp_content) try: root = ElementTree.fromstring(resp_content) # Not succeeded # 400, 401, 403 or 500 error_code = root.findtext('./ErrorCode') error_message = root.findtext('./ErrorMessage') # Log error code and error message logging.error('error code:%s',error_code) logging.error('error message:%s',error_message) except Exception as e: logging.exception("") logginghelper.method_exit(logger, signature)
6,865
def build_summary(resource, children, attribute, summarizer, keep_details=False): """ Update the `resource` Resource with a summary of itself and its `children` Resources and this for the `attribute` key (such as copyrights, etc). - `attribute` is the name of the attribute ('copyrights', 'holders' etc.) - `summarizer` is a function that takes a list of texts and returns summarized texts with counts """ # Collect current data values = getattr(resource, attribute, []) no_detection_counter = 0 if values: # keep current data as plain strings candidate_texts = [entry.get('value') for entry in values] else: candidate_texts = [] if resource.is_file: no_detection_counter += 1 # Collect direct children existing summaries for child in children: child_summaries = get_resource_summary(child, key=attribute, as_attribute=keep_details) or [] for child_summary in child_summaries: count = child_summary['count'] value = child_summary['value'] if value: candidate_texts.append(Text(value, value, count)) else: no_detection_counter += count # summarize proper using the provided function summarized = summarizer(candidate_texts) # add back the counter of things without detection if no_detection_counter: summarized.update({None: no_detection_counter}) summarized = sorted_counter(summarized) if TRACE: logger_debug('COPYRIGHT summarized:', summarized) set_resource_summary(resource, key=attribute, value=summarized, as_attribute=keep_details) return summarized
6,866
def test_write_process_button(handler): """ Test the _write_process_button method. """ # patch the view context handler.view_ctx = Mock(**{'format_url.return_value': 'an url'}) # patch the meld elements cell_elt = Mock(attrib={'class': ''}) tr_elt = Mock(**{'findmeld.return_value': cell_elt}) # test with process state not in expected list handler._write_process_button(tr_elt, 'meld_id', '10.0.0.1', 'index.html', 'action', 'dummy_proc', 'running', ['stopped', 'stopping']) assert tr_elt.findmeld.call_args_list == [call('meld_id')] assert cell_elt.attrib['class'] == 'button off' assert not cell_elt.attributes.called assert not cell_elt.content.called tr_elt.findmeld.reset_mock() del cell_elt.attrib['class'] # test with filled stats on selected process handler._write_process_button(tr_elt, 'meld_id', '10.0.0.1', 'index.html', 'action', 'dummy_proc', 'running', ['running', 'starting']) assert tr_elt.findmeld.call_args_list == [call('meld_id')] assert cell_elt.attrib['class'] == 'button on' assert handler.view_ctx.format_url.call_args_list == [call('10.0.0.1', 'index.html', action='action', namespec='dummy_proc')] assert cell_elt.attributes.call_args_list == [call(href='an url')] assert not cell_elt.content.called tr_elt.findmeld.reset_mock() handler.view_ctx.format_url.reset_mock() cell_elt.attributes.reset_mock() del cell_elt.attrib['class'] # test with unset namespec handler._write_process_button(tr_elt, 'meld_id', '10.0.0.1', 'index.html', 'action', '', 'running', ['running', 'starting']) assert tr_elt.findmeld.call_args_list == [call('meld_id')] assert 'class' not in cell_elt.attrib assert not handler.view_ctx.format_url.called assert not cell_elt.attributes.called assert cell_elt.content.call_args_list == [call('')]
6,867
def little_endian_bytes_to_int(little_endian_byte_seq):
    """Converts a pair of bytes into an integer.

    The `little_endian_byte_seq` input must be a two-byte sequence defined
    according to the little-endian notation (i.e. the least significant byte
    first).

    For instance, if the `little_endian_byte_seq` input is equal to
    ``(0xbc, 0x02)`` this function returns the decimal value ``700``
    (0x02bc in hexadecimal notation).

    :param bytes little_endian_byte_seq: the two-byte sequence to be
        converted. It must be compatible with the "bytes" type and defined
        according to the little-endian notation.
    """

    # Check the argument and convert it to "bytes" if necessary.
    # Assert "little_endian_byte_seq" items are in range (0, 0xff).
    # "TypeError" and "ValueError" are raised by the "bytes" constructor if
    # necessary.
    # The statement "tuple(little_endian_byte_seq)" implicitly rejects
    # integers (and all non-iterable objects) to compensate for the fact that
    # the bytes constructor doesn't reject them: bytes(2) is valid and returns
    # b'\x00\x00'
    little_endian_byte_seq = bytes(tuple(little_endian_byte_seq))

    # Check that the argument is a sequence of two items
    if len(little_endian_byte_seq) != 2:
        raise ValueError("A sequence of two bytes is required.")

    integer = little_endian_byte_seq[1] * 0x100 + little_endian_byte_seq[0]

    return integer
6,868
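A short check of the conversion above against the standard library; the values are purely illustrative.

import struct

assert little_endian_bytes_to_int(b'\xbc\x02') == 700
assert little_endian_bytes_to_int((0xbc, 0x02)) == 700
# struct.unpack with the '<H' format (little-endian unsigned short) agrees:
assert struct.unpack('<H', b'\xbc\x02')[0] == 700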
def higher_follower_count(A, B): """ Compares follower count key between two dictionaries""" if A['follower_count'] >= B['follower_count']: return "A" return "B"
6,869
def _element_or_none(germanium, selector, point): """ Function to check if the given selector is only a regular element without offset clicking. If that is the case, then we enable the double hovering in the mouse actions, to solve a host of issues with hovering and scrolling, such as elements appearing on mouse in, or edge not hovering correctly. :param germanium: :param selector: :param point: :return: """ if isinstance(selector, Point): return None if point: return None return _element(germanium, selector)
6,870
def ENDLEMuEpP_TransferMatrix( style, tempInfo, crossSection, productFrame, angularData, EMuEpPData, multiplicity, comment = None ) : """This is LLNL I = 1, 3 type data.""" logFile = tempInfo['logFile'] workDir = tempInfo['workDir'] s = versionStr + '\n' s += "Process: 'Double differential EMuEpP data transfer matrix'\n" s += commonDataToString( comment, style, tempInfo, crossSection, productFrame, multiplicity = multiplicity ) s += angularToString( angularData, crossSection ) s += EMuEpPDataToString( EMuEpPData ) return( executeCommand( logFile, transferMatrixExecute, s, workDir, tempInfo['workFile'], tempInfo['restart'] ) )
6,871
def _fit_gaussian(f, grid, image_spot, p0, lower_bound=None, upper_bound=None): """Fit a gaussian function to a 3-d or 2-d image. # TODO add equations and algorithm Parameters ---------- f : func A 3-d or 2-d gaussian function with some parameters fixed. grid : np.ndarray, np.float Grid data to compute the gaussian function for different voxel within a volume V or surface S. In nanometer, with shape (3, V_z * V_y * V_x), or (2, S_y * S_x). image_spot : np.ndarray, np.uint A 3-d or 2-d image with detected spot and shape (z, y, x) or (y, x). p0 : List List of parameters to estimate. lower_bound : List List of lower bound values for the different parameters. upper_bound : List List of upper bound values for the different parameters. Returns ------- popt : np.ndarray Fitted parameters. pcov : np.ndarray Estimated covariance of 'popt'. """ # TODO check that we do not fit a 2-d gaussian function to a 3-d image or # the opposite # compute lower bound and upper bound if lower_bound is None: lower_bound = [-np.inf for _ in p0] if upper_bound is None: upper_bound = [np.inf for _ in p0] bounds = (lower_bound, upper_bound) # Apply non-linear least squares to fit a gaussian function to a 3-d image y = np.reshape(image_spot, (image_spot.size,)).astype(np.float32) popt, pcov = curve_fit(f=f, xdata=grid, ydata=y, p0=p0, bounds=bounds) return popt, pcov
6,872
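A self-contained sketch of how _fit_gaussian might be driven, using a hand-rolled 2-d Gaussian model and a simulated spot; the model, grid units and initial guess are assumptions for illustration only.

import numpy as np

def gaussian_2d(grid, mu_y, mu_x, sigma, amplitude):
    # grid has shape (2, N): row 0 holds y coordinates, row 1 holds x coordinates
    yy, xx = grid
    return amplitude * np.exp(-((yy - mu_y) ** 2 + (xx - mu_x) ** 2) / (2 * sigma ** 2))

# Build a 2-d grid (plain pixel indices here instead of nanometers, for simplicity).
size = 15
yy, xx = np.meshgrid(np.arange(size), np.arange(size), indexing="ij")
grid = np.stack([yy.ravel(), xx.ravel()]).astype(np.float32)

# Simulate a noisy spot and fit it.
truth = (7.0, 7.0, 2.0, 150.0)
spot = gaussian_2d(grid, *truth).reshape(size, size)
spot = np.random.poisson(spot).astype(np.uint16)

p0 = [6.0, 6.0, 1.5, 100.0]
popt, pcov = _fit_gaussian(gaussian_2d, grid, spot, p0)
print(popt)  # should be close to `truth`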
def createfourierdesignmatrix_chromatic(toas, freqs, nmodes=30, Tspan=None,
                                        logf=False, fmin=None, fmax=None,
                                        idx=4):

    """
    Construct scattering-variation fourier design matrix.

    :param toas: vector of time series in seconds
    :param freqs: radio frequencies of observations [MHz]
    :param nmodes: number of fourier coefficients to use
    :param Tspan: option to use some other Tspan
    :param logf: use log frequency spacing
    :param fmin: lower sampling frequency
    :param fmax: upper sampling frequency
    :param idx: index of chromatic effects

    :return: F: Chromatic-variation fourier design matrix
    :return: f: Sampling frequencies
    """

    # get base fourier design matrix and frequencies
    F, Ffreqs = utils.createfourierdesignmatrix_red(
        toas, nmodes=nmodes, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax)

    # compute the chromatic-variation scaling vector
    Dm = (1400/freqs) ** idx

    return F * Dm[:, None], Ffreqs
6,873
def get_word_data(char_data):
    """
    Get the word segmentation results.
    :param char_data:
    :return:
    """
    seq_data = [''.join(l) for l in char_data]
    word_data = []
    # stop_words = [line.strip() for line in open(stop_word_file, 'r', encoding='utf-8')]
    for seq in seq_data:
        seq_cut = jieba.cut(seq, cut_all=False)
        word_data.append([w for w in seq_cut])

    return word_data
6,874
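A small illustrative call, assuming jieba is installed; the character sequences and the expected segmentation are made up.

# Each inner list is a character-level sequence; get_word_data joins the
# characters back into a string and re-segments each sequence with jieba.
char_data = [
    ['我', '爱', '自', '然', '语', '言', '处', '理'],
    ['今', '天', '天', '气', '不', '错'],
]
print(get_word_data(char_data))
# e.g. [['我', '爱', '自然', '语言', '处理'], ['今天', '天气', '不错']]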
def make_non_absolute(path): """ Make a path non-absolute (so it can be joined to a base directory) @param path: The file path """ drive, path = os.path.splitdrive(path) index = 0 while os.path.isabs(path[index:]): index = index + 1 return path[index:]
6,875
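A quick POSIX-flavoured illustration of stripping the absolute prefix so the path can be re-rooted under a base directory.

import os

relative = make_non_absolute('/var/log/syslog')    # 'var/log/syslog'
print(os.path.join('/tmp/sandbox', relative))      # '/tmp/sandbox/var/log/syslog'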
def assert_allclose(actual: numpy.float64, desired: int): """ usage.matplotlib: 1 usage.networkx: 4 usage.scipy: 20 usage.skimage: 1 usage.sklearn: 1 usage.statsmodels: 31 """ ...
6,876
def test_bbox(tiler): """Bounding boxes of tiles.""" assert tiler.bbox(1, 5, 3) == (-15028131.257091932, -10018754.17139462, -10018754.171394622, -5009377.085697312) assert tiler.bbox(77, 93, 8) == (-7983694.730330089, 5322463.153553393, -7827151.696402049, 5479006.187481433) assert tiler.bbox(27685, 19041, 15) == (13821037.70641243, -3250713.9389119744, 13822260.698864993, -3249490.9464594126)
6,877
def get_output_stream(items: List[Dict[str, Any]]) -> List[OutputObject]:
    """Convert a list of items in an output stream into a list of output
    objects. The elements of `items` are expected to be in the default
    serialization format for output objects.

    Parameters
    ----------
    items: list(dict)
        Items in the output stream in default serialization format

    Returns
    -------
    list(vizier.viztrail.module.OutputObject)
    """
    result = list()
    for item in items:
        result.append(
            OutputObject(
                type=item[KEY_OUTPUT_TYPE],
                value=item[KEY_OUTPUT_VALUE]
            )
        )
    return result
6,878
def lvnf_stats(**kwargs): """Create a new module.""" return RUNTIME.components[LVNFStatsWorker.__module__].add_module(**kwargs)
6,879
def parse_float(string): """ Finds the first float in a string without casting it. :param string: :return: """ matches = re.findall(r'(\d+\.\d+)', string) if matches: return matches[0] else: return None
6,880
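Two illustrative calls showing that the match is returned as a string, not cast to float.

print(parse_float('took 3.75 seconds'))   # '3.75'
print(parse_float('no decimals here'))    # None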
def _flush_temp_file():
    """
    Clear the directory where blob files are downloaded to, if it exists.
    """
    global BLOB_DOWNLOAD_PATH
    try:
        dirPath = os.path.join(os.getcwd(), BLOB_DOWNLOAD_PATH)
        print("PTH", os.getcwd())
        print("DIR", dirPath)
        if os.path.isdir(dirPath):
            print("CLEAR PATH")
            # Match the files inside the directory, not the directory path itself
            files = glob.glob(os.path.join(dirPath, '*'))
            for f in files:
                os.remove(f)
    except Exception as ex:
        print(str(ex))
6,881
def objectproxy_realaddress(obj): """ Obtain a real address as an integer from an objectproxy. """ voidp = QROOT.TPython.ObjectProxy_AsVoidPtr(obj) return C.addressof(C.c_char.from_buffer(voidp))
6,882
def CollateRevisionHistory(builds, repo): """Sorts builds and revisions in repository order. Args: builds: a dict of the form: ``` builds := { master: { builder: [Build, ...], ..., }, ... } ``` repo (GitWrapper): repository in which the revision occurs. Returns: A 2-tuple of (build_history, revisions), where: ``` build_history := { master: { builder: [Build, ...], ..., }, ... } ``` and ``` revisions := [revision, ...] ``` """ build_history = {} revisions = set() for master, master_data in builds.iteritems(): LOGGER.debug('Collating master %s', master) master_history = build_history.setdefault(master, {}) for builder, builder_data in master_data.iteritems(): LOGGER.debug('Collating builder %s', builder) for build in builder_data: revisions.add(str(build.revision)) master_history[builder] = repo.sort( builder_data, keyfunc=lambda b: b.revision) revisions = repo.sort(revisions) return (build_history, revisions)
6,883
def delta_shear(observed_gal, psf_deconvolve, psf_reconvolve, delta_g1, delta_g2):
    """
    Takes in an observed galaxy object, two PSFs for metacal (deconvolving and
    re-convolving), and the amount by which to shift g1 and g2, and returns a
    3-tuple of modified galaxy objects:
    ((g1plus, g1minus), (g2plus, g2minus), reconvolved_noshear)
    """
    # Deconvolving by psf_deconvolve
    inv_psf = galsim.Deconvolve(psf_deconvolve)
    deconvolved = galsim.Convolve(observed_gal, inv_psf)

    # Applying second shear in g1
    sheared_plus_g1 = deconvolved.shear(g1=delta_g1, g2=0)
    sheared_minus_g1 = deconvolved.shear(g1=-delta_g1, g2=0)

    # Applying second shear in g2
    sheared_plus_g2 = deconvolved.shear(g1=0, g2=delta_g2)
    sheared_minus_g2 = deconvolved.shear(g1=0, g2=-delta_g2)

    # Reconvolving by psf_reconvolve for g1
    reconvolved_plus_g1 = galsim.Convolve(sheared_plus_g1, psf_reconvolve)
    reconvolved_minus_g1 = galsim.Convolve(sheared_minus_g1, psf_reconvolve)
    g1_plus_minus = (reconvolved_plus_g1, reconvolved_minus_g1)

    # Reconvolving by psf_reconvolve for g2
    reconvolved_plus_g2 = galsim.Convolve(sheared_plus_g2, psf_reconvolve)
    reconvolved_minus_g2 = galsim.Convolve(sheared_minus_g2, psf_reconvolve)
    g2_plus_minus = (reconvolved_plus_g2, reconvolved_minus_g2)

    # g1_plus_minus = (sheared_plus_g1, sheared_minus_g1)
    # g2_plus_minus = (sheared_plus_g2, sheared_minus_g2)

    # adding noshear reconvolved for testing
    reconvolved_noshear = galsim.Convolve(deconvolved, psf_reconvolve)

    return g1_plus_minus, g2_plus_minus, reconvolved_noshear
6,884
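A minimal sketch of driving delta_shear with GalSim, assuming GalSim is installed; the Gaussian profiles, shear values and the slightly dilated reconvolution PSF are illustrative choices, not the author's pipeline.

import galsim

# Illustrative galaxy and PSF; any GSObjects would do.
gal = galsim.Gaussian(sigma=1.2, flux=1.0).shear(g1=0.02, g2=0.0)
psf = galsim.Gaussian(sigma=0.7)
observed = galsim.Convolve(gal, psf)

# Re-convolve with a slightly dilated PSF, as is common in metacalibration.
psf_reconv = galsim.Gaussian(sigma=0.7 * 1.05)

(g1p, g1m), (g2p, g2m), noshear = delta_shear(observed, psf, psf_reconv,
                                              delta_g1=0.01, delta_g2=0.01)
image_plus = g1p.drawImage(nx=64, ny=64, scale=0.2)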
def estimateModifiedPiSquared(n):
    """
    Estimates the value of Pi^2 through a formula involving partial sums.
    n is the number of terms to be summed; the larger n is, the more accurate the
    estimation of Pi^2 tends to be (but not always).

    The modification relative to estimatePiSquared() is that the n terms are added
    in reverse order (i.e. the smallest values are added first).
    """
    partialSum = 0  # Initializing

    # Implementation of the mathematical formula involving summing
    for k in range(n, 0, -1):  # Order reversed
        partialSum += 1 / (k ** 2)
    estimate = 6*partialSum

    return estimate
6,885
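A quick convergence check against math.pi ** 2; the term counts are arbitrary.

import math

for n in (10, 1_000, 100_000):
    approx = estimateModifiedPiSquared(n)
    print(n, approx, abs(approx - math.pi ** 2))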
def pattern_match(template, image, upsampling=16, metric=cv2.TM_CCOEFF_NORMED, error_check=False):
    """
    Call an arbitrary pattern matcher using a subpixel approach where the template and image
    are upsampled using a third order polynomial.

    Parameters
    ----------
    template : ndarray
               The input search template used to 'query' the destination
               image

    image : ndarray
            The image or sub-image to be searched

    upsampling : int
                 The multiplier to upsample the template and image.

    metric : object
             The OpenCV metric to be used to perform the template based matching
             Options: {cv2.TM_CCORR_NORMED, cv2.TM_CCOEFF_NORMED, cv2.TM_SQDIFF_NORMED}
             In testing the first two options perform significantly better with Apollo data.

    error_check : bool
                  If True, also apply a different matcher and test that the values
                  are not too divergent.  Default, False.

    Returns
    -------
    x : float
        The x offset

    y : float
        The y offset

    max_corr : float
               The strength of the correlation in the range [-1, 1].

    result : ndarray
             The raw correlation surface returned by cv2.matchTemplate.
    """
    if upsampling < 1:
        raise ValueError('upsampling must be >= 1')

    # Fit a 3rd order polynomial to upsample the images
    if upsampling != 1:
        u_template = zoom(template, upsampling, order=3)
        u_image = zoom(image, upsampling, order=3)
    else:
        u_template = template
        u_image = image

    result = cv2.matchTemplate(u_image, u_template, method=metric)
    _, max_corr, min_loc, max_loc = cv2.minMaxLoc(result)
    if metric == cv2.TM_SQDIFF or metric == cv2.TM_SQDIFF_NORMED:
        x, y = (min_loc[0], min_loc[1])
    else:
        x, y = (max_loc[0], max_loc[1])

    # Compute the idealized shift (image center)
    ideal_y = u_image.shape[0] / 2
    ideal_x = u_image.shape[1] / 2

    # Compute the shift from template upper left to template center
    y += (u_template.shape[0] / 2)
    x += (u_template.shape[1] / 2)

    x = (x - ideal_x) / upsampling
    y = (y - ideal_y) / upsampling
    return x, y, max_corr, result
6,886
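A self-contained sketch exercising pattern_match on synthetic arrays; the noise level, feature placement and upsampling factor are arbitrary choices.

import numpy as np

rng = np.random.default_rng(0)

# A noisy float32 image with a bright square, and a template cut around that square.
image = (rng.random((60, 60)) * 10).astype(np.float32)
image[20:30, 32:42] += 200.0
template = image[18:32, 30:44].copy()

x_off, y_off, strength, surface = pattern_match(template, image, upsampling=4)
print(x_off, y_off, strength)  # offsets of the matched template centre from the image centre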
def test_run_high_cardinality_forever(high_cardinality_instance): """ This test is a utility and is useful in situations where you want to connect to the database instance and have queries executing against it. Note, you must kill the test execution to stop this test. In order to run this test, you must pass the `--run_high_cardinality_forever` flag. e.g. `ddev ... -pa --run_high_cardinality_forever` TIP: It's easier to utilize this by running it as a standalone test operation. e.g. in conjunction with the required flag `ddev ... -pa --run_high_cardinality_forever -k test_run_high_cardinality_forever` """ queries = HighCardinalityQueries(high_cardinality_instance) _check_queries_is_ready(queries) queries.start_background(config={'hc_threads': 20, 'slow_threads': 5, 'complex_threads': 10})
6,887
def main(argv=[__name__]): """Raspi_x10 command line interface. """ try: try: devices_file, rules_file, special_days_file = argv[1:] except ValueError: raise Usage('Wrong number of arguments') sched = Schedule() try: sched.load_conf(devices_file, 'x10_devices', 'devices') sched.load_conf(rules_file, 'x10_rules', 'rules') sched.load_conf(special_days_file, 'special_days', 'special_days') except IOError: raise Usage except KeyError as err: raise Usage('KeyError: {0}'.format(err)) sched.build() sched.write() return 0 except Usage as err: log.error('{0.msg}\n{0.usage}'.format(err)) return 2
6,888
def HexaMeshIndexCoord2VoxelValue(nodes, elements, dim, elementValues):
    """
    Convert a hexahedral mesh (bricks) in index coordinates to a volume of voxels,
    with voxel values assigned according to elementValues.

    dim: dimensions of the volume in x, y and z, in voxels (tuple)
    elementValues: len(elements) == len(elementValues)

    Example: to retrieve the nodes corresponding to element 217: nodes[elements[217],:]
    Given the default voxelSize and origin, coordinates range from -0.5 to dimXYZ+0.5.
    nodes.shape = (numNodes, 3)
    """
    volume = np.zeros(dim, dtype=elementValues.dtype)  # initialize an empty volume with the dtype of elementValues
    xyz = nodes[elements,:][:,0,:] + 0.5  # voxel coordinates of bone
    xyz = xyz.astype(int)
    volume[tuple(xyz.T)] = elementValues

    return volume
6,889
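A tiny, deliberately degenerate illustration (each "element" just repeats a single node index) to show how the first corner node of every element selects the voxel that receives its value.

import numpy as np

# Two elements whose first corner nodes sit at index coordinates (-0.5, -0.5, -0.5)
# and (0.5, -0.5, -0.5); only the first node of each element matters here.
nodes = np.array([[-0.5, -0.5, -0.5],
                  [ 0.5, -0.5, -0.5]])
elements = np.array([[0] * 8, [1] * 8])
values = np.array([100, 200], dtype=np.int32)

vol = HexaMeshIndexCoord2VoxelValue(nodes, elements, dim=(2, 2, 2), elementValues=values)
print(vol[0, 0, 0], vol[1, 0, 0])  # 100 200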
def main(): """Create the database and add data to it""" Base.metadata.create_all(engine) create_session = sessionmaker(bind=engine) session = create_session() session.add_all([]) session.commit()
6,890
def protocol_0101(abf): """0112 0101 tau -10pA""" assert isinstance(abf, pyabf.ABF) generic_overlay_average(abf, baselineSec1=0, baselineSec2=0.1) return
6,891
def hamiltonian(latt: Lattice, eps: (float, np.ndarray) = 0., t: (float, np.ndarray) = 1.0, dense: bool = True) -> (csr_matrix, np.ndarray): """Computes the Hamiltonian-matrix of a tight-binding model. Parameters ---------- latt : Lattice The lattice the tight-binding model is defined on. eps : array_like, optional The on-site energies of the model. t : array_like, optional The hopping energies of the model. dense : bool, optional If ``True`` the hamiltonian matrix is returned as a ``np.ndarray`` Returns ------- ham : csr_matrix or np.ndarray The Hamiltonian-matrix as a sparse or dense matrix. """ dmap = latt.data.map() data = np.zeros(dmap.size) data[dmap.onsite()] = eps data[dmap.hopping()] = t ham = csr_matrix((data, dmap.indices)) if dense: ham = ham.toarray() return ham
6,892
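A sketch of building and diagonalising the Hamiltonian, assuming the Lattice type here is lattpy's (whose data.map() interface this signature resembles); the chain length and energies are arbitrary, and the lattpy calls are an assumption rather than part of the original code.

import numpy as np
import lattpy as lp

# Nearest-neighbour chain with a single atom per cell (illustrative).
latt = lp.simple_chain(a=1.0)
latt.build(20)

ham = hamiltonian(latt, eps=0.0, t=1.0, dense=True)
energies = np.linalg.eigvalsh(ham)
print(energies.min(), energies.max())  # roughly the [-2t, 2t] band edges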
def zip_dir(name, srcs, zipname, **kwargs): """Zips up an entire directory or Fileset. Args: name: The name of the target srcs: A single-item list with a directory or fileset zipname: The name of the output zip file **kwargs: Further generic arguments to pass to genrule, e.g. visibility. """ if len(srcs) > 1: fail("More than one directory is not supported by zip_dir yet", attr = srcs) native.genrule( name = name, srcs = srcs, outs = [zipname], cmd = "zip $(OUTS) $(SRCS)", **kwargs )
6,893
def pad_in(string: str, space: int) -> str: """ >>> pad_in('abc', 0) 'abc' >>> pad_in('abc', 2) ' abc' """ return "".join([" "] * space) + string
6,894
def maybe_download_and_extract_tar_gz(root, file_name, data_url): """Downloads file from given URL and extracts if compressed as tar.gz Args: root (str): The root directory file_name (str): File name to download to data_url (str): Url of data """ if not os.path.exists(root): os.makedirs(root) file_path = os.path.join(root, file_name) # Download file if not present if len([x for x in os.listdir(root) if x == file_name]) == 0: progress_download(data_url, file_path) if file_name.endswith(".tar.gz"): with tarfile.open(file_path, "r:gz") as tar: dirs = [member for member in tar.getmembers()] tar.extractall(path=root, members=dirs)
6,895
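An illustrative call; the URL and directory are hypothetical, and progress_download is assumed to be the helper available in the same module.

maybe_download_and_extract_tar_gz(
    root="./data",
    file_name="sample-archive.tar.gz",
    data_url="https://example.com/sample-archive.tar.gz",
)
# Afterwards, ./data contains sample-archive.tar.gz plus its extracted contents.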
def get_notifies(request):
    """Display all notifications on the page."""
    user = request.siteuser
    if not user:
        return HttpResponseRedirect(reverse('siteuser_login'))

    notifies = Notify.objects.filter(user=user).select_related('sender').order_by('-notify_at')
    # TODO: pagination
    ctx = get_notify_context(request)
    ctx['notifies'] = notifies
    return render_to_response(
        notify_template,
        ctx,
        context_instance=RequestContext(request)
    )
6,896
def set_module_repos(module, path): """ Sets the repository path for a specific module """ if os.path.exists(path): Settings.module_repos[module] = path else: raise VersionsException("Cannot set the repos path to a non existent directory.")
6,897
def declare_eq_branch_power_ptdf_approx(model, index_set, PTDF, rel_ptdf_tol=None, abs_ptdf_tol=None): """ Create the equality constraints or expressions for power (from PTDF approximation) in the branch """ m = model con_set = decl.declare_set("_con_eq_branch_power_ptdf_approx_set", model, index_set) pf_is_var = isinstance(m.pf, pe.Var) if pf_is_var: m.eq_pf_branch = pe.Constraint(con_set) else: if not isinstance(m.pf, pe.Expression): raise Exception("Unrecognized type for m.pf", m.pf.pprint()) for branch_name in con_set: expr = \ get_power_flow_expr_ptdf_approx(m, branch_name, PTDF, rel_ptdf_tol=rel_ptdf_tol, abs_ptdf_tol=abs_ptdf_tol) if pf_is_var: m.eq_pf_branch[branch_name] = \ m.pf[branch_name] == expr else: m.pf[branch_name] = expr
6,898
def get_existing_rule(text):
    """
    Return the matched rule if the text exactly matches an existing rule,
    None otherwise.
    """
    matches = get_license_matches(query_string=text)
    if len(matches) == 1:
        match = matches[0]
        if match.matcher == MATCH_HASH:
            return match.rule
6,899