content — string, lengths 22 to 815k
id — int64, 0 to 4.91M
def painel(request):
    """
    Display the user's dashboard.
    """
    return render(request, "lancamentos/painel.html")
6,700
def dialogue(bdg, activate_audio, activate_proximity, mode="server"):
    """
    Attempts to read data from the device specified by the address. Reading is handled by gatttool.
    :param bdg:
    :return:
    """
    ret = bdg.pull_data(activate_audio, activate_proximity)
    addr = bdg.addr
    if ret == 0:
        logger.info("Successfully pulled data")
        # if we were able to pull data, we saw the badge again
        bdg.last_seen_ts = time.time()
    else:
        logger.info("Errors pulling data.")

    if bdg.dlg.chunks:
        logger.info("Chunks received: {}".format(len(bdg.dlg.chunks)))
        logger.info("saving chunks to file")

        # store in JSON file
        with open(get_audio_name(mode), "a") as fout:
            for chunk in bdg.dlg.chunks:
                ts_with_ms = round_float_for_log(ts_and_fract_to_float(chunk.ts, chunk.fract))
                log_line = {
                    'type': "audio received",
                    'log_timestamp': round_float_for_log(time.time()),
                    'log_index': -1,  # need to find a good accumulator.
                    'data': {
                        'voltage': round_float_for_log(chunk.voltage),
                        'timestamp': ts_with_ms,
                        'sample_period': chunk.sampleDelay,
                        'num_samples': len(chunk.samples),
                        'samples': chunk.samples,
                        'badge_address': addr,
                        'member': bdg.key,
                        'member_id': bdg.badge_id
                    }
                }

                logger.debug("Chunk timestamp: {0:.3f}, Voltage: {1:.3f}, Delay: {2}, Samples in chunk: {3}".format(
                    ts_with_ms, chunk.voltage, chunk.sampleDelay, len(chunk.samples)))
                # logger.debug(json.dumps(log_line))
                json.dump(log_line, fout)
                fout.write('\n')

        logger.info("done writing")

        # update badge object to hold latest timestamps
        last_chunk = bdg.dlg.chunks[-1]
        last_chunk_ts_pretty = dt.fromtimestamp(last_chunk.ts).strftime("%Y-%m-%d@%H:%M:%S UTC")
        if bdg.is_newer_audio_ts(last_chunk.ts, last_chunk.fract):
            logger.debug("Setting last badge audio timestamp to {} {} ({})".format(
                last_chunk.ts, last_chunk.fract, last_chunk_ts_pretty))
            bdg.set_audio_ts(last_chunk.ts, last_chunk.fract)
        else:
            logger.debug("Keeping existing timestamp ({}.{}) for {}. Last chunk timestamp was: {}.{} ({})"
                         .format(bdg.last_audio_ts_int, bdg.last_audio_ts_fract, bdg.addr,
                                 last_chunk.ts, last_chunk.fract, last_chunk_ts_pretty))
    else:
        logger.info("No mic data ready")

    if bdg.dlg.scans:
        logger.info("Proximity scans received: {}".format(len(bdg.dlg.scans)))
        logger.info("saving proximity scans to file")
        with open(get_proximity_name(mode), "a") as fout:
            for scan in bdg.dlg.scans:
                ts_with_ms = round_float_for_log(scan.ts)
                log_line = {
                    'type': "proximity received",
                    'log_timestamp': round_float_for_log(time.time()),
                    'log_index': -1,  # need to find a good accumulator.
                    'data': {
                        'voltage': round_float_for_log(scan.voltage),
                        'timestamp': ts_with_ms,
                        'badge_address': addr,
                        'rssi_distances': {
                            device.ID: {'rssi': device.rssi, 'count': device.count} for device in scan.devices
                        },
                        'member': bdg.key,
                        'member_id': bdg.badge_id
                    }
                }

                logger.debug("SCAN: scan timestamp: {0:.3f}, voltage: {1:.3f}, Devices in scan: {2}".format(
                    ts_with_ms, scan.voltage, scan.numDevices))
                # logger.info(json.dumps(log_line))
                json.dump(log_line, fout)
                fout.write('\n')

        # update badge object to hold latest timestamps
        last_scan = bdg.dlg.scans[-1]
        last_scan_ts_pretty = dt.fromtimestamp(last_scan.ts).strftime("%Y-%m-%d@%H:%M:%S UTC")
        logger.debug("Setting last badge proximity timestamp to {} ({})".format(
            last_scan.ts, last_scan_ts_pretty))
        bdg.last_proximity_ts = last_scan.ts
    else:
        logger.info("No proximity scans ready")
6,701
def which(program, mode=os.F_OK | os.X_OK, path=None): """ Mimics the Unix utility which. For python3.3+, shutil.which provides all of the required functionality. An implementation is provided in case shutil.which does not exist. :param program: (required) string Name of program (can be fully-qualified path as well) :param mode: (optional) integer flag bits Permissions to check for in the executable Default: os.F_OK (file exists) | os.X_OK (executable file) :param path: (optional) string A custom path list to check against. Implementation taken from shutil.py. Returns: A fully qualified path to program as resolved by path or user environment. Returns None when program can not be resolved. """ try: from shutil import which as shwhich return shwhich(program, mode, path) except ImportError: def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, _ = os.path.split(program) if fpath: if is_exe(program): return program else: if path is None: path = os.environ.get("PATH", os.defpath) if not path: return None path = path.split(os.pathsep) for pathdir in path: pathdir = pathdir.strip('"') exe_file = os.path.join(pathdir, program) if is_exe(exe_file): return exe_file return None
6,702
def q_statistic(y, c1, c2): """ Q-Statistic. Parameters ---------- y : numpy.array Target sample. c1 : numpy.array Output of the first classifier. c2 : numpy.array Output of the second classifier. Returns ------- float Return the Q-Statistic measure between the classifiers 'c1' and 'c2'. Q-Statistic takes value in the range of [-1, 1]: - is zero if 'c1' and 'c2' are independent. - is positive if 'c1' and 'c2' make similar predictions. - is negative if 'c1' and 'c2' make different predictions. References ---------- .. [1] Zhi-Hua Zhou. (2012), pp 105: Ensemble Methods Foundations and Algorithms Chapman & Hall/CRC Machine Learning & Pattern Recognition Series. """ a, b, c, d = contingency_table(y, c1, c2) return (a * d - b * c) / (a * d + b * c)
6,703
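As a quick, hedged illustration of the q_statistic function above: the contingency_table helper it calls is not shown in this entry, so the version below spells out the assumed semantics (a = both classifiers correct, b = only the first correct, c = only the second correct, d = both wrong) and applies the same formula.

import numpy as np

def contingency_table(y, c1, c2):
    # assumed semantics of the helper used by q_statistic
    r1, r2 = (c1 == y), (c2 == y)
    a = np.sum(r1 & r2)      # both correct
    b = np.sum(r1 & ~r2)     # only c1 correct
    c = np.sum(~r1 & r2)     # only c2 correct
    d = np.sum(~r1 & ~r2)    # both wrong
    return a, b, c, d

y  = np.array([0, 1, 1, 0, 1, 0])
c1 = np.array([0, 1, 0, 0, 1, 1])
c2 = np.array([0, 1, 1, 0, 0, 1])
a, b, c, d = contingency_table(y, c1, c2)
print((a * d - b * c) / (a * d + b * c))   # 0.5 -> the two classifiers tend to agree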
def nan_helper(y): """Helper to handle indices and logical indices of NaNs. Input: - y, 1d numpy array with possible NaNs Output: - nans, logical indices of NaNs - index, a function, with signature indices= index(logical_indices), to convert logical indices of NaNs to 'equivalent' indices Example: >>> # linear interpolation of NaNs >>> nans, x= nan_helper(y) >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans]) Taken from: https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array """ return np.isnan(y), lambda z: z.nonzero()[0]
6,704
def lid_mle_amsaleg(knn_distances): """ Local intrinsic dimension (LID) estimators from the papers, 1. Amsaleg, Laurent, et al. "Estimating local intrinsic dimensionality." Proceedings of the 21th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining. ACM, 2015. 2. Ma, Xingjun, et al. "Characterizing adversarial subspaces using local intrinsic dimensionality." arXiv preprint arXiv:1801.02613 (2018). :param knn_distances: numpy array of k nearest neighbor distances. Has shape `(n, k)` where `n` is the number of points and `k` is the number of neighbors. :return: `lid_est` is a numpy array of shape `(n, )` with the local intrinsic dimension estimates in the neighborhood of each point. """ n, k = knn_distances.shape # Replace 0 distances with a very small float value knn_distances = np.clip(knn_distances, sys.float_info.min, None) log_dist_ratio = np.log(knn_distances) - np.log(knn_distances[:, -1].reshape((n, 1))) # lid_est = -k / np.sum(log_dist_ratio, axis=1) lid_est = -(k - 1) / np.sum(log_dist_ratio, axis=1) return lid_est
6,705
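A usage sketch for lid_mle_amsaleg above, assuming scikit-learn is available for the k-nearest-neighbor query (the function itself also expects numpy and sys to be imported in its module); on a 10-dimensional Gaussian sample the estimates should cluster roughly around 10.

import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 10))              # 500 points drawn from a 10-d Gaussian

k = 20
nn = NearestNeighbors(n_neighbors=k + 1).fit(X)
dist, _ = nn.kneighbors(X)                  # shape (500, k + 1); column 0 is the point itself
knn_distances = dist[:, 1:]                 # drop the zero self-distance column

lid_est = lid_mle_amsaleg(knn_distances)
print(lid_est.shape, lid_est.mean())        # (500,) and a mean near the true dimension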
def _rebase_bv(bv: BinaryView, dbg: DebugAdapter.DebugAdapter) -> BinaryView:
    """Get a rebased BinaryView for support of ASLR compatible binaries."""
    new_base = dbg.target_base()
    if core_ui_enabled() and new_base != bv.start:
        dbg.quit()
        raise Exception('[!] Can\'t do necessary rebase in GUI, try headless operation')
    new_bv = bv.rebase(new_base)
    if new_bv is None:  # None if rebasing is unnecessary
        return bv
    print('[*] Rebasing bv from 0x%x to 0x%x' % (bv.start, new_base))
    new_bv.update_analysis_and_wait()  # required after rebase
    return new_bv
6,706
def convert_parameters(child, text=False, tail=False, **kwargs):
    """
    Get child text or tail
    :param child:
    :param text:
    :param tail:
    :return:
    """
    p = re.compile(r'\S')
    # Remove empty info
    child_text = child.text if child.text else ''
    child_tail = child.tail if child.tail else ''
    child_text = child_text if p.search(child_text) else ''
    child_tail = child_tail if p.search(child_tail) else ''
    # all
    if text and tail:
        convert_string = child_text + child_tail
    # only_text
    elif text:
        convert_string = child_text
    # only_tail
    elif tail:
        convert_string = child_tail
    else:
        convert_string = ''
    # replace params
    mybatis_param_list = get_params(child)
    for mybatis_param in mybatis_param_list:
        convert_value = ''
        if mybatis_param.sql_param.is_function:
            # eval function
            convert_value = __eval_function(mybatis_param, **kwargs)
        else:
            # type conversion
            param_value = __get_param(mybatis_param.param_name, **kwargs)
            print(mybatis_param.param_name + ' value:' + str(param_value))
            convert_value = PY_MYBATIS_TYPE_HANDLER.convert(mybatis_param.python_type, mybatis_param.sql_type,
                                                            param_value,
                                                            PyMybatisTypeHandler.PYTHON2SQL_TYPE_HANDLER_CONVERT_MODE)
            # longjb modify 2021.10.29:
            if convert_value != 'null' and len(convert_value) > 0 and (
                    mybatis_param.sql_type == 'raw' or mybatis_param.python_type == 'raw'):
                convert_value = convert_value.replace("'", "`")
                # convert_value = convert_value[1:len(convert_value)-1]
        # print('name:' + str(mybatis_param.name))
        # print('value:' + convert_value)
        # print('sql_type:' + str(mybatis_param.sql_type))
        # print('python_type:' + str(mybatis_param.python_type))
        convert_string = convert_string.replace(mybatis_param.full_name, convert_value, 1)
    # convert CDATA string
    convert_cdata(convert_string)
    return convert_string
6,707
def extend_drag_table(): """ Make an extended table that covers a longer range of Reynolds numbers for each mach value. For low Reynolds values, this uses lower mach number coefficients, and for high Reynolds values it uses higher mach number coefficients. The coefficients are copied with an offset based on critical Reynolds number and closest matching coefficient. """ if len(ExtendedMnLogReCdDataTable): return for pos, (mach, reynolds_data, crit) in enumerate(MnReCdDataTable): ext_re_data = [(math.log10(re), cd) for re, cd in reynolds_data] for lowpos in range(pos - 1, -1, -1): ref_cd = ext_re_data[0][1] low_mach, low_reynolds_data, low_crit = MnReCdDataTable[lowpos] last_cd = None for re, cd in low_reynolds_data[::-1]: adj_re = math.log10(re) - math.log10(low_crit) + math.log10(crit) if adj_re < ext_re_data[0][0] and last_cd is not None: ext_re_data[0:0] = [(adj_re, cd - last_cd + ref_cd)] else: last_cd = cd for highpos in range(pos + 1, len(MnReCdDataTable)): ref_cd = ext_re_data[-1][1] high_mach, high_reynolds_data, high_crit = MnReCdDataTable[highpos] last_cd = None for re, cd in high_reynolds_data: adj_re = math.log10(re) - math.log10(high_crit) + math.log10(crit) if adj_re > ext_re_data[-1][0] and last_cd is not None: ext_re_data.append((adj_re, cd - last_cd + ref_cd)) else: last_cd = cd ExtendedMnLogReCdDataTable.append((mach, ext_re_data, crit))
6,708
def plugin_unloaded(): """On plugin unloaded. """ events.broadcast("plugin_unloaded")
6,709
def Get_Country_Name_From_ISO3_Extended(countryISO): """ Creates a subset of the quick chart data for a specific country. The subset includes all those rows containing the given country either as the origin or as the country of asylum. """ countryName = "" # June-22 - This function has been updated to include a to upper without a check on if the data is null or not # So we need to wrap it in a try catch try: countryName = Country.get_country_name_from_iso3(countryISO) except: print("Failed to get the country from get_country_name_from_iso3.") # Now lets try to find it for the three typical non-standard codes if countryName is None or countryName == "": print("Non-standard ISO code:", countryISO) if countryISO == "UKN": countryName = "Various / unknown" elif countryISO == "STA": countryName = "Stateless" elif countryISO == "TIB": countryName = "Tibetan" else: print("!!SERIOUS!! Unknown ISO code identified:", countryISO) # Lets add a sensible default here... countryName = "Various / unknown" return countryName
6,710
def individual_concentration(positions, base_capital): """ Print the individual concentration in the portfolio given existing positions and base capital :param positions: list of dict :param base_capital: int or float """ for i in positions: ticker = two.lookup_ticker(i['instrumentId'])['ticker'] if ticker not in etf: print('{} - {} - {:.2f}%'.format('Equity', ticker, 100 * i['totalShares'] * i['avgEntryPrice'] / base_capital)) else: print('{} - {} - {:.2f}%'.format('ETF', ticker, 100 * i['totalShares'] * i['avgEntryPrice'] / base_capital))
6,711
def to_cpu(x): """ Move cupy arrays (or dicts/lists of arrays) to CPU """ if len(sys.argv) > 1: if type(x) == dict: return {k:to_cpu(a) for (k, a) in x.items()} elif type(x) == list: return [to_cpu(a) for a in x] else: return cp.asnumpy(x) else: return x
6,712
def write_deltapack(statedir, chrootdir, version, manifest, previous_manifest, bundle_name): """Output deltapack to the statedir.""" out_path = os.path.join(statedir, "www", "update", version) delta_path = os.path.join(out_path, "delta") if not os.path.isdir(out_path): os.makedirs(out_path, exist_ok=True) with tempfile.TemporaryDirectory(dir=os.getcwd()) as odir: staged = os.path.join(odir, "staged") delta = os.path.join(odir, "delta") os.makedirs(staged) os.makedirs(delta) for val in manifest['files'].values(): if val[2] != version or val[1] == ZERO_HASH: continue delta_file = None if previous_manifest and previous_manifest['files'].get(val[3]): pval = previous_manifest['files'][val[3]] fname = f"{pval[2]}-{val[2]}-{pval[1]}-{val[1]}" delta_file = os.path.join(delta_path, fname) if not os.path.isfile(delta_file): delta_file = None copy_file = os.path.join(chrootdir, val[3][1:]) if delta_file: out_file = os.path.join(delta, fname) if os.path.isfile(f"{out_file}"): continue shutil.copyfile(delta_file, out_file) else: out_file = os.path.join(staged, val[1]) if os.path.exists(f"{out_file}"): continue extract_file(statedir, val, staged) if previous_manifest: out_file = f"pack-{bundle_name}-from-{previous_manifest['version']}" else: out_file = f"pack-{bundle_name}-from-0" create_tar(odir, out_path, (staged, delta), out_file, pack=True)
6,713
def parse_mapfile(map_file_path): """Parse the '.map' file""" def parse_keyboard_function(f, line): """Parse keyboard-functions in the '.map' file""" search = re.search(r'(0x\S+)\s+(0x\S+)', next(f)) position = int( search.group(1), 16 ) length = int( search.group(2), 16 ) search = re.search(r'0x\S+\s+(\S+)', next(f)) name = search.group(1) return { 'keyboard-functions': { name: { 'position': position, 'length': length, }, }, } def parse_layout_matrices(f, line): """Parse layout matrix information in the '.map' file""" name = re.search(r'.progmem.data.(_kb_layout\S*)', line).group(1) search = re.search(r'(0x\S+)\s+(0x\S+)', next(f)) position = int( search.group(1), 16 ) length = int( search.group(2), 16 ) return { 'layout-matrices': { name: { 'position': position, 'length': length, }, }, } # --- parse_mapfile() --- # normalize paths map_file_path = os.path.abspath(map_file_path) # check paths if not os.path.exists(map_file_path): raise ValueError("invalid 'map_file_path' given") output = {} f = open(map_file_path) for line in f: if re.search(r'^\s*\.text\.kbfun_', line): dict_merge(output, parse_keyboard_function(f, line)) elif re.search(r'^\s*\.progmem\.data.*layout', line): dict_merge(output, parse_layout_matrices(f, line)) return output
6,714
def thetaG(t, t1, t2):
    """
    Return a Gaussian pulse.

    Arguments:
    t  -- time of the pulse
    t1 -- initial time
    t2 -- final time

    Return:
    theta -- scalar or vector with the dimensions of t
    """
    tau = (t2 - t1) / 5
    to = t1 + (t2 - t1) / 2
    theta = (np.sqrt(np.pi) / (2 * tau)) * np.exp(-((t - to) / tau) ** 2)
    return theta
6,715
def grouped(l, n): """Yield successive n-sized chunks from l.""" for i in range(0, len(l), n): yield l[i:i + n]
6,716
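For example, the grouped helper above yields slices of the input sequence, with a shorter final chunk when the length is not a multiple of n:

>>> list(grouped([1, 2, 3, 4, 5, 6, 7], 3))
[[1, 2, 3], [4, 5, 6], [7]]
>>> list(grouped("abcdef", 2))
['ab', 'cd', 'ef']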
def test_backup_block_deletion(client, core_api, volume_name, set_backupstore_s3): # NOQA """ Test backup block deletion Context: We want to make sure that we only delete non referenced backup blocks, we also don't want to delete blocks while there other backups in progress. The reason for this is that we don't yet know which blocks are required by the in progress backup, so blocks deletion could lead to a faulty backup. Setup: 1. Setup minio as S3 backupstore Steps: 1. Create a volume and attach to the current node 2. Write 4 MB to the beginning of the volume (2 x 2MB backup blocks) 3. Create backup(1) of the volume 4. Overwrite the first of the backup blocks of data on the volume 5. Create backup(2) of the volume 6. Overwrite the first of the backup blocks of data on the volume 7. Create backup(3) of the volume 8. Verify backup block count == 4 assert volume["DataStored"] == str(BLOCK_SIZE * expected_count) assert count of *.blk files for that volume == expected_count 9. Create an artificial in progress backup.cfg file json.dumps({"Name": name, "VolumeName": volume, "CreatedTime": ""}) 10. Delete backup(2) 11. Verify backup block count == 4 (because of the in progress backup) 12. Delete the artificial in progress backup.cfg file 13. Delete backup(1) 14. Verify backup block count == 2 15. Delete backup(3) 16. Verify backup block count == 0 17. Delete the backup volume 18. Cleanup the volume """ backupstore_cleanup(client) volume = create_and_check_volume(client, volume_name) host_id = get_self_host_id() volume = volume.attach(hostId=host_id) volume = common.wait_for_volume_healthy(client, volume_name) data0 = {'pos': 0, 'len': 2 * BACKUP_BLOCK_SIZE, 'content': common.generate_random_data(2 * BACKUP_BLOCK_SIZE)} bv0, backup0, _, _ = create_backup(client, volume_name, data0) data1 = {'pos': 0, 'len': BACKUP_BLOCK_SIZE, 'content': common.generate_random_data(BACKUP_BLOCK_SIZE)} bv1, backup1, _, _ = create_backup(client, volume_name, data1) data2 = {'pos': 0, 'len': BACKUP_BLOCK_SIZE, 'content': common.generate_random_data(BACKUP_BLOCK_SIZE)} bv2, backup2, _, _ = create_backup(client, volume_name, data2) backup_blocks_count = backupstore_count_backup_block_files(client, core_api, volume_name) assert backup_blocks_count == 4 bvs = client.list_backupVolume() for bv in bvs: if bv['name'] == volume_name: assert bv['dataStored'] == \ str(backup_blocks_count * BACKUP_BLOCK_SIZE) backupstore_create_dummy_in_progress_backup(client, core_api, volume_name) delete_backup(client, volume_name, backup1.name) assert backupstore_count_backup_block_files(client, core_api, volume_name) == 4 backupstore_delete_dummy_in_progress_backup(client, core_api, volume_name) delete_backup(client, volume_name, backup0.name) assert backupstore_count_backup_block_files(client, core_api, volume_name) == 2 delete_backup(client, volume_name, backup2.name) assert backupstore_count_backup_block_files(client, core_api, volume_name) == 0 delete_backup_volume(client, volume_name)
6,717
def choose_string(g1, g2): """Function used by merge_similar_guesses to choose between 2 possible properties when they are strings. If the 2 strings are similar, or one is contained in the other, the latter is returned with an increased confidence. If the 2 strings are dissimilar, the one with the higher confidence is returned, with a weaker confidence. Note that here, 'similar' means that 2 strings are either equal, or that they differ very little, such as one string being the other one with the 'the' word prepended to it. >>> s(choose_string(('Hello', 0.75), ('World', 0.5))) ('Hello', 0.25) >>> s(choose_string(('Hello', 0.5), ('hello', 0.5))) ('Hello', 0.75) >>> s(choose_string(('Hello', 0.4), ('Hello World', 0.4))) ('Hello', 0.64) >>> s(choose_string(('simpsons', 0.5), ('The Simpsons', 0.5))) ('The Simpsons', 0.75) """ v1, c1 = g1 # value, confidence v2, c2 = g2 if not v1: return g2 elif not v2: return g1 v1, v2 = v1.strip(), v2.strip() v1l, v2l = v1.lower(), v2.lower() combined_prob = 1 - (1 - c1) * (1 - c2) if v1l == v2l: return (v1, combined_prob) # check for common patterns elif v1l == 'the ' + v2l: return (v1, combined_prob) elif v2l == 'the ' + v1l: return (v2, combined_prob) # if one string is contained in the other, return the shortest one elif v2l in v1l: return (v2, combined_prob) elif v1l in v2l: return (v1, combined_prob) # in case of conflict, return the one with highest confidence else: if c1 > c2: return (v1, c1 - c2) else: return (v2, c2 - c1)
6,718
def WorkPath():
    """
    WorkPath 2016.04.09
    Using this function makes it possible to use relative paths between scripts and folders.
    It automatically determines where the script is being run:
    either on a PC working locally,
    or on the server through the Django shell.
    """
    # TODO: replace the relative-folder working snippet in voca.py
    # with this function, which automatically detects the working directory!
6,719
def update_alert_command(client: MsClient, args: dict): """Updates properties of existing Alert. Returns: (str, dict, dict). Human readable, context, raw response """ alert_id = args.get('alert_id') assigned_to = args.get('assigned_to') status = args.get('status') classification = args.get('classification') determination = args.get('determination') comment = args.get('comment') args_list = [assigned_to, status, classification, determination, comment] check_given_args_update_alert(args_list) json_data, context = add_args_to_json_and_context(alert_id, assigned_to, status, classification, determination, comment) alert_response = client.update_alert(alert_id, json_data) entry_context = { 'MicrosoftATP.Alert(val.ID === obj.ID)': context } human_readable = f'The alert {alert_id} has been updated successfully' return human_readable, entry_context, alert_response
6,720
def create_payment(context: SagaContext) -> SagaContext: """For testing purposes.""" context["payment"] = "payment" return context
6,721
def test_plot_data(example_pm_data, modified_ansatz): """Load example data and test plotting.""" fitter = ThresholdFit(modified_ansatz=modified_ansatz) figure = plt.figure() fitter.plot_data(example_pm_data, "p_bitflip", figure=figure)
6,722
async def TwitterAuthURLAPI(
    request: Request,
    current_user: User = Depends(User.getCurrentUser),
):
    """
    Get the authentication URL for linking a Twitter account.<br>
    Opening the authentication URL in a browser prompts the user to approve the app connection; once the user approves, they are redirected back to /api/twitter/callback.
    This endpoint cannot be accessed unless a JWT-encoded access token is set in the request's Authorization: Bearer header.<br>
    """

    # Set the callback URL
    ## Twitter API OAuth integration requires the callback URL to be registered in advance on the developer dashboard
    ## Since each KonomiTV server has a different URL, callbacks are first funneled through https://app.konomi.tv/api/redirect/twitter
    ## That API redirects the request to the TwitterAuthCallbackAPI of the KonomiTV server specified in the "server" parameter
    ## The KonomiTV server finally receives the redirect, so OAuth integration works even though the callback URL is not fixed
    ## Unlike other services, Twitter uses OAuth 1.0a, so the flow is quite different
    ## ref: https://github.com/tsukumijima/KonomiTV-API
    callback_url = f'https://app.konomi.tv/api/redirect/twitter?server={request.url.scheme}://{request.url.netloc}/'

    # Initialize OAuth1UserHandler and get the authentication URL
    ## Setting signin_with_twitter to True generates an oauth/authenticate authentication URL
    ## Unlike oauth/authorize, if the app is already linked, the user is redirected to the callback URL without having to re-approve
    ## ref: https://developer.twitter.com/ja/docs/authentication/api-reference/authenticate
    try:
        oauth_handler = tweepy.OAuth1UserHandler(Interlaced(1), Interlaced(2), callback=callback_url)
        authorization_url = await asyncio.to_thread(oauth_handler.get_authorization_url, signin_with_twitter=True)  # synchronous function, so run it in a thread
    except tweepy.TweepyException:
        raise HTTPException(
            status_code = status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail = 'Failed to get Twitter authorization URL',
        )

    # Create a provisional TwitterAccount record
    ## Needed to determine which user the oauth_token belongs to when the callback comes back
    ## TwitterAuthCallbackAPI cannot require authentication by design, so this is how arbitrary accounts are prevented from being linked
    twitter_account = TwitterAccount()
    twitter_account.user = current_user
    twitter_account.name = 'Temporary'
    twitter_account.screen_name = 'Temporary'
    twitter_account.icon_url = 'Temporary'
    twitter_account.access_token = oauth_handler.request_token['oauth_token']  # provisionally store oauth_token (same value as ?oauth_token= in the authentication URL)
    twitter_account.access_token_secret = oauth_handler.request_token['oauth_token_secret']  # provisionally store oauth_token_secret
    await twitter_account.save()

    return {'authorization_url': authorization_url}
6,723
def post_sunday(request): """Post Sunday Details, due on the date from the form""" date_form = SelectDate(request.POST or None) if request.method == 'POST': if date_form.is_valid(): groups = DetailGroup.objects.filter(semester=get_semester()) details = settings.SUNDAY_DETAILS g = [e for e in groups] groups = g random.shuffle(groups) random.shuffle(details) emails = [] for group in groups: if len(details) <= 0: break group_detail = SundayGroupDetail( group=group, due_date=date_form.cleaned_data['due_date'] ) group_detail.save() for _ in range(group.size()): if len(details) <= 0: break d = details.pop() det = SundayDetail( short_description=d['name'], long_description="\n".join(d['tasks']), due_date=date_form.cleaned_data['due_date'] ) det.save() group_detail.details.add(det) group_detail.save() emails.append( build_sunday_detail_email( group_detail, request.scheme + "://" + request.get_host() ) ) det_manager_email = Position.objects.get( title=Position.PositionChoices.DETAIL_MANAGER ).brothers.first().user.email for (subject, message, to) in emails: send_mail(subject, message, det_manager_email, to) context = { 'form': date_form, 'date': 'sunday', } return render(request, 'detail-manager/post-details.html', context)
6,724
def reconstruct(lvl: Level, flow_dict: Dict[int, Dict[int, int]], info: Dict[int, NodeInfo]) -> List[List[int]]: """Reconstruct agent paths from the given flow and node information""" paths: List[List[int]] = [[]] * len(lvl.scenario.agents) start_flows = flow_dict[0] agent_starts = {agent.origin: i for i, agent in enumerate(lvl.scenario.agents)} for n in start_flows: if start_flows[n] > 0: agent = agent_starts[info[n].id] paths[agent] = follow_path(n, flow_dict, info) return paths
6,725
def test_single_agent() -> None: """ Create an environment and perform different action types to make sure the commands are translated correctly between griddly and enn wrappers """ env_cls = create_env( yaml_file=os.path.join(init_path, "env_descriptions/test/test_actions.yaml") ) env = env_cls() observation = env.reset() entity1_id = observation.ids["entity_1"][0] entity2_ids = observation.ids["entity_2"] # The starting location assert env.entity_locations[entity1_id] == [2, 1] move_down_action = CategoricalAction( actions=np.array([[4]], dtype=int), actors=[entity1_id] ) observation_1 = env.act({"move_entity_one": move_down_action}) # The entity has moved down assert len(observation_1.ids["entity_1"]) == 1 assert env.entity_locations[entity1_id] == [2, 2] # There are three entity2 and one of them is in position 3,3 assert len(observation_1.ids["entity_2"]) == 3 assert ( env.entity_locations[entity2_ids[0]] == [2, 3] or env.entity_locations[entity2_ids[1]] == [2, 3] or env.entity_locations[entity2_ids[2]] == [2, 3] ) remove_down_action = CategoricalAction( actions=np.array([[4]], dtype=int), actors=[entity1_id] ) observation_2 = env.act({"remove_entity_two": remove_down_action}) assert len(observation_2.ids["entity_1"]) == 1 # There are two entity_2 and none of them are in 3,3 assert len(observation_2.ids["entity_2"]) == 2 assert np.all( [env.entity_locations[id] != [2, 3] for id in observation_2.ids["entity_2"]] )
6,726
def add_aggregate_algo(ctx, data): """Add aggregate algo. The path must point to a valid JSON file with the following schema: \b { "name": str, "description": path, "file": path, "permissions": { "public": bool, "authorized_ids": list[str], }, } \b Where: - name: name of the algorithm - description: path to a markdown file describing the algo - file: path to tar.gz or zip archive containing the algorithm python script and its Dockerfile - permissions: define asset access permissions """ client = get_client(ctx.obj) res = client.add_aggregate_algo(data) printer = printers.get_asset_printer(assets.AGGREGATE_ALGO, ctx.obj.output_format) printer.print(res, is_list=False)
6,727
def run_with_reloader(root, *hotkeys): """Run the given application in an independent python interpreter.""" import signal signal.signal(signal.SIGTERM, lambda *args: sys.exit(0)) reloader = Reloader() try: if os.environ.get('TKINTER_MAIN') == 'true': for hotkey in hotkeys: root.bind_all(hotkey, lambda event: reloader.trigger_reload()) if os.name == 'nt': root.wm_state("iconic") root.wm_state("zoomed") root.mainloop() else: sys.exit(reloader.start_process()) except KeyboardInterrupt: pass
6,728
def arch_explain_instruction(bv, instruction, lifted_il_instrs): """ Returns the explanation string from explanations_en.json, formatted with the preprocessed instruction token list """ if instruction is None: return False, [] parsed = parse_instruction(bv, instruction, lifted_il_instrs) if len(parsed) == 0: return False, [] out = [] out_bool = False for name in parsed: name = find_proper_name(name).lower() if name in explanations: try: # Get the string from the JSON and format it out_bool = out_bool or name not in dont_supersede_llil out.append(explanations[name].format(instr=preprocess(bv, parsed, lifted_il_instrs, name))) except (AttributeError, KeyError): # Usually a bad format string. Shouldn't show up unless something truly weird happens. log_error("Bad Format String in binja_explain_instruction") traceback.print_exc() out.append(name) return out_bool, out
6,729
def execute_tuning(data: Dict[str, Any]) -> dict: """Get configuration.""" from lpot.ux.utils.workload.workload import Workload if not str(data.get("id", "")): message = "Missing request id." mq.post_error( "tuning_finish", {"message": message, "code": 404}, ) raise Exception(message) request_id: str = data["id"] workdir = Workdir(request_id=request_id) workload_path: str = workdir.workload_path try: workload_data = _load_json_as_dict( os.path.join(workload_path, "workload.json"), ) except Exception as err: mq.post_error( "tuning_finish", {"message": repr(err), "code": 404, "id": request_id}, ) raise err workload = Workload(workload_data) tuning: Tuning = Tuning(workload, workdir.workload_path, workdir.template_path) send_data = { "message": "started", "id": request_id, "size_fp32": get_size(tuning.model_path), } workdir.clean_logs() workdir.update_data( request_id=request_id, model_path=tuning.model_path, model_output_path=tuning.model_output_path, status="wip", ) executor = Executor( workspace_path=workload_path, subject="tuning", data=send_data, log_name="output", ) proc = executor.call( tuning.command, ) tuning_time = executor.process_duration if tuning_time: tuning_time = round(tuning_time, 2) log.debug(f"Elapsed time: {tuning_time}") logs = [os.path.join(workload_path, "output.txt")] parser = TuningParser(logs) if proc.is_ok: response_data = parser.process() if isinstance(response_data, dict): response_data["id"] = request_id response_data["tuning_time"] = tuning_time response_data["size_int8"] = get_size(tuning.model_output_path) response_data["model_output_path"] = tuning.model_output_path response_data["size_fp32"] = get_size(tuning.model_path) response_data["is_custom_dataloader"] = bool(workdir.template_path) workdir.update_data( request_id=request_id, model_path=tuning.model_path, model_output_path=tuning.model_output_path, metric=response_data, status="success", execution_details={"tuning": tuning.serialize()}, ) response_data["execution_details"] = {"tuning": tuning.serialize()} log.debug(f"Parsed data is {json.dumps(response_data)}") mq.post_success("tuning_finish", response_data) return response_data else: log.debug("FAIL") workdir.update_data( request_id=request_id, model_path=tuning.model_path, status="error", ) mq.post_failure("tuning_finish", {"message": "failed", "id": request_id}) raise ClientErrorException("Tuning failed during execution.")
6,730
def get_subnet_mask(subnet: int, v6: bool) -> int: """Get the subnet mask given a CIDR prefix 'subnet'.""" if v6: return bit_not((1 << (128 - subnet)) - 1, 128) else: return bit_not((1 << (32 - subnet)) - 1, 32)
6,731
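get_subnet_mask above delegates to a bit_not helper that is not part of this entry; below is a minimal sketch of how such a helper is presumably defined, followed by a quick check (the helper's name and behavior are assumptions of this example):

def bit_not(n: int, numbits: int) -> int:
    # assumed helper: bitwise NOT restricted to a fixed bit width
    return ((1 << numbits) - 1) ^ n

print(hex(get_subnet_mask(24, False)))   # 0xffffff00, i.e. 255.255.255.0
print(hex(get_subnet_mask(64, True)))    # 0xffffffffffffffff0000000000000000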
def show_notification_activity_relays(message, append=False): """ Show the notification on the Text Box Widget for the action on the relays :param message: The message to display :param append: Enable or disable notification append on the Text Area Widget :return: None """ if append: old_message_notification = text_area.text current_message = f'{datetime.datetime.utcnow().isoformat()} - {message}' relay_status_string = [old_message_notification, current_message] text_area.text = "".join(relay_status_string) else: text_area.text = message
6,732
def get_parser(): """Return base parser for scripts. """ parser = argparse.ArgumentParser() parser.add_argument('config', help='Tuning configuration file (examples: configs/tuning)') return parser
6,733
def cli( input_maps_directory, data_group, caps_directory, participants_tsv, gpu, n_proc, batch_size, # use_extracted_features, selection_metrics, diagnoses, multi_cohort, ): """Save the output tensors of a trained model on a test set. INPUT_MAPS_DIRECTORY is the MAPS folder from where the model used for prediction will be loaded. DATA_GROUP is the name of the subjects and sessions list used to compute outputs. """ from clinicadl.utils.cmdline_utils import check_gpu if gpu: check_gpu() from .save_tensor import save_tensor save_tensor( maps_dir=input_maps_directory, data_group=data_group, caps_directory=caps_directory, tsv_path=participants_tsv, gpu=gpu, n_proc=n_proc, batch_size=batch_size, # prepare_dl=use_extracted_features, selection_metrics=selection_metrics, diagnoses=diagnoses, multi_cohort=multi_cohort, )
6,734
def arm_and_takeoff(aTargetAltitude, vehicle): """ Arms vehicle and fly to aTargetAltitude. """ print "Basic pre-arm checks" if vehicle.mode.name == "INITIALISING": print "Waiting for vehicle to initialise" time.sleep(1) while vehicle.gps_0.fix_type < 2: print "Waiting for GPS...:", vehicle.gps_0.fix_type time.sleep(1) # Don't try to arm until autopilot is ready # vehicle has booted, EKF is ready, and the vehicle has GPS lock while not vehicle.is_armable: print " Waiting for vehicle to initialise..." time.sleep(1) print "Arming motors" # Copter should arm in GUIDED mode vehicle.mode = VehicleMode("GUIDED") vehicle.armed = True # Confirm vehicle armed before attempting to take off while not vehicle.armed: print " Waiting for arming..." time.sleep(1) print "Taking off!" vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude # Wait until the vehicle reaches a safe height before processing the goto (otherwise the command # after Vehicle.simple_takeoff will execute immediately). while True: print " Altitude: ", vehicle.location.global_relative_frame.alt #Break and return from function just below target altitude. if vehicle.location.global_relative_frame.alt>=aTargetAltitude*0.95: print "Reached target altitude" break time.sleep(1)
6,735
def home(request):
    """return HttpResponse('<h1>Hello, Welcome to this test</h1>')"""
    """The template path is set in "DIRS" of "TEMPLATES" in settings.py,
    SO THERE IS NO NEED TO PROVIDE THE ABSOLUTE PATH"""
    return render(request, "index.html")
6,736
def cd(path): """Context manager to switch working directory""" def normpath(path): """Normalize UNIX path to a native path.""" normalized = os.path.join(*path.split('/')) if os.path.isabs(path): return os.path.abspath('/') + normalized return normalized path = normpath(path) cwd = os.getcwd() os.chdir(path) try: yield path finally: os.chdir(cwd)
6,737
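As written, cd above is a plain generator function; for the with-statement usage its docstring implies, it presumably carries a contextlib.contextmanager decorator that this entry does not show. A minimal sketch of the intended usage with that assumption made explicit (the normpath handling is omitted here):

import os
from contextlib import contextmanager

@contextmanager
def cd(path):
    """Context manager to switch working directory (decorator assumed)."""
    cwd = os.getcwd()
    os.chdir(path)
    try:
        yield path
    finally:
        os.chdir(cwd)

with cd('/tmp'):
    print(os.getcwd())   # /tmp (or /private/tmp on macOS)
print(os.getcwd())       # back to the original working directory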
def validate_module_specific_inputs(scenario_id, subscenarios, subproblem, stage, conn): """ :param subscenarios: :param subproblem: :param stage: :param conn: :return: """ params = get_inputs_from_database(scenario_id, subscenarios, subproblem, stage, conn) df = cursor_to_df(params) # Check data types availability expected_dtypes = get_expected_dtypes( conn, ["inputs_project_availability", "inputs_project_availability_endogenous"]) dtype_errors, error_columns = validate_dtypes(df, expected_dtypes) write_validation_to_database( conn=conn, scenario_id=scenario_id, subproblem_id=subproblem, stage_id=stage, gridpath_module=__name__, db_table="inputs_project_availability_endogenous", severity="High", errors=dtype_errors ) # Check for missing inputs msg = "" value_cols = ["unavailable_hours_per_period", "unavailable_hours_per_event_min", "available_hours_between_events_min"] write_validation_to_database( conn=conn, scenario_id=scenario_id, subproblem_id=subproblem, stage_id=stage, gridpath_module=__name__, db_table="inputs_project_availability_endogenous", severity="Low", errors=validate_missing_inputs(df, value_cols, "project", msg) ) cols = ["unavailable_hours_per_event_min", "unavailable_hours_per_period"] write_validation_to_database( conn=conn, scenario_id=scenario_id, subproblem_id=subproblem, stage_id=stage, gridpath_module=__name__, db_table="inputs_project_availability_endogenous", severity="High", errors=validate_column_monotonicity( df=df, cols=cols, idx_col=["project"] ) )
6,738
def extract_commands(data, *commands): """Input function to find commands output in the "data" text""" ret = "" hostname = _ttp_["variable"]["gethostname"](data, "input find_command function") if hostname: for command in commands: regex = r"{}[#>] *{} *\n([\S\s]+?)(?={}[#>]|$)".format( hostname, command, hostname ) match = search(regex, data) if match: ret += "\n{}\n".format(match.group()) if ret: return ret, None return data, None
6,739
def get_version() -> str: """ Returns the version string for the ufotest project. The version scheme of ufotest loosely follows the technique of `Semantic Versioning <https://semver.org/>`_. Where a minor version change may introduce backward incompatible changes, due to the project still being in active development with many features being subject to change. The return value of this function is subject to the "get_version" filter hook, which is able to modify the version string *after* it has been loaded from the file and sanitized. *EXAMPLE* .. code-block:: python version = get_version() # "1.2.1" :returns: The version string without any additional characters or whitespaces. """ with open(VERSION_PATH) as version_file: version = version_file.read() version = version.replace(' ', '').replace('\n', '') # Here we actually need to check if the plugin management system is actually initialized (this is what the boolean # return of is_prepared indicates) because the version function needs to be functional even when the ufotest # installation folder and thus the config file does not yet exist. if CONFIG.is_prepared(): version = CONFIG.pm.apply_filter('get_version', value=version) return version
6,740
async def clear_pending_revocations(request: web.BaseRequest): """ Request handler for clearing pending revocations. Args: request: aiohttp request object Returns: Credential revocation ids still pending revocation by revocation registry id. """ context: AdminRequestContext = request["context"] body = await request.json() purge = body.get("purge") rev_manager = RevocationManager(context.profile) try: results = await rev_manager.clear_pending_revocations(purge) except StorageError as err: raise web.HTTPBadRequest(reason=err.roll_up) from err return web.json_response({"rrid2crid": results})
6,741
def verify_bpl_svcomp(args): """Verify the Boogie source file using SVCOMP-tuned heuristics.""" heurTrace = "\n\nHeuristics Info:\n" if args.memory_safety: if not (args.only_check_valid_deref or args.only_check_valid_free or args.only_check_memleak): heurTrace = "engage valid deference checks.\n" args.only_check_valid_deref = True args.prop_to_check = 'valid-deref' args.bpl_with_all_props = smack.top.temporary_file(os.path.splitext(os.path.basename(args.bpl_file))[0], '.bpl', args) copyfile(args.bpl_file, args.bpl_with_all_props) smack.top.property_selection(args) elif args.only_check_valid_deref: heurTrace = "engage valid free checks.\n" args.only_check_valid_free = True args.prop_to_check = 'valid-free' args.only_check_valid_deref = False args.bpl_file = smack.top.temporary_file(os.path.splitext(os.path.basename(args.bpl_file))[0], '.bpl', args) copyfile(args.bpl_with_all_props, args.bpl_file) smack.top.property_selection(args) elif args.only_check_valid_free: heurTrace = "engage memleak checks.\n" args.only_check_memleak = True args.prop_to_check = 'memleak' args.only_check_valid_free = False args.bpl_file = smack.top.temporary_file(os.path.splitext(os.path.basename(args.bpl_file))[0], '.bpl', args) copyfile(args.bpl_with_all_props, args.bpl_file) smack.top.property_selection(args) elif args.only_check_memcleanup: heurTrace = "engage memcleanup checks.\n" args.only_check_memleak = True smack.top.property_selection(args) args.only_check_memleak = False # If pthreads found, perform lock set analysis if args.pthread: lockpwn_command = ["lockpwn"] lockpwn_command += [args.bpl_file] lockpwn_command += ["/corral"] args.bpl_file = smack.top.temporary_file(os.path.splitext(os.path.basename(args.bpl_file))[0], '.bpl', args) lockpwn_command += ["/o:%s" % args.bpl_file] lockpwn_output = smack.top.try_command(lockpwn_command); corral_command = ["corral"] corral_command += [args.bpl_file] corral_command += ["/tryCTrace", "/noTraceOnDisk", "/printDataValues:1"] corral_command += ["/useProverEvaluate", "/cex:1"] with open(args.bpl_file, "r") as f: bpl = f.read() with open(args.input_files[0], "r") as f: csource = f.read() if args.memory_safety: is_stack_benchmark(args, csource) else: if "angleInRadian" in csource: if not args.quiet: print("Stumbled upon trigonometric function is float benchmark\n") sys.exit(smack.top.results(args)['unknown']) elif "copysign(1" in csource: if not args.quiet: print("Stumbled upon tricky float benchmark\n") sys.exit(smack.top.results(args)['unknown']) is_buggy_driver_benchmark(args, bpl) if args.pthread: if "fib_bench" in bpl or "27_Boop_simple_vf_false-unreach-call" in bpl or "k < 5;" in csource or "k < 10;" in csource or "k < 20;" in csource: heurTrace += "Increasing context switch bound for certain pthread benchmarks.\n" corral_command += ["/k:30"] else: corral_command += ["/k:3"] if not "qrcu_reader2" in bpl and not "__VERIFIER_atomic_take_write_lock" in bpl and not "fib_bench" in bpl: corral_command += ["/cooperative"] else: corral_command += ["/k:1"] if not (args.memory_safety or args.bit_precise or args.only_check_memcleanup): if not ("dll_create" in csource or "sll_create" in csource or "changeMethaneLevel" in csource): corral_command += ["/di"] # we are not modeling strcpy if args.pthread and "strcpy" in bpl: heurTrace += "We are not modeling strcpy - aborting\n" if not args.quiet: print(heurTrace + "\n") sys.exit(smack.top.results(args)['unknown']) # Setting good loop unroll bound based on benchmark class loopUnrollBar = 8 staticLoopBound = 65536 if not 
args.bit_precise and "ssl3_accept" in bpl and "s__s3__tmp__new_cipher__algorithms" in bpl: heurTrace += "ControlFlow benchmark detected. Setting loop unroll bar to 23.\n" loopUnrollBar = 23 elif "s3_srvr.blast.10_false-unreach-call" in bpl or "s3_srvr.blast.15_false-unreach-call" in bpl: heurTrace += "ControlFlow benchmark detected. Setting loop unroll bar to 23.\n" loopUnrollBar = 23 elif "NonTerminationSimple4_false-no-overflow" in bpl: heurTrace += "Overflow benchmark detected. Setting loop unroll bar to 1024.\n" loopUnrollBar = 1024 elif " node3" in bpl: heurTrace += "Sequentialized benchmark detected. Setting loop unroll bar to 100.\n" loopUnrollBar = 100 elif "calculate_output" in bpl or "psyco" in bpl: heurTrace += "ECA benchmark detected. Setting loop unroll bar to 15.\n" loopUnrollBar = 15 elif "ldv" in bpl: if "linux-4.2-rc1.tar.xz-08_1a-drivers--staging--lustre--lustre--llite--llite_lloop.ko-entry_point" in bpl or "linux-3.14__complex_emg__linux-usb-dev__drivers-media-usb-hdpvr-hdpvr" in bpl: heurTrace += "Special LDV benchmark detected. Setting loop unroll bar to 32.\n" loopUnrollBar = 32 else: heurTrace += "LDV benchmark detected. Setting loop unroll bar to 13.\n" loopUnrollBar = 13 staticLoopBound = 64 elif "standard_strcpy_false-valid-deref_ground_true-termination" in bpl or "960521-1_false-valid-free" in bpl or "960521-1_false-valid-deref" in bpl or "lockfree-3.3" in bpl or "list-ext_false-unreach-call_false-valid-deref" in bpl: heurTrace += "Memory safety benchmark detected. Setting loop unroll bar to 129.\n" loopUnrollBar = 129 elif "is_relaxed_prefix" in bpl: heurTrace += "Benchmark relax_* detected. Setting loop unroll bar to 15.\n" loopUnrollBar = 15 elif "id_o1000_false-unreach-call" in bpl: heurTrace += "Recursive benchmark detected. Setting loop unroll bar to 1024.\n" loopUnrollBar = 1024 elif "n.c24" in bpl or "array_false-unreach-call3" in bpl: heurTrace += "Loops benchmark detected. Setting loop unroll bar to 1024.\n" loopUnrollBar = 1024 elif "printf_false-unreach-call" in bpl or "echo_true-no-overflow" in bpl: heurTrace += "BusyBox benchmark detected. Setting loop unroll bar to 11.\n" loopUnrollBar = 11 elif args.memory_safety and "__main($i0" in bpl: heurTrace += "BusyBox memory safety benchmark detected. Setting loop unroll bar to 4.\n" loopUnrollBar = 4 elif args.integer_overflow and "__main($i0" in bpl: heurTrace += "BusyBox overflows benchmark detected. Setting loop unroll bar to 40.\n" loopUnrollBar = 40 elif args.integer_overflow and ("jain" in bpl or "TerminatorRec02" in bpl or "NonTerminationSimple" in bpl): heurTrace += "Infinite loop in overflow benchmark. Setting loop unroll bar to INT_MAX.\n" loopUnrollBar = 2**31 - 1 elif args.integer_overflow and ("(x != 0)" in csource or "(z > 0)" in csource or "(max > 0)" in csource or "(k < N)" in csource or "partial_sum" in csource): heurTrace += "Large overflow benchmark. Setting loop unroll bar to INT_MAX.\n" loopUnrollBar = 2**31 - 1 elif "i>>16" in csource: heurTrace += "Large array reach benchmark. Setting loop unroll bar to INT_MAX.\n" loopUnrollBar = 2**31 - 1 elif "whoop_poll_table" in csource: heurTrace += "Large concurrency benchmark. Setting loop unroll bar to INT_MAX.\n" loopUnrollBar = 2**31 - 1 if not "forall" in bpl: heurTrace += "No quantifiers detected. 
Setting z3 relevancy to 0.\n" corral_command += ["/bopt:z3opt:smt.relevancy=0"] if args.bit_precise: heurTrace += "--bit-precise flag passed - enabling bit vectors mode.\n" corral_command += ["/bopt:proverOpt:OPTIMIZE_FOR_BV=true"] corral_command += ["/bopt:boolControlVC"] if args.memory_safety: if args.prop_to_check == 'valid-deref': if "memleaks_test12_false-valid-free" in bpl: time_limit = 10 else: time_limit = 750 elif args.prop_to_check == 'valid-free': time_limit = 80 elif args.prop_to_check == 'memleak': time_limit = 50 else: time_limit = 880 command = list(corral_command) command += ["/timeLimit:%s" % time_limit] command += ["/v:1"] command += ["/maxStaticLoopBound:%d" % staticLoopBound] command += ["/recursionBound:65536"] command += ["/irreducibleLoopUnroll:12"] command += ["/trackAllVars"] verifier_output = smack.top.try_command(command, timeout=time_limit) result = smack.top.verification_result(verifier_output) if result == 'error' or result == 'invalid-deref' or result == 'invalid-free' or result == 'invalid-memtrack' or result == 'overflow': #normal inlining heurTrace += "Found a bug during normal inlining.\n" if not args.quiet: error = smack.top.error_trace(verifier_output, args) print error if args.memory_safety: heurTrace += (args.prop_to_check + "has errors\n") if args.prop_to_check == 'valid-free': if args.valid_deref_check_result != 'verified': force_timeout() elif args.prop_to_check == 'memleak': if args.valid_free_check_result == 'timeout': force_timeout() elif result == 'timeout': #normal inlining heurTrace += "Timed out during normal inlining.\n" heurTrace += "Determining result based on how far we unrolled.\n" # If we managed to unroll more than loopUnrollBar times, then return verified # First remove exhausted loop bounds generated during max static loop bound computation unrollMax = 0 if 'Verifying program while tracking' in verifier_output: verifier_output = re.sub(re.compile('.*Verifying program while tracking', re.DOTALL), 'Verifying program while tracking', verifier_output) it = re.finditer(r'Exhausted recursion bound of ([1-9]\d*)', verifier_output) for match in it: if int(match.group(1)) > unrollMax: unrollMax = int(match.group(1)) else: heurTrace += "Corral didn't even start verification.\n" if unrollMax >= loopUnrollBar: heurTrace += "Unrolling made it to a recursion bound of " heurTrace += str(unrollMax) + ".\n" heurTrace += "Reporting benchmark as 'verified'.\n" if args.execute and not args.pthread: heurTrace += "Hold on, let's see the execution result.\n" execution_result = run_binary(args) heurTrace += "Excecution result is " + execution_result + '\n' if execution_result != 'true': heurTrace += "Oops, execution result says {0}.\n".format(execution_result) if not args.quiet: print(heurTrace + "\n") sys.exit(smack.top.results(args)['unknown']) random_test_result = random_test(args, result) if random_test_result == 'false' or random_test_result == 'unknown': heurTrace += "Oops, random testing says {0}.\n".format(random_test_result) if not args.quiet: print(heurTrace + "\n") sys.exit(smack.top.results(args)['unknown']) if not args.quiet: print(heurTrace + "\n") if args.memory_safety: heurTrace += (args.prop_to_check + "is verified\n") if args.prop_to_check == 'valid-deref': args.valid_deref_check_result = 'verified' elif args.prop_to_check == 'valid-free': args.valid_free_check_result = 'verified' elif args.prop_to_check == 'memleak': if args.valid_deref_check_result == 'timeout': force_timeout() else: 
sys.exit(smack.top.results(args)[args.valid_deref_check_result]) verify_bpl_svcomp(args) else: write_error_file(args, 'verified', verifier_output) sys.exit(smack.top.results(args)['verified']) else: heurTrace += "Only unrolled " + str(unrollMax) + " times.\n" heurTrace += "Insufficient unrolls to consider 'verified'. " heurTrace += "Reporting 'timeout'.\n" if not args.quiet: print(heurTrace + "\n") sys.stdout.flush() if args.memory_safety: heurTrace += (args.prop_to_check + " times out\n") if args.prop_to_check == 'valid-deref': args.valid_deref_check_result = 'timeout' force_timeout() elif args.prop_to_check == 'valid-free': args.valid_free_check_result = 'timeout' elif args.prop_to_check == 'memleak': if args.valid_deref_check_result == 'timeout': force_timeout() else: sys.exit(smack.top.results(args)[args.valid_deref_check_result]) verify_bpl_svcomp(args) else: force_timeout() elif result == 'verified': #normal inlining heurTrace += "Normal inlining terminated and found no bugs.\n" else: #normal inlining heurTrace += "Normal inlining returned 'unknown'. See errors above.\n" if not args.quiet: print(heurTrace + "\n") if args.memory_safety and result == 'verified': heurTrace += (args.prop_to_check + " is verified\n") if args.prop_to_check == 'valid-deref': args.valid_deref_check_result = 'verified' elif args.prop_to_check == 'valid-free': args.valid_free_check_result = 'verified' elif args.prop_to_check == 'memleak': if args.valid_deref_check_result == 'timeout': force_timeout() else: sys.exit(smack.top.results(args)[args.valid_deref_check_result]) verify_bpl_svcomp(args) else: write_error_file(args, result, verifier_output) if args.only_check_memcleanup and result == 'invalid-memtrack': sys.exit('SMACK found an error: memory cleanup.') else: sys.exit(smack.top.results(args)[result])
6,742
def add_filter(field, bind, criteria): """Generate a filter.""" if 'values' in criteria: return '{0}=any(:{1})'.format(field, bind), criteria['values'] if 'date' in criteria: return '{0}::date=:{1}'.format(field, bind), datetime.strptime(criteria['date'], '%Y-%m-%d').date() if 'gte' in criteria: return '{0}>=:{1}'.format(field, bind), criteria['gte'] if 'lte' in criteria: return '{0}<=:{1}'.format(field, bind), criteria['lte'] raise ValueError('criteria not supported')
6,743
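A quick look at the clause/value pairs produced by add_filter above; the :name placeholders suggest a named-parameter SQL style (e.g. SQLAlchemy text queries), though that is an inference rather than something the entry states.

clause, value = add_filter('created_at', 'p0', {'gte': '2021-01-01'})
print(clause, value)    # created_at>=:p0 2021-01-01

clause, value = add_filter('status', 'p1', {'values': ['open', 'closed']})
print(clause, value)    # status=any(:p1) ['open', 'closed']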
def ireject(predicate, iterable): """Reject all items from the sequence for which the predicate is true. ireject(function or None, sequence) --> iterator :param predicate: Predicate function. If ``None``, reject all truthy items. :param iterable: Iterable to filter through. :yields: A sequence of all items for which the predicate is false. """ return _ifilterfalse(predicate, iterable)
6,744
def test_do_auto_false(): """ """ Npts1, Npts2, Nran = 300, 180, 1000 with NumpyRNGContext(fixed_seed): sample1 = np.random.random((Npts1, 3)) sample2 = np.random.random((Npts2, 3)) randoms = [Nran] # result1 = wp_jackknife(sample1, randoms, rp_bins, pi_max, # period=period, Nsub=3, num_threads=1, sample2=sample2, # do_auto=False) result = wp_jackknife(sample1, randoms, rp_bins, pi_max, period=period, Nsub=2, num_threads=1, sample2=sample2, do_auto=False)
6,745
def construct_config_error_msg(config, errors): """Construct an error message for an invalid configuration setup Parameters ---------- config: Dict[str, Any] Merged dictionary of configuration options from CLI, user configfile and default configfile errors: Dict[str, Any] Dictionary of schema validation errors passed by Marshmallow Returns ------- str """ error_msg = "Failed to parse config\n" for error_param, exception_msg in errors.items(): error_msg += parse_config_error(error_param, exception_msg) return error_msg
6,746
def isMSAADebugLoggingEnabled(): """ Whether the user has configured NVDA to log extra information about MSAA events. """ return config.conf["debugLog"]["MSAA"]
6,747
def _haversine_GC_distance(φ1, φ2, λ1, λ2): """ Haversine formula for great circle distance. Suffers from rounding errors for antipodal points. Parameters ---------- φ1, φ2 : :class:`numpy.ndarray` Numpy arrays wih latitudes. λ1, λ2 : :class:`numpy.ndarray` Numpy arrays wih longitude. """ Δλ = np.abs(λ1 - λ2) Δφ = np.abs(φ1 - φ2) return 2 * np.arcsin( np.sqrt(np.sin(Δφ / 2) ** 2 + np.cos(φ1) * np.cos(φ2) * np.sin(Δλ / 2) ** 2) )
6,748
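A small check of _haversine_GC_distance above: inputs must already be in radians, and the return value is the central angle, so multiplying by an Earth radius gives a distance (the 6371 km mean radius used here is an assumption of the example, not of the function).

import numpy as np

# Paris (48.8566° N, 2.3522° E) and New York (40.7128° N, 74.0060° W)
φ1, λ1 = np.radians(48.8566), np.radians(2.3522)
φ2, λ2 = np.radians(40.7128), np.radians(-74.0060)

angle = _haversine_GC_distance(φ1, φ2, λ1, λ2)   # central angle in radians
print(angle * 6371.0)                            # ≈ 5837 km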
def differentiate_branch(branch, suffix="deriv"): """calculates difference between each entry and the previous first entry in the new branch is difference between first and last entries in the input""" def bud(manager): return {add_suffix(branch,suffix):manager[branch]-np.roll(manager[branch],1)} return bud
6,749
def etf_holders(apikey: str, symbol: str) -> typing.Optional[typing.List[typing.Dict]]: """ Query FMP /etf-holder/ API. :param apikey: Your API key. :param symbol: Company ticker. :return: A list of dictionaries. """ path = f"etf-holder/{symbol}" query_vars = {"apikey": apikey} return __return_json_v3(path=path, query_vars=query_vars)
6,750
def clean_visibility_flags(horizon_dataframe: pd.DataFrame) -> pd.DataFrame:
    """
    assign names to unlabeled 'visibility flag' columns -- solar presence,
    lunar/interfering body presence, is-target-on-near-side-of-parent-body,
    is-target-illuminated; drop them if empty
    """
    flag_mapping = {
        unlabeled_flag: flag_name
        for unlabeled_flag, flag_name in zip(
            [c for c in horizon_dataframe.columns if 'Unnamed' in c],
            VISIBILITY_FLAG_NAMES
        )
    }
    horizon_dataframe = horizon_dataframe.rename(mapper=flag_mapping, axis=1)
    empty_flags = []
    for flag_column in flag_mapping.values():
        if horizon_dataframe[flag_column].isin([' ', '']).all():
            empty_flags.append(flag_column)
    return horizon_dataframe.drop(empty_flags, axis=1)
6,751
def insert_content_tetml_word() -> None: """Store the parse result in the database table content_tetml_word.""" if not cfg.glob.setup.is_simulate_parser: cfg.glob.parse_result_page_words[cfg.glob.JSON_NAME_NO_LINES_IN_PAGE] = cfg.glob.parse_result_no_lines_in_page cfg.glob.parse_result_page_words[cfg.glob.JSON_NAME_NO_PARAS_IN_PAGE] = cfg.glob.parse_result_no_paras_in_page cfg.glob.parse_result_page_words[cfg.glob.JSON_NAME_NO_WORDS_IN_PAGE] = cfg.glob.parse_result_no_words_in_page db.dml.insert_dbt_row( cfg.glob.DBT_CONTENT_TETML_WORD, { cfg.glob.DBC_DOCUMENT_ID: cfg.glob.document_id_base, cfg.glob.DBC_PAGE_NO: cfg.glob.parse_result_no_pages_in_doc, cfg.glob.DBC_PAGE_DATA: cfg.glob.parse_result_page_words, }, )
6,752
def test_sp_mp2_rhf_fc(mtd, opts, h2o): """cfour/???/input.dat #! single point MP2/qz2p on water """ h2o = qcdb.set_molecule(h2o) qcdb.set_options(opts) g, jrec = qcdb.gradient(mtd, return_wfn=True, molecule=h2o) print(g) assert compare_arrays(rhf_mp2_fc, g, atol=1.0e-5) ## from cfour #scf_tot = -76.062748460117 #mp2_tot = -76.307900312177 #mp2_corl = mp2_tot - scf_tot #atol = 1.e-6 #assert compare_values(mp2_tot, e, tnm() + ' Returned', atol=atol) #assert compare_values(mp2_tot, qcdb.variable('current energy'), tnm() + ' Current', atol=atol) #assert compare_values(mp2_tot, qcdb.variable('mp2 total energy'), tnm() + ' MP2', atol=atol) # #assert compare_values(mp2_corl, qcdb.variable('current correlation energy'), tnm() + ' MP2 Corl', atol=atol) #assert compare_values(mp2_corl, qcdb.variable('mp2 correlation energy'), tnm() + ' MP2 Corl', atol=atol) #assert compare_values(scf_tot, qcdb.variable('hf total energy'), tnm() + ' SCF', atol=atol) #assert compare_values(scf_tot, qcdb.variable('scf total energy'), tnm() + ' SCF', atol=atol)
6,753
def calib(phase, k, axis=1):
    """Phase calibration

    Args:
        phase (ndarray): Unwrapped phase of CSI.
        k (ndarray): Subcarriers index
        axis (int): Axis along which is subcarrier. Default: 1

    Returns:
        ndarray: Phase calibrated

    ref:
        [Enabling Contactless Detection of Moving Humans with Dynamic Speeds Using CSI]
        (http://tns.thss.tsinghua.edu.cn/wifiradar/papers/QianKun-TECS2017.pdf)
    """
    p = np.asarray(phase)
    k = np.asarray(k)

    slice1 = [slice(None, None)] * p.ndim
    slice1[axis] = slice(-1, None)
    slice1 = tuple(slice1)
    slice2 = [slice(None, None)] * p.ndim
    slice2[axis] = slice(None, 1)
    slice2 = tuple(slice2)
    shape1 = [1] * p.ndim
    shape1[axis] = k.shape[0]
    shape1 = tuple(shape1)

    # The linear term is fit between the first and last subcarriers, matching
    # the phase difference p[last] - p[first] above, so k_1 must be k[0].
    k_n, k_1 = k[-1], k[0]
    a = (p[slice1] - p[slice2]) / (k_n - k_1)
    b = p.mean(axis=axis, keepdims=True)
    k = k.reshape(shape1)

    phase_calib = p - a * k - b

    return phase_calib
6,754
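# Illustrative usage sketch for calib above, on synthetic CSI phase data. The
# 16-entry subcarrier index array is only an example layout; any increasing
# index vector matching the subcarrier axis works.
import numpy as np

rng = np.random.default_rng(0)
subcarriers = np.array([-28, -24, -20, -16, -12, -8, -4, -1,
                        1, 4, 8, 12, 16, 20, 24, 28])
raw_phase = np.unwrap(rng.uniform(-np.pi, np.pi, size=(10, 16, 3)), axis=1)
calibrated = calib(raw_phase, subcarriers, axis=1)
print(calibrated.shape)  # (10, 16, 3)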
def rescale_as_int( s: pd.Series, min_value: float = None, max_value: float = None, dtype=np.int16 ) -> pd.Series: """Cannot be converted to njit because np.clip is unsupported.""" valid_dtypes = {np.int8, np.int16, np.int32} if dtype not in valid_dtypes: raise ValueError(f"dtype: expecting [{valid_dtypes}] but found [{dtype}]") if min_value is None: min_value = min(s) if max_value is None: max_value = max(s) if min_value == 0 and max_value == 0: raise ValueError("Both min_value and max_value must not be zero") limit = max(abs(min_value), abs(max_value)) res = np.clip(s / limit, 0, 1) * np.iinfo(dtype).max return res.astype(dtype)
6,755
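# Illustrative usage sketch for rescale_as_int above, on a small synthetic
# pandas Series; the explicit max_value just pins the scaling limit.
import numpy as np
import pandas as pd

s = pd.Series([0.0, 0.25, 0.5, 1.0, 2.0])
print(rescale_as_int(s, max_value=2.0, dtype=np.int16))
# values are scaled into the int16 range, with 2.0 mapping to 32767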
def format_headers(headers): """Formats the headers of a :class:`Request`. :param headers: the headers to be formatted. :type headers: :class:`dict`. :return: the headers in lower case format. :rtype: :class:`dict`. """ dictionary = {} for k, v in headers.items(): if isinstance(k, unicode): k = k.encode('utf-8') if isinstance(v, unicode): v = v.encode('utf-8') dictionary[k.lower()] = v.lower() return dictionary
6,756
def read_analogy_file(filename): """ Read the analogy task test set from a file. """ section = None with open(filename, 'r') as questions_file: for line in questions_file: if line.startswith(':'): section = line[2:].replace('\n', '') continue else: words = line.replace('\n', '').split(' ') yield section, words
6,757
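# Illustrative usage sketch for read_analogy_file above, using a tiny
# temporary file written in the Google analogy-task format.
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(": capital-common-countries\n")
    f.write("Athens Greece Baghdad Iraq\n")
    path = f.name

for section, words in read_analogy_file(path):
    print(section, words)  # capital-common-countries ['Athens', 'Greece', 'Baghdad', 'Iraq']
os.remove(path)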
def decode_field(value): """Decodes a field as defined in the 'Field Specification' of the actions man page: http://www.openvswitch.org/support/dist-docs/ovs-actions.7.txt """ parts = value.strip("]\n\r").split("[") result = { "field": parts[0], } if len(parts) > 1 and parts[1]: field_range = parts[1].split("..") start = field_range[0] end = field_range[1] if len(field_range) > 1 else start if start: result["start"] = int(start) if end: result["end"] = int(end) return result
6,758
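# Illustrative usage sketch for decode_field above, on field strings written
# in the ovs-actions man-page notation.
print(decode_field("NXM_NX_REG0[0..15]"))
# {'field': 'NXM_NX_REG0', 'start': 0, 'end': 15}
print(decode_field("eth_dst"))
# {'field': 'eth_dst'}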
def process_targets(targets): """Process cycle targets for sending.""" for subscription_id, subscription_targets in groupby( targets, key=lambda x: x["subscription_id"] ): try: process_subscription_targets(subscription_id, subscription_targets) except Exception as e: logging.exception(e) for t in subscription_targets: target_manager.update( document_id=t["_id"], data={ "error": str(e), "sent_date": datetime.utcnow(), }, )
6,759
def compute_norm(x_train, in_ch):
    """Returns the per-channel mean and standard deviation computed over the
    non-zero pixels of the training images."""
    mean = np.zeros((1, 1, 1, in_ch))
    std = np.zeros((1, 1, 1, in_ch))
    n = np.zeros((1, 1, 1, in_ch))

    # Compute mean.
    for x in tqdm(x_train, desc='Compute mean'):
        mean += np.sum(x, axis=(0, 1, 2), keepdims=True)
        n += np.sum(x > 0, axis=(0, 1, 2), keepdims=True)
    mean /= n

    # Compute std.
    for x in tqdm(x_train, desc='Compute std'):
        std += np.sum((x - mean) ** 2, axis=(0, 1, 2), keepdims=True)
    std = (std / n) ** 0.5

    return mean, std
6,760
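# Illustrative usage sketch for compute_norm above, on synthetic image batches
# shaped (N, H, W, C); tqdm must be importable, just as the function itself
# assumes.
import numpy as np

rng = np.random.default_rng(0)
x_train = [rng.uniform(0.1, 1.0, size=(4, 8, 8, 3)) for _ in range(2)]
mean, std = compute_norm(x_train, in_ch=3)
print(mean.shape, std.shape)  # (1, 1, 1, 3) (1, 1, 1, 3)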
def _nonempty_line_count(src: str) -> int: """Count the number of non-empty lines present in the provided source string.""" return sum(1 for line in src.splitlines() if line.strip())
6,761
def run(data, results_filepath, methods=None, combiner='summa'):
    """
    Run COSIFER GUI pipeline.

    Args:
        data (pd.DataFrame): data used for inference.
        results_filepath (str): path where to store the results.
        methods (list, optional): inference methods. Defaults to None,
            a.k.a., only recommended methods.
        combiner (str, optional): combiner type. Defaults to summa.
    """
    # make sure the output exists
    os.makedirs(os.path.dirname(results_filepath), exist_ok=True)
    # decide on which network inference methods to perform
    selected_methods = method_selection(methods)
    if len(selected_methods) < 1:
        raise RuntimeError('No valid methods passed!')
    # run inference methods
    interaction_tables_dict = run_inference(data, selected_methods)
    number_of_inferred_networks = len(interaction_tables_dict)
    if number_of_inferred_networks < 2:
        if number_of_inferred_networks < 1:
            raise RuntimeError('No inferred networks!')
        # a single method produced a valid network
        _, interaction_table = next(iter(interaction_tables_dict.items()))
        interaction_table.df.to_csv(results_filepath, compression='gzip')
    else:
        # run a consensus strategy to combine the results of the single methods
        run_combiner(combiner, interaction_tables_dict, results_filepath)
6,762
def geom_to_xml_element(geom): """Transform a GEOS or OGR geometry object into an lxml Element for the GML geometry.""" if geom.srs.srid != 4326: raise NotImplementedError("Only WGS 84 lat/long geometries (SRID 4326) are supported.") # GeoJSON output is far more standard than GML, so go through that return geojson_to_gml(json.loads(geom.geojson))
6,763
def _validate_source(source):
    """
    Check that the entered data source paths are valid
    """
    # acceptable inputs (for now) are a single file or directory
    assert isinstance(source, str), "You must enter your input as a string."
    assert (
        os.path.isdir(source) or os.path.isfile(source)
    ), "Your data source string is not a valid data source."

    return True
6,764
def precision(y_true, y_pred):
    """Precision metric.

    Only computes a batch-wise average of precision. Computes the precision,
    a metric for multi-label classification of how many selected items are
    relevant.

    Parameters
    ----------
    y_true : numpy array
        an array of true labels
    y_pred : numpy array
        an array of predicted labels

    Returns
    -------
    precision : float
        the batch-wise average of the precision value
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
6,765
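# Illustrative usage sketch for the precision metric above. It assumes the
# Keras backend is available as K (e.g. `from tensorflow.keras import backend as K`),
# which is how the function itself uses it.
import numpy as np

y_true = K.constant(np.array([[1, 0, 1, 0]], dtype="float32"))
y_pred = K.constant(np.array([[1, 1, 0, 0]], dtype="float32"))
print(float(precision(y_true, y_pred)))  # 0.5: one of two predicted positives is correct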
def get_validators(setting): """ :type setting: dict """ if 'validate' not in setting: return [] validators = [] for validator_name in setting['validate'].keys(): loader_module = load_module( 'spreadsheetconverter.loader.validator.{}', validator_name) validators.append(loader_module.Validator(setting)) return validators
6,766
def main():
    """
    Executing relevant functions
    """
    # Get Keyword Args for Prediction
    args = arg_parser()

    # Load categories to names json file
    with open(args.category_names, 'r') as f:
        cat_to_name = json.load(f)

    # Load model trained with train.py
    model = load_checkpoint(args.checkpoint)

    # Process Image
    image_tensor = process_image(args.image)

    # Check for GPU
    device = check_gpu(gpu_arg=args.gpu)

    # Use `processed_image` to predict the top K most likely classes
    top_probs, top_labels, top_flowers = predict(image_tensor, model, device,
                                                 cat_to_name, args.top_k)

    # Print out probabilities
    print_probability(top_flowers, top_probs)
6,767
def _check_found(py_exe, version_text, log_invalid=True): """Check the Python and pip version text found. Args: py_exe (str or None): Python executable path found, if any. version_text (str or None): Pip version found, if any. log_invalid (bool): Whether to log messages if found invalid. Returns: bool: Python is OK and pip version fits against ``PIP_SPECIFIER``. """ is_valid = True message = "Needs pip%s, but found '%s' for Python '%s'" if version_text is None or not py_exe: is_valid = False if log_invalid: print_debug(message, PIP_SPECIFIER, version_text, py_exe) elif PackagingVersion(version_text) not in PIP_SPECIFIER: is_valid = False if log_invalid: print_warning(message, PIP_SPECIFIER, version_text, py_exe) return is_valid
6,768
def unpack_binary_data(filename, obj_meta, object_index, obj_type, exclude_meta): """ Unpack binary data for Speakers or Conversations """ # unpack speaker meta for field, field_type in object_index.items(): if field_type == "bin" and field not in exclude_meta: with open(os.path.join(filename, field + "-{}-bin.p".format(obj_type)), "rb") as f: l_bin = pickle.load(f) for speaker, metadata in obj_meta.items(): for k, v in metadata.items(): if k == field and type(v) == str and str(v).startswith(BIN_DELIM_L) and \ str(v).endswith(BIN_DELIM_R): idx = int(v[len(BIN_DELIM_L):-len(BIN_DELIM_R)]) metadata[k] = l_bin[idx] for field in exclude_meta: del object_index[field]
6,769
def test_adap_aux_pop(): """ test to see if aux_critical is properly being stored in the adaptive storage """ ATS = AdapTrajStorage() ATS.aux = [[tuple((0,1)),tuple((2,3))]],[tuple((0, 1))], [tuple((2, 3))] aux_crit = ATS.list_aux_pop known_aux_crit = [[tuple((0, 1))]] assert np.array_equal(aux_crit, known_aux_crit)
6,770
def worker(remote, parent_remote, env_fn_wrappers):
    """
    worker func to execute vec_env commands
    """
    def step_env(env, action):
        ob, reward, done, info = env.step(action)
        if done:
            ob = env.reset()
        return ob, reward, done, info

    parent_remote.close()
    envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x]

    try:
        while True:
            cmd, data = remote.recv()
            # branch out for requests
            if cmd == 'step':
                res = [step_env(env, action) for env, action in zip(envs, data)]
                remote.send(res)
            elif cmd == 'reset':
                remote.send([env.reset() for env in envs])
            elif cmd == 'render':
                remote.send([env.render(mode='rgb_array') for env in envs])
            elif cmd == 'close':
                remote.close()
                break
            elif cmd == 'get_spaces':
                remote.send(CloudpickleWrapper(
                    (envs[0].observation_space, envs[0].action_space)
                ))
            elif cmd == 'get_agent_types':
                if all([hasattr(a, 'adversary') for a in envs[0].agents]):
                    res = [
                        'adversary' if a.adversary else 'agent'
                        for a in envs[0].agents
                    ]
                else:
                    # fully cooperative
                    res = ['agent' for _ in envs[0].agents]
                remote.send(res)
            else:
                raise NotImplementedError
    except KeyboardInterrupt:
        print('SubprocVecEnv worker: got KeyboardInterrupt')
    except:
        print('Environment runner process failed...')
    finally:
        for env in envs:
            env.close()
6,771
def greedysplit_general(n, k, sigma, combine=lambda a, b: a + b, key=lambda a: a): """ Do a greedy split """ splits = [n] s = sigma(0, n) def score(splits, sigma): splits = sorted(splits) return key(reduce(combine, (sigma(a, b) for (a, b) in tools.seg_iter(splits)))) while k > 0: usedinds = set(splits) new = min((score(splits + [i], sigma), splits + [i]) for i in range(1, n) if i not in usedinds) splits = new[1] s = new[0] k -= 1 return sorted(splits), s
6,772
def write_outputs_separate_dir(out_dir, file_name, input_image = None, pred_scene = None, pred_flare = None, pred_blend = None): """Writes various outputs to separate subdirectories on disk.""" if not tf.io.gfile.isdir(out_dir): raise ValueError(f'{out_dir} is not a directory.') if input_image is not None: utils.write_image(input_image, os.path.join(out_dir, 'input', file_name)) if pred_scene is not None: utils.write_image(pred_scene, os.path.join(out_dir, 'output', file_name)) if pred_flare is not None: utils.write_image(pred_flare, os.path.join(out_dir, 'output_flare', file_name)) if pred_blend is not None: utils.write_image(pred_blend, os.path.join(out_dir, 'output_blend', file_name))
6,773
def public_assignment_get(assignment_id: str): """ Get a specific assignment spec :param assignment_id: :return: """ return success_response({ 'assignment': get_assignment_data(current_user.id, assignment_id) })
6,774
def recursion_detected(frame, keys): """Detect if we have a recursion by finding if we have already seen a call to this function with the same locals. Comparison is done only for the provided set of keys. """ current = frame current_filename = current.f_code.co_filename current_function = current.f_code.co_name current_locals = {k: v for k, v in current.f_locals.items() if k in keys} while frame.f_back: frame = frame.f_back fname = frame.f_code.co_filename if not(fname.endswith(".py") or fname == "<template>"): return False if fname != current_filename or \ frame.f_code.co_name != current_function: continue if ({k: v for k, v in frame.f_locals.items() if k in keys} == current_locals): return True return False
6,775
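# Illustrative usage sketch for recursion_detected above: guard a function
# that may call itself with identical arguments. Run it as a .py script, since
# the helper bails out for frames whose filename does not end in ".py".
import sys

def render(template_name):
    if recursion_detected(sys._getframe(), {"template_name"}):
        return "<recursion blocked>"
    if template_name == "self_including":
        return render(template_name)
    return "rendered " + template_name

print(render("self_including"))  # <recursion blocked>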
def arg_parser(cmd_line=None, config=None):
    """
    Parse the command line or the parameter to pass to the rest of the workflow
    :param cmd_line: A string containing a command line (mainly used for testing)
    :return: An args object with overrides for the configuration
    """
    default_formatter = argparse.ArgumentDefaultsHelpFormatter
    main_parser = argparse.ArgumentParser(description="Promoter workflow",
                                          formatter_class=default_formatter)
    main_parser.add_argument("--release-config", required=False,
                             default=DEFAULT_CONFIG_RELEASE,
                             help="Release config file")
    main_parser.add_argument("--config-root", required=False,
                             default=DEFAULT_CONFIG_ROOT,
                             help="Specify the environment type. "
                                  "Default: staging. For production "
                                  "use rdo and downstream")
    main_parser.add_argument("--log-level", default='INFO',
                             help="Set the log level")
    command_parser = main_parser.add_subparsers(dest='subcommand')
    command_parser.required = True
    promote_all_parser = command_parser.add_parser('promote-all',
                                                   help="Promote everything")
    # promote-all has no sub-arguments
    promote_all_parser.set_defaults(handler=promote_all)
    force_promote_parser = \
        command_parser.add_parser('force-promote',
                                  help="Force promotion of a specific hash, "
                                       "bypassing candidate selection",
                                  formatter_class=default_formatter)
    # force-promote arguments
    force_promote_parser.add_argument("--commit-hash", required=True,
                                      help="The commit hash part for the "
                                           "candidate hash")
    force_promote_parser.add_argument("--distro-hash", required=True,
                                      help="The distro hash part for the "
                                           "candidate hash")
    force_promote_parser.add_argument("--aggregate-hash",
                                      help="The aggregate hash part for the "
                                           "candidate hash")
    force_promote_parser.add_argument("--allowed-clients",
                                      default="registries_client,qcow_client,"
                                              "dlrn_client",
                                      help="The comma separated list of "
                                           "clients allowed to perform the "
                                           "promotion")
    force_promote_parser.add_argument("candidate_label",
                                      help="The label associated with the "
                                           "candidate hash")
    force_promote_parser.add_argument("target_label",
                                      help="The label to promote "
                                           "the candidate hash to")
    force_promote_parser.set_defaults(handler=force_promote)

    if cmd_line is not None:
        args = main_parser.parse_args(cmd_line.split())
    else:
        args = main_parser.parse_args()

    return args
6,776
def showdecmore(vec, fa): """ Supposedly for a 3d volume, but doesn't really work""" print('this doesn\'t work properly') mmontage( abs(vec) * tile(fa, (3,1,1,1)).transpose(1,2,3,0))
6,777
def createTable(cursor): """Creates specified table if it does not exist""" command = "CREATE TABLE IF NOT EXISTS leaderboard (username varchar(50) NOT NULL, score INT NOT NULL)" cursor.execute(command)
6,778
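# Illustrative usage sketch for createTable above with an in-memory SQLite
# cursor; the original code may target a different DB-API driver, SQLite is
# simply convenient for a self-contained example.
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
createTable(cur)
cur.execute("INSERT INTO leaderboard VALUES (?, ?)", ("alice", 42))
print(cur.execute("SELECT * FROM leaderboard").fetchall())  # [('alice', 42)]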
def menu_2(wavelength_list, absorption_list, epsilon_list, epsilon_mean_list, concentration_list): """Main functions are called by menu_2""" epsilon(absorption_list, epsilon_list, concentration_list) epsilon_mean(epsilon_list, epsilon_mean_list) menu(wavelength_list, absorption_list, epsilon_mean_list, epsilon_list, concentration_list)
6,779
def ST_MinX(geom):
    """
    Returns the minimum X value of the bounding envelope of a geometry
    """
    geom = _strip_header(geom)
    j = _dumps(geom)
    # Assumption: _dumps() yields GeoJSON, either as a mapping or a JSON
    # string; recurse through the nested coordinate lists and return the
    # smallest X value found.
    if isinstance(j, str):
        j = json.loads(j)

    def _min_x(coords):
        if isinstance(coords[0], (int, float)):
            return coords[0]
        return min(_min_x(c) for c in coords)

    return _min_x(j["coordinates"])
6,780
def pars_to_blocks(pars): """ this simulates one of the phases the markdown library goes through when parsing text and returns the paragraphs grouped as blocks, as markdown handles them """ pars = list(pars) m = markdown.Markdown() bp = markdown.blockprocessors.build_block_parser(m) root = markdown.util.etree.Element('div') blocks = [] while pars: parsbefore = list(pars) for processor in bp.blockprocessors.values(): if processor.test(root, pars[0]): processor.run(root, pars) while len(parsbefore) > len(pars): blocks.append(parsbefore[0]) parsbefore = parsbefore[1:] if pars and pars[0].strip('\n') != parsbefore[0].strip('\n'): strippedbefore = parsbefore[0].strip('\n') strippedcurrent = pars[0].strip('\n') if strippedbefore.endswith(strippedcurrent): beforelength = len(strippedbefore) currentlength = len(strippedcurrent) block = strippedbefore[0:beforelength - currentlength] blocks.append(block) else: raise Exception('unsupported change by blockprocessor. abort! abort!') break return blocks
6,781
def extract_intersections_from_osm_xml(osm_xml):
    """ Extract the GPS coordinates of the road intersections
        Return a list of gps tuples
    """
    soup = BeautifulSoup(osm_xml)
    segments_by_extremities = {}
    RoadRefs = []
    Coordinates = {}

    for point in soup.osm.findAll('node'):
        Coordinates[point['id']] = (float(point['lat']), float(point['lon']))

    for way in soup.osm.findAll(lambda node: node.name == "way" and node.findAll(k='highway')):
        roadPoints = []
        nodes = way.findAll('nd')
        for node in nodes:
            roadPoints.append(node['ref'])
        RoadRefs.append(roadPoints)

    # iterate over the list of streets and over each segment of a street.
    # for each segment extremity, build a list of segments leading to it
    for roadIdx, roadRef in enumerate(RoadRefs):
        for segIdx, seg in enumerate(roadRef):
            coords = Coordinates[seg]
            if coords not in segments_by_extremities:
                segments_by_extremities[coords] = []
            segments_by_extremities[coords].append([roadIdx, segIdx])

    # Iterate over the extremity lists and only keep the nodes shared by at
    # least two roads. Otherwise, they are not an intersection, just a turn in
    # a road. Iterate over a copy of the keys so entries can be deleted safely.
    for k in list(segments_by_extremities.keys()):
        if len(segments_by_extremities[k]) < 2:
            del segments_by_extremities[k]

    # finally return just the keys
    return list(segments_by_extremities.keys())
6,782
def spark_session(request): """Fixture for creating a spark context.""" spark = (SparkSession .builder .master('local[2]') .config('spark.jars.packages', 'com.databricks:spark-avro_2.11:3.0.1') .appName('pytest-pyspark-local-testing') .enableHiveSupport() .getOrCreate()) request.addfinalizer(lambda: spark.stop()) quiet_py4j() return spark
6,783
def _load_model_from_config(config_path, hparam_overrides, vocab_file, mode): """Loads model from a configuration file""" with gfile.GFile(config_path) as config_file: config = yaml.load(config_file) model_cls = locate(config["model"]) or getattr(models, config["model"]) model_params = config["model_params"] if hparam_overrides: model_params.update(hparam_overrides) # Change the max decode length to make the test run faster model_params["decoder.params"]["max_decode_length"] = 5 model_params["vocab_source"] = vocab_file model_params["vocab_target"] = vocab_file return model_cls(params=model_params, mode=mode)
6,784
def _get_assignment_node_from_call_frame(frame): """ Helper to get the Assign or AnnAssign AST node for a call frame. The call frame will point to a specific file and line number, and we use the source index to retrieve the AST nodes for that line. """ filename = frame.f_code.co_filename # Go up the AST from a node in the call frame line until we find an Assign or # AnnAssign, since the (Ann)Assign may be over multiple lines. nodes_in_line = _get_source_index(filename).get(frame.f_lineno, []) cur_node = nodes_in_line[0] while cur_node: if isinstance(cur_node, (ast.Assign, ast.AnnAssign)): return cur_node cur_node = cur_node.parent raise Exception("Could not find AST assignment node in the line" f" {filename}:{frame.f_lineno}")
6,785
def test_close(test_mp, caplog): """Platform.close_db() doesn't throw needless exceptions.""" # Close once test_mp.close_db() # Close again, once already closed test_mp.close_db() assert caplog.records[0].message == \ 'Database connection could not be closed or was already closed'
6,786
def check_existing_user(username):
    """
    Check whether an account with the given username already exists.
    """
    return User.user_exist(username)
6,787
def test_simple_profiler_describe(caplog, simple_profiler): """Ensure the profiler won't fail when reporting the summary.""" simple_profiler.describe() assert "Profiler Report" in caplog.text
6,788
def createevent():
    """
    An event is an (immediate) change of the world. It has no duration,
    contrary to a StaticSituation that has a non-null duration.

    This function creates and returns such an instantaneous situation.

    :sees: situations.py for a set of standard event types
    """
    sit = Situation(type=GENERIC, pattern=None)

    return sit
6,789
def _dump_multipoint(obj, fmt): """ Dump a GeoJSON-like MultiPoint object to WKT. Input parameters and return value are the MULTIPOINT equivalent to :func:`_dump_point`. """ coords = obj['coordinates'] mp = 'MULTIPOINT (%s)' points = (' '.join(fmt % c for c in pt) for pt in coords) # Add parens around each point. points = ('(%s)' % pt for pt in points) mp %= ', '.join(points) return mp
6,790
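# Illustrative usage sketch for _dump_multipoint above, with a GeoJSON-like
# dict and a printf-style coordinate format.
mp = {"type": "MultiPoint", "coordinates": [[10.0, 40.0], [40.0, 30.0]]}
print(_dump_multipoint(mp, "%.1f"))
# MULTIPOINT ((10.0 40.0), (40.0 30.0))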
def do_inference(engine, pics_1, h_input_1, d_input_1, h_output, d_output, stream, batch_size, height, width):
    """
    This is the function to run the inference

    Args:
        engine : The TensorRT engine used for inference
        pics_1 : Input images to the model.
        h_input_1: Input in the host
        d_input_1: Input in the device
        h_output: Output in the host
        d_output: Output in the device
        stream: CUDA stream
        batch_size : Batch size for execution time
        height: Height of the output image
        width: Width of the output image

    Output:
        The list of output images
    """
    load_images_to_buffer(pics_1, h_input_1)

    with engine.create_execution_context() as context:
        # Transfer input data to the GPU.
        cuda.memcpy_htod_async(d_input_1, h_input_1, stream)

        # Run inference.
        context.profiler = trt.Profiler()
        context.execute(batch_size=1, bindings=[int(d_input_1), int(d_output)])

        # Transfer predictions back from the GPU.
        cuda.memcpy_dtoh_async(h_output, d_output, stream)
        # Synchronize the stream
        stream.synchronize()
        # Return the host output.
        out = h_output.reshape((batch_size, -1, height, width))
        return out
6,791
def stage_files(input_dir, output_dir, n_files, rank=0, size=1): """Stage specified number of files to directory. This function works in a distributed fashion. Each rank will only stage its chunk of the file list. """ if rank == 0: logging.info(f'Staging {n_files} files to {output_dir}') # Find all the files in the input directory files = sorted(os.listdir(input_dir)) # Make sure there are at least enough files available if len(files) < n_files: raise ValueError(f'Cannot stage {n_files} files; only {len(files)} available') # Take the specified number of files files = files[:n_files] # Copy my chunk into the output directory os.makedirs(output_dir, exist_ok=True) for f in files[rank::size]: logging.debug(f'Staging file {f}') shutil.copyfile(os.path.join(input_dir, f), os.path.join(output_dir, f)) logging.debug('Data staging completed')
6,792
def make_import(): """Import(alias* names)""" return ast.Import(names=[make_alias()])
6,793
def watch(): """Watch bundles for file changes.""" _webassets_cmd('watch')
6,794
def preprocess(
        image: Union[np.ndarray, Image.Image],
        threshold: int = None,
        resize: int = 64,
        quantiles: List[float] = [.01, .05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
        reduction: Union[str, List[str]] = ['max', 'median', 'mean', 'min']
) -> dict:
    """
    Basic preprocessing metrics for a histological image.

    Args:
        image (Union[np.ndarray, Image.Image]): Input image.
        threshold (int, optional): Threshold for tissue detection. If not
            defined, Otsu's binarization will be used, which may fail for
            images with data loss or only background. Defaults to None.
        resize (int, optional): For artifact() function. Defaults to 64.
        quantiles (List[float], optional): For HSV_quantiles() and
            RGB_quantiles() functions.
            Defaults to [.01, .05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99].
        reduction (Union[str, List[str]], optional): Reduction methods for
            sharpness() function. Defaults to ['max', 'median', 'mean', 'min'].

    Raises:
        TypeError: Invalid type for ``image``.

    Returns:
        dict: Dictionary of basic preprocessing metrics.
    """
    if isinstance(image, Image.Image):
        if image.mode != 'RGB':
            image = image.convert('RGB')
        image = np.array(image, dtype=np.uint8)
    elif isinstance(image, np.ndarray):
        image = image.astype(np.uint8)
    else:
        raise TypeError('Expected {} or {} not {}.'.format(
            np.ndarray, Image.Image, type(image)
        ))
    # Initialize results and intermediate images.
    results = {}
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    mask = tissue_mask(image, threshold=threshold)
    # Background percentage.
    results['background'] = (mask == 0).sum()/mask.size
    # Sharpness.
    results.update(sharpness(gray, reduction=reduction))
    # Data loss.
    results.update(data_loss(gray))
    # Artifacts.
    small_img = cv2.resize(image, (resize, resize), cv2.INTER_LANCZOS4)
    small_mask = cv2.resize(mask, (resize, resize), cv2.INTER_LANCZOS4)
    results.update(HSV_quantiles(
        small_img, mask=small_mask, quantiles=quantiles))
    results.update(RGB_quantiles(
        small_img, mask=small_mask, quantiles=quantiles))
    return results
6,795
def binary_distance(label1, label2): """Simple equality test. 0.0 if the labels are identical, 1.0 if they are different. >>> from nltk.metrics import binary_distance >>> binary_distance(1,1) 0.0 >>> binary_distance(1,3) 1.0 """ return 0.0 if label1 == label2 else 1.0
6,796
def fetch_collections_info(data):
    """Connect to the solr_cloud status page and return a JSON object"""
    url = "{0}/admin/collections?action=CLUSTERSTATUS&wt=json".format(data["base_url"])
    get_data = _api_call(url, data["opener"])
    solr_cloud = {}

    if get_data is None:
        collectd.error("solr_collectd plugin: can't get info")
        solr_cloud["error"] = "Solr instance is not running in solr_cloud mode"
    elif "error" in get_data:
        collectd.warning("%s" % get_data["error"]["msg"])
        solr_cloud["error"] = get_data["error"]["msg"]
    elif "cluster" in get_data:
        if "cluster" not in data["custom_dimensions"]:
            data["custom_dimensions"]["cluster"] = data["cluster"]
        solr_cloud["live_nodes"] = get_data["cluster"]["live_nodes"]
        solrCollections = get_data["cluster"]["collections"]
        for collection in solrCollections:
            solr_cloud[collection] = {}
            solrShards = get_data["cluster"]["collections"][collection]["shards"]
            for shard in solrShards.keys():
                solr_cloud[collection][shard] = {}
                for coreNodes in solrShards[shard]["replicas"]:
                    coreNode = solrShards[shard]["replicas"][coreNodes]
                    core = coreNode["core"]
                    solr_cloud[collection][shard][core] = {}
                    # if 'leader' in coreNode.keys() and coreNode['base_url'] == data['base_url']:
                    #     collectd.debug('{0} - Solr running in solr_cloud mode'.format(data['member_id']))
                    solr_cloud[collection][shard][core]["node"] = coreNode["node_name"]
                    solr_cloud[collection][shard][core]["base_url"] = coreNode["base_url"]
                    solr_cloud[collection][shard][core]["state"] = coreNode["state"]
                    if "leader" in coreNode:
                        solr_cloud[collection][shard][core]["leader"] = coreNode["leader"]
                    else:
                        solr_cloud[collection][shard][core]["leader"] = "false"

    return solr_cloud
6,797
def calc_commission_futures_global(trade_cnt, price):
    """
    International futures: fee schedules vary widely between brokers, so it is
    best to plug in your own calculation externally; here we simply apply a
    flat 0.002 rate.
    :param trade_cnt: number of units traded (int)
    :param price: price per unit (USD)
    :return: the calculated commission
    """
    cost = trade_cnt * price
    # International futures commissions differ a lot across brokers and agency
    # arrangements; ideally define a custom calculation externally. Here we
    # simply apply a 0.002 rate.
    commission = cost * 0.002
    return commission
6,798
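# Illustrative usage sketch for calc_commission_futures_global above: 100
# units at 50.0 USD gives a notional of 5000.0 and a flat 0.002 commission of 10.0.
print(calc_commission_futures_global(100, 50.0))  # 10.0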
def is_section_command(row):
    """CSV rows are considered new section commands if they start with
    <SECTION> and consist of at least two columns.

    >>> is_section_command('<SECTION>\tSection name'.split('\t'))
    True
    >>> is_section_command('<other>\tSection name'.split('\t'))
    False
    >>> is_section_command(['<SECTION>', 'Section name', 'some more'])
    True
    """
    return len(row) >= 2 and row[0] == __CSV_SECTION_PREFIX
6,799