Dataset columns: content (string, lengths 22 to 815k characters); id (int64, range 0 to 4.91M).
def print_ofpt_stats_reply(msg):
    """
    Args:
        msg: OpenFlow message unpacked by python-openflow
    """

    def print_ofpt_stats_reply_description(msg):
        """
        Args:
            msg: OpenFlow message unpacked by python-openflow
        """
        print('StatRes Type: OFPST_DESC')
        print('StatRes mfr_desc: %s' % msg.body.mfr_desc)
        print('StatRes hw_desc: %s' % msg.body.hw_desc)
        print('StatRes sw_desc: %s' % msg.body.sw_desc)
        print('StatRes serial_num: %s' % msg.body.serial_num)
        print('StatRes dp_desc: %s' % msg.body.dp_desc)

    def print_ofpt_stats_reply_flow_array(msg):
        """
        Args:
            msg: OpenFlow message unpacked by python-openflow
        """

        def print_ofpt_stats_reply_flow(flow):
            """
            Args:
                flow: flow entry from the stats reply body
            """
            print('StatRes Type: Flow(1)')
            print('StatRes Length: %s Table_id: %s Pad: %s ' %
                  (flow.length, flow.table_id, flow.pad))
            print('StatRes ', end='')
            print_ofp_match(flow.match)
            print('StatRes duration_sec: %s, duration_nsec: %s, priority: %s,'
                  ' idle_timeout: %s, hard_timeout: %s, pad: %s, cookie: %s,'
                  ' packet_count: %s, byte_count: %s' %
                  (flow.duration_sec, flow.duration_nsec, flow.priority,
                   flow.idle_timeout, flow.hard_timeout, flow.pad,
                   flow.cookie, flow.packet_count, flow.byte_count))
            print('StatRes ', end='')
            print_actions(flow.actions)

        if len(msg.body) == 0:
            print('StatRes Type: Flow(1)\nNo Flows')
            return

        for flow in msg.body:
            print_ofpt_stats_reply_flow(flow)

    def print_ofpt_stats_reply_aggregate(msg):
        """
        Args:
            msg: OpenFlow message unpacked by python-openflow
        """
        print('StatRes Type: Aggregate(2)')
        print('StatRes packet_count: %s, byte_count: %s flow_count: %s '
              'pad: %s' %
              (msg.stats.packet_count, msg.stats.byte_count,
               msg.stats.flow_count, msg.stats.pad))

    def print_ofpt_stats_reply_table_array(msg):
        """
        Args:
            msg: OpenFlow message unpacked by python-openflow
        """

        def print_ofpt_stats_reply_table(table):
            """
            Args:
                table: table entry from the stats reply body
            """
            print('StatRes table_id: %s, pad: %s, name: "%s", wildcards: %s, '
                  'max_entries: %s, active_count: %s, lookup_count: %s, '
                  'matched_count: %s' %
                  (table.table_id.value, table.pad, table.name.value,
                   hex(table.wildcards.value), table.max_entries.value,
                   table.active_count.value, table.count_lookup.value,
                   table.count_matched.value))

        if len(msg.body) == 0:
            print('StatRes Type: Table(3)\nNo Tables')
            return

        print('StatRes Type: Table(3)')
        for table in msg.body:
            print_ofpt_stats_reply_table(table)

    def print_ofp_stats_reply_port_array(msg):
        """
        Args:
            msg: OpenFlow message unpacked by python-openflow
        """

        def print_ofpt_stats_reply_port(port):
            """
            Args:
                port: port entry from the stats reply body
            """
            print('StatRes Type: Port(4)')
            print('StatRes port_number: %s rx_packets: %s rx_bytes: %s rx_errors: %s'
                  ' rx_crc_err: %s rx_dropped: %s rx_over_err: %s rx_frame_err: %s\n'
                  'StatRes port_number: %s tx_packets: %s tx_bytes: %s tx_errors: %s'
                  ' tx_dropped: %s collisions: %s pad: %s' %
                  (red(port.port_no), port.rx_packets, port.rx_bytes,
                   port.rx_errors, port.rx_crc_err, port.rx_dropped,
                   port.rx_over_err, port.rx_frame_err, red(port.port_no),
                   port.tx_packets, port.tx_bytes, port.tx_errors,
                   port.tx_dropped, port.collisions, port.pad))

        if len(msg.body) == 0:
            print('StatRes Type: Port(4)\nNo Ports')
            return

        for port in msg.body:
            print_ofpt_stats_reply_port(port)

    def print_ofpt_stats_reply_queue_array(msg):
        """
        Args:
            msg: OpenFlow message unpacked by python-openflow
        """

        def print_ofpt_stats_reply_queue(queue):
            """
            Args:
                queue: queue entry from the stats reply body
            """
            print('StatRes Type: Queue(5)')
            print('StatRes queue_id: %s length: %s pad: %s'
                  ' tx_bytes: %s tx_packets: %s tx_errors: %s' %
                  (queue.queue_id, queue.length, queue.pad,
                   queue.tx_bytes, queue.tx_packets, queue.tx_errors))

        if len(msg.body) == 0:
            print('StatRes Type: Queue(5)\nNo Queues')
            return

        for queue in msg.body:
            print_ofpt_stats_reply_queue(queue)

    def print_ofpt_stats_reply_vendor(msg):
        """
        Args:
            msg: OpenFlow message unpacked by python-openflow
        """

        def print_ofpt_stats_reply_vendor_data(data):
            """
            Args:
                data: raw vendor payload from the stats reply body
            """
            print('StatRes Vendor Data: ')
            hexdump(data)

        print('StatRes Type: Vendor(%s)' % hex(msg.body_type.value))
        print('StatRes Vendor_Id: %s' % red(hex(msg.body[0].vendor.value)))
        print_ofpt_stats_reply_vendor_data(msg.body[0].body.value)

    if msg.body_type == 0:
        print_ofpt_stats_reply_description(msg)
    elif msg.body_type == 1:
        print_ofpt_stats_reply_flow_array(msg)
    elif msg.body_type == 2:
        print_ofpt_stats_reply_aggregate(msg)
    elif msg.body_type == 3:
        print_ofpt_stats_reply_table_array(msg)
    elif msg.body_type == 4:
        print_ofp_stats_reply_port_array(msg)
    elif msg.body_type == 5:
        print_ofpt_stats_reply_queue_array(msg)
    elif msg.body_type == 65535:
        print_ofpt_stats_reply_vendor(msg)
6,000
def validate_logger(lname):
    """ Make sure the datalogger name (lname) is a valid datalogger in the
    current configuration.

    Raises: ValueError
    """
    if lname not in sy.loggers:
        print('Available logger names are {0}'.format(sy.loggers))
        raise ValueError('Not a valid datalogger name for this configuration')
6,001
def computeAbsoluteMagnitudes(traj, meteor_list):
    """ Given the trajectory, compute the absolute magnitude (visual magnitude @100km). """

    # Go through every observation of the meteor
    for i, meteor_obs in enumerate(meteor_list):

        # Go through all magnitudes and compute absolute magnitudes
        for dist, mag in zip(traj.observations[i].model_range, meteor_obs.mag_data):

            # Skip nonexistent magnitudes
            if mag is not None:
                # Compute the range-corrected magnitude
                abs_mag = mag + 5*np.log10((10**5)/dist)
            else:
                abs_mag = None

            meteor_obs.abs_mag_data.append(abs_mag)
6,002
def associate_routes(stack, subnet_list=()): """Add Route Association Resources.""" for association in subnet_list: stack.stack.add_resource( SubnetRouteTableAssociation( '{0}RouteAssociation'.format(association['name']), SubnetId=Ref(association['subnet']), RouteTableId=Ref(association['route_table']) ))
6,003
def list_tris_nibbles(fp, reg): """ Create TRIS functions for lower- and upper-nibbles only Input: - file pointer - TRIS register """ port = "PORT" + reg[4:] if port.endswith("IO"): port = "PORTA" fp.write("--\n") half = port + "_low_direction" fp.write("procedure " + half + "'put(byte in x) is\n" + " " + reg + " = (" + reg + " & 0xF0) | (x & 0x0F)\n" + "end procedure\n" + "function " + half + "'get() return byte is\n" + " return (" + reg + " & 0x0F)\n" + "end function\n" + "--\n") half = port + "_high_direction" fp.write("procedure " + half + "'put(byte in x) is\n" + " " + reg + " = (" + reg + " & 0x0F) | (x << 4)\n" + "end procedure\n" + "function " + half + "'get() return byte is\n" + " return (" + reg + " >> 4)\n" + "end function\n" + "--\n")
6,004
def map_parallel(function, xs): """Apply a remote function to each element of a list.""" if not isinstance(xs, list): raise ValueError('The xs argument must be a list.') if not hasattr(function, 'remote'): raise ValueError('The function argument must be a remote function.') # EXERCISE: Modify the list comprehension below to invoke "function" # remotely on each element of "xs". This should essentially submit # one remote task for each element of the list and then return the # resulting list of ObjectIDs. return [function.remote(x) for x in xs]
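A minimal usage sketch for the helper above (not part of the original row), assuming Ray is installed and initialized; the task name square and the sample inputs are illustrative only:

import ray

ray.init()

@ray.remote
def square(x):
    # runs as a Ray task; the decorated function exposes .remote(), which map_parallel checks for
    return x * x

object_ids = map_parallel(square, [1, 2, 3, 4])
print(ray.get(object_ids))  # -> [1, 4, 9, 16]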
6,005
def verify(medusaconfig, backup_name): """ Verify the integrity of a backup """ medusa.verify.verify(medusaconfig, backup_name)
6,006
def test_build_reader_antenna__geometry( num_lanes, lane, side, angle, width, offset, height, forward, pos): """ Validate that forward direction, right direction and position of the reader antenna are computed and set properly. """ # Define constant values for expected right_dir values: right = { 'front': (0, -1, 0), 'back': (0, 1, 0), }[side] md = ModelDescriptor() md.reader_antenna_angle = angle md.lanes_number = num_lanes md.lane_width = width md.reader_antenna_offset = offset md.reader_antenna_height = height # Create ReaderAntenna: factory = Factory() factory.params = md ant = factory.build_reader_antenna(lane, side) # Validate geometry properties: assert ant.lane == lane assert ant.side == side assert_allclose(ant.dir_forward, forward, rtol=0.01) assert_allclose(ant.dir_right, right, rtol=0.01) assert_allclose(ant.position, pos, rtol=0.01)
6,007
def log_interp(x, y, xnew):
    """ Apply interpolation in logarithmic space for both x and y.
    Beyond the input x range, returns 10^0=1 """
    ynew = 10**ius(np.log10(x), np.log10(y), ext=3)(np.log10(xnew))
    return ynew
6,008
def inten_sat_compact(args): """ Memory saving version of inten_scale followed by saturation. Useful for multiprocessing. Parameters ---------- im : numpy.ndarray Image of dtype np.uint8. Returns ------- numpy.ndarray Intensity scale and saturation of input. """ return ((inten_scale(args[0]) * saturation(args[0])) ** 2).astype(np.float32)
6,009
def test_gtfeatures_to_variants(patient_37): """Test the function that parses variants dictionaries from patient's genomic features""" # GIVEN a patient containing 1 genomic feature (and one variant) gt_features = patient_37["patient"]["genomicFeatures"] assert len(gt_features) == 1 # WHEN gtfeatures_to_variants is used to extract variants from gt_features variants = gtfeatures_to_variants(gt_features) # THEN it should return 2 variants assert len(variants) == 2 # One with genome build GRCh37 assert variants[0]["assembly"] == "GRCh37" # And one with genome build GRCh38 assert variants[1]["assembly"] == "GRCh38"
6,010
def write_subjectvolume(fn, subjdict):
    """subjdict is a dictionary subid -> volume"""
    # read the existing file (if any) and merge in the new entries
    if os.path.exists(fn):
        with open(fn, 'r') as f:
            subjectvolume = json.load(f)
        subjectvolume.update(subjdict)
    else:
        subjectvolume = subjdict
    # json.dump writes text, so open in text mode ('w') rather than binary ('wb')
    with open(fn, 'w') as f:
        json.dump(subjectvolume, f, indent=True)
6,011
def iob_to_docs(input_data, n_sents=10, no_print=False, *args, **kwargs): """ Convert IOB files with one sentence per line and tags separated with '|' into Doc objects so they can be saved. IOB and IOB2 are accepted. Sample formats: I|O like|O London|I-GPE and|O New|B-GPE York|I-GPE City|I-GPE .|O I|O like|O London|B-GPE and|O New|B-GPE York|I-GPE City|I-GPE .|O I|PRP|O like|VBP|O London|NNP|I-GPE and|CC|O New|NNP|B-GPE York|NNP|I-GPE City|NNP|I-GPE .|.|O I|PRP|O like|VBP|O London|NNP|B-GPE and|CC|O New|NNP|B-GPE York|NNP|I-GPE City|NNP|I-GPE .|.|O """ vocab = Vocab() # need vocab to make a minimal Doc msg = Printer(no_print=no_print) if n_sents > 0: n_sents_info(msg, n_sents) yield from read_iob(input_data.split("\n"), vocab, n_sents)
6,012
def masseuse_memo(A, memo, ind=0):
    """ Return the max with memo
    :param A:
    :param memo:
    :param ind:
    :return:
    """
    # Stop if the index runs past the end of the list
    if ind > len(A) - 1:
        return 0
    if ind not in memo:
        memo[ind] = max(masseuse_memo(A, memo, ind + 2) + A[ind],
                        masseuse_memo(A, memo, ind + 1))
    return memo[ind]
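A quick self-contained check of the recurrence above (illustrative values, not from the original row): picking non-adjacent items from [5, 1, 1, 5] should give 10.

appointments = [5, 1, 1, 5]
memo = {}
best = masseuse_memo(appointments, memo)
print(best)   # -> 10 (take the 5 at index 0 and the 5 at index 3)
print(memo)   # memoized best totals starting at each index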
6,013
def unique_pairs(bonded_nbr_list):
    """
    Reduces the bonded neighbor list to only include unique pairs of bonds. For example,
    if atoms 3 and 5 are bonded, then `bonded_nbr_list` will have items [3, 5] and also
    [5, 3]. This function will reduce the pairs only to [3, 5] (i.e. only the pair in
    which the first index is lower).

    Args:
        bonded_nbr_list (list): list of arrays of bonded pairs for each molecule.
    Returns:
        sorted_pairs (torch.LongTensor): same as bonded_nbr_list but without duplicate pairs.
    """
    unique_pairs = []
    for pair in bonded_nbr_list:
        # sort according to the first item in the pair
        sorted_pair = torch.sort(pair)[0].numpy().tolist()
        if sorted_pair not in unique_pairs:
            unique_pairs.append(sorted_pair)

    # now make sure that the sorting is still good (this may be unnecessary but I added
    # it just to make sure)
    idx = list(range(len(unique_pairs)))
    # first_arg = list of the first node in each pair
    first_arg = [pair[0] for pair in unique_pairs]
    # sorted_idx = sort the indices of unique_pairs by the first node in each pair
    sorted_idx = [item[-1] for item in sorted(zip(first_arg, idx))]
    # re-arrange by sorted_idx
    sorted_pairs = torch.LongTensor(np.array(unique_pairs)[sorted_idx])

    return sorted_pairs
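A small illustrative call (not from the original row), assuming torch and numpy are imported as the snippet requires; the duplicate pair [5, 3] collapses onto [3, 5] and the result is re-sorted by first index:

import torch

bonded = [torch.LongTensor([3, 5]),
          torch.LongTensor([5, 3]),
          torch.LongTensor([1, 2])]
print(unique_pairs(bonded))
# tensor([[1, 2],
#         [3, 5]])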
6,014
def permission(*perms: str): """ Decorator that runs the command only if the author has the specified permissions. perms must be a string matching any property of discord.Permissions. NOTE: this function is deprecated. Use the command 'permissions' attribute instead. """ def decorator(func): @wraps(func) async def wrapped(message: discord.Message, *args, **kwargs): member_perms = message.channel.permissions_for(message.author) if all(getattr(member_perms, perm, False) for perm in perms): await func(message, *args, **kwargs) return wrapped return decorator
6,015
def validate_script(value): """Check if value is a valid script""" if not sabnzbd.__INITIALIZED__ or (value and sabnzbd.filesystem.is_valid_script(value)): return None, value elif (value and value == "None") or not value: return None, "None" return T("%s is not a valid script") % value, None
6,016
async def post_user(ctx: Context, user: MemberOrUser) -> t.Optional[dict]: """ Create a new user in the database. Used when an infraction needs to be applied on a user absent in the guild. """ log.trace(f"Attempting to add user {user.id} to the database.") payload = { 'discriminator': int(user.discriminator), 'id': user.id, 'in_guild': False, 'name': user.name, 'roles': [] } try: response = await ctx.bot.api_client.post('bot/users', json=payload) log.info(f"User {user.id} added to the DB.") return response except ResponseCodeError as e: log.error(f"Failed to add user {user.id} to the DB. {e}") await ctx.send(f":x: The attempt to add the user to the DB failed: status {e.status}")
6,017
def setup(app): """ Install the plugin. :param app: Sphinx application context. """ app.add_role('tl', tl_role) app.add_config_value('tl_ref_url', None, 'env') return
6,018
def create_preferences_docs(): """Create preferences docs from SETTINGS using a jinja template.""" sections = {} for setting in CORE_SETTINGS: schema = setting.schema() title = schema.get("title", "") description = schema.get("description", "") preferences_exclude = getattr( setting.NapariConfig, "preferences_exclude", [] ) if set(preferences_exclude) == set(schema["properties"].keys()): continue section = title.lower() sections[section] = { "title": title, "description": description, "fields": [], } schema = setting.__fields__ for field in sorted(setting.__fields__): if field not in ["schema_version"]: data = schema[field].field_info default = repr(schema[field].get_default()) title = data.title description = data.description try: type_ = schema[field].type_ except Exception: pass sections[section]["fields"].append( { "field": field, "title": title, "description": description, "default": default, "ui": field not in preferences_exclude, "type": type_, } ) template = Template(PREFERENCES_TEMPLATE) text = template.render( sections=sections, images_path="../images/_autogenerated/" ) with open(GUIDES_PATH / "preferences.md", "w") as fh: fh.write(text)
6,019
def delete(ids: List = Body(...)): """ Deletes from an embeddings index. Returns list of ids deleted. Args: ids: list of ids to delete Returns: ids deleted """ try: return application.get().delete(ids) except ReadOnlyError as e: raise HTTPException(status_code=403, detail=e.args[0]) from e
6,020
def main(): """ Simple Event Viewer """ events = None try: events = remote('127.0.0.1', EventOutServerPort, ssl=False, timeout=5) while True: event_data = '' while True: tmp = len(event_data) event_data += events.recv(numb=8192, timeout=1).decode('latin-1') if tmp == len(event_data): break if len(event_data): # fix the JSON mess event_data = fix_json(event_data) if not len(event_data): log.warning('[Simple Event Viewer]: callback data invalid!\n') return False for event in event_data: log.info('[Event From]: {}\n{}'.format(color(event.get('host'), GREEN), event)) except (PwnlibException, EOFError, KeyboardInterrupt): log.warning("[Simple Event Viewer]") if events: events.close() return False
6,021
def test_slack_chart_alert( screenshot_mock, email_mock, create_alert_email_chart, ): """ ExecuteReport Command: Test chart slack alert """ # setup screenshot mock screenshot_mock.return_value = SCREENSHOT_FILE with freeze_time("2020-01-01T00:00:00Z"): AsyncExecuteReportScheduleCommand( TEST_ID, create_alert_email_chart.id, datetime.utcnow() ).run() notification_targets = get_target_from_report_schedule(create_alert_email_chart) # Assert the email smtp address assert email_mock.call_args[0][0] == notification_targets[0] # Assert the email inline screenshot smtp_images = email_mock.call_args[1]["images"] assert smtp_images[list(smtp_images.keys())[0]] == SCREENSHOT_FILE # Assert logs are correct assert_log(ReportState.SUCCESS)
6,022
def fsi_acm_up_profiler_descending(vp1, vp2, vp3): """ Description: Calculates the VEL3D Series A and L upwards velocity data product VELPTMN-VLU-DSC_L1 for the Falmouth Scientific (FSI) Acoustic Current Meter (ACM) mounted on a McLane profiler. Because of the orientation of the ACM stinger fingers (see Notes) upward current velocity can be calculated in several different ways. This function calculates the vertical velocity to be used when the profiler is descending, avoiding the use of data from vp4 which will be contaminated by the sheet-flow wake of the stinger's central post. Usage: w_fsi_dsc = fsi_acm_up_profiler_descending(vp1, vp2, vp3) where w_fsi_dsc = velocity up; VELPTMN-VLU-DSC_L1 [m/s] vp1 = raw beam velocity from the port stinger finger; VELPTMN-VP1_L0 [cm/s] vp2 = raw beam velocity from the lower stinger finger; VELPTMN-VP2_L0 [cm/s] vp3 = raw beam velocity from the starboard stinger finger; VELPTMN-VP3_L0 [cm/s] Implemented by: 2015-02-13: Russell Desiderio. Initial code. Notes: The VEL3D series A and L instruments are FSI current meters modified for use on a McLane profiler. The FSI ACM has 4 raw beam velocities. The correspondences between the MMP manual designations and the IDD designations are: (Xplus, Yplus, Xminus, Yminus) (MMP manual, page G-22) (va , vb , vc , vd ) (IDD, VEL3D series A) (vp1 , vp2 , vp3 , vp4 ) (IDD, VEL3D series L) (left , down , right , up ) (spatial orientation) This is also the ordering of these parameters in telemetered and recovered data. The MMP manual Rev E, page 8-30, incorrectly calculates the upward velocities wU and wD. For more information see the Notes to worker function fsi_acm_horz_vel. References: OOI (2015). Data Product Specification for Mean Point Water Velocity Data from FSI Acoustic Current Meters. Document Control Number 1341-00792. https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> Controlled >> 1000 System Level >> 1341-00792_Data_Product_SPEC_VELPTMN_ACM_OOI.pdf) OOI (2015). 1341-00792_VELPTMN Artifact: McLane Moored Profiler User Manual. https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> >> REFERENCE >> Data Product Specification Artifacts >> 1341-00792_VELPTMN >> MMP-User Manual-Rev-E-WEB.pdf) """ # find the x-velocity in the instrument coordinate system x = -(vp1 + vp3) / np.sqrt(2.0) # the z-velocity in the instrument coordinate system is also the w velocity in the # earth coordinate system because the effects of pitch and roll are negligible. w = -x + np.sqrt(2.0) * vp2 # change units from cm/s to m/s return w / 100.0
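A worked numeric example of the two formulas above (the input values are made up for illustration): with vp1 = vp3 = 0 cm/s the x component vanishes, so w = sqrt(2) * vp2, converted to m/s.

import numpy as np

vp1 = np.array([0.0])   # cm/s, port stinger finger
vp2 = np.array([10.0])  # cm/s, lower stinger finger
vp3 = np.array([0.0])   # cm/s, starboard stinger finger
w = fsi_acm_up_profiler_descending(vp1, vp2, vp3)
print(w)  # -> [0.14142136]  (m/s, i.e. sqrt(2) * 10 cm/s / 100)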
6,023
def colour_name(colour: Tuple[int, int, int]) -> str: """Return the colour name associated with this colour value, or the empty string if this colour value isn't in our colour list. >>> colour_name((1, 128, 181)) 'Pacific Point' >>> colour_name(PACIFIC_POINT) 'Pacific Point' """ colour_names = { PACIFIC_POINT: 'Pacific Point', REAL_RED: 'Real Red', OLD_OLIVE: 'Old Olive', DAFFODIL_DELIGHT: 'Daffodil Delight' } if colour in colour_names: return colour_names[colour] else: return ''
6,024
def score_fn(subj_score, comp_score): """ Generates the TextStim with the updated score values Parameters ---------- subj_score : INT The subjects score at the moment comp_score : INT The computer's score at the moment' Returns ------- score_stim : psychopy.visual.text.TextStim The visual stimulus ready to be drawn. e.g. 5 - 4 Spacebar to continue """ score = stimuli.score_text.format(subj_score, comp_score) #To edit the score_text go to the stimuli.py module score_stim = visual.TextStim(win, text = score, pos = (0, -.6)) return score_stim
6,025
def parse_write_beam(line): """ Write_beam (type -2) If btype = −2, output particle phase-space coordinate information at given location V3(m) into filename fort.Bmpstp with particle sample frequency Bnseg. Here, the maximum number of phase- space files which can be output is 100. Here, 40 and 50 should be avoided since these are used for initial and final phase space output. """ x = line.split() v = v_from_line(line) d={} d['filename']='fort.'+x[2] d['sample_frequency'] = int(x[1]) d['s'] = float(v[3]) if int(x[2]) in [40, 50]: print('warning, overwriting file fort.'+x[2]) return d
6,026
def sleep(sleep_time=0.250): """Default sleep time to enable the OS to reuse address and port. """ time.sleep(sleep_time)
6,027
def add_column_opt(M_opt, tgt, src): """For a matrix produced by siqs_build_matrix_opt, add the column src to the column target (mod 2). """ M_opt[tgt] ^= M_opt[src]
6,028
def list_s3(bucket, prefix, ext=None): """Get listing of files on S3 with prefix and extension """ s3 = boto3.resource('s3') s3_bucket = s3.Bucket(bucket) if ext: ext = '.' + ext.lstrip('.') else: ext = '' for item in s3_bucket.objects.filter(Prefix=prefix): key = item.key if not key.endswith(ext): continue yield key
6,029
def get_tone(pinyin):
    """Return the tone of the pinyin entered by the user.

    Args:
        pinyin {str}: the pinyin input from the user
    Returns:
        number/None: if not None, the tone part of the pinyin (a digit)
    """
    # Take the last character of the pinyin
    tone = pinyin[-1]
    # Check whether it is a digit
    if tone.isdigit():
        return tone
    else:
        return None
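Two illustrative calls (not from the original row): a numbered pinyin syllable returns its tone digit as a string, an unnumbered one returns None.

print(get_tone("ma3"))  # -> '3'
print(get_tone("ma"))   # -> None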
6,030
def authIfV2(sydent, request, requireTermsAgreed=True): """For v2 APIs check that the request has a valid access token associated with it :returns Account|None: The account object if there is correct auth, or None for v1 APIs :raises MatrixRestError: If the request is v2 but could not be authed or the user has not accepted terms """ if request.path.startswith('/_matrix/identity/v2'): token = tokenFromRequest(request) if token is None: raise MatrixRestError(401, "M_UNAUTHORIZED", "Unauthorized") accountStore = AccountStore(sydent) account = accountStore.getAccountByToken(token) if account is None: raise MatrixRestError(401, "M_UNAUTHORIZED", "Unauthorized") if requireTermsAgreed: terms = get_terms(sydent) if ( terms.getMasterVersion() is not None and account.consentVersion != terms.getMasterVersion() ): raise MatrixRestError(403, "M_TERMS_NOT_SIGNED", "Terms not signed") return account return None
6,031
def gen_rho(K): """The Ideal Soliton Distribution, we precompute an array for speed """ return [1.0/K] + [1.0/(d*(d-1)) for d in range(2, K+1)]
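A quick sanity check on the distribution above (illustrative): the terms 1/K + sum over d >= 2 of 1/(d*(d-1)) telescope to exactly 1 for any K.

K = 10
rho = gen_rho(K)
print(len(rho))             # -> 10 (one probability per degree d = 1..K)
print(round(sum(rho), 9))   # -> 1.0, since sum_{d=2}^{K} 1/(d*(d-1)) = 1 - 1/K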
6,032
async def test_initialize_ncbi_blast(mock_blast_server): """ Using a mock BLAST server, test that a BLAST initialization request works properly. """ seq = "ATGTACAGGATCAGCATCGAGCTACGAT" assert await virtool.bio.initialize_ncbi_blast({"proxy": ""}, seq) == ("YA40WNN5014", 19)
6,033
def print_rules() -> None: """ Load a text files 'rules_eng.txt' saved inside the same directory. Open a second window. Returns ------- None. """ ruleWindow=tk.Toplevel() ruleWindow.title("How to play?") with open('rules_eng.txt') as f: gameRules=f.read() lbl_rules=tk.Label(ruleWindow, text=gameRules, fg="black", anchor="e", justify=tk.LEFT) lbl_rules.pack(side=tk.TOP) ruleWindow.resizable(0,0) ruleWindow.mainloop()
6,034
def assert_array_max_ulp(a, b, maxulp=1, dtype=None): """ Check that all items of arrays differ in at most N Units in the Last Place. Parameters ---------- a, b : array_like Input arrays to be compared. maxulp : int, optional The maximum number of units in the last place that elements of `a` and `b` can differ. Default is 1. dtype : dtype, optional Data-type to convert `a` and `b` to if given. Default is None. Returns ------- ret : ndarray Array containing number of representable floating point numbers between items in `a` and `b`. Raises ------ AssertionError If one or more elements differ by more than `maxulp`. Notes ----- For computing the ULP difference, this API does not differentiate between various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000 is zero). See Also -------- assert_array_almost_equal_nulp : Compare two arrays relatively to their spacing. Examples -------- >>> a = np.linspace(0., 1., 100) >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) """ __tracebackhide__ = True # Hide traceback for py.test import numpy as np ret = nulp_diff(a, b, dtype) if not np.all(ret <= maxulp): raise AssertionError("Arrays are not almost equal up to %g " "ULP (max difference is %g ULP)" % (maxulp, np.max(ret))) return ret
6,035
def add_default_field_spec_settings(field_spec_model, field_spec_db_rec): """ Add default settings for field spec. :param field_spec_model: Internal rep of the field spec. :param field_spec_db_rec: The DB record for the field spec. """ base_settings_db = FieldSettingDb.objects.filter( availablefieldspecsettingdb__field_spec=field_spec_db_rec ).values() relationship_settings = AvailableFieldSpecSettingDb.objects.filter( field_spec=field_spec_db_rec).order_by('field_setting_order').values() settings_gatherer = SettingsGatherer( base_settings=base_settings_db, relationship_settings=relationship_settings, relationship_setting_id_field='field_setting_id', relationship_setting_order_field='field_setting_order', relationship_setting_params_field='field_setting_params', ) field_spec_model.settings = settings_gatherer.gather_settings()
6,036
def Ambient_Switching(crop_PPFDmin, Trans, consumption):
    """
    Inputs: consumption (returned from Light_Sel)
    """
    # How much energy can you save if you switch off when ambient lighting is enough for plant needs?
    # Assume that when ambient is higher than max recommended PPFD, the greenhouse is cloaked,
    # allowing it to still rely on outside light.
    # Assume that peak solar always happens in the afternoons.
    # Inputs are Detroit 2010 data for solar insolation, consumption in J, and transmissivity.
    count = 0
    for i in Detroit['PPFD (Micromoles/m^2/s)']:
        if (i*Trans) > (crop_PPFDmin):
            count = count + 1
    energy_savings = count * consumption
    #print("If lights are strategically shut off during highly sunny hours, then", energy_savings, "J will be saved")
    return energy_savings
6,037
def callStructuralVariants(bam, reffa, wSizeMax, svLen, lengthWiggle, positionWiggle, basepairIdWiggle, mapq, secondary, duplicate, qcfail, margin, gapdistance, samples): """To identify structural variants from BAM alignments: 1. Find windows in each read that contain an excess of a certain CIGAR operation. 2. Cluster similar windows from different reads. 3. Summarize into structural variant calls. Return `StructuralVariant` objects ordered by chrom,chromStart. """ activelist = [] # read SV window clusters that might still be extended svheap = [] # heap of SV calls for rsvw in findSvWindowsInBam(bam, reffa, wSizeMax, svLen, mapq, secondary, duplicate, qcfail, margin, gapdistance): rsvwc = ReadSvWindowCluster([rsvw]) # new cluster for the new window # Add into the new cluster any existing clusters that overlap. The new # window might merge two or more existing clusters into one. Call structural # variants from the clusters that can not be extended. nextactivelist = [] for svwc in activelist: # A cluster can not be extended when its rightmost window starts prior # to the current window (allowing for positionWiggle). if svwc.maxRefStart[0] < rsvw.refName or svwc.maxRefStart[1] + positionWiggle < rsvw.refStart: # Call structural variants from the complete window. for sv in svwc.toStructuralVariants(StructuralVariant, samples): heapq.heappush(svheap, sv) elif rsvwc.overlaps(svwc, lengthWiggle, positionWiggle, basepairIdWiggle): rsvwc.merge(svwc) # merge into the new cluster else: nextactivelist.append(svwc) nextactivelist.append(rsvwc) activelist = nextactivelist # minimum start position of any active cluster minRefStart = min([svwc.minRefStart for svwc in activelist]) # Flush the structural variants for which it is certain that no active or future # cluster will produce a structural variant that starts earlier. while len(svheap) and (svheap[0].chrom, svheap[0].chromStart) < minRefStart: yield heapq.heappop(svheap) # No cluster can be further extended. for svwc in activelist: for sv in svwc.toStructuralVariants(StructuralVariant, samples): heapq.heappush(svheap, sv) # Flush any remaining structural variants. while len(svheap): yield heapq.heappop(svheap)
6,038
def get_numerical_gradient(position: np.ndarray, function: Callable[[np.ndarray], float], delta_magnitude: float = 1e-6) -> np.ndarray: """ Returns the numerical derivative of an input function at the specified position.""" dimension = position.shape[0] vec_low = np.zeros(dimension) vec_high = np.zeros(dimension) for ii in range(dimension): delta_vec = np.zeros(dimension) delta_vec[ii] = delta_magnitude/2.0 vec_low[ii] = function(position-delta_vec) vec_high[ii] = function(position+delta_vec) return (vec_high-vec_low)/delta_magnitude
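A small illustrative check (not from the original row), assuming numpy and the typing imports the snippet relies on: the central-difference gradient of f(x) = x0**2 + 3*x1 at [1, 2] is approximately [2, 3].

import numpy as np

def f(x: np.ndarray) -> float:
    # simple quadratic-plus-linear test function
    return x[0] ** 2 + 3.0 * x[1]

grad = get_numerical_gradient(np.array([1.0, 2.0]), f)
print(np.round(grad, 6))  # -> [2. 3.] up to O(delta**2) finite-difference error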
6,039
def plot_psd(dpl, *, fmin=0, fmax=None, tmin=None, tmax=None, layer='agg', ax=None, show=True): """Plot power spectral density (PSD) of dipole time course Applies `~scipy.signal.periodogram` from SciPy with ``window='hamming'``. Note that no spectral averaging is applied across time, as most ``hnn_core`` simulations are short-duration. However, passing a list of `Dipole` instances will plot their average (Hamming-windowed) power, which resembles the `Welch`-method applied over time. Parameters ---------- dpl : instance of Dipole | list of Dipole instances The Dipole object. fmin : float Minimum frequency to plot (in Hz). Default: 0 Hz fmax : float Maximum frequency to plot (in Hz). Default: None (plot up to Nyquist) tmin : float or None Start time of data to include (in ms). If None, use entire simulation. tmax : float or None End time of data to include (in ms). If None, use entire simulation. layer : str, default 'agg' The layer to plot. Can be one of 'agg', 'L2', and 'L5' ax : instance of matplotlib figure | None The matplotlib axis. show : bool If True, show the figure Returns ------- fig : instance of matplotlib Figure The matplotlib figure handle. """ import matplotlib.pyplot as plt from scipy.signal import periodogram from .dipole import Dipole if ax is None: _, ax = plt.subplots(1, 1, constrained_layout=True) if isinstance(dpl, Dipole): dpl = [dpl] scale_applied = dpl[0].scale_applied sfreq = dpl[0].sfreq trial_power = [] for dpl_trial in dpl: if dpl_trial.scale_applied != scale_applied: raise RuntimeError('All dipoles must be scaled equally!') if dpl_trial.sfreq != sfreq: raise RuntimeError('All dipoles must be sampled equally!') data, _ = _get_plot_data_trange(dpl_trial.times, dpl_trial.data[layer], tmin, tmax) freqs, Pxx = periodogram(data, sfreq, window='hamming', nfft=len(data)) trial_power.append(Pxx) ax.plot(freqs, np.mean(np.array(Pxx, ndmin=2), axis=0)) if fmax is not None: ax.set_xlim((fmin, fmax)) ax.ticklabel_format(axis='both', scilimits=(-2, 3)) ax.set_xlabel('Frequency (Hz)') if scale_applied == 1: ylabel = 'Power spectral density\n(nAm' + r'$^2 \ Hz^{-1}$)' else: ylabel = 'Power spectral density\n' +\ r'([nAm$\times$ {:.0f}]'.format(scale_applied) +\ r'$^2 \ Hz^{-1}$)' ax.set_ylabel(ylabel, multialignment='center') plt_show(show) return ax.get_figure()
6,040
def stable_point(r):
    """ Repeat the process n times to make sure we have reached a fixed point """
    n = 1500
    x = np.zeros(n)
    x[0] = np.random.uniform(0, 0.5)
    for i in range(n - 1):
        x[i + 1] = f(x[i], r)
    print(x[-200:])
    return x[-200:]
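The snippet relies on an externally defined map f(x, r) that is not shown; a minimal sketch of the kind of map typically iterated here is the logistic map (this choice of f is an assumption, not taken from the original row):

def f(x, r):
    # logistic map, assumed here purely for illustration
    return r * x * (1 - x)

# for r = 2.5 the iteration settles on the stable fixed point 1 - 1/r = 0.6
tail = stable_point(2.5)
print(tail[-1])  # -> approximately 0.6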
6,041
def test_regexp_message(dummy_form, dummy_field, grab_error_message): """ Regexp validator should return given message """ validator = regexp("^a", message="foo") dummy_field.data = "f" assert grab_error_message(validator, dummy_form, dummy_field) == "foo"
6,042
def vary_on_headers(*headers): """ A view decorator that adds the specified headers to the Vary header of the response. Usage: @vary_on_headers('Cookie', 'Accept-language') def index(request): ... Note that the header names are not case-sensitive. """ def decorator(func): @wraps(func) def inner_func(*args, **kwargs): response = func(*args, **kwargs) patch_vary_headers(response, headers) return response return inner_func return decorator
6,043
def load_frames( frame_dir: pathlib.Path, df_frames: pd.DataFrame, ) -> Dict[int, Dict[str, Union[str, np.ndarray]]]: """Load frame files from a directory. Args: frame_dir: Path to directory where frames are stored in a target class folder or background class folder df_frames: Dataframe with frame information. Returns: Dictionary where key is frame index and value is a dictionary with the target class and frame image """ logger = logging.getLogger(__name__) logger.debug("Loading frames at %s", frame_dir) frame_filepaths = frame_dir.rglob("*.jpeg") frames_dict = {} df_video_frames = df_frames[frame_dir.name == df_frames["video_name"]] for frame_filepath in frame_filepaths: frame_img = imageio.imread(frame_filepath) _, frame_index = frame_filepath.stem.split("___") frame_index = int(frame_index) target = df_video_frames.loc[df_video_frames["frame_ind"] == frame_index, "target"].item() logger.debug("Frame %s target class is %s", frame_filepath.name, target) frames_dict[frame_index] = {"image": frame_img, "target": target} return frames_dict
6,044
def calc_Z(pattern): """Calculate Z values using Z-algorithm as in Gusfield""" Z = [] len_p = len(pattern) Z.append(len_p) Z.append(0) l=1 Z[1] = match(pattern, 0, 1) r = Z[1]+1 l = 1 for k in range(2, len_p): if k>r: zk = match(pattern,0, k) r = zk+k l = k else: b = r-k ks = k-l if Z[ks]<b: zk = Z[ks] else: q = match(pattern, r+1, b) zk = r-k r = q-1 l = k Z.append(zk) print(Z)
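For reference, Z[k] here is the length of the longest common prefix of the pattern and its suffix starting at k, which is what match(pattern, 0, k) appears to compute in the snippet above. A brute-force version that can be used to cross-check the output (illustrative only, not the Gusfield linear-time algorithm):

def calc_Z_naive(pattern):
    # Z[k] = length of the longest common prefix of pattern and pattern[k:]
    n = len(pattern)
    Z = [n]
    for k in range(1, n):
        length = 0
        while k + length < n and pattern[length] == pattern[k + length]:
            length += 1
        Z.append(length)
    return Z

print(calc_Z_naive("ababa"))  # -> [5, 0, 3, 0, 1]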
6,045
def non_daemonic_process_pool_map(func, jobs, n_workers, timeout_per_job=None): """ function for calculating in parallel a function that may not be run a in a regular pool (due to forking processes for example) :param func: a function that accepts one input argument :param jobs: a list of input arguments to func :param n_workers: number of parallel workers :param timeout_per_job: timeout for processing a single job :return: list of results in the order of the "jobs" list """ END_TOKEN = 'END' q_in = Queue() q_out = Queue() def queue_worker(q_in, q_out): arg_in = q_in.get() while arg_in != END_TOKEN: try: result = func(arg_in) except Exception as e: logger.exception(e) logger.error(f'Queue worker failed on input: {arg_in}, with {str(e)}') result = None q_out.put((arg_in, result)) arg_in = q_in.get() q_out.put(END_TOKEN) # put jobs [q_in.put(c) for c in jobs + n_workers * [END_TOKEN]] # start workers workers = [Process(target=queue_worker, args=(q_in, q_out)) for _ in range(n_workers)] [w.start() for w in workers] # wait for results n_finished = 0 outputs = [] while n_finished < n_workers: output = q_out.get(timeout=timeout_per_job) logger.info(f'queue out, got: {output}') if output == END_TOKEN: n_finished += 1 logger.info(f'{n_finished}/{n_workers} queue workers done') else: outputs.append(output) # wait for workers to join logger.info('Joining queue workers') [w.join() for w in workers] logger.info('Joined all queue workers') # sort in original order results = [output[1] for output in sorted(outputs, key=lambda output: jobs.index(output[0]))] return results
6,046
def register_commands(subparsers, context): """Register devtool subcommands from the package plugin""" if context.fixed_setup: parser_package = subparsers.add_parser('package', help='Build packages for a recipe', description='Builds packages for a recipe\'s output files', group='testbuild', order=-5) parser_package.add_argument('recipename', help='Recipe to package') parser_package.set_defaults(func=package)
6,047
def condense_alignment(outfile, aln_file, logger=lambda x: None): """Call viruses from the alignment file.""" pass
6,048
def test_output_simple(): """ Elbow should be at 2. """ X = np.array([10, 9, 3, 2, 1]) elbows, _ = select_dimension(X, n_elbows=1) assert_equal(elbows[0], 2)
6,049
def result_list_handler(*args: list, **kwargs) -> str: """ Handles the main search result for each query. It checks whether there are any result for this qeury or not. 1. If there was results, then it sorts and decorates the them. 2 Otherwise it shows a message containing there were no results for this query :param args: 1. *[0] -> query 2. *[1] -> a list of search results objects :param kwargs: :return: Final decorated search results """ query = args[0] search_res = args[1] print(UD.bidirectional(u'\u0688')) x = len([None for ch in query if UD.bidirectional(ch) in ('R', 'AL')]) / float(len(query)) # print('{t} => {c}'.format(t=query.encode('utf-8'), c='RTL' if x > 0.5 else 'LTR')) # print(UD.bidirectional("dds".decode('utf-8'))) # direction = 'RTL' if x > 0.5 else 'LTR' dir_str = "&rlm;" if x > 0.5 else '&lrm;' fruit = random.choice(fruit_list) print(search_res) if int(search_res["hits"]["total"]["value"]) > 0: text = f"<b>{_search_emoji} نتایج جستجو برای: {textwrap.shorten(query, width=100, placeholder='...')}</b>\n" text += f"{_checkmark_emoji} نتایج بهتر پایین لیست هستند.\n\n\n" _headphone_emoji = emoji.EMOJI_ALIAS_UNICODE[':headphone:'] for index, hit in reversed(list(enumerate(search_res['hits']['hits']))): duration = timedelta(seconds=int(hit['_source']['duration'])) d = datetime(1, 1, 1) + duration _performer = hit['_source']['performer'] _title = hit['_source']['title'] _file_name = hit['_source']['file_name'] if not (len(_title) < 2 or len(_performer) < 2): name = f"{_performer} - {_title}" elif not len(_performer) < 2: name = f"{_performer} - {_file_name}" else: name = _file_name # name = f"{_file_name if (_performer == 'None' and _title == 'None') else (_performer if _title == 'None' else _title)}".replace( # ".mp3", "") text += f"<b>{str(index + 1)}. {dir_str} {_headphone_emoji} {fruit if index == 0 else ''}</b>" \ f"<b>{textwrap.shorten(name, width=35, placeholder='...')}</b>\n" \ f"{dir_str} {_floppy_emoji} | {round(int(hit['_source']['file_size']) / 1000_000, 1)} {'مگابایت' if x > 0.5 else 'MB'} " \ f"{_clock_emoji} | {str(d.hour) + ':' if d.hour > 0 else ''}{d.minute}:{d.second}\n{dir_str}" \ f" دانلود: " \ f" /dl_{hit['_id']} \n" \ f" {34 * '-' if not index == 0 else ''}{dir_str} \n\n" else: text = f"{_traffic_light} هیچ نتیجه ای برای این عبارت پیدا نشد:" \ f"\n<pre>{textwrap.shorten(query, width=200, placeholder='...')}</pre>" return text
6,050
def flip_mask(mask, x_flip, y_flip):
    """
    Args:
        mask: binary mask [height, width]
    """
    mask = mask.copy()
    if y_flip:
        mask = np.flip(mask, axis=0)
    if x_flip:
        mask = np.flip(mask, axis=1)
    return mask
6,051
def sbn2journal(sbn_record, permalink_template="http://id.sbn.it/bid/%s"): """ Creates a `dbmodels.Journal` instance out of a dictionary with metadata. :param record: the dictionary returned by `resolution.supporting_functions.enrich_metadata()` :return: an instance of `dbmodels.Journal` """ bid = normalize_bid(sbn_record["codiceIdentificativo"]) metadata = { 'short_title' : sbn_record["titolo"].split(":")[0].split("/")[0].strip() , 'full_title' : sbn_record["titolo"] , 'bid' : bid , 'sbn_link' : permalink_template % bid , 'identifiers' : [] , "provenance" : "lbcatalogue" } if "numeri" in sbn_record: identifiers = sbn_record["numeri"] for identifier in identifiers: tmp = [{ "identifier_type" : key ,"value": identifier[key] } for key in identifier.keys()][0] metadata["identifiers"].append(SBN_Identifier(**tmp)) return Journal(**metadata)
6,052
def run(rendering=True): """ Runs the balls demo, in which the robot moves according to random torques as 10 balls bounces around. You may run the executable roboball2d_balls_demo after install. Parameters ---------- rendering : renders the environment if True """ # similar to roboball2d_demo, but with several balls n_balls = 10 # configurations : # just create as many configs as there are balls robot_config = DefaultRobotConfig() ball_configs = [BallConfig() for _ in range(n_balls)] # the first ball : in pink ball_configs[0].color = (1,0.08,0.57) visible_area_width = 6.0 visual_height = 0.05 # physics engine # physical engine world = B2World(robot_config, ball_configs, visible_area_width) # graphics renderer if rendering: renderer_config = RenderingConfig(visible_area_width, visual_height) renderer = PygletRenderer(renderer_config, robot_config, ball_configs) # ball gun : specifies the reset of # the ball (by shooting a new one) ball_guns = [DefaultBallGun(ball_config) for index,ball_config in enumerate(ball_configs)] # robot init : specifies the reinit of the robot # (e.g. angles of the rods and rackets, etc) robot_init = DefaultRobotState(robot_config) # we add a fixed goal # starting at x=3 and finishing at x=4 goal = (2,4) goal_color = (0,0.7,0) # usual color goal_activated_color = (0,1,0) # when hit by a ball # running 10 episodes, max 3 seconds per episode n_episodes = 0 for episode in range(10): # tracking the number of times the ball bounced n_bounced = 0 # reset before a new episode world.reset(robot_init, ball_guns) while True: # random torques torques = [random.uniform(-1.0,1.0) for _ in range(3)] # returns a snapshot of all the data computed # and updated by the physics engine at this # iteration world_state = world.step(torques,relative_torques=True) # episode ends # if the number of bounces is 2 or above # for the first ball ... if world_state.ball_hits_floor : n_bounced += 1 if n_bounced >= 2: break # ... or if 3 seconds passed if world_state.t > 3: break # display the goal with a lighter color # if hit by a ball at this iteration color = goal_color for p in world_state.balls_hits_floor: p = world_state.ball_hits_floor if p is not None and p>goal[0] and p<goal[1]: # goal hit, using activated color color = goal_activated_color break # the renderer can take in an array of goals # to display. Here specifying only 1 goal # (start_x,end_x,color) goals = [(goal[0],goal[1],color)] # render based on the information provided by # the physics engine if rendering: renderer.render(world_state,goals, time_step=1.0/60.0)
6,053
def median(data):
    """Calculates the median value from |data|."""
    data = sorted(data)
    n = len(data)
    if n % 2 == 1:
        return data[n // 2]
    else:
        n2 = n // 2
        return (data[n2 - 1] + data[n2]) / 2.0
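Two illustrative calls (not from the original row): an odd-length input returns the middle element, an even-length input returns the mean of the two middle elements.

print(median([7, 1, 3]))     # -> 3
print(median([4, 1, 3, 2]))  # -> 2.5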
6,054
def test_regression_1(): """ Regression test for https://github.com/sjjessop/omnidice/issues/1 """ expr = (-dice.d6).explode() check_approx(expr, eval(repr(expr), dice.__dict__))
6,055
def evaluation(eval_loader, model, criterion, num_classes, batch_size, ep_idx, progress_log, scale, vis_params,
               batch_metrics=None, dataset='val', device=None, debug=False):
    """
    Evaluate the model and return the updated metrics
    :param eval_loader: data loader
    :param model: model to evaluate
    :param criterion: loss criterion
    :param num_classes: number of classes
    :param batch_size: number of samples to process simultaneously
    :param ep_idx: epoch index (for hypertrainer log)
    :param progress_log: progress log file (for hypertrainer log)
    :param scale: Scale to which values in sat img have been redefined. Useful during visualization
    :param vis_params: (Dict) Parameters useful during visualization
    :param batch_metrics: (int) Metrics computed every (int) batches. If left blank, will not perform metrics.
    :param dataset: (str) 'val' or 'tst'
    :param device: device used by pytorch (cpu or cuda)
    :param debug: if True, debug functions will be performed
    :return: (dict) eval_metrics
    """
    eval_metrics = create_metrics_dict(num_classes)
    model.eval()

    for batch_index, data in enumerate(tqdm(eval_loader, dynamic_ncols=True, desc=f'Iterating {dataset} '
                                                                                  f'batches with {device.type}')):
        progress_log.open('a', buffering=1).write(tsv_line(ep_idx, dataset, batch_index, len(eval_loader), time.time()))

        with torch.no_grad():
            try:  # For HPC when device 0 not available. Error: RuntimeError: CUDA error: invalid device ordinal
                inputs = data['sat_img'].to(device)
                labels = data['map_img'].to(device)
            except RuntimeError:
                logging.exception(f'Unable to use device {device}. Trying "cuda:0"')
                device = torch.device('cuda')
                inputs = data['sat_img'].to(device)
                labels = data['map_img'].to(device)

            labels_flatten = flatten_labels(labels)

            outputs = model(inputs)
            if isinstance(outputs, OrderedDict):
                outputs = outputs['out']

            # vis_batch_range: range of batches to perform visualization on. see README.md for more info.
            # vis_at_eval: (bool) if True, will perform visualization at eval time, as long as vis_batch_range is valid
            if vis_params['vis_batch_range'] and vis_params['vis_at_eval']:
                min_vis_batch, max_vis_batch, increment = vis_params['vis_batch_range']
                if batch_index in range(min_vis_batch, max_vis_batch, increment):
                    vis_path = progress_log.parent.joinpath('visualization')
                    if ep_idx == 0 and batch_index == min_vis_batch:
                        logging.info(
                            f'Visualizing on {dataset} outputs for batches in range {vis_params["vis_batch_range"]} '
                            f'images will be saved to {vis_path}\n')
                    vis_from_batch(vis_params, inputs, outputs, batch_index=batch_index, vis_path=vis_path,
                                   labels=labels, dataset=dataset, ep_num=ep_idx + 1, scale=scale)

            outputs_flatten = flatten_outputs(outputs, num_classes)

            loss = criterion(outputs, labels)

            eval_metrics['loss'].update(loss.item(), batch_size)

            if (dataset == 'val') and (batch_metrics is not None):
                # Compute metrics every n batches. Time consuming.
                if not batch_metrics <= len(eval_loader):
                    logging.error(f"Batch_metrics ({batch_metrics}) is smaller than batch size "
                                  f"{len(eval_loader)}. Metrics in validation loop won't be computed")
                if (batch_index + 1) % batch_metrics == 0:  # +1 to skip val loop at very beginning
                    a, segmentation = torch.max(outputs_flatten, dim=1)
                    eval_metrics = iou(segmentation, labels_flatten, batch_size, num_classes, eval_metrics)
                    eval_metrics = report_classification(segmentation, labels_flatten, batch_size, eval_metrics,
                                                         ignore_index=eval_loader.dataset.dontcare)
            elif dataset == 'tst':
                a, segmentation = torch.max(outputs_flatten, dim=1)
                eval_metrics = iou(segmentation, labels_flatten, batch_size, num_classes, eval_metrics)
                eval_metrics = report_classification(segmentation, labels_flatten, batch_size, eval_metrics,
                                                     ignore_index=eval_loader.dataset.dontcare)

            logging.debug(OrderedDict(dataset=dataset, loss=f'{eval_metrics["loss"].avg:.4f}'))

            if debug and device.type == 'cuda':
                res, mem = gpu_stats(device=device.index)
                logging.debug(OrderedDict(device=device, gpu_perc=f'{res.gpu} %',
                                          gpu_RAM=f'{mem.used / (1024 ** 2):.0f}/{mem.total / (1024 ** 2):.0f} MiB'))

    logging.info(f"{dataset} Loss: {eval_metrics['loss'].avg}")
    if batch_metrics is not None:
        logging.info(f"{dataset} precision: {eval_metrics['precision'].avg}")
        logging.info(f"{dataset} recall: {eval_metrics['recall'].avg}")
        logging.info(f"{dataset} fscore: {eval_metrics['fscore'].avg}")
        logging.info(f"{dataset} iou: {eval_metrics['iou'].avg}")

    return eval_metrics
6,056
def split_text_by_length(text: str,
                         length: Optional[int] = None,  # Option 1: length + delta
                         delta: Optional[int] = 30,
                         max_length: Optional[int] = None,  # Option 2: give the length bounds directly
                         min_length: Optional[int] = None,
                         ignore_=False):
    """ Split the text according to the given length.
    :param text: the text to split
    :param delta:
    :param length:
    :param max_length: maximum allowed length of a segment.
    :param min_length: minimum allowed length of a segment. Anything shorter is discarded.
    :return : an iterator that yields one split segment at a time
    :param ignore_: whether to drop the final segment if it is too short. Defaults to keeping it.
    """
    if length:
        max_length = length + delta
        min_length = length - delta
    if not max_length or not min_length:
        logger.error(f"split_text_by_length is missing required parameters!!!")
        return None
    while len(text) > max_length:
        s = text[:max_length]
        index = search_split_pos(s)  # best case: split at a sentence boundary
        if index < min_length:
            index = search_split_pos(s, keys=",")  # fallback: split at a comma
            if index == -1:
                index = (max_length + min_length) // 2  # last resort: split at a fixed position
        yield text[:index]
        text = text[index:]
    else:
        if len(text) < min_length and ignore_:
            return  # stop iteration
        yield text
6,057
def _tfidf_fit_transform(vectors: np.ndarray): """ Train TF-IDF (Term Frequency — Inverse Document Frequency) Transformer & Extract TF-IDF features on training data """ transformer = TfidfTransformer() features = transformer.fit_transform(vectors).toarray() return features, transformer
6,058
def _is_missing_sites(spectra: List[XAS]): """ Determines if the collection of spectra are missing any indicies for the given element """ structure = spectra[0].structure element = spectra[0].absorbing_element # Find missing symmeterically inequivalent sites symm_sites = SymmSites(structure) absorption_indicies = {spectrum.absorbing_index for spectrum in spectra} missing_site_spectra_indicies = set(structure.indices_from_symbol(element)) - absorption_indicies for site_index in absorption_indicies: missing_site_spectra_indicies -= set(symm_sites.get_equivalent_site_indices(site_index)) return len(missing_site_spectra_indicies) != 0
6,059
def get_objects(params, meta): """ Retrieve a list of objects based on their upas. params: guids - list of string - KBase IDs (upas) to fetch post_processing - object of post-query filters (see PostProcessing def at top of this module) output: objects - list of ObjectData - see the ObjectData type description in the module docstring above. search_time - int - time it took to perform the search on ES access_group_narrative_info - dict of {access_group_id: narrative_info} - Information about the workspaces in which the objects in the results reside. This data only applies to workspace objects. """ # KBase convention is to wrap params in an array if isinstance(params, list) and len(params) == 1: params = params[0] post_processing = params.get('post_processing', {}) search_results = _search_objects({'query': {'terms': {'_id': params['guids']}}}, meta) objects = _get_object_data_from_search_results(search_results, post_processing) (narrative_infos, ws_infos) = _fetch_narrative_info(search_results, meta) return [{ 'search_time': search_results['search_time'], 'objects': objects, 'access_group_narrative_info': narrative_infos, 'access_groups_info': ws_infos }]
6,060
def heat_transfer_delta(): """ :return: net - OpenModelica network converted to a pandapipes network :rtype: pandapipesNet :Example: >>> pandapipes.networks.simple_water_networks.heat_transfer_delta() """ return from_json(os.path.join(heat_tranfer_modelica_path, "delta.json"))
6,061
def test_list_decimal_max_length_nistxml_sv_iv_list_decimal_max_length_1_2(mode, save_output, output_format): """ Type list/decimal is restricted by facet maxLength with value 5. """ assert_bindings( schema="nistData/list/decimal/Schema+Instance/NISTSchema-SV-IV-list-decimal-maxLength-1.xsd", instance="nistData/list/decimal/Schema+Instance/NISTXML-SV-IV-list-decimal-maxLength-1-2.xml", class_name="NistschemaSvIvListDecimalMaxLength1", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
6,062
def configure_pin_as_input(pin_number): """ :param pin_number: an integer """ GPIO.setup(pin_number, GPIO.IN)
6,063
def set_testcase_path(testcase_file_path): """ Set testcase path """ global tc_path tc_path = testcase_file_path
6,064
def extract(file_path, extract_path): """ Extract if exists. Args: file_path (str): Path of the file to be extracted extract_path (str): Path to copy the extracted files Returns: True if extracted successfully, False otherwise """ if (os.path.exists(file_path) and os.path.isfile(file_path)): with rarfile.RarFile(file_path, 'r') as compressed: compressed.extractall(extract_path) compressed.close() return True return False
6,065
def assert_if_provinces_have_no_cases_and_deaths(country: DataCollector, data: dict): """Check if the data contains all required provinces and that each province has an entry for 'cases' and 'deaths'. Assert if not.""" for p, p_info in country.provinces.items(): short_name = p_info['short_name'] assert short_name in data['provinces'], "Could not find province '{} ({})'".format(p, short_name) assert 'c' in data['provinces'][short_name], \ "Could not find 'cases' for province '{} ({})'".format(p, short_name) cases = data['provinces'][short_name]['c'] assert cases > 0, "Invalid number for 'cases' for province '{} ({})'".format(p, short_name) assert 'd' in data['provinces'][short_name], \ "Could not find 'deaths' for province '{} ({})'".format(p, short_name) deaths = data['provinces'][short_name]['d'] assert deaths > 0, "Invalid number for 'deaths' for province '{} ({})'".format(p, short_name)
6,066
def parseMidi(midifile): """Take a MIDI file and return the list Of Chords and Interval Vectors. The file is first parsed, midi or xml. Then with chordify and PC-Set we compute a list of PC-chords and Interval Vectors. """ mfile = ms.converter.parse(midifile) mChords = mfile.chordify() chordList = [] chordVectors = [] for c in mChords.recurse().getElementsByClass('Chord'): chordList.append(c.orderedPitchClasses) chordVectors.append(c.intervalVector) # print('The number of chords found is : ', len(chordList)) return chordList, chordVectors
6,067
def get_similarity(s1, s2): """ Return similarity of both strings as a float between 0 and 1 """ return SM(None, s1, s2).ratio()
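Assuming SM is difflib.SequenceMatcher (the usual alias for this pattern; an assumption, since the import is not shown in the row), a quick example:

from difflib import SequenceMatcher as SM

# 4 matching characters out of 13 total -> ratio = 2 * 4 / 13
print(round(get_similarity("kitten", "sitting"), 3))  # -> 0.615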
6,068
def preCheck(path: str): """preCheck.""" if not os.path.exists(path): os.makedirs(path) else: print("dir is okay!")
6,069
def test_embed(): """ Test that embedding is treated like a Variable""" embed_dense = L.EmbedID(5, 10) embed_sparse = L.EmbedID(5, 10) embed_dense.W.data[:] = np.random.randn(5, 10).astype('float32') embed_sparse.W.data[:] = np.random.randn(5, 10).astype('float32') embed_sparse.W.data[:, 1:] /= 1e5 dhl_dense_01 = dirichlet_likelihood(embed_dense, alpha=0.1).data dhl_sparse_01 = dirichlet_likelihood(embed_sparse, alpha=0.1).data msg = "Sparse vector has higher likelihood than dense with alpha=0.1" assert dhl_sparse_01 > dhl_dense_01, msg
6,070
def concrete_values_from_iterable( value: Value, ctx: CanAssignContext ) -> Union[None, Value, Sequence[Value]]: """Return the exact values that can be extracted from an iterable. Three possible return types: - ``None`` if the argument is not iterable - A sequence of :class:`Value` if we know the exact types in the iterable - A single :class:`Value` if we just know that the iterable contains this value, but not the precise number of them. Examples: - ``int`` -> ``None`` - ``tuple[int, str]`` -> ``(int, str)`` - ``tuple[int, ...]`` -> ``int`` """ if isinstance(value, MultiValuedValue): subvals = [concrete_values_from_iterable(val, ctx) for val in value.vals] if any(subval is None for subval in subvals): return None value_subvals = [subval for subval in subvals if isinstance(subval, Value)] seq_subvals = [ subval for subval in subvals if subval is not None and not isinstance(subval, Value) ] if not value_subvals and len(set(map(len, seq_subvals))) == 1: return [unite_values(*vals) for vals in zip(*seq_subvals)] return unite_values(*value_subvals, *chain.from_iterable(seq_subvals)) elif isinstance(value, AnnotatedValue): return concrete_values_from_iterable(value.value, ctx) value = replace_known_sequence_value(value) if isinstance(value, SequenceIncompleteValue) and value.typ is tuple: return value.members tv_map = IterableValue.can_assign(value, ctx) if not isinstance(tv_map, CanAssignError): return tv_map.get(T, UNRESOLVED_VALUE) return None
6,071
def hatch_egg(*, egg_name: str, potion_name: str) -> NoReturn: """ Hatch an egg by performing an API request and echo the result to the terminal. """ requester = HatchRequester( egg_name=egg_name, hatch_potion_name=potion_name ) response: requests.Response = requester.post_hatch_egg_request() json: dict = response.json() if json["success"] is True: click.echo(f"Successfully hatched a {egg_name}-{potion_name}.") sys.exit(0) click.echo(f"{json['error']}: {json['message']}") sys.exit(1)
6,072
def ReadSimInfo(basefilename): """ Reads in the information in .siminfo and returns it as a dictionary """ filename = basefilename + ".siminfo" if (os.path.isfile(filename)==False): print("file not found") return [] cosmodata = {} siminfofile = open(filename,"r") line = siminfofile.readline().strip().split(" : ") while(line[0]!=""): cosmodata[line[0]] = float(line[1]) line = siminfofile.readline().strip().split(" : ") siminfofile.close() return cosmodata
6,073
def quat_conjugate(quat_a): """Create quatConjugate-node to conjugate a quaternion. Args: quat_a (NcNode or NcAttrs or str or list or tuple): Quaternion to conjugate. Returns: NcNode: Instance with quatConjugate-node and output-attribute(s) Example: :: Op.quat_conjugate(create_node("decomposeMatrix").outputQuat) """ created_node = _create_operation_node("quat_conjugate", quat_a) return created_node
6,074
def payment_successful(sender, **kwargs):
    """Successful payment: mark the bill as paid and credit the client's balance of minutes"""
    try:
        bill = Bill.objects.get(id=kwargs['InvId'])
    except ObjectDoesNotExist:
        return
    if bill.status != Bill.BILL_STATUS_UNPAID:
        return
    bill.client.balance_minutes += bill.minutes
    bill.client.save()
    bill.status = Bill.BILL_STATUS_PAID
    bill.save()
6,075
def askfont(): """ Opens a :class:`FontChooser` toplevel to allow the user to select a font :return: font tuple (family_name, size, \*options), :class:`~font.Font` object """ chooser = FontChooser() chooser.wait_window() return chooser.font
6,076
def load_data(train_file, test_file):
    """
    Reads the train and test data from their dataset files and splits the
    train data into features and labels.

    Parameters
    ----------
    train_file: pair of file paths (features CSV, labels CSV) for the train set
    test_file: file path of the test set features CSV

    Returns
    -------
    x_tra, y_tra, x_tst: train features, train labels and test features,
    each with the "ID" column dropped.
    """
    x_tra = pd.read_csv(train_file[0]).drop(columns=["ID"])
    y_tra = pd.read_csv(train_file[1]).drop(columns=["ID"])
    x_tst = pd.read_csv(test_file).drop(columns=["ID"])
    return x_tra, y_tra, x_tst
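A usage sketch; the CSV file names below are placeholders, and each file is assumed to contain an "ID" column as the function expects:

# Hypothetical file names, adjust to the actual dataset layout.
x_train, y_train, x_test = load_data(
    ("train_features.csv", "train_labels.csv"),  # (features, labels) pair
    "test_features.csv",
)
print(x_train.shape, y_train.shape, x_test.shape)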
6,077
def split_dataset_random(data_path, test_size=0.2):
    """Split the dataset into train and test sets with the proportion given by test_size.

    data_path is a string (directory containing the dataset files)
    test_size is a float between 0 and 1 (fraction of samples assigned to the test set)"""
    # Collect the file names of the dataset
    sample_name = []
    for filename in os.listdir(data_path):
        sample_name.append(filename)
    # Split the file names into train and test subsets; a test_size fraction goes to test
    train_sample, test_sample = train_test_split(sample_name, test_size=test_size)
    return (train_sample, test_sample)
6,078
def guide(batch_X, batch_y=None, num_obs_total=None):
    """Defines the probabilistic guide for z (variational approximation to posterior):
        q(z) ~ p(z|x)
    """
    # we are interested in the posterior of w and intercept
    # since this is a fairly simple model, we just initialize them according
    # to our prior belief and let the optimization handle the rest
    assert(jnp.ndim(batch_X) == 2)
    d = jnp.shape(batch_X)[1]

    z_w_loc = param("w_loc", jnp.zeros((d,)))
    z_w_std = jnp.exp(param("w_std_log", jnp.zeros((d,))))
    z_w = sample('w', dist.Normal(z_w_loc, z_w_std))

    z_intercept_loc = param("intercept_loc", 0.)
    z_intercept_std = jnp.exp(param("intercept_std_log", 0.))
    z_intercept = sample('intercept', dist.Normal(z_intercept_loc, z_intercept_std))

    return (z_w, z_intercept)
6,079
def show_images_row(imgs, titles, rows=1):
    """
    Display a grid of cv2/numpy images.
    :param imgs: list of images (numpy arrays / cv::Mat)
    :param titles: list of titles, one per image, or None for automatic numbering
    :param rows: number of rows in the grid
    :return: None
    """
    assert (titles is None) or (len(imgs) == len(titles))
    num_images = len(imgs)
    if titles is None:
        titles = ['Image (%d)' % i for i in range(1, num_images + 1)]
    fig = plt.figure()
    cols = int(np.ceil(num_images / float(rows)))  # add_subplot expects integer grid sizes
    for n, (image, title) in enumerate(zip(imgs, titles)):
        ax = fig.add_subplot(rows, cols, n + 1)
        if image.ndim == 2:
            plt.gray()
        plt.imshow(image)
        ax.set_title(title)
        plt.axis('off')
    plt.show()
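A quick smoke test with synthetic images (numpy and matplotlib are assumed to be imported at module level, as the function itself requires):

# Four random grayscale images laid out on two rows; titles are generated automatically.
demo_imgs = [np.random.rand(64, 64) for _ in range(4)]
show_images_row(demo_imgs, titles=None, rows=2)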
6,080
def downloads_dir():
    """
    :returns string: default downloads directory path (with trailing separator).
    """
    return os.path.join(os.path.expanduser('~'), 'Downloads', '')
6,081
def get_namespace(Id=None): """ Gets information about a namespace. See also: AWS API Documentation Exceptions :example: response = client.get_namespace( Id='string' ) :type Id: string :param Id: [REQUIRED]\nThe ID of the namespace that you want to get information about.\n :rtype: dict ReturnsResponse Syntax{ 'Namespace': { 'Id': 'string', 'Arn': 'string', 'Name': 'string', 'Type': 'DNS_PUBLIC'|'DNS_PRIVATE'|'HTTP', 'Description': 'string', 'ServiceCount': 123, 'Properties': { 'DnsProperties': { 'HostedZoneId': 'string' }, 'HttpProperties': { 'HttpName': 'string' } }, 'CreateDate': datetime(2015, 1, 1), 'CreatorRequestId': 'string' } } Response Structure (dict) -- Namespace (dict) --A complex type that contains information about the specified namespace. Id (string) --The ID of a namespace. Arn (string) --The Amazon Resource Name (ARN) that AWS Cloud Map assigns to the namespace when you create it. Name (string) --The name of the namespace, such as example.com . Type (string) --The type of the namespace. The methods for discovering instances depends on the value that you specify: HTTP : Instances can be discovered only programmatically, using the AWS Cloud Map DiscoverInstances API. DNS_PUBLIC : Instances can be discovered using public DNS queries and using the DiscoverInstances API. DNS_PRIVATE : Instances can be discovered using DNS queries in VPCs and using the DiscoverInstances API. Description (string) --The description that you specify for the namespace when you create it. ServiceCount (integer) --The number of services that are associated with the namespace. Properties (dict) --A complex type that contains information that\'s specific to the type of the namespace. DnsProperties (dict) --A complex type that contains the ID for the Route 53 hosted zone that AWS Cloud Map creates when you create a namespace. HostedZoneId (string) --The ID for the Route 53 hosted zone that AWS Cloud Map creates when you create a namespace. HttpProperties (dict) --A complex type that contains the name of an HTTP namespace. HttpName (string) --The name of an HTTP namespace. CreateDate (datetime) --The date that the namespace was created, in Unix date/time format and Coordinated Universal Time (UTC). The value of CreateDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. CreatorRequestId (string) --A unique string that identifies the request and that allows failed requests to be retried without the risk of executing an operation twice. Exceptions ServiceDiscovery.Client.exceptions.InvalidInput ServiceDiscovery.Client.exceptions.NamespaceNotFound :return: { 'Namespace': { 'Id': 'string', 'Arn': 'string', 'Name': 'string', 'Type': 'DNS_PUBLIC'|'DNS_PRIVATE'|'HTTP', 'Description': 'string', 'ServiceCount': 123, 'Properties': { 'DnsProperties': { 'HostedZoneId': 'string' }, 'HttpProperties': { 'HttpName': 'string' } }, 'CreateDate': datetime(2015, 1, 1), 'CreatorRequestId': 'string' } } :returns: ServiceDiscovery.Client.exceptions.InvalidInput ServiceDiscovery.Client.exceptions.NamespaceNotFound """ pass
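The stub above mirrors the AWS Cloud Map GetNamespace API; a hedged usage sketch with boto3 follows, where the namespace ID is a placeholder and valid AWS credentials are assumed:

import boto3

# 'ns-xxxxxxxxxxxxxxxx' is a placeholder namespace ID; requires configured AWS credentials.
client = boto3.client('servicediscovery')
response = client.get_namespace(Id='ns-xxxxxxxxxxxxxxxx')
print(response['Namespace']['Name'], response['Namespace']['Type'])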
6,082
def get_reddit_tables():
    """Returns 12 reddit tables corresponding to 2016"""
    temp = '`fh-bigquery.reddit_posts.2016_{:02d}`'
    return [temp.format(month) for month in range(1, 13)]
6,083
def first_empty(): """Return the lowest numbered workspace that is empty.""" workspaces = sorted(get_workspace_numbers(get_workspaces().keys())) for i in range(len(workspaces)): if workspaces[i] != i + 1: return str(i + 1) return str(len(workspaces) + 1)
6,084
def get_args():
    """get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Demonstrate the effect on an SVM of removing a support vector',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument(
        '-o',
        '--outfile',
        help='File to write figure',
        metavar='FILE',
        type=str,
        default=None)

    parser.add_argument(
        '-r',
        '--random_seed',
        help='Random seed value',
        metavar='int',
        type=int,
        default=None)

    return parser.parse_args()
6,085
def get_local_tzone():
    """Get the current time zone on the local host as a UTC offset string, e.g. '-0400' or '+0200'."""
    # altzone/timezone are seconds *west* of UTC, so a negative value means east of UTC.
    offset = altzone if localtime().tm_isdst else timezone
    sign = '+' if offset < 0 else '-'
    minutes = abs(offset) // 60
    return sign + str(minutes // 60).rjust(2, '0') + str(minutes % 60).rjust(2, '0')
6,086
def subscribers_tables_merge(tablename1: Tablename, tablename2: Tablename, csv_path=csvpath, verbose=True):
    """
    Merges two tables produced by the loader into one. Each argument can be a
    pandas.DataFrame or a group name; in the latter case the group must be in
    the group list and the corresponding file must be in <csv_path>.
    """
    if isinstance(tablename1, pd.DataFrame):
        table1 = tablename1
    else:
        table1 = pd.read_csv(csv_path + tablename1 + '.csv', sep=";", header=0, dtype=str)
    if isinstance(tablename2, pd.DataFrame):
        table2 = tablename2
    else:
        table2 = pd.read_csv(csv_path + tablename2 + '.csv', sep=";", header=0, dtype=str)

    concatenated = pd.concat([table1, table2], ignore_index=True)

    # Identify users subscribed to more than one group.
    # The comparison gs_x != gs_x is True only when the value is NaN.
    outer_joined = pd.merge(table1[['id', 'group_subscribed']], table2[['id', 'group_subscribed']],
                            on='id', how='outer')
    outer_joined['groups'] = outer_joined['group_subscribed_x'] + ',' + outer_joined['group_subscribed_y']
    outer_joined.loc[
        outer_joined.group_subscribed_x != outer_joined.group_subscribed_x, 'groups'] = outer_joined.group_subscribed_y
    outer_joined.loc[
        outer_joined.group_subscribed_y != outer_joined.group_subscribed_y, 'groups'] = outer_joined.group_subscribed_x

    # Merge everything together and clean up.
    left_joined = pd.merge(concatenated, outer_joined[['id', 'groups']], on='id', how='left')
    left_joined['group_subscribed'] = left_joined['groups']
    merged = left_joined.drop_duplicates('id')
    if verbose:
        print("{0} and {1} processed".format(str(tablename1), str(tablename2)))
    return merged[merged.columns[0:6]]
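A sketch with two tiny synthetic tables; the column layout is assumed from the code above (an 'id' column plus a 'group_subscribed' column), and the module-level Tablename and csvpath names are assumed to be defined as in the original code:

import pandas as pd

# Two toy subscriber tables; real tables have more columns, these are illustrative only.
t1 = pd.DataFrame({'id': ['1', '2'], 'name': ['ann', 'bob'], 'group_subscribed': ['cats', 'cats']})
t2 = pd.DataFrame({'id': ['2', '3'], 'name': ['bob', 'eve'], 'group_subscribed': ['dogs', 'dogs']})

merged = subscribers_tables_merge(t1, t2, verbose=False)
print(merged[['id', 'group_subscribed']])
# bob (id '2') ends up with group_subscribed == 'cats,dogs', since he appears in both tables.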
6,087
def delete_api_key_v1(key_id: str) -> None: """Delete api key by key ID ONLY if it is not the last key on the chain Args: key_id: ID of api key to delete """ # Don't allow removal of reserved keys if key_id.startswith("SC_") or key_id.startswith("INTERCHAIN"): raise exceptions.ActionForbidden("Cannot delete reserved API keys") # Don't allow removal of root keys if secrets.get_dc_secret("hmac-id") == key_id: raise exceptions.ActionForbidden("Cannot remove root API key") # Delete the actual key after previous checks pass api_key_dao.delete_api_key(key_id=key_id, interchain=False)
6,088
def get_current_offset(session):
    """
    For backfilling only. This function works with the init container to look up
    its job_id so it can line that up with its consumer group and offset, so that
    we can backfill up to a given point and then kill the worker afterwards.
    """
    if settings.JOB_ID is None:
        return settings.CONSUMER_GROUP, None

    output = {}

    while True:
        logger.info(f"Getting kafka job with job_id = {settings.JOB_ID}")
        sql = f"select * from kafka_jobs WHERE job_id='{settings.JOB_ID}';"
        result = session.execute(sql).fetchall()
        session.commit()
        if len(result) == 0:
            logger.info(f"Did not find job_id={settings.JOB_ID} - sleeping")
            sleep(2)
            continue

        for r in result:
            # Keyed on a (topic, partition) tuple to look up the stop_offset
            output[(r[2], r[3])] = r[4]

        return r[1], output
6,089
def get_params(p1, p2, L): """ Return the curve parameters 'a', 'p', 'q' as well as the integration constant 'c', given the input parameters. """ hv = p2 - p1 m = p1 + p2 def f_bind(a): return f(a, *hv, L) def fprime_bind(a): return fprime(a, hv[0]) # Newton-Raphson algorithm to find a value for 'a' a0 = nr_first_guess(f_bind, 0.1, 0.01, 1.8) a = optimize.newton(f_bind, a0, fprime_bind) # Use our formulas to compute the rest p = 0.5 * (m[0] - a * np.log((L+hv[1])/(L-hv[1]))) q = 0.5 * (m[1] - L / np.tanh(0.5 * hv[0]/a)) c = -a * np.sinh((p1[0]-p)/a) return a, p, q, c
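The residual solved for 'a' above lives in module-level helpers (f, fprime, nr_first_guess) that are not shown here. The standalone sketch below verifies the standard catenary relation those helpers presumably encode, 2*a*sinh(h/(2a)) = sqrt(L**2 - v**2), independently of the module's code; the assumed residual and starting guess are illustrative only:

import numpy as np
from scipy import optimize

def catenary_residual(a, h, v, L):
    # Assumed residual: for a catenary through two points with horizontal gap h,
    # vertical gap v and arc length L, the parameter 'a' satisfies
    # 2*a*sinh(h/(2a)) = sqrt(L**2 - v**2).
    return 2.0 * a * np.sinh(h / (2.0 * a)) - np.sqrt(L**2 - v**2)

# Build a synthetic case from a known catenary y = q + a*cosh((x - p)/a).
a_true, p_true, q_true = 2.0, 1.0, -0.5
x1, x2 = 0.0, 3.0

def cat_y(x):
    return q_true + a_true * np.cosh((x - p_true) / a_true)

def cat_arc(x):
    return a_true * np.sinh((x - p_true) / a_true)

h, v, L = x2 - x1, cat_y(x2) - cat_y(x1), cat_arc(x2) - cat_arc(x1)

a_rec = optimize.newton(catenary_residual, 1.0, args=(h, v, L))
print(round(a_rec, 6))  # -> approximately 2.0, recovering a_true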
6,090
def _unpack(f):
    """Decorator that unpacks a tuple argument into positional arguments before calling f."""
    def decorated(args):
        if not isinstance(args, tuple):
            args = (args,)
        return f(*args)
    return decorated
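A small usage sketch; the add function below is just for illustration:

@_unpack
def add(a, b):
    # Receives the elements of the tuple passed to the decorated function.
    return a + b

print(add((2, 3)))  # -> 5, the tuple (2, 3) is unpacked into (a, b)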
6,091
def chain_rich(iterable: Iterable['WriteRichOp']) -> 'WriteRichOp': """Take an `iterable` of `WriteRich` segments and combine them to produce a single WriteRich operation.""" from .ops.classes import WriteRichOp return reduce(WriteRichOp.then, iterable)
6,092
def sum_plot_chi2_curve(bin_num, sum_bin, r_mpc, ax=None, cov_type='bt',
                        label=None, xlabel=True, ylabel=True, show_bin=True,
                        ref_sig=None):
    """Plot the chi2 curve."""
    fig = None
    if ax is None:
        fig = plt.figure(figsize=(6, 6))
        fig.subplots_adjust(
            left=0.165, bottom=0.13, right=0.995, top=0.99,
            wspace=None, hspace=None)
        ax = fig.add_subplot(111)

    ax.axhline(1.0, linewidth=3.0, alpha=.4, c='k')

    # Reduced chi2 curves
    rchi2 = sum_bin['chi2_' + cov_type] / (len(sum_bin['dsigma']) - 1)

    # Best-fit scatter and its uncertainty
    ax.axvline(sum_bin['sig_med_' + cov_type], linewidth=2.0, alpha=0.4,
               linestyle='--', color='k')
    ax.fill_between(
        [sum_bin['sig_low_' + cov_type], sum_bin['sig_upp_' + cov_type]],
        [0, 0], [np.max(rchi2) * 1.2, np.max(rchi2) * 1.2],
        color=color_bins[bin_num], alpha=0.2)

    if ref_sig is not None:
        ax.axvline(ref_sig, linewidth=3.0, alpha=0.5, linestyle='-.', color='k')

    # Reduced chi2 curves, one marker style per simulation
    sims = sum_bin['simulation']
    markers = cycle(['o', 's', 'h', '8', '+'])
    for sim in np.unique(sims):
        mask = sims == sim
        ax.scatter(
            sum_bin['sigma'][mask], rchi2[mask], marker=next(markers),
            s=60, alpha=0.8, facecolor=color_bins[bin_num],
            edgecolor='grey', linewidth=1.0, label=label)

    ax.scatter(sum_bin['sigma'][sum_bin['idx_med_' + cov_type]],
               rchi2[sum_bin['idx_med_' + cov_type]],
               marker='o', s=100, alpha=1.0,
               facecolor=color_bins[bin_num], edgecolor='k', linewidth=1.0,
               label=r'__no_label__')

    ax.set_xlim(0.00, np.max(sum_bin['sigma']) * 1.09)
    ax.set_ylim(0.01, np.max(rchi2) * 1.19)

    sig_best = sum_bin['sig_med_' + cov_type]
    sig_upp = sum_bin['sig_upp_' + cov_type]
    sig_low = sum_bin['sig_low_' + cov_type]
    if sig_best <= 0.65:
        _ = ax.text(
            sig_best + 0.05, np.max(rchi2) * 0.95,
            r'$\sigma={:4.2f}^{{+{:4.2f}}}_{{-{:4.2f}}}$'.format(
                sig_best, sig_upp - sig_best, sig_best - sig_low),
            fontsize=25)
    else:
        _ = ax.text(
            sig_best - 0.45, np.max(rchi2) * 0.95,
            r'$\sigma={:4.2f}^{{+{:4.2f}}}_{{-{:4.2f}}}$'.format(
                sig_best, sig_upp - sig_best, sig_best - sig_low),
            fontsize=25)

    if show_bin:
        _ = ax.text(0.07, 0.87, r'$\rm Bin\ {:1d}$'.format(bin_num + 1),
                    fontsize=35, transform=ax.transAxes)

    if xlabel:
        _ = ax.set_xlabel(r'$\sigma_{\mathcal{M} | \mathcal{O}}$', fontsize=30)
    else:
        _ = ax.set_xticklabels([])

    if ylabel:
        _ = ax.set_ylabel(r'$\rm Reduced\ \chi^2$', fontsize=30)
    else:
        _ = ax.set_yticklabels([])

    # Return the figure only when it was created inside this function;
    # otherwise return the axes that were passed in.
    if fig is not None:
        return fig
    return ax
6,093
def configure_logger(): """Default log format""" log_header = "%(asctime)s [bold cyan]%(levelname)s[/] [yellow]-[/] [royal_blue1]%(name)s[/] [yellow]-[/]" log_body = "%(message)s [yellow]([/][chartreuse4]%(filename)s[/]:%(lineno)d[yellow])[/]" log_format = f"{log_header} {log_body}" logging.basicConfig( level=logging.INFO, format=log_format, datefmt="[%X]", handlers=[ RichHandler(show_time=False, show_level=False, markup=True, tracebacks_show_locals=True, rich_tracebacks=True)] )
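A usage sketch, assuming the module-level imports the function relies on (logging and rich's RichHandler) are available:

import logging

configure_logger()
logging.getLogger("demo").info("logging configured")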
6,094
def compute_importance(model, sequences, tasks, score_type='gradient_input',
                       find_scores_layer_idx=0,
                       target_layer_idx=-2,
                       reference_gc=0.46,
                       reference_shuffle_type=None,
                       num_refs_per_seq=10):
    """
    Computes importance scores for the given sequences and tasks.

    reference_shuffle_type in ['random', 'dinuc']
    reference_gc = 0 will return a numpy array of 0s
    reference_gc < 1 will assign each G and C a frequency of reference_gc/2
    """
    ### Compute importance scores
    print('Calculating Importance Scores')

    importance_method = {
        "deeplift": deeplift.blobs.NonlinearMxtsMode.DeepLIFT_GenomicsDefault,
        "rescale_all_layers": deeplift.blobs.NonlinearMxtsMode.Rescale,
        "revealcancel_all_layers": deeplift.blobs.NonlinearMxtsMode.RevealCancel,
        "gradient_input": deeplift.blobs.NonlinearMxtsMode.Gradient,
        "guided_backprop": deeplift.blobs.NonlinearMxtsMode.GuidedBackprop,
        "deconv": deeplift.blobs.NonlinearMxtsMode.DeconvNet
    }

    importance_model = kc.convert_sequential_model(
        model, nonlinear_mxts_mode=importance_method[score_type])

    importance_func = importance_model.get_target_contribs_func(
        find_scores_layer_idx=find_scores_layer_idx,
        target_layer_idx=target_layer_idx)

    (reference, new_importance_func) = get_reference(
        sequences, importance_func, gc_fraction=reference_gc,
        shuffle=reference_shuffle_type, seed=1)

    importance_score_dict = {}
    for task in tasks:
        if reference is None:
            from importlib import reload
            import dfim
            import dfim.util
            reload(dfim.util)
            seq_fastas = dfim.util.convert_one_hot_to_fasta(sequences)
            scores = np.array(new_importance_func(task_idx=task,  # was 0
                                                  input_data_sequences=seq_fastas,
                                                  num_refs_per_seq=num_refs_per_seq,
                                                  batch_size=10,
                                                  progress_update=1000))
        else:
            scores = np.array(new_importance_func(task_idx=task,
                                                  input_data_list=[sequences],
                                                  batch_size=10,
                                                  progress_update=1000,
                                                  input_references_list=[reference]))

        importance_score_dict[task] = scores * sequences

    return importance_score_dict
6,095
def test_check_non_existing() -> None: """Test a check on a non-existing column.""" class Schema(pa.SchemaModel): a: Series[int] @pa.check("nope") @classmethod def int_column_lt_100(cls, series: pd.Series) -> Iterable[bool]: return series < 100 err_msg = ( "Check int_column_lt_100 is assigned to a non-existing field 'nope'" ) with pytest.raises(pa.errors.SchemaInitError, match=err_msg): Schema.to_schema()
6,096
def xy_slicing_animation(filename, Z_direction, number_slices, x_actual, y_actual, x_size, y_size):
    """XY slicing animation; the number of frames depends on the number_slices input."""
    if Z_direction == "up":
        Z_dir = create_pslist.create_pslist(filename, x_size, y_size)[2]
    else:
        Z_dir = create_pslist.create_pslist(filename, x_size, y_size)[1]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlim(left=0, right=x_size)
    ax.set_ylim(top=y_size, bottom=0)
    ax.set_zlim(bottom=Z_dir.min(), top=Z_dir.max())
    ax.set_xlabel('X(nm)', fontsize=15)
    ax.set_ylabel('Y(nm)', fontsize=15)
    ax.set_zlabel('Z(nm)', fontsize=15)
    ax.set_title('XY Slicing Animation for the AFM Phase Shift', fontsize=20)
    #----------------------------------------------------------------------------------------------------------------
    ims = []
    pslist = create_pslist.create_pslist(filename, x_size, y_size)[0]
    for add in np.arange(number_slices):
        a = np.linspace(0, x_actual, x_size)
        b = np.linspace(0, y_actual, y_size)
        slice_idx = add * (Z_dir.size // number_slices)
        c = Z_dir.iloc[slice_idx]
        x, z, y = np.meshgrid(a, c, b)
        # Collect the phase-shift value of every (x, y) point in this slice
        ps_frame = pd.DataFrame(pslist[slice_idx])
        psasas = []
        for k in range(x_size):
            for i in range(y_size):
                psasas.append(ps_frame[k][i])
        # ArtistAnimation expects a sequence of artists per frame
        ims.append([ax.scatter(x, y, z, c=psasas, s=6)])
    im_ani = animation.ArtistAnimation(fig, ims, interval=500, blit=True)
    plt.show()
    im_ani.save('XY Slice.htm', metadata={'artist': 'Guido'})
    return
6,097
def update_user(user):
    """ Update the user's aggregated account information. """
    accounts = list(Account.query({'user': user}))
    u = User.query_one({'_id': user})
    for key, value in accounts_summary(accounts).items():
        setattr(u, key, value)
    u.num_accounts = len(accounts)
    u.num_exchanges = len(set(a.exchange for a in accounts))
    u.upsert()
6,098
def fetch_url(url): """Fetches the specified URL. :param url: The URL to fetch :type url: string :returns: The response object """ return requests.get(url)
6,099