Dataset columns: content (string, lengths 22 to 815k characters), id (int64, 0 to 4.91M)
def isValidListOrRulename(word: str) -> bool:
    """Test that a list name or rule name contains no accented characters,
    so only ASCII letters, digits, - and _ are allowed.
    """
    return bool(reValidName.match(word))
4,900
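The snippet above relies on a module-level `reValidName` pattern that is not shown. A minimal standalone sketch of how that pattern and the check fit together, assuming the rule stated in the docstring (ASCII letters, digits, `-` and `_`); the regex here is an illustration, not the project's actual definition:

import re

# Hypothetical pattern matching the documented rule: ASCII letters, digits, - and _
reValidName = re.compile(r'^[A-Za-z0-9_-]+$')

def isValidListOrRulename(word: str) -> bool:
    """Return True only for plain ASCII list/rule names."""
    return bool(reValidName.match(word))

print(isValidListOrRulename("my_rule-1"))   # True
print(isValidListOrRulename("règle"))       # False (accented character)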
def capture(source, img_num, raw_path, grayscale_path):
    """
    :param source: video source
    :param img_num: number of face images to capture
    :param raw_path: directory where the raw frames are saved
    :param grayscale_path: directory where the grayscale face crops are saved
    :return: None
    """
    # Open the video source and track how many face images have been captured
    cap = cv2.VideoCapture(source)
    img_count = 0
    # Start capturing
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        # Convert the frame to grayscale
        grayscale_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faceRects = classifier.detectMultiScale(grayscale_image, 1.3, 5)
        # If len(faceRects) > 0, at least one face was detected
        if len(faceRects) > 0:
            # Iterate over every face in the frame
            for face in faceRects:
                img_count += 1
                # Get the face coordinates
                x, y, w, h = face
                # Draw a rectangle around the face
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                # Show how many faces have been captured so far versus the target number
                cv2.putText(frame, "{}/{}".format(img_count, img_num), (x - 40, y - 20),
                            font, 1, (0, 0, 255), 2)
                # Save the raw frame and the cropped grayscale face
                raw_filename = "{}/{}.jpg".format(raw_path, img_count)
                grayscale_filename = "{}/{}.jpg".format(grayscale_path, img_count)
                cv2.imwrite(raw_filename, frame)
                cv2.imwrite(grayscale_filename, grayscale_image[y: y + h, x: x + w])
        # Visualize with OpenCV
        cv2.imshow("Capture", frame)
        c = cv2.waitKey(10)
        # Press 'q' to quit
        if c & 0xFF == ord('q'):
            break
        if img_count >= img_num:
            break
    # Release resources
    cap.release()
    cv2.destroyAllWindows()
4,901
def glyph_has_ink(font: TTFont, name: Text) -> bool:
    """Checks if the specified glyph has any ink.

    That is, that it has at least one defined contour associated with it.
    Composites are considered to have ink if any of their components have ink.

    Args:
        font: the font
        name: The name of the glyph to check for ink.

    Returns:
        True if the glyph has at least one contour associated with it.
    """
    if 'glyf' in font:
        return ttf_glyph_has_ink(font, name)
    elif ('CFF ' in font) or ('CFF2' in font):
        return cff_glyph_has_ink(font, name)
    else:
        raise Exception("Could not find 'glyf', 'CFF ', or 'CFF2' table.")
4,902
def test_list_algorithms(client):
    """Test get list of active learning models"""
    response = client.get("/api/algorithms")
    json_data = response.get_json()

    assert "classifier" in json_data.keys()
    assert "name" in json_data["classifier"][0].keys()
    assert isinstance(json_data, dict)
4,903
async def async_setup_entry(hass, entry, async_add_entities):
    """
    Set up n3rgy data sensor
    :param hass: hass object
    :param entry: config entry
    :param async_add_entities: callback to register sensor entities
    :return: none
    """
    # in-line function
    async def async_update_data():
        """
        Fetch data from n3rgy API
        This is the place to pre-process the data to lookup tables
        so entities can quickly look up their data
        :param: none
        :return: power consumption data
        """
        return await hass.async_add_executor_job(read_consumption, api, entry)

    async def async_initialize():
        """
        Initialize objects from n3rgy API
        :param: none
        :return: data coordinator, device type
        """
        coordinator = DataUpdateCoordinator(
            hass,
            _LOGGER,
            name=PLATFORM,
            update_method=async_update_data
        )

        # fetch initial data so we have data when entities subscribe
        sensor_name, device_type = await hass.async_add_executor_job(get_device_info, api, entry)
        await coordinator.async_refresh()
        return (coordinator, sensor_name, device_type)

    # initialize n3rgy API
    device_type = None
    api = init_api_client(entry)

    # grant consent options
    if GRANT_CONSENT_READY:
        # grant consent is enabled for live environment
        if process_grant_consent(entry):
            coordinator, sensor_name, device_type = await async_initialize()
    else:
        # grant consent is disabled
        coordinator, sensor_name, device_type = await async_initialize()

    # add sensor
    async_add_entities([N3rgySensor(coordinator, sensor_name, device_type)], False)
4,904
def main():
    """
    Process command line arguments and run the script
    """
    bp = BrPredMetric()
    result = bp.Run()

    return result
4,905
def step(init_distr, D):
    """Advance the distribution one step: replace each value in init_distr
    with a draw from the callable in D indexed by that value."""
    for k in init_distr.keys():
        init_distr[k] = D[init_distr[k]]()
    return init_distr
4,906
def from_json(data: JsonDict) -> AttributeType: """Make an attribute type from JSON data (deserialize) Args: data: JSON data from Tamr server """ base_type = data.get("baseType") if base_type is None: logger.error(f"JSON data: {repr(data)}") raise ValueError("Missing required field 'baseType'.") if base_type == Boolean._tag: return BOOLEAN elif base_type == Double._tag: return DOUBLE elif base_type == Int._tag: return INT elif base_type == Long._tag: return LONG elif base_type == String._tag: return STRING elif base_type == Array._tag: inner_type = data.get("innerType") if inner_type is None: logger.error(f"JSON data: {repr(data)}") raise ValueError("Missing required field 'innerType' for Array type.") return Array(inner_type=from_json(inner_type)) elif base_type == Map._tag: inner_type = data.get("innerType") if inner_type is None: logger.error(f"JSON data: {repr(data)}") raise ValueError("Missing required field 'innerType' for Map type.") return Map(inner_type=from_json(inner_type)) elif base_type == Record._tag: attributes = data.get("attributes") if attributes is None: logger.error(f"JSON data: {repr(data)}") raise ValueError("Missing required field 'attributes' for Record type.") return Record( attributes=tuple([subattribute.from_json(attr) for attr in attributes]) ) else: logger.error(f"JSON data: {repr(data)}") raise ValueError(f"Unrecognized 'baseType': {base_type}")
4,907
def ftduino_find_by_name(name):
    """
    Returns the path of the ftDuino with the specified `name`.

    :param name: Name of the ftDuino.
    :return: The path of the ftDuino or ``None`` if the ftDuino was not found.
    """
    for path, device_name in ftduino_iter():
        if device_name == name:
            return path
    return None
4,908
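A short usage sketch for the lookup above. The real `ftduino_iter` enumerates attached devices; the stub below stands in for it so the example runs without hardware, and the device names and paths are invented:

def ftduino_iter():
    # Stand-in for the real enumeration of attached ftDuinos
    yield "/dev/ttyUSB0", "bench-ftduino"
    yield "/dev/ttyUSB1", "robot-ftduino"

def ftduino_find_by_name(name):
    for path, device_name in ftduino_iter():
        if device_name == name:
            return path
    return None

print(ftduino_find_by_name("robot-ftduino"))  # /dev/ttyUSB1
print(ftduino_find_by_name("missing"))        # None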
def init_config_file(
    odin_config_full_path,
    circuit_list,
    architecture_file,
    output_netlist,
    memory_addr_width,
    min_hard_mult_size,
    min_hard_adder_size,
):
    """initializing the raw odin config file"""
    # Update the config file
    file_replace(
        odin_config_full_path,
        {
            "YYY": architecture_file,
            "ZZZ": output_netlist,
            "PPP": memory_addr_width,
            "MMM": min_hard_mult_size,
            "AAA": min_hard_adder_size,
        },
    )

    # loading the given config file
    config_file = ET.parse(odin_config_full_path)
    root = config_file.getroot()

    # based on the base config file
    verilog_files_tag = root.find("verilog_files")
    # remove the template line XXX, verilog_files_tag[0] is a comment
    verilog_files_tag.remove(verilog_files_tag[0])
    for circuit in circuit_list:
        verilog_file = ET.SubElement(verilog_files_tag, "verilog_file")
        verilog_file.tail = "\n\n\t" if (circuit == circuit_list[-1]) else "\n\n\t\t"
        verilog_file.text = circuit

    # update the config file with new values
    config_file.write(odin_config_full_path)
4,909
def test_from_str_returns_ulid_instance(api, valid_bytes_128):
    """
    Assert that :func:`~ulid.api.from_str` returns a new :class:`~ulid.ulid.ULID` instance
    from the given bytes.
    """
    value = base32.encode(valid_bytes_128)
    instance = api.from_str(value)
    assert isinstance(instance, ulid.ULID)
    assert instance.bytes == valid_bytes_128
4,910
def test_nonable_fields(declaration): """Tests that nonable fields are supported and correctly handled""" if declaration == 'typing': from typing import Optional class Foo(object): a = field(type_hint=Optional[int], check_type=True) b = field(type_hint=Optional[int], validators={'is positive': lambda x: x > 0}) c = field(nonable=False, check_type=True) d = field(validators={'accept_all': lambda x: True}) e = field(nonable=False) elif declaration == 'default_value': class Foo(object): a = field(type_hint=int, default=None, check_type=True) b = field(type_hint=int, default=None, validators={'is positive': lambda x: x > 0}) c = field(nonable=False, check_type=True) d = field(validators={'accept_all': lambda x: True}) e = field(nonable=False) elif declaration == 'explicit_nonable': class Foo(object): a = field(type_hint=int, nonable=True, check_type=True) b = field(type_hint=int, nonable=True, validators={'is positive': lambda x: x > 0}) c = field(nonable=False, check_type=True) d = field(validators={'accept_all': lambda x: True}) e = field(nonable=False) else: raise ValueError(declaration) f = Foo() f.a = None f.b = None with pytest.raises(NoneError): f.c = None f.d = None f.e = None assert vars(f) == {'_a': None, '_b': None, '_d': None, 'e': None}
4,911
def spectral_entropy (Sxx, fn, flim=None, display=False) : """ Compute different entropies based on the average spectrum, its variance, and its maxima [1]_ [2]_ Parameters ---------- Sxx : ndarray of floats Spectrogram (2d). It is recommended to work with PSD to be consistent with energy conservation fn : 1d ndarray of floats frequency vector flim : tupple (fmin, fmax), optional, default is None Frequency band used to compute the spectral entropy. For instance, one may want to compute the spectral entropy for the biophony bandwidth display : boolean, optional, default is False Display the different spectra (mean, variance, covariance, max...) Returns ------- EAS : scalar Entropy of Average Spectrum ECU : scalar Entropy of spectral variance (along the time axis for each frequency) ECV : scalar Entropy of Coefficient of Variation (along the time axis for each frequency) EPS : scalar Entropy of spectral maxima (peaks) EPS_KURT : scalar Kurtosis of spectral maxima EPS_SKEW : scalar Skewness of spectral maxima References ---------- .. [1] TOWSEY, Michael W. The calculation of acoustic indices derived from long-duration recordings of the natural environment. 2017. https://eprints.qut.edu.au/110634/1/QUTePrints110634_TechReport_Towsey2017August_AcousticIndices%20v3.pdf .. [2] QUT : https://github.com/QutEcoacoustics/audio-analysis. Michael Towsey, Anthony Truskinger, Mark Cottman-Fields, & Paul Roe. (2018, March 5). Ecoacoustics Audio Analysis Software v18.03.0.41 (Version v18.03.0.41). Zenodo. http://doi.org/10.5281/zenodo.1188744 Examples -------- >>> s, fs = maad.sound.load('../data/cold_forest_daylight.wav') >>> Sxx_power, tn, fn, _ = maad.sound.spectrogram (s, fs) >>> EAS, ECU, ECV, EPS, EPS_KURT, EPS_SKEW = maad.features.spectral_entropy(Sxx_power, fn, flim=(2000,10000)) >>> print('EAS: %2.2f / ECU: %2.2f / ECV: %2.2f / EPS: %2.2f / EPS_KURT: %2.2f / EPS_SKEW: %2.2f' % (EAS, ECU, ECV, EPS, EPS_KURT, EPS_SKEW)) EAS: 0.27 / ECU: 0.49 / ECV: 0.24 / EPS: 1.00 / EPS_KURT: 17.58 / EPS_SKEW: 3.55 """ if isinstance(flim, numbers.Number) : print ("WARNING: flim must be a tupple (fmin, fmax) or None") return if flim is None : flim=(fn.min(),fn.max()) # select the indices corresponding to the frequency range iBAND = index_bw(fn, flim) # force Sxx to be an ndarray X = np.asarray(Sxx) # TOWSEY : only on the bio band # EAS [TOWSEY] # #### COMMENT : Result a bit different due to different Hilbert implementation X_mean = mean(X[iBAND], axis=1) Hf = entropy(X_mean) EAS = 1 - Hf #### Entropy of spectral variance (along the time axis for each frequency) """ ECU [TOWSEY] """ X_Var = var(X[iBAND], axis=1) Hf_var = entropy(X_Var) ECU = 1 - Hf_var #### Entropy of coefficient of variance (along the time axis for each frequency) """ ECV [TOWSEY] """ X_CoV = var(X[iBAND], axis=1)/mean(X[iBAND], axis=1) Hf_CoV = entropy(X_CoV) ECV = 1 - Hf_CoV #### Entropy of spectral maxima """ EPS [TOWSEY] """ ioffset = np.argmax(iBAND==True) Nbins = sum(iBAND==True) imax_X = np.argmax(X[iBAND],axis=0) + ioffset imax_X = fn[imax_X] max_X_bin, bin_edges = np.histogram(imax_X, bins=Nbins, range=flim) if sum(max_X_bin) == 0 : max_X_bin = np.zeros(len(max_X_bin)) EPS = float('nan') #### Kurtosis of spectral maxima EPS_KURT = float('nan') #### skewness of spectral maxima EPS_SKEW = float('nan') else: max_X_bin = max_X_bin/sum(max_X_bin) Hf_fmax = entropy(max_X_bin) EPS = 1 - Hf_fmax #### Kurtosis of spectral maxima EPS_KURT = kurtosis(max_X_bin) #### skewness of spectral maxima EPS_SKEW = skewness(max_X_bin) if display: fig, ax = 
plt.subplots() ax.plot(fn[iBAND], X_mean/max(X_mean),label="Normalized mean") plt.plot(fn[iBAND], X_Var/max(X_Var),label="Normalized variance") ax.plot(fn[iBAND], X_CoV/max(X_CoV),label="Normalized covariance") ax.plot(fn[iBAND], max_X_bin/max(max_X_bin),label="Normalized Spectral max") ax.set_title('Signals') ax.set_xlabel('Frequency [Hz]') ax.legend() return EAS, ECU, ECV, EPS, EPS_KURT, EPS_SKEW
4,912
def get_pmt_numbers(channels, modules, pmts_buffer, pmt_lookup):
    """Fills pmts_buffer with pmt numbers corresponding to channels, modules
    according to pmt_lookup matrix:
     - pmt_lookup: lookup matrix for pmt numbers. First index is digitizer module,
       second is digitizer channel.
    Modifies pmts_buffer in-place.
    """
    for i in range(len(channels)):
        pmts_buffer[i] = pmt_lookup[modules[i], channels[i]]
4,913
def _percentages(self):
    """
    An extension method for Counter that returns a
    dict mapping the keys of the Counter to their percentages.

    :param self: Counter
    :return: a dict mapping the keys of the Counter to their percentages
    """
    # type: () -> dict[any, float]
    length = float(sum(count for count in self.viewvalues()))
    return {value: self[value] / length for value in self}
4,914
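A worked example of the same idea. The snippet above targets Python 2 (`viewvalues`); the standalone version below uses Python 3's `values()` instead and shows the resulting proportions:

from collections import Counter

def percentages(counter):
    # Python 3 equivalent: values() replaces viewvalues()
    length = float(sum(counter.values()))
    return {value: counter[value] / length for value in counter}

c = Counter("aabbbc")
print(percentages(c))  # {'a': 0.333..., 'b': 0.5, 'c': 0.166...}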
def find_best_word_n(draw, nb_letters, path):
    """Return the lexicon entries of length nb_letters that can be formed
    from the given draw of letters."""
    lexicon = get_lexicon(path, nb_letters)
    mask = [is_word_in_draw(draw, word) for word in lexicon["draw"]]
    lexicon = lexicon.loc[mask]
    return lexicon
4,915
def feature_reader(path):
    """
    Reading the feature matrix stored as JSON from the disk.
    :param path: Path to the JSON file.
    :return out_features: Dict with index and value tensor.
    """
    features = json.load(open(path))
    features = {int(k): [int(val) for val in v] for k, v in features.items()}
    return features
4,916
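A self-contained usage sketch for the reader above: it writes a tiny feature file to a temporary directory and reads it back, showing how string keys become integer keys. The file name and contents are invented for illustration:

import json
import os
import tempfile

def feature_reader(path):
    features = json.load(open(path))
    return {int(k): [int(val) for val in v] for k, v in features.items()}

tmp = os.path.join(tempfile.mkdtemp(), "features.json")
with open(tmp, "w") as fh:
    json.dump({"0": [3, 17], "1": [5]}, fh)

print(feature_reader(tmp))  # {0: [3, 17], 1: [5]}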
def locate_all_occurrence(l, e):
    """
    Return indices of all element occurrences in given list

    :param l: given list
    :type l: list
    :param e: element to locate
    :return: indices of all occurrences
    :rtype: list
    """
    return [i for i, x in enumerate(l) if x == e]
4,917
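A quick example of the helper above on a small list, showing both the matching and the no-match cases:

def locate_all_occurrence(l, e):
    return [i for i, x in enumerate(l) if x == e]

print(locate_all_occurrence([2, 0, 2, 4, 2], 2))  # [0, 2, 4]
print(locate_all_occurrence([2, 0, 2, 4, 2], 9))  # []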
def UVectorFromAngles(reflection): """ Calculate the B&L U vector from bisecting geometry angles """ u = np.zeros((3,), dtype='float64') # The tricky bit is set again: Busing & Levy's omega is 0 in # bisecting position. This is why we have to correct for # stt/2 here om = np.deg2rad(reflection['om'] - reflection['stt']/2.) chi = np.deg2rad(reflection['chi']) phi = np.deg2rad(reflection['phi']) u[0] = cos(om) * cos(chi) * cos(phi) - sin(om) * sin(phi) u[1] = cos(om) * cos(chi) * sin(phi) + sin(om) * cos(phi) u[2] = cos(om) * sin(chi) return u
4,918
def to_pillow_image(img_array, image_size=None):
    """Convert an image represented as a numpy array back into a
    Pillow Image object."""
    if isinstance(image_size, (numbers.Integral, np.integer)):
        image_size = (image_size, image_size)
    img_array = skimage.img_as_ubyte(img_array)
    img = pil_image.fromarray(img_array)
    if image_size:
        img = img.resize((image_size[1], image_size[0]), pil_image.LANCZOS)
    return img
4,919
def new_session_dir(rootdir, pid, sid):
    """
    Creates a path to a new session directory.
    Example:
        <DATA_ROOT>/p0/session_2014-08-12_p0_arm1
    """
    date_str = datetime.date.today().strftime('%Y-%m-%d')
    session_dir = os.path.join(
        rootdir,
        pid,
        'session_' + date_str + '_' + pid + '_' + sid)
    return (session_dir, date_str)
4,920
def calibrateImage(contours, img, arm1, outfile): """ Perform camera calibration using both images. This code saves the camera pixels (cX,cY) and the robot coordinates (the (pos,rot) for ONE arm) all in one pickle file. Then, not in this code, we run regression to get our desired mapping from pixel space to robot space. Whew. It's manual, but worth it. I put numbers to indicate how many we've saved. DO ONE SAVE PER CONTOUR so that I can get a correspondence with left and right images after arranging pixels in the correct ordering (though I don't think I have to do that). """ utils.move(arm1, HOME_POS, ROTATION, 'Fast') arm1.close_gripper() print("(after calling `home`) psm1 current position: {}".format( arm1.get_current_cartesian_position())) print("len(contours): {}".format(len(contours))) num_saved = 0 for i, (cX, cY, approx, peri) in enumerate(contours): if utils.filter_point(cX, cY, 500, 1500, 75, 1000): continue image = img.copy() # Deal with the image and get a visual. Keep clicking ESC key until we see a circle. cv2.circle(image, (cX,cY), 50, (0,0,255)) cv2.drawContours(image, [approx], -1, (0,255,0), 3) cv2.putText(img=image, text=str(num_saved), org=(cX,cY), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0,0,0), thickness=2) cv2.imshow("Contour w/{} saved so far out of {}.".format(num_saved,i), image) key1 = cv2.waitKey(0) if key1 not in utils.ESC_KEYS: # We have a circle. Move arm to target. The rotation is off, but we command it to rotate. frame = arm1.get_current_cartesian_position() utils.move(arm=arm1, pos=frame.position[:3], rot=ROTATION, SPEED_CLASS='Slow') # Now the human re-positions it to the center. cv2.imshow("Here's where we are after generic movement + rotation. Now correct it!", image) key2 = cv2.waitKey(0) # Get position and orientation of the arm, save, & reset. pos, rot = utils.lists_of_pos_rot_from_frame( arm1.get_current_cartesian_position() ) a1 = (pos, rot, cX, cY) print("contour {}, a1={}".format(i,a1)) else: print("(not storing contour {} on the left)".format(i)) utils.move(arm1, HOME_POS, ROTATION, 'Fast') arm1.close_gripper() # Only store this contour if both keys were not escape keys. if key1 not in utils.ESC_KEYS: utils.storeData(outfile, a1) num_saved += 1 cv2.destroyAllWindows()
4,921
def getter_collector(): # noqa """ This function is the main function of the toolkit. It performs two roles: 1) Collects configurations for all devices in the inventory, based on NAPALM support. These configurations are saved in the configs/ directory using the following convention: <hostname>/<filter_name>.txt 2) Performs a collection of supported getters based on the official NAPALM supported filter list: https://napalm.readthedocs.io/en/latest/support/ It has been written in a way whereby one simply updates the appropriate <os>_getters list to add or remove supported getters. All getters are stored in the facts/ directory using the following convention: <hostname>/<filter_name>.json """ """ The following block of code is used to generate a log file in a directory. These log files will indicate the success/failure of filter collector for retrospective analysis. """ # Capture time cur_time = dt.datetime.now() # Cleanup time, so that the format is clean for the output file 2019-07-01-13-04-59 fmt_time = cur_time.strftime("%Y-%m-%d-%H-%M-%S") # Set log directory variable log_dir = "logs" # Create log directory if it doesn't exist. pathlib.Path(log_dir).mkdir(exist_ok=True) # Create log file name, with timestamp in the name filename = str("DISCOVERY-LOG") + "-" + fmt_time + ".txt" # Join the log file name and log directory together into a variable log_file_path = log_dir + "/" + filename # Create the log file log_file = open(log_file_path, "w") # Start of logging output print(f"{Fore.MAGENTA}STARTING DISCOVERY: " + str(fmt_time)) log_file.write("STARTING DISCOVERY: " + str(fmt_time) + "\n\n") """ Initialise two counters, so that success and failure can be counted and incremented as the getters are collected. """ # Success Counter success_count = 0 # Fail Counter fail_count = 0 # Initialize Nornir and define the inventory variables. nr = InitNornir( inventory={ "options": { "host_file": "inventory/hosts.yaml", "group_file": "inventory/groups.yaml", "defaults_file": "inventory/defaults.yaml", } } ) # Set default username and password from environmental variables. 
nr.inventory.defaults.username = env_uname nr.inventory.defaults.password = env_pword """ The following block of lists are the supported getters per OS based on the website https://napalm.readthedocs.io/en/latest/support/ """ # IOS supported getters ios_getters = [ "arp_table", "bgp_neighbors", "bgp_neighbors_detail", "environment", "facts", "interfaces", "interfaces_counters", "interfaces_ip", "ipv6_neighbors_table", "lldp_neighbors", "lldp_neighbors_detail", "mac_address_table", "network_instances", "ntp_peers", "ntp_servers", "ntp_stats", "optics", "snmp_information", "users", ] # JUNOS supported getters junos_getters = [ "arp_table", "bgp_config", "bgp_neighbors", "bgp_neighbors_detail", "environment", "facts", "interfaces", "interfaces_counters", "interfaces_ip", "ipv6_neighbors_table", "lldp_neighbors", "lldp_neighbors_detail", "mac_address_table", "network_instances", "ntp_peers", "ntp_servers", "ntp_stats", "optics", "snmp_information", "users", ] # EOS supported getters eos_getters = [ "arp_table", "bgp_config", "bgp_neighbors", "bgp_neighbors_detail", "environment", "facts", "interfaces", "interfaces_counters", "interfaces_ip", "lldp_neighbors", "lldp_neighbors_detail", "mac_address_table", "network_instances", "ntp_servers", "ntp_stats", "optics", "snmp_information", "users", ] # NXOS supported getters nxos_getters = [ "arp_table", "bgp_neighbors", "facts", "interfaces", "interfaces_ip", "lldp_neighbors", "lldp_neighbors_detail", "mac_address_table", "ntp_peers", "ntp_servers", "ntp_stats", "snmp_information", "users", ] # IOSXR supported getters iosxr_getters = [ "arp_table", "bgp_config", "bgp_neighbors", "bgp_neighbors_detail", "environment", "facts", "interfaces", "interfaces_counters", "interfaces_ip", "lldp_neighbors", "lldp_neighbors_detail", "mac_address_table", "ntp_peers", "ntp_servers", "ntp_stats", "snmp_information", "users", ] """ The following block of code assigns a filter based on platform to a variable. This variable is used later on to apply logic in for loops """ ios_devices = nr.filter(platform="ios") junos_devices = nr.filter(platform="junos") eos_devices = nr.filter(platform="eos") nxos_devices = nr.filter(platform="nxos") iosxr_devices = nr.filter(platform="iosxr") """ The following block of code is a list of config getters which will be iterated over to collect the different config types per OS """ ios_config_getters = ["running", "startup"] junos_config_getters = ["running", "candidate"] eos_config_getters = ["running", "startup"] nxos_config_getters = ["running", "startup"] iosxr_config_getters = ["running", "startup"] """ The following block is the main component of the program. Each OS collects the running config, all supported getters and the startup/candidate config based on the OS. Each OS block is as uniform as possible. """ # IOS Platform Block for host in ios_devices.inventory.hosts.items(): # Assign the hostname to a variable from the host tuple hostname = host[0] # Starting processing of a host print(f"{Fore.MAGENTA}** Start Processing Host: " + str(hostname)) log_file.write("** Start Processing Host: " + str(hostname) + "\n") for config in ios_config_getters: # Start collecting the config getters print("Processing " + str(config) + " config ... ") log_file.write("Processing " + str(config) + " config ... 
" + "\n") # Execute the collect_config function configs = nr.run(task=collect_config, getter=config, on_failed=True) """ Access the specific 'napalm_get' result out of the collect_getters function and store whether the failed boolean is True (failure) or False (success) """ configs_results = configs[hostname][0].failed # Conditional block to record success/fail count of the 'napalm_get' result if configs_results is True: print( f"{Fore.RED}FAILURE : " + str(hostname) + " - " + str(config) + " config" ) log_file.write( "FAILURE : " + str(hostname) + " - " + str(config) + " config" + "\n" ) fail_count += 1 else: print( f"{Fore.GREEN}SUCCESS : " + str(hostname) + " - " + str(config) + " config" ) log_file.write( "SUCCESS : " + str(hostname) + " - " + str(config) + " config" + "\n" ) success_count += 1 # For block to collect all supported getters for entry in ios_getters: # Start processing getters print("Processing Getter: " + str(entry)) log_file.write("Processing Getter: " + str(entry) + "\n") # Execute collect_getters function getters = nr.run(task=collect_getters, getter=entry, on_failed=True) """ Access the specific 'napalm_get' result out of the collect_getters function and store whether the failed boolean is True (failure) or False (success) """ getters_results = getters[hostname][0].failed # Conditional block to record success/fail count of the 'napalm_get' result if getters_results is True: log_file.write("FAILURE : " + str(hostname) + " - " + str(entry) + "\n") print(f"{Fore.RED}FAILURE : " + str(hostname) + " - " + str(entry)) fail_count += 1 else: log_file.write("SUCCESS : " + str(hostname) + " - " + str(entry) + "\n") print(f"{Fore.GREEN}SUCCESS : " + str(hostname) + " - " + str(entry)) success_count += 1 # Ending processing of host print(f"{Fore.MAGENTA}** End Processing Host: " + str(hostname)) log_file.write("** End Processing Host: " + str(hostname) + "\n\n") # EOS Platform Block for host in eos_devices.inventory.hosts.items(): # Assign the hostname to a variable from the host tuple hostname = host[0] # Starting processing of a host print(f"{Fore.MAGENTA}** Start Processing Host: " + str(hostname)) log_file.write("** Start Processing Host: " + str(hostname) + "\n") for config in eos_config_getters: # Start collecting the config getters print("Processing " + str(config) + " config ... ") log_file.write("Processing " + str(config) + " config ... 
" + "\n") # Execute the collect_config function configs = nr.run(task=collect_config, getter=config, on_failed=True) """ Access the specific 'napalm_get' result out of the collect_getters function and store whether the failed boolean is True (failure) or False (success) """ configs_results = configs[hostname][0].failed # Conditional block to record success/fail count of the 'napalm_get' result if configs_results is True: print( f"{Fore.RED}FAILURE : " + str(hostname) + " - " + str(config) + " config" ) log_file.write( "FAILURE : " + str(hostname) + " - " + str(config) + " config" + "\n" ) fail_count += 1 else: print( f"{Fore.GREEN}SUCCESS : " + str(hostname) + " - " + str(config) + " config" ) log_file.write( "SUCCESS : " + str(hostname) + " - " + str(config) + " config" + "\n" ) success_count += 1 # For block to collect all supported getters for entry in eos_getters: # Start processing getters print("Processing Getter: " + str(entry)) log_file.write("Processing Getter: " + str(entry) + "\n") # Execute collect_getters function getters = nr.run(task=collect_getters, getter=entry, on_failed=True) """ Access the specific 'napalm_get' result out of the collect_getters function and store whether the failed boolean is True (failure) or False (success) """ getters_results = getters[hostname][0].failed # Conditional block to record success/fail count of the 'napalm_get' result if getters_results is True: log_file.write("FAILURE : " + str(hostname) + " - " + str(entry) + "\n") print(f"{Fore.RED}FAILURE : " + str(hostname) + " - " + str(entry)) fail_count += 1 else: log_file.write("SUCCESS : " + str(hostname) + " - " + str(entry) + "\n") print(f"{Fore.GREEN}SUCCESS : " + str(hostname) + " - " + str(entry)) success_count += 1 # Ending processing of host print(f"{Fore.MAGENTA}** End Processing Host: " + str(hostname)) log_file.write("** End Processing Host: " + str(hostname) + "\n\n") # NX-OS Platform Block for host in nxos_devices.inventory.hosts.items(): # Assign the hostname to a variable from the host tuple hostname = host[0] # Starting processing of a host print(f"{Fore.MAGENTA}** Start Processing Host: " + str(hostname)) log_file.write("** Start Processing Host: " + str(hostname) + "\n") for config in nxos_config_getters: # Start collecting the config getters print("Processing " + str(config) + " config ... ") log_file.write("Processing " + str(config) + " config ... 
" + "\n") # Execute the collect_config function configs = nr.run(task=collect_config, getter=config, on_failed=True) """ Access the specific 'napalm_get' result out of the collect_getters function and store whether the failed boolean is True (failure) or False (success) """ configs_results = configs[hostname][0].failed # Conditional block to record success/fail count of the 'napalm_get' result if configs_results is True: print( f"{Fore.RED}FAILURE : " + str(hostname) + " - " + str(config) + " config" ) log_file.write( "FAILURE : " + str(hostname) + " - " + str(config) + " config" + "\n" ) fail_count += 1 else: print( f"{Fore.GREEN}SUCCESS : " + str(hostname) + " - " + str(config) + " config" ) log_file.write( "SUCCESS : " + str(hostname) + " - " + str(config) + " config" + "\n" ) success_count += 1 # For block to collect all supported getters for entry in nxos_getters: # Start processing getters print("Processing Getter: " + str(entry)) log_file.write("Processing Getter: " + str(entry) + "\n") # Execute collect_getters function getters = nr.run(task=collect_getters, getter=entry, on_failed=True) """ Access the specific 'napalm_get' result out of the collect_getters function and store whether the failed boolean is True (failure) or False (success) """ getters_results = getters[hostname][0].failed # Conditional block to record success/fail count of the 'napalm_get' result if getters_results is True: log_file.write("FAILURE : " + str(hostname) + " - " + str(entry) + "\n") print(f"{Fore.RED}FAILURE : " + str(hostname) + " - " + str(entry)) fail_count += 1 else: log_file.write("SUCCESS : " + str(hostname) + " - " + str(entry) + "\n") print(f"{Fore.GREEN}SUCCESS : " + str(hostname) + " - " + str(entry)) success_count += 1 # Ending processing of host print(f"{Fore.MAGENTA}** End Processing Host: " + str(hostname) + "\n") log_file.write("** End Processing Host: " + str(hostname) + "\n\n") # JUNOS Platform Block for host in junos_devices.inventory.hosts.items(): # Assign the hostname to a variable from the host tuple hostname = host[0] # Starting processing of a host print(f"{Fore.MAGENTA}** Start Processing Host: " + str(hostname)) log_file.write("** Start Processing Host: " + str(hostname) + "\n") for config in junos_config_getters: # Start collecting the config getters print("Processing " + str(config) + " config ... ") log_file.write("Processing " + str(config) + " config ... 
" + "\n") # Execute the collect_config function configs = nr.run(task=collect_config, getter=config, on_failed=True) """ Access the specific 'napalm_get' result out of the collect_getters function and store whether the failed boolean is True (failure) or False (success) """ configs_results = configs[hostname][0].failed # Conditional block to record success/fail count of the 'napalm_get' result if configs_results is True: print( f"{Fore.RED}FAILURE : " + str(hostname) + " - " + str(config) + " config" ) log_file.write( "FAILURE : " + str(hostname) + " - " + str(config) + " config" + "\n" ) fail_count += 1 else: print( f"{Fore.GREEN}SUCCESS : " + str(hostname) + " - " + str(config) + " config" ) log_file.write( "SUCCESS : " + str(hostname) + " - " + str(config) + " config" + "\n" ) success_count += 1 for entry in junos_getters: # Start processing getters print("Processing Getter: " + str(entry)) log_file.write("Processing Getter: " + str(entry) + "\n") # Execute collect_getters function getters = nr.run(task=collect_getters, getter=entry, on_failed=True) """ Access the specific 'napalm_get' result out of the collect_getters function and store whether the failed boolean is True (failure) or False (success) """ getters_results = getters[hostname][0].failed # Conditional block to record success/fail count of the 'napalm_get' result if getters_results is True: log_file.write("FAILURE : " + str(hostname) + " - " + str(entry) + "\n") print(f"{Fore.RED}FAILURE : " + str(hostname) + " - " + str(entry)) fail_count += 1 else: log_file.write("SUCCESS : " + str(hostname) + " - " + str(entry) + "\n") print(f"{Fore.GREEN}SUCCESS : " + str(hostname) + " - " + str(entry)) success_count += 1 # Ending processing of host print("** End Processing Host: " + str(hostname) + "\n") log_file.write("** End Processing Host: " + str(hostname) + "\n\n") # IOS-XR Platform Block for host in iosxr_devices.inventory.hosts.items(): # Assign the hostname to a variable from the host tuple hostname = host[0] # Starting processing of a host print(f"{Fore.MAGENTA}** Start Processing Host: " + str(hostname)) log_file.write("** Start Processing Host: " + str(hostname) + "\n") for config in iosxr_config_getters: # Start collecting the config getters print("Processing " + str(config) + " config ... ") log_file.write("Processing " + str(config) + " config ... 
" + "\n") # Execute the collect_config function configs = nr.run(task=collect_config, getter=config, on_failed=True) """ Access the specific 'napalm_get' result out of the collect_getters function and store whether the failed boolean is True (failure) or False (success) """ configs_results = configs[hostname][0].failed # Conditional block to record success/fail count of the 'napalm_get' result if configs_results is True: print( f"{Fore.RED}FAILURE : " + str(hostname) + " - " + str(config) + " config" ) log_file.write( "FAILURE : " + str(hostname) + " - " + str(config) + " config" + "\n" ) fail_count += 1 else: print( f"{Fore.GREEN}SUCCESS : " + str(hostname) + " - " + str(config) + " config" ) log_file.write( "SUCCESS : " + str(hostname) + " - " + str(config) + " config" + "\n" ) success_count += 1 # For block to collect all supported getters for entry in iosxr_getters: # Start processing getters print("Processing Getter: " + str(entry)) log_file.write("Processing Getter: " + str(entry) + "\n") # Execute collect_getters function getters = nr.run(task=collect_getters, getter=entry, on_failed=True) """ Access the specific 'napalm_get' result out of the collect_getters function and store whether the failed boolean is True (failure) or False (success) """ getters_results = getters[hostname][0].failed # Conditional block to record success/fail count of the 'napalm_get' result if getters_results is True: log_file.write("FAILURE : " + str(hostname) + " - " + str(entry) + "\n") print(f"{Fore.RED}FAILURE : " + str(hostname) + " - " + str(entry)) fail_count += 1 else: log_file.write("SUCCESS : " + str(hostname) + " - " + str(entry) + "\n") print(f"{Fore.GREEN}SUCCESS : " + str(hostname) + " - " + str(entry)) success_count += 1 # Ending processing of host print("** End Processing Host: " + str(hostname)) log_file.write("** End Processing Host: " + str(hostname) + "\n\n") # Add the two variables together to get a total count into a variable total_count = success_count + fail_count # Provide a summary of the main function and add to log file print("SUMMARY" + "\n") log_file.write("SUMMARY" + "\n\n") print(f"{Fore.GREEN}SUCCESS COUNT : " + str(success_count)) log_file.write("SUCCESS COUNT : " + str(success_count) + "\n") print(f"{Fore.RED}FAILURE COUNT : " + str(fail_count)) log_file.write("FAILURE COUNT : " + str(fail_count) + "\n") print("TOTAL COUNT : " + str(total_count)) log_file.write("TOTAL COUNT : " + str(total_count) + "\n") # Close the log file log_file.close()
4,922
def seq_to_sentence(seq: Iterator[int], vocab: Vocab, ignore: Iterator[int]) -> str:
    """Convert a sequence of integers to a string of (space-separated) words according to a vocabulary.

    :param seq: Iterator[int]
        A sequence of integers (tokens) to be converted.
    :param vocab: Vocab
        A Torchtext Vocab object containing a mapping from integers to strings (words).
    :param ignore: Iterator[int]
        A sequence of integers representing "special tokens" to ignore (convert as blanks).
    :return: str
        The resulting sentence.
    """
    return ' '.join(vocab.itos[i] if vocab.itos[i] not in ignore else '' for i in seq).strip()
4,923
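A small runnable sketch of the conversion above. The real function expects a Torchtext Vocab; here a minimal stand-in with an `itos` list is used, and note that the expression compares the mapped *words* against `ignore`, so the special tokens are passed as strings in this example:

from types import SimpleNamespace

def seq_to_sentence(seq, vocab, ignore):
    return ' '.join(vocab.itos[i] if vocab.itos[i] not in ignore else '' for i in seq).strip()

vocab = SimpleNamespace(itos=['<pad>', '<eos>', 'hello', 'world'])
print(seq_to_sentence([2, 3, 1, 0, 0], vocab, ignore={'<pad>', '<eos>'}))  # "hello world"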
def teardown_module():
    """ teardown any state that was previously setup with a setup_module
    method.
    """
    os.chdir(saved_cwd)
4,924
def p_sub_directory_stmt(p):
    """sub_directory_stmt : SUB_DIRECTORY_ID COLON WORD"""
    p[0] = Node("sub_directory", value=p[3])
4,925
def estimate_exposures(imgs, exif_exp, metadata, method, noise_floor=16, percentile=10, invert_gamma=False, cam=None, outlier='cerman'): """ Exposure times may be inaccurate. Estimate the correct values by fitting a linear system. :imgs: Image stack :exif_exp: Exposure times read from image metadata :metadata: Internal camera metadata dictionary :method: Pick from ['gfxdisp', 'cerman'] :noise_floor: All pixels smaller than this will be ignored :percentile: Use a small percentage of the least noisy pixels for the estimation :invert_gamma: If the images are gamma correct invert to work with linear values :cam: Camera noise parameters for better estimation :return: Corrected exposure times """ assert method in ('gfxdisp', 'cerman') num_exp = len(imgs) assert num_exp > 1, f'Files not found or are invalid: {files}' # Mask out saturated and noisy pixels black_frame = np.tile(metadata['black_level'].reshape(2, 2), (metadata['h']//2, metadata['w']//2)) \ if metadata['raw_format'] else metadata['black_level'] Y = np.maximum(imgs - black_frame, 1e-6) # Add epsilon since we need log(Y) if invert_gamma: max_value = np.iinfo(metadata['dtype']).max Y = (Y / max_value)**(invert_gamma) * max_value if method == 'cerman': ''' L. Cerman and V. Hlavac, “Exposure time estimation for high dynamic range imaging with hand held camera” in Proc. of Computer Vision Winter Workshop, Czech Republic. 2006. ''' from skimage.exposure import histogram, match_histograms rows, cols, m, W = np.zeros((4, 0)) for i in range(num_exp - 1): # Ensure images are sorted in increasing order of exposure time assert all(e1 <= e2 for e1, e2 in zip(exif_exp[:-1], exif_exp[1:])), \ 'Please name the input files in increasing order of exposure time when sorted' im1, im2 = Y[i], Y[i+1] mask = np.stack((im1 + black_frame < metadata['saturation_point'], im2 + black_frame < metadata['saturation_point'], im1 > noise_floor, im2 > noise_floor)).all(axis=0) # Match histograms of consecutive exposures im1_hat = match_histograms(im1, im2) im2_hat = match_histograms(im2, im1) # Construct the simple sparse linear system. There are 2 sets for each pair (Eq. 4) num_pix = np.count_nonzero(mask) rows = np.concatenate((rows, np.arange(2*num_pix) + len(rows))) cols = np.concatenate((cols, np.repeat(i, 2*num_pix))) m = np.concatenate((m, (im1_hat[mask]/im1[mask]), (im2[mask]/im2_hat[mask]))) # Weights are given by sqrt() of histogram counts (Eq. 
4) im1, im2 = im1.astype(np.uint16), im2.astype(np.uint16) counts, bins = histogram(im1) weights1 = np.sqrt(counts[np.searchsorted(bins, im1[mask])]) counts, bins = histogram(im2) weights2 = np.sqrt(counts[np.searchsorted(bins, im2[mask])]) W = np.concatenate((W, weights1, weights2)) num_rows = rows.shape[0] data = np.ones(num_rows) O = csr_matrix((data, (rows, cols)), shape=(num_rows, (num_exp - 1))) elif method == 'gfxdisp': logger.info(f'Estimate using logarithmic linear system with noise model') num_pix = int(percentile/100*metadata['h']*metadata['w']) # If noise parameters is provided, retrieve variances, else use simplified model L = np.log(Y) if cam == 'default': cam = HDRutils.NormalNoise('Sony', 'ILCE-7R', 100, bits=14) bits = cam.bits if cam else 14 scaled_var = np.stack([(cam.var(y)/y**2) if cam else 1/y**2 for y in Y/(2**bits - 1)]) # Construct logarithmic sparse linear system W.O.e = W.m logger.info(f'Constructing sparse matrix (O) and vector (m) using {num_pix} pixels') rows = np.arange(0, (num_exp - 1)*num_pix, 0.5) cols, data = np.repeat(np.ones_like(rows)[None], 2, axis=0) data[1::2] = -1 m = np.zeros((num_exp - 1)*num_pix, dtype=np.float32) W = np.zeros_like(m) for i in range(num_exp - 1): cols[i*num_pix*2:(i + 1)*num_pix*2:2] = i # Collect unsaturated pixels from all longer exposures for j in range(i + 1, num_exp): mask = np.stack((Y[i] + black_frame < metadata['saturation_point'], Y[j] + black_frame < metadata['saturation_point'], Y[i] > noise_floor, Y[j] > noise_floor)).all(axis=0) # if mask.sum() < num_pix: # continue weights = np.concatenate((W[i*num_pix:(i+1)*num_pix], (1/(scaled_var[i] + scaled_var[j]) * mask).flatten())) logdiff = np.concatenate((m[i*num_pix:(i+1)*num_pix], (L[i] - L[j]).flatten())) selected = np.argsort(weights)[-num_pix:] W[i*num_pix:(i + 1)*num_pix] = weights[selected] m[i*num_pix:(i + 1)*num_pix] = logdiff[selected] cols[i*num_pix*2 + 1:(i + 1)*num_pix*2:2][selected > num_pix] = j O = csr_matrix((data, (rows, cols)), shape=((num_exp - 1)*num_pix, num_exp)) logger.info('Solving the sparse linear system using least squares') if outlier == 'cerman': err_prev = np.finfo(float).max t = trange(1000, leave=False) for i in t: exp = lsqr(diags(W) @ O, W * m)[0] err = (W*(O @ exp - m))**2 selected = err < 3*err.mean() W = W[selected] m = m[selected] O = O[selected] if err.mean() < 1e-6 or err_prev - err.mean() < 1e-6: # assert err_prev - err.mean() > 0 break err_prev = err.mean() t.set_description(f'loss={err.mean()}') del err, selected gc.collect() logger.warning(f'Used {O.shape[0]/(num_exp - 1)/num_pix*100}% of the initial pixels') elif outlier == 'ransac': assert method == 'gfxdisp' num_rows = W.shape[0] # Randomly select 10% of the data selected = np.zeros(num_rows, dtype=bool) selected[:num_rows//10] = True loss = np.finfo(float).max WO = diags(W) @ O Wm = W*m t = trange(100, leave=False) for i in t: np.random.shuffle(selected) exp_i = lsqr(WO[selected], Wm[selected])[0] exp_i = np.exp(exp_i - exp_i.max()) * exif_exp.max() reject = np.maximum(exp_i/exif_exp, exif_exp/exp_i) > 3 exp_i[reject] = exif_exp[reject] err = ((W*(O @ exp_i - m))**2).sum() if err < loss: loss = err exp = np.log(exp_i) t.set_description(f'loss={err}; i={i}') else: exp = lsqr(diags(W) @ O, W * m)[0] if method == 'cerman': exp = np.append(exp, exif_exp[-1]) for e in range(num_exp - 2, -1, -1): exp[e] = exif_exp[e+1]/exp[e] elif method == 'gfxdisp': exp = np.exp(exp - exp.max()) * exif_exp.max() # logger.warning(f'Exposure times in EXIF: {exif_exp}, estimated exposures: {exp}. 
Outliers removed {i} times') # reject = np.maximum(exp/exif_exp, exif_exp/exp) > 3 # exp[reject] = exif_exp[reject] # if reject.any(): # logger.warning(f'Exposure estimation failed {reject}. Try using more pixels') return exp
4,926
def plot_confusion_matrix(cm, cms, classes, cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    cm: confusion matrix in percentage
    cms: standard deviation error from cm
    classes: Name of each label
    """
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    #plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    plt.tight_layout()

    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # In case standard error wants to be displayed
            plt.text(j, i, '{0:.2f}'.format(cm[i, j]) + '\n$\pm$' + '{0:.2f}'.format(cms[i, j]),
                     horizontalalignment="center",
                     verticalalignment="center", fontsize=14,
                     color="white" if cm[i, j] > thresh else "black")

            # In case only percentage is displayed
            #plt.text(j, i, '{0:.2f}'.format(cm[i, j]) + '%',
            #         horizontalalignment="center",
            #         verticalalignment="center", fontsize=15,
            #         color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
4,927
def test_only_specials():
    """
    Tests passwords that only contain special characters
    and are under 8 chars.
    """
    passwords = ["!!!!", '???', '****', '%%%%%%%%', '$$$$', '@@@@@']
    for p in passwords:
        assert check_password(p) == 0.25
4,928
def indicators(bot, update):
    """
    Display enabled indicators
    """
    chat_id = update.message.chat_id
    user_id = 'usr_{}'.format(chat_id)
    _config = users_config[user_id]

    update.message.reply_text('Configured indicators ... ')
    for indicator in _config.indicators:
        msg = indicator
        for conf in _config.indicators[indicator]:
            if conf['enabled']:
                msg = '%s %s' % (msg, conf['candle_period'])
        if msg != indicator:
            update.message.reply_text(msg)
4,929
def default_reverse(*args, **kwargs): """ Acts just like django.core.urlresolvers.reverse() except that if the resolver raises a NoReverseMatch exception, then a default value will be returned instead. If no default value is provided, then the exception will be raised as normal. NOTE: Any exception that is not NoReverseMatch will always be raised as normal, even if a default is provided. """ # We're explicitly NOT happy to just re-raise the exception, as that may # adversely affect stack traces. if 'default' not in kwargs: return reverse(*args, **kwargs) else: default = kwargs.pop('default', None) try: return reverse(*args, **kwargs) except NoReverseMatch: return default
4,930
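The `default_reverse` snippet above wraps Django's URL reversing. The self-contained sketch below reproduces the pattern with a stand-in `reverse` and `NoReverseMatch` so it runs without Django; the URL names and paths are placeholders:

class NoReverseMatch(Exception):
    pass

def reverse(name, **kwargs):
    # Stand-in resolver with a fixed URL table
    urls = {'home': '/', 'profile': '/accounts/profile/'}
    if name not in urls:
        raise NoReverseMatch(name)
    return urls[name]

def default_reverse(*args, **kwargs):
    if 'default' not in kwargs:
        return reverse(*args, **kwargs)
    default = kwargs.pop('default', None)
    try:
        return reverse(*args, **kwargs)
    except NoReverseMatch:
        return default

print(default_reverse('profile'))                   # /accounts/profile/
print(default_reverse('missing', default='/404/'))  # /404/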
def _backup_file(filename):
    """Renames a given filename so it has a `-YYYY-MM-DD-HHMMSS` suffix"""
    dt_now = datetime.datetime.now()
    move_filename = "{}-{}-{}-{}-{}{:02d}{:02d}".format(filename,
                                                        dt_now.year,
                                                        dt_now.month,
                                                        dt_now.day,
                                                        dt_now.hour,
                                                        dt_now.minute,
                                                        dt_now.second)
    print("Backing up \"{}\" to \"{}\"".format(filename, move_filename))
    shutil.move(filename, move_filename)
4,931
def get_company_periods_up_to(period):
    """ Get all periods for a company leading up to the given period, including
        the given period
    """
    company = period.company
    return (company.period_set
            .filter(company=company, end__lte=period.end))
4,932
def format_count(
    label: str, counts: List[int], color: str, dashed: bool = False
) -> dict:
    """Format a line dataset for chart.js"""
    ret = {
        "label": label,
        "data": counts,
        "borderColor": color,
        "borderWidth": 2,
        "fill": False,
    }
    if dashed:
        ret["borderDash"] = [5, 5]
    return ret
4,933
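An example call for the formatter above, showing the dict that would be handed to chart.js; the label, counts and hex colour are made up:

from typing import List

def format_count(label: str, counts: List[int], color: str, dashed: bool = False) -> dict:
    ret = {"label": label, "data": counts, "borderColor": color, "borderWidth": 2, "fill": False}
    if dashed:
        ret["borderDash"] = [5, 5]
    return ret

print(format_count("opened", [3, 5, 2], "#36a2eb", dashed=True))
# {'label': 'opened', 'data': [3, 5, 2], 'borderColor': '#36a2eb',
#  'borderWidth': 2, 'fill': False, 'borderDash': [5, 5]}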
def main(): """ Main """ # parse command line arguments parser = argparse.ArgumentParser() parser.add_argument("-s", "--server", action="store_true", help="run server") parser.add_argument("-c", "--client", action="store_true", help="run client") parser.add_argument("-a", "--address", help="listen/connect address") parser.add_argument("-p", "--port", help="listen/connect port") args = parser.parse_args() # run other commands based on command line arguments if args.client: client(args.address, args.port) return if args.server: server(args.address, args.port) return
4,934
def test_initialize_file(ds_from_uvfits, test_outfile): """Test initializing file onto disk.""" ds = ds_from_uvfits ds.initialize_save_file(test_outfile) ds.data_array = None ds.flag_array = None ds.nsample_array = None ds.noise_array = None ds_in = DelaySpectrum() ds_in.read(test_outfile, read_data=False) assert ds_in == ds
4,935
def detect_ripples(eeg):
    """Detect sharp wave ripples (SWRs) from single channel eeg (AnalogSignalArray).
    """
    # Maggie defines ripples by doing:
    #   (1) filter 150-250
    #   (2) hilbert envelope
    #   (3) smooth with Gaussian (4 ms SD)
    #   (4) 3.5 SD above the mean for 15 ms
    #   (5) full ripple defined as window back to mean
    assert eeg.n_signals == 1, "only single channel ripple detection currently supported!"

    # (1)
    ripple_eeg = nel.filtering.sosfiltfilt(eeg, fl=150, fh=250)
    # (2, 3)
    ripple_envelope = nel.utils.signal_envelope1D(ripple_eeg, sigma=0.004)
    # (4, 5)
    bounds, maxes, events = nel.utils.get_events_boundaries(
        x=ripple_envelope.data,
        PrimaryThreshold=ripple_envelope.mean() + 3.5*ripple_envelope.std(),  # envelope amplitude
        SecondaryThreshold=ripple_envelope.mean(),                            # envelope amplitude
        minThresholdLength=0.015,  # threshold crossing must be at least 15 ms long
        minLength=0.0,             # total ripple duration must be at least XXX ms long
        ds=1/ripple_envelope.fs
    )

    # convert bounds to time in seconds
    timebounds = ripple_envelope.time[bounds]

    # add 1/fs to stops for open interval
    timebounds[:, 1] += 1/eeg.fs

    # create EpochArray with bounds
    ripple_epochs = nel.EpochArray(timebounds)

    # Adjust ripple centers to align to a peak
    ripple_centers = np.floor((ripple_epochs.centers - eeg.time[0])*eeg.fs).astype(int)
    ch = 7  # this was on some of Sibo's data, for CA1
    adjusted_centers = [(p-10)+np.argmax(eeg.data[ch, p-10:p+10]) for p in ripple_centers[1:-1].tolist()]

    return ripple_epochs
4,936
def usage():
    """ Usage function """
    print("Usage: %s <json conf file>" % sys.argv[0])
    sys.exit(0)
4,937
def cymdtodoy(str):
    """cymdtodoy(str) -> string

    Convert a legal CCSDS time string with the date expressed as a month
    and day to a simple time string with the date expressed as a day-of-year."""
    try:
        (year, mon, day, hour, min, sec) = cymdtoaymd(str)
        doy = aymdtodoy(year, mon, day, hour, min, sec)
    except TypeError as e:
        raise TypeError(e.args[0])
    except ValueError as e:
        raise ValueError(e.args[0])
    return doy
4,938
def configured_hosts(hass):
    """Return a set of the configured hosts.

    For future use with discovery!
    """
    out = {}
    for entry in hass.config_entries.async_entries(DOMAIN):
        out[entry.data[CONF_ADDRESS]] = {
            UUID: entry.data[UUID],
            CONF_ADDRESS: entry.data[CONF_ADDRESS],
            ACCESS_KEY: entry.data[ACCESS_KEY],
            SENSORS: entry.data.get(SENSORS, []),
        }
    return out
4,939
def load_model(model_name, weights, model_paths, module_name, model_params): """Import model and load pretrained weights""" if model_paths: sys.path.extend(model_paths) try: module = importlib.import_module(module_name) creator = getattr(module, model_name) model = creator(**model_params) except ImportError as err: if model_paths: print('Module {} in {} doesn\'t exist. Check import path and name'.format( model_name, os.pathsep.join(model_paths))) else: print('Module {} doesn\'t exist. Check if it is installed'.format(model_name)) sys.exit(err) except AttributeError as err: print('ERROR: Module {} contains no class or function with name {}!' .format(module_name, model_name)) sys.exit(err) try: if weights: model.load_state_dict(torch.load(weights, map_location='cpu')) except RuntimeError as err: print('ERROR: Weights from {} cannot be loaded for model {}! Check matching between model and weights'.format( weights, model_name)) sys.exit(err) return model
4,940
def generate_keys(directory: str, pwd: bytes = None) -> (ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey): """ Generate the public and private keys Generated keys have a default name, you should rename them This can be done with os.rename() :param directory: folder where the keys are made overwrite the existing keys :param pwd: password: if not None, Best available encryption is chosen and the private key is encrypted with a the password :return: private, public keys """ private_key = generate_private_key(directory, pwd) public_key = generate_public_key(directory, private_key) return private_key, public_key
4,941
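The `generate_private_key`/`generate_public_key` helpers used by the snippet above are not shown. As a rough sketch of what such helpers might wrap, here is a standalone key-pair generator using the cryptography package directly; the curve choice and PEM serialization are assumptions, not the project's actual implementation:

from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import serialization

def make_keypair(pwd: bytes = None):
    # Generate an EC private key and serialize both halves to PEM
    private_key = ec.generate_private_key(ec.SECP384R1())
    encryption = (serialization.BestAvailableEncryption(pwd)
                  if pwd is not None else serialization.NoEncryption())
    private_pem = private_key.private_bytes(
        serialization.Encoding.PEM,
        serialization.PrivateFormat.PKCS8,
        encryption,
    )
    public_pem = private_key.public_key().public_bytes(
        serialization.Encoding.PEM,
        serialization.PublicFormat.SubjectPublicKeyInfo,
    )
    return private_pem, public_pem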
def test_notebooks(): """ Run all notebooks in the directories given by the list `notebook_paths`. The notebooks are run locally using [treon](https://github.com/ReviewNB/treon) and executed in each directory so that local resources can be imported. Returns: num_errors (int): Number of notebooks that failed to run num_passed (int): Number of notebooks that successfully run """ num_errors = 0 num_passed = 0 for nb_path in notebook_paths: abs_nb_path = os.path.join(SGDIR, nb_path) cmd_line = f"treon . --threads=2" print(f"\033[1;33;40m Running {abs_nb_path}\033[0m") # Add path to PYTHONPATH environ = dict(os.environ, PYTHONPATH=abs_nb_path) procout = subprocess.run( cmd_line, shell=True, check=False, env=environ, cwd=abs_nb_path, # stdout=subprocess.PIPE, # stderr=subprocess.PIPE, ) if procout.returncode != 0: num_errors += 1 else: num_passed += 1 print() return num_errors, num_passed
4,942
def _iter_sample( logp_dlogp_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]], model_ndim: int, draws: int, tune: int, step: Union[NUTS, HamiltonianMC], start: np.ndarray, random_seed: Union[None, int, List[int]] = None, callback=None, ): """ Yield one chain in one process. Main iterator for singleprocess sampling. """ if random_seed is not None: np.random.seed(random_seed) q = start trace = np.zeros([model_ndim, tune + draws]) stats: List[SamplerWarning] = [] step.tune = bool(tune) if hasattr(step, "reset_tuning"): step.reset_tuning() for i in range(tune + draws): if i == 0 and hasattr(step, "iter_count"): step.iter_count = 0 if i == tune: step.stop_tuning() q, step_stats = step._astep(q) trace[:, i] = q stats.extend(step_stats) if callback is not None: warns = getattr(step, "warnings", None) # FIXME: implement callbacks # callback( # trace=trace, draw=(chain, i == draws, i, i < tune, stats, point, warns), # ) yield trace, stats
4,943
def get_storage_config_by_data_type(result_table_id):
    """
    Get the list of storage configurations for the given result table id
    :param result_table_id: rt_id
    :return: response: list of storage configurations
    """
    return DataStorageConfig.objects.filter(result_table_id=result_table_id, data_type="raw_data")
4,944
def separate_args(args: List[str]) -> (List[str], List[str]): """Separate args into preparser args and primary parser args. Args: args: Raw command line arguments. Returns: A tuple of lists (preparser_args, mainparser_args). """ preparser_args = [] if args and args[0].startswith("-"): cur = 0 while cur < len(args) and args[cur].startswith("-"): if args[cur] in _repobee.cli.preparser.PRE_PARSER_OPTS: preparser_args += args[cur : cur + 2] cur += 2 elif args[cur] in _repobee.cli.preparser.PRE_PARSER_FLAGS: preparser_args.append(args[cur]) cur += 1 else: break return preparser_args, args[len(preparser_args) :]
4,945
def send_to_stream(stream, msg):
    """
    Pickle & send to stdout a message.  Used primarily to communicate back-and-forth
    with a separately launched ztv process.
    """
    if isinstance(msg, str):
        msg = (msg,)
    pkl = pickle.dumps(msg)
    stream.write(pkl + '\n' + end_of_message_message)
    stream.flush()
4,946
def test_string_map_key_index():
    """
    Tests string map key indexes across all set names, index names, and index paths.
    """
    lib.backup_and_restore(
        lambda context: create_indexes(lib.create_string_map_key_index),
        None,
        lambda context: check_indexes(lib.check_map_key_index, "foobar")
    )
4,947
def lookup_alive_tags_shallow(repository_id, start_pagination_id=None, limit=None): """ Returns a list of the tags alive in the specified repository. Note that the tags returned *only* contain their ID and name. Also note that the Tags are returned ordered by ID. """ query = (Tag .select(Tag.id, Tag.name) .where(Tag.repository == repository_id) .order_by(Tag.id)) if start_pagination_id is not None: query = query.where(Tag.id >= start_pagination_id) if limit is not None: query = query.limit(limit) return filter_to_alive_tags(query)
4,948
def plotLevelSubsystems(Model, graph_reactions, fig_dpi=100): """ Computes the frequencies of the subsystems of the reactions appearing in the specified graph level which have the specified macrosystem """ GEM = Model.GEM macrosystems = ['Amino acid metabolism', 'Carbohydrate metabolism', 'Cell wall biosynthesis', 'Cofactor and vitamin metabolism', 'Energy and maintenance', 'Lipid metabolism', 'Nucleotide metabolism', 'Transport'] macrosystem_frequencies_per_level = getSystemDistributionPerGraphLevel( GEM, graph_reactions, 'macrosystem') macrosystem_frequencies_across_levels = getSystemDistributionAcrossGraphLevels( GEM, graph_reactions, 'macrosystem') def plot_func(macrosystem, macro_freq_type, level_number, save_fig): if macro_freq_type > 0: macro_frequencies = macrosystem_frequencies_per_level ylabel = 'frequency in level' else: macro_frequencies = macrosystem_frequencies_across_levels ylabel = 'frequency' total_subsystems = {} level_reactions = graph_reactions[level_number] fig, axs = plt.subplots(nrows=2, ncols=1, sharex=False, sharey=False, figsize=(14, 12)) plt.subplots_adjust(wspace=None, hspace=0.3) # Plot macrosystems df1 = pd.DataFrame.from_dict(macro_frequencies[macrosystem]) ax1 = df1.plot.bar(ax=axs[0], rot=0, fontsize=12) for p in ax1.patches: height = round(p.get_height(), 2) if height > 0: ax1.annotate(format(height, '.2f'), (p.get_x() * 1.005, p.get_height() * 1.008)) axs[0].set_title(macrosystem, fontsize=16) axs[0].set_ylabel(ylabel) axs[0].set_xlabel('graph level') # Plot subsystems for source in level_reactions.keys(): subsystems = extractMetabolicSystems(GEM, level_reactions[source], 'subsystem', macrosystem) total_subsystems[source] = getListFrequencies(subsystems) df2 = pd.DataFrame(dict([(k, pd.Series(v)) for k, v in total_subsystems.items()])) try: df2.plot(ax=axs[1], kind='bar', rot=75, fontsize=12) axs[1].set_title('Subsystems in level ' + str(level_number), fontsize=16) axs[1].set_ylabel('frequency') except Exception: axs[1].set_title('No data available', fontsize=16) if save_fig: plt.savefig('figure.png', dpi=fig_dpi, bbox_inches="tight") interact(plot_func, macrosystem=macrosystems, macro_freq_type=widgets.Dropdown( options=[('across graph levels', 0), ('per graph level', 1)], value=0, description='frequency'), level_number=widgets.IntSlider( value=0, min=0, max=20, step=1, description='graph level', readout=True), save_fig=widgets.ToggleButton( value=False, description='Save figure', disabled=False, layout=widgets.Layout(margin='3% 0 3% 8%')) )
4,949
def getLesson(request): """ Get the JSON representation for a lesson. """ print("getLesson called...") lesson_id = None if 'lesson_id' in request.matchdict: lesson_id = request.matchdict['lesson_id'] if lesson_id is None: # This should return an appropriate error about not finding the # requested lesson. pass lesson = getLessonById(lesson_id) return lesson
4,950
def config():
    """Modify spaceconfig files"""
    pass
4,951
def vector3d(mode,xdata,ydata,zdata,udata,vdata,wdata,scalardata=None,fig=None,zscale=500.,vector_color=(0,0,0),vector_cmap=None,alpha=1.0,vector_mode='2darrow', scale=1, spacing=8., set_view=None): """ fig: integer or string, optional. Figure key will plot data on corresponding mlab figure, if it exists, or create a new one mode: string; coordinate system of 3D projection. Options are 'rectangle' (default), 'sphere' or 'cylinder' xdata: 1D array; longitude values for data array ydata: 1D array; latitude values for data array zdata: 1D array; depth values for data array udata: 2D or 3D array; u vector component vdata: 2D or 3D array; v vector component wdata: 2D or 3D array; w vector component zscale: scalar, optional; change vertical scaling for plotting, such that the vertical axis is scaled as topo_z/zscale (assumes topo_z units are m); default zscale is 500 vector_mode: string, optional; style of vector plot color: colormap or rgb triplet,optional; color of quiver plot default is black (0,0,0). alpha: float or int, optional; opacity for data surface from 0 to 1, default is 1 scale: float or int, optional; scaling for length of vectors, default is 1. spacing: int, optional; If supplied, only one out of 'spacing' data points is displayed. This option is useful to reduce the number of points displayed on large datasets Must be an integer (int or long) or None set_view: array_like, optional; set the mayavi camera angle with input [azimuth, elevation, distance, focal point], default is """ #make figure if fig is None: mlab.figure(size = (1024,768),bgcolor = (1,1,1)) mlab.clf() else: mlab.figure(figure=fig,bgcolor = (1,1,1)) #do coordinate transformation if xdata is not None and ydata is not None and zdata is not None: #TODO add an error message if not all data fields are provided #prep data grid phi_iso, theta_iso = np.meshgrid(((ydata*np.pi*2)/360.)+np.pi/2.,(xdata*np.pi*2)/360.) if mode is 'sphere': x_iso = np.sin(phi_iso) * np.cos(theta_iso[::-1]) * (1 -zdata/zscale) y_iso = np.sin(phi_iso) * np.sin(theta_iso[::-1]) * (1 -zdata/zscale) z_iso = np.cos(phi_iso) * (1 -zdata/zscale) elif mode is 'cylinder': x_iso = np.sin(phi_iso) * np.cos(theta_iso[::-1]) y_iso = np.sin(phi_iso) * np.sin(theta_iso[::-1]) z_iso = zdata/zscale elif mode is 'rectangle': y_iso,z_iso = np.meshgrid(ydata,zdata) x_iso,z_iso = np.meshgrid(xdata,zdata) z_iso =-z_iso/zscale else: #raise error if all three fields are not provided print('ERROR: not all data fields are provided. Must provide 1D data x, y and z data points') #do quiver plot if scalardata is not None: m = mlab.quiver3d(x_iso, y_iso, z_iso, udata, vdata, wdata, scalars=scalardata, scale_mode=None,colormap=vector_cmap,mode=vector_mode,opacity=alpha,scale_factor=scale,mask_points=spacing) elif vector_cmap is not None: m = mlab.quiver3d(x_iso, y_iso, z_iso, udata, vdata, wdata, colormap=vector_cmap,mode=vector_mode,opacity=alpha,scale_factor=scale,mask_points=spacing) else: m = mlab.quiver3d(x_iso, y_iso, z_iso, udata, vdata, wdata, color=vector_color,mode=vector_mode,opacity=alpha,scale_factor=scale,mask_points=spacing) #optional: change mayavi camera settings return m
4,952
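A minimal usage sketch for the vector3d function above, assuming it is importable from a module that already imports numpy as np and mayavi.mlab as mlab; the arrays and parameter values below are illustrative only, not taken from the original project.

# Hypothetical example: plot a small synthetic vector field in 'rectangle' mode.
import numpy as np

lon = np.linspace(-180.0, 180.0, 36)      # 1D longitude values
lat = np.linspace(-80.0, 80.0, 36)        # 1D latitude values
depth = np.linspace(0.0, 5000.0, 36)      # 1D depth values (m)

u = np.random.rand(36, 36)                # u vector component
v = np.random.rand(36, 36)                # v vector component
w = np.zeros((36, 36))                    # w vector component

fig_handle = vector3d('rectangle', lon, lat, depth, u, v, w,
                      zscale=500., vector_mode='2darrow', scale=1, spacing=4)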
def make_daysetting_from_data(data): """ Constructs a new setting from a given dataset. This method will automatically instantiate a new class matching the type of the given dataset. It will fill all values provided by the dataset and then return the created instance """ factory = { "color": ColorType, "scalar": ScalarType } return make_setting_from_factory(data, factory)
4,953
def read_report(file) -> Optional[Report]: """ Reads the report meta-data section of the file. :param file: The file being read from. :return: The report section of the file. """ # Use a peeker so we don't read beyond the end of the header section peeker = line_peeker(file) # Read each line as a property properties = {} while True: line = next(peeker) # Finish when we reach a non-report line if not is_report_line(line): break # Skip comment lines if is_comment_line(line): continue # Extract the property name and value from the line name, value = split_field_line(line) properties[name] = value # Return the report (if there was one) if len(properties) == 0: return None else: return properties_to_report(properties)
4,954
def get_valid_segment(text): """ Returns None or the valid Loki-formatted urn segment for the given input string. """ if text == '': return None else: # Return the converted text value with invalid characters removed. valid_chars = ['.', '_', '-'] new_text = '' for char in text: if char in valid_chars or char.isalnum(): new_text += char return new_text
4,955
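A short usage sketch for the get_valid_segment function above; the inputs and expected outputs follow directly from the character filter in the function.

# Illustrative calls: invalid characters are stripped, empty input yields None.
assert get_valid_segment("My Lesson #1!") == "MyLesson1"
assert get_valid_segment("intro_module-2.0") == "intro_module-2.0"
assert get_valid_segment("") is None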
def on_camera_image(cli, new_im): """ Handle new images, coming from the robot. """ global last_im last_im = new_im
4,956
def get_aimpoint_offsets(): """ Get most recent aimpoint offset values :returns: tuple of dy_acis_i, dz_acis_i, dy_acis_s, dz_acis_s (arcsec) """ info_file = os.path.join(opt.data_root, 'info.json') with open(info_file, 'r') as fh: info = json.load(fh) process_time = Time(opt.process_time) if opt.process_time else Time.now() if (process_time - Time(info['date'])).jd > 14: logger.info('WARNING: offsets are more than 2 weeks old, last updated {}' .format(info['date'])) offsets = (info['acisi']['dDY'], info['acisi']['dDZ'], info['aciss']['dDY'], info['aciss']['dDZ']) logger.info('Read {} updated {} and found offsets {:.2f}, {:.2f}, {:.2f}, {:.2f}' .format(info_file, info['date'], *offsets)) return offsets
4,957
def cmd_ci(argv, args): """ Usage: localstack ci <subcommand> [options] Commands: init Initialize CI configuration for a new repository repos List continuous integration repositories Options: --provider=<> CI provider (default: travis) --repo=<> Repository identifier --token=<> API access token for CI provider """ args.update(docopt(cmd_ci.__doc__.strip(), argv=argv)) auth = import_auth() if args['<subcommand>'] == 'repos': provider = args['--provider'] or 'travis' result = auth.get_ci_repos(provider) print(result) elif args['<subcommand>'] == 'init': provider = opt_params(args, ['--provider']) or 'travis' repo, token = mand_params(args, ['--repo', '--token']) auth.init_ci_repo(repo, token, provider=provider) print(repo, token)
4,958
def save_dataset(ds: object, target_dir: str, context_folder=None): """Copies the dataset graph into the provided target directory. EXPERIMENTAL/UNSTABLE Parameters ---------- ds: the dataset graph to save target_dir the target directory to save to context_folder a folder node within the dataset graph to limit to """ dataset_plugins = get_plugins(WritingPlugin) for dsplugin in dataset_plugins: dsplugin.execute(ds, target_dir, context_folder=context_folder)
4,959
def mparse(filename, staticObstacleList=list(), **kwargs):
    """
    Parses a map file into a list of obstacles

    @param filename The file name of the map file

    @return A list of obstacles
    """
    polyList = kwargs.get("nodes", list())
    obstacleList = list()
    try:
        if filename is not None:
            with open(filename, "r+") as f:
                numberOfPolys = int(f.readline())

                # determine if obstacles are dynamic
                file_ext = filename.split(".")[-1]
                if file_ext == "obstacles":
                    dynamicObstacle = True
                else:
                    dynamicObstacle = False

                # loop through file and create PolyObstacle objects
                for _ in range(numberOfPolys):
                    # parse obstacle details
                    polyList = list()
                    line = f.readline().split()[1:]
                    intList = [int(float(s)) for s in line]
                    polyList += [
                        [
                            (
                                mapVal(
                                    intList[2*i],
                                    -29, 29,
                                    0, con.Configuration.xSize
                                ),
                                con.Configuration.ySize - mapVal(
                                    intList[2*i + 1],
                                    -29, 29,
                                    0, con.Configuration.ySize
                                )
                            ) for i in range(len(intList) // 2)
                        ]
                    ]

                    # create and append PolyObstacle to obstacleList
                    obstacleList += [
                        obstacle.PolyObstacle(
                            pList,
                            con.Configuration.screen,
                            dynamic=dynamicObstacle
                        ) for pList in polyList
                    ]
        else:
            # auto generate dynamic obstacles
            for pList in polyList:
                obst = obstacle.PolyObstacle(
                    pList,
                    con.Configuration.screen,
                    dynamic=True,
                    start_point=kwargs.get("start_point", None),
                    end_point=kwargs.get("end_point", None)
                )
                obstacleList.append(obst)
    except Exception:
        print("Error occurred while parsing file [{0}]!".format(filename))
    finally:
        return obstacleList
4,960
def install_pip_packages(python_executable: str, pip_packages: typing.List[str]) -> bool: """Install pip packages for the specified python. Args: python_executable: Python executable used to install pip packages. pip_packages: List of pip packages to install. Raises: subprocess.CalledProcessError if package installation fails. Returns: True if packages get installed, False otherwise. """ if pip_packages: for package in pip_packages: if not is_package_installed(package): logging.info('Package %s not installed.', package) command = ' '.join( [python_executable, '-m', 'pip', 'install', '--user']) command += ' ' + package logging.info('Install pip package: %s', package) if not run_and_check_result(command): return False return True logging.debug('no python packages were provided for installation.') return True
4,961
def sort_f_df(f_df):
    """Sorts f_df by s_idx first then by l_idx.

    E.g. for scenario 0, see all decision alternatives in order,
    then scenario 1, scenario 2, etc.

    Parameters
    ----------
    f_df : pandas.DataFrame
        A dataframe of performance values, `f`, with indexes for the
        scenario, `s`, and decision alternative, `l`.
        Columns: `['s_idx', 'l_idx', '<f1_name>', '<f2_name>', ...]`
    """
    # Sort first by s_idx then by l_idx, both ascending, and keep the result
    # (sort_values returns a new DataFrame rather than sorting in place).
    f_df = f_df.sort_values(['s_idx', 'l_idx'], ascending=[True, True])
    return f_df
4,962
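A small, self-contained example of the sort_f_df function above using a toy performance table; the column name f_cost is made up for illustration.

import pandas as pd

f_df = pd.DataFrame({
    's_idx': [1, 0, 1, 0],
    'l_idx': [0, 1, 1, 0],
    'f_cost': [4.2, 3.1, 5.0, 2.7],
})

sorted_df = sort_f_df(f_df)
# Rows now appear in (s_idx, l_idx) order: (0, 0), (0, 1), (1, 0), (1, 1)
print(sorted_df)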
def get_empath_scores(text): """ Obtains empath analysis on the text. Takes the dictionary mapping categories to scores, which is produced by passing the text to empath, and returns the scores. Args: text: string containing text to perform empath analysis on Returns: A list of empath scores, such that there is a score in the list for each of empath's pre-built categories """ empath_dict = lexicon.analyze(text, normalize=True) empath_scores = list(empath_dict.values()) return empath_scores
4,963
def ending_counts(sequences):
    """Return a dictionary keyed to each unique value in the input sequences list
    that counts the number of occurrences where that value is at the end of a sequence.

    For example, if 18 sequences end with DET, then you should return a
    dictionary such that your_ending_counts[DET] == 18
    """
    # Initialize the ending_dict with every tag as a key
    ending_dict = {}
    for tag in data.training_set.tagset:
        ending_dict[tag] = 0

    # Count the tag that ends each training sequence
    for tag_list in data.training_set.Y:
        tag = tag_list[-1]
        if tag in ending_dict:
            ending_dict[tag] += 1
    return ending_dict
4,964
def sqlite_insert(engine, table_name, data): """ Inserts data into a table - either one row or in bulk. Create the table if not exists. Parameters ---------- engine: sqlalchemy engine for sqlite uri: string data: dict Returns ------- bool """ dtype = type(data) try: with session_scope(engine) as session: try: conditionally_create_generic_table(engine, table_name) except TableCreationException: pass # most likely because it already exists, ignore if dtype is list: for row in data: session.execute('insert into ' + table_name + ' (data) values (:values)', {'values': json.dumps(row)}) elif dtype is dict: # investigate: http://docs.sqlalchemy.org/en/latest/faq/performance.html # Bulk_insert_mappings or use raw sqlite3 row = data session.execute('insert into ' + table_name + ' (data) values (:values)', {'values': json.dumps(row)}) return True except IntegrityError as e: logging.error(e) raise DuplicateRowException except (OperationalError, StatementError) as e: logging.error(e) raise InsertException except Exception as e: logging.error(e) raise Exception('not sure what went wrong - could not insert data')
4,965
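A hedged usage sketch for the sqlite_insert function above; it assumes the surrounding module's helpers (session_scope, conditionally_create_generic_table) are available and that the engine is a standard SQLAlchemy SQLite engine. The database file and table name are hypothetical.

from sqlalchemy import create_engine

engine = create_engine('sqlite:///example.db')

# Insert a single row...
sqlite_insert(engine, 'events', {'user': 'alice', 'action': 'login'})

# ...or several rows in one call.
sqlite_insert(engine, 'events', [
    {'user': 'bob', 'action': 'login'},
    {'user': 'bob', 'action': 'logout'},
])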
def _fit_solver(solver): """ Call ``fit`` on the solver. Needed for multiprocessing. """ return solver.fit()
4,966
def flatten(l):
    """ recursively turns any nested list into a flat list (using a DFS) """
    res = []
    for x in l:
        if isinstance(x, list):
            res += flatten(x)
        else:
            res.append(x)
    return res
4,967
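A quick illustration of the flatten function above on an arbitrarily nested list.

nested = [1, [2, [3, 4], 5], [[6], 7]]
assert flatten(nested) == [1, 2, 3, 4, 5, 6, 7]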
def test_trial_heartbeat_not_updated_inbetween(exp, trial): """Test that the heartbeat of a trial is not updated before wait time.""" trial_monitor = TrialPacemaker(trial, wait_time=5) trial_monitor.start() time.sleep(1) trials = exp.fetch_trials_by_status("reserved") assert trial.heartbeat.replace(microsecond=0) == trials[0].heartbeat.replace( microsecond=0 ) heartbeat = trials[0].heartbeat time.sleep(6) trials = exp.fetch_trials_by_status(status="reserved") assert heartbeat != trials[0].heartbeat trial_monitor.stop()
4,968
def build_column_hierarchy(param_list, level_names, ts_columns, hide_levels=[]): """For each parameter in `param_list`, create a new column level with parameter values. Combine this level with columns `ts_columns` using Cartesian product.""" checks.assert_same_shape(param_list, level_names, axis=0) param_indexes = [] for i in range(len(param_list)): if level_names[i] not in hide_levels: param_index = index_fns.index_from_values(param_list[i], name=level_names[i]) param_indexes.append(param_index) if len(param_indexes) > 1: param_columns = index_fns.stack_indexes(*param_indexes) elif len(param_indexes) == 1: param_columns = param_indexes[0] else: param_columns = None if param_columns is not None: return index_fns.combine_indexes(param_columns, ts_columns) return ts_columns
4,969
def emitlabel(start): """emit the labelling functions""" printf("static void %Plabel1(NODEPTR_TYPE p) {\n" "%1%Passert(p, PANIC(\"NULL tree in %Plabel\\n\"));\n" "%1switch (%Parity[OP_LABEL(p)]) {\n" "%1case 0:\n") if Tflag: printf("%2%Pnp = p;\n"); printf("%2STATE_LABEL(p) = %Pstate(OP_LABEL(p), 0, 0);\n%2break;\n" "%1case 1:\n%2%Plabel1(LEFT_CHILD(p));\n"); if Tflag: printf("%2%Pnp = p;\n"); printf("%2STATE_LABEL(p) = %Pstate(OP_LABEL(p),\n" "%3STATE_LABEL(LEFT_CHILD(p)), 0);\n%2break;\n" "%1case 2:\n%2%Plabel1(LEFT_CHILD(p));\n%2%Plabel1(RIGHT_CHILD(p));\n"); if Tflag: printf("%2%Pnp = p;\n") printf("%2STATE_LABEL(p) = %Pstate(OP_LABEL(p),\n" "%3STATE_LABEL(LEFT_CHILD(p)),\n%3STATE_LABEL(RIGHT_CHILD(p)));\n%2break;\n" "%1}\n}\n\n") printf( "STATE_TYPE %Plabel(NODEPTR_TYPE p) {\n%1%Plabel1(p);\n" "%1return ((struct %Pstate *)STATE_LABEL(p))->rule.%P%S ? STATE_LABEL(p) : 0;\n" "}\n\n", start)
4,970
def gradient_descent(f, xk, delta=0.01, plot=False, F=None, axlim=10):
    """
    f: multivariable function with 1 array as parameter
    xk : a vector to start descent
    delta : precision of search
    plot : option to plot the results or not
    F : the function f expressed with 2 arrays in argument (X, Y) representing
        the columns xk[0] and xk[1], for plotting purposes; used only if plot == True
    axlim : limit of the plot's 3 axes (x, y, z)
    """
    if plot:
        ax = plt.axes(projection='3d')
        A = []

    t = perf_counter()
    dk = nd.Gradient(f)(xk)

    while la.norm(dk) > delta:
        if plot and len(A) < 10:
            A.append(xk.copy())

        # keep a copy of the previous iterate (xk is updated in place below)
        xt = xk.copy()

        phi = lambda s: f(xk - s * dk)
        alpha = op.newton(phi, 1)
        xk -= alpha * dk

        if plot and len(A) < 10:
            A.append(xk.copy())

        dk = nd.Gradient(f)(xk)

        if la.norm(xk - xt) < delta:
            break

    t = perf_counter() - t
    print("execution time: ", t)

    if plot:
        for u in A:
            ax.scatter(u[0], u[1], f(u), c='b', s=50)
        ax.scatter(xk[0], xk[1], f(xk), c='r', s=50, label="optimum")

        x = np.arange(-axlim, axlim, axlim / 100)
        y = np.arange(-axlim, axlim, axlim / 100)
        X, Y = np.meshgrid(x, y)
        Z = F(X, Y)

        ax.set_xlabel('x', labelpad=20)
        ax.set_ylabel('y', labelpad=20)
        ax.set_zlabel('z', labelpad=20)
        surf = ax.plot_surface(X, Y, Z, cmap=plt.cm.cividis)

        plt.legend()
        plt.title("optimization with Gradient Descent")
        plt.show()

    return xk
4,971
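A usage sketch for the gradient_descent function above on a simple isotropic quadratic; it assumes the module's imports (numpy as np, numdifftools as nd, numpy.linalg as la, scipy.optimize as op, matplotlib.pyplot as plt, perf_counter) are already in place, as in the original source. The objective below is made up for illustration.

import numpy as np

# Convex quadratic with its minimum at (1, -2); F is the same surface on a meshgrid.
f = lambda x: (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2
F = lambda X, Y: (X - 1.0) ** 2 + (Y + 2.0) ** 2

x0 = np.array([5.0, 5.0])
x_opt = gradient_descent(f, x0, delta=0.01, plot=False, F=F)
print(x_opt)   # should be close to [1, -2]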
def exercise_dict(show_result=True): """Exercise 2: Basic Dictionary Manipulations Notebook: PCP_python.ipynb""" if show_result is False: return student_dict = {123: ['Meier', 'Sebastian'], 456: ['Smith', 'Walter']} print(student_dict) student_dict[789] = ['Wang', 'Ming'] print(student_dict) print(list(student_dict.keys())) print(list(student_dict.values())) print(student_dict[456][0]) del student_dict[456] print(student_dict) print(len(student_dict))
4,972
def _mark_workflow_as_deleted_in_db(workflow): """Mark workflow as deleted.""" workflow.status = RunStatus.deleted current_db_sessions = Session.object_session(workflow) current_db_sessions.add(workflow) current_db_sessions.commit()
4,973
def unregister_dialect(name): # real signature unknown; restored from __doc__ """ Delete the name/dialect mapping associated with a string name. csv.unregister_dialect(name) """ pass
4,974
def _get_cells(obj): """Extract cells and cell_data from a vtkDataSet and sort it by types.""" cells, cell_data = {}, {} data = _get_data(obj.GetCellData()) arr = vtk2np(obj.GetCells().GetData()) loc = vtk2np(obj.GetCellLocationsArray()) types = vtk2np(obj.GetCellTypesArray()) for typ in VTK_TYP: if not isinstance(typ, int): continue cell_name = VTK_TYP[typ] n_no = NODE_NO[cell_name] cell_loc_i = np.where(types == typ)[0] loc_i = loc[cell_loc_i] # if there are no cells of the actual type continue if len(loc_i) == 0: # if not loc_i: continue arr_i = np.empty((len(loc_i), n_no), dtype=int) for i in range(n_no): arr_i[:, i] = arr[loc_i + i + 1] cells[cell_name] = arr_i cell_data_i = {} for data_i in data: cell_data_i[data_i] = data[data_i][cell_loc_i] if cell_data_i != {}: cell_data[cell_name] = cell_data_i return cells, cell_data
4,975
def check_X(X, enforce_univariate=False, enforce_min_instances=1): """Validate input data. Parameters ---------- X : pd.DataFrame enforce_univariate : bool, optional (default=False) Enforce that X is univariate. enforce_min_instances : int, optional (default=1) Enforce minimum number of instances. Returns ------- X : pd.DataFrame Raises ------ ValueError If X is an invalid input """ if not isinstance(X, pd.DataFrame): raise ValueError(f"X must be a pd.DataFrame, but found: " f"{(type(X))}") if enforce_univariate: _enforce_X_univariate(X) if enforce_min_instances > 0: _enforce_min_instances(X, min_instances=enforce_min_instances) return X
4,976
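A brief example of the check_X function above; the nested-Series DataFrame content is arbitrary and only meant to show the validation behaviour.

import pandas as pd

X = pd.DataFrame({'dim_0': [pd.Series([1.0, 2.0, 3.0]), pd.Series([4.0, 5.0, 6.0])]})
X = check_X(X, enforce_min_instances=1)   # passes and returns the same DataFrame

try:
    check_X([[1, 2, 3]])                  # not a DataFrame
except ValueError as err:
    print(err)                            # "X must be a pd.DataFrame, but found: ..."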
def parse_amount(value: int) -> Decimal: """Return a scaled down amount.""" return Decimal(value) / Decimal(AMOUNT_SCALE_FACTOR)
4,977
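An illustrative call to the parse_amount function above, assuming AMOUNT_SCALE_FACTOR is a power of ten such as 100; the actual constant is defined elsewhere in the module.

# With AMOUNT_SCALE_FACTOR == 100, a raw integer amount of 1234
# would scale down to Decimal('12.34').
print(parse_amount(1234))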
def log(fun, user, message, debug=True): # type: (str, str, str, bool) -> None """ Log in a CSV file Header is: "time", "command", "user_id", "message" Time is in local-time :rtype: None """ _log = ( datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S") + "," + ",".join( [fun, user, '"' + message.replace('"', "'").replace("\n", "|") + '"'] ) + "\n" ) with codecs.open("log.csv", "a", "utf-8") as _file: _file.write(_log) if debug: print("*log = " + _log)
4,978
def post_mode(data): """ Example: data = {'name': "EACH_RANDOM", 'fade_duration': 1000, 'delay_duration': 1000, } """ requests.post(f'http://{ip}/api/mode', data=data)
4,979
async def _delete_session(bot, guild): """Deletes the session for the given guild.""" session_data = data.remove(bot, __name__, 'data', guild_id=guild.id, safe=True) if not session_data: raise CBException("Session does not exist.") channel_id, webhook_id = session_data['channel'], session_data['webhook'] channel = data.get_channel(bot, channel_id, safe=True) webhooks = await channel.webhooks() for webhook in webhooks: if webhook.id == webhook_id: await webhook.delete() break else: logger.warn('Webhook to delete (%s) not found!', webhook_id) try: WEBHOOK_SET.remove(webhook_id) except KeyError: logger.warn("Webhook not found in WEBHOOK_SET") data.list_data_remove(bot, __name__, 'webhooks', value=webhook_id, safe=True) if guild.voice_client and guild.voice_client.channel.id == session_data['voice_channel']: await utilities.stop_audio(bot, guild)
4,980
def svn_ra_do_update2(*args):
    """
    svn_ra_do_update2(svn_ra_session_t session, svn_ra_reporter3_t reporter,
        void report_baton, svn_revnum_t revision_to_update_to,
        char update_target, svn_depth_t depth, svn_boolean_t send_copyfrom_args,
        svn_delta_editor_t update_editor, void update_baton,
        apr_pool_t pool) -> svn_error_t
    """
    # apply() was removed in Python 3; unpack the arguments directly instead.
    return _ra.svn_ra_do_update2(*args)
4,981
def resolve(schema, copy=False): """Resolve schema references. :param schema: The schema to resolve. :return: The resolved schema. """ from jsonschemaplus.schemas import metaschema from jsonschemaplus.schemas import hyperschema _substitutions = {'%25': '%', '~1': '/', '~0': '~'} def _resolve_refs(schema, root=None, id_acc=None): """Resolve schema references and modify supplied schema as a side effect. If function parses value that equals schema's root, _resolve_refs early exits because references have already been resolved. :param schema: The schema to resolve. :param root: The root of the schema. :side effect: Modifies schema. :return: None :TODO: resolve all http ref values """ if root is None: root = schema ref = '$ref' id_ = 'id' if object_(schema): value = schema.get(id_) if value and string(value): if uri(value): id_acc = value else: if id_acc is None: raise SchemaError('Error resolving schema with id: %s' % value) else: id_acc += value if not uri(id_acc): raise SchemaError('Error resolving schema with id: %s' % value) value = schema.get(ref) if value and string(value): if uri(value): schema.pop(ref) if (value == 'http://json-schema.org/draft-04/schema#' and root != metaschema): schema.update(deepcopy(metaschema)) # elif (value == 'http://json-schema.org/draft-04/hyper-schema#' # and root != hyperschema): # schema.update(deepcopy(hyperschema)) else: try: (url_, path_) = url(value) data = resolve(get(url_)) schema.update(_path(data, path_)) except: raise SchemaError('Error resolving schema with $ref: %s' % value) _resolve_refs(schema, root, id_acc) elif value[0] == '#': schema.pop(ref) subschema = _path(root, value) if object_(subschema) and ref in subschema and string(subschema[ref]): _resolve_refs(subschema, root, id_acc) subschema = _path(root, value) schema.update(subschema) elif value.find('.json') != -1: schema.pop(ref) (url_, path_) = url(id_acc + value) data = resolve(get(url_)) schema.update(_path(data, path_)) _resolve_refs(schema, root, id_acc) else: raise SchemaError('Error resolving schema with $ref: %s' % value) for k, v in schema.items(): if k != ref and k != id_ and v != root: _resolve_refs(v, root, id_acc) elif array(schema): for item in schema: if item != root: _resolve_refs(item, root, id_acc) def _path(schema, path): components = path[1:].split('/')[1:] subschema = schema for c in components: for k, v in _substitutions.items(): if k in c: c = c.replace(k, v) if array(subschema): try: index = int(c) subschema = subschema[index] except: raise SchemaError('Invalid path %s' % path) elif object_(subschema): subschema = subschema.get(c) else: raise SchemaError('Invalid path %s' % path) return subschema resolve.resolve_refs = _resolve_refs resolve.path = _path if copy: schema_ = deepcopy(schema) else: schema_ = schema _resolve_refs(schema_) return schema_
4,982
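A small usage sketch for the resolve function above on a schema with an internal $ref; it assumes the module's helper predicates (object_, string, array, uri, etc.) are available as in the original package. The schema itself is made up for illustration.

schema = {
    "definitions": {
        "positiveInt": {"type": "integer", "minimum": 1}
    },
    "type": "object",
    "properties": {
        "count": {"$ref": "#/definitions/positiveInt"}
    }
}

resolved = resolve(schema, copy=True)
# The internal reference is replaced by the referenced subschema.
print(resolved["properties"]["count"])   # {'type': 'integer', 'minimum': 1}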
def test_truncate_wide_end(all_terms): """Ensure that terminal.truncate has the correct behaviour for wide characters.""" @as_subprocess def child(kind): from blessed import Terminal term = Terminal(kind) test_string = u"AB\uff23" # ABC assert term.truncate(test_string, 3) == u"AB" child(all_terms)
4,983
def sentiwords_tag(doc, output="bag"): """Tag doc with SentiWords polarity priors. Performs left-to-right, longest-match annotation of token spans with polarities from SentiWords. Uses no part-of-speech information; when a span has multiple possible taggings in SentiWords, the mean is returned. Parameters ---------- doc : document or list of strings output : string, optional Output format. Either "bag" for a histogram (dict) of annotated token span frequencies, or "tokens" a mixed list of strings and (list of strings, polarity) pairs. """ from ._sentiwords import tag doc = _tokenize_if_needed(fetch(doc)) tagged = tag(doc) if output == "bag": d = {} for ngram, polarity in tagged: if polarity == 0: continue if ngram in d: d[ngram][1] += 1 else: d[ngram] = [polarity, 1] return d elif output == "tokens": return [ngram if polarity == 0 else (ngram, polarity) for ngram, polarity in tagged] else: raise ValueError("unknown output format %r" % output)
4,984
def _GetCommandTaskIds(command): """Get a command's task ids.""" # A task count is the number of tasks we put in the command queue for this # command. We cap this number to avoid a single command with large run count # dominating an entire cluster. If a task count is smaller than a run count, # completed tasks will be rescheduled as needed. task_count = min(command.run_count, MAX_TASK_COUNT) _, request_id, _, command_id = command.key.flat() return ["%s-%s-%s" % (request_id, command_id, i) for i in range(task_count)]
4,985
def save_orchestrator_response(url, jsonresponse, dryrun): """Given a URL and JSON response create/update the corresponding mockfile.""" endpoint = url.split("/api/")[1].rstrip("/") try: path, identifier = endpoint.rsplit("/", maxsplit=1) except ValueError: path, identifier = None, endpoint if any(char in identifier for char in "?&="): # Skip urls with query parameters for now (can be normalized if it's needed) print(f"Unsupported URL parameters: {url}") return if any(pattern in url for pattern in TO_EXCLUDE): print(f"Excluding URL {url}") return def get_id(string): """Defines how final URL component can be used as identifier""" try: parsed = uuid.UUID(string) return str(parsed)[:8] except ValueError: if string.isnumeric(): return string return None try: response = json.loads(jsonresponse) except json.JSONDecodeError as e: print(f"Invalid JSON response: {url} ({e})") return if (parsed_id := get_id(identifier)) is None: # URL ends on a word "products" or "organisations" filename = f"{identifier}.json" else: # URL ends on UUID or integer if "/domain-model/" in url: filename_prefix = "".join(c for c in response["product"]["tag"].lower() if c.isalpha()) else: filename_prefix = "" filename = f"{filename_prefix}-{parsed_id}.json" if filename_prefix else f"{parsed_id}.json" if not path: # Store in data/ fpath = DATA_ROOT / filename print( f"{endpoint} -> {'update (if changed)' if fpath.is_file() else 'create'} '{filename}' in root directory" ) else: # Store in data/<subfolder>/ dpath = DATA_ROOT / path fpath = dpath / filename print( f"{endpoint} -> {'update (if changed)' if fpath.is_file() else 'create'} '{filename}' " f"in {'new' if not dpath.is_dir() else 'existing'} directory '{path}'" ) if not dpath.is_dir() and not dryrun: dpath.mkdir(parents=True) if not dryrun: with fpath.open(mode="w") as handle: json.dump(response, handle, sort_keys=True, indent=4)
4,986
def test_get_function_and_class_names(code, target): """Test get_function_and_class_names function.""" res = get_function_and_class_names(code) assert sorted(res) == sorted(target)
4,987
def aggregate_responses(instrument_ids, current_user, patch_dstu2=False): """Build a bundle of QuestionnaireResponses :param instrument_ids: list of instrument_ids to restrict results to :param current_user: user making request, necessary to restrict results to list of patients the current_user has permission to see """ # Gather up the patient IDs for whom current user has 'view' permission user_ids = OrgTree().visible_patients(current_user) annotated_questionnaire_responses = [] questionnaire_responses = QuestionnaireResponse.query.filter( QuestionnaireResponse.subject_id.in_(user_ids)).order_by( QuestionnaireResponse.authored.desc()) if instrument_ids: instrument_filters = ( QuestionnaireResponse.document[ ("questionnaire", "reference") ].astext.endswith(instrument_id) for instrument_id in instrument_ids ) questionnaire_responses = questionnaire_responses.filter(or_(*instrument_filters)) patient_fields = ("careProvider", "identifier") for questionnaire_response in questionnaire_responses: subject = questionnaire_response.subject encounter = questionnaire_response.encounter encounter_fhir = encounter.as_fhir() questionnaire_response.document["encounter"] = encounter_fhir questionnaire_response.document["subject"] = { k: v for k, v in subject.as_fhir().items() if k in patient_fields } if subject.organizations: questionnaire_response.document["subject"]["careProvider"] = [ Reference.organization(org.id).as_fhir() for org in subject.organizations ] # Hack: add missing "resource" wrapper for DTSU2 compliance # Remove when all interventions compliant if patch_dstu2: questionnaire_response.document = { 'resource': questionnaire_response.document, # Todo: return URL to individual QuestionnaireResponse resource 'fullUrl': url_for( '.assessment', patient_id=questionnaire_response.subject_id, _external=True, ), } annotated_questionnaire_responses.append(questionnaire_response.document) bundle = { 'resourceType': 'Bundle', 'updated': FHIR_datetime.now(), 'total': len(annotated_questionnaire_responses), 'type': 'searchset', 'entry': annotated_questionnaire_responses, } return bundle
4,988
def compute_bleu(reference_corpus, translation_corpus, max_order=4, use_bp=True): """Computes BLEU score of translated segments against one or more references. Args: reference_corpus: list of references for each translation. Each reference should be tokenized into a list of tokens. translation_corpus: list of translations to score. Each translation should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. use_bp: boolean, whether to apply brevity penalty. Returns: BLEU score. """ reference_length = 0 translation_length = 0 bp = 1.0 geo_mean = 0 matches_by_order = [0] * max_order possible_matches_by_order = [0] * max_order precisions = [] for (references, translations) in zip(reference_corpus, translation_corpus): reference_length += len(references) translation_length += len(translations) ref_ngram_counts = _get_ngrams_with_counter(references, max_order) translation_ngram_counts = _get_ngrams_with_counter(translations, max_order) overlap = dict((ngram, min(count, translation_ngram_counts[ngram])) for ngram, count in ref_ngram_counts.items()) for ngram in overlap: matches_by_order[len(ngram) - 1] += overlap[ngram] for ngram in translation_ngram_counts: possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[ ngram] precisions = [0] * max_order smooth = 1.0 for i in range(0, max_order): if possible_matches_by_order[i] > 0: precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i] if matches_by_order[i] > 0: precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[ i] else: smooth *= 2 precisions[i] = 1.0 / (smooth * possible_matches_by_order[i]) else: precisions[i] = 0.0 if max(precisions) > 0: p_log_sum = sum(math.log(p) for p in precisions if p) geo_mean = math.exp(p_log_sum / max_order) if use_bp: ratio = translation_length / reference_length bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0 bleu = geo_mean * bp return np.float32(bleu)
4,989
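A tiny worked example for the compute_bleu function above with pre-tokenized sentences; it assumes the module's _get_ngrams_with_counter helper and the usual numpy/math imports are in scope, and the token lists are made up for illustration.

references = [["the", "cat", "sat", "on", "the", "mat"]]
translations = [["the", "cat", "sat", "on", "mat"]]

score = compute_bleu(references, translations, max_order=4, use_bp=True)
print(score)   # a float in [0, 1]; 1.0 only for a perfect match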
def flip(inputDirection: direction) -> direction: """ Chooses what part of the general pointer to flip, by DP%2 == CC rule, providing the following flow: (0,0) -> (0,1) (0,1) -> (1,1) (1,1) -> (1,0) (1,0) -> (2,0) (2,0) -> (2,1) (2,1) -> (3,1) (3,1) -> (3,0) (3,0) -> (0,0) :param inputDirection: Original state of the pointers :return: Tuple of ints containing new pointers """ if inputDirection.pointers[0] % 2 == inputDirection.pointers[1]: return direction((inputDirection.pointers[0], flipCC(inputDirection.pointers[1]))) return direction((flipDP(inputDirection.pointers[0]), inputDirection.pointers[1]))
4,990
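A hedged example of the flip function above; it assumes direction is the small wrapper used elsewhere in this interpreter, holding a (DP, CC) tuple in its pointers attribute.

d = direction((0, 0))          # DP = 0, CC = 0
d = flip(d)                    # -> (0, 1): DP % 2 == CC, so CC flips
d = flip(d)                    # -> (1, 1): DP % 2 != CC, so DP rotates
print(d.pointers)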
def save(request): """Update the column levels in campaign_tree table with the user's input from the data warehouse frontend.""" if any(request["changes"]): query = 'UPDATE campaign_tree SET ' query += ', '.join([f"""levels[{index + 1}] = trim(regexp_replace(%s, '\s+', ' ', 'g'))""" for index, change in enumerate(request["changes"]) if change]) where_clause, variables = _build_where_clause(request) query += ' ' + where_clause with mara_db.postgresql.postgres_cursor_context('mara') as cursor: # type: psycopg2.extensions.cursor cursor.execute(query, tuple([change for change in request['changes'] if change] + variables)) return f'Successfully updated {cursor.rowcount} rows: <tt>{str(cursor.query.decode("utf-8"))}</tt>' else: return 'No changes to be made'
4,991
def netmiko_prompting_del(task): """ Some commands prompt for confirmation: nxos1# del bootflash:/text.txt Do you want to delete "/text.txt" ? (yes/no/abort) [y] y """ # Manually create Netmiko connection net_connect = task.host.get_connection("netmiko", task.nornir.config) filename = "scp_copy_example.txt" del_cmd = f"del bootflash:/{filename}" cmd_list = [del_cmd, "\n"] output = "" import ipdb ipdb.set_trace() for cmd in cmd_list: # Use timing mode output += net_connect.send_command_timing( cmd, strip_prompt=False, strip_command=False ) print() print("#" * 80) print(task.host.name) print("---") print(output) print("#" * 80) print()
4,992
def test_non_existent_route(client: FlaskClient) -> None: """Test getting non-existant page.""" res = client.get("/") assert res.status_code == 404 assert b"The requested URL was not found on the server" in res.data
4,993
def print_options_data(ticker: str, export: str): """Scrapes Barchart.com for the options information Parameters ---------- ticker: str Ticker to get options info for export: str Format of export file """ data = barchart_model.get_options_info(ticker) print(tabulate(data, tablefmt="fancy_grid", showindex=False)) print("") export_data(export, os.path.dirname(os.path.abspath(__file__)), "info", data)
4,994
def _print_model(server, user_key, device_type_model):
    """
    Print the model for a given device type
    :param device_type_model: Device type ID to print the model for
    """
    name = None
    model = []

    parameters = _get_parameters(server, user_key)
    parameters = parameters['deviceParams']

    try:
        device_type_model = int(device_type_model)
    except:
        print(Color.RED + 'Please provide an integer device type.' + Color.END + '\n')
        return 0

    if device_type_model == 22 or device_type_model == 23 or device_type_model == 24:
        if device_type_model == 22:
            name = 'Web Camera'
        elif device_type_model == 23:
            name = 'Android Camera'
        elif device_type_model == 24:
            name = 'iOS Camera'

        model = ['accessCameraSettings', 'audioStreaming', 'videoStreaming', 'ppc.hdStatus', 'ppc.rapidMotionStatus', 'batteryLevel', 'ppc.charging', 'motionStatus', 'selectedCamera', 'ppc.autoFocus', 'ppc.recordSeconds', 'ppc.motionSensitivity', 'version', 'ppc.robotConnected', 'ppc.robotMotionDirection', 'ppc.robotOrientation', 'ppc.robotVantageSphericalCoordinates', 'ppc.robotVantageTimer', 'ppc.robotVantageConfigurationStatus', 'ppc.robotVantageName', 'ppc.robotVantageSequence', 'ppc.robotVantageMoveToIndex', 'ppc.availableBytes', 'twitterAutoShare', 'twitterDescription', 'ppc.twitterReminder', 'ppc.twitterStatus', 'ppc.motionCountDownTime', 'ppc.blackoutScreenOn', 'ppc.warningStatus', 'ppc.warningText', 'ppc.recordFullDuration', 'ppc.flashOn', 'streamError', 'ppc.streamStatus', 'model', 'timeZoneId', 'ppc.motionActivity', 'ppc.outputVolume', 'ppc.captureImage', 'recordStatus', 'ppc.alarm', 'ppc.countdown', 'ppc.playSound', 'ppc.motionAlarm', 'ppc.cameraName', 'ppc.throttleStatus']

    elif device_type_model == 31:
        name = 'Gateway'
        model = ['firmware', 'ipAddress', 'manufacturer', 'model', 'numberOfChildren', 'permitJoining', 'zbChannel', 'reboot', 'cloud', 'firmwareUpdateStatus', 'firmwareUrl', 'firmwareChecksum']

    elif device_type_model == 130:
        name = 'LintAlert PRO Plus'
        model = ['sig.led', 'sig.pressure', 'sig.wciPressure', 'sig.status', 'sig.runtime', 'sig.maxled', 'sig.curMaxLed', 'sig.type', 'sig.table', 'sig.clean', 'waterLeak', 'version', 'rssi']

    elif device_type_model == 4200:
        name = 'Netatmo Healthy Home Coach'
        model = ['degC', 'co2', 'relativeHumidity', 'noise', 'firmware', 'wifiSignal', 'pressure', 'nam.healthIdx']

    elif device_type_model == 4201:
        name = 'Netatmo Weather Station Indoor Module'
        model = ['degC', 'co2', 'relativeHumidity', 'noise', 'pressure', 'firmware', 'wifiSignal']

    elif device_type_model == 4202:
        name = 'Netatmo Weather Station Outdoor Module'
        model = ['degC', 'relativeHumidity', 'firmware', 'signalStrength', 'batteryLevel']

    elif device_type_model == 4204:
        name = 'Netatmo Welcome'
        model = ['status', 'ipc.sdStatus', 'ppc.charging', 'ipc.mainVideoUrl']

    elif device_type_model == 4220:
        name = 'Sensibo'
        model = ['degC', 'relativeHumidity', 'powerStatus', 'systemMode', 'coolingSetpoint', 'fanMode', 'swingMode', 'systemModeValues', 'fanModeValues', 'swingValues', 'tempValues']

    elif device_type_model == 9001:
        name = 'GE Dimmer Switch'
        model = ['currentLevel', 'state', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 9002:
        name = 'Siren'
        model = ['ppc.alarmWarn', 'ppc.alarmDuration', 'ppc.alarmStrobe', 'ppc.alarmSquawk', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 9003:
        name = 'Temperature & Humidity Sensor'
        model = ['relativeHumidity', 'degC', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 9006:
        name = 'Fire Alarm'
        model = ['alarmStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 9007:
        name = 'Smoke Detector'
        model = ['alarmStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 9008:
        name = 'Heat Detector'
        model = ['alarmStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 9010:
        name = 'Smart Lock'
        model = ['degC', 'lockStatus', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 10014:
        name = 'Entry Sensor'
        model = ['doorStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 10017:
        name = 'Water Sensor'
        model = ['waterLeak', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 10019:
        name = 'Touch Sensor'
        model = ['vibrationStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 10031:
        name = 'Gateway'
        model = ['firmware', 'ipAddress', 'model', 'numberOfChildren', 'permitJoining', 'zbChannel']

    elif device_type_model == 10033:
        name = 'Temperature Sensor'
        model = ['degC', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 10034:
        name = 'Humidity Sensor'
        model = ['relativeHumidity', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 10035:
        name = 'Smart Plug'
        model = ['power', 'energy', 'outletStatus', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 10036:
        name = 'Smart Bulb'
        model = ['currentLevel', 'state', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 10037:
        name = 'Thermostat'
        model = ['degC', 'fanModeSequence', 'systemMode', 'controlSequenceOfOperation', 'coolingSetpoint', 'heatingSetpoint', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']

    elif device_type_model == 10038:
        name = 'Motion Sensor'
        model = ['motionStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']

    if len(model) > 0:
        print(Color.GREEN + name + Color.END)
        for m in model:
            description = ''
            for p in parameters:
                if p['name'] == m:
                    description = '('
                    if 'systemUnit' in p:
                        description += p['systemUnit'] + ', '
                    if p['numeric']:
                        description += 'numeric'
                    else:
                        description += 'non-numeric'
                    if 'description' in p:
                        description += ', ' + p['description']
                    description += ')'
            print(' ' + Color.BOLD + m + Color.END + ' ' + description)
    else:
        print(Color.RED + 'This device type does not yet have a model defined.' + Color.END)

    return
4,995
def parse_xiaomi(self, data, source_mac, rssi):
    """Parser for Xiaomi sensors"""
    # check for adstruc length
    i = 9  # till Frame Counter
    msg_length = len(data)
    if msg_length < i:
        _LOGGER.debug("Invalid data length (initial check), adv: %s", data.hex())
        return None

    # extract frame control bits
    frctrl = data[4] + (data[5] << 8)
    frctrl_mesh = (frctrl >> 7) & 1  # mesh device
    frctrl_version = frctrl >> 12  # version
    frctrl_auth_mode = (frctrl >> 10) & 3
    frctrl_solicited = (frctrl >> 9) & 1
    frctrl_registered = (frctrl >> 8) & 1
    frctrl_object_include = (frctrl >> 6) & 1
    frctrl_capability_include = (frctrl >> 5) & 1
    frctrl_mac_include = (frctrl >> 4) & 1  # check for MAC address in data
    frctrl_is_encrypted = (frctrl >> 3) & 1  # check for encryption being used
    frctrl_request_timing = frctrl & 1  # old version

    # Check that device is not of mesh type
    if frctrl_mesh != 0:
        _LOGGER.debug("Xiaomi device data is a mesh type device, which is not supported. Data: %s", data.hex())
        return None

    # Check that version is 2 or higher
    if frctrl_version < 2:
        _LOGGER.debug("Xiaomi device data is using old data format, which is not supported. Data: %s", data.hex())
        return None

    # Check that MAC in data is the same as the source MAC
    if frctrl_mac_include != 0:
        i += 6
        if msg_length < i:
            _LOGGER.debug("Invalid data length (in MAC check), adv: %s", data.hex())
            return None
        xiaomi_mac_reversed = data[9:15]
        xiaomi_mac = xiaomi_mac_reversed[::-1]
        if xiaomi_mac != source_mac:
            _LOGGER.debug("Xiaomi MAC address doesn't match data MAC address. Data: %s", data.hex())
            return None
    else:
        xiaomi_mac = source_mac

    # determine the device type
    device_id = data[6] + (data[7] << 8)
    try:
        device_type = XIAOMI_TYPE_DICT[device_id]
    except KeyError:
        if self.report_unknown == "Xiaomi":
            _LOGGER.info(
                "BLE ADV from UNKNOWN Xiaomi device: RSSI: %s, MAC: %s, ADV: %s",
                rssi,
                to_mac(source_mac),
                data.hex()
            )
        _LOGGER.debug("Unknown Xiaomi device found. Data: %s", data.hex())
        return None

    packet_id = data[8]

    sinfo = 'MiVer: ' + str(frctrl_version)
    sinfo += ', DevID: ' + hex(device_id) + ' : ' + device_type
    sinfo += ', FnCnt: ' + str(packet_id)
    if frctrl_request_timing != 0:
        sinfo += ', Request timing'
    if frctrl_registered != 0:
        sinfo += ', Registered and bound'
    else:
        sinfo += ', Not bound'
    if frctrl_solicited != 0:
        sinfo += ', Request APP to register and bind'
    if frctrl_auth_mode == 0:
        sinfo += ', Old version certification'
    elif frctrl_auth_mode == 1:
        sinfo += ', Safety certification'
    elif frctrl_auth_mode == 2:
        sinfo += ', Standard certification'

    # check for MAC presence in sensor whitelist, if needed
    if self.discovery is False and xiaomi_mac not in self.sensor_whitelist:
        _LOGGER.debug("Discovery is disabled. MAC: %s is not whitelisted!", to_mac(xiaomi_mac))
        return None

    # check for unique packet_id and advertisement priority
    try:
        prev_packet = self.lpacket_ids[xiaomi_mac]
    except KeyError:
        # start with empty first packet
        prev_packet = None

    if device_type in ["LYWSD03MMC", "CGG1", "MHO-C401", "CGDK2"]:
        # Check for adv priority and packet_id for devices that can also send in ATC format
        adv_priority = 19
        try:
            prev_adv_priority = self.adv_priority[xiaomi_mac]
        except KeyError:
            # start with initial adv priority
            prev_adv_priority = 0
        if adv_priority > prev_adv_priority:
            # always process advertisements with a higher priority
            self.adv_priority[xiaomi_mac] = adv_priority
        elif adv_priority == prev_adv_priority:
            # only process messages with same priority that have a unique packet id
            if prev_packet == packet_id:
                if self.filter_duplicates is True:
                    return None
                else:
                    pass
            else:
                pass
        else:
            # do not process advertisements with lower priority (ATC advertisements will be used instead)
            prev_adv_priority -= 1
            self.adv_priority[xiaomi_mac] = prev_adv_priority
            return None
    else:
        if prev_packet == packet_id:
            if self.filter_duplicates is True:
                # only process messages with highest priority and messages with unique packet id
                return None
    self.lpacket_ids[xiaomi_mac] = packet_id

    # check for capability byte present
    if frctrl_capability_include != 0:
        i += 1
        if msg_length < i:
            _LOGGER.debug("Invalid data length (in capability check), adv: %s", data.hex())
            return None
        capability_types = data[i - 1]
        sinfo += ', Capability: ' + hex(capability_types)
        if (capability_types & 0x20) != 0:
            i += 1
            if msg_length < i:
                _LOGGER.debug("Invalid data length (in capability type check), adv: %s", data.hex())
                return None
            capability_io = data[i - 1]
            sinfo += ', IO: ' + hex(capability_io)

    # check that data contains object
    if frctrl_object_include != 0:
        # check for encryption
        if frctrl_is_encrypted != 0:
            sinfo += ', Encryption'
            firmware = "Xiaomi (MiBeacon V" + str(frctrl_version) + " encrypted)"
            if frctrl_version <= 3:
                payload = decrypt_mibeacon_legacy(self, data, i, xiaomi_mac)
            else:
                payload = decrypt_mibeacon_v4_v5(self, data, i, xiaomi_mac)
        else:
            # No encryption
            # check minimum advertisement length with data
            firmware = "Xiaomi (MiBeacon V" + str(frctrl_version) + ")"
            sinfo += ', No encryption'
            if msg_length < i + 3:
                _LOGGER.debug("Invalid data length (in non-encrypted data), adv: %s", data.hex())
                return None
            payload = data[i:]
    else:
        # data does not contain Object
        _LOGGER.debug("Advertisement doesn't contain payload, adv: %s", data.hex())
        return None

    result = {
        "rssi": rssi,
        "mac": ''.join(f'{i:02X}' for i in xiaomi_mac),
        "type": device_type,
        "packet": packet_id,
        "firmware": firmware,
        "data": False,
    }

    if payload is not None:
        result.update({"data": True})
        sinfo += ', Object data: ' + payload.hex()
        # loop through parse_xiaomi payload
        payload_start = 0
        payload_length = len(payload)
        # assume that the data may have several values of different types
        while payload_length >= payload_start + 3:
            obj_typecode = payload[payload_start] + (payload[payload_start + 1] << 8)
            obj_length = payload[payload_start + 2]
            next_start = payload_start + 3 + obj_length
            if payload_length < next_start:
                _LOGGER.debug("Invalid payload data length, payload: %s", payload.hex())
                break
            dobject = payload[payload_start + 3:next_start]
            if obj_length != 0:
                resfunc = xiaomi_dataobject_dict.get(obj_typecode, None)
                if resfunc:
                    if hex(obj_typecode) in ["0x1001", "0xf", "0xb"]:
                        result.update(resfunc(dobject, device_type))
                    else:
                        result.update(resfunc(dobject))
                else:
                    if self.report_unknown == "Xiaomi":
                        _LOGGER.info("%s, UNKNOWN dataobject in payload! Adv: %s", sinfo, data.hex())
            payload_start = next_start

    return result
4,996
def test_md009_bad_configuration_br_spaces_invalid(): """ Test to verify that a configuration error is thrown when supplying the br_spaces value is not an integer in the proper range. """ # Arrange scanner = MarkdownScanner() supplied_arguments = [ "--set", "plugins.md009.br_spaces=$#-1", "--strict-config", "scan", "test/resources/rules/md009/good_paragraph_no_extra.md", ] expected_return_code = 1 expected_output = "" expected_error = ( "BadPluginError encountered while configuring plugins:\n" + "The value for property 'plugins.md009.br_spaces' is not valid: Allowable values are greater than or equal to 0." ) # Act execute_results = scanner.invoke_main(arguments=supplied_arguments) # Assert execute_results.assert_results( expected_output, expected_error, expected_return_code )
4,997
def parse_ipv6_addresses(text):
    """Return a list of the IPv6 addresses found in the given text."""
    addresses = ioc_grammars.ipv6_address.searchString(text)
    return _listify(addresses)
4,998
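A short usage sketch for the parse_ipv6_addresses function above, assuming the surrounding module's ioc_grammars pyparsing grammar and _listify helper are importable as in the original package; the sample text is made up.

text = "Traffic observed from 2001:db8::1 and fe80::1ff:fe23:4567:890a over IPv6."
print(parse_ipv6_addresses(text))
# e.g. ['2001:db8::1', 'fe80::1ff:fe23:4567:890a']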
def test_blank_lines_below_marker(): """Empty lines between direct and fenced code block are OK.""" # This type of usage should be avoided. directives = fenced_block_node_directives() assert len(directives) == 1 marker = directives[0] assert marker.type == phmdoctest.direct.Marker.CLEAR_NAMES assert marker.value == "" assert marker.line == 47 assert marker.literal == "<!--phmdoctest-clear-names-->"
4,999