Columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):
    """Estimate the spherical variance values.

    Parameters
    ----------
    responsibilities : array-like of shape (n_samples, n_components)

    X : array-like of shape (n_samples, n_features)

    nk : array-like of shape (n_components,)

    means : array-like of shape (n_components, n_features)

    reg_covar : float

    Returns
    -------
    variances : array, shape (n_components,)
        The variance values of each component.
    """
    return _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar).mean(1)
6,200
def parsed_codebook_importer(codebook):
    """
    Import the parsed CPS codebook

    Parameters:
        codebook (str): the filename of the parsed codebook

    Returns:
        dataframe
    """
    path_finder('codebooks')
    skip = row_skipper(codebook)
    codebook = pd.read_csv(codebook, sep='\t', skiprows=skip).dropna()
    os.chdir('..')
    return codebook
6,201
def put_job_failure(job, message):
    """Notify CodePipeline of a failed job

    Args:
        job: The CodePipeline job ID
        message: A message to be logged relating to the job status

    Raises:
        Exception: Any exception thrown by .put_job_failure_result()
    """
    print('Putting job failure')
    print(message)
    code_pipeline.put_job_failure_result(jobId=job, failureDetails={'message': message, 'type': 'JobFailed'})
6,202
def get_html(url):
    """Returns html content of the url. Retries until successful without overloading the server."""
    while True:  # Retry until successful
        try:
            sleep(2)
            debug('Crawling %s' % url)
            html = urllib2.urlopen(url).read()
            return html
        except urllib2.HTTPError, e:
            warn('HTTP error %s while crawling %s. Trying again.' % (e, url))
            sleep(5)
            continue
        except urllib2.URLError, e:
            warn('URL error %s while crawling %s. Trying again.' % (e, url))
            sleep(5)
            continue
6,203
def clear_cache():
    """Clears featurization cache."""
    global SMILES_TO_GRAPH
    SMILES_TO_GRAPH = {}
6,204
def map_min(process):
    """ """
    param_dict = {'ignore_nodata': 'bool'}
    return map_default(process, 'min', 'reduce', param_dict)
6,205
def geoname_exhaustive_search(request, searchstring):
    """
    List all children of a geoname filtered by a list of featurecodes
    """
    if request.query_params.get('fcode'):
        fcodes = [s.upper() for s in request.query_params.get('fcode').split(',')]
    else:
        fcodes = []
    limit = request.query_params.get('limit') or 50

    if request.method == 'GET':
        geonames = Geoname.objects \
            .filter(
                Q(englishname__startswith=searchstring) |
                Q(alternatenames__alternatename__startswith=searchstring,
                  alternatenames__iscolloquial=0)
            ) \
            .order_by('-population', '-fcode__searchorder_detail').distinct()
        if len(fcodes) > 0:
            geonames = geonames.filter(fcode__code__in=fcodes)
        if limit:
            geonames = geonames[:limit]
        serializer = GeonameSearchSerializer(geonames, many=True)
        return JsonResponse(serializer.data, safe=False)
6,206
def guess_temperature_sensor():
    """ Try guessing the location of the installed temperature sensor """
    devices = listdir(DEVICE_FOLDER)
    devices = [device for device in devices if device.startswith('28-')]
    if devices:
        # print "Found", len(devices), "devices which maybe temperature sensors."
        return DEVICE_FOLDER + devices[0] + DEVICE_SUFFIX
    else:
        sys.exit("Sorry, no temperature sensors found")
6,207
def help_systempowerlimit(self, commands):
    """
    Show:
        limit: Shows the power limit for a server
        =======================================================
        Usage: show system power limit -i {serverid}
            -i -- serverid, the target server number. Typically 1-48
            [-h] -help; display the correct syntax
    ########################################################
    Set:
        limit: Sets the power limit for a server
        =======================================================
        Usage: set system power limit -i {serverid} -l {powerlimit}
            -i -- serverid, the target server number. Typically 1-48
            -l -- Power limit per server in watts
            [-h] -help; display the correct syntax
    """
6,208
def count_reads(regions_list, params):
    """ Count reads from bam within regions (counts position of cutsite to prevent double-counting) """
    bam_f = params.bam
    read_shift = params.read_shift
    bam_obj = pysam.AlignmentFile(bam_f, "rb")

    log_q = params.log_q
    logger = TobiasLogger("", params.verbosity, log_q)  #sending all logger calls to log_q

    #Count per region
    read_count = 0
    logger.spam("Started counting region_chunk ({0} -> {1})".format("_".join([str(element) for element in regions_list[0]]), "_".join([str(element) for element in regions_list[-1]])))

    for region in regions_list:
        read_lst = ReadList().from_bam(bam_obj, region)
        for read in read_lst:
            read.get_cutsite(read_shift)
            if read.cutsite > region.start and read.cutsite < region.end:  #only reads within borders
                read_count += 1

    logger.spam("Finished counting region_chunk ({0} -> {1})".format("_".join([str(element) for element in regions_list[0]]), "_".join([str(element) for element in regions_list[-1]])))

    bam_obj.close()

    return read_count
6,209
def rgbImage2grayVector(img):
    """ Turns a row and column rgb image into a 1D grayscale vector """
    gray = []
    for row_index in range(0, len(img)):
        for pixel_index, pixel in enumerate(img[row_index]):
            gray.append(rgbPixel2grayscaleValue(pixel))
    return gray
6,210
def compute_MSE(predicted, observed):
    """ predicted is scalar and observed as array"""
    if len(observed) == 0:
        return 0
    err = 0
    for o in observed:
        err += (predicted - o)**2 / predicted
    return err / len(observed)
6,211
def log_sum(log_u):
    """Compute `log(sum(exp(log_u)))`"""
    if len(log_u) == 0:
        return NEG_INF
    maxi = np.argmax(log_u)
    max = log_u[maxi]
    if max == NEG_INF:
        return max
    else:
        exp = log_u - max
        np.exp(exp, out=exp)
        return np.log1p(np.sum(exp[:maxi]) + np.sum(exp[maxi + 1:])) + max
6,212
def rand_2d(rand, width: int, height: int):
    """
    Infinite stream of coordinates on a 2D plane from the random source provided.
    Assumes indexing [(0, width - 1), (0, height - 1)].

    :param rand: Random source with method <i>randint(min, max)</i> with [min, max].
    :param width: Width, first dimension
    :param height: Height, second dimension
    """
    width -= 1
    height -= 1
    while True:
        yield rand.randint(0, width), rand.randint(0, height)
6,213
def gather_basic_file_info(filename: str): """ Build out the basic file metadata that can be gathered from any file on the file system. Parameters ---------- filename full file path to a file Returns ------- dict basic file attributes as dict """ if not os.path.exists(filename): raise EnvironmentError('{} does not exist'.format(filename)) elif not os.path.isfile(filename): raise EnvironmentError('{} is not a file'.format(filename)) last_modified_time = None created_time = None filesize = None time_added = None try: stat_blob = os.stat(filename) last_modified_time = datetime.fromtimestamp(stat_blob.st_mtime, tz=timezone.utc) created_time = datetime.fromtimestamp(stat_blob.st_ctime, tz=timezone.utc) filesize = np.around(stat_blob.st_size / 1024, 3) # size in kB time_added = datetime.now(tz=timezone.utc) except FileNotFoundError: print('Unable to read from {}'.format(filename)) return {'file_path': filename, 'last_modified_time_utc': last_modified_time, 'created_time_utc': created_time, 'file_size_kb': filesize, 'time_added': time_added}
6,214
def special_value_sub(lhs, rhs):
    """ Subtraction between special values or between special values and numbers """
    if is_nan(lhs):
        return FP_QNaN(lhs.precision)
    elif is_nan(rhs):
        return FP_QNaN(rhs.precision)
    elif (is_plus_infty(lhs) and is_plus_infty(rhs)) or \
         (is_minus_infty(lhs) and is_minus_infty(rhs)):
        return FP_QNaN(lhs.precision)
    elif is_plus_infty(lhs) and is_minus_infty(rhs):
        return lhs
    elif is_minus_infty(lhs) and is_plus_infty(rhs):
        return lhs
    elif is_infty(lhs) and is_zero(rhs):
        return lhs
    elif is_infty(lhs):
        # invalid inf - inf excluded previously
        return lhs
    elif is_infty(rhs):
        return -rhs
    else:
        return lhs + (-rhs)
6,215
def parse_git_repo(git_repo): """Parse a git repository URL. git-clone(1) lists these as examples of supported URLs: - ssh://[user@]host.xz[:port]/path/to/repo.git/ - git://host.xz[:port]/path/to/repo.git/ - http[s]://host.xz[:port]/path/to/repo.git/ - ftp[s]://host.xz[:port]/path/to/repo.git/ - rsync://host.xz/path/to/repo.git/ - [user@]host.xz:path/to/repo.git/ - ssh://[user@]host.xz[:port]/~[user]/path/to/repo.git/ - git://host.xz[:port]/~[user]/path/to/repo.git/ - [user@]host.xz:/~[user]/path/to/repo.git/ - /path/to/repo.git/ - file:///path/to/repo.git/ This function doesn't support the <transport>::<address> syntax, and it doesn't understand insteadOf shortcuts from ~/.gitconfig. """ if '://' in git_repo: return urlparse.urlparse(git_repo) if ':' in git_repo: netloc, colon, path = git_repo.partition(':') return urlparse.ParseResult('ssh', netloc, path, '', '', '') else: return urlparse.ParseResult('file', '', git_repo, '', '', '')
6,216
def make_wavefunction_list(circuit, include_initial_wavefunction=True):
    """ simulate the circuit, keeping track of the state vectors at each step"""
    wavefunctions = []
    simulator = cirq.Simulator()
    for i, step in enumerate(simulator.simulate_moment_steps(circuit)):
        wavefunction_scrambled = step.state_vector()
        wavefunction = unscramble_wavefunction(wavefunction_scrambled)
        wavefunctions.append(wavefunction)
    if include_initial_wavefunction:
        initial_wavefunction = wavefunctions[0] * 0  # create a blank vector
        initial_wavefunction[0] = 1
        wavefunctions = [initial_wavefunction] + wavefunctions
    return wavefunctions
6,217
def if_else(cond, a, b):
    """Work around Python 2.4 """
    if cond:
        return a
    else:
        return b
6,218
def _update_machine_metadata(esh_driver, esh_machine, data={}): """ NOTE: This will NOT WORK for TAGS until openstack allows JSONArrays as values for metadata! """ if not hasattr(esh_driver._connection, 'ex_set_image_metadata'): logger.info( "EshDriver %s does not have function 'ex_set_image_metadata'" % esh_driver._connection.__class__ ) return {} try: # Possible metadata that could be in 'data' # * application uuid # * application name # * specific machine version # TAGS must be converted from list --> String logger.info("New metadata:%s" % data) meta_response = esh_driver._connection.ex_set_image_metadata( esh_machine, data ) esh_machine.invalidate_machine_cache(esh_driver.provider, esh_machine) return meta_response except Exception as e: logger.exception("Error updating machine metadata") if 'incapable of performing the request' in e.message: return {} else: raise
6,219
def timedelta_to_time(data: pd.Series) -> pd.Series:
    """Convert ``datetime.timedelta`` data in a series to ``datetime.time`` data.

    Parameters
    ----------
    data : :class:`~pandas.Series`
        series with data as :class:`datetime.timedelta`

    Returns
    -------
    :class:`~pandas.Series`
        series with data converted into :class:`datetime.time`
    """
    data_cpy = data.copy()
    # ensure pd.Timedelta
    data = data + pd.Timedelta("0h")
    # convert to datetime
    data = datetime.datetime.min + data.dt.to_pytimedelta()
    # convert to time
    data = [d.time() if d is not pd.NaT else None for d in data]
    data = pd.Series(np.array(data), index=data_cpy.index, name=data_cpy.name)
    return data
6,220
def bzr_wc_target_exists_version():
    """
    Test updating a working copy when a target already exists.
    """
    test = 'bzr_wc_target_exists_version'
    wt = '%s-test-%s' % (DIR, test)
    puts(magenta('Executing test: %s' % test))

    from fabric.api import run
    from fabtools.files import is_dir
    from fabtools import require

    assert not is_dir(wt)

    require.bazaar.working_copy(REMOTE_URL, wt, version='2')
    require.bazaar.working_copy(REMOTE_URL, wt, version='4', update=True)

    assert_wc_exists(wt)
    assert run('bzr revno %s' % wt) == '4'
6,221
def nms(boxes, scores, iou_thresh, max_output_size): """ Input: boxes: (N,4,2) [x,y] scores: (N) Return: nms_mask: (N) """ box_num = len(boxes) output_size = min(max_output_size, box_num) sorted_indices = sorted(range(len(scores)), key=lambda k: -scores[k]) selected = [] for i in range(box_num): if len(selected) >= output_size: break should_select = True for j in range(len(selected) - 1, -1, -1): if ( polygon_iou(boxes[sorted_indices[i]], boxes[selected[j]])[0] > iou_thresh ): should_select = False break if should_select: selected.append(sorted_indices[i]) return np.array(selected, dtype=np.int32)
6,222
def get_db():
    """Connect to the application's configured database. The connection
    is unique for each request and will be reused if this is called again
    """
    if 'db' not in g:
        g.db = pymysql.connect(
            host='localhost',
            port=3306,
            user='root',
            password='',
            database='qm',
            charset='utf8'
        )
    return g.db
6,223
def read_stb(library, session):
    """Reads a status byte of the service request.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :return: Service request status byte.
    """
    status = ViUInt16()
    library.viReadSTB(session, byref(status))
    return status.value
6,224
def will_expire(certificate, days): """ Returns a dict containing details of a certificate and whether the certificate will expire in the specified number of days. Input can be a PEM string or file path. .. versionadded:: 2016.11.0 certificate: The certificate to be read. Can be a path to a certificate file, or a string containing the PEM formatted text of the certificate. CLI Example: .. code-block:: bash salt '*' x509.will_expire "/etc/pki/mycert.crt" days=30 """ ret = {} if os.path.isfile(certificate): try: ret["path"] = certificate ret["check_days"] = days cert = _get_certificate_obj(certificate) _check_time = datetime.datetime.utcnow() + datetime.timedelta(days=days) _expiration_date = cert.get_not_after().get_datetime() ret["cn"] = _parse_subject(cert.get_subject())["CN"] if _expiration_date.strftime("%Y-%m-%d %H:%M:%S") <= _check_time.strftime( "%Y-%m-%d %H:%M:%S" ): ret["will_expire"] = True else: ret["will_expire"] = False except ValueError: pass return ret
6,225
def write_reset_reg(rst_reg_addr, rst_reg_space_id, rst_reg_val, config):
    """Write reset register info

    :param rst_reg_addr: reset register address
    :param rst_reg_space_id: reset register space id
    :param rst_reg_val: reset register value
    :param config: file pointer that opened for writing board config information
    """
    print("\t{0}".format("<RESET_REGISTER_INFO>"), file=config)
    print("\t#define RESET_REGISTER_ADDRESS 0x{:0>2X}UL".format(
        rst_reg_addr), file=config)
    print("\t#define RESET_REGISTER_SPACE_ID {0}".format(
        SPACE_ID[rst_reg_space_id]), file=config)
    print("\t#define RESET_REGISTER_VALUE {0}U".format(
        rst_reg_val), file=config)
    print("\t{0}\n".format("</RESET_REGISTER_INFO>"), file=config)
6,226
def one_particle_quasilocal(sp, chli, chlo, Es=None):
    """
    Calculate the one-particle irreducible T-matrix T(1).

    Parameters
    ----------
    sp : Setup
        Setup object describing the setup
    chli : int
        Input channel
    chlo : int
        Output channel
    Es : ndarray
        List of particle energies
    """
    # guess a suitable range of energies to probe
    if Es is None:
        maxt = np.max(np.abs(sp.model.links)) + np.max(np.abs(sp.model.omegas)) + 1
        Es = np.linspace(-maxt, maxt, 1000)

    T1 = np.zeros(Es.shape, dtype=np.complex128)

    for i, E in enumerate(Es):
        # single particle eigenenergies
        E1, _, _ = sp.eigenbasis(1, E)

        # numerators
        num1 = sp.transition(0, chli, 1, E)
        num2 = sp.transition(1, chlo, 0, E)

        # initialize the matrix
        # num = sp.gs[chli] * sp.gs[chlo] * num2.T * num1
        num = num2.T * num1

        for k in range(len(E1)):
            T1[i] += num[k] / (E - E1[k])

    return Es, T1
6,227
def _CreateIssueForFlake(issue_generator, target_flake, create_or_update_bug): """Creates a monorail bug for a single flake. This function is used to create bugs for detected flakes and flake analysis results. Args: create_or_update_bug (bool): True to create or update monorail bug, otherwise False. Should always look for existing bugs for flakes, even if cannot update the bug. """ monorail_project = issue_generator.GetMonorailProject() # Re-uses an existing open bug if possible. issue_id = SearchOpenIssueIdForFlakyTest(target_flake.normalized_test_name, monorail_project) if not issue_id: # Reopens a recently closed bug if possible. issue_id = SearchRecentlyClosedIssueIdForFlakyTest( target_flake.normalized_test_name, monorail_project) if issue_id: logging.info('An existing issue %s was found, attach it to flake: %s.', FlakeIssue.GetLinkForIssue(monorail_project, issue_id), target_flake.key) _AssignIssueToFlake(issue_id, target_flake) if create_or_update_bug: monorail_util.UpdateIssueWithIssueGenerator( issue_id=issue_id, issue_generator=issue_generator, reopen=True) return issue_id if not create_or_update_bug: # No existing bug found, and cannot create bug, bail out. return None logging.info('No existing open issue was found, create a new one.') issue_id = monorail_util.CreateIssueWithIssueGenerator( issue_generator=issue_generator) if not issue_id: logging.warning('Failed to create monorail bug for flake: %s.', target_flake.key) return None logging.info('%s was created for flake: %s.', FlakeIssue.GetLinkForIssue(monorail_project, issue_id), target_flake.key) _AssignIssueToFlake(issue_id, target_flake) return issue_id
6,228
def get_spilled_samples(spills: List, train_dataset: Dataset):
    """
    Returns the actual data that was spilled. Notice that it returns everything that the
    __getitem__ returns, i.e. data and labels and potentially other stuff. This is done to be
    more general: not just to work with datasets that return (data, label), but also with
    datasets that return (data, label, third_thing) or similar.

    Notice that the function only takes in one dataset, but each spill is a tuple with indexes
    for two datasets (the other is ignored).

    :param spills:
    :param train_dataset:
    :return: spilled_samples:
    """
    spilled_samples = []
    for spill in spills:
        spill_inx = spill[0]
        spilled_samples.append(train_dataset.__getitem__(spill_inx))
    return spilled_samples
6,229
def reshard( inputs: List[Path], output: Path, tmp: Path = None, free_original: bool = False, rm_original: bool = False, ) -> Path: """Read the given files and concatenate them to the output file. Can remove original files on completion, or just write dummy content into them to free disk. """ if tmp is None: tmp = _get_tmp(output) logging.info(f"Resharding {inputs} to {tmp}, will move later to {output}") jsonql.run_pipes(file=inputs, output=tmp) tmp.replace(output) tmp_index = get_index(tmp) if tmp_index.exists(): tmp_index.replace(get_index(output)) if not (free_original or rm_original): return output for _input in inputs: if rm_original: _input.unlink() elif free_original: # Overwrite the previous file. # This frees up disk space and allows doit to properly track the success. _input.write_text(f"Resharded into {output}") if get_index(_input).is_file(): get_index(_input).unlink() return output
6,230
def on_intent(intent_request, session): """ Called when the user specifies an intent for this skill """ print("on_intent requestId=" + intent_request['requestId'] + ", sessionId=" + session['sessionId']) intent = intent_request['intent'] intent_name = intent_request['intent']['name'] # Dispatch to your skill's intent handlers if intent_name == "StartIntent": '''if "attributes" in session.keys(): return answer_question(intent,session) ''' return start_feedback(intent, session) elif intent_name == "AnswerIntent": return answer_question(intent, session) elif intent_name == "AMAZON.ResumeIntent": return resume_feedback(intent, session) elif intent_name == "AMAZON.PauseIntent": return pause_feedback(intent, session) elif intent_name == "AMAZON.HelpIntent": return get_welcome_response() elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent": return handle_session_end_request(session) else: raise ValueError("Invalid intent")
6,231
def load_twitter(path, shuffle=True, rnd=1): """ load text files from twitter data :param path: path of the root directory of the data :param subset: what data will be loaded, train or test or all :param shuffle: :param rnd: random seed value :param vct: vectorizer :return: :raise ValueError: """ data = bunch.Bunch() data = convert_tweet_2_data(path, rnd) data = minimum_size_sraa(data) if shuffle: random_state = np.random.RandomState(rnd) indices = np.arange(data.target.shape[0]) random_state.shuffle(indices) data.target = data.target[indices] # Use an object array to shuffle: avoids memory copy data_lst = np.array(data.data, dtype=object) data_lst = data_lst[indices] data.data = data_lst return data
6,232
def parse_vtables(f):
    """
    Parse a given file f and construct or extend the vtable function dicts of the module
    specified in f.

    :param f: file containing a description of the vtables in a module (*_vtables.txt file)
    :return: the object representing the module specified in f
    """
    marx_module = Module(f.readline().strip())
    for line in f:
        tokens = line.split()
        vtable = marx_module.vtables[int(tokens.pop(0), 16)]
        vtable.offset_to_top = int(tokens.pop(0))
        index = 0
        for target_address in tokens:
            if index not in vtable.functions:
                vtable.functions[index] = Addressable(int(target_address, 16), marx_module)
            index += 1
    return marx_module
6,233
def getCifar10Dataset(root, isTrain=True):
    """Cifar-10 Dataset"""
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    if isTrain:
        trans = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, 4),
            transforms.ToTensor(),
            normalize,
        ])
    else:
        trans = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])

    return datasets.CIFAR10(root=root, train=isTrain, transform=trans, download=isTrain)
6,234
def histogram(layer, num_bins: int = 256, minimum=None, maximum=None, use_cle=True):
    """
    This function determines a histogram for a layer and caches it within the metadata of the layer.
    If the same histogram is requested, it will be taken from the cache.
    :return:
    """
    if "bc_histogram_num_bins" in layer.metadata.keys() and "bc_histogram" in layer.metadata.keys():
        if num_bins == layer.metadata["bc_histogram_num_bins"]:
            return layer.metadata["bc_histogram"]

    data = layer.data
    if "dask" in str(type(data)):  # ugh
        data = np.asarray(data)

    intensity_range = None
    if minimum is not None and maximum is not None:
        intensity_range = (minimum, maximum)

    if use_cle:
        try:
            import pyclesperanto_prototype as cle
            hist = np.asarray(cle.histogram(data, num_bins=num_bins, minimum_intensity=minimum,
                                            maximum_intensity=maximum, determine_min_max=False))
        except ImportError:
            use_cle = False
    if not use_cle:
        hist, _ = np.histogram(data, bins=num_bins, range=intensity_range)

    # cache result
    if hasattr(layer.data, "bc_histogram_num_bins") and hasattr(layer.data, "bc_histogram"):
        if num_bins == layer.data.bc_histogram_num_bins:
            return layer.data.bc_histogram

    # delete cache when data is changed
    def _refresh_data(event):
        reset_histogram_cache(layer)
        layer.events.data.disconnect(_refresh_data)

    layer.events.data.connect(_refresh_data)

    layer.metadata["bc_histogram_num_bins"] = num_bins
    layer.metadata["bc_histogram"] = hist

    return hist
6,235
def get_tbl_type(num_tbl, num_cols, len_tr, content_tbl):
    """ obtain table type based on table features """
    count_very_common = len([i for i, x in enumerate(content_tbl) if re.match(r'^very common', x)])
    count_common = len([i for i, x in enumerate(content_tbl) if re.match(r'^common', x)])
    count_uncommon = len([i for i, x in enumerate(content_tbl) if re.match(r'^uncommon', x)])
    count_rare = len([i for i, x in enumerate(content_tbl) if re.match(r'^rare', x)])
    count_very_rare = len([i for i, x in enumerate(content_tbl) if re.match(r'^very rare', x)])
    count_unknown = len([i for i, x in enumerate(content_tbl) if "known" in x])
    count_feats = [count_very_common, count_common, count_uncommon, count_rare, count_very_rare, count_unknown]

    if num_cols > 3 and sum(count_feats) > num_cols + 5:
        tbl_type = 'table type: vertical'
    elif ((all(i < 2 for i in count_feats) and num_tbl <= 5) or num_cols > 4) and len_tr > 2:
        tbl_type = 'table type: horizontal'
    else:
        tbl_type = 'table type: vertical'
    return tbl_type
6,236
def decode_EAN13(codes):
    """
    Return the string of digits obtained by decoding the 0/1 string stored in codes
    as EAN-13. In the following cases, return an empty string instead (these usually
    come from the barcode reader misreading the 0s and 1s):

        codes holds the wrong number of bits, or its pattern does not match the specification
        some parts of the code cannot be converted to digits
        the 13th digit read does not match the computed check digit

    Note: the user may have scanned a barcode that was placed upside down;
    this function must handle that case as well.

    Doctest :
        >>> c = '10100100110011001010011100110110010111001001101010111001010111001001110110011011101001101100101'
        >>> decode_EAN13(c)
        '3210292045192'
        >>> c = '10111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111101'
        >>> decode_EAN13(c)
        ''
    """
    result = ''
    try:
        if len(codes) != 95:
            return ''
        else:
            number_group1 = digits_of(codes[3:45])
            code_group1 = digits_from(codes[3:45])
            number_group2 = digits_of(codes[50:-3])
            code_group2 = digits_from(codes[50:-3])
            if code_group2 == 'RRRRRR':
                result = str(return_group1(code_group1)) + number_group1 + number_group2
            elif code_group1 == 'RRRRRR':
                result = str(return_group1(code_group1)) + number_group1 + number_group2
            else:
                # Support when barcode reader read a barcode upside down
                # (strings are immutable, so build a reversed copy and re-parse from it)
                reverse_codes = codes[::-1]
                number_group1 = digits_of(reverse_codes[3:45])
                code_group1 = digits_from(reverse_codes[3:45])
                number_group2 = digits_of(reverse_codes[50:-3])
                code_group2 = digits_from(reverse_codes[50:-3])
                if code_group2 == 'RRRRRR':
                    result = str(return_group1(code_group1)) + number_group1 + number_group2
                elif code_group1 == 'RRRRRR':
                    result = str(return_group1(code_group1)) + number_group1 + number_group2
        return result
    except:
        return ''
6,237
def GiveNewT2C(Hc, T2C): """ The main routine, which computes T2C that diagonalized Hc, and is close to the previous T2C transformation, if possible, and it makes the new orbitals Y_i = \sum_m T2C[i,m] Y_{lm} real, if possible. """ ee = linalg.eigh(Hc) Es = ee[0] Us0= ee[1] Us = matrix(ee[1]) #print 'In Eigensystem:' #mprint(Us.H * Hc * Us) # Us.H * Hc * Us === diagonal print 'Eigenvalues=', Es.tolist() print 'Starting with transformation in crystal harmonics=' mprint(Us) print # Finds if there are any degeneracies in eigenvalues. deg = FindDegeneracies(Es) print 'deg=', deg for ig in deg: if len(ig)>1: # Two or more states are degenerate, we transform them with a unitary transformation, # so that they are close to previous set of eigenvectors. # This is not necessary, but convenient to keep the character similar to previous iteration. This is useful # in particular when H has small off-diagonal elements, which we would like to eliminate, and we call this # routine iteratively Us = TransformToSimilar(ig, Us, Es) print 'Next, the transformation in crystal harmonics=' mprint(Us) print final = array( Us.T*T2C ) print 'And the same transformation in spheric harmonics=' mprint( final ) # Here we will try to make the transformation real, so that ctqmc will have minimal sign problem even when Full is used. for ig in deg: final = TransformToReal(final, ig, Es) # finally checking if all transformations are real for ig in deg: i0 = ig[0] i2 = ig[-1]+1 #print 'Checking the set of orbitals:', Es[i0:i2] UtU = ComputeUtU(final[i0:i2,:], ig) if allclose( UtU, identity(len(ig)), rtol=1e-04, atol=1e-04 ): print ':SUCCESS For orbital', ig, 'the final transformation is real' else: print """:WARNING: The set of rbitals """, ig, """ could not be made purely real. You should use only Coulomb='Ising' and avoid Coulomb='Full' """ print 'UtU=', mprint(UtU) print return final
6,238
def normalize(form, text):
    """Return the normal form form for the Unicode string unistr.

    Valid values for form are 'NFC', 'NFKC', 'NFD', and 'NFKD'.
    """
    return unicodedata.normalize(form, text)
6,239
def handle_internal_validation_error(error):
    """
    Error handler to use when an InternalValidationError is raised.
    Alert message can be modified here as needed.

    :param error: The error that is handled.
    :return: an error view
    """
    alert_message = format_alert_message(error.__class__.__name__, str(error))
    return _handle_error(alert_message)
6,240
def bbox_encode(bboxes, targets):
    """
    :param bboxes: bboxes
    :param targets: target ground truth boxes
    :return: deltas
    """
    bw = bboxes[:, 2] - bboxes[:, 0] + 1.0
    bh = bboxes[:, 3] - bboxes[:, 1] + 1.0
    bx = bboxes[:, 0] + 0.5 * bw
    by = bboxes[:, 1] + 0.5 * bh
    tw = targets[:, 2] - targets[:, 0] + 1.0
    th = targets[:, 3] - targets[:, 1] + 1.0
    tx = targets[:, 0] + 0.5 * tw
    ty = targets[:, 1] + 0.5 * th
    dx = (tx - bx) / bw
    dy = (ty - by) / bh
    dw = np.log(tw / bw)
    dh = np.log(th / bh)
    deltas = np.vstack((dx, dy, dw, dh)).transpose()
    return deltas
6,241
def cipher(text: str, key: str, charset: str = DEFAULT_CHARSET) -> str:
    """ Cipher given text using Vigenere method.

    Be aware that different languages use different charsets. The default charset
    is for the English language; if you are using any other language you should use
    a proper charset. For instance, if you are ciphering a Spanish text, you should
    use a charset with the "ñ" character.

    This module uses only lowercase charsets. That means that caps will be kept
    but lowercase and uppercase will follow the same substitutions.

    :param text: Text to be ciphered.
    :param key: Secret key. Both ends should know this and use the same one.
        The longer the key you use, the harder the ciphered text is to break.
    :param charset: Charset used for Vigenere method. Both ends, ciphering
        and deciphering, should use the same charset or original text won't
        be properly recovered.
    :return: Ciphered text.
    """
    ciphered_text = _vigenere_offset(text, key, Vigenere.CIPHER, charset)
    return ciphered_text
6,242
def product_delete(product_id):
    """ Delete product from database """
    product_name = product_get_name(product_id)
    res = False
    # Delete product from database
    if product_name:
        mongo.db.products.delete_one({"_id": (ObjectId(product_id))})
        flash(
            product_name + " successfully deleted from products", "success")
        res = True
    return res
6,243
def alpha_072(enddate, index='all'):
    """
    Inputs:
        enddate: required, the date for which to compute the factor
        index: optional, stock index; defaults to all stocks ('all')
    Outputs:
        Series: index is the constituent stock codes, values are the corresponding factor values
    Formula:
        (rank(decay_linear(correlation(((high + low) / 2), adv40, 8.93345), 10.1519)) /
         rank(decay_linear(correlation(Ts_Rank(vwap, 3.72469), Ts_Rank(volume, 18.5188), 6.86671), 2.95011)))
    """
    enddate = to_date_str(enddate)
    func_name = sys._getframe().f_code.co_name
    return JQDataClient.instance().get_alpha_101(**locals())
6,244
def update_input(): """ update all / reads the input and stores it to be returned by read_input() """ global UP, DOWN, LEFT, RIGHT, NEXT, BACK, pygame_events global up_state, down_state, left_state, right_state, next_state, back_state global up_state_prev, down_state_prev, left_state_prev, right_state_prev, next_state_prev, back_state_prev if gl.os_is_linux: global UP_BT, DOWN_BT, LEFT_BT, RIGHT_BT, NEXT_BT, BACK_BT # refresh previous states up_state_prev, down_state_prev, left_state_prev, right_state_prev, next_state_prev, back_state_prev = up_state, down_state, left_state, right_state, next_state, back_state # read current state if gl.os_is_linux: # for the raspberry pi / buttons on gpio up_state = not UP_BT.is_pressed # read every input down_state = not DOWN_BT.is_pressed left_state = not LEFT_BT.is_pressed right_state = not RIGHT_BT.is_pressed next_state = not NEXT_BT.is_pressed back_state = not BACK_BT.is_pressed # for keyboard for event in pygame_events: if event.type == pygame.KEYDOWN: # if pressed down if(event.key == pygame.K_UP): up_state = True if(event.key == pygame.K_DOWN): down_state = True if(event.key == pygame.K_LEFT): left_state = True if(event.key == pygame.K_RIGHT): right_state = True if(event.key == pygame.K_RETURN): next_state = True if(event.key == pygame.K_DELETE): back_state = True if event.type == pygame.KEYUP: # if released if(event.key == pygame.K_UP): up_state = False if(event.key == pygame.K_DOWN): down_state = False if(event.key == pygame.K_LEFT): left_state = False if(event.key == pygame.K_RIGHT): right_state = False if(event.key == pygame.K_RETURN): next_state = False if(event.key == pygame.K_DELETE): back_state = False # check for credits (activate by either pressing all button down at once or pressing tab) # it only test for one key first to save time if (not up_state and gl.os_is_linux == True) or (event.type == pygame.KEYDOWN and event.key == pygame.K_TAB): if not down_state and not left_state and not right_state and not next_state and not back_state or (event.type == pygame.KEYDOWN and event.key == pygame.K_TAB): print("[IO UI] go to credits") gl.cr_prev_pos = gl.prog_pos gl.prog_pos = 'cr'
6,245
def getBoxFolderPathName(annotationDict, newWidth, newHeight):
    """
    getBoxFolderPathName returns the folder name which contains the resized
    image files for an original image file.

    Given image 'n02085620_7', you can find the resized images at:
    'F:/dogs/images/n02085620-Chihuahua/boxes_64_64/'

    input:
        annotationDict: dictionary, contains filename
        newWidth: int, the new width for the image
        newHeight: int, the new height for the image

    output:
        returns a string, the folder path for the resized images
    """
    folderName = getImageFolderPathName(annotationDict)
    boxFolder = BOX_FOLDER + str(newWidth) + '_' + str(newHeight)
    return IMAGE_PATH + folderName + '/' + boxFolder + '/'
6,246
def _download_artifact_from_uri(artifact_uri, output_path=None):
    """
    :param artifact_uri: The *absolute* URI of the artifact to download.
    :param output_path: The local filesystem path to which to download the artifact.
                        If unspecified, a local output path will be created.
    """
    store = _get_store(artifact_uri=artifact_uri)
    artifact_path_module = \
        get_artifact_repository(artifact_uri, store).get_path_module()
    artifact_src_dir = artifact_path_module.dirname(artifact_uri)
    artifact_src_relative_path = artifact_path_module.basename(artifact_uri)
    artifact_repo = get_artifact_repository(
        artifact_uri=artifact_src_dir, store=store)
    return artifact_repo.download_artifacts(
        artifact_path=artifact_src_relative_path, dst_path=output_path)
6,247
def api_get_project_members(request, key=None, hproPk=True):
    """Return the list of project members"""

    if not check_api_key(request, key, hproPk):
        return HttpResponseForbidden()

    if settings.PIAPI_STANDALONE:
        if not settings.PIAPI_REALUSERS:
            users = [generate_user(pk="-1"), generate_user(pk="-2"), generate_user(pk="-3")]
        else:
            users = DUser.object.all()
    else:
        (_, _, hproject) = getPlugItObject(hproPk)
        users = []
        for u in hproject.getMembers():
            u.ebuio_member = True
            u.ebuio_admin = hproject.isMemberWrite(u)
            u.subscription_labels = _get_subscription_labels(u, hproject)
            users.append(u)

    liste = []
    for u in users:
        retour = {}
        for prop in settings.PIAPI_USERDATA:
            if hasattr(u, prop):
                retour[prop] = getattr(u, prop)
        retour['id'] = str(retour['pk'])
        liste.append(retour)

    return HttpResponse(json.dumps({'members': liste}), content_type="application/json")
6,248
def sic_or_lm_silhoutte(image, sensor):
    """
    :param sensor: string with the sensor to be used, options:
        - mw_sic
        - lm
    """
    print "Preparing silhoutte for {0} using {1}".format(image, sensor)
    if sensor == 'mw_sic':
        img = SIC(image)
    if sensor == 'lm':
        img = LM(image)
    silhoutte(img)
6,249
def backtest_chart3(Results, title='Portfolio Backtests', figsize=(15, 9), save=False, show=True, colormap='jet'):
    """
    Plots the performance for all efficient frontier portfolios.

    :param Results: (object) Results object from bt.backtest.Result(*backtests). Refer to the following
        documentation https://pmorissette.github.io/bt/bt.html?highlight=display#bt.backtest.Result
    :param figsize: (float, float) Optional, width, height in inches. Defaults to (15, 9).
    :param save: (bool) Optional, saves the chart to disk. Defaults to False.
    :param show: (bool) Optional, displays plot. Defaults to True.
    :param colormap: (str or matplotlib colormap object) Colormap to select colors from. If string,
        load colormap with that name from matplotlib. Defaults to 'jet'.
    :return: (fig) Plot of performance for all efficient frontier portfolios.
    """
    plot = Results.plot(title=title, figsize=figsize, colormap=colormap)
    fig = plot.get_figure()
    plt.legend(loc="upper left")
    if save:
        plt.savefig(
            '../charts/linechart_{}.png'.format(datetime.today().strftime('%m-%d-%Y')),
            bbox_inches='tight')
    if not show:
        plt.close()
6,250
def random(n, mind):
    """Does not guarantee that it's connected (TODO)!"""
    return bidirectional({i: sample(range(n), mind) for i in range(n)})
6,251
def process_tare_drag(nrun, plot=False): """Processes a single tare drag run.""" print("Processing tare drag run", nrun) times = {0.2: (15, 120), 0.3: (10, 77), 0.4: (10, 56), 0.5: (8, 47), 0.6: (10, 40), 0.7: (8, 33), 0.8: (5, 31), 0.9: (8, 27), 1.0: (6, 24), 1.1: (9, 22), 1.2: (8, 21), 1.3: (7, 19), 1.4: (6, 18)} rdpath = os.path.join(raw_data_dir, "Tare-drag", str(nrun)) with open(os.path.join(rdpath, "metadata.json")) as f: metadata = json.load(f) speed = float(metadata["Tow speed (m/s)"]) nidata = loadhdf(os.path.join(rdpath, "nidata.h5")) time_ni = nidata["time"] drag = nidata["drag_left"] + nidata["drag_right"] drag = drag - np.mean(drag[:2000]) t1, t2 = times[speed] meandrag, x = ts.calcstats(drag, t1, t2, 2000) print("Tare drag =", meandrag, "N at", speed, "m/s") if plot: plt.figure() plt.plot(time_ni, drag, 'k') plt.show() return speed, meandrag
6,252
def _alpha_blend_numexpr1(rgb1, alpha1, rgb2, alpha2):
    """ Alternative. Not well optimized """
    import numexpr
    alpha1_ = alpha1[..., None]  # NOQA
    alpha2_ = alpha2[..., None]  # NOQA
    alpha3 = numexpr.evaluate('alpha1 + alpha2 * (1.0 - alpha1)')
    alpha3_ = alpha3[..., None]  # NOQA
    rgb3 = numexpr.evaluate('((rgb1 * alpha1_) + (rgb2 * alpha2_ * (1.0 - alpha1_))) / alpha3_')
    rgb3[alpha3 == 0] = 0
    return rgb3, alpha3
6,253
def chunks(arr: list, n: int) -> Generator:
    """
    Yield successive n-sized chunks from arr.
    :param arr
    :param n
    :return generator
    """
    for i in range(0, len(arr), n):
        yield arr[i:i + n]
6,254
def test_serialize_unknown_type():
    """Check that RuleSerializeError is raised on attempt to serialize rule of unknown type.

    1. Create rule factory.
    2. Register new rule type.
    3. Try to serialize a rule with unregistered type.
    4. Check that RuleSerializeError is raised.
    5. Check the message of the error.
    """
    rule_factory = RuleFactory()

    serializer = lambda *args, **kwargs: None
    rule_factory.register_rule(rule_type="Test", parser=None, serializer=serializer)

    rule = RuleClass(rule_type="NonExistentType", parameters={})

    with pytest.raises(RuleSerializeError) as exception_info:
        rule_factory.serialize_rule(rule=rule)

    expected_message = "Failed to serialize rule {0}. Unknown type 'NonExistentType'".format(rule)
    assert exception_info.value.args[0] == expected_message, "Wrong error message"
6,255
def clean_weight(v):
    """Clean the weight variable

    Args:
        v (pd.Series): Series containing all weight values

    Returns:
        v (pd.Series): Series containing all cleaned weight values
    """
    # Filter out erroneous non-float values
    indices = v.astype(str).apply(
        lambda x: not re.match(reg_exps['re_lab_vals'], x))
    v.loc[indices] = None

    # Convert values to float
    v = v.astype(float)

    # Sometimes the value is given in grams -- convert to kg
    indices_g = v > 100
    v.loc[indices_g] = v[indices_g].apply(lambda x: x / 1000)

    return v
6,256
def step_matcher(name):
    """
    DEPRECATED, use :func:`use_step_matcher()` instead.
    """
    # -- BACKWARD-COMPATIBLE NAME: Mark as deprecated.
    warnings.warn("deprecated: Use 'use_step_matcher()' instead",
                  DeprecationWarning, stacklevel=2)
    use_step_matcher(name)
6,257
def get_yield(category):
    """
    Get the primitive yield node of a syntactic category.
    """
    if isinstance(category, PrimitiveCategory):
        return category
    elif isinstance(category, FunctionalCategory):
        return get_yield(category.res())
    else:
        raise ValueError("unknown category type with instance %r" % category)
6,258
def box(type_):
    """Create a non-iterable box type for an object.

    Parameters
    ----------
    type_ : type
        The type to create a box for.

    Returns
    -------
    box : type
        A type to box values of type ``type_``.
    """
    class c(object):
        __slots__ = 'value',

        def __init__(self, value):
            if not isinstance(value, type_):
                raise TypeError(
                    "values must be of type '%s' (received '%s')" % (
                        type_.__name__, type(value).__name__,
                    ),
                )
            self.value = value

    c.__name__ = 'Boxed' + type_.__name__
    return c
6,259
def cmorization(in_dir, out_dir, cfg, _):
    """Cmorization func call."""
    cmorizer = OSICmorizer(in_dir, out_dir, cfg, 'sh')
    cmorizer.cmorize()
6,260
def test__reac__elimination(): """ test elimination functionality """ rct_smis = ['CCCO[O]'] prd_smis = ['CC=C', 'O[O]'] rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis) rxn, geo, _, _ = rxn_objs[0] # reaction object aligned to z-matrix keys # (for getting torsion coordinate names) zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo) zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct) # You can also do this to determine linear atoms from zmatrix: # bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma) bnd_keys = automol.reac.rotational_bond_keys(zrxn) names = {automol.zmat.torsion_coordinate_name(zma, *k) for k in bnd_keys} assert names == {'D9'} print(automol.zmat.string(zma, one_indexed=False)) print(names) scan_name = automol.reac.scan_coordinate(zrxn, zma) const_names = automol.reac.constraint_coordinates(zrxn, zma) assert scan_name == 'R2' assert const_names == () print(scan_name) print(const_names) # graph aligned to geometry keys # (for getting rotational groups and symmetry numbers) geo, gdummy_key_dct = automol.zmat.geometry_with_conversion_info(zma) grxn = automol.reac.relabel_for_geometry(zrxn) print(automol.geom.string(geo)) # Check that the reaction object can be converted back, if needed old_zrxn = zrxn zrxn = automol.reac.insert_dummy_atoms(grxn, gdummy_key_dct) assert zrxn == old_zrxn gbnd_keys = automol.reac.rotational_bond_keys(grxn) assert len(gbnd_keys) == len(bnd_keys) axes = sorted(map(sorted, gbnd_keys)) groups_lst = [automol.reac.rotational_groups(grxn, *a) for a in axes] sym_nums = [ automol.reac.rotational_symmetry_number(grxn, *a) for a in axes] assert sym_nums == [3] for axis, groups, sym_num in zip(axes, groups_lst, sym_nums): print('axis:', axis) print('\tgroup 1:', groups[0]) print('\tgroup 2:', groups[1]) print('\tsymmetry number:', sym_num) # Extra test cases: rxn_smis_lst = [ (['CCC'], ['CC', '[CH2]']), ] for rct_smis, prd_smis in rxn_smis_lst: rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis) rxn, geo, _, _ = rxn_objs[0] # reaction object aligned to z-matrix keys # (for getting torsion coordinate names) zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo) zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct) # You can also do this to determine linear atoms from zmatrix: # bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma) bnd_keys = automol.reac.rotational_bond_keys(zrxn) names = {automol.zmat.torsion_coordinate_name(zma, *k) for k in bnd_keys} print(automol.zmat.string(zma, one_indexed=True)) print(names) scan_name = automol.reac.scan_coordinate(zrxn, zma) const_names = automol.reac.constraint_coordinates(zrxn, zma) print(scan_name) print(const_names) # graph aligned to geometry keys # (for getting rotational groups and symmetry numbers) geo, _ = automol.zmat.geometry_with_conversion_info(zma) grxn = automol.reac.relabel_for_geometry(zrxn) print(automol.geom.string(geo)) gbnd_keys = automol.reac.rotational_bond_keys(grxn) axes = sorted(map(sorted, gbnd_keys)) for axis in axes: print('axis:', axis) groups = automol.reac.rotational_groups(grxn, *axis) print('\tgroup 1:', groups[0]) print('\tgroup 2:', groups[1]) sym_num = automol.reac.rotational_symmetry_number(grxn, *axis) print('\tsymmetry number:', sym_num)
6,261
def rssError(yArr, yHatArr):
    """
    Desc:
        Compute the size of the prediction error
    Args:
        yArr: the true target values
        yHatArr: the predicted estimates
    Returns:
        The sum of squared differences between the true and estimated values
    """
    return ((yArr - yHatArr) ** 2).sum()
6,262
def initialize_vocabulary(vocabulary_file):
    """
    Initialize vocabulary from file.

    :param vocabulary_file: file containing vocabulary.
    :return: vocabulary and reversed vocabulary
    """
    if gfile.Exists(vocabulary_file):
        rev_vocab = []
        with gfile.GFile(vocabulary_file, mode="rb") as f:
            rev_vocab.extend(f.readlines())
        rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]
        vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
        return vocab, rev_vocab
    else:
        raise ValueError("Vocabulary file %s doesn't exist.", vocabulary_file)
6,263
def mock_create_draft(mocker):
    """Mock the createDraft OpenAPI.

    Arguments:
        mocker: The mocker fixture.

    Returns:
        The patched mocker and response data.
    """
    response_data = {"draftNumber": 1}
    return (
        mocker.patch(
            f"{gas.__name__}.Client.open_api_do", return_value=mock_response(data=response_data)
        ),
        response_data,
    )
6,264
def test_vi_xbuild():
    """
    iv = ['7', '19', '23']
    opts = {'--build': True}
    rv = ['7', '19', '23', '1']
    """
    pytest.dbgfunc()
    inp, exp = '7.19.23', '7.19.23.1'
    assert exp.split('.') == gitr.version_increment(inp.split('.'), {'--build': True})
6,265
def to_csv(data_path="data"):
    """Transform data and save as CSV.

    Args:
        data_path (str, optional): Path to dir holding JSON dumps. Defaults to "data".

    Returns:
        pd.DataFrame: Transformed data.
    """
    elements = []
    for data in tqdm(list_data_dir(data_path)):
        try:
            data = load_json(data)
            add_gw_and_download_time(
                data["elements"], data["download_time"], get_game_week(data["events"])
            )
            add_unique_id(data["elements"])
            elements.extend(data["elements"])
            # Add transformations here
        except TypeError:
            print(f"Something is wrong in {data}")
    return pd.DataFrame(elements)
6,266
def do_on_subscribe(source: Observable, on_subscribe):
    """Invokes an action on subscription.

    This can be helpful for debugging, logging, and other side effects
    on the start of an operation.

    Args:
        on_subscribe: Action to invoke on subscription
    """
    def subscribe(observer, scheduler=None):
        on_subscribe()
        return source.subscribe_(observer.on_next, observer.on_error,
                                 observer.on_completed, scheduler)

    return Observable(subscribe)
6,267
def get_language_file_path(language):
    """
    :param language: string
    :return: string: path to where the language file lies
    """
    return "{lang}/localization_{lang}.json".format(lang=language)
6,268
def json_loader(path: str = None) -> Union[Dict, list]:
    """
    Loads json or jsonl data

    Args:
        path (str, optional): path to file

    Returns:
        objs : Union[Dict, list]: Returns a list or dict of json data
        json_format : format of file (json or jsonl)
    """
    check_extension = os.path.splitext(path)[1]
    objs = []
    json_format = None
    with open(path, "r") as file_p:
        if check_extension == ".jsonl":
            lines = file_p.readlines()
            for line in lines:
                objs.append(json.loads(line))
            json_format = "jsonl"
        elif check_extension == ".json":
            objs = json.load(file_p)
            json_format = "json"
    return objs, json_format
6,269
def handle_response(request_object): """Parses the response from a request object. On an a resolvable error, raises a DataRequestException with a default error message. Parameters ---------- request_object: requests.Response The response object from an executed request. Returns ------- dict, str or None Note that this function checks the content-type of a response and returns the appropriate type. A Dictionary parsed from a JSON object, or a string. Returns None when a 204 is encountered. Users should be mindful of the expected response body from the API. Raises ------ sfa_dash.errors.DataRequestException If a recoverable 400 level error has been encountered. The errors attribute will contain a dict of errors. requests.exceptions.HTTPError If the status code received from the API could not be handled. """ if not request_object.ok: errors = {} if request_object.status_code == 400: errors = request_object.json() elif request_object.status_code == 401: errors = { '401': "Unauthorized." } elif request_object.status_code == 404: previous_page = request.headers.get('Referer', None) errors = {'404': ( 'The requested object could not be found. You may need to ' 'request access from the data owner.') } if previous_page is not None and previous_page != request.url: errors['404'] = errors['404'] + ( f' <a href="{escape(previous_page)}">Return to the ' 'previous page.</a>') elif request_object.status_code == 422: errors = request_object.json()['errors'] if errors: raise DataRequestException(request_object.status_code, **errors) else: # Other errors should be due to bugs and not by attempts to reach # inaccessible data. Allow exceptions to be raised # so that they can be reported to Sentry. request_object.raise_for_status() if request_object.request.method == 'GET': # all GET endpoints should return a JSON object if request_object.headers['Content-Type'] == 'application/json': return request_object.json() else: return request_object.text # POST responses should contain a single string uuid of a newly created # object unless a 204 No Content was returned. if request_object.request.method == 'POST': if request_object.status_code != 204: return request_object.text
6,270
def _get_zoom_list_recordings_list() -> List[str]:
    """Get the list of all the recordings."""
    # The local path for zoom recording is ~/Documents/Zoom
    # Get the home directory
    file_list = os.listdir(ZOOM_DIR)
    files = []
    for f in file_list:
        files.append(f)
        files.append(Separator())
    return files
6,271
def evaluate(expn):
    """
    Evaluate a simple mathematical expression.

    @rtype: C{Decimal}
    """
    try:
        result, err = CalcGrammar(expn).apply('expn')
        return result
    except ParseError:
        raise SyntaxError(u'Could not evaluate the provided mathematical expression')
6,272
def is_device_removable(device):
    """
    This function returns whether a given device is removable or not by looking at the
    corresponding /sys/block/<device>/removable file
    @param device: The filesystem path to the device, e.g. /dev/sda1
    """
    # Shortcut the case where the device is an SD card. The kernel/udev currently
    # consider SD cards (mmcblk devices) to be non-removable.
    if os.path.basename(device).startswith("mmcblk"):
        return True

    path = _get_device_removable_file_path(device)
    if not path:
        return False

    contents = None
    try:
        with open(path, "r") as f:
            contents = f.readline()
    except IOError:
        return False

    if contents.strip() == "1":
        return True
    return False
6,273
def get_user(is_external: Optional[bool] = None, name: Optional[str] = None, username: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUserResult: """ Use this data source to retrieve information about a Rancher v2 user ## Example Usage ```python import pulumi import pulumi_rancher2 as rancher2 foo = rancher2.get_user(username="foo") ``` :param bool is_external: Set is the user if the user is external. Default: `false` (bool) :param str name: The name of the user (string) :param str username: The username of the user (string) """ __args__ = dict() __args__['isExternal'] = is_external __args__['name'] = name __args__['username'] = username if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('rancher2:index/getUser:getUser', __args__, opts=opts, typ=GetUserResult).value return AwaitableGetUserResult( annotations=__ret__.annotations, enabled=__ret__.enabled, id=__ret__.id, is_external=__ret__.is_external, labels=__ret__.labels, name=__ret__.name, principal_ids=__ret__.principal_ids, username=__ret__.username)
6,274
def randbytes(size) -> bytes:
    """Custom implementation of random.randbytes, since that's a Python 3.9 feature"""
    return bytes(random.sample(list(range(0, 255)), size))
6,275
def path_to_model(path):
    """Return model name from path."""
    epoch = str(path).split("phase")[-1]
    model = str(path).split("_dir/")[0].split("/")[-1]
    return f"{model}_epoch{epoch}"
6,276
def _cpp_het_stat(amplitude_distribution, t_stop, rates, t_start=0.*pq.ms): """ Generate a Compound Poisson Process (CPP) with amplitude distribution A and heterogeneous firing rates r=r[0], r[1], ..., r[-1]. Parameters ---------- amplitude_distribution : np.ndarray CPP's amplitude distribution :math:`A`. `A[j]` represents the probability of a synchronous event of size `j` among the generated spike trains. The sum over all entries of :math:`A` must be equal to one. t_stop : pq.Quantity The end time of the output spike trains rates : pq.Quantity Array of firing rates of each spike train generated with t_start : pq.Quantity, optional The start time of the output spike trains Default: 0 pq.ms Returns ------- list of neo.SpikeTrain List of neo.SpikeTrains with different firing rates, forming a CPP with amplitude distribution `A`. """ # Computation of Parameters of the two CPPs that will be merged # (uncorrelated with heterog. rates + correlated with homog. rates) n_spiketrains = len(rates) # number of output spike trains # amplitude expectation expected_amplitude = np.dot( amplitude_distribution, np.arange(n_spiketrains + 1)) r_sum = np.sum(rates) # sum of all output firing rates r_min = np.min(rates) # minimum of the firing rates # rate of the uncorrelated CPP r_uncorrelated = r_sum - n_spiketrains * r_min # rate of the correlated CPP r_correlated = r_sum / expected_amplitude - r_uncorrelated # rate of the hidden mother process r_mother = r_uncorrelated + r_correlated # Check the analytical constraint for the amplitude distribution if amplitude_distribution[1] < (r_uncorrelated / r_mother).rescale( pq.dimensionless).magnitude: raise ValueError('A[1] too small / A[i], i>1 too high') # Compute the amplitude distribution of the correlated CPP, and generate it amplitude_distribution = \ amplitude_distribution * (r_mother / r_correlated).magnitude amplitude_distribution[1] = \ amplitude_distribution[1] - r_uncorrelated / r_correlated compound_poisson_spiketrains = _cpp_hom_stat( amplitude_distribution, t_stop, r_min, t_start) # Generate the independent heterogeneous Poisson processes poisson_spiketrains = \ [StationaryPoissonProcess( rate=rate - r_min, t_start=t_start, t_stop=t_stop ).generate_spiketrain() for rate in rates] # Pool the correlated CPP and the corresponding Poisson processes return [_pool_two_spiketrains(compound_poisson_spiketrain, poisson_spiketrain) for compound_poisson_spiketrain, poisson_spiketrain in zip(compound_poisson_spiketrains, poisson_spiketrains)]
6,277
def validate_uncles(state, block): """Validate the uncles of this block.""" # Make sure hash matches up if utils.sha3(rlp.encode(block.uncles)) != block.header.uncles_hash: raise VerificationFailed("Uncle hash mismatch") # Enforce maximum number of uncles if len(block.uncles) > state.config['MAX_UNCLES']: raise VerificationFailed("Too many uncles") # Uncle must have lower block number than blockj for uncle in block.uncles: if uncle.number >= block.header.number: raise VerificationFailed("Uncle number too high") # Check uncle validity MAX_UNCLE_DEPTH = state.config['MAX_UNCLE_DEPTH'] ancestor_chain = [block.header] + \ [a for a in state.prev_headers[:MAX_UNCLE_DEPTH + 1] if a] # Uncles of this block cannot be direct ancestors and cannot also # be uncles included 1-6 blocks ago ineligible = [b.hash for b in ancestor_chain] for blknum, uncles in state.recent_uncles.items(): if state.block_number > int( blknum) >= state.block_number - MAX_UNCLE_DEPTH: ineligible.extend([u for u in uncles]) eligible_ancestor_hashes = [x.hash for x in ancestor_chain[2:]] for uncle in block.uncles: if uncle.prevhash not in eligible_ancestor_hashes: raise VerificationFailed("Uncle does not have a valid ancestor") parent = [x for x in ancestor_chain if x.hash == uncle.prevhash][0] if uncle.difficulty != calc_difficulty( parent, uncle.timestamp, config=state.config): raise VerificationFailed("Difficulty mismatch") if uncle.number != parent.number + 1: raise VerificationFailed("Number mismatch") if uncle.timestamp < parent.timestamp: raise VerificationFailed("Timestamp mismatch") if uncle.hash in ineligible: raise VerificationFailed("Duplicate uncle") if uncle.gas_used > uncle.gas_limit: raise VerificationFailed("Uncle used too much gas") if not check_pow(state, uncle): raise VerificationFailed('uncle pow mismatch') ineligible.append(uncle.hash) return True
6,278
def quote_index(q_t, tr_t):
    """Get start and end index of quote times in `q_t` with the same timestamp as trade times in `tr_t`."""
    left, right = get_ind(q_t, tr_t)
    right[left < right] -= 1  # last quote cannot be traded on, so shift index
    left -= 1  # consider last quote from before the timestamp of the trade
    left[left < 0] = 0
    return left, right
6,279
def calculate_learning_curves_train_test(K, y, train_indices, test_indices, sampled_order_train, tau, stop_t=None): """Calculate learning curves (train, test) from running herding algorithm Using the sampled order from the sampled_order indexing array calculate the learning curves on the train set using GKRR. Note that we pass K instead of calculating it on the fly, that's why we don't use s2 explicitly, it's already used in calculating K. :param K: (np.ndarray, (n, n)) full kernel matrix from dataset :param y: (np.ndarray, (n, 1)) output array :param train_indices: (np.ndarray, (n_train,)) train indices from the original dataset :param test_indices: (np.ndarray, (n_train,)) test indices from the original dataset :param sampled_order_train: (np.ndarray, (n_train,)) order of the sampled training indices :param tau: (float) regularisation parameter used in GKRR :param stop_t: (int) final step of calculations :return learning_curve_train: (np.ndarray, (stop_t,)) array of mse for train set :return learning_curve_test: (np.ndarray, (stop_t,)) array of mse for test set """ gaussian_kr = GaussianKernelRidgeRegression( tau=tau, s2=None, precompute_K=True) # Index K differently depending on what we do. # When predicting, we need the kernel matrix to be # K_mn, where m indexes the set to predict over and # n indexes the set we train over K_train = K[np.ix_(train_indices, train_indices)] K_test = K[np.ix_(test_indices, test_indices)] K_test_train = K[np.ix_(test_indices, train_indices)] K_sampled_train = K_train[np.ix_(sampled_order_train, sampled_order_train)] y_train = y[train_indices] y_test = y[test_indices] y_sampled_train = y_train[sampled_order_train] n_train = K_train.shape[0] n_test = K_test.shape[0] if stop_t is None: stop_t = n_train learning_curve_train = np.zeros(stop_t) learning_curve_test = np.zeros(stop_t) for t in range(stop_t): K_sampled_train_t = K_sampled_train[0:t+1, 0:t+1] gaussian_kr.fit(X=K_sampled_train_t, y=y_sampled_train[:t+1]) # Predict for train set K_xn_train = K_train[np.ix_( np.arange(n_train), sampled_order_train[:t+1])] y_train_ = gaussian_kr.predict(K_xn_train) learning_curve_train[t] = mean_squared_error(y_train, y_train_) # Then test set K_xn_test = K_test_train[np.ix_( np.arange(n_test), sampled_order_train[:t+1])] y_test_ = gaussian_kr.predict(K_xn_test) learning_curve_test[t] = mean_squared_error(y_test, y_test_) return learning_curve_train, learning_curve_test
6,280
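The entry above depends on a project-specific GaussianKernelRidgeRegression, so this sketch reproduces the same growing-subset learning-curve loop with scikit-learn's KernelRidge on a precomputed RBF kernel; the data, gamma and alpha values are made-up stand-ins for X, s2 and tau.

import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics import mean_squared_error
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.default_rng(0)
X = rng.normal(size=(60, 2))
y = np.sin(X[:, 0]) + 0.1 * rng.normal(size=60)

train, test = np.arange(40), np.arange(40, 60)
order = rng.permutation(40)              # stands in for the herding-sampled order
K = rbf_kernel(X, X, gamma=0.5)          # full precomputed kernel

train_curve, test_curve = [], []
for t in range(1, len(order) + 1):
    idx = train[order[:t]]               # first t sampled training points
    model = KernelRidge(alpha=1e-3, kernel="precomputed")
    model.fit(K[np.ix_(idx, idx)], y[idx])
    train_curve.append(mean_squared_error(y[train], model.predict(K[np.ix_(train, idx)])))
    test_curve.append(mean_squared_error(y[test], model.predict(K[np.ix_(test, idx)])))
print(train_curve[-1], test_curve[-1])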
def json2vcf(jsonfile, outputfile):
    """Function to grab variant(s) from a JSON file and write a VCF to outputfile.
    Currently this function assumes that there is a chromosome field that has the
    chromosome number encoded in 'code'. Hopefully this generalizes."""

    # Make the vcf file w/ header
    vcf_filename = outputfile
    try:
        os.remove(vcf_filename)
    except OSError:
        pass

    # Get variant from JSON file
    j = json.load(open(jsonfile))
    chrom = j['referenceSeq']['chromosome']['coding'][0]['code']
    pos = j['variant'][0]['start']
    ref = j['variant'][0]['referenceAllele']
    alt = j['variant'][0]['observedAllele']
    patient = j['patient']['reference']
    rspos = j['repository'][0]['variantsetId'].find('rs')
    rsid = j['repository'][0]['variantsetId'][rspos:]

    # Write the entry
    with open(vcf_filename, 'a') as v:
        # Write the metadata header
        v.write('##fileformat=VCFv4.0\n')
        v.write(('##source=' + jsonfile + '\n'))
        v.write('#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n')
        # Write the variant row
        v.write('{}\t{}\t{}\t{}\t{}\t.\t.\t.'.format(chrom, pos, rsid, ref, alt))
6,281
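A usage sketch for json2vcf, assuming the function above is importable. The JSON document below carries only the fields the parser actually touches, and every value in it (chromosome, position, alleles, rsID) is invented for illustration.

import json, os, tempfile

doc = {
    "referenceSeq": {"chromosome": {"coding": [{"code": "7"}]}},
    "variant": [{"start": 117199644, "referenceAllele": "A", "observedAllele": "G"}],
    "patient": {"reference": "Patient/example"},
    "repository": [{"variantsetId": "dbSNP:rs113993960"}],
}

# Dump the document to a temporary file, convert it, then show the resulting VCF.
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fp:
    json.dump(doc, fp)

json2vcf(fp.name, "example.vcf")
print(open("example.vcf").read())
os.remove(fp.name)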
def _CheckRequirements(requirements_file_path): """Checks that all package requirements specified in a file are met. Args: requirements_file_path: string. Path to a pip requirements file. """ try: with open(requirements_file_path, 'rb') as fp: for line in fp: pkg_resources.require(line) except (pkg_resources.DistributionNotFound, pkg_resources.VersionConflict) as e: # In newer versions of setuptools, these exception classes have a report # method that provides a readable description of the error. report = getattr(e, 'report', None) err_msg = report() if report else str(e) raise errors.Setup.PythonPackageRequirementUnfulfilled( 'A Python package requirement was not met while checking "{path}": ' '{msg}{linesep}To install required packages, execute the following ' 'command:{linesep}pip install -r "{path}"{linesep}To bypass package ' 'requirement checks, run PerfKit Benchmarker with the ' '--ignore_package_requirements flag.'.format( linesep=os.linesep, msg=err_msg, path=requirements_file_path))
6,282
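The same check can be exercised without PerfKit's error classes; this standalone sketch feeds requirement strings straight to pkg_resources.require and reports any that are missing or at a conflicting version (the package names are arbitrary examples).

import pkg_resources

requirements = ["requests>=2.0", "numpy"]
for req in requirements:
    try:
        # Raises if the distribution is absent or installed at an incompatible version.
        pkg_resources.require(req)
    except (pkg_resources.DistributionNotFound, pkg_resources.VersionConflict) as exc:
        print("unmet requirement %s: %s" % (req, exc))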
def create_account(param: CreateAccountParams) -> Transaction: """Generate a Transaction that creates a new account.""" raise NotImplementedError("create_account not implemented")
6,283
def check(datapath, config, output): """Cli for apps/morph_check.""" morph_check.main(datapath, config, output)
6,284
def removeString2(string, removeLen):
    """Clever trick: just remove runs directly with string replacement."""
    alphaNums = []
    for c in string:
        if c not in alphaNums:
            alphaNums.append(c)
    while True:
        preLength = len(string)
        for c in alphaNums:
            replaceStr = c * removeLen
            string = string.replace(replaceStr, '')
        if preLength == len(string):
            break
    return string
6,285
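Two quick calls showing the behaviour, assuming removeString2 from the entry above is in scope: runs of exactly removeLen identical characters are removed repeatedly until the string stops shrinking.

print(removeString2("aabbbaa", 3))   # -> "a": "bbb" goes first, then "aaa" out of the merged "aaaa"
print(removeString2("abcabc", 3))    # -> "abcabc": no single-character run of length 3, nothing removed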
def process_filing(client, file_path: str, filing_buffer: Union[str, bytes] = None, store_raw: bool = False,
                   store_text: bool = False):
    """
    Process a filing from a path or filing buffer.

    :param file_path: path to process; if filing_buffer is None, retrieved from here
    :param filing_buffer: buffer; if not present, s3_path must be set
    :param store_raw: whether to store raw document contents
    :param store_text: whether to extract and store document text
    :return:
    """
    # Log entry
    logger.info("Processing filing {0}...".format(file_path))

    # Check for existing record first
    try:
        filing = Filing.objects.get(s3_path=file_path)
        if filing is not None:
            logger.error("Filing {0} has already been created in record {1}".format(file_path, filing))
            return None
    except Filing.DoesNotExist:
        logger.info("No existing record found.")
    except Filing.MultipleObjectsReturned:
        logger.error("Multiple existing records found.")
        return None

    # Get buffer
    if filing_buffer is None:
        logger.info("Retrieving filing buffer from S3...")
        filing_buffer = client.get_buffer(file_path)

    # Get main filing data structure
    filing_data = openedgar.parsers.edgar.parse_filing(filing_buffer, extract=store_text)
    if filing_data["cik"] is None:
        logger.error("Unable to parse CIK from filing {0}; assuming broken and halting...".format(file_path))
        return None

    try:
        # Get company
        company = Company.objects.get(cik=filing_data["cik"])
        logger.info("Found existing company record.")

        # Check if record exists for date
        try:
            _ = CompanyInfo.objects.get(company=company, date=filing_data["date_filed"])
            logger.info("Found existing company info record.")
        except CompanyInfo.DoesNotExist:
            # Create company info record
            company_info = CompanyInfo()
            company_info.company = company
            company_info.name = filing_data["company_name"]
            company_info.sic = filing_data["sic"]
            company_info.state_incorporation = filing_data["state_incorporation"]
            company_info.state_location = filing_data["state_location"]
            company_info.date = filing_data["date_filed"].date() \
                if isinstance(filing_data["date_filed"], datetime.datetime) \
                else filing_data["date_filed"]
            company_info.save()
            logger.info("Created new company info record.")
    except Company.DoesNotExist:
        # Create company
        company = Company()
        company.cik = filing_data["cik"]

        try:
            # Catch race with another task/thread
            company.save()

            try:
                _ = CompanyInfo.objects.get(company=company, date=filing_data["date_filed"])
            except CompanyInfo.DoesNotExist:
                # Create company info record
                company_info = CompanyInfo()
                company_info.company = company
                company_info.name = filing_data["company_name"]
                company_info.sic = filing_data["sic"]
                company_info.state_incorporation = filing_data["state_incorporation"]
                company_info.state_location = filing_data["state_location"]
                company_info.date = filing_data["date_filed"]
                company_info.save()
        except django.db.utils.IntegrityError:
            company = Company.objects.get(cik=filing_data["cik"])

        logger.info("Created company and company info records.")

    # Now create the filing record
    try:
        filing = Filing()
        filing.form_type = filing_data["form_type"]
        filing.accession_number = filing_data["accession_number"]
        filing.date_filed = filing_data["date_filed"]
        filing.document_count = filing_data["document_count"]
        filing.company = company
        filing.sha1 = hashlib.sha1(filing_buffer).hexdigest()
        filing.s3_path = file_path
        filing.is_processed = False
        filing.is_error = True
        filing.save()
    except Exception as e:  # pylint: disable=broad-except
        logger.error("Unable to create filing record: {0}".format(e))
        return None

    # Create filing document records
    try:
        create_filing_documents(client, filing_data["documents"], filing, store_raw=store_raw,
store_text=store_text) filing.is_processed = True filing.is_error = False filing.save() return filing except Exception as e: # pylint: disable=broad-except logger.error("Unable to create filing documents for {0}: {1}".format(filing, e)) return None
6,286
def metadata_file(): """ Return the path to the first (as per a descending alphabetic sort) .csv file found at the expected location (<ffmeta_package_dir>/data/*.csv) This is assumed to be the latest metadata csv file. :return: The absolute path of the latest metadata csv file. """ dirname = os.path.dirname(ffmeta.__file__) valid_files = list(glob.glob(os.path.join(dirname, 'data', '*.csv'))) if not valid_files: raise RuntimeError('No valid metadata csv files found.') else: return sorted(valid_files)[-1]
6,287
def determineDicom(importedFile):
    """ Determines whether the Dicom file is PET, CT or invalid"""
    dicom_info = pydicom.dcmread(importedFile)

    # Each file type is passed to one of two handler functions.
    if dicom_info.Modality == 'CT':
        textArea.insert(END, 'CT Dicom file:\n')
        CTdicom(importedFile)
    elif dicom_info.Modality == 'PT':
        textArea.insert(END, 'PET Dicom file:\n')
        PETdicom(importedFile)
    else:
        textArea.insert(END, 'Not a Dicom or reconstruction parameter log file\n')
6,288
def _get_anchor_negative_triplet_mask(labels):
    """Return a 2D mask where mask[a, n] is True iff a and n have distinct labels.

    Args:
        labels: `torch.Tensor` of shape [batch_size, label_dim]; label equality is
            reduced over the trailing dimension

    Returns:
        mask: `torch.BoolTensor` of shape [batch_size, batch_size]
    """
    # Check if labels[i] != labels[k]
    # Uses broadcasting where the 1st argument has shape (1, batch_size, label_dim)
    # and the 2nd (batch_size, 1, label_dim)
    return ~(labels.unsqueeze(0) == labels.unsqueeze(1)).all(-1)
6,289
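A small PyTorch check of the mask, assuming the function above is in scope; the labels are given a trailing dimension of size 1 so the .all(-1) reduction yields the promised [batch_size, batch_size] mask.

import torch

labels = torch.tensor([[0], [0], [1]])   # three samples, two classes
mask = _get_anchor_negative_triplet_mask(labels)
print(mask)
# tensor([[False, False,  True],
#         [False, False,  True],
#         [ True,  True, False]])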
def add_tax_data(file_location):
    """Adds tax data"""
    data = read_csv_file(file_location)
    session = setup_session()

    list_size = len(data)
    list_counter = 0

    for entry in data:
        if entry[0] == "QLD":
            list_counter += 1
            tax = Tax(
                postcode=entry[1],
                gross_num=int(entry[5].replace(',', "")),
                gross_tax=int(entry[6].replace(',', "")),
                medicare_levy=int(entry[8].replace(',', "")),
                help_debt=int(entry[14].replace(',', ""))
            )

            try:
                session.add(tax)
                print("Adding ({}/{}): {}".format(list_counter, list_size, tax))
                session.commit()
            except Exception as e:
                session.rollback()
                print("Could not add entry: {}".format(e))
6,290
async def test_wait_form_displayed_after_checking(hass, smartthings_mock): """Test error is shown when the user has not installed the app.""" flow = SmartThingsFlowHandler() flow.hass = hass flow.access_token = str(uuid4()) result = await flow.async_step_wait_install({}) assert result['type'] == data_entry_flow.RESULT_TYPE_FORM assert result['step_id'] == 'wait_install' assert result['errors'] == {'base': 'app_not_installed'}
6,291
def test_H(): """Tests the Hamiltonian. """ from pydft.schrodinger import _H from numpy.matlib import randn s = [6,6,4] R = np.array([[6,0,0],[0,6,0],[0,0,6]]) a = np.array(randn(np.prod(s), 1) + 1j*randn(np.prod(s), 1)) b = np.array(randn(np.prod(s), 1) + 1j*randn(np.prod(s), 1)) out1 = np.conj(np.dot(np.conj(a.T),_H(s,R,b))) out2 = np.dot(np.conj(b.T),_H(s,R,a)) assert np.allclose(out1,out2)
6,292
def random_k_edge_connected_graph(size, k, p=.1, rng=None):
    """
    Super hacky way of getting a random k-connected graph

    Example:
        >>> from graphid import util
        >>> size, k, p = 25, 3, .1
        >>> rng = util.ensure_rng(0)
        >>> gs = []
        >>> for x in range(4):
        >>>     G = random_k_edge_connected_graph(size, k, p, rng)
        >>>     gs.append(G)
        >>> # xdoc: +REQUIRES(--show)
        >>> pnum_ = util.PlotNums(nRows=2, nSubplots=len(gs))
        >>> fnum = 1
        >>> for g in gs:
        >>>     util.show_nx(g, fnum=fnum, pnum=pnum_())
    """
    for count in it.count(0):
        seed = None if rng is None else rng.randint((2 ** 31 - 1))
        # Randomly generate a graph
        g = nx.fast_gnp_random_graph(size, p, seed=seed)
        conn = nx.edge_connectivity(g)
        # If it has exactly the desired connectivity we are done
        if conn == k:
            break
        # If it has more, then we regenerate the graph with fewer edges
        elif conn > k:
            p = p / 2
        # If it has less, then we add a small set of edges to get there
        elif conn < k:
            # p = 2 * p - p ** 2
            # if count == 2:
            aug_edges = list(k_edge_augmentation(g, k))
            g.add_edges_from(aug_edges)
            break
    return g
6,293
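A usage sketch, assuming random_k_edge_connected_graph is importable together with its helpers; k_edge_augmentation here is taken to be networkx's nx.k_edge_augmentation, and the rng is a plain numpy RandomState standing in for util.ensure_rng.

import networkx as nx
import numpy as np

rng = np.random.RandomState(0)
G = random_k_edge_connected_graph(size=25, k=3, p=0.1, rng=rng)
print(G.number_of_nodes(), G.number_of_edges())
print(nx.edge_connectivity(G) >= 3)   # at least 3 by construction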
def finish_round(): """Clean up the folders at the end of the round. After round N, the cur-round folder is renamed to round-N. """ last_round = get_last_round_num() cur_round = last_round + 1 round_dir = os.path.join("rounds", f"round-{cur_round}") os.rename(CUR_ROUND_DIR, round_dir) timestamp = datetime.datetime.now().strftime("%y%m%d%H%M%S") # Keep only the machines that actually have a team assigned machine_team = machine2team(cur_round) for cur_src, cur_team in machine_team.items(): log_name = f"{timestamp}-log" dst_path = os.path.join(TEAMS_DIR, cur_team, LOGS_SUBDIR, log_name) copyfile(os.path.join(round_dir, SOURCE_SUBDIR, cur_src, LOGNAME), dst_path) # Gather the scores sink_dir = os.path.join(round_dir, SINK_SUBDIR) results = defaultdict(dict) for cur_sink in os.listdir(sink_dir): with open(os.path.join(sink_dir, cur_sink, SCORE_FILE), 'r') as infile: reader = csv.reader(infile, delimiter='\t') for line in reader: # results: (src, dst, bytes) results[line[0]][cur_sink] = int(line[1]) # Scores goals, src2team = load_goals(os.path.join(CONFIGS_DIR, f"config_round_{cur_round}.csv")) scores = score_run(goals, results) # If there is no src entry in the scores, set the score to zero teamscores = {team: scores[src] if src in scores else 0 for src, team in src2team.items()} print(teamscores) # Send scores to influx _push_to_influxdb(teamscores, cur_round) return "Round finished and scores pushed"
6,294
def _SendGerritJsonRequest( host: str, path: str, reqtype: str = 'GET', headers: Optional[Dict[str, str]] = None, body: Any = None, accept_statuses: FrozenSet[int] = frozenset([200]), ) -> Optional[Any]: """Send a request to Gerrit, expecting a JSON response.""" result = _SendGerritHttpRequest( host, path, reqtype, headers, body, accept_statuses) # The first line of the response should always be: )]}' s = result.readline() if s and s.rstrip() != ")]}'": raise GerritError(200, 'Unexpected json output: %s' % s) # Read the rest of the response. s = result.read() if not s: return None return json.loads(s)
6,295
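Gerrit prepends the line )]}' to every JSON response as an XSSI guard; this canned-payload sketch mirrors the stripping logic above without a live HTTP connection.

import io, json

raw = io.BytesIO(b")]}'\n{\"change_id\": \"I123\", \"status\": \"NEW\"}")
prefix = raw.readline()                 # consume the XSSI guard line
assert prefix.rstrip() == b")]}'"
print(json.loads(raw.read()))           # {'change_id': 'I123', 'status': 'NEW'}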
def contains_sequence(dna_sequence, subsequence):
    """
    Checks if a defined subsequence exists in a sequence of dna.

    :param dna_sequence: The dna sequence to check in for a subsequence. ex: ['a', 't', 'g', ...]
    :param subsequence: The subsequence of the dna to check for.
    :return: True if the subsequence is found in dna_sequence, False otherwise.
    """
    n = len(subsequence)
    if n == 0:
        return True
    # Slide a window of length n over dna_sequence and compare element-wise.
    return any(list(dna_sequence[i:i + n]) == list(subsequence)
               for i in range(len(dna_sequence) - n + 1))
6,296
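Two example calls, assuming the sliding-window implementation sketched above:

print(contains_sequence(['a', 't', 'g', 'c', 'g'], ['g', 'c']))   # True: matches at index 2
print(contains_sequence(['a', 't', 'g', 'c', 'g'], ['c', 'a']))   # False: 'c' is never followed by 'a'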
def test_sep_digits(): """Must separate digits on 1000s.""" func = utils.sep_digits assert func('12345678') == '12345678' assert func(12345678) == '12 345 678' assert func(1234.5678) == '1 234.57' assert func(1234.5678, precision=4) == '1 234.5678' assert func(1234.0, precision=4) == '1 234.0000' assert func(1234.0, precision=0) == '1 234'
6,297
def readTableRules(p4info_helper, sw, table):
    """
    Reads the table entries from all tables on the switch.

    :param p4info_helper: the P4Info helper
    :param sw: the switch connection
    :param table: name of a single table to read, or None to read every table
    """
    print '\n----- Reading tables rules for %s -----' % sw.name
    ReadTableEntries1 = {'table_entries': []}
    ReadTableEntries2 = []
    for response in sw.ReadTableEntries():
        for entity in response.entities:
            ReadTableEntry = {}
            entry = entity.table_entry
            table_name = p4info_helper.get_tables_name(entry.table_id)
            if table == None or table == table_name:
                ReadTableEntry['table'] = table_name
                print '%s: ' % table_name,
                for m in entry.match:
                    print p4info_helper.get_match_field_name(table_name, m.field_id),
                    try:
                        print "\\x00"+"".join("\\x"+"{:02x}".format(ord(c)) for c in "".join([d for d in (p4info_helper.get_match_field_value(m))])),
                    except:
                        print '%r' % (p4info_helper.get_match_field_value(m),),
                    match_name = p4info_helper.get_match_field_name(table_name, m.field_id)
                    tmp_match_value = (p4info_helper.get_match_field_value(m),)
                    ReadTableEntry['match'] = {}
                    ReadTableEntry['match'][match_name] = tmp_match_value
                action = entry.action.action
                action_name = p4info_helper.get_actions_name(action.action_id)
                ReadTableEntry['action_name'] = action_name
                print '->', action_name,
                for p in action.params:
                    print p4info_helper.get_action_param_name(action_name, p.param_id),
                    print '%r' % p.value,
                    action_params = p4info_helper.get_action_param_name(action_name, p.param_id)
                    tmp_action_value = p.value
                    ### possibly needs bytify =>> struct. pack and unpack
                    ReadTableEntry['action_params'] = {}
                    ReadTableEntry['action_params'][action_params] = tmp_action_value
                print
                ReadTableEntries1.setdefault('table_entries', []).append(ReadTableEntry)
                ReadTableEntries2.append(ReadTableEntry)
    return ReadTableEntries2
6,298
def alpha_043(code, end_date=None, fq="pre"):
    """
    Formula:
        SUM((CLOSE>DELAY(CLOSE,1)?VOLUME:(CLOSE<DELAY(CLOSE,1)?-VOLUME:0)),6)
    Inputs:
        code: the stock pool (list of security codes)
        end_date: the query date
    Outputs:
        the value of the factor
    """
    end_date = to_date_str(end_date)
    func_name = sys._getframe().f_code.co_name
    return JQDataClient.instance().get_alpha_191(**locals())
6,299
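The real function delegates the computation to the JQData service, but the formula itself is easy to reproduce locally: a 6-bar rolling sum of volume signed by the close-to-close direction. The price and volume series below are made up purely for illustration.

import numpy as np
import pandas as pd

close = pd.Series([10.0, 10.2, 10.1, 10.4, 10.4, 10.3, 10.6, 10.7])
volume = pd.Series([100, 120, 90, 150, 80, 110, 130, 140], dtype=float)

direction = np.sign(close.diff()).fillna(0.0)    # +1 up, -1 down, 0 flat / first bar
alpha_043_local = (direction * volume).rolling(6).sum()
print(alpha_043_local)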