content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def dataframe_summary (df):
    """ Computes summary statistics of a data frame

    :param df: Dataframe to compute statistics for
    :return: Dataframe with summary statistics for input data frame
    """
    df_summary = df.describe(include='all')
    # append per-column missing-value count and percentage as extra rows
    df_summary.loc['Missing #'] = df.isnull().sum(axis=0)
    df_summary.loc['Missing %'] = round(df.isnull().sum(axis=0) / len(df.index) * 100, 1)
    return df_summary
7948a28380894ac6b5ba4f5e31ca3499215c633a
596,860
def get_opcode(instruction_bytes):
    """Extract the 6-bit MIPS opcode (bits 31-26) from a 4-byte instruction word."""
    OPCODE_MASK = 0xFC000000
    OPCODE_SHIFT = 26  # opcode occupies the top 6 of 32 bits
    return (instruction_bytes & OPCODE_MASK) >> OPCODE_SHIFT
85dab0a35b0e31a3f2f2b5c1f3d5323b1ccf3dae
101,276
def normintens_to_countrate(ni_map, star_photrate, collecting_area,
                            coron_thrupt_peakpixel,
                            optloss = 0.5, qe = 0.9, ifsloss = 1.0):
    """
    Convert a normalized intensity array to an array of photoelectron
    count rates, based on stellar irradiance, telescope collecting area,
    coronagraph mask throughput, optical loss factor, and detector
    quantum efficiency.

    Parameters
    ----------
    ni_map : numpy.ndarray
        Normalized intensity values, normalized to the detector pixel
        integrated peak of an unocculted PSF.
    star_photrate : astropy.units.quantity.Quantity
        Bandpass-integrated irradiance of the target star
        (energy / time / area; suggested photons / second / meter^2).
    collecting_area : astropy.units.quantity.Quantity
        Collecting area of the obscured telescope aperture.
    coron_thrupt_peakpixel : numpy.float
        Throughput from telescope aperture to the detector-pixel-integrated
        peak of an unocculted PSF.
    optloss : numpy.float
        Cumulative attenuation due to mirror reflections and transmissive optics.
    qe : numpy.float
        Detector quantum efficiency (may fold in readout/cosmic-ray losses).
    ifsloss : numpy.float
        Attenuation from re-imaging onto a spectrograph detector;
        leave at 1.0 when there is no IFS.

    Returns
    -------
    countrate_map : numpy.ndarray of astropy.units.quantity.Quantity
        Photoelectron count rate map, units photons / sec.
    """
    # multiply left-to-right so the unit/float arithmetic matches the
    # plain chained product exactly
    countrate_map = ni_map
    for factor in (star_photrate, collecting_area, coron_thrupt_peakpixel,
                   optloss, qe, ifsloss):
        countrate_map = countrate_map * factor
    return countrate_map
7dccaa36335fadbfe2356ffa3f7a07829e60ed2d
365,439
def euclidean_distance(point_a, point_b):
    """ Returns the SQUARED euclidean distance between two points with
    (i, j) coordinates.

    NOTE(review): despite the name, no square root is taken — callers get
    the squared distance (monotonic in the true distance, so ordering
    comparisons still work). Confirm callers before adding a sqrt.
    """
    return (point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2
d5789f2aad96f44eeed0c09f7d1766fe370268f9
158,545
def merge_lists(a, b):
    """ Interleave two lists element by element; leftover items from the
    longer list are appended at the end.

    e.g. [1, 2, 3, 4, 5, 6] & ['a', 'b', 'c'] => [1, 'a', 2, 'b', 3, 'c', 4, 5, 6]

    :param a: List a
    :param b: List b
    :return: Merged lists
    """
    common = min(len(a), len(b))
    interleaved = [item for pair in zip(a, b) for item in pair]
    return interleaved + a[common:] + b[common:]
8272fe726299953f1cd28aea662e6720fb75c300
694,373
import re
def removePunctuation(text):
    """Removes punctuation, changes to lowercase, and strips leading and
    trailing spaces.

    Note:
        Only spaces, letters, and numbers are retained. Other characters
        are eliminated (e.g. it's becomes its).

    Args:
        text (str): A string.

    Returns:
        str: The cleaned up string.
    """
    # The old pattern [^A-Za-z0-9\s+] placed '+' inside the character
    # class, so literal '+' characters were (incorrectly) retained.
    return re.sub(r'[^A-Za-z0-9\s]', '', text.strip().lower())
b8d3fdf38507acf61d34dd702ae402de87726ce3
307,826
def beta(**kwds):
    """Return the parameters of the beta distribution (alpha, beta)
    given the mean and variance (method-of-moments fit)."""
    m = kwds['mean']
    v = kwds['variance']
    # alpha = m*(m(1-m)/v - 1), beta = (1-m)*(m(1-m)/v - 1) — the same
    # expressions as the expanded polynomial form, factored
    common = m * (1.0 - m) / v - 1.0
    return m * common, (1.0 - m) * common
0b2a8e8a45b99671bf05e863e9c469004ac9244e
522,279
import json
def read_json_file(file_path):
    """ Load and return the JSON object stored at ``file_path``
    (usually a list or a dict).

    :param file_path: the local path to the JSON file.
    :return: the parsed JSON object.
    """
    with open(file_path, 'r') as json_file:
        return json.load(json_file)
5fa35c88844b26ef5f3b10c4799f4da071b073a0
401,515
def main_float_input(message):
    """ Prompt with ``message`` and keep re-prompting until the user's
    input parses as a float; return that float.
    """
    prompt = message
    while True:
        raw = input(prompt)
        try:
            return float(raw)
        except ValueError:
            # subsequent attempts use the error prompt, as before
            prompt = "Not a valid input, try again: "
8bc3aa4f78069dbdc115a90751254ab4652dacdc
86,932
def get_cidr(netmask):
    """Convert a dotted-quad netmask (e.g. '255.255.255.0') to its CIDR
    prefix length, returned as a string (e.g. '24')."""
    bits = ''.join(format(int(octet), '08b') for octet in netmask.split('.'))
    # prefix length = number of leading 1 bits = length after dropping trailing 0s
    return str(len(bits.rstrip('0')))
95c98496f63daf7eb4c8c441fdfc161541d0e294
113,387
def fully_connected(input, params):
    """Apply an affine transform ``input @ weights + bias`` — a fully
    connected layer with bias and no activation.

    Args:
        input (:obj:`tf.Tensor`): The input values.
        params (:obj:`tuple` of (:obj:`tf.Variable`, :obj:`tf.Variable`)):
            A tuple of (`weights`, `bias`). Probably obtained by
            :meth:`fully_connected_params`.

    Returns:
        :obj:`tf.Tensor`: The output values.
    """
    weights = params[0]
    bias = params[1]
    return (input @ weights) + bias
77815acfe17674bc20035900b75d8e4ddc982855
12,022
def check_2Dinput_consistency(structure, parameters): """ Check if structure and parameter data are complete and matching. :param input: structure, needs to be a valid aiida StructureData node :param input: parameters, needs to be valid aiida ParameterData node returns (False, errormessage) if an inconsistency has been found, otherwise return (True, '2D consistency check complete') """ # default is bulk, get 2D info from structure.pbc info (periodic boundary contitions) is2D = False if not all(structure.pbc): # check periodicity, assumes finite size in z-direction if structure.pbc != (True, True, False): return (False, "Structure.pbc is neither (True, True, True) for bulk nor (True, True, False) for surface calculation!") is2D = True # check for necessary info in 2D case inp_dict = parameters.get_dict() set_keys = [i for i in inp_dict.keys() if inp_dict[i] is not None] has2Dinfo = True for icheck in ['INTERFACE', '<NRBASIS>', '<RBLEFT>', '<RBRIGHT>', 'ZPERIODL', 'ZPERIODR', '<NLBASIS>']: if icheck not in set_keys: has2Dinfo = False if has2Dinfo and not inp_dict['INTERFACE'] and is2D: return (False, "'INTERFACE' parameter set to False but structure is 2D") if has2Dinfo!=is2D: return (False, "2D info given in parameters but structure is 3D\nstructure is 2D? {}\ninput has 2D info? {}\nset keys are: {}".format(is2D, has2Dinfo, set_keys)) # if everything is ok: return (True, "2D consistency check complete")
14c2326a35efced9186b9067e2851539677419f1
538,342
def _IsValidComposerUpgrade(cur_version, candidate_version): """Validates that only MINOR and PATCH-level version increments are attempted. (For Composer upgrades) Checks that major-level remains the same, minor-level ramains same or higher, and patch-level is same or higher (if it's the only change) Args: cur_version: current 'a.b.c' Composer version candidate_version: candidate 'a.b.d' Composer version Returns: boolean value whether Composer candidate is valid """ curr_parts = list(map(int, cur_version.split('.', 3))) cand_parts = list(map(int, candidate_version.split('.', 3))) if (curr_parts[0] == cand_parts[0] and (curr_parts[1] < cand_parts[1] or (curr_parts[1] <= cand_parts[1] and curr_parts[2] <= cand_parts[2]))): return True return False
0683c90e22df65eb0e27acd50ee21d08c1a366d9
87,405
def bid_order_array(market,p,q,reference_price):
    """ Build a list of BUY orders from an array of percent discounts,
    quantities and a reference price. """
    print ("bid order array. ref: " + str(reference_price))
    print (str(p))
    orders = list()
    for idx, pct in enumerate(p):
        discounted = reference_price * (1 - pct)
        orders.append([market, "BUY", discounted, q[idx]])
    return orders
0e2810b6acc2644e797fd568a7f33ae1542bde2c
570,357
import torch
def tensor2im(x: torch.Tensor, norm=False, dtype='float32'):
    """Convert tensor to image.

    Args:
        x(torch.Tensor): input tensor, [n, c, h, w] float32 type.
        norm(bool): if True, denormalize from [-1, 1] to [0, 1] first.
        dtype(str): not used yet.

    Returns:
        the first sample of the batch, clamped to [0, 1], detached and on CPU.
        NOTE(review): the result keeps [c, h, w] layout — the original
        docstring's claim of [h, w, c] did not match the code.
    """
    if norm:
        x = (x + 1) / 2
    # clamp out-of-range values without mutating the caller's tensor:
    # the previous in-place masking (x[x > 1] = 1) modified the input
    # tensor whenever norm=False
    x = x.clamp(0, 1)
    return x.detach().cpu().data[0]
937f256e06ff2a18976796ab982989ce19b79798
507,985
def forcestring(value):
    """Return ``str(value)``, or an empty string when ``value`` is None.

    (An empty string input yields an empty string as well, since
    ``str("") == ""``.)
    """
    return "" if value is None else str(value)
4d0fe3aee9de7d7c9edff1aaf48b898d9818808b
533,826
def read_embedding_set(args):
    """ Read the node list file named by ``args.embed_subset`` and return
    the unique vertex ids as a list. """
    with open(args.embed_subset, 'r') as f:
        vertices = {int(line.strip()) for line in f}
    return list(vertices)
06401cd3581ccfc1f406ac9c7833381c85865497
72,515
def web_tile_zoom_out(zxy):
    """ Return the parent web-map tile (one zoom level down in z)
    that contains this tile.
    """
    zoom, col, row = zxy
    # each parent tile covers a 2x2 block of children
    return (zoom - 1, col // 2, row // 2)
b3eb5664791ba54576f1c8bb92c0ac738bdc5dcd
178,523
import csv
import random
def addThing(indVign,config):
    """Look up ``indVign`` in the object csv and pick one of its possible
    outcomes at random.

    obj.csv row layout: image name (i.e. A001.png), x-coord, y-coord,
    then one or more possible-outcome columns.

    Returns a tuple (chosen outcome, x, y) for a matching row, or 0 when
    the image name is not found.
    """
    obj_path = config["csvLocation"] + "/" + config["csvObj"]
    with open(obj_path) as obj:
        for row in csv.reader(obj):
            if row[0] != indVign:
                continue
            # pick uniformly among the outcome columns (index 3 onwards)
            outcome = row[random.randint(3, len(row) - 1)]
            return outcome, row[1], row[2]
    return 0
7317eaa9938f7c11c3bbfce77a61532f5c9b1bf5
203,135
def byte_align(pos):
    """Return the smallest multiple of 8 greater than or equal to ``pos``.

    Raises ``ValueError`` if ``pos`` is negative.
    """
    if pos < 0:
        # 0 is accepted, so the message says "non-negative" (the old text
        # claimed "positive", which did not match the check)
        msg = "Expected non-negative integer, got {}"
        raise ValueError(msg.format(pos))
    # round up to the next multiple of 8
    return ((pos + 7) // 8) * 8
50f050da2c24f35e7c53b887a3b81c23458ddbef
76,803
import hashlib
def getHashForFile(f):
    """Returns the SHA-1 hex digest of an open file's contents.

    The file is read in 1024-byte chunks and rewound to the start when done.

    :param f: Binary file-like object to hash (must support read/seek).
        NOTE(review): the original docstring declared ``:type f: str``,
        but the code clearly treats ``f`` as an open file object.
    :returns: str
    """
    hashVal = hashlib.sha1()
    while True:
        r = f.read(1024)
        if not r:
            break
        hashVal.update(r)
    # rewind so callers can re-read the file afterwards
    f.seek(0)
    return hashVal.hexdigest()
7adc2383a9f555ae64a4bc9fd34715c4c3057039
58,527
def sensitivity_metric(event_id_1, event_id_2):
    """Determine similarity between two epochs, given their event ids.

    Returns 0 (completely similar) when both ids are 1, 0.5 (somewhat
    similar) for any other combination of 1 and 2, and 1 otherwise.
    """
    if event_id_1 == 1 and event_id_2 == 1:
        return 0    # Completely similar
    if event_id_1 == 2 and event_id_2 == 2:
        return 0.5  # Somewhat similar
    elif event_id_1 == 1 and event_id_2 == 2:
        return 0.5  # Somewhat similar
    elif event_id_1 == 2 and event_id_2 == 1:
        # fixed: the original compared event_id_1 twice (== 2 and == 1),
        # an unreachable branch, so (2, 1) wrongly fell through to 1
        return 0.5  # Somewhat similar
    else:
        return 1    # Not similar
b04c5fa27ef655dd3f371c3ce6ef0410c55dd05b
3,309
def l2_distance(pts1, pts2):
    """
    Args:
        pts1, pts2: float, (n_frames, n_points, 2)
    Return:
        float: total (squared) l2 distance summed over all frames/points
    """
    total = 0
    n_frames = pts1.shape[0]
    n_points = pts1.shape[1]
    for f in range(n_frames):
        for p in range(n_points):
            dx = pts1[f][p][0] - pts2[f][p][0]
            dy = pts1[f][p][1] - pts2[f][p][1]
            total += dx * dx + dy * dy
    return total
bac296a2460d090adb756ece088fc969ee836480
529,042
import json
def read_json_file(path):
    """Parse the JSON document stored at ``path`` and return it."""
    with open(path, "r") as fp:
        parsed = json.load(fp)
    return parsed
eb036c43a5f16d5bf2d803089b84e95c3d932d74
615,907
def row_to_dict(row, field_names):
    """Convert a row from bigquery into a dictionary keyed by field name,
    mapping NaN values to None.
    """
    dict_row = {}
    for field_name, value in zip(field_names, row):
        # truthy values whose string form is "nan" (any case) become None
        is_nan = bool(value) and str(value).lower() == "nan"
        dict_row[field_name] = None if is_nan else value
    return dict_row
10c903dda90524e6aacfb54a17d4839c8498a769
667,410
def parse_header(header):
    """
    Extract size= and barcode= fields from the FASTA/FASTQ header line

    >>> parse_header("name;size=12;barcode=ACG;")
    ('name', 12, 'ACG')
    >>> parse_header("another name;size=200;foo=bar;")
    ('another name', 200, None)
    """
    parts = header.split(';')
    query_name = parts[0]
    size = None
    barcode = None
    for part in parts[1:]:
        if not part or '=' not in part:
            continue
        key, _, value = part.partition('=')
        if key == 'size':
            size = int(value)
        elif key == 'barcode':
            barcode = value
    return query_name, size, barcode
fcebaea017bf024e71da29b8bea505f0e6423fb1
127,106
def sos_model_dict(scenario_only_sos_model_dict):
    """Config for a SosModel with one scenario and one sector model

    Extends the scenario-only config with two sector models
    (economic_model feeding water_supply), the scenario dependencies,
    and the inter-model dependencies (including a PREVIOUS-timestep
    self-dependency on reservoir_level).
    """
    config = scenario_only_sos_model_dict
    # sector models: economic_model produces gva; water_supply consumes
    # precipitation, rGVA and reservoir_level and produces water and
    # reservoir_level
    config['sector_models'] = [
        {
            'name': 'economic_model',
            'inputs': [],
            'parameters': [],
            'outputs': [
                {
                    'name': 'gva',
                    'dims': ['LSOA'],
                    'coords': {'LSOA': [1, 2, 3]},
                    'dtype': 'float',
                    'unit': 'million GBP'
                }
            ]
        },
        {
            'name': 'water_supply',
            'inputs': [
                {
                    'name': 'precipitation',
                    'dims': ['LSOA'],
                    'coords': {'LSOA': [1, 2, 3]},
                    'dtype': 'float',
                    'unit': 'ml'
                },
                {
                    'name': 'rGVA',
                    'dims': ['LSOA'],
                    'coords': {'LSOA': [1, 2, 3]},
                    'dtype': 'float',
                    'unit': 'million GBP'
                },
                {
                    'name': 'reservoir_level',
                    'dims': ['LSOA'],
                    'coords': {'LSOA': [1, 2, 3]},
                    'dtype': 'float',
                    'unit': 'ml'
                }
            ],
            'parameters': [],
            'outputs': [
                {
                    'name': 'water',
                    'dims': ['LSOA'],
                    'coords': {'LSOA': [1, 2, 3]},
                    'dtype': 'float',
                    'unit': 'Ml'
                },
                {
                    'name': 'reservoir_level',
                    'dims': ['LSOA'],
                    'coords': {'LSOA': [1, 2, 3]},
                    'dtype': 'float',
                    'unit': 'ml'
                }
            ]
        }
    ]
    # scenario inputs feeding water_supply at the current timestep
    config['scenario_dependencies'] = [
        {
            'source': 'climate',
            'source_output': 'precipitation',
            'sink_input': 'precipitation',
            'sink': 'water_supply',
            'timestep': 'CURRENT'
        },
        {
            'source': 'climate',
            'source_output': 'reservoir_level',
            'sink_input': 'reservoir_level',
            'sink': 'water_supply',
            'timestep': 'CURRENT'
        }
    ]
    # model-to-model links; note water_supply depends on its own
    # reservoir_level from the PREVIOUS timestep
    config['model_dependencies'] = [
        {
            'source': 'economic_model',
            'source_output': 'gva',
            'sink_input': 'rGVA',
            'sink': 'water_supply',
            'timestep': 'CURRENT'
        },
        {
            'source': 'water_supply',
            'source_output': 'reservoir_level',
            'sink_input': 'reservoir_level',
            'sink': 'water_supply',
            'timestep': 'PREVIOUS'
        }
    ]
    return config
90e6de680b22b77ca2eb32ee015afcfcbe415364
82,606
def _trim_name(image): """Remove the slash at the end of the filename.""" return image[:-1] if image[-1] == '/' else image
823dd63920673352a18d73f83190853d5a234483
4,773
def _parse_title(dom, details): """ Parse title/name of the book. Args: dom (obj): HTMLElement containing whole HTML page. details (obj): HTMLElement containing slice of the page with details. Returns: str: Book's title. Raises: AssertionError: If title not found. """ title = details.find("h1") # if the header is missing, try to parse title from the <title> tag if not title: title = dom.find("title") assert title, "Can't find <title> tag!" return title[0].getContent().split("|")[0].strip() return title[0].getContent().strip()
41e0581ac886ed6301b765ef53fcc21db3d4f15c
417,314
def _create_expr(symbols, prefix='B', suffix='NK'): """Create einsum expr with prefix and suffix.""" return prefix + ''.join(symbols) + suffix
d5e0aff866f375d611b4a4fa82665a0a5447bf02
36,720
def power_n( data, power=2.0, verbose=False ):
    """Raise ``data`` voxel-wise to ``power``; works on either a data
    array or a NIfTI-1 image object supporting ``**``.

    To get the nth root, use 1/power, e.g. power_n(nii, 1./3) returns an
    image whose voxel values are cube rooted.
    """
    if verbose:
        template = (" Getting root-{0} of image data ..." if power < 1.0
                    else " Getting image data^{0} ...")
        print(template.format(power))
    return data ** power
# End of power_n() definition
e29bd9170a14c91fcba25159e9e8a1708c3dcb6d
235,787
def CheckAgreement(ex, min_agreement, all_targets, max_agreement=100):
    """Return (comma-joined) the labels whose total ratings fall within
    [min_agreement, max_agreement]."""
    totals = ex[all_targets].sum(axis=0)
    within_band = (totals >= min_agreement) & (totals <= max_agreement)
    return ",".join(totals.index[within_band].tolist())
4095405e8965cb363da638c6d73aabff86900a81
457,980
def translate_variable(variable_number: int) -> str:
    """
    Get variable name from its number

    Variable numbers represent the index in the following sequence:
    ```
    Y, X1, Z1, X2, Z2, X3, Z3, ...
    ```
    """
    if variable_number == 1:
        return "Y"
    # even numbers map to X, odd to Z; the pair index is number // 2
    prefix = "Z" if variable_number % 2 else "X"
    return prefix + str(variable_number // 2)
c579310f58ef087e2f4475e4bdbb0dc22a958416
390,463
def apply_mask(image, mask):
    """Return a masked image (element-wise product of image and mask)."""
    masked = image * mask
    return masked
f8898e7e746a690a443d8280abd5b29c4ffff781
441,062
def fprime(x, A, b):
    """
    Gradient of the objective function with respect to the heights (x):
    A^T (Ax - b).  The gradient is a vector the same size as x.
    """
    residual = A * x - b
    return A.transpose() * residual
3be17624f363ff5db106c27f725ed5559cce7dfa
450,305
def crop(img, left, top, right, bottom):
    """
    Crop rectangle from image.

    Inputs:
        img - The image to crop (first axis = rows/y, second = columns/x).
        left - The leftmost column index to keep.
        top - The topmost row index to keep.
        right - The rightmost column bound (exclusive).
        bottom - The bottommost row bound (exclusive).
    Outputs:
        img - The cropped image.
    """
    # rows are selected by the vertical (top/bottom) coordinates and
    # columns by the horizontal (left/right) ones; the original indexing
    # had the two axes swapped
    return img[top:bottom, left:right]
1507a55bba07dc656f51f873d2328b69f70682c9
709,521
import time
import functools
def measureTime(method):
    """
    Decorator for measuring how long the given function took to execute.

    Credit to:
    https://thenextweb.com/news/decorators-in-python-make-code-better-syndication

    Args:
        method (function): Function to measure time to execute.

    Returns:
        function: wrapper that forwards all arguments and the return
        value (the original wrapper dropped both).
    """
    @functools.wraps(method)  # preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        start_time = time.perf_counter()
        result = method(*args, **kwargs)
        end_time = time.perf_counter()
        print(method.__name__ + ' took '
              + str(round((end_time - start_time), 3)) + ' seconds.')
        return result
    return wrapper
d4f54954bed53fcfb50da0641d49b58b04567f6e
57,870
def getImName(image_path,folder_path):
    """
    Get image name by stripping the folder part from the image path.

    Parameters:
        -image_path: path of image in the folder
        -folder_path: path of folder that contains images (a glob pattern
         ending in ``/*.jpg``)

    Returns:
        -name: name of the image
    """
    # drop the glob suffix, then remove "<folder>\" from the image path
    folder = folder_path.replace("/*.jpg", "")
    name = image_path.replace(folder + "\\", "")
    print(name)
    return name
e1b6a432d06f7c45d0a7408b52fcfc6464da2b07
65,848
import random
def get_random_int(min_v=0, max_v=10, number=5, seed=None):
    """Return a list of random integers in [min_v, max_v].

    Parameters
    -----------
    min_v : number
        The minimum value.
    max_v : number
        The maximum value.
    number : int
        Number of values.
    seed : int or None
        The seed for random; ``None`` means unseeded.

    Examples
    ---------
    >>> r = get_random_int(min_v=0, max_v=10, number=5)
    ... [10, 2, 3, 3, 7]
    """
    # `if seed:` ignored a legitimate seed of 0 — test against None instead
    rnd = random.Random(seed) if seed is not None else random.Random()
    return [rnd.randint(min_v, max_v) for _ in range(number)]
fb551cbcbb8983dfada2368c17607dfad4f1aa66
621,336
from typing import Dict from typing import Any def _transform_underscore(d: Dict[str, Any]) -> Dict[str, Any]: """transform "-" to "_" recursively Args: d: Returns: Dict[str, Any]: new dict """ new_dict = d.copy() for k, v in new_dict.items(): if isinstance(v, Dict): new_dict[k] = _transform_underscore(v) for k in list(new_dict.keys()): if "-" in k: new_dict[k.replace("-", "_")] = new_dict[k] del new_dict[k] return new_dict
6c541c41929c8443c3185159cf98a40f2aa78d1e
151,150
def DiffValueLists(new_list, old_list):
    """Give an old list and a new list, return the added and removed items."""
    if not old_list:
        return new_list, []
    if not new_list:
        return [], old_list

    removed = list(old_list)  # assume everything was removed, then narrow down
    added = []
    for item in new_list:
        try:
            removed.remove(item)  # present in both: not removed after all
        except ValueError:
            added.append(item)    # only in the new list
    return added, removed
4c3dc471bd0e9e9aea3b5f0b8c906eb1ca27d196
52,124
import re
def slugify(text):
    """
    Generates an ASCII-only slug: runs of non-alphanumeric characters
    become single hyphens, and any trailing hyphen is dropped.
    """
    collapsed = re.sub(r'[^A-Za-z0-9\-]+', '-', text)
    return re.sub(r'-$', '', collapsed)
18859e4e3237d392c2d2b18ed82b9d6d27fe6598
341,793
def average_error(state_edges_predicted, state_edges_actual):
    """
    Given predicted state edges and actual state edges, returns the
    average absolute error of the prediction (averaged over the keys of
    the predicted dict).
    """
    total_error = sum(
        abs(state_edges_predicted[state] - state_edges_actual[state])
        for state in state_edges_predicted
    )
    return total_error / len(state_edges_predicted)
40d132682e49b556e8cc0fc71015947202b9cf08
28,643
def extract_steering_wheel_image(screenshot_rs):
    """Extract the part of a screenshot (resized to 180x320 HxW) that
    usually contains the steering wheel."""
    height, width = screenshot_rs.shape[0:2]
    # fractions of a full 1280x720 frame where the wheel usually sits
    left = int(width * (470/1280))
    right = int(width * (840/1280))
    top = int(height * (500/720))
    bottom = int(height * (720/720))
    # slices are inclusive of the right/bottom pixel (+1)
    return screenshot_rs[top:bottom+1, left:right+1, :]
6bb3d982d695bcfc1615e7f15b59ca14ed439416
263,572
def split_columns(string):
    """
    Splits the return-columns argument or reads it from .txt

    --return-columns 'temperature c, "heat$"' -> ['temperature c', '"heat$"']
    --return-columns my_vars.txt -> ['temperature c', '"heat$"']
    """
    if not string.endswith('.txt'):
        return [part.strip() for part in string.split(',')]
    # a .txt argument names a file with one column per line
    with open(string, 'r') as fh:
        return [line.rstrip('\n').strip() for line in fh]
0977f978a2ae5d22eb7b873901a6f4bfa6c92cc4
52,164
def get_slot_values(slotted_instance):
    """Get all slot values in a class with slots."""
    # thanks: https://stackoverflow.com/a/6720815/782170
    values = []
    for slot_name in slotted_instance.__slots__:
        values.append(getattr(slotted_instance, slot_name))
    return values
4adfe1d7dc66cd42cc12c2c26b5e1069d7dac7a8
219,852
def writexl_new_content_types_text(db): """ Returns [Content_Types].xml text :param pylightxl.Database db: database contains sheetnames, and their data :return str: [Content_Types].xml text """ # location: [Content_Types].xml # inserts: many_tag_sheets, tag_sharedStrings # note calcChain is part of this but it is not necessary for excel to open xml_base = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\r\n' \ '<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">\r\n' \ '<Default Extension="rels" ContentType="application/vnd.openxmlformats-package.relationships+xml"/>\r\n' \ '<Default Extension="xml" ContentType="application/xml"/>\r\n' \ '<Override PartName="/xl/workbook.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml"/>\r\n' \ '{many_tag_sheets}\r\n' \ '{tag_sharedStrings}\r\n' \ '<Override PartName="/docProps/core.xml" ContentType="application/vnd.openxmlformats-package.core-properties+xml"/>\r\n' \ '<Override PartName="/docProps/app.xml" ContentType="application/vnd.openxmlformats-officedocument.extended-properties+xml"/>\r\n' \ '</Types>' xml_tag_sheet = '<Override PartName="/xl/worksheets/sheet{sheet_id}.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml"/>\r\n' xml_tag_sharedStrings = '<Override PartName="/xl/sharedStrings.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml"/>\r\n' many_tag_sheets = '' for sheet_id, _ in enumerate(db.ws_names, 1): many_tag_sheets += xml_tag_sheet.format(sheet_id=sheet_id) if db._sharedStrings: tag_sharedStrings = xml_tag_sharedStrings else: tag_sharedStrings = '' rv = xml_base.format(many_tag_sheets=many_tag_sheets, tag_sharedStrings=tag_sharedStrings) return rv
3762cfe7680fa1754885622b123c914a341aea34
680,691
import textwrap
def multiline_fix(s):
    """Remove common indentation from a multi-line string and strip any
    leading whitespace."""
    dedented = textwrap.dedent(s)
    return dedented.lstrip()
9cb964eb88ebcaadc00acc222d174d6a320c44cf
679,426
def get_random_urls(prefix='http://www.example-shop.com/product/', size=1000, start_index=None):
    """
    Create sequential url endpoints.

    Args:
        prefix (str): base url each numeric suffix is appended to.
        size (int): number of urls to be created.
        start_index (int): optional argument for starting number.
    """
    if not start_index:
        start_index = 1
    # range's end is exclusive, so start + size yields exactly `size`
    # urls; the old `end_index + 1` produced one url too many
    end_index = start_index + size
    urls = [prefix + str(url_suffix) for url_suffix in range(start_index, end_index)]
    return urls
e302b4f1003391e1eb6e4e400b9d49e0710782fd
48,670
def getIntersection(in1_list, in2_list):
    """Return the set of elements common to both input lists."""
    return set(in1_list) & set(in2_list)
6396d883ab4acc8c3247ba4eada1c7722d0375d8
516,168
import difflib, time, os
def pydiff(text1, text2, text1_name='text1', text2_name='text2',
           prefix_diff_files='tmp_diff', n=3):
    """
    Use Python's ``difflib`` module to compute the difference between
    strings `text1` and `text2`. Produce text and html diff in files
    with `prefix_diff_files` as prefix. The `text1_name` and `text2_name`
    arguments can be used to label the two texts in the diff output files.
    No files are produced if the texts are equal.

    Returns True when the texts differ (and files were written),
    otherwise False.
    """
    if text1 == text2:
        return False

    text1_lines = text1.splitlines()
    text2_lines = text2.splitlines()
    diff_html = difflib.HtmlDiff().make_file(
        text1_lines, text2_lines, text1_name, text2_name,
        context=True, numlines=n)
    diff_plain = difflib.unified_diff(
        text1_lines, text2_lines, text1_name, text2_name, n=n)

    # Need to add newlines despite doc saying that trailing newlines are
    # inserted...
    diff_plain = [line + '\n' for line in diff_plain]

    # context managers close the files even if a write fails (the
    # original used explicit open/close with no error handling)
    with open(prefix_diff_files + '.txt', 'w') as f:
        f.writelines(diff_plain)
    with open(prefix_diff_files + '.html', 'w') as f:
        f.writelines(diff_html)
    return True
98cc5dcb7726120ecd85237b4795467a16676291
302,593
def original_scorer(worda_count, wordb_count, bigram_count, len_vocab, min_count, corpus_word_count):
    """Bigram scoring function, based on the original `Mikolov, et. al:
    "Distributed Representations of Words and Phrases and their
    Compositionality" <https://arxiv.org/abs/1310.4546>`_.

    Parameters
    ----------
    worda_count : int
        Number of occurrences for first word.
    wordb_count : int
        Number of occurrences for second word.
    bigram_count : int
        Number of co-occurrences for phrase "worda_wordb".
    len_vocab : int
        Size of vocabulary.
    min_count: int
        Minimum score threshold.
    corpus_word_count : int
        Not used in this particular scoring technique.

    Notes
    -----
    Formula: :math:`\\frac{(bigram\\_count - min\\_count) * len\\_vocab }{ (worda\\_count * wordb\\_count)}`.
    """
    # keep the exact left-to-right evaluation order of the original
    # expression so floating-point results are bit-identical
    score = (bigram_count - min_count) / worda_count
    score = score / wordb_count
    return score * len_vocab
228ba69d5598cac078c3ed3d25ee907c1d58aef8
212,252
async def healthCheck():
    """
    Returns 200 for a healthcheck for AWS
    """
    # NOTE: {'ok'} is a set literal containing the string 'ok', not a dict
    response = {'ok'}
    return response
6cc232df6661f26a1db4e4f6bf35e5de284abff1
691,926
import torch
def gpu_non_dominated_sort(swarm: torch.Tensor):
    """
    The GPU version of non-dominated sorting algorithms

    Args:
        swarm (np.ndarray): m x n scoring matrix, where m is the number of
            samples and n is the number of objectives.

    Returns:
        fronts (List): a list of Pareto fronts, in which the dominated
            solutions are on the top, and non-dominated solutions are on
            the bottom.
    """
    # pairwise domination test: a pair dominates when all objectives are
    # <= AND at least one is strictly < (axis convention between the two
    # unsqueeze broadcasts — NOTE(review): verify which index dominates)
    domina = (swarm.unsqueeze(1) <= swarm.unsqueeze(0)).all(-1)
    domina_any = (swarm.unsqueeze(1) < swarm.unsqueeze(0)).any(-1)
    # .half() converts the boolean matrix so columns can be summed below
    domina = (domina & domina_any).half()
    fronts = []
    # peel one Pareto front per iteration; rows already assigned to a
    # front get -1 on the diagonal so the loop eventually terminates
    while (domina.diag() == 0).any():
        count = domina.sum(dim=0)
        # solutions dominated by nothing still in play form the next front
        front = torch.where(count == 0)[0]
        fronts.append(front)
        domina[front, :] = 0
        domina[front, front] = -1
    return fronts
8aff282bbc6b6af5dbe1f64dfa1499dc3b3936ef
223,915
import re
def delete_useless(statement: str) -> str:
    """
    Cancel out and delete useless moves/calculations in a Brainfuck
    statement, repeating until no rewrite applies.

    >>> delete_useless("+++--<<>>>")
    '+>'
    >>> delete_useless("---++>><<<")
    '-<'
    >>> delete_useless(">++++[-][-]")
    '>[-]'
    >>> delete_useless(">--[-]++[-]")
    '>[-]'
    """
    while True:
        if "<>" in statement:
            # cancel useless movement, pattern 1
            statement = statement.replace("<>", "")
            continue
        if "><" in statement:
            # cancel useless movement, pattern 2
            statement = statement.replace("><", "")
            continue
        if "+-" in statement:
            # cancel useless add/subtract, pattern 1
            statement = statement.replace("+-", "")
            continue
        if "-+" in statement:
            # cancel useless add/subtract, pattern 2
            statement = statement.replace("-+", "")
            continue
        if "+[-]" in statement or "-[-]" in statement:
            # drop add/subtract immediately before a zero-clear
            statement = re.sub(r'[-+]+\[-\]', "[-]", statement)
            continue
        if "[-][-]" in statement:
            # collapse repeated zero-clears into one
            statement = statement.replace("[-][-]", "[-]")
            continue
        break
    return statement
3bd2b63f0634b53360c18637029a27eb30957c91
623,522
def get_targets_in_sif_file(sif_file, targets):
    """
    Return (as a list) the targets that appear as nodes in the
    tab-separated SIF network file given by the user.
    """
    wanted = {str(t) for t in targets}
    found = set()
    with open(sif_file, 'r') as handle:
        for raw_line in handle:
            node_a, _score, node_b = raw_line.strip().split('\t')
            if node_a in wanted:
                found.add(node_a)
            if node_b in wanted:
                found.add(node_b)
    return list(found)
5f788edaa60a6fa99f8c26458598f825ed8fe2ce
297,802
import re
def argument_exists(arg_name, args):
    """
    Test whether given argument name exists in given argument list.

    :param arg_name: Argument name.
    :param args: Argument list.
    :return: True when the name appears in `args` either bare (`NAME`) or
        with a value (`NAME=value`), otherwise False.
    """
    # Matches `^NAME$` or `^NAME=`
    pattern = re.compile('^%s(?:$|=)' % arg_name)
    return any(pattern.search(arg) for arg in args)
5d3ed9335989c6e8466697b091267b8b1968e5bc
198,469
import math
def cosine(r_tokens: list, s_tokens: list) -> float:
    """Computes cosine similarity.

    COS(r, s) = |r ∩ s| / sqrt(|r| * |s|)

    Parameters
    ----------
    r_tokens : list
        First token list.
    s_tokens : list
        Second token list.

    Returns
    -------
    Cosine similarity of r and s.
    """
    overlap = set(r_tokens) & set(s_tokens)
    return len(overlap) / math.sqrt(len(r_tokens) * len(s_tokens))
234b7298e8c0c29cbb3d79d420e63518decfe4e9
694,152
def trimmed(goal, num=200):
    """Trim the goal column to ``num`` words so it fits in the notes column."""
    words = goal.split()
    kept = words[:num]
    text = " ".join(kept)
    if len(kept) < len(words):
        # something was cut out — flag the truncation
        text += " […]"
    return text
56876c10def91f5b1dd2352f432378cde1398afc
382,355
import math
def get_auto_embedding_dim(num_classes):
    """ Calculate the dim of embedding vector according to number of classes
    in the category

    emb_dim = [6 * (num_classes)^(1/4)]

    NOTE(review): the code actually uses exponent 0.26 rather than the
    0.25 (= 1/4) stated here and in the cited paper — confirm whether
    the deviation is intentional before changing either side.

    ref: Ruoxi Wang, Bin Fu, Gang Fu, and Mingliang Wang. 2017.
    Deep & Cross Network for Ad Click Predictions. In Proceedings of the
    ADKDD'17 (ADKDD'17). Association for Computing Machinery, New York,
    NY, USA, Article 12, 1–7. DOI:https://doi.org/10.1145/3124749.3124754

    :param num_classes: number of classes in the category
    :return: the dim of embedding vector
    """
    return math.floor(6 * math.pow(num_classes, 0.26))
9c91c8bf2d775d029349849c7efa56ba51714448
671,734
import csv
def read_from_csv(_path:str):
    """Read csv file and return field names and data rows."""
    with open(_path, "r", encoding="utf-8") as file:
        reader = csv.reader(file)
        # the first row holds the column headers
        field_names = next(reader)
        # everything after the header row is data
        data_rows = [row for row in reader]
    return (field_names, data_rows)
e973b13c916afd0707435278f8391870d160263e
308,652
def moving_func_trim(window_diameter, *arrays):
    """Trim any number of arrays to the valid region after calling a
    centered bottleneck moving window function.

    Parameters
    ----------
    window_diameter: int
        odd number window width
    arrays: 1 or more numpy.ndarray

    Returns
    -------
    generator yielding the trimmed arrays
    """
    radius = window_diameter // 2
    assert (2 * radius + 1) == window_diameter, "window_diameter must be odd"

    if radius == 0:
        # nothing to trim — pass the arrays through unchanged
        return (arr for arr in arrays)
    return (arr[radius:-radius] for arr in arrays)
4a30466c9d0dcc50914ca84b63042db8bf22948c
303,694
import math
def nice(v):
    """Returns nearest nice number (1, 2 or 5 times a power of ten).

    Give highest value if equally close to two nice numbers
    """
    exponent = math.floor(math.log10(v))
    mantissa = v / 10**exponent
    # thresholds are the midpoints between the nice mantissas 1, 2, 5, 10
    if mantissa < 1.5:
        nice_mantissa = 1
    elif mantissa < 3.5:
        nice_mantissa = 2
    elif mantissa < 7.5:
        nice_mantissa = 5
    else:
        nice_mantissa = 10
    return nice_mantissa * 10**exponent
f5ba294932d95986705688dae7e3bba9ab5bf294
171,881
def cn(uc, w, eis, frisch, vphi):
    """Return optimal c, n as function of u'(c) given parameters"""
    consumption = uc ** (-eis)
    labor = (w * uc / vphi) ** frisch
    return consumption, labor
74247338f5d26c81e24748f597f1e47386829832
126,673
def expand_and_clamp(box, im_shape, s=1.25):
    """Expand the bbox by ratio ``s`` and clip it to fit the image shape.

    Args:
        box (list): x1, y1, x2, y2
        im_shape (ndarray): image shape (h, w, c)
        s (float): expand ratio

    Returns:
        list: x1, y1, x2, y2
    """
    x1, y1, x2, y2 = box[:4]
    # Grow each side by half of the extra width/height.
    half_extra_w = (x2 - x1) * (s - 1) / 2
    half_extra_h = (y2 - y1) * (s - 1) / 2
    x1 -= half_extra_w
    x2 += half_extra_w
    y1 -= half_extra_h
    y2 += half_extra_h
    img_h, img_w = im_shape[:2]

    def _clip(coord, upper):
        # Truncate to int, then clamp into [0, upper - 1].
        return min(max(0, int(coord)), upper - 1)

    return [_clip(x1, img_w), _clip(y1, img_h),
            _clip(x2, img_w), _clip(y2, img_h)]
1ac2f5d66d190af46832ed984092979aa70cb58e
408,313
def isSuffixOf(xs, ys):
    """``isSuffixOf :: Eq a => [a] -> [a] -> Bool``

    Returns True if the first list is a suffix of the second.
    The second list must be finite.
    """
    # An empty sequence is a suffix of anything.  Without this guard,
    # ``ys[-0:]`` slices the whole of ``ys`` and the comparison wrongly
    # fails whenever ``ys`` is non-empty.
    if not xs:
        return True
    return xs == ys[-len(xs):]
c57da0216d8064f4a5902f2397025d29fa04784d
575,917
def discrimine(pred, sequence):
    """Split a collection in two collections using a predicate.

    >>> discrimine(lambda x: x < 5, [3, 4, 5, 6, 7, 8])
    ... ([3, 4], [5, 6, 7, 8])
    """
    # Single pass so the input may be any iterable; bucket 0 holds
    # matches, bucket 1 holds the rest.
    buckets = ([], [])
    for item in sequence:
        buckets[0 if pred(item) else 1].append(item)
    return buckets[0], buckets[1]
23beece3fe4771fbe155d3960978d50e929f82b9
74,155
def _format_to_rows(dl): """Helper method to take data in DOL (dict of lists) format, and convert it to LOD (list of dict) Args: data (list): Dict of lists to be converted Returns: list: A list of dicts representing data in row form """ return [dict(zip(dl, t)) for t in zip(*dl.values())]
2daa034a172e4a75a697c0d799e657729f3617ee
182,220
import json


def load_dataset(data_path):
    """Load an existing json format dataset.

    :param data_path: path to the json file
    :return: the parsed json content
    """
    # Use a context manager so the handle is closed even on error; the
    # previous version opened the file and never closed it.
    with open(data_path) as datafile:
        return json.load(datafile)
7d125a6308c89de15cf4fa6a6edb8cd7c656148c
630,384
from typing import Iterable


def is_seq_consecutive(it: Iterable[int]) -> bool:
    """Check whether an iterable consists of consecutive integers.

    :param it: any Iterable of integers
    :return: True if the sorted values form an unbroken run; an empty
        iterable is considered trivially consecutive.
    """
    sorted_list = sorted(it)
    # Guard the empty case: sorted_list[0] would raise IndexError.
    if not sorted_list:
        return True
    expected = list(range(sorted_list[0], sorted_list[-1] + 1))
    return expected == sorted_list
9759b0c46efe6b2bd30f6425c2f52bb2ec816291
359,854
def signature(obj):
    """Generate a timestamp by delegating to ``obj.get_timestamp()``."""
    timestamp = obj.get_timestamp()
    return timestamp
2fbdbb4e77286f3339334f40794f596fe62d2bef
136,699
import re


def parse_show_mirror(raw_result):
    """
    Parse the 'show mirror' command raw output.

    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: The parsed result of the show mirror command in a \
        dictionary of the form. Returns None if no mirror found:

     for 'show mirror':

     ::

        {
            'My_Session_1': {'name': 'My_Session_1', 'status': 'active'},
            'Other-Session-2': {'name': 'Other-Session-2',
                                'status': 'shutdown'}
        }

     for 'show mirror <name>':

     ::

        {
            'name': 'My_Session_1',
            'status': 'active',
            'source': [{'type': 'interface', 'id:' '2', 'direction': 'both'},
                       {'type': 'interface', 'id:' '3', 'direction': 'rx'}],
            'destination': {'type': 'interface', 'id:' '1'},
            'output_packets': '123456789'
            'output_bytes': '8912345678'
        }
    """
    # Header of the list form output: "name  status".
    mirror_list_header_re = (r'\s*name\s+status')
    # One session row of the list form; the lookahead skips the header
    # row and the '----' separator row.
    mirror_list_re = (r'^\s*(?!name|-+\s)(?P<name>\S+)\s+(?P<status>\S+)')
    # Detail form: session name, status and output counters.
    mirror_re = (
        r'\s*Mirror\sSession:\s+(?P<name>\S+)\s*'
        r'\s*Status:\s+(?P<status>\w+)(?:\s|.)*'
        r'\s*Output\sPackets:\s+(?P<output_packets>\d+)\s*'
        r'\s*Output\sBytes:\s+(?P<output_bytes>\d+)'
    )
    # Source line: "<type> <id> <direction>".
    mirror_sorce_re = (
        r'Source:\s+(?P<type>\w+)\s+(?P<id>\S+)\s+(?P<direction>(tx|rx|both))'
    )
    # Alternate source line where the direction precedes a literal 'none' id.
    mirror_sorce_re2 = (
        r'Source:\s+(?P<type>\w+)\s+(?P<direction>(tx|rx|both))\s+(?P<id>none)'
    )
    mirror_destination_re = (
        r'Destination:\s+(?P<type>\w+)\s+(?P<id>\S+)'
    )

    result = {}

    if re.match(mirror_list_header_re, raw_result, re.IGNORECASE):
        # 'show mirror' list output: build {name: {name, status}} per row.
        for line in raw_result.splitlines():
            re_result = re.search(mirror_list_re, line)
            if re_result:
                partial = re_result.groupdict()
                result[partial['name']] = partial
    else:
        # 'show mirror <name>' detail output.
        re_result = re.match(mirror_re, raw_result)
        if re_result:
            result = re_result.groupdict()
            # Convert purely numeric fields (the output counters) to int.
            for key, value in result.items():
                if value and value.isdigit():
                    result[key] = int(value)
            # Collect every matching source line under either layout.
            result['source'] = []
            for line in raw_result.splitlines():
                re_result = re.search(mirror_sorce_re, line)
                if re_result:
                    partial = re_result.groupdict()
                    result['source'].append(partial)
                re_result = re.search(mirror_sorce_re2, line)
                if re_result:
                    partial = re_result.groupdict()
                    result['source'].append(partial)
            # The last matching destination line wins (overwrites the list).
            result['destination'] = []
            for line in raw_result.splitlines():
                re_result = re.search(mirror_destination_re, line)
                if re_result:
                    partial = re_result.groupdict()
                    result['destination'] = partial
    if result == {}:
        # Nothing parsed: distinguish error messages from "no sessions";
        # any other unparsed input falls through and returns None.
        if 'Invalid mirror session' in raw_result:
            return "Invalid"
        if 'No mirror' in raw_result:
            return "None"
    else:
        return result
ae4d86a1a5bc3ff363dbff62c313f18538f6b126
468,826
def atom(text):
    """Parse text into a single float or int or str."""
    try:
        value = float(text)
        rounded = round(value)
        # Collapse whole-number floats to ints.
        return rounded if rounded == value else value
    except ValueError:
        # Not numeric (or not roundable, e.g. NaN); keep the string.
        return text
f8d3856c7864a1f6a07ad0c9e8a6cd7f2f16ac8b
45,033
def compute_trajectory(cpu_status, mem_status):
    """Combine resource statuses into a single alert level.

    Args:
        cpu_status: Keyword 'High', 'Normal' or 'Low' for the cpu status.
        mem_status: Keyword 'High', 'Normal' or 'Low' for the memory status.

    Returns:
        'High' or 'Low' only when both resources agree on that extreme;
        otherwise 'Normal'.
    """
    statuses = (cpu_status, mem_status)
    for extreme in ("High", "Low"):
        if statuses == (extreme, extreme):
            return extreme
    return "Normal"
6986eb577e5322e67d0917250048a00841d47297
576,330
def _profile_function(function, profiles, game): """Map a profile function over profiles""" return [function(game, prof) for prof in profiles]
540831714c29380407f04f2d50ecf883da585bbd
520,792
def allowed_file(filename, allowed_exts):
    """Check whether a filename's extension is in the allowed collection.

    The extension is the text after the last dot, compared
    case-insensitively; a name without a dot is never allowed.
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in allowed_exts
342eb59a3095de34698025e4c34ce272219bce60
203,239
import torch


def expand_many(x, axes):
    """Unsqueeze ``x`` once for each axis in ``axes``, in order."""
    result = x
    for axis in axes:
        result = torch.unsqueeze(result, axis)
    return result
22ef04759db98be7d7eeeab85fd468dfa257b392
64,852
def fail(exc_type, exc_val, exc_tb):
    """
    Transitions the execution to fail with a specific error.

    This will prompt the execution of any RequestTemplate.after_exception
    hooks.

    Args:
        exc_type: The exception class.
        exc_val: The exception object.
        exc_tb: The exception's stacktrace.
    """
    # Return a deferred action; the transition happens only when the
    # returned callable is invoked with the current state.
    return lambda state: state.fail(exc_type, exc_val, exc_tb)
d53f6a6047abb9608f661114f248431fbeb8e7e7
473,973
from typing import Callable


def item_getter(*args, default=None) -> Callable:
    """
    Get a function that gets multiple items.

    Similar to `operator.itemgetter` but always returns a tuple,
    and supports a default for missing keys.
    """
    def getter(mapping):
        # Look up every requested key, substituting ``default`` when absent.
        values = []
        for key in args:
            values.append(mapping.get(key, default))
        return tuple(values)
    return getter
958c659493683bc83f02b20089ba2d16790d2a3e
528,390
def diffNumAbs(arg1: int, arg2: int) -> int:
    """Return the absolute value of the difference of the two arguments.

    Example:
        >>> diffNumAbs(4, 6)
        2
    """
    # |a - b| == |b - a|, so the operand order is irrelevant.
    return abs(arg2 - arg1)
c0ac15cfe8b57213f22e39c6fc4e5465c63f6942
56,539
def predict(model, test_x):
    """
    Predict labels for testing samples using a trained model.

    Parameters
    ----------
    model: trained model exposing a ``predict`` method
    test_x: features of testing data
    """
    predictions = model.predict(test_x)
    return predictions
4c23431eebb47b7dd22bc7ed42c6762fbd3b7295
293,777
from typing import OrderedDict


def transform_deploymentParam(result):
    """Transform a parameter dict into a row for a table.

    Every field is looked up defensively: a missing key yields an empty
    string.  (Previously a missing 'description' raised KeyError while
    every other field was guarded.)
    """
    return OrderedDict([
        ('Description', result.get('description', '')),
        ('Name', result.get('name', '')),
        ('Type', result.get('type', '')),
        # Only report whether values are restricted; don't list them.
        ('Lists Allowed Values', 'Yes' if 'allowedValues' in result else 'No'),
        ('Default', result.get('defaultValue', '')),
    ])
cc6fa4fc80ad18df16731aab6272bade96dd3120
512,945
import pytz


def validate_timezone(zone):
    """Return an IETF timezone from the given IETF zone or common abbreviation.

    If the length of the zone is 4 or less, it will be upper-cased before
    being looked up; otherwise it will be title-cased. This is the expected
    case-insensitivity behavior in the majority of cases. For example,
    ``'edt'`` and ``'america/new_york'`` will both return
    ``'America/New_York'``.

    If the zone is not valid, ``ValueError`` will be raised. If ``pytz`` is
    not available, and the given zone is anything other than ``'UTC'``,
    ``ValueError`` will be raised.
    """
    # None passes through unchanged ("no timezone configured").
    if zone is None:
        return None

    if not pytz:
        # NOTE(review): this branch assumes the enclosing module sets
        # ``pytz`` to a falsy value (e.g. None) when the import fails --
        # a plain ``import pytz`` would raise instead.  Confirm against
        # the module's import guard.
        if zone.upper() != 'UTC':
            raise ValueError('Only UTC available, since pytz is not installed.')
        else:
            return zone

    # Accept "City, Region" style input by reversing it into "Region/City"
    # and replacing spaces with underscores
    # (e.g. "New York, America" -> "America/New_York").
    zone = '/'.join(reversed(zone.split(', '))).replace(' ', '_')
    if len(zone) <= 4:
        # Short zones are abbreviations such as 'utc'.
        zone = zone.upper()
    else:
        zone = zone.title()

    if zone in pytz.all_timezones:
        return zone
    else:
        raise ValueError("Invalid time zone.")
41673d0f70ead61b74a186206d198f2278ace0b7
647,733
def abs2(x):
    """Return the squared magnitude of a complex array: re^2 + im^2."""
    re, im = x.real, x.imag
    return re * re + im * im
981dc43c1b1f4cf6ac882b23624cbb022fde7c10
549,757
def patched_file_serialize(mocker):
    """Patch the function that deserializes file contents to website content."""
    target = "content_sync.backends.github.deserialize_file_to_website_content"
    return mocker.patch(target)
477be05acd3f44088ee2d11ad627658419ffd2f0
581,834
import base64


def encode(a):
    """Base64 encode a numpy array."""
    # numpy >= 1.9 provides tobytes(); older releases only have tostring().
    if hasattr(a, 'tobytes'):
        raw = a.tobytes()
    else:
        raw = a.tostring()
    return base64.b64encode(raw).decode('utf-8')
a73ea9b683b6399142235d9fcb9153e0b9d31f89
410,194
def dt_to_hms(td):
    """Split a datetime.timedelta into days, hours, minutes and seconds."""
    # td.seconds is always in [0, 86400); divmod peels off each unit.
    total_minutes, seconds = divmod(td.seconds, 60)
    hours, minutes = divmod(total_minutes, 60)
    return td.days, hours, minutes, seconds
d58ac27a77690f60fff594bdae300576f46f8774
387,870
from typing import Any
from contextlib import suppress


def safe_get(key, obj, default=None) -> Any:
    """
    Safely retrieve ``key`` from ``obj`` without raising.

    Args:
        key: index, mapping key or attribute name to look up
        obj: the target container/object
        default: value to return on failure

    Returns:
        The value of the requested key on success, otherwise ``default``.

    PyDocTests
    >>> safe_get("str-val", 'some-key', "nop")
    'nop'
    >>> safe_get(1, [33, 44, 55, 66], default="sdf")
    44
    """
    res = default
    _primitives_ = (bool, int, float, str)
    if obj is None or isinstance(obj, _primitives_):
        # skip primitives
        res = default
    elif isinstance(key, int) and isinstance(obj, list):
        # return list item by index (bounds-checked)
        if 0 <= key < len(obj):
            res = obj[key]
    elif isinstance(obj, dict):
        # return dict item by key
        res = obj.get(key, default)
    elif hasattr(obj, 'get'):
        # duck-typed mapping; any lookup error falls back to default
        with suppress(Exception):
            res = obj.get(key)
    elif isinstance(key, str) and hasattr(obj, key):
        # attribute access by name
        res = getattr(obj, key)
    else:
        # last resort: subscription, swallowing any lookup error
        with suppress(Exception):
            res = obj[key]
    return res
21929399b123f46e7a24289052b1e9a9bbc71fa0
648,116
import json


def read_configuration_file(path):
    """Read the JSON configuration file at ``path`` and return the parsed object."""
    with open(path, "r") as handle:
        config = json.load(handle)
    return config
425cbb9291cba0e4f54ce55f3bab681a322978ec
162,306
def _get_token(credential) -> str: """Extract token from a azure.identity object.""" token = credential.modern.get_token("https://management.azure.com/.default") return token.token
f11193743b91b1b7bf7fe0025c4e65d20410a4ab
475,661
def aggregate_customers_sessions(sessions):
    """
    Aggregate what products customers interacted with into a final,
    deduplicated list of hits.

    Args
    ----
    sessions: list of list of dicts
        Each element is a list of dicts of type [{'action': '', 'sku': ''}].

    Returns
    -------
    results: list of dicts
        Hits aggregated on the sku and action level; repeating entries
        are filtered out.
    """
    unique_hits = set()
    for session in sessions:
        for hit in session:
            # Dicts aren't hashable, so dedupe on the item tuples.
            unique_hits.add(tuple(hit.items()))
    return [dict(items) for items in unique_hits]
d500b11d66f26f4ba0ec99004ec84035a1f9b536
192,004
def sftp_prefix(config):
    """Generate SFTP URL prefix from host/credential config."""
    auth = ''
    if config['username'] and config['password']:
        auth = '%s:%s@' % (config['username'], config['password'])
    elif config['username']:
        auth = '%s@' % config['username']
    # Only spell out a non-default (non-22) port.
    port = ':%d' % config['port'] if config['port'] and config['port'] != 22 else ''
    return 'sftp://%s%s%s/' % (auth, config['host'], port)
225ae15212f7024590b1aa91f3ad7a32594cb9c3
41,250
def rgb_to_ansi256(r, g, b):
    """Convert an RGB triple to the nearest ANSI 256-color index."""
    if r == g == b:
        # Greyscale: map onto the 24-step grey ramp (232-255), snapping
        # the extremes to pure black (16) and white (231).
        if r < 8:
            return 16
        if r > 248:
            return 231
        return round(((r - 8) / 247.0) * 24) + 232

    def level(channel):
        # Quantize a channel into the 6 levels of the color cube.
        return round(channel / 255.0 * 5.0)

    # Color cube starts at index 16; 6x6x6 layout.
    return 16 + 36 * level(r) + 6 * level(g) + level(b)
bf5912b0f0be5b3c60e7ef7803d1e7f73c0573ff
400,320
import functools
import time


def retry(exception_check, tries=3, delay=1, backoff=1):
    """Retry calling the decorated function using an exponential backoff.

    original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry

    :param exception_check: ``Exception || Tuple`` the exception to check.
        may be a tuple of exceptions to check
    :param tries: ``int`` number of times to try (not retry) before giving up
    :param delay: ``int`` initial delay between retries in seconds
    :param backoff: ``int`` backoff multiplier e.g. value of 2 will double
        the delay each retry
    """
    def decorate(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            remaining, wait = tries, delay
            # Retry all but the final attempt, sleeping between attempts.
            while remaining > 1:
                try:
                    return func(*args, **kwargs)
                except exception_check:
                    time.sleep(wait)
                    remaining -= 1
                    wait *= backoff
            # Final attempt: let any exception propagate to the caller.
            return func(*args, **kwargs)
        return wrapper
    return decorate
ae5f7616320fb64207e6c2aab3acc05a5e559c5a
472,264
import json


def read_json(file_path: str) -> dict:
    """Read a json file and return the parsed dict."""
    with open(file_path) as handle:
        content = json.load(handle)
    return content
93df73379300052dbecc6706050b0779ac568b98
668,236
def sieve_of_eratosthenes(end: int, start: int = 2) -> list[int]:
    """Compute all primes between certain bounds.

    :param end: upper bound for primes (inclusive)
    :param start: lower bound for primes (inclusive)
    :return: list of all primes between start and end
    """
    if end < 2:
        # No primes below 2.
        return []
    prime = [True] * (end + 1)
    # 0 and 1 are not prime; without this, start < 2 wrongly reported them.
    prime[0] = prime[1] = False
    p = 2
    while p * p <= end:
        if prime[p]:
            # Cross out from p*p: smaller multiples were already handled
            # by smaller primes.
            for multiple in range(p * p, end + 1, p):
                prime[multiple] = False
        p += 1
    return [n for n, is_prime in enumerate(prime) if is_prime and n >= start]
24523c3037ea5671352a8baab83298908b6c26d8
268,012
def _ParseIssueReferences(issue_ref_list): """Parses a list of issue references into a tuple of IDs added/removed. For example: [ "alpha:7", "beta:8", "-gamma:9" ] => ([ "7", "8" ], [ "9" ]) NOTE: We don't support cross-project issue references. Rather we just assume the issue reference is within the same project. """ added = [] removed = [] for proj in issue_ref_list: parts = proj.split(":") proj_id = parts[1] if len(parts) >= 2 else proj[1:] if proj[0] != "-": added.append(proj_id) else: removed.append(proj_id) return added, removed
a8c8ebea8ebd289c84bd34bfdc064b8b90daf830
689,214
def get_duplicates(iterable):
    """Return a set of the elements which appear multiple times in iterable."""
    seen = set()
    duplicates = set()
    for element in iterable:
        # A second (or later) sighting marks the element as a duplicate.
        (duplicates if element in seen else seen).add(element)
    return duplicates
7e14f37a3819c6d7fe28c577173ba91aae2b687f
631,773
def SimpleElement(tag, value):
    """
    Args:
        tag: xml tag name
        value: character data

    Returns:
        XML: <tag>value</tag> followed by a newline
    """
    # NOTE: value is not XML-escaped; callers must pass safe character data.
    return '<{0}>{1}</{0}>\n'.format(tag, value)
8684d661f0fbf04c6d6cb4153041201378a258cc
65,422
def _escape_html(text): """Escape text for inclusion in html""" return ( text.replace("&", "&amp;") .replace("<", "&lt;") .replace(">", "&gt;") .replace(" ", "&nbsp;") )
8f194b03630f64fcc7f64ff26b22c2f80f8fba2b
459,950